static void do_thaw_all(struct work_struct *work)
{
-	struct super_block *sb;
+	struct super_block *sb, *n;
	char b[BDEVNAME_SIZE];

	spin_lock(&sb_lock);
-restart:
-	list_for_each_entry(sb, &super_blocks, s_list) {
+	list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
			printk(KERN_WARNING "Emergency Thaw on %s\n",
			       bdevname(sb->s_bdev, b));
		up_read(&sb->s_umount);
		spin_lock(&sb_lock);
-		if (__put_super_and_need_restart(sb))
-			goto restart;
+		__put_super(sb);
	}
	spin_unlock(&sb_lock);
	kfree(work);
	printk(KERN_WARNING "Emergency Thaw complete\n");
}
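
The change is the same in every function this patch touches: list_for_each_entry_safe() keeps a second cursor, n, which is read before the loop body runs, so the body may drop its reference with __put_super(), which can unlink and free the current superblock, without derailing the walk. That is the situation the removed restart protocol existed to handle. As a minimal illustration of the cached-next idea, here is a standalone user-space sketch; it uses a hand-rolled list rather than the kernel's <linux/list.h> and is not part of the patch:

/* Standalone illustration only, not part of the patch.  The "n" cursor
 * plays the role of the second argument to list_for_each_entry_safe(). */
#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

int main(void)
{
	struct node *head = NULL;

	/* build the list 1 -> 2 -> 3 */
	for (int v = 3; v >= 1; v--) {
		struct node *e = malloc(sizeof(*e));
		e->val = v;
		e->next = head;
		head = e;
	}

	/* "safe" walk: the next pointer is saved before the body runs,
	 * so freeing the current node cannot break the iteration */
	struct node *cur, *n;
	for (cur = head; cur; cur = n) {
		n = cur->next;
		printf("visiting %d\n", cur->val);
		free(cur);	/* like __put_super() dropping the last ref */
	}
	return 0;
}
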
static void drop_pagecache(void)
{
-	struct super_block *sb;
+	struct super_block *sb, *n;

	spin_lock(&sb_lock);
-restart:
-	list_for_each_entry(sb, &super_blocks, s_list) {
+	list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		if (sb->s_root)
			drop_pagecache_sb(sb);
		up_read(&sb->s_umount);
		spin_lock(&sb_lock);
-		if (__put_super_and_need_restart(sb))
-			goto restart;
+		__put_super(sb);
	}
	spin_unlock(&sb_lock);
}
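
Note the ordering that every converted body shares, and that the conversion preserves: the reference is taken while sb_lock is still held, so there is no window in which the superblock is neither locked nor pinned, and the reference is dropped only after the lock has been retaken. Restated as an annotated skeleton (code pattern from the hunks above, comments mine):

	sb->s_count++;			/* pin sb while sb_lock is still held */
	spin_unlock(&sb_lock);		/* the work below may sleep           */
	down_read(&sb->s_umount);	/* keep umount out during the work    */
	/* ... per-superblock work: thaw, drop pagecache, quota sync ... */
	up_read(&sb->s_umount);
	spin_lock(&sb_lock);
	__put_super(sb);		/* unpin under sb_lock; may free sb   */
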
static int quota_sync_all(int type)
{
-	struct super_block *sb;
+	struct super_block *sb, *n;
	int ret;

	if (type >= MAXQUOTAS)
		return -EINVAL;
	ret = security_quotactl(Q_SYNC, type, 0, NULL);
	if (ret)
		return ret;

	spin_lock(&sb_lock);
-restart:
-	list_for_each_entry(sb, &super_blocks, s_list) {
+	list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		if (!sb->s_qcop || !sb->s_qcop->quota_sync)
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		if (sb->s_root)
			sb->s_qcop->quota_sync(sb, type, 1);
		up_read(&sb->s_umount);
		spin_lock(&sb_lock);
-		if (__put_super_and_need_restart(sb))
-			goto restart;
+		__put_super(sb);
	}
	spin_unlock(&sb_lock);

	return 0;
}
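
The other ingredient is the s_instances filter at the top of each loop. By the point of this series, a superblock that has been shut down is unhooked from its file_system_type but stays on super_blocks until the last s_count reference is dropped, so list walkers must recognize and skip these half-dead entries (annotation mine):

	if (list_empty(&sb->s_instances))	/* sb already shut down:  */
		continue;			/* still listed, but dead */
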
*/
void sync_supers(void)
{
-	struct super_block *sb;
+	struct super_block *sb, *n;

	spin_lock(&sb_lock);
-restart:
-	list_for_each_entry(sb, &super_blocks, s_list) {
+	list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		if (sb->s_op->write_super && sb->s_dirt) {
			sb->s_count++;
			spin_unlock(&sb_lock);

			down_read(&sb->s_umount);
			if (sb->s_root && sb->s_dirt)
				sb->s_op->write_super(sb);
			up_read(&sb->s_umount);

			spin_lock(&sb_lock);
-			if (__put_super_and_need_restart(sb))
-				goto restart;
+			__put_super(sb);
		}
	}
	spin_unlock(&sb_lock);
}
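
One wrinkle specific to sync_supers(): between dropping sb_lock and acquiring s_umount the filesystem may be unmounted, or someone else may have written it out, so the cheap test made under sb_lock is repeated under s_umount before write_super() is called (comments mine, code as in the hunk above; the elided lines are the usual pin/unpin dance):

	if (sb->s_op->write_super && sb->s_dirt) {	/* cheap test under sb_lock   */
		...
		down_read(&sb->s_umount);
		if (sb->s_root && sb->s_dirt)		/* recheck: mounted and still */
			sb->s_op->write_super(sb);	/* dirty once we can sleep    */
		...
	}
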