fs/notify/inotify/inotify.c +13 −19

@@ -110,14 +110,10 @@ EXPORT_SYMBOL_GPL(get_inotify_watch);
 int pin_inotify_watch(struct inotify_watch *watch)
 {
 	struct super_block *sb = watch->inode->i_sb;
 
-	spin_lock(&sb_lock);
-	if (sb->s_count >= S_BIAS) {
-		atomic_inc(&sb->s_active);
-		spin_unlock(&sb_lock);
+	if (atomic_inc_not_zero(&sb->s_active)) {
 		atomic_inc(&watch->count);
 		return 1;
 	}
-	spin_unlock(&sb_lock);
 	return 0;
 }
@@ -518,16 +514,16 @@ EXPORT_SYMBOL_GPL(inotify_init_watch);
  * ->s_umount, which will almost certainly wait until the superblock is shut
  * down and the watch in question is pining for fjords. That's fine, but
  * there is a problem - we might have hit the window between ->s_active
- * getting to 0 / ->s_count - below S_BIAS (i.e. the moment when superblock
- * is past the point of no return and is heading for shutdown) and the
- * moment when deactivate_super() acquires ->s_umount. We could just do
- * drop_super() yield() and retry, but that's rather antisocial and this
- * stuff is luser-triggerable. OTOH, having grabbed ->s_umount and having
- * found that we'd got there first (i.e. that ->s_root is non-NULL) we know
- * that we won't race with inotify_umount_inodes(). So we could grab a
- * reference to watch and do the rest as above, just with drop_super() instead
- * of deactivate_super(), right? Wrong. We had to drop ih->mutex before we
- * could grab ->s_umount. So the watch could've been gone already.
+ * getting to 0 (i.e. the moment when superblock is past the point of no return
+ * and is heading for shutdown) and the moment when deactivate_super() acquires
+ * ->s_umount. We could just do drop_super() yield() and retry, but that's
+ * rather antisocial and this stuff is luser-triggerable. OTOH, having grabbed
+ * ->s_umount and having found that we'd got there first (i.e. that ->s_root is
+ * non-NULL) we know that we won't race with inotify_umount_inodes(). So we
+ * could grab a reference to watch and do the rest as above, just with
+ * drop_super() instead of deactivate_super(), right? Wrong. We had to drop
+ * ih->mutex before we could grab ->s_umount. So the watch could've been gone
+ * already.
  *
  * That still can be dealt with - we need to save watch->wd, do idr_find()
  * and compare its result with our pointer. If they match, we either have
@@ -565,14 +561,12 @@ static int pin_to_kill(struct inotify_handle *ih, struct inotify_watch *watch)
 	struct super_block *sb = watch->inode->i_sb;
 	s32 wd = watch->wd;
 
-	spin_lock(&sb_lock);
-	if (sb->s_count >= S_BIAS) {
-		atomic_inc(&sb->s_active);
-		spin_unlock(&sb_lock);
+	if (atomic_inc_not_zero(&sb->s_active)) {
 		get_inotify_watch(watch);
 		mutex_unlock(&ih->mutex);
 		return 1;	/* the best outcome */
 	}
+	spin_lock(&sb_lock);
 	sb->s_count++;
 	spin_unlock(&sb_lock);
 	mutex_unlock(&ih->mutex);	/* can't grab ->s_umount under it */
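The change in both pin_inotify_watch() and pin_to_kill() is the same: instead of taking sb_lock and inferring "still active" from a biased ->s_count, the code tries to bump ->s_active directly and treats failure as "the superblock is already past the point of no return". Below is a minimal userspace sketch of that inc-not-zero idiom, written with C11 atomics rather than the kernel's atomic_t; the names inc_not_zero, live and dying are illustrative stand-ins, not kernel API.

/*
 * Userspace sketch of the inc-not-zero pinning idiom (C11 atomics, not the
 * kernel's atomic_t).  inc_not_zero() only takes a reference while the count
 * is still nonzero, which is the guarantee the old "s_count >= S_BIAS under
 * sb_lock" check used to provide.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool inc_not_zero(atomic_int *v)
{
	int old = atomic_load(v);

	/* CAS loop: bump the count only if it hasn't already dropped to 0. */
	while (old != 0)
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return true;	/* pinned: teardown can't complete under us */
	return false;			/* already heading for shutdown */
}

int main(void)
{
	atomic_int live, dying;

	atomic_init(&live, 1);		/* superblock still has an active reference */
	atomic_init(&dying, 0);		/* the last active reference is already gone */

	printf("pin live sb:  %s\n", inc_not_zero(&live) ? "ok" : "failed");
	printf("pin dying sb: %s\n", inc_not_zero(&dying) ? "ok" : "failed");
	return 0;
}

In the kernel proper, atomic_inc_not_zero() is effectively the same loop built on atomic_add_unless(), so both call sites shed the sb_lock round-trip entirely.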
fs/super.c +11 −17

@@ -93,7 +93,7 @@ static struct super_block *alloc_super(struct file_system_type *type)
 		 * subclass.
 		 */
 		down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
-		s->s_count = S_BIAS;
+		s->s_count = 1;
 		atomic_set(&s->s_active, 1);
 		mutex_init(&s->s_vfs_rename_mutex);
 		mutex_init(&s->s_dquot.dqio_mutex);
@@ -189,9 +189,7 @@ void put_super(struct super_block *sb)
 void deactivate_super(struct super_block *s)
 {
 	struct file_system_type *fs = s->s_type;
-	if (atomic_dec_and_lock(&s->s_active, &sb_lock)) {
-		s->s_count -= S_BIAS-1;
-		spin_unlock(&sb_lock);
+	if (atomic_dec_and_test(&s->s_active)) {
 		vfs_dq_off(s, 0);
 		down_write(&s->s_umount);
 		fs->kill_sb(s);
@@ -216,9 +214,7 @@ EXPORT_SYMBOL(deactivate_super);
 void deactivate_locked_super(struct super_block *s)
 {
 	struct file_system_type *fs = s->s_type;
-	if (atomic_dec_and_lock(&s->s_active, &sb_lock)) {
-		s->s_count -= S_BIAS-1;
-		spin_unlock(&sb_lock);
+	if (atomic_dec_and_test(&s->s_active)) {
 		vfs_dq_off(s, 0);
 		fs->kill_sb(s);
 		put_filesystem(fs);
@@ -243,21 +239,19 @@ EXPORT_SYMBOL(deactivate_locked_super);
  */
 static int grab_super(struct super_block *s) __releases(sb_lock)
 {
+	if (atomic_inc_not_zero(&s->s_active)) {
+		spin_unlock(&sb_lock);
+		down_write(&s->s_umount);
+		return 1;
+	}
+	/* it's going away */
 	s->s_count++;
 	spin_unlock(&sb_lock);
+	/* usually that'll be enough for it to die... */
 	down_write(&s->s_umount);
-	if (s->s_root) {
-		spin_lock(&sb_lock);
-		if (s->s_count > S_BIAS) {
-			atomic_inc(&s->s_active);
-			s->s_count--;
-			spin_unlock(&sb_lock);
-			return 1;
-		}
-		spin_unlock(&sb_lock);
-	}
 	up_write(&s->s_umount);
 	put_super(s);
+	/* ... but in case it wasn't, let's at least yield() */
 	yield();
 	return 0;
 }
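With ->s_active now a plain zero-terminated counter, fs/super.c pairs the two halves cleanly: the last deactivate_super() (atomic_dec_and_test) triggers kill_sb(), and grab_super() can only succeed (atomic_inc_not_zero) before that point. Here is a compact userspace model of that lifecycle, again using C11 atomics; struct sb, grab(), deactivate() and the dead flag are made-up stand-ins, with the flag playing the role of kill_sb() having run.

/*
 * Userspace model (illustrative only) of the active-reference lifecycle this
 * patch moves fs/super.c to: the last "deactivate" tears the object down, and
 * a "grab" can only succeed while teardown hasn't yet been decided.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct sb {
	atomic_int s_active;	/* active references; 0 == shutting down */
	bool dead;		/* stand-in for kill_sb() having run */
};

static bool inc_not_zero(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 0)
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return true;
	return false;
}

static bool grab(struct sb *s)		/* cf. grab_super() */
{
	return inc_not_zero(&s->s_active);
}

static void deactivate(struct sb *s)	/* cf. deactivate_super() */
{
	if (atomic_fetch_sub(&s->s_active, 1) == 1)
		s->dead = true;		/* last active ref gone: kill_sb() would run here */
}

int main(void)
{
	struct sb s = { .dead = false };

	atomic_init(&s.s_active, 1);	/* reference held by the mount itself */

	printf("grab while mounted:  %s\n", grab(&s) ? "ok" : "failed");
	deactivate(&s);			/* drop the grabbed reference */
	deactivate(&s);			/* drop the mount's reference -> teardown */
	printf("grab after teardown: %s (dead=%d)\n",
	       grab(&s) ? "ok" : "failed", (int)s.dead);
	return 0;
}

The passive ->s_count reference that grab_super() still takes in its failure path only keeps the struct super_block from being freed; it does not keep the filesystem mounted, which is why that path can take and release ->s_umount just to wait for the shutdown in progress, then put_super() and yield().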
include/linux/fs.h +0 −1

@@ -1314,7 +1314,6 @@ extern int send_sigurg(struct fown_struct *fown);
 
 extern struct list_head super_blocks;
 extern spinlock_t sb_lock;
-#define S_BIAS (1<<30)
 struct super_block {
 	struct list_head	s_list;		/* Keep this first */
 	dev_t			s_dev;		/* search index; _not_ kdev_t */
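For context, S_BIAS was how the old code multiplexed "still active" onto the passive reference count: alloc_super() started ->s_count at S_BIAS (1<<30), "s_count >= S_BIAS" meant the filesystem was still active, and once the active count hit zero deactivate_super() collapsed the bias into a single ordinary reference with s_count -= S_BIAS-1. With ->s_active carrying that information directly, the bias and its #define have nothing left to encode. A tiny standalone demo of the arithmetic the removed macro relied on (userspace, illustrative only):

#include <stdio.h>

#define S_BIAS (1 << 30)

int main(void)
{
	int s_count = S_BIAS;	/* alloc_super(): active, no extra passive refs yet */

	s_count++;		/* someone takes a passive reference */
	printf("active: %d\n", s_count >= S_BIAS);		/* 1 */

	s_count -= S_BIAS - 1;	/* deactivate_super(): past the point of no return */
	printf("active: %d, passive refs: %d\n",
	       s_count >= S_BIAS, s_count);			/* 0, 2 */
	return 0;
}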