spin_unlock(&mark->lock);
+ if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
+ iput(inode);
/* release lock temporarily */
mutex_unlock(&group->mark_mutex);
spin_lock(&destroy_lock);
list_add(&mark->destroy_list, &destroy_list);
spin_unlock(&destroy_lock);
wake_up(&destroy_waitq);
+ /*
+ * We don't necessarily have a ref on mark from caller so the above destroy
+ * may have actually freed it, unless this group provides a 'freeing_mark'
+ * function which must be holding a reference.
+ */
/*
 * Some groups like to know that marks are being freed.  This is a
 * callback to the group function to let it know that this mark
 * is being freed.
 */
if (group->ops->freeing_mark)
	group->ops->freeing_mark(mark, group);
/*
 * We would also like to update the inode's child dentry flags here,
 * but the inode may no longer exist by this point; __fsnotify_parent
 * will update them lazily on the next event instead. It is just a
 * lazy update (and could be a perf win...).
 */
- if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
- iput(inode);
- /*
- * We don't necessarily have a ref on mark from caller so the above iput
- * may have already destroyed it. Don't touch from now on.
- */
-
atomic_dec(&group->num_marks);
- mutex_lock(&group->mark_mutex);
+ mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
}
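As the new comment above spells out, once the mark has been queued on destroy_list and mark_mutex has been dropped, the caller may no longer assume the mark is alive. A caller that still needs to touch the mark afterwards must pin it with a reference first. A minimal sketch of that pattern, using the existing fsnotify refcount helpers and assuming group->mark_mutex is already held:

    fsnotify_get_mark(mark);                   /* pin: mark cannot be freed under us */
    fsnotify_destroy_mark_locked(mark, group); /* drops and re-takes group->mark_mutex */
    fsnotify_put_mark(mark);                   /* drop pin; mark may be freed from here on */

This is exactly the sequence the reworked fsnotify_clear_marks_by_group_flags() below now uses.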
void fsnotify_destroy_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group)
{
- mutex_lock(&group->mark_mutex);
+ mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
fsnotify_destroy_mark_locked(mark, group);
mutex_unlock(&group->mark_mutex);
}
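The switch from mutex_lock() to mutex_lock_nested() with SINGLE_DEPTH_NESTING is a lockdep annotation, not a behavioural change: all mark_mutex instances share a single lock class, so lockdep would otherwise report a false deadlock when two of them are legitimately held at once (without CONFIG_DEBUG_LOCK_ALLOC the subclass argument is ignored entirely). A sketch of what the annotation permits; the two-group scenario here is illustrative only, not a call chain taken from this patch:

    /* Illustrative only: outer_group and inner_group are hypothetical. */
    mutex_lock(&outer_group->mark_mutex);
    /* Same lock class again; declare one intentional level of nesting. */
    mutex_lock_nested(&inner_group->mark_mutex, SINGLE_DEPTH_NESTING);
    /* ... work on both groups' marks ... */
    mutex_unlock(&inner_group->mark_mutex);
    mutex_unlock(&outer_group->mark_mutex);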
void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
					 unsigned int flags)
{
struct fsnotify_mark *lmark, *mark;
- LIST_HEAD(free_list);
- mutex_lock(&group->mark_mutex);
+ mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
if (mark->flags & flags) {
- list_add(&mark->free_g_list, &free_list);
- list_del_init(&mark->g_list);
fsnotify_get_mark(mark);
+ fsnotify_destroy_mark_locked(mark, group);
+ fsnotify_put_mark(mark);
}
}
mutex_unlock(&group->mark_mutex);
-
- list_for_each_entry_safe(mark, lmark, &free_list, free_g_list) {
- fsnotify_destroy_mark(mark, group);
- fsnotify_put_mark(mark);
- }
}
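With the temporary free_list gone, each matching mark is now destroyed in place: the reference taken before fsnotify_destroy_mark_locked() keeps the mark valid across the window where that function drops mark_mutex, so the list cursor held by list_for_each_entry_safe() stays safe, and fsnotify_put_mark() releases the pin afterwards. Callers select which marks die via the flag mask; for context, the all-marks wrapper in this file looks along these lines (quoted from memory of this kernel era, so treat as illustrative):

    /* Clear every mark in the group, whatever its type bits. */
    void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
    {
    	fsnotify_clear_marks_by_group_flags(group, (unsigned int)-1);
    }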
/*