#include <linux/lglock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
+#include <linux/hardirq.h>
+#include <linux/task_work.h>
#include <linux/ima.h>
#include <linux/atomic.h>
static struct percpu_counter nr_files __cacheline_aligned_in_smp;
-static inline void file_free_rcu(struct rcu_head *head)
+static void file_free_rcu(struct rcu_head *head)
{
struct file *f = container_of(head, struct file, f_u.fu_rcuhead);
put_cred(f->f_cred);
kmem_cache_free(filp_cachep, f);
}
@@ ... @@ static void drop_file_write_access(struct file *file)
if (special_file(inode->i_mode))
return;
if (file_check_writeable(file) != 0)
return;
- mnt_drop_write(mnt);
+ __mnt_drop_write(mnt);
file_release_write(file);
}
@@ ... @@ static void __fput(struct file *file)
if (unlikely(file->f_flags & FASYNC)) {
if (file->f_op && file->f_op->fasync)
file->f_op->fasync(-1, file, 0);
}
fops_put(file->f_op);
put_pid(file->f_owner.pid);
- file_sb_list_del(file);
if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
i_readcount_dec(inode);
if (file->f_mode & FMODE_WRITE)
drop_file_write_access(file);
file->f_path.dentry = NULL;
file->f_path.mnt = NULL;
file_free(file);
dput(dentry);
mntput(mnt);
}
-void fput(struct file *file)
+static DEFINE_SPINLOCK(delayed_fput_lock);
+static LIST_HEAD(delayed_fput_list);
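+/*
+ * fput() done from interrupt context or from a kernel thread cannot
+ * run __fput() directly (it may block), so such files are parked on
+ * delayed_fput_list and drained from a workqueue in process context.
+ */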
+static void delayed_fput(struct work_struct *unused)
{
- if (atomic_long_dec_and_test(&file->f_count))
- __fput(file);
+ LIST_HEAD(head);
+ spin_lock_irq(&delayed_fput_lock);
+ list_splice_init(&delayed_fput_list, &head);
+ spin_unlock_irq(&delayed_fput_lock);
+ while (!list_empty(&head)) {
+ struct file *f = list_first_entry(&head, struct file, f_u.fu_list);
+ list_del_init(&f->f_u.fu_list);
+ __fput(f);
+ }
}
-EXPORT_SYMBOL(fput);
-
-struct file *fget(unsigned int fd)
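+/*
+ * task_work callback for the common case; the file is already off the
+ * sb list at this point, so the f_u union's fu_rcuhead slot is free to
+ * reuse as the callback_head.
+ */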
+static void ____fput(struct callback_head *work)
{
- struct file *file;
- struct files_struct *files = current->files;
-
- rcu_read_lock();
- file = fcheck_files(files, fd);
- if (file) {
- /* File object ref couldn't be taken */
- if (file->f_mode & FMODE_PATH ||
- !atomic_long_inc_not_zero(&file->f_count))
- file = NULL;
- }
- rcu_read_unlock();
-
- return file;
+ __fput(container_of(work, struct file, f_u.fu_rcuhead));
}
-EXPORT_SYMBOL(fget);
-
-struct file *fget_raw(unsigned int fd)
+/*
+ * If a kernel thread really needs the final fput() it has done to
+ * complete, call this. The only user right now is the boot - we
+ * *do* need to make sure our writes to binaries on initramfs have
+ * not left us with open struct files waiting for __fput() - execve()
+ * won't work without that. Please don't add more callers without
+ * very good reasons; in particular, never call this with locks
+ * held and never call it from a thread that might need to do
+ * some work on any kind of umount.
+ */
+void flush_delayed_fput(void)
{
- struct file *file;
- struct files_struct *files = current->files;
-
- rcu_read_lock();
- file = fcheck_files(files, fd);
- if (file) {
- /* File object ref couldn't be taken */
- if (!atomic_long_inc_not_zero(&file->f_count))
- file = NULL;
- }
- rcu_read_unlock();
-
- return file;
+ delayed_fput(NULL);
}
-EXPORT_SYMBOL(fget_raw);
+static DECLARE_WORK(delayed_fput_work, delayed_fput);
-/*
- * Lightweight file lookup - no refcnt increment if fd table isn't shared.
- *
- * You can use this instead of fget if you satisfy all of the following
- * conditions:
- * 1) You must call fput_light before exiting the syscall and returning control
- * to userspace (i.e. you cannot remember the returned struct file * after
- * returning to userspace).
- * 2) You must not call filp_close on the returned struct file * in between
- * calls to fget_light and fput_light.
- * 3) You must not clone the current task in between the calls to fget_light
- * and fput_light.
- *
- * The fput_needed flag returned by fget_light should be passed to the
- * corresponding fput_light.
- */
-struct file *fget_light(unsigned int fd, int *fput_needed)
+void fput(struct file *file)
{
- struct file *file;
- struct files_struct *files = current->files;
-
- *fput_needed = 0;
- if (atomic_read(&files->count) == 1) {
- file = fcheck_files(files, fd);
- if (file && (file->f_mode & FMODE_PATH))
- file = NULL;
- } else {
- rcu_read_lock();
- file = fcheck_files(files, fd);
- if (file) {
- if (!(file->f_mode & FMODE_PATH) &&
- atomic_long_inc_not_zero(&file->f_count))
- *fput_needed = 1;
- else
- /* Didn't get the reference, someone's freed */
- file = NULL;
+ if (atomic_long_dec_and_test(&file->f_count)) {
+ struct task_struct *task = current;
+ file_sb_list_del(file);
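+ /*
+ * No userspace return to hook here (interrupt context or a kernel
+ * thread), so hand the final __fput() off to the workqueue.
+ */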
+ if (unlikely(in_interrupt() || task->flags & PF_KTHREAD)) {
+ unsigned long flags;
+ spin_lock_irqsave(&delayed_fput_lock, flags);
+ list_add(&file->f_u.fu_list, &delayed_fput_list);
+ schedule_work(&delayed_fput_work);
+ spin_unlock_irqrestore(&delayed_fput_lock, flags);
+ return;
}
- rcu_read_unlock();
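+ /*
+ * Common case: run __fput() via task_work when this task returns
+ * to userspace. Note: task_work_add() can fail for an exiting
+ * task; a fallback to the delayed list would be needed to cover
+ * that.
+ */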
+ init_task_work(&file->f_u.fu_rcuhead, ____fput);
+ task_work_add(task, &file->f_u.fu_rcuhead, true);
}
-
- return file;
}
-struct file *fget_raw_light(unsigned int fd, int *fput_needed)
+/*
+ * Synchronous analog of fput(); use it from kernel threads that might
+ * be needed in some umount() (and thus can't use flush_delayed_fput()
+ * without risking deadlocks), that need to wait for __fput() to
+ * complete, and that know this specific struct file won't involve
+ * anything they would be needed for. Use only if you really need it -
+ * at the very least, don't blindly convert fput() in a kernel thread
+ * to this.
+ */
+void __fput_sync(struct file *file)
{
- struct file *file;
- struct files_struct *files = current->files;
-
- *fput_needed = 0;
- if (atomic_read(&files->count) == 1) {
- file = fcheck_files(files, fd);
- } else {
- rcu_read_lock();
- file = fcheck_files(files, fd);
- if (file) {
- if (atomic_long_inc_not_zero(&file->f_count))
- *fput_needed = 1;
- else
- /* Didn't get the reference, someone's freed */
- file = NULL;
- }
- rcu_read_unlock();
+ if (atomic_long_dec_and_test(&file->f_count)) {
+ struct task_struct *task = current;
+ file_sb_list_del(file);
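+ /* only kernel threads may use this; everyone else goes through fput() */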
+ BUG_ON(!(task->flags & PF_KTHREAD));
+ __fput(file);
}
-
- return file;
}
+EXPORT_SYMBOL(fput);
+
void put_filp(struct file *file)
{
if (atomic_long_dec_and_test(&file->f_count)) {
security_file_free(file);
file_sb_list_del(file);
file_free(file);
}
}
@@ ... @@ void mark_files_ro(struct super_block *sb)
{
struct file *f;
-retry:
lg_global_lock(&files_lglock);
do_file_list_for_each_entry(sb, f) {
- struct vfsmount *mnt;
if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
continue;
if (!file_count(f))
continue;
if (file_check_writeable(f) != 0)
continue;
file_release_write(f);
- mnt = mntget(f->f_path.mnt);
- /* This can sleep, so we can't hold the spinlock. */
- lg_global_unlock(&files_lglock);
- mnt_drop_write(mnt);
- mntput(mnt);
- goto retry;
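+ /*
+ * Unlike the old mntget()/mnt_drop_write() dance, this does not
+ * sleep, so files_lglock can stay held across the call.
+ */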
+ mnt_drop_write_file(f);
} while_file_list_for_each_entry;
lg_global_unlock(&files_lglock);
}