writeback: improve scalability of bdi writeback work queues
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 59b3ee63b624864d79ce421b8f368a65c5615c15..6bca6f8176f0d8f105be5379b31837bf85f27da1 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -49,15 +49,15 @@ struct wb_writeback_args {
  * Work items for the bdi_writeback threads
  */
 struct bdi_work {
-       struct list_head list;
-       struct rcu_head rcu_head;
+       struct list_head list;          /* pending work list */
+       struct rcu_head rcu_head;       /* for RCU free/clear of work */
 
-       unsigned long seen;
-       atomic_t pending;
+       unsigned long seen;             /* threads that have seen this work */
+       atomic_t pending;               /* number of threads still to do work */
 
-       struct wb_writeback_args args;
+       struct wb_writeback_args args;  /* writeback arguments */
 
-       unsigned long state;
+       unsigned long state;            /* flag bits, see WS_* */
 };
 
 enum {
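
The seen/pending pair is the scalability mechanism of this patch: ->seen starts out with one bit set per writeback thread on the device, and each thread clears its own bit when it notices the work, while ->pending counts threads down as they finish, the last one triggering the RCU-deferred free (or, for on-stack work, the WS_USED clear). A minimal userspace model of the completion half, using C11 atomics in place of the kernel's atomic_t and bitops (an illustrative sketch; the struct and function names here are made up):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct work_model {
            atomic_ulong seen;      /* bit per thread yet to notice the work */
            atomic_int pending;     /* threads yet to complete the work */
    };

    static struct work_model *work_alloc(unsigned int nthreads)
    {
            struct work_model *w = malloc(sizeof(*w));

            atomic_init(&w->seen, (1UL << nthreads) - 1);
            atomic_init(&w->pending, (int)nthreads);
            return w;
    }

    /* called once per thread when it has finished the work; the last
     * arrival frees it (the kernel defers this through RCU instead) */
    static void work_complete(struct work_model *w)
    {
            if (atomic_fetch_sub(&w->pending, 1) == 1)
                    free(w);
    }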
@@ -74,14 +74,10 @@ static inline bool bdi_work_on_stack(struct bdi_work *work)
 }
 
 static inline void bdi_work_init(struct bdi_work *work,
-                                struct writeback_control *wbc)
+                                struct wb_writeback_args *args)
 {
        INIT_RCU_HEAD(&work->rcu_head);
-       work->args.sb = wbc->sb;
-       work->args.nr_pages = wbc->nr_to_write;
-       work->args.sync_mode = wbc->sync_mode;
-       work->args.range_cyclic = wbc->range_cyclic;
-       work->args.for_kupdate = 0;
+       work->args = *args;
        work->state = WS_USED;
 }
 
@@ -155,10 +151,10 @@ static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
        BUG_ON(!bdi->wb_cnt);
 
        /*
-        * Make sure stores are seen before it appears on the list
+        * list_add_tail_rcu() contains the necessary barriers to
+        * make sure the above stores are seen before the item is
+        * noticed on the list
         */
-       smp_mb();
-
        spin_lock(&bdi->wb_lock);
        list_add_tail_rcu(&work->list, &bdi->work_list);
        spin_unlock(&bdi->wb_lock);
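
The explicit smp_mb() can go because list_add_tail_rcu() publishes the new entry with rcu_assign_pointer(), whose release semantics already order bdi_work_init()'s stores before the entry becomes reachable. The same publish/consume pattern in portable C11 atomics (a sketch of the ordering argument, not the kernel's list implementation):

    #include <stdatomic.h>

    struct item {
            int payload;
    };

    static _Atomic(struct item *) head;

    /* writer: plain initialization, then a release publication; this is
     * the ordering rcu_assign_pointer() provides inside list_add_tail_rcu() */
    static void publish(struct item *it)
    {
            it->payload = 42;
            atomic_store_explicit(&head, it, memory_order_release);
    }

    /* reader: the acquire load pairs with the release store, so a non-NULL
     * result is guaranteed fully initialized (the kernel reader is
     * list_for_each_entry_rcu() under rcu_read_lock()) */
    static struct item *fetch(void)
    {
            return atomic_load_explicit(&head, memory_order_acquire);
    }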
@@ -194,7 +190,7 @@ static void bdi_wait_on_work_clear(struct bdi_work *work)
 }
 
 static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
-                                struct writeback_control *wbc)
+                                struct wb_writeback_args *args)
 {
        struct bdi_work *work;
 
@@ -204,7 +200,7 @@ static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
         */
        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (work) {
-               bdi_work_init(work, wbc);
+               bdi_work_init(work, args);
                bdi_queue_work(bdi, work);
        } else {
                struct bdi_writeback *wb = &bdi->wb;
@@ -214,24 +210,54 @@ static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
        }
 }
 
-void bdi_start_writeback(struct writeback_control *wbc)
+/**
+ * bdi_sync_writeback - start and wait for writeback
+ * @bdi: the backing device to write from
+ * @sb: write inodes from this super_block
+ *
+ * Description:
+ *   This does WB_SYNC_ALL data integrity writeback and waits for the
+ *   IO to complete. Callers must hold the sb s_umount semaphore for
+ *   reading, to avoid having the super disappear before we are done.
+ */
+static void bdi_sync_writeback(struct backing_dev_info *bdi,
+                              struct super_block *sb)
 {
-       /*
-        * WB_SYNC_NONE is opportunistic writeback. If this allocation fails,
-        * bdi_queue_work() will wake up the thread and flush old data. This
-        * should ensure some amount of progress in freeing memory.
-        */
-       if (wbc->sync_mode != WB_SYNC_ALL)
-               bdi_alloc_queue_work(wbc->bdi, wbc);
-       else {
-               struct bdi_work work;
+       struct wb_writeback_args args = {
+               .sb             = sb,
+               .sync_mode      = WB_SYNC_ALL,
+               .nr_pages       = LONG_MAX,
+               .range_cyclic   = 0,
+       };
+       struct bdi_work work;
 
-               bdi_work_init(&work, wbc);
-               work.state |= WS_ONSTACK;
+       bdi_work_init(&work, &args);
+       work.state |= WS_ONSTACK;
 
-               bdi_queue_work(wbc->bdi, &work);
-               bdi_wait_on_work_clear(&work);
-       }
+       bdi_queue_work(bdi, &work);
+       bdi_wait_on_work_clear(&work);
+}
+
+/**
+ * bdi_start_writeback - start writeback
+ * @bdi: the backing device to write from
+ * @nr_pages: the number of pages to write
+ *
+ * Description:
+ *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
+ *   started when this function returns; we make no guarantees on
+ *   completion. The caller need not hold the sb s_umount semaphore.
+ *
+ */
+void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
+{
+       struct wb_writeback_args args = {
+               .sync_mode      = WB_SYNC_NONE,
+               .nr_pages       = nr_pages,
+               .range_cyclic   = 1,
+       };
+
+       bdi_alloc_queue_work(bdi, &args);
 }
 
 /*
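
Since the work struct in bdi_sync_writeback() lives on the caller's stack, completion must not kfree() it; the WS_ONSTACK flag makes the final completer clear WS_USED and wake the submitter, which bdi_wait_on_work_clear() blocks on. A rough userspace equivalent of that handshake, with pthreads standing in for the kernel's wait_on_bit() machinery (names are illustrative):

    #include <pthread.h>
    #include <stdbool.h>

    struct sync_work {
            pthread_mutex_t lock;
            pthread_cond_t  done;
            bool            in_use;         /* stands in for WS_USED */
    };

    /* submitter: mirrors bdi_wait_on_work_clear() */
    static void wait_on_work_clear(struct sync_work *w)
    {
            pthread_mutex_lock(&w->lock);
            while (w->in_use)
                    pthread_cond_wait(&w->done, &w->lock);
            pthread_mutex_unlock(&w->lock);
    }

    /* final worker: mirrors the on-stack completion path, which clears
     * the flag and wakes the submitter instead of freeing the work */
    static void work_clear(struct sync_work *w)
    {
            pthread_mutex_lock(&w->lock);
            w->in_use = false;
            pthread_cond_signal(&w->done);
            pthread_mutex_unlock(&w->lock);
    }

bdi_start_writeback() is the fire-and-forget counterpart: bdi_alloc_queue_work() allocates the work with GFP_ATOMIC and the call returns as soon as it is queued; on allocation failure it falls back to waking the default writeback thread, so some old data still gets flushed.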
@@ -732,7 +758,11 @@ static long wb_writeback(struct bdi_writeback *wb,
 
 /*
  * Return the next bdi_work struct that hasn't been processed by this
- * wb thread yet
+ * wb thread yet. ->seen is initially set for each thread that exists
+ * for this device; when a thread first notices a piece of work, it
+ * clears its bit. Depending on writeback type, the thread will notify
+ * completion either on receiving the work (WB_SYNC_NONE) or after
+ * it is done (WB_SYNC_ALL).
  */
 static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
                                           struct bdi_writeback *wb)
@@ -742,8 +772,9 @@ static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
        rcu_read_lock();
 
        list_for_each_entry_rcu(work, &bdi->work_list, list) {
-               if (!test_and_clear_bit(wb->nr, &work->seen))
+               if (!test_bit(wb->nr, &work->seen))
                        continue;
+               clear_bit(wb->nr, &work->seen);
 
                ret = work;
                break;
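
Replacing test_and_clear_bit() with a test_bit()/clear_bit() pair is safe because bit wb->nr is only ever cleared by thread wb->nr itself, and it is where the title's scalability win comes from: threads whose bit in ->seen is already clear now take a plain read instead of a locked read-modify-write on the shared cacheline. The same idea as a C11 sketch (hypothetical helper):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* bit `nr` is only ever cleared by thread `nr`, so nothing can race
     * in between the test and the clear */
    static bool grab_work_bit(atomic_ulong *seen, unsigned int nr)
    {
            unsigned long mask = 1UL << nr;

            if (!(atomic_load(seen) & mask))
                    return false;           /* plain read, no locked cycle */
            atomic_fetch_and(seen, ~mask);  /* atomic RMW only when needed */
            return true;
    }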
@@ -854,8 +885,7 @@ int bdi_writeback_task(struct bdi_writeback *wb)
                }
 
                wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(wait_jiffies);
+               schedule_timeout_interruptible(wait_jiffies);
                try_to_freeze();
        }
 
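This hunk changes no behavior: schedule_timeout_interruptible() in kernel/timer.c is defined as exactly the two-line sequence it replaces:

    signed long __sched schedule_timeout_interruptible(signed long timeout)
    {
            __set_current_state(TASK_INTERRUPTIBLE);
            return schedule_timeout(timeout);
    }
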
@@ -863,23 +893,25 @@ int bdi_writeback_task(struct bdi_writeback *wb)
 }
 
 /*
- * Schedule writeback for all backing devices. Can only be used for
- * WB_SYNC_NONE writeback, WB_SYNC_ALL should use bdi_start_writeback()
- * and pass in the superblock.
+ * Schedule writeback for all backing devices. This does WB_SYNC_NONE
+ * writeback, for integrity writeback see bdi_sync_writeback().
  */
-static void bdi_writeback_all(struct writeback_control *wbc)
+static void bdi_writeback_all(struct super_block *sb, long nr_pages)
 {
+       struct wb_writeback_args args = {
+               .sb             = sb,
+               .nr_pages       = nr_pages,
+               .sync_mode      = WB_SYNC_NONE,
+       };
        struct backing_dev_info *bdi;
 
-       WARN_ON(wbc->sync_mode == WB_SYNC_ALL);
-
        rcu_read_lock();
 
        list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
                if (!bdi_has_dirty_io(bdi))
                        continue;
 
-               bdi_alloc_queue_work(bdi, wbc);
+               bdi_alloc_queue_work(bdi, &args);
        }
 
        rcu_read_unlock();
@@ -891,17 +923,10 @@ static void bdi_writeback_all(struct writeback_control *wbc)
  */
 void wakeup_flusher_threads(long nr_pages)
 {
-       struct writeback_control wbc = {
-               .sync_mode      = WB_SYNC_NONE,
-               .older_than_this = NULL,
-               .range_cyclic   = 1,
-       };
-
        if (nr_pages == 0)
                nr_pages = global_page_state(NR_FILE_DIRTY) +
                                global_page_state(NR_UNSTABLE_NFS);
-       wbc.nr_to_write = nr_pages;
-       bdi_writeback_all(&wbc);
+       bdi_writeback_all(NULL, nr_pages);
 }
 
 static noinline void block_dump___mark_inode_dirty(struct inode *inode)
@@ -1048,7 +1073,7 @@ EXPORT_SYMBOL(__mark_inode_dirty);
  * on the writer throttling path, and we get decent balancing between many
  * throttled threads: we don't want them all piling up on inode_sync_wait.
  */
-static void wait_sb_inodes(struct writeback_control *wbc)
+static void wait_sb_inodes(struct super_block *sb)
 {
        struct inode *inode, *old_inode = NULL;
 
@@ -1056,7 +1081,7 @@ static void wait_sb_inodes(struct writeback_control *wbc)
         * We need to be protected against the filesystem going from
         * r/o to r/w or vice versa.
         */
-       WARN_ON(!rwsem_is_locked(&wbc->sb->s_umount));
+       WARN_ON(!rwsem_is_locked(&sb->s_umount));
 
        spin_lock(&inode_lock);
 
@@ -1067,7 +1092,7 @@ static void wait_sb_inodes(struct writeback_control *wbc)
         * In which case, the inode may not be on the dirty list, but
         * we still have to wait for that writeout.
         */
-       list_for_each_entry(inode, &wbc->sb->s_inodes, i_sb_list) {
+       list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
                struct address_space *mapping;
 
                if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
@@ -1107,14 +1132,8 @@ static void wait_sb_inodes(struct writeback_control *wbc)
  * for IO completion of submitted IO. The number of pages submitted is
  * returned.
  */
-long writeback_inodes_sb(struct super_block *sb)
+void writeback_inodes_sb(struct super_block *sb)
 {
-       struct writeback_control wbc = {
-               .sb             = sb,
-               .sync_mode      = WB_SYNC_NONE,
-               .range_start    = 0,
-               .range_end      = LLONG_MAX,
-       };
        unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
        unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
        long nr_to_write;
@@ -1122,9 +1141,7 @@ long writeback_inodes_sb(struct super_block *sb)
        nr_to_write = nr_dirty + nr_unstable +
                        (inodes_stat.nr_inodes - inodes_stat.nr_unused);
 
-       wbc.nr_to_write = nr_to_write;
-       bdi_writeback_all(&wbc);
-       return nr_to_write - wbc.nr_to_write;
+       bdi_writeback_all(sb, nr_to_write);
 }
 EXPORT_SYMBOL(writeback_inodes_sb);
 
@@ -1135,21 +1152,10 @@ EXPORT_SYMBOL(writeback_inodes_sb);
  * This function writes and waits on any dirty inode belonging to this
  * super_block. The number of pages synced is returned.
  */
-long sync_inodes_sb(struct super_block *sb)
+void sync_inodes_sb(struct super_block *sb)
 {
-       struct writeback_control wbc = {
-               .sb             = sb,
-               .bdi            = sb->s_bdi,
-               .sync_mode      = WB_SYNC_ALL,
-               .range_start    = 0,
-               .range_end      = LLONG_MAX,
-       };
-       long nr_to_write = LONG_MAX; /* doesn't actually matter */
-
-       wbc.nr_to_write = nr_to_write;
-       bdi_start_writeback(&wbc);
-       wait_sb_inodes(&wbc);
-       return nr_to_write - wbc.nr_to_write;
+       bdi_sync_writeback(sb->s_bdi, sb);
+       wait_sb_inodes(sb);
 }
 EXPORT_SYMBOL(sync_inodes_sb);
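
For context, the two exported entry points pair up in a sync(2)-style caller: the WB_SYNC_NONE pass gets IO started, the WB_SYNC_ALL pass waits on it. A simplified sketch along the lines of __sync_filesystem() in fs/sync.c (hypothetical name; quota writeout and block device flushing omitted):

    /* simplified caller sketch; not part of this patch */
    static int sync_filesystem_sketch(struct super_block *sb, int wait)
    {
            if (wait)
                    sync_inodes_sb(sb);             /* WB_SYNC_ALL + wait */
            else
                    writeback_inodes_sb(sb);        /* WB_SYNC_NONE, no wait */

            if (sb->s_op->sync_fs)
                    sb->s_op->sync_fs(sb, wait);
            return 0;
    }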