bdi: make inode_to_bdi() inline
[firefly-linux-kernel-4.4.55.git] / mm / backing-dev.c
index 6dc4580df2af040b10bc10a5f9c423becc3ff47e..b0707d1b1d38d2aa8c20fa2bb5c734c7979a58b0 100644 (file)
@@ -18,6 +18,7 @@ struct backing_dev_info noop_backing_dev_info = {
        .name           = "noop",
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
 };
+EXPORT_SYMBOL_GPL(noop_backing_dev_info);
 
 static struct class *bdi_class;
 
@@ -66,7 +67,7 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
        spin_unlock(&wb->list_lock);
 
        global_dirty_limits(&background_thresh, &dirty_thresh);
-       bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
+       bdi_thresh = wb_dirty_limit(wb, dirty_thresh);
 
 #define K(x) ((x) << (PAGE_SHIFT - 10))
        seq_printf(m,
@@ -84,19 +85,19 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
                   "b_dirty_time:       %10lu\n"
                   "bdi_list:           %10u\n"
                   "state:              %10lx\n",
-                  (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
-                  (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
+                  (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
+                  (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
                   K(bdi_thresh),
                   K(dirty_thresh),
                   K(background_thresh),
-                  (unsigned long) K(bdi_stat(bdi, BDI_DIRTIED)),
-                  (unsigned long) K(bdi_stat(bdi, BDI_WRITTEN)),
-                  (unsigned long) K(bdi->write_bandwidth),
+                  (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
+                  (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
+                  (unsigned long) K(wb->write_bandwidth),
                   nr_dirty,
                   nr_io,
                   nr_more_io,
                   nr_dirty_time,
-                  !list_empty(&bdi->bdi_list), bdi->state);
+                  !list_empty(&bdi->bdi_list), bdi->wb.state);
 #undef K
 
        return 0;
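
The dump above now reads the per-wb counters through wb_stat(). For reference, a paraphrased sketch of that accessor as it appears in the backing-dev header alongside this series (the exact wording in the header is an assumption, not part of this hunk):

/* Paraphrased from the backing-dev header; relies on the types used in this file. */
static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	/* read_positive clamps transient negative sums from per-cpu batching */
	return percpu_counter_read_positive(&wb->stat[item]);
}
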
@@ -261,7 +262,7 @@ int bdi_has_dirty_io(struct backing_dev_info *bdi)
 }
 
 /*
- * This function is used when the first inode for this bdi is marked dirty. It
+ * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread, which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
@@ -274,29 +275,117 @@ int bdi_has_dirty_io(struct backing_dev_info *bdi)
  * We have to be careful not to postpone flush work if it is scheduled for
  * earlier. Thus we use queue_delayed_work().
  */
-void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
+void wb_wakeup_delayed(struct bdi_writeback *wb)
 {
        unsigned long timeout;
 
        timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
-       spin_lock_bh(&bdi->wb_lock);
-       if (test_bit(BDI_registered, &bdi->state))
-               queue_delayed_work(bdi_wq, &bdi->wb.dwork, timeout);
-       spin_unlock_bh(&bdi->wb_lock);
+       spin_lock_bh(&wb->work_lock);
+       if (test_bit(WB_registered, &wb->state))
+               queue_delayed_work(bdi_wq, &wb->dwork, timeout);
+       spin_unlock_bh(&wb->work_lock);
 }
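
For a sense of scale: with the default dirty_writeback_interval of 500 centisecs, the delay computed above is roughly five seconds. A minimal illustration, not part of the patch:

/* Illustration only: the default periodic-flush delay used by wb_wakeup_delayed(). */
static unsigned long example_default_wakeup_delay(void)
{
	return msecs_to_jiffies(500 * 10);	/* 500 centisecs -> ~5 s in jiffies */
}
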
 
 /*
- * Remove bdi from bdi_list, and ensure that it is no longer visible
+ * Initial write bandwidth: 100 MB/s
  */
-static void bdi_remove_from_list(struct backing_dev_info *bdi)
+#define INIT_BW                (100 << (20 - PAGE_SHIFT))
+
+static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
 {
-       spin_lock_bh(&bdi_lock);
-       list_del_rcu(&bdi->bdi_list);
-       spin_unlock_bh(&bdi_lock);
+       int i, err;
 
-       synchronize_rcu_expedited();
+       memset(wb, 0, sizeof(*wb));
+
+       wb->bdi = bdi;
+       wb->last_old_flush = jiffies;
+       INIT_LIST_HEAD(&wb->b_dirty);
+       INIT_LIST_HEAD(&wb->b_io);
+       INIT_LIST_HEAD(&wb->b_more_io);
+       INIT_LIST_HEAD(&wb->b_dirty_time);
+       spin_lock_init(&wb->list_lock);
+
+       wb->bw_time_stamp = jiffies;
+       wb->balanced_dirty_ratelimit = INIT_BW;
+       wb->dirty_ratelimit = INIT_BW;
+       wb->write_bandwidth = INIT_BW;
+       wb->avg_write_bandwidth = INIT_BW;
+
+       spin_lock_init(&wb->work_lock);
+       INIT_LIST_HEAD(&wb->work_list);
+       INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
+
+       err = fprop_local_init_percpu(&wb->completions, GFP_KERNEL);
+       if (err)
+               return err;
+
+       for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
+               err = percpu_counter_init(&wb->stat[i], 0, GFP_KERNEL);
+               if (err) {
+                       while (i--)
+                               percpu_counter_destroy(&wb->stat[i]);
+                       fprop_local_destroy_percpu(&wb->completions);
+                       return err;
+               }
+       }
+
+       return 0;
+}
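
The stat[] array initialized above is indexed by the wb_stat_item enum that this series introduces in the backing-dev headers; paraphrased here for reference (the ordering is inferred from the old BDI_* counters and the identifiers used elsewhere in this diff):

/* Paraphrased from the backing-dev headers in this series; the four counters
 * match the WB_* identifiers used in bdi_debug_stats_show() above. */
enum wb_stat_item {
	WB_RECLAIMABLE,
	WB_WRITEBACK,
	WB_DIRTIED,
	WB_WRITTEN,
	NR_WB_STAT_ITEMS
};
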
+
+/*
+ * Stop new work from being queued on a wb and shut down its delayed work
+ */
+static void wb_shutdown(struct bdi_writeback *wb)
+{
+       /* Make sure nobody queues further work */
+       spin_lock_bh(&wb->work_lock);
+       if (!test_and_clear_bit(WB_registered, &wb->state)) {
+               spin_unlock_bh(&wb->work_lock);
+               return;
+       }
+       spin_unlock_bh(&wb->work_lock);
+
+       /*
+        * Drain the work list and shut down the delayed_work.  !WB_registered
+        * tells wb_workfn() that @wb is dying and its work_list needs to
+        * be drained no matter what.
+        */
+       mod_delayed_work(bdi_wq, &wb->dwork, 0);
+       flush_delayed_work(&wb->dwork);
+       WARN_ON(!list_empty(&wb->work_list));
 }
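
The WB_registered test under work_lock pairs with the queuing side: anything that adds to work_list takes the same lock and checks the same bit, so once wb_shutdown() clears it nothing new can land. A hypothetical sketch of that queuing side, modelled on fs/fs-writeback.c (the helper name and work argument are illustrative; the wb fields and bdi_wq are the ones used in this file):

/* Hypothetical queuing side of the WB_registered handshake. */
static void example_queue_wb_work(struct bdi_writeback *wb,
				  struct list_head *work_node)
{
	spin_lock_bh(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state)) {
		list_add_tail(work_node, &wb->work_list);
		mod_delayed_work(bdi_wq, &wb->dwork, 0);
	}
	spin_unlock_bh(&wb->work_lock);
}
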
 
+static void wb_exit(struct bdi_writeback *wb)
+{
+       int i;
+
+       WARN_ON(delayed_work_pending(&wb->dwork));
+
+       for (i = 0; i < NR_WB_STAT_ITEMS; i++)
+               percpu_counter_destroy(&wb->stat[i]);
+
+       fprop_local_destroy_percpu(&wb->completions);
+}
+
+int bdi_init(struct backing_dev_info *bdi)
+{
+       int err;
+
+       bdi->dev = NULL;
+
+       bdi->min_ratio = 0;
+       bdi->max_ratio = 100;
+       bdi->max_prop_frac = FPROP_FRAC_BASE;
+       INIT_LIST_HEAD(&bdi->bdi_list);
+
+       err = wb_init(&bdi->wb, bdi);
+       if (err)
+               return err;
+
+       return 0;
+}
+EXPORT_SYMBOL(bdi_init);
+
 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                const char *fmt, ...)
 {
@@ -315,7 +404,7 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
        bdi->dev = dev;
 
        bdi_debug_register(bdi, dev_name(dev));
-       set_bit(BDI_registered, &bdi->state);
+       set_bit(WB_registered, &bdi->wb.state);
 
        spin_lock_bh(&bdi_lock);
        list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
@@ -333,30 +422,15 @@ int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
 EXPORT_SYMBOL(bdi_register_dev);
 
 /*
- * Remove bdi from the global list and shutdown any threads we have running
+ * Remove bdi from bdi_list, and ensure that it is no longer visible
  */
-static void bdi_wb_shutdown(struct backing_dev_info *bdi)
+static void bdi_remove_from_list(struct backing_dev_info *bdi)
 {
-       /* Make sure nobody queues further work */
-       spin_lock_bh(&bdi->wb_lock);
-       if (!test_and_clear_bit(BDI_registered, &bdi->state)) {
-               spin_unlock_bh(&bdi->wb_lock);
-               return;
-       }
-       spin_unlock_bh(&bdi->wb_lock);
-
-       /*
-        * Make sure nobody finds us on the bdi_list anymore
-        */
-       bdi_remove_from_list(bdi);
+       spin_lock_bh(&bdi_lock);
+       list_del_rcu(&bdi->bdi_list);
+       spin_unlock_bh(&bdi_lock);
 
-       /*
-        * Drain work list and shutdown the delayed_work.  At this point,
-        * @bdi->bdi_list is empty telling bdi_writeback_workfn() that @bdi
-        * is dying and its work_list needs to be drained no matter what.
-        */
-       mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
-       flush_delayed_work(&bdi->wb.dwork);
+       synchronize_rcu_expedited();
 }
 
 /*
@@ -376,76 +450,11 @@ void bdi_unregister(struct backing_dev_info *bdi)
 }
 EXPORT_SYMBOL(bdi_unregister);
 
-static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
-{
-       memset(wb, 0, sizeof(*wb));
-
-       wb->bdi = bdi;
-       wb->last_old_flush = jiffies;
-       INIT_LIST_HEAD(&wb->b_dirty);
-       INIT_LIST_HEAD(&wb->b_io);
-       INIT_LIST_HEAD(&wb->b_more_io);
-       INIT_LIST_HEAD(&wb->b_dirty_time);
-       spin_lock_init(&wb->list_lock);
-       INIT_DELAYED_WORK(&wb->dwork, bdi_writeback_workfn);
-}
-
-/*
- * Initial write bandwidth: 100 MB/s
- */
-#define INIT_BW                (100 << (20 - PAGE_SHIFT))
-
-int bdi_init(struct backing_dev_info *bdi)
-{
-       int i, err;
-
-       bdi->dev = NULL;
-
-       bdi->min_ratio = 0;
-       bdi->max_ratio = 100;
-       bdi->max_prop_frac = FPROP_FRAC_BASE;
-       spin_lock_init(&bdi->wb_lock);
-       INIT_LIST_HEAD(&bdi->bdi_list);
-       INIT_LIST_HEAD(&bdi->work_list);
-
-       bdi_wb_init(&bdi->wb, bdi);
-
-       for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
-               err = percpu_counter_init(&bdi->bdi_stat[i], 0, GFP_KERNEL);
-               if (err)
-                       goto err;
-       }
-
-       bdi->dirty_exceeded = 0;
-
-       bdi->bw_time_stamp = jiffies;
-       bdi->written_stamp = 0;
-
-       bdi->balanced_dirty_ratelimit = INIT_BW;
-       bdi->dirty_ratelimit = INIT_BW;
-       bdi->write_bandwidth = INIT_BW;
-       bdi->avg_write_bandwidth = INIT_BW;
-
-       err = fprop_local_init_percpu(&bdi->completions, GFP_KERNEL);
-
-       if (err) {
-err:
-               while (i--)
-                       percpu_counter_destroy(&bdi->bdi_stat[i]);
-       }
-
-       return err;
-}
-EXPORT_SYMBOL(bdi_init);
-
 void bdi_destroy(struct backing_dev_info *bdi)
 {
-       int i;
-
-       bdi_wb_shutdown(bdi);
-
-       WARN_ON(!list_empty(&bdi->work_list));
-       WARN_ON(delayed_work_pending(&bdi->wb.dwork));
+       /* make sure nobody finds us on the bdi_list anymore */
+       bdi_remove_from_list(bdi);
+       wb_shutdown(&bdi->wb);
 
        if (bdi->dev) {
                bdi_debug_unregister(bdi);
@@ -453,9 +462,7 @@ void bdi_destroy(struct backing_dev_info *bdi)
                bdi->dev = NULL;
        }
 
-       for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
-               percpu_counter_destroy(&bdi->bdi_stat[i]);
-       fprop_local_destroy_percpu(&bdi->completions);
+       wb_exit(&bdi->wb);
 }
 EXPORT_SYMBOL(bdi_destroy);
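
Taken together with bdi_register()/bdi_unregister() above, a hypothetical driver-side user of the exported lifecycle might look like this (only the bdi_* calls are real; the example_* names are illustrative):

#include <linux/backing-dev.h>

static struct backing_dev_info example_bdi;

static int example_driver_init(void)
{
	int err;

	err = bdi_init(&example_bdi);		/* sets up example_bdi.wb via wb_init() */
	if (err)
		return err;

	err = bdi_register(&example_bdi, NULL, "example");
	if (err) {
		bdi_destroy(&example_bdi);
		return err;
	}
	return 0;
}

static void example_driver_exit(void)
{
	bdi_unregister(&example_bdi);
	bdi_destroy(&example_bdi);	/* shuts down the wb and frees its counters */
}
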
 
@@ -492,11 +499,11 @@ static atomic_t nr_bdi_congested[2];
 
 void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
 {
-       enum bdi_state bit;
+       enum wb_state bit;
        wait_queue_head_t *wqh = &congestion_wqh[sync];
 
-       bit = sync ? BDI_sync_congested : BDI_async_congested;
-       if (test_and_clear_bit(bit, &bdi->state))
+       bit = sync ? WB_sync_congested : WB_async_congested;
+       if (test_and_clear_bit(bit, &bdi->wb.state))
                atomic_dec(&nr_bdi_congested[sync]);
        smp_mb__after_atomic();
        if (waitqueue_active(wqh))
@@ -506,10 +513,10 @@ EXPORT_SYMBOL(clear_bdi_congested);
 
 void set_bdi_congested(struct backing_dev_info *bdi, int sync)
 {
-       enum bdi_state bit;
+       enum wb_state bit;
 
-       bit = sync ? BDI_sync_congested : BDI_async_congested;
-       if (!test_and_set_bit(bit, &bdi->state))
+       bit = sync ? WB_sync_congested : WB_async_congested;
+       if (!test_and_set_bit(bit, &bdi->wb.state))
                atomic_inc(&nr_bdi_congested[sync]);
 }
 EXPORT_SYMBOL(set_bdi_congested);
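
Since the congested bits now live in bdi->wb.state, existing helpers such as bdi_write_congested() and congestion_wait() keep working against the new location; a hypothetical throttling caller (the wrapper name is illustrative):

#include <linux/backing-dev.h>

/* Hypothetical caller: back off while the device is async-congested. */
static void example_throttle_on_congestion(struct backing_dev_info *bdi)
{
	if (bdi_write_congested(bdi))
		congestion_wait(BLK_RW_ASYNC, HZ / 50);
}
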