diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index be70d38745f7a7f5da51bd4ba83a29afe851b238..53728be84dee35ac8dfabbf48087919841049f1a 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
 #include <linux/dm-io.h>
 #include <linux/dm-kcopyd.h>
 #include <linux/list.h>
+#include <linux/rculist.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/rbtree.h>
 
 #define        DM_MSG_PREFIX   "thin"
 
@@ -178,12 +180,10 @@ struct pool {
        unsigned ref_count;
 
        spinlock_t lock;
-       struct bio_list deferred_bios;
        struct bio_list deferred_flush_bios;
        struct list_head prepared_mappings;
        struct list_head prepared_discards;
-
-       struct bio_list retry_on_resume_list;
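+       /* Active thin targets; modified under pool->lock, walked under RCU */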
+       struct list_head active_thins;
 
        struct dm_deferred_set *shared_read_ds;
        struct dm_deferred_set *all_io_ds;
@@ -220,6 +220,7 @@ struct pool_c {
  * Target context for a thin.
  */
 struct thin_c {
+       struct list_head list;
        struct dm_dev *pool_dev;
        struct dm_dev *origin_dev;
        dm_thin_id dev_id;
@@ -227,6 +228,10 @@ struct thin_c {
        struct pool *pool;
        struct dm_thin_device *td;
        bool requeue_mode:1;
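+       /* Protects deferred_bio_list, retry_on_resume_list and sort_bio_list */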
+       spinlock_t lock;
+       struct bio_list deferred_bio_list;
+       struct bio_list retry_on_resume_list;
+       struct rb_root sort_bio_list; /* sorted list of deferred bios */
 };
 
 /*----------------------------------------------------------------*/
@@ -287,9 +292,9 @@ static void cell_defer_no_holder_no_free(struct thin_c *tc,
        struct pool *pool = tc->pool;
        unsigned long flags;
 
-       spin_lock_irqsave(&pool->lock, flags);
-       dm_cell_release_no_holder(pool->prison, cell, &pool->deferred_bios);
-       spin_unlock_irqrestore(&pool->lock, flags);
+       spin_lock_irqsave(&tc->lock, flags);
+       dm_cell_release_no_holder(pool->prison, cell, &tc->deferred_bio_list);
+       spin_unlock_irqrestore(&tc->lock, flags);
 
        wake_worker(pool);
 }
@@ -368,6 +373,7 @@ struct dm_thin_endio_hook {
        struct dm_deferred_entry *shared_read_entry;
        struct dm_deferred_entry *all_io_entry;
        struct dm_thin_new_mapping *overwrite_mapping;
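+       /* Links the bio into tc->sort_bio_list while deferred bios are sorted */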
+       struct rb_node rb_node;
 };
 
 static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)
@@ -378,30 +384,22 @@ static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)
 
        bio_list_init(&bios);
 
-       spin_lock_irqsave(&tc->pool->lock, flags);
+       spin_lock_irqsave(&tc->lock, flags);
        bio_list_merge(&bios, master);
        bio_list_init(master);
-       spin_unlock_irqrestore(&tc->pool->lock, flags);
+       spin_unlock_irqrestore(&tc->lock, flags);
 
-       while ((bio = bio_list_pop(&bios))) {
-               struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
-
-               if (h->tc == tc)
-                       bio_endio(bio, DM_ENDIO_REQUEUE);
-               else
-                       bio_list_add(master, bio);
-       }
+       while ((bio = bio_list_pop(&bios)))
+               bio_endio(bio, DM_ENDIO_REQUEUE);
 }
 
 static void requeue_io(struct thin_c *tc)
 {
-       struct pool *pool = tc->pool;
-
-       requeue_bio_list(tc, &pool->deferred_bios);
-       requeue_bio_list(tc, &pool->retry_on_resume_list);
+       requeue_bio_list(tc, &tc->deferred_bio_list);
+       requeue_bio_list(tc, &tc->retry_on_resume_list);
 }
 
-static void error_retry_list(struct pool *pool)
+static void error_thin_retry_list(struct thin_c *tc)
 {
        struct bio *bio;
        unsigned long flags;
@@ -409,15 +407,25 @@ static void error_retry_list(struct pool *pool)
 
        bio_list_init(&bios);
 
-       spin_lock_irqsave(&pool->lock, flags);
-       bio_list_merge(&bios, &pool->retry_on_resume_list);
-       bio_list_init(&pool->retry_on_resume_list);
-       spin_unlock_irqrestore(&pool->lock, flags);
+       spin_lock_irqsave(&tc->lock, flags);
+       bio_list_merge(&bios, &tc->retry_on_resume_list);
+       bio_list_init(&tc->retry_on_resume_list);
+       spin_unlock_irqrestore(&tc->lock, flags);
 
        while ((bio = bio_list_pop(&bios)))
                bio_io_error(bio);
 }
 
+static void error_retry_list(struct pool *pool)
+{
+       struct thin_c *tc;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(tc, &pool->active_thins, list)
+               error_thin_retry_list(tc);
+       rcu_read_unlock();
+}
+
 /*
  * This section of code contains the logic for processing a thin device's IO.
  * Much of the code depends on pool object resources (lists, workqueues, etc)
@@ -608,9 +616,9 @@ static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
        struct pool *pool = tc->pool;
        unsigned long flags;
 
-       spin_lock_irqsave(&pool->lock, flags);
-       cell_release(pool, cell, &pool->deferred_bios);
-       spin_unlock_irqrestore(&tc->pool->lock, flags);
+       spin_lock_irqsave(&tc->lock, flags);
+       cell_release(pool, cell, &tc->deferred_bio_list);
+       spin_unlock_irqrestore(&tc->lock, flags);
 
        wake_worker(pool);
 }
@@ -623,9 +631,9 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c
        struct pool *pool = tc->pool;
        unsigned long flags;
 
-       spin_lock_irqsave(&pool->lock, flags);
-       cell_release_no_holder(pool, cell, &pool->deferred_bios);
-       spin_unlock_irqrestore(&pool->lock, flags);
+       spin_lock_irqsave(&tc->lock, flags);
+       cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
+       spin_unlock_irqrestore(&tc->lock, flags);
 
        wake_worker(pool);
 }
@@ -1001,12 +1009,11 @@ static void retry_on_resume(struct bio *bio)
 {
        struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
        struct thin_c *tc = h->tc;
-       struct pool *pool = tc->pool;
        unsigned long flags;
 
-       spin_lock_irqsave(&pool->lock, flags);
-       bio_list_add(&pool->retry_on_resume_list, bio);
-       spin_unlock_irqrestore(&pool->lock, flags);
+       spin_lock_irqsave(&tc->lock, flags);
+       bio_list_add(&tc->retry_on_resume_list, bio);
+       spin_unlock_irqrestore(&tc->lock, flags);
 }
 
 static bool should_error_unserviceable_bio(struct pool *pool)
@@ -1363,38 +1370,111 @@ static int need_commit_due_to_time(struct pool *pool)
               jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
 }
 
-static void process_deferred_bios(struct pool *pool)
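+/* Convert between the rb_node embedded in the per-bio data and its bio */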
+#define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node)
+#define thin_bio(pbd) dm_bio_from_per_bio_data((pbd), sizeof(struct dm_thin_endio_hook))
+
+static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
+{
+       struct rb_node **rbp, *parent;
+       struct dm_thin_endio_hook *pbd;
+       sector_t bi_sector = bio->bi_iter.bi_sector;
+
+       rbp = &tc->sort_bio_list.rb_node;
+       parent = NULL;
+       while (*rbp) {
+               parent = *rbp;
+               pbd = thin_pbd(parent);
+
+               if (bi_sector < thin_bio(pbd)->bi_iter.bi_sector)
+                       rbp = &(*rbp)->rb_left;
+               else
+                       rbp = &(*rbp)->rb_right;
+       }
+
+       pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
+       rb_link_node(&pbd->rb_node, parent, rbp);
+       rb_insert_color(&pbd->rb_node, &tc->sort_bio_list);
+}
+
+static void __extract_sorted_bios(struct thin_c *tc)
+{
+       struct rb_node *node;
+       struct dm_thin_endio_hook *pbd;
+       struct bio *bio;
+
+       for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) {
+               pbd = thin_pbd(node);
+               bio = thin_bio(pbd);
+
+               bio_list_add(&tc->deferred_bio_list, bio);
+               rb_erase(&pbd->rb_node, &tc->sort_bio_list);
+       }
+
+       WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list));
+}
+
+static void __sort_thin_deferred_bios(struct thin_c *tc)
+{
+       struct bio *bio;
+       struct bio_list bios;
+
+       bio_list_init(&bios);
+       bio_list_merge(&bios, &tc->deferred_bio_list);
+       bio_list_init(&tc->deferred_bio_list);
+
+       /* Sort deferred_bio_list using rb-tree */
+       while ((bio = bio_list_pop(&bios)))
+               __thin_bio_rb_add(tc, bio);
+
+       /*
+        * Transfer the sorted bios in sort_bio_list back to
+        * deferred_bio_list to allow lockless submission of
+        * all bios.
+        */
+       __extract_sorted_bios(tc);
+}
+
+static void process_thin_deferred_bios(struct thin_c *tc)
 {
+       struct pool *pool = tc->pool;
        unsigned long flags;
        struct bio *bio;
        struct bio_list bios;
+       struct blk_plug plug;
+
+       if (tc->requeue_mode) {
+               requeue_bio_list(tc, &tc->deferred_bio_list);
+               return;
+       }
 
        bio_list_init(&bios);
 
-       spin_lock_irqsave(&pool->lock, flags);
-       bio_list_merge(&bios, &pool->deferred_bios);
-       bio_list_init(&pool->deferred_bios);
-       spin_unlock_irqrestore(&pool->lock, flags);
+       spin_lock_irqsave(&tc->lock, flags);
 
-       while ((bio = bio_list_pop(&bios))) {
-               struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
-               struct thin_c *tc = h->tc;
+       if (bio_list_empty(&tc->deferred_bio_list)) {
+               spin_unlock_irqrestore(&tc->lock, flags);
+               return;
+       }
 
-               if (tc->requeue_mode) {
-                       bio_endio(bio, DM_ENDIO_REQUEUE);
-                       continue;
-               }
+       __sort_thin_deferred_bios(tc);
+
+       bio_list_merge(&bios, &tc->deferred_bio_list);
+       bio_list_init(&tc->deferred_bio_list);
 
+       spin_unlock_irqrestore(&tc->lock, flags);
+
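+       /* Plug so the block layer can batch the sorted bios being issued */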
+       blk_start_plug(&plug);
+       while ((bio = bio_list_pop(&bios))) {
                /*
                 * If we've got no free new_mapping structs, and processing
                 * this bio might require one, we pause until there are some
                 * prepared mappings to process.
                 */
                if (ensure_next_mapping(pool)) {
-                       spin_lock_irqsave(&pool->lock, flags);
-                       bio_list_merge(&pool->deferred_bios, &bios);
-                       spin_unlock_irqrestore(&pool->lock, flags);
-
+                       spin_lock_irqsave(&tc->lock, flags);
+                       bio_list_add(&tc->deferred_bio_list, bio);
+                       bio_list_merge(&tc->deferred_bio_list, &bios);
+                       spin_unlock_irqrestore(&tc->lock, flags);
                        break;
                }
 
@@ -1403,6 +1483,20 @@ static void process_deferred_bios(struct pool *pool)
                else
                        pool->process_bio(tc, bio);
        }
+       blk_finish_plug(&plug);
+}
+
+static void process_deferred_bios(struct pool *pool)
+{
+       unsigned long flags;
+       struct bio *bio;
+       struct bio_list bios;
+       struct thin_c *tc;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(tc, &pool->active_thins, list)
+               process_thin_deferred_bios(tc);
+       rcu_read_unlock();
 
        /*
         * If there are any deferred flush bios, we must commit
@@ -1634,9 +1728,9 @@ static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
        unsigned long flags;
        struct pool *pool = tc->pool;
 
-       spin_lock_irqsave(&pool->lock, flags);
-       bio_list_add(&pool->deferred_bios, bio);
-       spin_unlock_irqrestore(&pool->lock, flags);
+       spin_lock_irqsave(&tc->lock, flags);
+       bio_list_add(&tc->deferred_bio_list, bio);
+       spin_unlock_irqrestore(&tc->lock, flags);
 
        wake_worker(pool);
 }
@@ -1757,26 +1851,29 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 
 static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
 {
-       int r;
-       unsigned long flags;
        struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
+       struct request_queue *q;
 
-       spin_lock_irqsave(&pt->pool->lock, flags);
-       r = !bio_list_empty(&pt->pool->retry_on_resume_list);
-       spin_unlock_irqrestore(&pt->pool->lock, flags);
+       if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE)
+               return 1;
 
-       if (!r) {
-               struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
-               r = bdi_congested(&q->backing_dev_info, bdi_bits);
-       }
-
-       return r;
+       q = bdev_get_queue(pt->data_dev->bdev);
+       return bdi_congested(&q->backing_dev_info, bdi_bits);
 }
 
-static void __requeue_bios(struct pool *pool)
+static void requeue_bios(struct pool *pool)
 {
-       bio_list_merge(&pool->deferred_bios, &pool->retry_on_resume_list);
-       bio_list_init(&pool->retry_on_resume_list);
+       unsigned long flags;
+       struct thin_c *tc;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(tc, &pool->active_thins, list) {
+               spin_lock_irqsave(&tc->lock, flags);
+               bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list);
+               bio_list_init(&tc->retry_on_resume_list);
+               spin_unlock_irqrestore(&tc->lock, flags);
+       }
+       rcu_read_unlock();
 }
 
 /*----------------------------------------------------------------
@@ -1957,12 +2054,11 @@ static struct pool *pool_create(struct mapped_device *pool_md,
        INIT_WORK(&pool->worker, do_worker);
        INIT_DELAYED_WORK(&pool->waker, do_waker);
        spin_lock_init(&pool->lock);
-       bio_list_init(&pool->deferred_bios);
        bio_list_init(&pool->deferred_flush_bios);
        INIT_LIST_HEAD(&pool->prepared_mappings);
        INIT_LIST_HEAD(&pool->prepared_discards);
+       INIT_LIST_HEAD(&pool->active_thins);
        pool->low_water_triggered = false;
-       bio_list_init(&pool->retry_on_resume_list);
 
        pool->shared_read_ds = dm_deferred_set_create();
        if (!pool->shared_read_ds) {
@@ -2507,8 +2603,8 @@ static void pool_resume(struct dm_target *ti)
 
        spin_lock_irqsave(&pool->lock, flags);
        pool->low_water_triggered = false;
-       __requeue_bios(pool);
        spin_unlock_irqrestore(&pool->lock, flags);
+       requeue_bios(pool);
 
        do_waker(&pool->waker.work);
 }
@@ -2947,7 +3043,7 @@ static struct target_type pool_target = {
        .name = "thin-pool",
        .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
                    DM_TARGET_IMMUTABLE,
-       .version = {1, 11, 0},
+       .version = {1, 12, 0},
        .module = THIS_MODULE,
        .ctr = pool_ctr,
        .dtr = pool_dtr,
@@ -2968,6 +3064,12 @@ static struct target_type pool_target = {
 static void thin_dtr(struct dm_target *ti)
 {
        struct thin_c *tc = ti->private;
+       unsigned long flags;
+
+       spin_lock_irqsave(&tc->pool->lock, flags);
+       list_del_rcu(&tc->list);
+       spin_unlock_irqrestore(&tc->pool->lock, flags);
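+       /* Wait out any RCU walkers of active_thins before tearing down this tc */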
+       synchronize_rcu();
 
        mutex_lock(&dm_thin_pool_table.mutex);
 
@@ -3014,6 +3116,10 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
                r = -ENOMEM;
                goto out_unlock;
        }
+       spin_lock_init(&tc->lock);
+       bio_list_init(&tc->deferred_bio_list);
+       bio_list_init(&tc->retry_on_resume_list);
+       tc->sort_bio_list = RB_ROOT;
 
        if (argc == 3) {
                r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
@@ -3085,6 +3191,17 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
        mutex_unlock(&dm_thin_pool_table.mutex);
 
+       spin_lock(&tc->pool->lock);
+       list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
+       spin_unlock(&tc->pool->lock);
+       /*
+        * This synchronize_rcu() call is needed here; otherwise we risk a
+        * wake_worker() call finding no bios to process (because the newly
+        * added tc isn't yet visible).  So this reduces latency since we
+        * aren't then dependent on the periodic commit to wake_worker().
+        */
+       synchronize_rcu();
+
        return 0;
 
 bad_target_max_io_len:
@@ -3250,7 +3367,7 @@ static int thin_iterate_devices(struct dm_target *ti,
 
 static struct target_type thin_target = {
        .name = "thin",
-       .version = {1, 11, 0},
+       .version = {1, 12, 0},
        .module = THIS_MODULE,
        .ctr = thin_ctr,
        .dtr = thin_dtr,