Merge branch 'for-3.7/core' of git://git.kernel.dk/linux-block
[firefly-linux-kernel-4.4.55.git] / drivers/md/dm.c
index 837879716889d3bc2f6cebe8cbbb8240f0e3a11d..66ceaff6455c9dec8de3680cb7ececc49bef896f 100644
@@ -860,10 +860,14 @@ static void dm_done(struct request *clone, int error, bool mapped)
 {
        int r = error;
        struct dm_rq_target_io *tio = clone->end_io_data;
-       dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
+       dm_request_endio_fn rq_end_io = NULL;
 
-       if (mapped && rq_end_io)
-               r = rq_end_io(tio->ti, clone, error, &tio->info);
+       if (tio->ti) {
+               rq_end_io = tio->ti->type->rq_end_io;
+
+               if (mapped && rq_end_io)
+                       r = rq_end_io(tio->ti, clone, error, &tio->info);
+       }
 
        if (r <= 0)
                /* The target wants to complete the I/O */
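Note: tio->ti can now legitimately be NULL here. As the dm_request_fn() hunk
further down shows, a request that falls beyond the end of the device is killed
before map_request() ever assigns tio->ti, so the completion path must not
dereference an unset target. A minimal sketch of that error path, using only
names that appear in this diff:

	clone = dm_start_request(md, rq);	/* tio->ti has not been set yet */
	dm_kill_unmapped_request(clone, -EIO);	/* completion eventually reaches dm_done() */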
@@ -1555,15 +1559,6 @@ static int map_request(struct dm_target *ti, struct request *clone,
        int r, requeued = 0;
        struct dm_rq_target_io *tio = clone->end_io_data;
 
-       /*
-        * Hold the md reference here for the in-flight I/O.
-        * We can't rely on the reference count by device opener,
-        * because the device may be closed during the request completion
-        * when all bios are completed.
-        * See the comment in rq_completed() too.
-        */
-       dm_get(md);
-
        tio->ti = ti;
        r = ti->type->map_rq(ti, clone, &tio->info);
        switch (r) {
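Note: the dm_get(md) is not dropped, it moves (comment and all) into the new
dm_start_request() helper in the next hunk. The md reference is thereby taken
for every request that gets started, including requests killed before they are
ever mapped, and is presumably still released by the dm_put() in rq_completed()
that the comment refers to.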
@@ -1595,6 +1590,26 @@ static int map_request(struct dm_target *ti, struct request *clone,
        return requeued;
 }
 
+static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
+{
+       struct request *clone;
+
+       blk_start_request(orig);
+       clone = orig->special;
+       atomic_inc(&md->pending[rq_data_dir(clone)]);
+
+       /*
+        * Hold the md reference here for the in-flight I/O.
+        * We can't rely on the reference count by device opener,
+        * because the device may be closed during the request completion
+        * when all bios are completed.
+        * See the comment in rq_completed() too.
+        */
+       dm_get(md);
+
+       return clone;
+}
+
 /*
  * q->request_fn for request-based dm.
  * Called with the queue lock held.
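Note: dm_start_request() bundles the steps every dispatched request needs:
blk_start_request() dequeues it from the block layer, the clone pre-allocated
in orig->special is fetched, the per-direction in-flight counter is bumped, and
the md reference discussed above is taken. Factoring this out is what allows
the invalid-target path in the next hunk to perform the same setup before
killing the request.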
@@ -1624,14 +1639,21 @@ static void dm_request_fn(struct request_queue *q)
                        pos = blk_rq_pos(rq);
 
                ti = dm_table_find_target(map, pos);
-               BUG_ON(!dm_target_is_valid(ti));
+               if (!dm_target_is_valid(ti)) {
+                       /*
+                        * Must perform the setup that dm_done() requires
+                        * before calling dm_kill_unmapped_request().
+                        */
+                       DMERR_LIMIT("request attempted access beyond the end of device");
+                       clone = dm_start_request(md, rq);
+                       dm_kill_unmapped_request(clone, -EIO);
+                       continue;
+               }
 
                if (ti->type->busy && ti->type->busy(ti))
                        goto delay_and_out;
 
-               blk_start_request(rq);
-               clone = rq->special;
-               atomic_inc(&md->pending[rq_data_dir(clone)]);
+               clone = dm_start_request(md, rq);
 
                spin_unlock(q->queue_lock);
                if (map_request(ti, clone, md))
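Note: both branches now go through dm_start_request(). The invalid-target
branch uses it so that dm_done() (first hunk) and the completion path find a
properly started request, and the normal branch loses its open-coded
blk_start_request()/rq->special/atomic_inc() sequence. The net effect is that a
request beyond the end of the device now fails with -EIO instead of crashing
the kernel via BUG_ON().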
@@ -1651,8 +1673,6 @@ delay_and_out:
        blk_delay_queue(q, HZ / 10);
 out:
        dm_table_put(map);
-
-       return;
 }
 
 int dm_underlying_device_busy(struct request_queue *q)
@@ -2376,7 +2396,7 @@ static void dm_queue_flush(struct mapped_device *md)
  */
 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
 {
-       struct dm_table *map = ERR_PTR(-EINVAL);
+       struct dm_table *live_map, *map = ERR_PTR(-EINVAL);
        struct queue_limits limits;
        int r;
 
@@ -2386,6 +2406,19 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
        if (!dm_suspended_md(md))
                goto out;
 
+       /*
+        * If the new table has no data devices, retain the existing limits.
+        * This helps multipath with queue_if_no_path if all paths disappear,
+        * then new I/O is queued based on these limits, and then some paths
+        * reappear.
+        */
+       if (dm_table_has_no_data_devices(table)) {
+               live_map = dm_get_live_table(md);
+               if (live_map)
+                       limits = md->queue->limits;
+               dm_table_put(live_map);
+       }
+
        r = dm_calculate_queue_limits(table, &limits);
        if (r) {
                map = ERR_PTR(r);
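Note: a table with no data devices arises, for example, when a multipath device
configured with queue_if_no_path has lost every path. Retaining the live
queue's limits is meant to preserve the limits that I/O queued in the meantime
was sized against, per the comment above. dm_table_has_no_data_devices() is
presumably introduced elsewhere in this series, and the unconditional
dm_table_put(live_map) relies on dm_table_put() accepting a NULL table.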