blk_mq: call preempt_disable/enable in blk_mq_run_hw_queue, and only if needed
author  Paolo Bonzini <pbonzini@redhat.com>
Fri, 7 Nov 2014 22:03:59 +0000 (23:03 +0100)
committer  Jens Axboe <axboe@fb.com>
Tue, 11 Nov 2014 18:04:47 +0000 (11:04 -0700)
A preempt_disable()/preempt_enable() pair surrounds every call to
blk_mq_run_hw_queue(), except the one in blk-flush.c.  That call is
always asynchronous, so it never needs smp_processor_id().

We can do the same for all other callers by moving the
preempt_disable()/preempt_enable() pair into blk_mq_run_hw_queue()
itself and taking it only when async is false.  This avoids peppering
blk-mq.c with preemption-disabled regions.
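
For reference, the resulting shape of blk_mq_run_hw_queue() is roughly
the following (a simplified sketch of the first hunk below; the
stopped-queue check and the nr_hw_queues > 1 branch are omitted):

	void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
	{
		if (!async) {
			/* smp_processor_id() is only stable with preemption off */
			preempt_disable();
			if (cpumask_test_cpu(smp_processor_id(), hctx->cpumask)) {
				/* this CPU may serve the hctx: run it directly */
				__blk_mq_run_hw_queue(hctx);
				preempt_enable();
				return;
			}
			preempt_enable();
		}

		/* async, or this CPU not in hctx->cpumask: punt to kblockd */
		kblockd_schedule_delayed_work(&hctx->run_work, 0);
	}

Preemption is now disabled only across the cpumask_test_cpu() /
__blk_mq_run_hw_queue() window, so asynchronous callers never enter a
preemption-disabled region and the explicit preempt_disable()/
preempt_enable() pairs at the call sites below can simply be dropped.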

Cc: Jens Axboe <axboe@kernel.dk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Reported-by: Clark Williams <williams@redhat.com>
Tested-by: Clark Williams <williams@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
block/blk-mq.c

index b355b5957cd72518b7a196fd92f7c9c07a89678b..8b309e81ed0fda22812787c93b23a2d568f1f966 100644
@@ -801,9 +801,18 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
        if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
                return;
 
-       if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
-               __blk_mq_run_hw_queue(hctx);
-       else if (hctx->queue->nr_hw_queues == 1)
+       if (!async) {
+               preempt_disable();
+               if (cpumask_test_cpu(smp_processor_id(), hctx->cpumask)) {
+                       __blk_mq_run_hw_queue(hctx);
+                       preempt_enable();
+                       return;
+               }
+
+               preempt_enable();
+       }
+
+       if (hctx->queue->nr_hw_queues == 1)
                kblockd_schedule_delayed_work(&hctx->run_work, 0);
        else {
                unsigned int cpu;
@@ -824,9 +833,7 @@ void blk_mq_run_queues(struct request_queue *q, bool async)
                    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
                        continue;
 
-               preempt_disable();
                blk_mq_run_hw_queue(hctx, async);
-               preempt_enable();
        }
 }
 EXPORT_SYMBOL(blk_mq_run_queues);
@@ -853,9 +860,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
        clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
 
-       preempt_disable();
        blk_mq_run_hw_queue(hctx, false);
-       preempt_enable();
 }
 EXPORT_SYMBOL(blk_mq_start_hw_queue);
 
@@ -880,9 +885,7 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
                        continue;
 
                clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
-               preempt_disable();
                blk_mq_run_hw_queue(hctx, async);
-               preempt_enable();
        }
 }
 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);