From 4cafe86c9267f9dd5819df946ba8c038ba958370 Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Fri, 3 Dec 2021 21:15:34 +0800
Subject: [PATCH] blk-mq: run dispatch lock once in case of issuing from list

It isn't necessary to call blk_mq_run_dispatch_ops() once for each
request issued directly; it is enough to do it one time when issuing
the whole list.

Signed-off-by: Ming Lei
Link: https://lore.kernel.org/r/20211203131534.3668411-5-ming.lei@redhat.com
Signed-off-by: Jens Axboe
---
 block/blk-mq-sched.c |  3 ++-
 block/blk-mq.c       | 14 ++++++--------
 2 files changed, 8 insertions(+), 9 deletions(-)

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 0d7257848f7e..55488ba97823 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -475,7 +475,8 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
 	 * us one extra enqueue & dequeue to sw queue.
 	 */
 	if (!hctx->dispatch_busy && !run_queue_async) {
-		blk_mq_try_issue_list_directly(hctx, list);
+		blk_mq_run_dispatch_ops(hctx->queue,
+			blk_mq_try_issue_list_directly(hctx, list));
 		if (list_empty(list))
 			goto out;
 	}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 24c65bb8719b..22ec21aa0c22 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2464,12 +2464,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 
 static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
 {
-	blk_status_t ret;
-	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
-
-	blk_mq_run_dispatch_ops(rq->q,
-		ret = __blk_mq_try_issue_directly(hctx, rq, true, last));
-	return ret;
+	return __blk_mq_try_issue_directly(rq->mq_hctx, rq, true, last);
 }
 
 static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
@@ -2526,7 +2521,8 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	plug->rq_count = 0;
 
 	if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
-		blk_mq_plug_issue_direct(plug, false);
+		blk_mq_run_dispatch_ops(plug->mq_list->q,
+				blk_mq_plug_issue_direct(plug, false));
 		if (rq_list_empty(plug->mq_list))
 			return;
 	}
@@ -2867,7 +2863,9 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
 	 * bypass a potential scheduler on the bottom device for
 	 * insert.
 	 */
-	return blk_mq_request_issue_directly(rq, true);
+	blk_mq_run_dispatch_ops(rq->q,
+		ret = blk_mq_request_issue_directly(rq, true));
+	return ret;
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
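
Note on blk_mq_run_dispatch_ops(): it is the helper macro in block/blk-mq.h that
brackets a dispatch expression with the queue's RCU read-side section (or SRCU,
for BLK_MQ_F_BLOCKING queues). The sketch below only illustrates that shape; it
is a simplified reconstruction based on this patch series, not the exact
in-tree definition, and the helper/field names blk_queue_has_srcu() and
q->srcu are assumptions here.

/*
 * Simplified sketch of the blk_mq_run_dispatch_ops() pattern used above.
 * Illustration only -- the real macro lives in block/blk-mq.h.
 */
#define blk_mq_run_dispatch_ops(q, dispatch_ops)			\
do {									\
	if (!blk_queue_has_srcu(q)) {					\
		/* non-blocking hw queues: plain RCU read section */	\
		rcu_read_lock();					\
		(dispatch_ops);						\
		rcu_read_unlock();					\
	} else {							\
		/* BLK_MQ_F_BLOCKING queues: sleepable SRCU section */	\
		int srcu_idx = srcu_read_lock((q)->srcu);		\
									\
		(dispatch_ops);						\
		srcu_read_unlock((q)->srcu, srcu_idx);			\
	}								\
} while (0)

With the hoisting in this patch, issuing a plug list or a scheduler insert list
enters this section once for the whole batch instead of once per request, while
blk_insert_cloned_request(), which issues a single request, now takes the
section itself because blk_mq_request_issue_directly() no longer does.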