block: fix hctx checks for batch allocation

[ Upstream commit 7746564793 ]

When there are no read queues, read requests will be assigned a
default queue on allocation. However, blk_mq_get_cached_request() is not
prepared for that and will fail all attempts to grab read requests from
the cache. In the worst case it doubles the number of requests allocated,
roughly half of which will be returned by blk_mq_free_plug_rqs().

It only affects batched allocations and so is io_uring specific.
For reference, the QD8 t/io_uring benchmark improves by 20-35%.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/80d4511011d7d4751b4cf6375c4e38f237d935e3.1673955390.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Authored by Pavel Begunkov on 2023-01-17 11:42:15 +00:00; committed by Greg Kroah-Hartman
parent 99db989945
commit 6f13860bba
1 changed file with 5 additions and 1 deletion

@@ -2858,6 +2858,7 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
 		struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
 {
 	struct request *rq;
+	enum hctx_type type, hctx_type;
 
 	if (!plug)
 		return NULL;
@@ -2870,7 +2871,10 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
 		return NULL;
 	}
 
-	if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
+	type = blk_mq_get_hctx_type((*bio)->bi_opf);
+	hctx_type = rq->mq_hctx->type;
+	if (type != hctx_type &&
+	    !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
 		return NULL;
 	if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
 		return NULL;
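
For illustration, the failure mode described in the commit message can be
modeled in plain user space. This is a minimal sketch, not kernel code: the
HCTX_TYPE_* values mirror enum hctx_type from include/linux/blk-mq.h, while
cached_rq_usable_old() and cached_rq_usable_new() are hypothetical names
standing in for the check changed in the hunk above.

#include <stdbool.h>
#include <stdio.h>

/* Subset of enum hctx_type from include/linux/blk-mq.h. */
enum hctx_type {
	HCTX_TYPE_DEFAULT,	/* all I/O not otherwise accounted for */
	HCTX_TYPE_READ,		/* just for READ I/O */
	HCTX_TYPE_POLL,		/* polled I/O of any kind */
};

/* Old behaviour: the cached request must sit on exactly the hctx type the bio maps to. */
static bool cached_rq_usable_old(enum hctx_type bio_type, enum hctx_type rq_type)
{
	return bio_type == rq_type;
}

/*
 * New behaviour, per the hunk above: also accept a READ bio against a request
 * cached on a DEFAULT hctx, which is where reads are batch-allocated when the
 * device exposes no read queues.
 */
static bool cached_rq_usable_new(enum hctx_type bio_type, enum hctx_type rq_type)
{
	if (bio_type == rq_type)
		return true;
	return bio_type == HCTX_TYPE_READ && rq_type == HCTX_TYPE_DEFAULT;
}

int main(void)
{
	/* Read bio classified as HCTX_TYPE_READ, cached request sitting on HCTX_TYPE_DEFAULT. */
	printf("old check: %d\n", cached_rq_usable_old(HCTX_TYPE_READ, HCTX_TYPE_DEFAULT)); /* 0: cache miss */
	printf("new check: %d\n", cached_rq_usable_new(HCTX_TYPE_READ, HCTX_TYPE_DEFAULT)); /* 1: cache hit */
	return 0;
}

On a device without dedicated read queues, the old check rejects every read
bio against the cached requests (which sit on a DEFAULT hctx), so each read in
the batch falls back to a fresh allocation and the cached entries are later
freed by blk_mq_free_plug_rqs(); the new check lets those reads reuse the cache.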