nvme: enable batched completions of passthrough IO
Now that the normal passthrough end_io path doesn't need the request anymore, we can kill the explicit blk_mq_free_request() and just pass back RQ_END_IO_FREE instead. This lets the batched completion path free requests in batches rather than one at a time.

This brings passthrough IO performance at least on par with bdev based O_DIRECT with io_uring. With this and batched allocations, peak performance goes from 110M IOPS to 122M IOPS. For IRQ based completions, passthrough is now also about 10% faster than before, going from ~61M to ~67M IOPS.

Reviewed-by: Anuj Gupta <anuj20.g@samsung.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Co-developed-by: Stefan Roesch <shr@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 851eb780de
parent c0a7ba77e8
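For context, here is a minimal sketch of the two end_io styles this change switches between. The example_* names are hypothetical; the real handler is nvme_uring_cmd_end_io, shown in the diff below.

#include <linux/blk-mq.h>

/* Old style: the end_io handler frees the request itself. */
static enum rq_end_io_ret example_end_io_old(struct request *req, blk_status_t err)
{
	/* ... per-command completion work ... */
	blk_mq_free_request(req);
	return RQ_END_IO_NONE;
}

/* New style: hand ownership back to blk-mq, which can free requests in batches. */
static enum rq_end_io_ret example_end_io_new(struct request *req, blk_status_t err)
{
	/* ... per-command completion work ... */
	return RQ_END_IO_FREE;
}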
@@ -430,8 +430,7 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
 	else
 		io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
 
-	blk_mq_free_request(req);
-	return RQ_END_IO_NONE;
+	return RQ_END_IO_FREE;
 }
 
 static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
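On the consumer side, a batched completer treats the end_io return value roughly as in the simplified sketch below. This is loosely modelled on blk_mq_end_request_batch() and is not the actual blk-mq code: example_complete_batch is a hypothetical name, and the real implementation collects the requests and frees them in bulk per hardware queue rather than calling blk_mq_free_request() per request.

#include <linux/blk-mq.h>

static void example_complete_batch(struct request **rqs, int nr, blk_status_t err)
{
	int i;

	for (i = 0; i < nr; i++) {
		struct request *rq = rqs[i];

		/* RQ_END_IO_NONE means the handler kept ownership of rq. */
		if (rq->end_io && rq->end_io(rq, err) == RQ_END_IO_NONE)
			continue;

		/*
		 * RQ_END_IO_FREE: rq now belongs to the completer. Free it
		 * here; the real code defers this so a whole batch of
		 * requests is freed at once.
		 */
		blk_mq_free_request(rq);
	}
}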