io_uring: Use io_schedule* in cqring wait
I observed poor performance of io_uring compared to synchronous IO. That
turns out to be caused by deeper CPU idle states entered with io_uring,
due to io_uring using plain schedule(), whereas synchronous IO uses
io_schedule(). The losses due to this are substantial.

On my cascade lake workstation, t/io_uring from the fio repository e.g.
yields regressions between 20% and 40% with the following command:

./t/io_uring -r 5 -X0 -d 1 -s 1 -c 1 -p 0 -S$use_sync -R 0 /mnt/t2/fio/write.0.0

This is repeatable with different filesystems, using raw block devices
and using different block devices.

Use io_schedule_prepare() / io_schedule_finish() in
io_cqring_wait_schedule() to address the difference.

After that, using io_uring is on par with or surpasses synchronous IO
(using registered files etc. makes it reliably win, but that is arguably
a less fair comparison).

There are other calls to schedule() in io_uring/, but none immediately
jump out as similarly situated, so I did not touch them. Similarly, it's
possible that mutex_lock_io() should be used, but it's not clear if
there are cases where that matters.

Cc: stable@vger.kernel.org # 5.10+
Cc: Pavel Begunkov <asml.silence@gmail.com>
Cc: io-uring@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Andres Freund <andres@anarazel.de>
Link: https://lore.kernel.org/r/20230707162007.194068-1-andres@anarazel.de
[axboe: minor style fixup]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
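[Editor's note, not part of the patch: a rough sketch of why the prepare/finish
pair is the right bracket here. In the scheduler, io_schedule() is essentially
schedule() wrapped in io_schedule_prepare()/io_schedule_finish(), which mark
the task as waiting on IO via current->in_iowait so that cpufreq and idle
governors account the sleep as an IO wait; this approximates
kernel/sched/core.c and omits details such as block-plug flushing. Calling the
pair directly lets io_cqring_wait_schedule() get the same accounting around
both schedule() and schedule_hrtimeout().]

	/* Approximate shape of the scheduler helpers; illustrative only. */
	int io_schedule_prepare(void)
	{
		int old_iowait = current->in_iowait;

		current->in_iowait = 1;	/* account the upcoming sleep as iowait */
		return old_iowait;
	}

	void io_schedule_finish(int token)
	{
		current->in_iowait = token;	/* restore previous iowait state */
	}

	void __sched io_schedule(void)
	{
		int token = io_schedule_prepare();

		schedule();		/* plain schedule(), but with iowait accounting */
		io_schedule_finish(token);
	}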
commit 8a796565ce (parent dfbe5561ae)
io_uring/io_uring.c:

@@ -2489,6 +2489,8 @@ int io_run_task_work_sig(struct io_ring_ctx *ctx)
 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 					  struct io_wait_queue *iowq)
 {
+	int token, ret;
+
 	if (unlikely(READ_ONCE(ctx->check_cq)))
 		return 1;
 	if (unlikely(!llist_empty(&ctx->work_llist)))
@@ -2499,11 +2501,20 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 		return -EINTR;
 	if (unlikely(io_should_wake(iowq)))
 		return 0;
+
+	/*
+	 * Use io_schedule_prepare/finish, so cpufreq can take into account
+	 * that the task is waiting for IO - turns out to be important for low
+	 * QD IO.
+	 */
+	token = io_schedule_prepare();
+	ret = 0;
 	if (iowq->timeout == KTIME_MAX)
 		schedule();
 	else if (!schedule_hrtimeout(&iowq->timeout, HRTIMER_MODE_ABS))
-		return -ETIME;
-	return 0;
+		ret = -ETIME;
+	io_schedule_finish(token);
+	return ret;
 }
 
 /*