io_uring: remove io_req_tw_post_queue
[ Upstream commit 833b5dfffc ]

Remove io_req_tw_post() and io_req_tw_post_queue(), we can use
io_req_task_complete() instead.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/b9b73c08022c7f1457023ac841f35c0100e70345.1669203009.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Stable-dep-of: ef5c600adb ("io_uring: always prep_async for drain requests")
Signed-off-by: Sasha Levin <sashal@kernel.org>
commit 85224a3f89
parent 65aeb34c0f
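For context, a minimal standalone sketch of the pattern the diff below converges
on: instead of keeping a dedicated io_req_tw_post() callback, the queueing helper
stores the result on the request and points its task-work function at the one
generic completion handler (io_req_task_complete() in the real code). All types
and names in the sketch are simplified stand-ins rather than the actual kernel
definitions, and the task-work "queue" runs the callback immediately purely for
illustration.

#include <stdio.h>
#include <errno.h>

struct req {
	int res;			/* completion result (e.g. -ECANCELED) */
	void (*tw_func)(struct req *);	/* deferred task-work callback */
};

/* generic completion handler, standing in for io_req_task_complete() */
static void req_task_complete(struct req *r)
{
	printf("completing request with res=%d\n", r->res);
}

/* stand-in for io_req_task_work_add(); a real ring would defer this */
static void req_task_work_add(struct req *r)
{
	r->tw_func(r);		/* run immediately in this toy model */
}

/* mirrors the shape of the new inline io_req_queue_tw_complete() helper */
static void req_queue_tw_complete(struct req *r, int res)
{
	r->res = res;
	r->tw_func = req_task_complete;
	req_task_work_add(r);
}

int main(void)
{
	struct req cancelled = { 0 };

	/* e.g. a killed timeout queues its completion with -ECANCELED */
	req_queue_tw_complete(&cancelled, -ECANCELED);
	return 0;
}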
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
@@ -1236,18 +1236,6 @@ int io_run_local_work(struct io_ring_ctx *ctx)
 	return ret;
 }
 
-static void io_req_tw_post(struct io_kiocb *req, bool *locked)
-{
-	io_req_complete_post(req);
-}
-
-void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags)
-{
-	io_req_set_res(req, res, cflags);
-	req->io_task_work.func = io_req_tw_post;
-	io_req_task_work_add(req);
-}
-
 static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
 {
 	/* not needed for normal modes, but SQPOLL depends on it */
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
@@ -53,7 +53,6 @@ static inline bool io_req_ffs_set(struct io_kiocb *req)
 void __io_req_task_work_add(struct io_kiocb *req, bool allow_local);
 bool io_is_uring_fops(struct file *file);
 bool io_alloc_async_data(struct io_kiocb *req);
-void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
 void io_req_task_queue(struct io_kiocb *req);
 void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
 void io_req_task_complete(struct io_kiocb *req, bool *locked);
@@ -380,4 +379,11 @@ static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
 		      ctx->submitter_task == current);
 }
 
+static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
+{
+	io_req_set_res(req, res, 0);
+	req->io_task_work.func = io_req_task_complete;
+	io_req_task_work_add(req);
+}
+
 #endif
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
@@ -63,7 +63,7 @@ static bool io_kill_timeout(struct io_kiocb *req, int status)
 		atomic_set(&req->ctx->cq_timeouts,
 			atomic_read(&req->ctx->cq_timeouts) + 1);
 		list_del_init(&timeout->list);
-		io_req_tw_post_queue(req, status, 0);
+		io_req_queue_tw_complete(req, status);
 		return true;
 	}
 	return false;
@@ -161,7 +161,7 @@ void io_disarm_next(struct io_kiocb *req)
 		req->flags &= ~REQ_F_ARM_LTIMEOUT;
 		if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
 			io_remove_next_linked(req);
-			io_req_tw_post_queue(link, -ECANCELED, 0);
+			io_req_queue_tw_complete(link, -ECANCELED);
 		}
 	} else if (req->flags & REQ_F_LINK_TIMEOUT) {
 		struct io_ring_ctx *ctx = req->ctx;
@@ -170,7 +170,7 @@ void io_disarm_next(struct io_kiocb *req)
 		link = io_disarm_linked_timeout(req);
 		spin_unlock_irq(&ctx->timeout_lock);
 		if (link)
-			io_req_tw_post_queue(link, -ECANCELED, 0);
+			io_req_queue_tw_complete(link, -ECANCELED);
 	}
 	if (unlikely((req->flags & REQ_F_FAIL) &&
 		     !(req->flags & REQ_F_HARDLINK)))