io_uring: move our reference counting into a header

Signed-off-by: Jens Axboe <axboe@kernel.dk>

Commit: e418bbc97b (parent: 36404b09aa)
|
@ -91,6 +91,7 @@
|
|||
|
||||
#include "io_uring_types.h"
|
||||
#include "io_uring.h"
|
||||
#include "refs.h"
|
||||
|
||||
#include "xattr.h"
|
||||
#include "nop.h"
|
||||
|
@ -611,54 +612,12 @@ static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
|
|||
/* Walk a request chain: start at @head and follow each request's ->link. */
#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

/*
 * Shamelessly stolen from the mm implementation of page reference checking,
 * see commit f958d7b528b1 for details.
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
|
||||
|
||||
static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
|
||||
{
|
||||
WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
|
||||
return atomic_inc_not_zero(&req->refs);
|
||||
}
|
||||
|
||||
static inline bool req_ref_put_and_test(struct io_kiocb *req)
|
||||
{
|
||||
if (likely(!(req->flags & REQ_F_REFCOUNT)))
|
||||
return true;
|
||||
|
||||
WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
|
||||
return atomic_dec_and_test(&req->refs);
|
||||
}
|
||||
|
||||
static inline void req_ref_get(struct io_kiocb *req)
|
||||
{
|
||||
WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
|
||||
WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
|
||||
atomic_inc(&req->refs);
|
||||
}
|
||||
|
||||
static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
|
||||
{
|
||||
if (!wq_list_empty(&ctx->submit_state.compl_reqs))
|
||||
__io_submit_flush_completions(ctx);
|
||||
}
|
||||
|
||||
static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
|
||||
{
|
||||
if (!(req->flags & REQ_F_REFCOUNT)) {
|
||||
req->flags |= REQ_F_REFCOUNT;
|
||||
atomic_set(&req->refs, nr);
|
||||
}
|
||||
}
|
||||
|
||||
/* Enable refcounting on @req with the default single reference. */
static inline void io_req_set_refcount(struct io_kiocb *req)
{
	__io_req_set_refcount(req, 1);
}
|
||||
|
||||
/* NOTE(review): appears to be the batch size for rsrc node reference puts —
 * confirm against io_rsrc_put_node() callers (body not visible here). */
#define IO_RSRC_REF_BATCH 100
|
||||
|
||||
static void io_rsrc_put_node(struct io_rsrc_node *node, int nr)
|
||||
|
|
|
@ -0,0 +1,48 @@
|
|||
#ifndef IOU_REQ_REF_H
#define IOU_REQ_REF_H

#include <linux/atomic.h>

#include "io_uring_types.h"

/*
 * Shamelessly stolen from the mm implementation of page reference checking,
 * see commit f958d7b528b1 for details.
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
||||
static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
|
||||
{
|
||||
WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
|
||||
return atomic_inc_not_zero(&req->refs);
|
||||
}
|
||||
|
||||
static inline bool req_ref_put_and_test(struct io_kiocb *req)
|
||||
{
|
||||
if (likely(!(req->flags & REQ_F_REFCOUNT)))
|
||||
return true;
|
||||
|
||||
WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
|
||||
return atomic_dec_and_test(&req->refs);
|
||||
}
|
||||
|
||||
static inline void req_ref_get(struct io_kiocb *req)
|
||||
{
|
||||
WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
|
||||
WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
|
||||
atomic_inc(&req->refs);
|
||||
}
|
||||
|
||||
static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
|
||||
{
|
||||
if (!(req->flags & REQ_F_REFCOUNT)) {
|
||||
req->flags |= REQ_F_REFCOUNT;
|
||||
atomic_set(&req->refs, nr);
|
||||
}
|
||||
}
|
||||
|
||||
/* Convenience wrapper: enable refcounting on @req with one reference. */
static inline void io_req_set_refcount(struct io_kiocb *req)
{
	__io_req_set_refcount(req, 1);
}

#endif
|
Loading…
Reference in New Issue