author | Stefan Roesch <shr@fb.com> | 2022-02-14 10:04:29 -0800
committer | Jens Axboe <axboe@kernel.dk> | 2022-03-10 06:32:49 -0700
commit | d5ec1dfaf59bf1632d7f2114d209bf80bfbd907a
tree | 88fbf13f64c79ad7dccf197516bc4d86bfb8b065 /fs/io_uring.c
parent | 86127bb18aea7e553cfd0842bcd33a6dc80bfbc8
download | linux-d5ec1dfaf59bf1632d7f2114d209bf80bfbd907a.tar.gz
io-uring: add __fill_cqe function
This introduces the __fill_cqe function, which is necessary
to issue the io_uring_complete tracepoint correctly.
Signed-off-by: Stefan Roesch <shr@fb.com>
Link: https://lore.kernel.org/r/20220214180430.70572-2-shr@fb.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r-- | fs/io_uring.c | 22
1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 1dce3f6e7031..b09c40353415 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1914,13 +1914,11 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
 	return true;
 }
 
-static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
+static inline bool __fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
 				 s32 res, u32 cflags)
 {
 	struct io_uring_cqe *cqe;
 
-	trace_io_uring_complete(ctx, user_data, res, cflags);
-
 	/*
 	 * If we can't get a cq entry, userspace overflowed the
 	 * submission (by quite a lot). Increment the overflow count in
@@ -1936,17 +1934,24 @@ static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
 	return io_cqring_event_overflow(ctx, user_data, res, cflags);
 }
 
+static inline bool __io_fill_cqe(struct io_kiocb *req, s32 res, u32 cflags)
+{
+	trace_io_uring_complete(req->ctx, req->user_data, res, cflags);
+	return __fill_cqe(req->ctx, req->user_data, res, cflags);
+}
+
 static noinline void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
 {
 	if (!(req->flags & REQ_F_CQE_SKIP))
-		__io_fill_cqe(req->ctx, req->user_data, res, cflags);
+		__io_fill_cqe(req, res, cflags);
 }
 
 static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
 				     s32 res, u32 cflags)
 {
 	ctx->cq_extra++;
-	return __io_fill_cqe(ctx, user_data, res, cflags);
+	trace_io_uring_complete(ctx, user_data, res, cflags);
+	return __fill_cqe(ctx, user_data, res, cflags);
 }
 
 static void __io_req_complete_post(struct io_kiocb *req, s32 res,
@@ -1955,7 +1960,7 @@ static void __io_req_complete_post(struct io_kiocb *req, s32 res,
 	struct io_ring_ctx *ctx = req->ctx;
 
 	if (!(req->flags & REQ_F_CQE_SKIP))
-		__io_fill_cqe(ctx, req->user_data, res, cflags);
+		__io_fill_cqe(req, res, cflags);
 	/*
 	 * If we're the last reference to this request, add to our locked
 	 * free_list cache.
@@ -2544,8 +2549,7 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 					    comp_list);
 
 		if (!(req->flags & REQ_F_CQE_SKIP))
-			__io_fill_cqe(ctx, req->user_data, req->result,
-				      req->cflags);
+			__io_fill_cqe(req, req->result, req->cflags);
 	}
 
 	io_commit_cqring(ctx);
@@ -2667,7 +2671,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 		if (unlikely(req->flags & REQ_F_CQE_SKIP))
 			continue;
 
-		__io_fill_cqe(ctx, req->user_data, req->result, io_put_kbuf(req));
+		__io_fill_cqe(req, req->result, io_put_kbuf(req));
 		nr_events++;
 	}
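To summarize the resulting call structure, here is a minimal userspace sketch of the split introduced by this patch. It is only a model of the control flow shown in the diff above, not the kernel code: the struct layouts, the printf-based trace stub, and the trivial __fill_cqe() body are stand-ins. The point it illustrates is that __fill_cqe() is now trace-free, while __io_fill_cqe() (request-bound completions) and io_fill_cqe_aux() (auxiliary completions) each emit io_uring_complete before the CQE is posted.

```c
/*
 * Userspace sketch of the call structure after this patch.
 * Types and the trace call are simplified stand-ins, not the
 * kernel definitions from fs/io_uring.c.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct io_ring_ctx { unsigned int cq_extra; };
struct io_kiocb    { struct io_ring_ctx *ctx; uint64_t user_data; };

/* Stand-in for trace_io_uring_complete(). */
static void trace_io_uring_complete(struct io_ring_ctx *ctx, uint64_t user_data,
				    int32_t res, uint32_t cflags)
{
	printf("io_uring_complete: ctx=%p user_data=%llu res=%d cflags=%u\n",
	       (void *)ctx, (unsigned long long)user_data, res, (unsigned)cflags);
}

/* Posts the CQE (or accounts overflow) - no tracing here any more. */
static bool __fill_cqe(struct io_ring_ctx *ctx, uint64_t user_data,
		       int32_t res, uint32_t cflags)
{
	/* In the kernel: get a cq entry and fill it, or fall back to overflow. */
	(void)ctx; (void)user_data; (void)res; (void)cflags;
	return true;
}

/* Request-bound completion: trace with the request's ctx/user_data, then fill. */
static bool __io_fill_cqe(struct io_kiocb *req, int32_t res, uint32_t cflags)
{
	trace_io_uring_complete(req->ctx, req->user_data, res, cflags);
	return __fill_cqe(req->ctx, req->user_data, res, cflags);
}

/* Auxiliary completion: no request, so the caller traces explicitly. */
static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, uint64_t user_data,
			    int32_t res, uint32_t cflags)
{
	ctx->cq_extra++;
	trace_io_uring_complete(ctx, user_data, res, cflags);
	return __fill_cqe(ctx, user_data, res, cflags);
}

int main(void)
{
	struct io_ring_ctx ctx = { 0 };
	struct io_kiocb req = { .ctx = &ctx, .user_data = 42 };

	__io_fill_cqe(&req, 0, 0);      /* request completion path */
	io_fill_cqe_aux(&ctx, 7, 0, 0); /* auxiliary completion path */
	return 0;
}
```

Note that this is only an illustration of the flow in the diff above; the real __fill_cqe() has to reserve a CQ entry and fall back to io_cqring_event_overflow() when userspace has let the CQ ring fill up, as the comment retained in the first hunk explains.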