From: Stefan Roesch <[email protected]>
To: <[email protected]>, <[email protected]>,
<[email protected]>
Cc: <[email protected]>, <[email protected]>, Jens Axboe <[email protected]>
Subject: [PATCH v3 08/12] io_uring: overflow processing for CQE32
Date: Mon, 25 Apr 2022 11:25:26 -0700
Message-ID: <[email protected]>
In-Reply-To: <[email protected]>
This adds the overflow processing for large CQEs.

Two new parameters are added to io_cqring_event_overflow() and used to
initialize the additional fields of a large CQE.

Enough space for large CQEs is allocated in the overflow structure. If
no large CQEs are used, the size of the allocation is unchanged.

The cqe field can have a different size depending on whether it is a
large CQE or not. To be able to allocate different sizes, the two
fields in the structure are reordered so that the variable-size cqe
field comes last.
Co-developed-by: Jens Axboe <[email protected]>
Signed-off-by: Stefan Roesch <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
---
fs/io_uring.c | 31 ++++++++++++++++++++++---------
1 file changed, 22 insertions(+), 9 deletions(-)
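
As a minimal, standalone sketch of the layout and sizing change above
(not the kernel code: a plain next pointer stands in for struct
list_head, malloc() for kmalloc(), and locking/CQ accounting are
omitted; the hypothetical alloc_ocqe() mirrors what
io_cqring_event_overflow() does in the patch):

/*
 * Userspace sketch of the overflow-entry layout. Like the kernel
 * UAPI header, it relies on the flexible-array-member extension
 * for big_cqe[].
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define IORING_SETUP_CQE32	(1U << 11)	/* flag value as merged upstream */

struct io_uring_cqe {
	uint64_t user_data;
	int32_t  res;
	uint32_t flags;
	uint64_t big_cqe[];	/* second 16 bytes of a 32-byte CQE */
};

struct io_overflow_cqe {
	struct io_overflow_cqe *next;	/* list linkage first ... */
	struct io_uring_cqe cqe;	/* ... variable-size CQE last */
};

static struct io_overflow_cqe *alloc_ocqe(unsigned int ring_flags,
					  uint64_t user_data, int32_t res,
					  uint32_t cflags, uint64_t extra1,
					  uint64_t extra2)
{
	size_t ocq_size = sizeof(struct io_overflow_cqe);
	int is_cqe32 = ring_flags & IORING_SETUP_CQE32;
	struct io_overflow_cqe *ocqe;

	/* one extra 16-byte CQE worth of space for the big_cqe fields */
	if (is_cqe32)
		ocq_size += sizeof(struct io_uring_cqe);

	ocqe = malloc(ocq_size);
	if (!ocqe)
		return NULL;
	ocqe->next = NULL;
	ocqe->cqe.user_data = user_data;
	ocqe->cqe.res = res;
	ocqe->cqe.flags = cflags;
	if (is_cqe32) {
		ocqe->cqe.big_cqe[0] = extra1;
		ocqe->cqe.big_cqe[1] = extra2;
	}
	return ocqe;
}

int main(void)
{
	struct io_overflow_cqe *ocqe;
	unsigned char ring_slot[32];
	size_t cqe_size = 2 * sizeof(struct io_uring_cqe);

	ocqe = alloc_ocqe(IORING_SETUP_CQE32, 0x1234, 0, 0, 11, 22);
	if (!ocqe)
		return 1;

	/* flushing copies twice the normal CQE size when CQE32 is set */
	memcpy(ring_slot, &ocqe->cqe, cqe_size);
	printf("copied %zu bytes into the CQ ring slot\n", cqe_size);
	free(ocqe);
	return 0;
}

Keeping the list linkage first means the trailing cqe field can simply
grow into the extra 16 bytes, so rings set up without
IORING_SETUP_CQE32 pay no additional memory per overflow entry.
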
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 68b61d2b356d..3630671325ea 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -220,8 +220,8 @@ struct io_mapped_ubuf {
struct io_ring_ctx;
struct io_overflow_cqe {
- struct io_uring_cqe cqe;
struct list_head list;
+ struct io_uring_cqe cqe;
};
struct io_fixed_file {
@@ -2017,10 +2017,14 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
{
bool all_flushed, posted;
+ size_t cqe_size = sizeof(struct io_uring_cqe);
if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
return false;
+ if (ctx->flags & IORING_SETUP_CQE32)
+ cqe_size <<= 1;
+
posted = false;
spin_lock(&ctx->completion_lock);
while (!list_empty(&ctx->cq_overflow_list)) {
@@ -2032,7 +2036,7 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
ocqe = list_first_entry(&ctx->cq_overflow_list,
struct io_overflow_cqe, list);
if (cqe)
- memcpy(cqe, &ocqe->cqe, sizeof(*cqe));
+ memcpy(cqe, &ocqe->cqe, cqe_size);
else
io_account_cq_overflow(ctx);
@@ -2121,11 +2125,16 @@ static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
}
static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
- s32 res, u32 cflags)
+ s32 res, u32 cflags, u64 extra1, u64 extra2)
{
struct io_overflow_cqe *ocqe;
+ size_t ocq_size = sizeof(struct io_overflow_cqe);
+ bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
+
+ if (is_cqe32)
+ ocq_size += sizeof(struct io_uring_cqe);
- ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
+ ocqe = kmalloc(ocq_size, GFP_ATOMIC | __GFP_ACCOUNT);
if (!ocqe) {
/*
* If we're in ring overflow flush mode, or in task cancel mode,
@@ -2144,6 +2153,10 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
ocqe->cqe.user_data = user_data;
ocqe->cqe.res = res;
ocqe->cqe.flags = cflags;
+ if (is_cqe32) {
+ ocqe->cqe.big_cqe[0] = extra1;
+ ocqe->cqe.big_cqe[1] = extra2;
+ }
list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
return true;
}
@@ -2165,7 +2178,7 @@ static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
WRITE_ONCE(cqe->flags, cflags);
return true;
}
- return io_cqring_event_overflow(ctx, user_data, res, cflags);
+ return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
}
static inline bool __io_fill_cqe_req_filled(struct io_ring_ctx *ctx,
@@ -2187,7 +2200,7 @@ static inline bool __io_fill_cqe_req_filled(struct io_ring_ctx *ctx,
return true;
}
return io_cqring_event_overflow(ctx, req->cqe.user_data,
- req->cqe.res, req->cqe.flags);
+ req->cqe.res, req->cqe.flags, 0, 0);
}
static inline bool __io_fill_cqe32_req_filled(struct io_ring_ctx *ctx,
@@ -2213,8 +2226,8 @@ static inline bool __io_fill_cqe32_req_filled(struct io_ring_ctx *ctx,
return true;
}
- return io_cqring_event_overflow(ctx, req->cqe.user_data,
- req->cqe.res, req->cqe.flags);
+ return io_cqring_event_overflow(ctx, req->cqe.user_data, req->cqe.res,
+ req->cqe.flags, extra1, extra2);
}
static inline bool __io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
@@ -2251,7 +2264,7 @@ static inline void __io_fill_cqe32_req(struct io_kiocb *req, s32 res, u32 cflags
return;
}
- io_cqring_event_overflow(ctx, req->cqe.user_data, res, cflags);
+ io_cqring_event_overflow(ctx, req->cqe.user_data, res, cflags, extra1, extra2);
}
static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
--
2.30.2
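
For completeness, a hedged sketch of the user-visible side this
overflow path feeds: setting up a ring with IORING_SETUP_CQE32 and
reading the two extra fields from a completion. It assumes a liburing
and kernel headers new enough to carry the flag and the big_cqe[]
member introduced earlier in this series; the nop wiring is patch
12/12.

#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	ret = io_uring_queue_init(8, &ring, IORING_SETUP_CQE32);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %d\n", ret);
		return 1;
	}

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	io_uring_sqe_set_data(sqe, (void *)0x1234);
	io_uring_submit(&ring);

	ret = io_uring_wait_cqe(&ring, &cqe);
	if (ret == 0) {
		/* big_cqe[] holds whatever the kernel placed in the
		 * second half of the 32-byte CQE (extra1/extra2) */
		printf("res=%d extra1=%llu extra2=%llu\n", cqe->res,
		       (unsigned long long)cqe->big_cqe[0],
		       (unsigned long long)cqe->big_cqe[1]);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}
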