public inbox for io-uring@vger.kernel.org
From: Pavel Begunkov <asml.silence@gmail.com>
To: io-uring@vger.kernel.org
Cc: asml.silence@gmail.com
Subject: [RFC 07/16] io_uring: add structure keeping ring offsets
Date: Thu,  6 Nov 2025 17:01:46 +0000
Message-ID: <b96ded3a1e0bda775291b9b989ee868a0ff6b9c3.1762447538.git.asml.silence@gmail.com>
In-Reply-To: <cover.1762447538.git.asml.silence@gmail.com>

Add struct io_scq_dim, which keeps all offset / size / dimension
information about the rings, and let rings_size() initialise it. This
improves calculation locality and allows deduplicating some code.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
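Note for reviewers: a minimal sketch of the intended calling convention,
lifted from the io_allocate_scq_urings() hunk below (the code around the
call is illustrative only, not part of the patch):

	struct io_scq_dim dims;
	int ret;

	/* fills in SQE array size, compound CQ size and SQ array offset */
	ret = rings_size(ctx->flags, p->sq_entries, p->cq_entries, &dims);
	if (ret)
		return ret;

	/*
	 * dims.cq_comp_size covers the rings header, the CQEs and, unless
	 * IORING_SETUP_NO_SQARRAY is set, the SQ index array.
	 */
	rd.size = PAGE_ALIGN(dims.cq_comp_size);
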
 io_uring/io_uring.c | 60 ++++++++++++++++++++++++---------------------
 io_uring/io_uring.h | 12 +++++++--
 io_uring/register.c | 19 ++++++--------
 3 files changed, 49 insertions(+), 42 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 30ba60974f1d..8166ea9140f8 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2757,49 +2757,61 @@ static void io_rings_free(struct io_ring_ctx *ctx)
 	ctx->sq_sqes = NULL;
 }
 
-unsigned long rings_size(unsigned int flags, unsigned int sq_entries,
-			 unsigned int cq_entries, size_t *sq_offset)
+int rings_size(unsigned int flags, unsigned int sq_entries,
+	       unsigned int cq_entries, struct io_scq_dim *dims)
 {
 	struct io_rings *rings;
 	size_t off, sq_array_size;
+	size_t sqe_size;
+
+	dims->sq_array_offset = SIZE_MAX;
+
+	sqe_size = sizeof(struct io_uring_sqe);
+	if (flags & IORING_SETUP_SQE128)
+		sqe_size *= 2;
+
+	dims->sq_size = array_size(sqe_size, sq_entries);
+	if (dims->sq_size == SIZE_MAX)
+		return -EOVERFLOW;
 
 	off = struct_size(rings, cqes, cq_entries);
 	if (off == SIZE_MAX)
-		return SIZE_MAX;
+		return -EOVERFLOW;
 	if (flags & IORING_SETUP_CQE32) {
 		if (check_shl_overflow(off, 1, &off))
-			return SIZE_MAX;
+			return -EOVERFLOW;
 	}
 	if (flags & IORING_SETUP_CQE_MIXED) {
 		if (cq_entries < 2)
-			return SIZE_MAX;
+			return -EOVERFLOW;
 	}
 	if (flags & IORING_SETUP_SQE_MIXED) {
 		if (sq_entries < 2)
-			return SIZE_MAX;
+			return -EOVERFLOW;
 	}
 
 #ifdef CONFIG_SMP
 	off = ALIGN(off, SMP_CACHE_BYTES);
 	if (off == 0)
-		return SIZE_MAX;
+		return -EOVERFLOW;
 #endif
 
 	if (flags & IORING_SETUP_NO_SQARRAY) {
-		*sq_offset = SIZE_MAX;
-		return off;
+		dims->cq_comp_size = off;
+		return 0;
 	}
 
-	*sq_offset = off;
+	dims->sq_array_offset = off;
 
 	sq_array_size = array_size(sizeof(u32), sq_entries);
 	if (sq_array_size == SIZE_MAX)
-		return SIZE_MAX;
+		return -EOVERFLOW;
 
 	if (check_add_overflow(off, sq_array_size, &off))
-		return SIZE_MAX;
+		return -EOVERFLOW;
 
-	return off;
+	dims->cq_comp_size = off;
+	return 0;
 }
 
 static __cold void __io_req_caches_free(struct io_ring_ctx *ctx)
@@ -3354,27 +3366,19 @@ static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
 {
 	struct io_uring_region_desc rd;
 	struct io_rings *rings;
-	size_t sq_array_offset;
-	size_t sq_size, cq_size, sqe_size;
+	struct io_scq_dim dims;
 	int ret;
 
 	/* make sure these are sane, as we already accounted them */
 	ctx->sq_entries = p->sq_entries;
 	ctx->cq_entries = p->cq_entries;
 
-	sqe_size = sizeof(struct io_uring_sqe);
-	if (p->flags & IORING_SETUP_SQE128)
-		sqe_size *= 2;
-	sq_size = array_size(sqe_size, p->sq_entries);
-	if (sq_size == SIZE_MAX)
-		return -EOVERFLOW;
-	cq_size = rings_size(ctx->flags, p->sq_entries, p->cq_entries,
-			  &sq_array_offset);
-	if (cq_size == SIZE_MAX)
-		return -EOVERFLOW;
+	ret = rings_size(ctx->flags, p->sq_entries, p->cq_entries, &dims);
+	if (ret)
+		return ret;
 
 	memset(&rd, 0, sizeof(rd));
-	rd.size = PAGE_ALIGN(cq_size);
+	rd.size = PAGE_ALIGN(dims.cq_comp_size);
 	if (ctx->flags & IORING_SETUP_NO_MMAP) {
 		rd.user_addr = p->cq_off.user_addr;
 		rd.flags |= IORING_MEM_REGION_TYPE_USER;
@@ -3385,10 +3389,10 @@ static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
 	ctx->rings = rings = io_region_get_ptr(&ctx->ring_region);
 
 	if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
-		ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
+		ctx->sq_array = (u32 *)((char *)rings + dims.sq_array_offset);
 
 	memset(&rd, 0, sizeof(rd));
-	rd.size = PAGE_ALIGN(sq_size);
+	rd.size = PAGE_ALIGN(dims.sq_size);
 	if (ctx->flags & IORING_SETUP_NO_MMAP) {
 		rd.user_addr = p->sq_off.user_addr;
 		rd.flags |= IORING_MEM_REGION_TYPE_USER;
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index c4d47ad7777c..29464be9733c 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -17,6 +17,14 @@
 #include <trace/events/io_uring.h>
 #endif
 
+struct io_scq_dim {
+	size_t sq_array_offset;
+	size_t sq_size;
+
+	/* Compound array mmap'ed together with CQ. */
+	size_t cq_comp_size;
+};
+
 struct io_ctx_config {
 	struct io_uring_params p;
 	struct io_uring_params __user *uptr;
@@ -139,8 +147,8 @@ static inline bool io_should_wake(struct io_wait_queue *iowq)
 #define IORING_MAX_ENTRIES	32768
 #define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)
 
-unsigned long rings_size(unsigned int flags, unsigned int sq_entries,
-			 unsigned int cq_entries, size_t *sq_offset);
+int rings_size(unsigned int flags, unsigned int sq_entries,
+	       unsigned int cq_entries, struct io_scq_dim *dims);
 int io_uring_fill_params(struct io_uring_params *p);
 bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow, bool cqe32);
 int io_run_task_work_sig(struct io_ring_ctx *ctx);
diff --git a/io_uring/register.c b/io_uring/register.c
index 0d70696468f6..85814f983dde 100644
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -402,6 +402,7 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
 	struct io_ring_ctx_rings o = { }, n = { }, *to_free = NULL;
 	size_t size, sq_array_offset;
 	unsigned i, tail, old_head;
+	struct io_scq_dim dims;
 	struct io_uring_params p;
 	int ret;
 
@@ -419,11 +420,12 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
 	ret = io_uring_fill_params(&p);
 	if (unlikely(ret))
 		return ret;
+	ret = rings_size(p.flags, p.sq_entries, p.cq_entries, &dims);
+	if (ret)
+		return ret;
 
-	size = rings_size(p.flags, p.sq_entries, p.cq_entries,
-				&sq_array_offset);
-	if (size == SIZE_MAX)
-		return -EOVERFLOW;
+	size = dims.cq_comp_size;
+	sq_array_offset = dims.sq_array_offset;
 
 	memset(&rd, 0, sizeof(rd));
 	rd.size = PAGE_ALIGN(size);
@@ -455,14 +457,7 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
 		return -EFAULT;
 	}
 
-	if (p.flags & IORING_SETUP_SQE128)
-		size = array_size(2 * sizeof(struct io_uring_sqe), p.sq_entries);
-	else
-		size = array_size(sizeof(struct io_uring_sqe), p.sq_entries);
-	if (size == SIZE_MAX) {
-		io_register_free_rings(ctx, &n);
-		return -EOVERFLOW;
-	}
+	size = dims.sq_size;
 
 	memset(&rd, 0, sizeof(rd));
 	rd.size = PAGE_ALIGN(size);
-- 
2.49.0



Thread overview: 17+ messages
2025-11-06 17:01 [RFC 00/16] Introduce ring flexible placement Pavel Begunkov
2025-11-06 17:01 ` [RFC 01/16] io_uring: add helper calculating region byte size Pavel Begunkov
2025-11-06 17:01 ` [RFC 02/16] io_uring: pass sq entires in the params struct Pavel Begunkov
2025-11-06 17:01 ` [RFC 03/16] io_uring: use mem_is_zero to check ring params Pavel Begunkov
2025-11-06 17:01 ` [RFC 04/16] io_uring: move flags check to io_uring_sanitise_params Pavel Begunkov
2025-11-06 17:01 ` [RFC 05/16] io_uring: introduce struct io_ctx_config Pavel Begunkov
2025-11-06 17:01 ` [RFC 06/16] io_uring: split out config init helper Pavel Begunkov
2025-11-06 17:01 ` [RFC 07/16] io_uring: add structure keeping ring offsets Pavel Begunkov [this message]
2025-11-06 17:01 ` [RFC 08/16] io_uring: pre-calculate scq offsets Pavel Begunkov
2025-11-06 17:01 ` [RFC 09/16] io_uring: inroduce helper for setting user offset Pavel Begunkov
2025-11-06 17:01 ` [RFC 10/16] io_uring: separate cqe array from headers Pavel Begunkov
2025-11-06 17:01 ` [RFC 11/16] io_uring/region: introduce io_region_slice Pavel Begunkov
2025-11-06 17:01 ` [RFC 12/16] io_uring: convert pointer init to io_region_slice Pavel Begunkov
2025-11-06 17:01 ` [RFC 13/16] io_uring: refactor rings_size() Pavel Begunkov
2025-11-06 17:01 ` [RFC 14/16] io_uring: extract io_create_mem_region Pavel Begunkov
2025-11-06 17:01 ` [RFC 15/16] io_uring: allow creating mem region at setup Pavel Begunkov
2025-11-06 17:01 ` [RFC 16/16] io_uring: introduce SCQ placement Pavel Begunkov
