From: Keith Busch <kbusch@meta.com>
To: <io-uring@vger.kernel.org>, <axboe@kernel.dk>, <csander@purestorage.com>
Cc: Keith Busch <kbusch@kernel.org>
Subject: [PATCHv5 2/4] Add support for IORING_SETUP_SQE_MIXED
Date: Mon, 13 Oct 2025 11:00:09 -0700
Message-ID: <20251013180011.134131-5-kbusch@meta.com>
In-Reply-To: <20251013180011.134131-1-kbusch@meta.com>
From: Keith Busch <kbusch@kernel.org>
This adds core support for mixed-size SQEs on the same SQ ring. Before
this, every SQE on a ring was either 64b (the default size) or 128b (if
IORING_SETUP_SQE128 was set at ring initialization). With mixed support,
64b and 128b SQEs may coexist on the same SQ ring. A 128b SQE must use a
128b opcode, which is how the kernel knows to advance its SQ head past
both entries. When acquiring a large SQE at the end of the SQ, where the
two halves would not be contiguous, the client posts a NOP SQE with
IOSQE_CQE_SKIP_SUCCESS set; the kernel processes the NOP and skips
posting a CQE, and the 128b SQE starts at the beginning of the ring.
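For illustration, a minimal usage sketch (not part of this patch, and
untested; it assumes the running kernel accepts IORING_SETUP_SQE_MIXED):

	#include <liburing.h>
	#include <errno.h>

	static int submit_one_128b_nop(void)
	{
		struct io_uring ring;
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;
		int ret;

		ret = io_uring_queue_init(8, &ring, IORING_SETUP_SQE_MIXED);
		if (ret < 0)
			return ret;

		/* Grab a contiguous 128b slot; this may insert a
		 * CQE-skipped NOP if the slot would otherwise wrap
		 * the ring.
		 */
		sqe = io_uring_get_sqe128_mixed(&ring);
		if (!sqe) {
			io_uring_queue_exit(&ring);
			return -EBUSY;
		}

		/* Must use a 128b opcode so the kernel advances its
		 * SQ head past both entries.
		 */
		io_uring_prep_nop128(sqe);

		ret = io_uring_submit(&ring);
		if (ret >= 0 && !io_uring_wait_cqe(&ring, &cqe))
			io_uring_cqe_seen(&ring, cqe);

		io_uring_queue_exit(&ring);
		return 0;
	}

Any NOP inserted on wrap carries IOSQE_CQE_SKIP_SUCCESS, so the
application only sees the one CQE for its 128b request.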
Signed-off-by: Keith Busch <kbusch@kernel.org>
---
src/include/liburing.h | 71 +++++++++++++++++++++++++++++++--
src/include/liburing/io_uring.h | 8 ++++
2 files changed, 75 insertions(+), 4 deletions(-)
diff --git a/src/include/liburing.h b/src/include/liburing.h
index f7af20aa..d6a45cbb 100644
--- a/src/include/liburing.h
+++ b/src/include/liburing.h
@@ -800,6 +800,12 @@ IOURINGINLINE void io_uring_prep_nop(struct io_uring_sqe *sqe)
io_uring_prep_rw(IORING_OP_NOP, sqe, -1, NULL, 0, 0);
}
+IOURINGINLINE void io_uring_prep_nop128(struct io_uring_sqe *sqe)
+ LIBURING_NOEXCEPT
+{
+ io_uring_prep_rw(IORING_OP_NOP128, sqe, -1, NULL, 0, 0);
+}
+
IOURINGINLINE void io_uring_prep_timeout(struct io_uring_sqe *sqe,
const struct __kernel_timespec *ts,
unsigned count, unsigned flags)
@@ -1517,12 +1523,13 @@ IOURINGINLINE void io_uring_prep_socket_direct_alloc(struct io_uring_sqe *sqe,
__io_uring_set_target_fixed_file(sqe, IORING_FILE_INDEX_ALLOC - 1);
}
-IOURINGINLINE void io_uring_prep_uring_cmd(struct io_uring_sqe *sqe,
- int cmd_op,
- int fd)
+IOURINGINLINE void __io_uring_prep_uring_cmd(struct io_uring_sqe *sqe,
+ int op,
+ int cmd_op,
+ int fd)
LIBURING_NOEXCEPT
{
- sqe->opcode = (__u8) IORING_OP_URING_CMD;
+ sqe->opcode = (__u8) op;
sqe->fd = fd;
sqe->cmd_op = cmd_op;
sqe->__pad1 = 0;
@@ -1530,6 +1537,22 @@ IOURINGINLINE void io_uring_prep_uring_cmd(struct io_uring_sqe *sqe,
sqe->len = 0;
}
+IOURINGINLINE void io_uring_prep_uring_cmd(struct io_uring_sqe *sqe,
+ int cmd_op,
+ int fd)
+ LIBURING_NOEXCEPT
+{
+ __io_uring_prep_uring_cmd(sqe, IORING_OP_URING_CMD, cmd_op, fd);
+}
+
+IOURINGINLINE void io_uring_prep_uring_cmd128(struct io_uring_sqe *sqe,
+ int cmd_op,
+ int fd)
+ LIBURING_NOEXCEPT
+{
+ __io_uring_prep_uring_cmd(sqe, IORING_OP_URING_CMD128, cmd_op, fd);
+}
+
/*
* Prepare commands for sockets
*/
@@ -1894,6 +1917,46 @@ IOURINGINLINE struct io_uring_sqe *_io_uring_get_sqe(struct io_uring *ring)
return sqe;
}
+/*
+ * Return a 128B sqe to fill. The application must later call
+ * io_uring_submit() when it's ready to tell the kernel about it. The caller
+ * may call this function multiple times before calling io_uring_submit().
+ *
+ * Returns a vacant 128B sqe, or NULL if the ring is full. If the current tail
+ * is the last entry in the ring, this function inserts a nop with completion
+ * skip so that the 128b entry starts at the beginning of the queue as one
+ * contiguous big sq entry. It's up to the caller to use a 128b opcode so that
+ * the kernel knows how to advance its sq head pointer.
+ */
+IOURINGINLINE struct io_uring_sqe *io_uring_get_sqe128_mixed(struct io_uring *ring)
+ LIBURING_NOEXCEPT
+{
+ struct io_uring_sq *sq = &ring->sq;
+ unsigned head = io_uring_load_sq_head(ring), tail = sq->sqe_tail;
+ struct io_uring_sqe *sqe;
+
+ if (!(ring->flags & IORING_SETUP_SQE_MIXED))
+ return NULL;
+
+ if (((tail + 1) & sq->ring_mask) == 0) {
+ if ((tail + 2) - head >= sq->ring_entries)
+ return NULL;
+
+ sqe = _io_uring_get_sqe(ring);
+ io_uring_prep_nop(sqe);
+ sqe->flags |= IOSQE_CQE_SKIP_SUCCESS;
+ tail = sq->sqe_tail;
+ } else if ((tail + 1) - head >= sq->ring_entries) {
+ return NULL;
+ }
+
+ sqe = &sq->sqes[tail & sq->ring_mask];
+ sq->sqe_tail = tail + 2;
+ io_uring_initialize_sqe(sqe);
+
+ return sqe;
+}
+
/*
* Return the appropriate mask for a buffer ring of size 'ring_entries'
*/
diff --git a/src/include/liburing/io_uring.h b/src/include/liburing/io_uring.h
index 31396057..f2388645 100644
--- a/src/include/liburing/io_uring.h
+++ b/src/include/liburing/io_uring.h
@@ -211,6 +211,12 @@ enum io_uring_sqe_flags_bit {
*/
#define IORING_SETUP_CQE_MIXED (1U << 18)
+/*
+ * Allow both 64b and 128b SQEs. A 128b SQE must use a 128b opcode so that
+ * the kernel knows how to advance its SQ head past both entries.
+ */
+#define IORING_SETUP_SQE_MIXED (1U << 19)
+
enum io_uring_op {
IORING_OP_NOP,
IORING_OP_READV,
@@ -275,6 +281,8 @@ enum io_uring_op {
IORING_OP_READV_FIXED,
IORING_OP_WRITEV_FIXED,
IORING_OP_PIPE,
+ IORING_OP_NOP128,
+ IORING_OP_URING_CMD128,
/* this goes last, obviously */
IORING_OP_LAST,
--
2.47.3