From: Keith Busch <kbusch@meta.com>
To: <io-uring@vger.kernel.org>, <axboe@kernel.dk>, <csander@purestorage.com>
Cc: Keith Busch <kbusch@kernel.org>
Subject: [PATCHv6 2/6] Add support for IORING_SETUP_SQE_MIXED
Date: Wed, 22 Oct 2025 10:19:20 -0700
Message-ID: <20251022171924.2326863-3-kbusch@meta.com>
In-Reply-To: <20251022171924.2326863-1-kbusch@meta.com>
From: Keith Busch <kbusch@kernel.org>
This adds core support for mixed-size SQEs in the same SQ ring. Before
this, SQEs were either 64b in size (the normal size), or 128b if
IORING_SETUP_SQE128 was set at ring initialization. With mixed support
enabled, an SQE on the same SQ ring may be either 64b or 128b. A 128b
SQE must use a 128b opcode so the kernel knows how far to advance its
SQ head. When acquiring a large SQE at the end of the SQ, the client
may post a NOP SQE with IOSQE_CQE_SKIP_SUCCESS set, which the kernel
will process without posting a CQE, so that the 128b entry starts back
at the beginning of the ring.
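
As a usage sketch, using the helpers added below and assuming the
running kernel accepts IORING_SETUP_SQE_MIXED (error handling omitted):

    #include <liburing.h>

    struct io_uring ring;
    struct io_uring_sqe *sqe;
    struct io_uring_cqe *cqe;

    /* the ring must be created with the mixed-SQE flag */
    io_uring_queue_init(8, &ring, IORING_SETUP_SQE_MIXED);

    /*
     * grab a contiguous 128b slot; this may internally queue a
     * skip-complete NOP if the tail is the last entry in the ring
     */
    sqe = io_uring_get_sqe128(&ring);

    /* a 128b opcode tells the kernel how far to advance the SQ head */
    io_uring_prep_nop128(sqe);
    sqe->user_data = 1;

    io_uring_submit(&ring);
    io_uring_wait_cqe(&ring, &cqe);
    io_uring_cqe_seen(&ring, cqe);
    io_uring_queue_exit(&ring);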
Signed-off-by: Keith Busch <kbusch@kernel.org>
---
src/include/liburing.h | 73 +++++++++++++++++++++++++++++++--
src/include/liburing/io_uring.h | 8 ++++
src/sanitize.c | 4 +-
3 files changed, 80 insertions(+), 5 deletions(-)
diff --git a/src/include/liburing.h b/src/include/liburing.h
index 757c3057..83819eb7 100644
--- a/src/include/liburing.h
+++ b/src/include/liburing.h
@@ -800,6 +800,12 @@ IOURINGINLINE void io_uring_prep_nop(struct io_uring_sqe *sqe)
io_uring_prep_rw(IORING_OP_NOP, sqe, -1, NULL, 0, 0);
}
+IOURINGINLINE void io_uring_prep_nop128(struct io_uring_sqe *sqe)
+ LIBURING_NOEXCEPT
+{
+ io_uring_prep_rw(IORING_OP_NOP128, sqe, -1, NULL, 0, 0);
+}
+
IOURINGINLINE void io_uring_prep_timeout(struct io_uring_sqe *sqe,
const struct __kernel_timespec *ts,
unsigned count, unsigned flags)
@@ -1517,12 +1523,13 @@ IOURINGINLINE void io_uring_prep_socket_direct_alloc(struct io_uring_sqe *sqe,
__io_uring_set_target_fixed_file(sqe, IORING_FILE_INDEX_ALLOC - 1);
}
-IOURINGINLINE void io_uring_prep_uring_cmd(struct io_uring_sqe *sqe,
- __u32 cmd_op,
- int fd)
+IOURINGINLINE void __io_uring_prep_uring_cmd(struct io_uring_sqe *sqe,
+ int op,
+ __u32 cmd_op,
+ int fd)
LIBURING_NOEXCEPT
{
- sqe->opcode = IORING_OP_URING_CMD;
+ sqe->opcode = (__u8) op;
sqe->fd = fd;
sqe->cmd_op = cmd_op;
sqe->__pad1 = 0;
@@ -1530,6 +1537,22 @@ IOURINGINLINE void io_uring_prep_uring_cmd(struct io_uring_sqe *sqe,
sqe->len = 0;
}
+IOURINGINLINE void io_uring_prep_uring_cmd(struct io_uring_sqe *sqe,
+ int cmd_op,
+ int fd)
+ LIBURING_NOEXCEPT
+{
+ __io_uring_prep_uring_cmd(sqe, IORING_OP_URING_CMD, cmd_op, fd);
+}
+
+IOURINGINLINE void io_uring_prep_uring_cmd128(struct io_uring_sqe *sqe,
+ int cmd_op,
+ int fd)
+ LIBURING_NOEXCEPT
+{
+ __io_uring_prep_uring_cmd(sqe, IORING_OP_URING_CMD128, cmd_op, fd);
+}
+
/*
* Prepare commands for sockets
*/
@@ -2007,6 +2030,48 @@ IOURINGINLINE struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring)
struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);
#endif
+
+/*
+ * Return a 128B sqe to fill. The application must later call io_uring_submit()
+ * when it's ready to tell the kernel about it. The caller may call this
+ * function multiple times before calling io_uring_submit().
+ *
+ * Returns a vacant 128B sqe, or NULL if we're full. If the current tail is the
+ * last entry in the ring, this function will insert a nop + skip complete such
+ * that the 128b entry wraps back to the beginning of the queue for a
+ * contiguous big sq entry. It's up to the caller to use a 128b opcode in order
+ * for the kernel to know how to advance its sq head pointer.
+ */
+IOURINGINLINE struct io_uring_sqe *io_uring_get_sqe128(struct io_uring *ring)
+ LIBURING_NOEXCEPT
+{
+ struct io_uring_sq *sq = &ring->sq;
+ unsigned head = io_uring_load_sq_head(ring), tail = sq->sqe_tail;
+ struct io_uring_sqe *sqe;
+
+ if (ring->flags & IORING_SETUP_SQE128)
+ return io_uring_get_sqe(ring);
+ if (!(ring->flags & IORING_SETUP_SQE_MIXED))
+ return NULL;
+
+ if (((tail + 1) & sq->ring_mask) == 0) {
+ if ((tail + 2) - head >= sq->ring_entries)
+ return NULL;
+
+ sqe = _io_uring_get_sqe(ring);
+ io_uring_prep_nop(sqe);
+ sqe->flags |= IOSQE_CQE_SKIP_SUCCESS;
+ tail = sq->sqe_tail;
+ } else if ((tail + 1) - head >= sq->ring_entries) {
+ return NULL;
+ }
+
+ sqe = &sq->sqes[tail & sq->ring_mask];
+ sq->sqe_tail = tail + 2;
+ io_uring_initialize_sqe(sqe);
+ return sqe;
+}
+
ssize_t io_uring_mlock_size(unsigned entries, unsigned flags)
LIBURING_NOEXCEPT;
ssize_t io_uring_mlock_size_params(unsigned entries, struct io_uring_params *p)
diff --git a/src/include/liburing/io_uring.h b/src/include/liburing/io_uring.h
index 31396057..44ce8229 100644
--- a/src/include/liburing/io_uring.h
+++ b/src/include/liburing/io_uring.h
@@ -211,6 +211,12 @@ enum io_uring_sqe_flags_bit {
*/
#define IORING_SETUP_CQE_MIXED (1U << 18)
+/*
+ * Allow both 64b and 128b SQEs. If a 128b SQE is posted, it will use a 128b
+ * opcode.
+ */
+#define IORING_SETUP_SQE_MIXED (1U << 19)
+
enum io_uring_op {
IORING_OP_NOP,
IORING_OP_READV,
@@ -275,6 +281,8 @@ enum io_uring_op {
IORING_OP_READV_FIXED,
IORING_OP_WRITEV_FIXED,
IORING_OP_PIPE,
+ IORING_OP_NOP128,
+ IORING_OP_URING_CMD128,
/* this goes last, obviously */
IORING_OP_LAST,
diff --git a/src/sanitize.c b/src/sanitize.c
index 383b7d64..6d8465a5 100644
--- a/src/sanitize.c
+++ b/src/sanitize.c
@@ -120,7 +120,9 @@ static inline void initialize_sanitize_handlers()
sanitize_handlers[IORING_OP_READV_FIXED] = sanitize_sqe_addr;
sanitize_handlers[IORING_OP_WRITEV_FIXED] = sanitize_sqe_addr;
sanitize_handlers[IORING_OP_PIPE] = sanitize_sqe_addr;
- _Static_assert(IORING_OP_PIPE + 1 == IORING_OP_LAST, "Need an implementation for all IORING_OP_* codes");
+ sanitize_handlers[IORING_OP_NOP128] = sanitize_sqe_nop;
+ sanitize_handlers[IORING_OP_URING_CMD128] = sanitize_sqe_optval;
+ _Static_assert(IORING_OP_URING_CMD128 + 1 == IORING_OP_LAST, "Need an implementation for all IORING_OP_* codes");
sanitize_handlers_initialized = true;
}
--
2.47.3
Thread overview: 9+ messages
2025-10-22 17:19 [PATCHv6 0/6] liburing: support for mixed sqes Keith Busch
2025-10-22 17:19 ` [PATCHv6 1/6] liburing: provide uring_cmd prep function Keith Busch
2025-10-22 17:19 ` Keith Busch [this message]
2025-10-22 17:35 ` [PATCHv6 2/6] Add support for IORING_SETUP_SQE_MIXED Jens Axboe
2025-10-22 17:19 ` [PATCHv6 3/6] test: add nop testing for IORING_SETUP_SQE_MIXED Keith Busch
2025-10-22 17:19 ` [PATCHv6 4/6] test: add mixed sqe test for uring commands Keith Busch
2025-10-22 17:19 ` [PATCHv6 5/6] test/fdinfo: flush sq prior to reading Keith Busch
2025-10-22 17:19 ` [PATCHv6 6/6] test/fdinfo: add mixed sqe option to fdinfo test Keith Busch
2025-10-22 17:40 ` [PATCHv6 0/6] liburing: support for mixed sqes Jens Axboe