* [PATCH 1/3] io_uring/zctx: rename flags var for more clarity
2026-02-16 11:45 [PATCH 0/3] deduplicate send and sendmsg zc issue handlers Pavel Begunkov
@ 2026-02-16 11:45 ` Pavel Begunkov
2026-02-16 11:45 ` [PATCH 2/3] io_uring/zctx: move vec regbuf import into io_send_zc_import Pavel Begunkov
` (2 subsequent siblings)
3 siblings, 0 replies; 5+ messages in thread
From: Pavel Begunkov @ 2026-02-16 11:45 UTC (permalink / raw)
To: io-uring; +Cc: asml.silence, axboe, Dylan Yudaken
The name "flags" is too overloaded, so rename the variable in
io_sendmsg_zc() to msg_flags to stress that it contains MSG_*.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
io_uring/net.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/io_uring/net.c b/io_uring/net.c
index 8576c6cb2236..5f7f02e2c034 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -1524,7 +1524,7 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *kmsg = req->async_data;
struct socket *sock;
- unsigned flags;
+ unsigned msg_flags;
int ret, min_ret = 0;
if (req->flags & REQ_F_IMPORT_BUFFER) {
@@ -1550,21 +1550,21 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
(sr->flags & IORING_RECVSEND_POLL_FIRST))
return -EAGAIN;
- flags = sr->msg_flags;
+ msg_flags = sr->msg_flags;
if (issue_flags & IO_URING_F_NONBLOCK)
- flags |= MSG_DONTWAIT;
- if (flags & MSG_WAITALL)
+ msg_flags |= MSG_DONTWAIT;
+ if (msg_flags & MSG_WAITALL)
min_ret = iov_iter_count(&kmsg->msg.msg_iter);
kmsg->msg.msg_control_user = sr->msg_control;
kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
- ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
+ ret = __sys_sendmsg_sock(sock, &kmsg->msg, msg_flags);
if (unlikely(ret < min_ret)) {
if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
return -EAGAIN;
- if (ret > 0 && io_net_retry(sock, flags)) {
+ if (ret > 0 && io_net_retry(sock, msg_flags)) {
sr->done_io += ret;
return -EAGAIN;
}
--
2.52.0
^ permalink raw reply related [flat|nested] 5+ messages in thread* [PATCH 2/3] io_uring/zctx: move vec regbuf import into io_send_zc_import
2026-02-16 11:45 [PATCH 0/3] deduplicate send and sendmsg zc issue handlers Pavel Begunkov
2026-02-16 11:45 ` [PATCH 1/3] io_uring/zctx: rename flags var for more clarity Pavel Begunkov
@ 2026-02-16 11:45 ` Pavel Begunkov
2026-02-16 11:45 ` [PATCH 3/3] io_uring/zctx: unify zerocopy issue variants Pavel Begunkov
2026-02-16 16:35 ` [PATCH 0/3] deduplicate send and senmsg zc issue handlers Jens Axboe
3 siblings, 0 replies; 5+ messages in thread
From: Pavel Begunkov @ 2026-02-16 11:45 UTC (permalink / raw)
To: io-uring; +Cc: asml.silence, axboe, Dylan Yudaken
Unify send and sendmsg zerocopy paths for importing registered buffers
and make io_send_zc_import() responsible for that. It's a preparation
patch making the next change simpler.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
io_uring/net.c | 41 +++++++++++++++++++++++++----------------
1 file changed, 25 insertions(+), 16 deletions(-)
diff --git a/io_uring/net.c b/io_uring/net.c
index 5f7f02e2c034..88962b18965e 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -1439,17 +1439,34 @@ static int io_sg_from_iter(struct sk_buff *skb,
return ret;
}
-static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
+static int io_send_zc_import(struct io_kiocb *req,
+ struct io_async_msghdr *kmsg,
+ unsigned int issue_flags)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- struct io_async_msghdr *kmsg = req->async_data;
+ struct io_kiocb *notif = sr->notif;
+ int ret;
WARN_ON_ONCE(!(sr->flags & IORING_RECVSEND_FIXED_BUF));
- sr->notif->buf_index = req->buf_index;
- return io_import_reg_buf(sr->notif, &kmsg->msg.msg_iter,
- (u64)(uintptr_t)sr->buf, sr->len,
- ITER_SOURCE, issue_flags);
+ notif->buf_index = req->buf_index;
+
+ if (req->opcode == IORING_OP_SEND_ZC) {
+ ret = io_import_reg_buf(notif, &kmsg->msg.msg_iter,
+ (u64)(uintptr_t)sr->buf, sr->len,
+ ITER_SOURCE, issue_flags);
+ } else {
+ unsigned uvec_segs = kmsg->msg.msg_iter.nr_segs;
+
+ ret = io_import_reg_vec(ITER_SOURCE, &kmsg->msg.msg_iter,
+ notif, &kmsg->vec, uvec_segs,
+ issue_flags);
+ }
+
+ if (unlikely(ret))
+ return ret;
+ req->flags &= ~REQ_F_IMPORT_BUFFER;
+ return 0;
}
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
@@ -1471,8 +1488,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
return -EAGAIN;
if (req->flags & REQ_F_IMPORT_BUFFER) {
- req->flags &= ~REQ_F_IMPORT_BUFFER;
- ret = io_send_zc_import(req, issue_flags);
+ ret = io_send_zc_import(req, kmsg, issue_flags);
if (unlikely(ret))
return ret;
}
@@ -1528,16 +1544,9 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
int ret, min_ret = 0;
if (req->flags & REQ_F_IMPORT_BUFFER) {
- unsigned uvec_segs = kmsg->msg.msg_iter.nr_segs;
- int ret;
-
- sr->notif->buf_index = req->buf_index;
- ret = io_import_reg_vec(ITER_SOURCE, &kmsg->msg.msg_iter,
- sr->notif, &kmsg->vec, uvec_segs,
- issue_flags);
+ ret = io_send_zc_import(req, kmsg, issue_flags);
if (unlikely(ret))
return ret;
- req->flags &= ~REQ_F_IMPORT_BUFFER;
}
sock = sock_from_file(req->file);
--
2.52.0
^ permalink raw reply related [flat|nested] 5+ messages in thread* [PATCH 3/3] io_uring/zctx: unify zerocopy issue variants
2026-02-16 11:45 [PATCH 0/3] deduplicate send and sendmsg zc issue handlers Pavel Begunkov
2026-02-16 11:45 ` [PATCH 1/3] io_uring/zctx: rename flags var for more clarity Pavel Begunkov
2026-02-16 11:45 ` [PATCH 2/3] io_uring/zctx: move vec regbuf import into io_send_zc_import Pavel Begunkov
@ 2026-02-16 11:45 ` Pavel Begunkov
2026-02-16 16:35 ` [PATCH 0/3] deduplicate send and senmsg zc issue handlers Jens Axboe
3 siblings, 0 replies; 5+ messages in thread
From: Pavel Begunkov @ 2026-02-16 11:45 UTC (permalink / raw)
To: io-uring; +Cc: asml.silence, axboe, Dylan Yudaken
io_send_zc and io_sendmsg_zc started out different, but now the only real
difference between them is how registered buffers are imported and
which net helper we use. Avoid duplication and combine them into a
single function.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
io_uring/net.c | 86 ++++++++----------------------------------------
io_uring/net.h | 1 -
io_uring/opdef.c | 2 +-
3 files changed, 14 insertions(+), 75 deletions(-)
diff --git a/io_uring/net.c b/io_uring/net.c
index 88962b18965e..7ebfd51b84de 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -1469,9 +1469,9 @@ static int io_send_zc_import(struct io_kiocb *req,
return 0;
}
-int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
+int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *kmsg = req->async_data;
struct socket *sock;
unsigned msg_flags;
@@ -1482,9 +1482,8 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
return -ENOTSOCK;
if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
return -EOPNOTSUPP;
-
if (!(req->flags & REQ_F_POLLED) &&
- (zc->flags & IORING_RECVSEND_POLL_FIRST))
+ (sr->flags & IORING_RECVSEND_POLL_FIRST))
return -EAGAIN;
if (req->flags & REQ_F_IMPORT_BUFFER) {
@@ -1493,87 +1492,28 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
return ret;
}
- msg_flags = zc->msg_flags;
- if (issue_flags & IO_URING_F_NONBLOCK)
- msg_flags |= MSG_DONTWAIT;
- if (msg_flags & MSG_WAITALL)
- min_ret = iov_iter_count(&kmsg->msg.msg_iter);
- msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
-
- kmsg->msg.msg_flags = msg_flags;
- kmsg->msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
- ret = sock_sendmsg(sock, &kmsg->msg);
-
- if (unlikely(ret < min_ret)) {
- if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
- return -EAGAIN;
-
- if (ret > 0 && io_net_retry(sock, kmsg->msg.msg_flags)) {
- zc->done_io += ret;
- return -EAGAIN;
- }
- if (ret == -ERESTARTSYS)
- ret = -EINTR;
- req_set_fail(req);
- }
-
- if (ret >= 0)
- ret += zc->done_io;
- else if (zc->done_io)
- ret = zc->done_io;
-
- /*
- * If we're in io-wq we can't rely on tw ordering guarantees, defer
- * flushing notif to io_send_zc_cleanup()
- */
- if (!(issue_flags & IO_URING_F_UNLOCKED)) {
- io_notif_flush(zc->notif);
- zc->notif = NULL;
- io_req_msg_cleanup(req, 0);
- }
- io_req_set_res(req, ret, IORING_CQE_F_MORE);
- return IOU_COMPLETE;
-}
-
-int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- struct io_async_msghdr *kmsg = req->async_data;
- struct socket *sock;
- unsigned msg_flags;
- int ret, min_ret = 0;
-
- if (req->flags & REQ_F_IMPORT_BUFFER) {
- ret = io_send_zc_import(req, kmsg, issue_flags);
- if (unlikely(ret))
- return ret;
- }
-
- sock = sock_from_file(req->file);
- if (unlikely(!sock))
- return -ENOTSOCK;
- if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
- return -EOPNOTSUPP;
-
- if (!(req->flags & REQ_F_POLLED) &&
- (sr->flags & IORING_RECVSEND_POLL_FIRST))
- return -EAGAIN;
-
msg_flags = sr->msg_flags;
if (issue_flags & IO_URING_F_NONBLOCK)
msg_flags |= MSG_DONTWAIT;
if (msg_flags & MSG_WAITALL)
min_ret = iov_iter_count(&kmsg->msg.msg_iter);
- kmsg->msg.msg_control_user = sr->msg_control;
kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
- ret = __sys_sendmsg_sock(sock, &kmsg->msg, msg_flags);
+
+ if (req->opcode == IORING_OP_SEND_ZC) {
+ msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
+ kmsg->msg.msg_flags = msg_flags;
+ ret = sock_sendmsg(sock, &kmsg->msg);
+ } else {
+ kmsg->msg.msg_control_user = sr->msg_control;
+ ret = __sys_sendmsg_sock(sock, &kmsg->msg, msg_flags);
+ }
if (unlikely(ret < min_ret)) {
if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
return -EAGAIN;
- if (ret > 0 && io_net_retry(sock, msg_flags)) {
+ if (ret > 0 && io_net_retry(sock, sr->msg_flags)) {
sr->done_io += ret;
return -EAGAIN;
}
diff --git a/io_uring/net.h b/io_uring/net.h
index a862960a3bb9..d4d1ddce50e3 100644
--- a/io_uring/net.h
+++ b/io_uring/net.h
@@ -50,7 +50,6 @@ void io_socket_bpf_populate(struct io_uring_bpf_ctx *bctx, struct io_kiocb *req)
int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_connect(struct io_kiocb *req, unsigned int issue_flags);
-int io_send_zc(struct io_kiocb *req, unsigned int issue_flags);
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags);
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
void io_send_zc_cleanup(struct io_kiocb *req);
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index 91a23baf415e..645980fa4651 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -437,7 +437,7 @@ const struct io_issue_def io_issue_defs[] = {
#if defined(CONFIG_NET)
.async_size = sizeof(struct io_async_msghdr),
.prep = io_send_zc_prep,
- .issue = io_send_zc,
+ .issue = io_sendmsg_zc,
#else
.prep = io_eopnotsupp_prep,
#endif
--
2.52.0
^ permalink raw reply related [flat|nested] 5+ messages in thread* Re: [PATCH 0/3] deduplicate send and senmsg zc issue handlers
2026-02-16 11:45 [PATCH 0/3] deduplicate send and sendmsg zc issue handlers Pavel Begunkov
` (2 preceding siblings ...)
2026-02-16 11:45 ` [PATCH 3/3] io_uring/zctx: unify zerocopy issue variants Pavel Begunkov
@ 2026-02-16 16:35 ` Jens Axboe
3 siblings, 0 replies; 5+ messages in thread
From: Jens Axboe @ 2026-02-16 16:35 UTC (permalink / raw)
To: Pavel Begunkov, io-uring; +Cc: Dylan Yudaken
On 2/16/26 4:45 AM, Pavel Begunkov wrote:
> There is a bunch of code duplicated between send_zc and sendmsg_zc,
> let's consolidate the functions.
>
> Note: it's based on top of Dylan's patch removing buf/len accounting.
>
> Pavel Begunkov (3):
> io_uring/zctx: rename flags var for more clarity
> io_uring/zctx: move vec regbuf import into io_send_zc_import
> io_uring/zctx: unify zerocopy issue variants
>
> io_uring/net.c | 125 ++++++++++++++---------------------------------
> io_uring/net.h | 1 -
> io_uring/opdef.c | 2 +-
> 3 files changed, 38 insertions(+), 90 deletions(-)
Nice cleanup, looks good to me. I'll get this in once -rc1 is tagged
and a 7.1 branch can get kicked off.
--
Jens Axboe
^ permalink raw reply [flat|nested] 5+ messages in thread