From: Jens Axboe <[email protected]>
To: [email protected]
Cc: Jens Axboe <[email protected]>
Subject: [PATCH 04/15] io_uring/net: always setup an io_async_msghdr
Date: Tue, 19 Mar 2024 19:17:32 -0600
Message-ID: <[email protected]>
In-Reply-To: <[email protected]>

Rather than use an on-stack one and then need to allocate and copy if
we have to go async, always grab one upfront. This should be very
cheap, and potentially even have cache hotness benefits for back-to-back
send/recv requests.
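
Boiled down, with hypothetical fill_hdr()/would_block() helpers standing
in for the real header prep and result handling, the per-opcode issue
path changes roughly like this:

	/* Before: on-stack msghdr; going async requires alloc + copy */
	struct io_async_msghdr iomsg, *kmsg = &iomsg;

	fill_hdr(kmsg);
	ret = do_the_io(kmsg);
	if (would_block(ret))
		return io_setup_async_msg(req, kmsg, issue_flags);

	/* After: heap msghdr allocated upfront from a cache; on -EAGAIN
	 * we just return, as the state already lives in req->async_data */
	struct io_async_msghdr *kmsg = io_msg_alloc_async(req, issue_flags);

	if (unlikely(!kmsg))
		return -ENOMEM;
	fill_hdr(kmsg);
	ret = do_the_io(kmsg);
	if (would_block(ret))
		return -EAGAIN;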

For any recv type of request, this is probably a good choice in general,
as it's expected that no data is available initially. For send this is
not necessarily the case, as we expect space to be available. However,
getting a cached io_async_msghdr is very cheap, and as it should be
cache hot, the difference here is probably negligible, if any.
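
The cached allocation can be pictured as a small per-ring freelist of
recently freed entries. The sketch below is a simplified standalone
model of that idea, not io_uring's actual io_alloc_cache:

	#include <stdlib.h>

	/* Pop a recently freed object if one is cached, otherwise fall
	 * back to the allocator. A popped object was used by a recent
	 * request, so it is likely still CPU-cache hot. */
	struct obj_cache { void *entries[32]; unsigned int nr; };

	static void *cache_get(struct obj_cache *c, size_t size)
	{
		return c->nr ? c->entries[--c->nr] : malloc(size);
	}

	static void cache_put(struct obj_cache *c, void *obj)
	{
		if (c->nr < sizeof(c->entries) / sizeof(c->entries[0]))
			c->entries[c->nr++] = obj;
		else
			free(obj);
	}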

A nice side benefit is that we can kill io_setup_async_msg completely,
which has some nasty iovec manipulation code.
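
The nastiness is the pointer fixup visible in the removed hunk in the
diff below: after memcpy()ing the msghdr, an iterator that pointed into
the source's inline fast_iov[] array still points at the (soon to be
gone) stack copy. A minimal standalone illustration of the hazard, with
made-up types:

	#include <string.h>
	#include <stddef.h>

	struct iovec_buf { void *base; size_t len; };
	struct msg {
		struct iovec_buf fast_iov[8];	/* inline storage */
		struct iovec_buf *iov;		/* may point into fast_iov */
	};

	static void copy_msg(struct msg *dst, const struct msg *src)
	{
		memcpy(dst, src, sizeof(*dst));
		/* Without this, dst->iov would still reference src->fast_iov */
		if (src->iov >= src->fast_iov && src->iov < src->fast_iov + 8)
			dst->iov = &dst->fast_iov[src->iov - src->fast_iov];
	}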

Signed-off-by: Jens Axboe <[email protected]>
---
 io_uring/net.c | 117 ++++++++++++++++++++-----------------------------
 1 file changed, 47 insertions(+), 70 deletions(-)

diff --git a/io_uring/net.c b/io_uring/net.c
index 2389bb1cc050..776ebfea8742 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -161,36 +161,6 @@ static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *r
 	return io_msg_alloc_async(req, 0);
 }
 
-static int io_setup_async_msg(struct io_kiocb *req,
-			      struct io_async_msghdr *kmsg,
-			      unsigned int issue_flags)
-{
-	struct io_async_msghdr *async_msg;
-
-	if (req_has_async_data(req))
-		return -EAGAIN;
-	async_msg = io_msg_alloc_async(req, issue_flags);
-	if (!async_msg) {
-		kfree(kmsg->free_iov);
-		return -ENOMEM;
-	}
-	req->flags |= REQ_F_NEED_CLEANUP;
-	memcpy(async_msg, kmsg, sizeof(*kmsg));
-	if (async_msg->msg.msg_name)
-		async_msg->msg.msg_name = &async_msg->addr;
-
-	if ((req->flags & REQ_F_BUFFER_SELECT) && !async_msg->msg.msg_iter.nr_segs)
-		return -EAGAIN;
-
-	/* if were using fast_iov, set it to the new one */
-	if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) {
-		size_t fast_idx = iter_iov(&kmsg->msg.msg_iter) - kmsg->fast_iov;
-		async_msg->msg.msg_iter.__iov = &async_msg->fast_iov[fast_idx];
-	}
-
-	return -EAGAIN;
-}
-
 #ifdef CONFIG_COMPAT
 static int io_compat_msg_copy_hdr(struct io_kiocb *req,
 				  struct io_async_msghdr *iomsg,
@@ -409,7 +379,7 @@ static void io_req_msg_cleanup(struct io_kiocb *req,
 int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-	struct io_async_msghdr iomsg, *kmsg;
+	struct io_async_msghdr *kmsg;
 	struct socket *sock;
 	unsigned flags;
 	int min_ret = 0;
@@ -423,15 +393,17 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 		kmsg = req->async_data;
 		kmsg->msg.msg_control_user = sr->msg_control;
 	} else {
-		ret = io_sendmsg_copy_hdr(req, &iomsg);
+		kmsg = io_msg_alloc_async(req, issue_flags);
+		if (unlikely(!kmsg))
+			return -ENOMEM;
+		ret = io_sendmsg_copy_hdr(req, kmsg);
 		if (ret)
 			return ret;
-		kmsg = &iomsg;
 	}
 
 	if (!(req->flags & REQ_F_POLLED) &&
 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
-		return io_setup_async_msg(req, kmsg, issue_flags);
+		return -EAGAIN;
 
 	flags = sr->msg_flags;
 	if (issue_flags & IO_URING_F_NONBLOCK)
@@ -443,13 +415,13 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 
 	if (ret < min_ret) {
 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
-			return io_setup_async_msg(req, kmsg, issue_flags);
+			return -EAGAIN;
 		if (ret > 0 && io_net_retry(sock, flags)) {
 			kmsg->msg.msg_controllen = 0;
 			kmsg->msg.msg_control = NULL;
 			sr->done_io += ret;
 			req->flags |= REQ_F_BL_NO_RECYCLE;
-			return io_setup_async_msg(req, kmsg, issue_flags);
+			return -EAGAIN;
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
@@ -465,7 +437,6 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 }
 
 static struct io_async_msghdr *io_send_setup(struct io_kiocb *req,
-					     struct io_async_msghdr *stack_msg,
 					     unsigned int issue_flags)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
@@ -475,8 +446,9 @@ static struct io_async_msghdr *io_send_setup(struct io_kiocb *req,
 	if (req_has_async_data(req)) {
 		kmsg = req->async_data;
 	} else {
-		kmsg = stack_msg;
-		kmsg->free_iov = NULL;
+		kmsg = io_msg_alloc_async(req, issue_flags);
+		if (!kmsg)
+			return ERR_PTR(-ENOMEM);
 
 		if (sr->addr) {
 			ret = move_addr_to_kernel(sr->addr, sr->addr_len,
@@ -506,7 +478,7 @@ static struct io_async_msghdr *io_send_setup(struct io_kiocb *req,
 
 	if (!(req->flags & REQ_F_POLLED) &&
 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
-		return ERR_PTR(io_setup_async_msg(req, kmsg, issue_flags));
+		return ERR_PTR(-EAGAIN);
 
 	return kmsg;
 }
@@ -514,7 +486,7 @@ static struct io_async_msghdr *io_send_setup(struct io_kiocb *req,
 int io_send(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-	struct io_async_msghdr iomsg, *kmsg;
+	struct io_async_msghdr *kmsg;
 	size_t len = sr->len;
 	struct socket *sock;
 	unsigned flags;
@@ -525,7 +497,7 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
 	if (unlikely(!sock))
 		return -ENOTSOCK;
 
-	kmsg = io_send_setup(req, &iomsg, issue_flags);
+	kmsg = io_send_setup(req, issue_flags);
 	if (IS_ERR(kmsg))
 		return PTR_ERR(kmsg);
 
@@ -544,12 +516,12 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
 	ret = sock_sendmsg(sock, &kmsg->msg);
 	if (ret < min_ret) {
 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
-			return io_setup_async_msg(req, kmsg, issue_flags);
+			return -EAGAIN;
 
 		if (ret > 0 && io_net_retry(sock, flags)) {
 			sr->done_io += ret;
 			req->flags |= REQ_F_BL_NO_RECYCLE;
-			return io_setup_async_msg(req, kmsg, issue_flags);
+			return -EAGAIN;
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
@@ -740,9 +712,10 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 			sr->nr_multishot_loops = 0;
 			mshot_retry_ret = IOU_REQUEUE;
 		}
-		*ret = io_setup_async_msg(req, kmsg, issue_flags);
-		if (*ret == -EAGAIN && issue_flags & IO_URING_F_MULTISHOT)
+		if (issue_flags & IO_URING_F_MULTISHOT)
 			*ret = mshot_retry_ret;
+		else
+			*ret = -EAGAIN;
 		return true;
 	}
 
@@ -844,7 +817,7 @@ static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
 int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-	struct io_async_msghdr iomsg, *kmsg;
+	struct io_async_msghdr *kmsg;
 	struct socket *sock;
 	unsigned flags;
 	int ret, min_ret = 0;
@@ -858,15 +831,17 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 	if (req_has_async_data(req)) {
 		kmsg = req->async_data;
 	} else {
-		ret = io_recvmsg_copy_hdr(req, &iomsg);
+		kmsg = io_msg_alloc_async(req, issue_flags);
+		if (unlikely(!kmsg))
+			return -ENOMEM;
+		ret = io_recvmsg_copy_hdr(req, kmsg);
 		if (ret)
 			return ret;
-		kmsg = &iomsg;
 	}
 
 	if (!(req->flags & REQ_F_POLLED) &&
 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
-		return io_setup_async_msg(req, kmsg, issue_flags);
+		return -EAGAIN;
 
 	flags = sr->msg_flags;
 	if (force_nonblock)
@@ -908,17 +883,16 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 
 	if (ret < min_ret) {
 		if (ret == -EAGAIN && force_nonblock) {
-			ret = io_setup_async_msg(req, kmsg, issue_flags);
-			if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
+			if (issue_flags & IO_URING_F_MULTISHOT) {
 				io_kbuf_recycle(req, issue_flags);
 				return IOU_ISSUE_SKIP_COMPLETE;
 			}
-			return ret;
+			return -EAGAIN;
 		}
 		if (ret > 0 && io_net_retry(sock, flags)) {
 			sr->done_io += ret;
 			req->flags |= REQ_F_BL_NO_RECYCLE;
-			return io_setup_async_msg(req, kmsg, issue_flags);
+			return -EAGAIN;
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
@@ -943,7 +917,7 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-	struct io_async_msghdr iomsg, *kmsg;
+	struct io_async_msghdr *kmsg;
 	struct socket *sock;
 	unsigned flags;
 	int ret, min_ret = 0;
@@ -953,7 +927,9 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 	if (req_has_async_data(req)) {
 		kmsg = req->async_data;
 	} else {
-		kmsg = &iomsg;
+		kmsg = io_msg_alloc_async(req, issue_flags);
+		if (unlikely(!kmsg))
+			return -ENOMEM;
 		kmsg->free_iov = NULL;
 		kmsg->msg.msg_name = NULL;
 		kmsg->msg.msg_namelen = 0;
@@ -973,7 +949,7 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 
 	if (!(req->flags & REQ_F_POLLED) &&
 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
-		return io_setup_async_msg(req, kmsg, issue_flags);
+		return -EAGAIN;
 
 	sock = sock_from_file(req->file);
 	if (unlikely(!sock))
@@ -1007,8 +983,7 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 	ret = sock_recvmsg(sock, &kmsg->msg, flags);
 	if (ret < min_ret) {
 		if (ret == -EAGAIN && force_nonblock) {
-			ret = io_setup_async_msg(req, kmsg, issue_flags);
-			if (ret == -EAGAIN && issue_flags & IO_URING_F_MULTISHOT) {
+			if (issue_flags & IO_URING_F_MULTISHOT) {
 				io_kbuf_recycle(req, issue_flags);
 				return IOU_ISSUE_SKIP_COMPLETE;
 			}
@@ -1018,7 +993,7 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 		if (ret > 0 && io_net_retry(sock, flags)) {
 			sr->done_io += ret;
 			req->flags |= REQ_F_BL_NO_RECYCLE;
-			return io_setup_async_msg(req, kmsg, issue_flags);
+			return -EAGAIN;
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
@@ -1214,7 +1189,7 @@ static int io_send_zc_import(struct io_kiocb *req, struct io_async_msghdr *kmsg)
 int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
-	struct io_async_msghdr iomsg, *kmsg;
+	struct io_async_msghdr *kmsg;
 	struct socket *sock;
 	unsigned msg_flags;
 	int ret, min_ret = 0;
@@ -1225,7 +1200,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
 		return -EOPNOTSUPP;
 
-	kmsg = io_send_setup(req, &iomsg, issue_flags);
+	kmsg = io_send_setup(req, issue_flags);
 	if (IS_ERR(kmsg))
 		return PTR_ERR(kmsg);
 
@@ -1248,12 +1223,12 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 
 	if (unlikely(ret < min_ret)) {
 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
-			return io_setup_async_msg(req, kmsg, issue_flags);
+			return -EAGAIN;
 
 		if (ret > 0 && io_net_retry(sock, kmsg->msg.msg_flags)) {
 			zc->done_io += ret;
 			req->flags |= REQ_F_BL_NO_RECYCLE;
-			return io_setup_async_msg(req, kmsg, issue_flags);
+			return -EAGAIN;
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
@@ -1281,7 +1256,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-	struct io_async_msghdr iomsg, *kmsg;
+	struct io_async_msghdr *kmsg;
 	struct socket *sock;
 	unsigned flags;
 	int ret, min_ret = 0;
@@ -1297,15 +1272,17 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
 	if (req_has_async_data(req)) {
 		kmsg = req->async_data;
 	} else {
-		ret = io_sendmsg_copy_hdr(req, &iomsg);
+		kmsg = io_msg_alloc_async(req, issue_flags);
+		if (unlikely(!kmsg))
+			return -ENOMEM;
+		ret = io_sendmsg_copy_hdr(req, kmsg);
 		if (ret)
 			return ret;
-		kmsg = &iomsg;
 	}
 
 	if (!(req->flags & REQ_F_POLLED) &&
 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
-		return io_setup_async_msg(req, kmsg, issue_flags);
+		return -EAGAIN;
 
 	flags = sr->msg_flags | MSG_ZEROCOPY;
 	if (issue_flags & IO_URING_F_NONBLOCK)
@@ -1319,12 +1296,12 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
 
 	if (unlikely(ret < min_ret)) {
 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
-			return io_setup_async_msg(req, kmsg, issue_flags);
+			return -EAGAIN;
 
 		if (ret > 0 && io_net_retry(sock, flags)) {
 			sr->done_io += ret;
 			req->flags |= REQ_F_BL_NO_RECYCLE;
-			return io_setup_async_msg(req, kmsg, issue_flags);
+			return -EAGAIN;
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
-- 
2.43.0


Thread overview: 16+ messages
2024-03-20  1:17 [PATCHSET 0/15] Get rid of ->prep_async() Jens Axboe
2024-03-20  1:17 ` [PATCH 01/15] io_uring/net: switch io_send() and io_send_zc() to using io_async_msghdr Jens Axboe
2024-03-20  1:17 ` [PATCH 02/15] io_uring/net: switch io_recv() " Jens Axboe
2024-03-20  1:17 ` [PATCH 03/15] io_uring/net: unify cleanup handling Jens Axboe
2024-03-20  1:17 ` Jens Axboe [this message]
2024-03-20  1:17 ` [PATCH 05/15] io_uring/net: get rid of ->prep_async() for receive side Jens Axboe
2024-03-20  1:17 ` [PATCH 06/15] io_uring/net: get rid of ->prep_async() for send side Jens Axboe
2024-03-20  1:17 ` [PATCH 07/15] io_uring: kill io_msg_alloc_async_prep() Jens Axboe
2024-03-20  1:17 ` [PATCH 08/15] io_uring/net: add iovec recycling Jens Axboe
2024-03-20  1:17 ` [PATCH 09/15] io_uring/net: drop 'kmsg' parameter from io_req_msg_cleanup() Jens Axboe
2024-03-20  1:17 ` [PATCH 10/15] io_uring/rw: always setup io_async_rw for read/write requests Jens Axboe
2024-03-20  1:17 ` [PATCH 11/15] io_uring: get rid of struct io_rw_state Jens Axboe
2024-03-20  1:17 ` [PATCH 12/15] io_uring/rw: add iovec recycling Jens Axboe
2024-03-20  1:17 ` [PATCH 13/15] io_uring/net: move connect to always using async data Jens Axboe
2024-03-20  1:17 ` [PATCH 14/15] io_uring/uring_cmd: switch to always allocating " Jens Axboe
2024-03-20  1:17 ` [PATCH 15/15] io_uring: drop ->prep_async() Jens Axboe
