public inbox for [email protected]
* [PATCHSET next 0/2] Allow MSG_WAITALL for send/sendmsg
@ 2022-04-21  1:39 Jens Axboe
  2022-04-21  1:39 ` [PATCH 1/2] io_uring: support MSG_WAITALL for IORING_OP_SEND(MSG) Jens Axboe
  2022-04-21  1:39 ` [PATCH 2/2] io_uring: allow re-poll if we made progress Jens Axboe
  0 siblings, 2 replies; 3+ messages in thread
From: Jens Axboe @ 2022-04-21  1:39 UTC
  To: io-uring

Hi,

Just like we did for recv/recvmsg, allow MSG_WAITALL on send/sendmsg to mean
that we want to wait for all the requested data to be transferred, instead of
posting a partial completion.
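
For illustration, a minimal userspace sketch of what this enables. It uses
liburing's io_uring_prep_send(); the send_all() wrapper name and the
assumption of a connected SOCK_STREAM fd are ours for the example, not part
of the series:

	/* Submit one send with MSG_WAITALL; with this series the kernel
	 * retries short sends internally, so cqe->res should report the
	 * full length (or the partial byte count if an error hits after
	 * some progress was made). */
	#include <liburing.h>
	#include <sys/socket.h>

	static int send_all(int fd, const void *buf, size_t len)
	{
		struct io_uring ring;
		struct io_uring_cqe *cqe;
		struct io_uring_sqe *sqe;
		int ret;

		ret = io_uring_queue_init(8, &ring, 0);
		if (ret < 0)
			return ret;
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_send(sqe, fd, buf, len, MSG_WAITALL);
		io_uring_submit(&ring);
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (!ret) {
			ret = cqe->res;
			io_uring_cqe_seen(&ring, cqe);
		}
		io_uring_queue_exit(&ring);
		return ret;
	}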

Patch 2 ensures that we can sanely use apoll multiple times, as long as
we're making progress on the IO. This matters because it lets us avoid
punting to io-wq when we don't have to. The gate on having already polled
is only lifted if we did transfer some data, which preserves the original
reason the check was there: guarding against a request repeatedly retrying
non-blocking and getting -EAGAIN without any forward progress.
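
Concretely, the gate in io_arm_poll_handler() from patch 2 reduces to:

	/* only refuse to re-arm poll if we polled before AND made no
	 * progress; REQ_F_PARTIAL_IO lifts the restriction */
	if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
		return IO_APOLL_ABORTED;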

-- 
Jens Axboe




* [PATCH 1/2] io_uring: support MSG_WAITALL for IORING_OP_SEND(MSG)
  2022-04-21  1:39 [PATCHSET next 0/2] Allow MSG_WAITALL for send/sendmsg Jens Axboe
@ 2022-04-21  1:39 ` Jens Axboe
  2022-04-21  1:39 ` [PATCH 2/2] io_uring: allow re-poll if we made progress Jens Axboe
  1 sibling, 0 replies; 3+ messages in thread
From: Jens Axboe @ 2022-04-21  1:39 UTC
  To: io-uring; +Cc: Jens Axboe

Like commit 7ba89d2af17a did for recv/recvmsg, support MSG_WAITALL for the
send side. If this flag is set and we do a short send, retry for a stream
or seqpacket socket.

Signed-off-by: Jens Axboe <[email protected]>
---
 fs/io_uring.c | 36 +++++++++++++++++++++++++++++-------
 1 file changed, 29 insertions(+), 7 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 7e1d5243bbbc..f06c6fed540b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5232,6 +5232,13 @@ static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
 }
 
 #if defined(CONFIG_NET)
+static bool io_net_retry(struct socket *sock, int flags)
+{
+	if (!(flags & MSG_WAITALL))
+		return false;
+	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
+}
+
 static int io_setup_async_msg(struct io_kiocb *req,
 			      struct io_async_msghdr *kmsg)
 {
@@ -5290,12 +5297,14 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (req->ctx->compat)
 		sr->msg_flags |= MSG_CMSG_COMPAT;
 #endif
+	sr->done_io = 0;
 	return 0;
 }
 
 static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_async_msghdr iomsg, *kmsg;
+	struct io_sr_msg *sr = &req->sr_msg;
 	struct socket *sock;
 	unsigned flags;
 	int min_ret = 0;
@@ -5327,12 +5336,21 @@ static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 			return io_setup_async_msg(req, kmsg);
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
+		if (ret > 0 && io_net_retry(sock, flags)) {
+			sr->done_io += ret;
+			req->flags |= REQ_F_PARTIAL_IO;
+			return io_setup_async_msg(req, kmsg);
+		}
 		req_set_fail(req);
 	}
 	/* fast path, check for non-NULL to avoid function call */
 	if (kmsg->free_iov)
 		kfree(kmsg->free_iov);
 	req->flags &= ~REQ_F_NEED_CLEANUP;
+	if (ret >= 0)
+		ret += sr->done_io;
+	else if (sr->done_io)
+		ret = sr->done_io;
 	__io_req_complete(req, issue_flags, ret, 0);
 	return 0;
 }
@@ -5373,8 +5391,19 @@ static int io_send(struct io_kiocb *req, unsigned int issue_flags)
 			return -EAGAIN;
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
+		if (ret > 0 && io_net_retry(sock, flags)) {
+			sr->len -= ret;
+			sr->buf += ret;
+			sr->done_io += ret;
+			req->flags |= REQ_F_PARTIAL_IO;
+			return -EAGAIN;
+		}
 		req_set_fail(req);
 	}
+	if (ret >= 0)
+		ret += sr->done_io;
+	else if (sr->done_io)
+		ret = sr->done_io;
 	__io_req_complete(req, issue_flags, ret, 0);
 	return 0;
 }
@@ -5506,13 +5535,6 @@ static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	return 0;
 }
 
-static bool io_net_retry(struct socket *sock, int flags)
-{
-	if (!(flags & MSG_WAITALL))
-		return false;
-	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
-}
-
 static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_async_msghdr iomsg, *kmsg;
-- 
2.35.1
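
The two core pieces of the diff, shown standalone with comments added for
exposition:

	/* Retrying a short transfer only makes sense if the caller asked
	 * for the full amount (MSG_WAITALL) and the socket type has stream
	 * or record semantics, where continuing from the current position
	 * is well-defined; datagram sockets are excluded. */
	static bool io_net_retry(struct socket *sock, int flags)
	{
		if (!(flags & MSG_WAITALL))
			return false;
		return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
	}

	/* Completion folds previously transferred bytes into the result:
	 * a final success reports the grand total, while an error after
	 * partial progress reports the byte count rather than the error. */
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;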



* [PATCH 2/2] io_uring: allow re-poll if we made progress
  2022-04-21  1:39 [PATCHSET next 0/2] Allow MSG_WAITALL for send/sendmsg Jens Axboe
  2022-04-21  1:39 ` [PATCH 1/2] io_uring: support MSG_WAITALL for IORING_OP_SEND(MSG) Jens Axboe
@ 2022-04-21  1:39 ` Jens Axboe
  1 sibling, 0 replies; 3+ messages in thread
From: Jens Axboe @ 2022-04-21  1:39 UTC
  To: io-uring; +Cc: Jens Axboe

We currently check REQ_F_POLLED before arming async poll for a retry
notification. If it's set, we don't allow poll and punt to io-wq instead.
This is done to prevent a situation where a buggy driver repeatedly claims
that there's space/data available, yet each retry gets -EAGAIN.

However, if we already transferred data, then it should be safe to rely on
poll again. Only skip the poll arming if REQ_F_POLLED is set and
REQ_F_PARTIAL_IO is not.

Signed-off-by: Jens Axboe <[email protected]>
---
 fs/io_uring.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index f06c6fed540b..6a4460cad9c0 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6263,7 +6263,9 @@ static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
 
 	if (!def->pollin && !def->pollout)
 		return IO_APOLL_ABORTED;
-	if (!file_can_poll(req->file) || (req->flags & REQ_F_POLLED))
+	if (!file_can_poll(req->file))
+		return IO_APOLL_ABORTED;
+	if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
 		return IO_APOLL_ABORTED;
 
 	if (def->pollin) {
@@ -6278,8 +6280,10 @@ static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
 	}
 	if (def->poll_exclusive)
 		mask |= EPOLLEXCLUSIVE;
-	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
-	    !list_empty(&ctx->apoll_cache)) {
+	if (req->flags & REQ_F_POLLED) {
+		apoll = req->apoll;
+	} else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
+		   !list_empty(&ctx->apoll_cache)) {
 		apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
 						poll.wait.entry);
 		list_del_init(&apoll->poll.wait.entry);
-- 
2.35.1
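
The allocation half of the change, standalone: a request that has armed
poll once still owns its async_poll, so a second arming reuses it rather
than pulling a fresh entry from the ctx cache (comments added for
exposition):

	if (req->flags & REQ_F_POLLED) {
		/* second arming after partial IO: reuse the allocation
		 * from the first arming */
		apoll = req->apoll;
	} else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
		   !list_empty(&ctx->apoll_cache)) {
		apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
						poll.wait.entry);
		list_del_init(&apoll->poll.wait.entry);
	}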

