public inbox for [email protected]
* [PATCHSET 0/4] Limit multishot receive retries
From: Jens Axboe @ 2024-01-29 20:23 UTC
  To: io-uring; +Cc: asml.silence

Hi,

If we have multiple multishot receives pending and one/several/all of
the clients are flooding us with traffic, then we can retry each
multishot receive many times, as data keeps being available. In some
quick testing here with 8 clients over a 10gbit link, I saw one client
do more than 32K retries. This causes an imbalance in how we serve
such traffic, with the result that each client sees different
throughput.

Patches 1-2 are just prep patches with no functional changes. Patch 3
doesn't do anything by itself, but it enables the fix in patch 4. That
patch limits retries to 32, which should be high enough not to add any
extra overhead, while still allowing other clients to be served
fairly.
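
For anyone unfamiliar with the feature being tuned, here is a minimal
liburing sketch (not part of this series) of how an application arms a
multishot receive; it assumes liburing 2.3+ with a provided-buffer ring
already registered as buffer group 0, and omits error handling:

#include <liburing.h>

/*
 * One SQE arms the receive; the kernel then posts a CQE (with
 * IORING_CQE_F_MORE set) per received chunk, picking a buffer from
 * the registered ring each time, until the multishot is terminated.
 */
static void arm_multishot_recv(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = 0;
	io_uring_submit(ring);
}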

 io_uring/io_uring.h |  8 +++++++-
 io_uring/net.c      | 49 ++++++++++++++++++++++++++++++++-------------
 io_uring/poll.c     | 49 ++++++++++++++++++++++++++-------------------
 3 files changed, 70 insertions(+), 36 deletions(-)

-- 
Jens Axboe



* [PATCH 1/4] io_uring/poll: move poll execution helpers higher up
From: Jens Axboe @ 2024-01-29 20:23 UTC
  To: io-uring; +Cc: asml.silence, Jens Axboe

In preparation for calling __io_poll_execute() higher up, move the
functions to avoid forward declarations.

No functional changes in this patch.

Signed-off-by: Jens Axboe <[email protected]>
---
 io_uring/poll.c | 40 ++++++++++++++++++++--------------------
 1 file changed, 20 insertions(+), 20 deletions(-)

diff --git a/io_uring/poll.c b/io_uring/poll.c
index d59b74a99d4e..785a5b191003 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -228,6 +228,26 @@ enum {
 	IOU_POLL_REISSUE = 3,
 };
 
+static void __io_poll_execute(struct io_kiocb *req, int mask)
+{
+	unsigned flags = 0;
+
+	io_req_set_res(req, mask, 0);
+	req->io_task_work.func = io_poll_task_func;
+
+	trace_io_uring_task_add(req, mask);
+
+	if (!(req->flags & REQ_F_POLL_NO_LAZY))
+		flags = IOU_F_TWQ_LAZY_WAKE;
+	__io_req_task_work_add(req, flags);
+}
+
+static inline void io_poll_execute(struct io_kiocb *req, int res)
+{
+	if (io_poll_get_ownership(req))
+		__io_poll_execute(req, res);
+}
+
 /*
  * All poll tw should go through this. Checks for poll events, manages
  * references, does rewait, etc.
@@ -364,26 +384,6 @@ void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
 	}
 }
 
-static void __io_poll_execute(struct io_kiocb *req, int mask)
-{
-	unsigned flags = 0;
-
-	io_req_set_res(req, mask, 0);
-	req->io_task_work.func = io_poll_task_func;
-
-	trace_io_uring_task_add(req, mask);
-
-	if (!(req->flags & REQ_F_POLL_NO_LAZY))
-		flags = IOU_F_TWQ_LAZY_WAKE;
-	__io_req_task_work_add(req, flags);
-}
-
-static inline void io_poll_execute(struct io_kiocb *req, int res)
-{
-	if (io_poll_get_ownership(req))
-		__io_poll_execute(req, res);
-}
-
 static void io_poll_cancel_req(struct io_kiocb *req)
 {
 	io_poll_mark_cancelled(req);
-- 
2.43.0



* [PATCH 2/4] io_uring/net: un-indent mshot retry path in io_recv_finish()
From: Jens Axboe @ 2024-01-29 20:23 UTC
  To: io-uring; +Cc: asml.silence, Jens Axboe

In preparation for adding some retry logic in here, have the done path
skip straight to the end rather than nest the retry handling deeply.

No functional changes in this patch.

Signed-off-by: Jens Axboe <[email protected]>
---
 io_uring/net.c | 36 ++++++++++++++++++++----------------
 1 file changed, 20 insertions(+), 16 deletions(-)

diff --git a/io_uring/net.c b/io_uring/net.c
index 75d494dad7e2..740c6bfa5b59 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -645,23 +645,27 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 		return true;
 	}
 
-	if (!mshot_finished) {
-		if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
-					*ret, cflags | IORING_CQE_F_MORE)) {
-			io_recv_prep_retry(req);
-			/* Known not-empty or unknown state, retry */
-			if (cflags & IORING_CQE_F_SOCK_NONEMPTY ||
-			    msg->msg_inq == -1)
-				return false;
-			if (issue_flags & IO_URING_F_MULTISHOT)
-				*ret = IOU_ISSUE_SKIP_COMPLETE;
-			else
-				*ret = -EAGAIN;
-			return true;
-		}
-		/* Otherwise stop multishot but use the current result. */
-	}
+	if (mshot_finished)
+		goto finish;
 
+	/*
+	 * Fill CQE for this receive and see if we should keep trying to
+	 * receive from this socket.
+	 */
+	if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
+				*ret, cflags | IORING_CQE_F_MORE)) {
+		io_recv_prep_retry(req);
+		/* Known not-empty or unknown state, retry */
+		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq == -1)
+			return false;
+		if (issue_flags & IO_URING_F_MULTISHOT)
+			*ret = IOU_ISSUE_SKIP_COMPLETE;
+		else
+			*ret = -EAGAIN;
+		return true;
+	}
+	/* Otherwise stop multishot but use the current result. */
+finish:
 	io_req_set_res(req, *ret, cflags);
 
 	if (issue_flags & IO_URING_F_MULTISHOT)
-- 
2.43.0



* [PATCH 3/4] io_uring/poll: add requeue return code from poll multishot handling
From: Jens Axboe @ 2024-01-29 20:23 UTC
  To: io-uring; +Cc: asml.silence, Jens Axboe

Since our poll handling is edge triggered, multishot handlers retry
internally until they know that no more data is available. In
preparation for limiting these retries, add an internal return code,
IOU_REQUEUE, which a handler can use to inform the poll backend that it
wants to retry, but that the retry should happen through a normal
task_work requeue rather than by hammering on the issue side for this
one request.

No functional changes in this patch; nobody is using this return code
just yet.

Signed-off-by: Jens Axboe <[email protected]>
---
 io_uring/io_uring.h | 8 +++++++-
 io_uring/poll.c     | 9 ++++++++-
 2 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 04e33f25919c..d5495710c178 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -15,11 +15,17 @@
 #include <trace/events/io_uring.h>
 #endif
 
-
 enum {
 	IOU_OK			= 0,
 	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,
 
+	/*
+	 * Requeue the task_work to restart operations on this request. The
+	 * actual value isn't important, should just be not an otherwise
+	 * valid error code, yet less than -MAX_ERRNO and valid internally.
+	 */
+	IOU_REQUEUE		= -3072,
+
 	/*
 	 * Intended only when both IO_URING_F_MULTISHOT is passed
 	 * to indicate to the poll runner that multishot should be
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 785a5b191003..7513afc7b702 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -226,6 +226,7 @@ enum {
 	IOU_POLL_NO_ACTION = 1,
 	IOU_POLL_REMOVE_POLL_USE_RES = 2,
 	IOU_POLL_REISSUE = 3,
+	IOU_POLL_REQUEUE = 4,
 };
 
 static void __io_poll_execute(struct io_kiocb *req, int mask)
@@ -329,6 +330,8 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
 			int ret = io_poll_issue(req, ts);
 			if (ret == IOU_STOP_MULTISHOT)
 				return IOU_POLL_REMOVE_POLL_USE_RES;
+			else if (ret == IOU_REQUEUE)
+				return IOU_POLL_REQUEUE;
 			if (ret < 0)
 				return ret;
 		}
@@ -351,8 +354,12 @@ void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
 	int ret;
 
 	ret = io_poll_check_events(req, ts);
-	if (ret == IOU_POLL_NO_ACTION)
+	if (ret == IOU_POLL_NO_ACTION) {
 		return;
+	} else if (ret == IOU_POLL_REQUEUE) {
+		__io_poll_execute(req, 0);
+		return;
+	}
 	io_poll_remove_entries(req);
 	io_poll_tw_hash_eject(req, ts);
 
-- 
2.43.0


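As a userspace illustration (not kernel code) of the plumbing above:
the stand-in below mirrors how io_poll_check_events() maps a handler's
IOU_REQUEUE to IOU_POLL_REQUEUE, which io_poll_task_func() then turns
into a fresh task_work run via __io_poll_execute() instead of tearing
the poll entry down. The IOU_REQUEUE and IOU_POLL_* values are taken
from the hunks above; the IOU_STOP_MULTISHOT value here is a
placeholder.

#include <stdio.h>

enum { IOU_STOP_MULTISHOT = -999 /* placeholder value */, IOU_REQUEUE = -3072 };
enum { IOU_POLL_REMOVE_POLL_USE_RES = 2, IOU_POLL_REQUEUE = 4 };

/* condensed model of the multishot branch in io_poll_check_events() */
static int poll_action(int handler_ret)
{
	if (handler_ret == IOU_STOP_MULTISHOT)
		return IOU_POLL_REMOVE_POLL_USE_RES;	/* tear down, keep result */
	if (handler_ret == IOU_REQUEUE)
		return IOU_POLL_REQUEUE;	/* re-run via task_work requeue */
	return handler_ret;			/* errors pass straight through */
}

int main(void)
{
	printf("IOU_REQUEUE -> poll action %d\n", poll_action(IOU_REQUEUE));
	return 0;
}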

* [PATCH 4/4] io_uring/net: limit inline multishot retries
From: Jens Axboe @ 2024-01-29 20:23 UTC
  To: io-uring; +Cc: asml.silence, Jens Axboe, stable

If we have multiple clients and some/all of them are flooding us with
receives to such an extent that we can retry handling multishot
receives a LOT, then we can end up starving some clients and hence
serving traffic in an imbalanced fashion.

Limit multishot retry attempts to an arbitrary value whose only purpose
is to ensure that we don't keep serving a single connection for way too
long. We default to 32 retries, which should be more than enough to
provide fairness, yet not so small that we'd spend too much time
requeuing rather than handling traffic.

Cc: [email protected]
Depends-on: 704ea888d646 ("io_uring/poll: add requeue return code from poll multishot handling")
Depends-on: 1e5d765a82f ("io_uring/net: un-indent mshot retry path in io_recv_finish()")
Depends-on: e84b01a880f6 ("io_uring/poll: move poll execution helpers higher up")
Fixes: b3fdea6ecb55 ("io_uring: multishot recv")
Fixes: 9bb66906f23e ("io_uring: support multishot in recvmsg")
Link: https://github.com/axboe/liburing/issues/1043
Signed-off-by: Jens Axboe <[email protected]>
---
 io_uring/net.c | 23 ++++++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)

diff --git a/io_uring/net.c b/io_uring/net.c
index 740c6bfa5b59..a12ff69e6843 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -60,6 +60,7 @@ struct io_sr_msg {
 	unsigned			len;
 	unsigned			done_io;
 	unsigned			msg_flags;
+	unsigned			nr_multishot_loops;
 	u16				flags;
 	/* initialised and used only by !msg send variants */
 	u16				addr_len;
@@ -70,6 +71,13 @@ struct io_sr_msg {
 	struct io_kiocb 		*notif;
 };
 
+/*
+ * Number of times we'll try and do receives if there's more data. If we
+ * exceed this limit, then add us to the back of the queue and retry from
+ * there. This helps fairness between flooding clients.
+ */
+#define MULTISHOT_MAX_RETRY	32
+
 static inline bool io_check_multishot(struct io_kiocb *req,
 				      unsigned int issue_flags)
 {
@@ -611,6 +619,7 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		sr->msg_flags |= MSG_CMSG_COMPAT;
 #endif
 	sr->done_io = 0;
+	sr->nr_multishot_loops = 0;
 	return 0;
 }
 
@@ -654,12 +663,20 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 	 */
 	if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
 				*ret, cflags | IORING_CQE_F_MORE)) {
+		struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+		int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;
+
 		io_recv_prep_retry(req);
 		/* Known not-empty or unknown state, retry */
-		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq == -1)
-			return false;
+		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq == -1) {
+			if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
+				return false;
+			/* mshot retries exceeded, force a requeue */
+			sr->nr_multishot_loops = 0;
+			mshot_retry_ret = IOU_REQUEUE;
+		}
 		if (issue_flags & IO_URING_F_MULTISHOT)
-			*ret = IOU_ISSUE_SKIP_COMPLETE;
+			*ret = mshot_retry_ret;
 		else
 			*ret = -EAGAIN;
 		return true;
-- 
2.43.0


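To make the fairness mechanism concrete, here is a small userspace
model (an illustration, not kernel code) of the bounded inline-retry
loop this patch introduces; the constant mirrors the patch, while the
socket probe is a stand-in for a flooding client:

#include <stdbool.h>
#include <stdio.h>

#define MULTISHOT_MAX_RETRY	32

/* stand-in: a flooded socket that never drains */
static bool sock_nonempty(void)
{
	return true;
}

int main(void)
{
	unsigned int nr_multishot_loops = 0;

	for (;;) {
		/* one receive completes here; a CQE with F_MORE is posted */
		if (!sock_nonempty())
			break;		/* drained: go back to waiting on poll */
		if (nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
			continue;	/* more data known/possible: retry inline */
		/* limit hit: requeue via task_work so other requests get a turn */
		printf("requeued after %u inline retries\n", MULTISHOT_MAX_RETRY);
		return 0;
	}
	printf("socket drained\n");
	return 0;
}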
