* [PATCH 2/3] io_uring: refactor DEFER_TASKRUN multishot checks
From: Pavel Begunkov @ 2024-03-08 13:55 UTC
To: io-uring; +Cc: Jens Axboe, asml.silence
We disallow DEFER_TASKRUN multishots from running via io-wq, a rule
currently checked by each opcode in the issue path. We can consolidate
all of it in io_wq_submit_work(), at the same time moving the checks out
of the hot path.
Suggested-by: Jens Axboe <[email protected]>
Signed-off-by: Pavel Begunkov <[email protected]>
---
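Not part of the patch, for illustration only: the userspace combination
this affects is a DEFER_TASKRUN ring issuing a multishot recv. A minimal
sketch using liburing's public API; the helper name is made up, and it
assumes a provided-buffer group has already been registered as group 0:

#include <liburing.h>

static int start_mshot_recv(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe;
	int ret;

	/* DEFER_TASKRUN requires a single submitter */
	ret = io_uring_queue_init(8, ring,
				  IORING_SETUP_SINGLE_ISSUER |
				  IORING_SETUP_DEFER_TASKRUN);
	if (ret)
		return ret;

	sqe = io_uring_get_sqe(ring);
	/* multishot recv posts one CQE with IORING_CQE_F_MORE per buffer */
	io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
	/* multishot recv needs provided buffers; assumes group 0 exists */
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = 0;
	return io_uring_submit(ring);
}

With this patch, if such a request does end up in io-wq,
io_wq_submit_work() re-arms poll or fails the request there, instead of
each opcode checking for it on every issue.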
io_uring/io_uring.c | 20 ++++++++++++++++++++
io_uring/net.c | 21 ---------------------
io_uring/rw.c | 2 --
3 files changed, 20 insertions(+), 23 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index cf2f514b7cc0..cf348c33f485 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -944,6 +944,8 @@ bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags)
u64 user_data = req->cqe.user_data;
struct io_uring_cqe *cqe;
+ lockdep_assert(!io_wq_current_is_worker());
+
if (!defer)
return __io_post_aux_cqe(ctx, user_data, res, cflags, false);
@@ -1968,6 +1970,24 @@ void io_wq_submit_work(struct io_wq_work *work)
goto fail;
}
+	/*
+	 * If DEFER_TASKRUN is set, it's only allowed to post CQEs from the
+	 * submitter task context. Final request completions are handed to the
+	 * right context, however this is not the case for auxiliary CQEs,
+	 * which are the main means of operation for multishot requests.
+	 * Don't allow any multishot execution from io-wq. It's more
+	 * restrictive than necessary and also cleaner.
+	 */
+ if (req->flags & REQ_F_APOLL_MULTISHOT) {
+ err = -EBADFD;
+ if (!io_file_can_poll(req))
+ goto fail;
+ err = -ECANCELED;
+ if (io_arm_poll_handler(req, issue_flags) != IO_APOLL_OK)
+ goto fail;
+ return;
+ }
+
if (req->flags & REQ_F_FORCE_ASYNC) {
bool opcode_poll = def->pollin || def->pollout;
diff --git a/io_uring/net.c b/io_uring/net.c
index d4ab4bdaf845..14d6bae60747 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -78,19 +78,6 @@ struct io_sr_msg {
*/
#define MULTISHOT_MAX_RETRY 32
-static inline bool io_check_multishot(struct io_kiocb *req,
- unsigned int issue_flags)
-{
- /*
- * When ->locked_cq is set we only allow to post CQEs from the original
- * task context. Usual request completions will be handled in other
- * generic paths but multipoll may decide to post extra cqes.
- */
- return !(issue_flags & IO_URING_F_IOWQ) ||
- !(req->flags & REQ_F_APOLL_MULTISHOT) ||
- !req->ctx->task_complete;
-}
-
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
@@ -853,9 +840,6 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
(sr->flags & IORING_RECVSEND_POLL_FIRST))
return io_setup_async_msg(req, kmsg, issue_flags);
- if (!io_check_multishot(req, issue_flags))
- return io_setup_async_msg(req, kmsg, issue_flags);
-
flags = sr->msg_flags;
if (force_nonblock)
flags |= MSG_DONTWAIT;
@@ -951,9 +935,6 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
(sr->flags & IORING_RECVSEND_POLL_FIRST))
return -EAGAIN;
- if (!io_check_multishot(req, issue_flags))
- return -EAGAIN;
-
sock = sock_from_file(req->file);
if (unlikely(!sock))
return -ENOTSOCK;
@@ -1403,8 +1384,6 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags)
struct file *file;
int ret, fd;
- if (!io_check_multishot(req, issue_flags))
- return -EAGAIN;
retry:
if (!fixed) {
fd = __get_unused_fd_flags(accept->flags, accept->nofile);
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 7733449271f2..6f465b6b5dde 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -933,8 +933,6 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
*/
if (!io_file_can_poll(req))
return -EBADFD;
- if (issue_flags & IO_URING_F_IOWQ)
- return -EAGAIN;
ret = __io_read(req, issue_flags);
--
2.43.0
* [PATCH 3/3] io_uring/net: dedup io_recv_finish req completion
From: Pavel Begunkov @ 2024-03-08 13:55 UTC
To: io-uring; +Cc: Jens Axboe, asml.silence
There are two blocks in io_recv_finish() completing the request, which
we can combine, removing the goto.
Signed-off-by: Pavel Begunkov <[email protected]>
---
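For illustration only (a self-contained toy, not kernel code, all names
hypothetical): the shape of the change is folding the multishot-continue
condition into the aux-CQE branch, so the single-shot case and a
finished multishot fall through to one shared completion tail:

#include <stdbool.h>
#include <stdio.h>

static bool post_more(int res)
{
	/* stand-in for io_fill_cqe_req_aux() succeeding */
	return res > 0;
}

static bool finish(bool multishot, bool mshot_finished, int res)
{
	/* keep going only while multishot is active and posting succeeds */
	if (multishot && !mshot_finished && post_more(res)) {
		printf("retry, CQE_F_MORE posted\n");
		return false;
	}
	/* single shot and terminated multishot share one completion tail */
	printf("complete with res=%d\n", res);
	return true;
}

int main(void)
{
	finish(false, false, 1);	/* single-shot: completes */
	finish(true, false, 1);		/* multishot: retries */
	finish(true, true, 1);		/* finished multishot: completes */
	return 0;
}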
io_uring/net.c | 16 ++++------------
1 file changed, 4 insertions(+), 12 deletions(-)
diff --git a/io_uring/net.c b/io_uring/net.c
index 14d6bae60747..96808f429b7a 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -685,20 +685,12 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
if (msg->msg_inq && msg->msg_inq != -1)
cflags |= IORING_CQE_F_SOCK_NONEMPTY;
- if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
- io_req_set_res(req, *ret, cflags);
- *ret = IOU_OK;
- return true;
- }
-
- if (mshot_finished)
- goto finish;
-
/*
* Fill CQE for this receive and see if we should keep trying to
* receive from this socket.
*/
- if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
+ if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished &&
+ io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
*ret, cflags | IORING_CQE_F_MORE)) {
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;
@@ -718,8 +710,8 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
*ret = -EAGAIN;
return true;
}
- /* Otherwise stop multishot but use the current result. */
-finish:
+
+ /* Finish the request / stop multishot. */
io_req_set_res(req, *ret, cflags);
if (issue_flags & IO_URING_F_MULTISHOT)
--
2.43.0