From: Jens Axboe <[email protected]>
To: [email protected]
Cc: Jens Axboe <[email protected]>
Subject: [PATCH 6/6] io_uring: make __tctx_task_work_run() take an io_wq_work_list
Date: Fri, 22 Nov 2024 09:12:44 -0700
Message-ID: <[email protected]>
In-Reply-To: <[email protected]>

The normal task_work logic doesn't really need it, as it always runs
all of the pending work. But for SQPOLL, it can now pass in its
retry_list, which simplifies tracking when task_work running is split
up.

This avoids passing io_wq_work_node around. Rather than pass in a list,
SQPOLL could re-add the leftover items to the generic task_work list.
But that requires re-locking the task_lock and using task_list for that,
whereas having a separate retry list allows for skipping those steps.
The downside is that now two lists need checking, but that's how it
was before as well.
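
For reference, the switch is from passing around a raw io_wq_work_node
pointer to passing a small list head that also tracks the tail. A rough
sketch of the types and helpers involved (approximating
include/linux/io_uring_types.h and io_uring/slist.h, not verbatim):

	struct io_wq_work_node {
		struct io_wq_work_node *next;
	};

	struct io_wq_work_list {
		struct io_wq_work_node *first;
		struct io_wq_work_node *last;
	};

	/* an empty list is simply first == NULL */
	#define INIT_WQ_LIST(list)	do { (list)->first = NULL; } while (0)

	static inline bool wq_list_empty(struct io_wq_work_list *list)
	{
		return !READ_ONCE(list->first);
	}

With SQPOLL keeping a struct io_wq_work_list retry_list on its stack,
leftover entries survive across loop iterations and are checked with
wq_list_empty() rather than by threading a node pointer through the
callers.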

Signed-off-by: Jens Axboe <[email protected]>
---
 io_uring/io_uring.c | 36 ++++++++++++++++--------------------
 io_uring/io_uring.h |  9 +++++----
 io_uring/sqpoll.c   | 20 +++++++++++---------
 3 files changed, 32 insertions(+), 33 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index bc520a67fc03..5e52d8db3dca 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1044,20 +1044,20 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx, struct io_tw_state *ts)
 /*
  * Run queued task_work, returning the number of entries processed in *count.
  * If more entries than max_entries are available, stop processing once this
- * is reached and return the rest of the list.
+ * is reached.
  */
-struct io_wq_work_node *io_handle_tw_list(struct io_wq_work_node *node,
-					  unsigned int *count,
-					  unsigned int max_entries)
+void io_handle_tw_list(struct io_wq_work_list *list, unsigned int *count,
+		       unsigned int max_entries)
 {
 	struct io_ring_ctx *ctx = NULL;
 	struct io_tw_state ts = { };
 
 	do {
-		struct io_wq_work_node *next = node->next;
+		struct io_wq_work_node *node = list->first;
 		struct io_kiocb *req = container_of(node, struct io_kiocb,
 						    io_task_work.node);
 
+		list->first = node->next;
 		if (req->ctx != ctx) {
 			ctx_flush_and_put(ctx, &ts);
 			ctx = req->ctx;
@@ -1067,17 +1067,15 @@ struct io_wq_work_node *io_handle_tw_list(struct io_wq_work_node *node,
 		INDIRECT_CALL_2(req->io_task_work.func,
 				io_poll_task_func, io_req_rw_complete,
 				req, &ts);
-		node = next;
 		(*count)++;
 		if (unlikely(need_resched())) {
 			ctx_flush_and_put(ctx, &ts);
 			ctx = NULL;
 			cond_resched();
 		}
-	} while (node && *count < max_entries);
+	} while (list->first && *count < max_entries);
 
 	ctx_flush_and_put(ctx, &ts);
-	return node;
 }
 
 static __cold void __io_fallback_schedule(struct io_ring_ctx *ctx,
@@ -1137,41 +1135,39 @@ static void io_fallback_tw(struct io_uring_task *tctx, bool sync)
 	__io_fallback_tw(&tctx->task_list, &tctx->task_lock, sync);
 }
 
-struct io_wq_work_node *__tctx_task_work_run(struct io_uring_task *tctx,
-					     unsigned int max_entries,
-					     unsigned int *count)
+void __tctx_task_work_run(struct io_uring_task *tctx,
+			  struct io_wq_work_list *list,
+			  unsigned int max_entries, unsigned int *count)
 {
-	struct io_wq_work_node *node;
-
 	if (unlikely(current->flags & PF_EXITING)) {
 		io_fallback_tw(tctx, true);
-		return NULL;
+		return;
 	}
 
 	if (!READ_ONCE(tctx->task_list.first))
-		return NULL;
+		return;
 
 	spin_lock_irq(&tctx->task_lock);
-	node = tctx->task_list.first;
+	*list = tctx->task_list;
 	INIT_WQ_LIST(&tctx->task_list);
 	spin_unlock_irq(&tctx->task_lock);
 
-	if (node)
-		node = io_handle_tw_list(node, count, max_entries);
+	if (!wq_list_empty(list))
+		io_handle_tw_list(list, count, max_entries);
 
 	/* relaxed read is enough as only the task itself sets ->in_cancel */
 	if (unlikely(atomic_read(&tctx->in_cancel)))
 		io_uring_drop_tctx_refs(current);
 
 	trace_io_uring_task_work_run(tctx, *count);
-	return node;
 }
 
 unsigned int tctx_task_work_run(struct io_uring_task *tctx)
 {
+	struct io_wq_work_list list;
 	unsigned int count = 0;
 
-	__tctx_task_work_run(tctx, UINT_MAX, &count);
+	__tctx_task_work_run(tctx, &list, UINT_MAX, &count);
 	return count;
 }
 
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 2b0e7c5db30d..74b1468aefda 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -91,10 +91,11 @@ void io_req_task_queue(struct io_kiocb *req);
 void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
 void io_req_task_queue_fail(struct io_kiocb *req, int ret);
 void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
-struct io_wq_work_node *io_handle_tw_list(struct io_wq_work_node *node,
-	unsigned int *count, unsigned int max_entries);
-struct io_wq_work_node *__tctx_task_work_run(struct io_uring_task *tctx,
-	unsigned int max_entries, unsigned int *count);
+void io_handle_tw_list(struct io_wq_work_list *list, unsigned int *count,
+	unsigned int max_entries);
+void __tctx_task_work_run(struct io_uring_task *tctx,
+	struct io_wq_work_list *list, unsigned int max_entries,
+	unsigned int *count);
 unsigned int tctx_task_work_run(struct io_uring_task *tctx);
 void tctx_task_work(struct callback_head *cb);
 __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
index aec6c2d56910..3cd50369db5a 100644
--- a/io_uring/sqpoll.c
+++ b/io_uring/sqpoll.c
@@ -221,29 +221,29 @@ static bool io_sqd_handle_event(struct io_sq_data *sqd)
  * than we were asked to process. Newly queued task_work isn't run until the
  * retry list has been fully processed.
  */
-static unsigned int io_sq_tw(struct io_wq_work_node **retry_list, int max_entries)
+static unsigned int io_sq_tw(struct io_wq_work_list *retry_list, int max_entries)
 {
 	struct io_uring_task *tctx = current->io_uring;
 	unsigned int count = 0;
 
-	if (*retry_list) {
-		*retry_list = io_handle_tw_list(*retry_list, &count, max_entries);
+	if (!wq_list_empty(retry_list)) {
+		io_handle_tw_list(retry_list, &count, max_entries);
 		if (count >= max_entries)
 			goto out;
 		max_entries -= count;
 	}
-	*retry_list = __tctx_task_work_run(tctx, max_entries, &count);
+	__tctx_task_work_run(tctx, retry_list, max_entries, &count);
 out:
 	if (task_work_pending(current))
 		task_work_run();
 	return count;
 }
 
-static bool io_sq_tw_pending(struct io_wq_work_node *retry_list)
+static bool io_sq_tw_pending(struct io_wq_work_list *retry_list)
 {
 	struct io_uring_task *tctx = current->io_uring;
 
-	return retry_list || READ_ONCE(tctx->task_list.first);
+	return !wq_list_empty(retry_list) || !wq_list_empty(&tctx->task_list);
 }
 
 static void io_sq_update_worktime(struct io_sq_data *sqd, struct rusage *start)
@@ -259,7 +259,7 @@ static void io_sq_update_worktime(struct io_sq_data *sqd, struct rusage *start)
 
 static int io_sq_thread(void *data)
 {
-	struct io_wq_work_node *retry_list = NULL;
+	struct io_wq_work_list retry_list;
 	struct io_sq_data *sqd = data;
 	struct io_ring_ctx *ctx;
 	struct rusage start;
@@ -292,6 +292,7 @@ static int io_sq_thread(void *data)
 	audit_uring_entry(IORING_OP_NOP);
 	audit_uring_exit(true, 0);
 
+	INIT_WQ_LIST(&retry_list);
 	mutex_lock(&sqd->lock);
 	while (1) {
 		bool cap_entries, sqt_spin = false;
@@ -332,7 +333,8 @@ static int io_sq_thread(void *data)
 		}
 
 		prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
-		if (!io_sqd_events_pending(sqd) && !io_sq_tw_pending(retry_list)) {
+		if (!io_sqd_events_pending(sqd) &&
+		    !io_sq_tw_pending(&retry_list)) {
 			bool needs_sched = true;
 
 			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
@@ -371,7 +373,7 @@ static int io_sq_thread(void *data)
 		timeout = jiffies + sqd->sq_thread_idle;
 	}
 
-	if (retry_list)
+	if (!wq_list_empty(&retry_list))
 		io_sq_tw(&retry_list, UINT_MAX);
 
 	io_uring_cancel_generic(true, sqd);
-- 
2.45.2


Thread overview: 12+ messages
2024-11-22 16:12 [PATCHSET for-next 0/6] task work cleanups Jens Axboe
2024-11-22 16:12 ` [PATCH 1/6] io_uring: make task_work pending check dependent on ring type Jens Axboe
2024-11-22 16:12 ` [PATCH 2/6] io_uring: replace defer task_work llist with io_wq_work_list Jens Axboe
2024-11-22 17:07   ` Pavel Begunkov
2024-11-22 17:11     ` Jens Axboe
2024-11-22 17:25       ` Pavel Begunkov
2024-11-22 17:44         ` Jens Axboe
2024-11-23  0:36           ` Pavel Begunkov
2024-11-22 16:12 ` [PATCH 3/6] io_uring/slist: add list-to-list list splice helper Jens Axboe
2024-11-22 16:12 ` [PATCH 4/6] io_uring: switch non-defer task_work to io_wq_work_list Jens Axboe
2024-11-22 16:12 ` [PATCH 5/6] io_uring: add __tctx_task_work_run() helper Jens Axboe
2024-11-22 16:12 ` Jens Axboe [this message]
