* [PATCH 1/2] io_uring: add generic callback_head helpers
From: Pavel Begunkov @ 2021-03-15 14:23 UTC
To: Jens Axboe, io-uring
We already have helpers to run/add callback_head entries, but they take a
ctx and work on ctx->exit_task_work directly. Extract generic versions of
them that operate on an arbitrary struct callback_head pointer; they will
be used later.
Signed-off-by: Pavel Begunkov <[email protected]>
---
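[Note for reviewers, not part of the commit message: the two helpers boil
down to a lock-free LIFO list. Below is a minimal userspace sketch of the
same pattern, with C11 atomics standing in for the kernel's
READ_ONCE()/cmpxchg()/xchg(); the names list_add_head()/list_run() are
invented for illustration.]

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cb {
	struct cb *next;
	void (*func)(struct cb *);
};

/* prepend one entry, as io_task_work_add_head() does with cmpxchg() */
static void list_add_head(_Atomic(struct cb *) *head, struct cb *work)
{
	struct cb *old = atomic_load(head);

	do {
		work->next = old;
	} while (!atomic_compare_exchange_weak(head, &old, work));
}

/* detach the whole list at once and run it, re-checking for entries
 * added meanwhile, as io_run_task_work_head() does with xchg() */
static bool list_run(_Atomic(struct cb *) *head)
{
	struct cb *work;
	bool executed = false;

	while ((work = atomic_exchange(head, NULL))) {
		while (work) {
			struct cb *next = work->next;

			work->func(work); /* kernel also cond_resched()s here */
			work = next;
		}
		executed = true;
	}
	return executed;
}

static void say(struct cb *w)
{
	(void)w;
	puts("callback ran");
}

int main(void)
{
	_Atomic(struct cb *) head = NULL;
	struct cb a = { .func = say }, b = { .func = say };

	list_add_head(&head, &a);
	list_add_head(&head, &b);
	list_run(&head); /* runs b, then a: the list is LIFO */
	return 0;
}

The xchg()-to-NULL detach is what makes it safe for a callback to queue
more work: anything added while a batch is running is picked up by the
next iteration of the outer loop.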
fs/io_uring.c | 62 ++++++++++++++++++++++++++++++---------------------
1 file changed, 36 insertions(+), 26 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 9fb4bc5f063b..f396063b4798 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1926,17 +1926,44 @@ static int io_req_task_work_add(struct io_kiocb *req)
return ret;
}
-static void io_req_task_work_add_fallback(struct io_kiocb *req,
- task_work_func_t cb)
+static bool io_run_task_work_head(struct callback_head **work_head)
+{
+ struct callback_head *work, *next;
+ bool executed = false;
+
+ do {
+ work = xchg(work_head, NULL);
+ if (!work)
+ break;
+
+ do {
+ next = work->next;
+ work->func(work);
+ work = next;
+ cond_resched();
+ } while (work);
+ executed = true;
+ } while (1);
+
+ return executed;
+}
+
+static void io_task_work_add_head(struct callback_head **work_head,
+ struct callback_head *task_work)
{
- struct io_ring_ctx *ctx = req->ctx;
struct callback_head *head;
- init_task_work(&req->task_work, cb);
do {
- head = READ_ONCE(ctx->exit_task_work);
- req->task_work.next = head;
- } while (cmpxchg(&ctx->exit_task_work, head, &req->task_work) != head);
+ head = READ_ONCE(*work_head);
+ task_work->next = head;
+ } while (cmpxchg(work_head, head, task_work) != head);
+}
+
+static void io_req_task_work_add_fallback(struct io_kiocb *req,
+ task_work_func_t cb)
+{
+ init_task_work(&req->task_work, cb);
+ io_task_work_add_head(&req->ctx->exit_task_work, &req->task_work);
}
static void __io_req_task_cancel(struct io_kiocb *req, int error)
@@ -8468,26 +8495,9 @@ static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
return -EINVAL;
}
-static bool io_run_ctx_fallback(struct io_ring_ctx *ctx)
+static inline bool io_run_ctx_fallback(struct io_ring_ctx *ctx)
{
- struct callback_head *work, *next;
- bool executed = false;
-
- do {
- work = xchg(&ctx->exit_task_work, NULL);
- if (!work)
- break;
-
- do {
- next = work->next;
- work->func(work);
- work = next;
- cond_resched();
- } while (work);
- executed = true;
- } while (1);
-
- return executed;
+ return io_run_task_work_head(&ctx->exit_task_work);
}
struct io_tctx_exit {
--
2.24.0
* [PATCH 2/2] io_uring: fix sqpoll cancellation via task_work
From: Pavel Begunkov @ 2021-03-15 14:23 UTC
To: Jens Axboe, io-uring
Running sqpoll cancellations via task_work_run() is a bad idea because it
depends on other task works being run, and those may be stuck in a
currently executing task_work_run() because of how it operates (splicing
the list off in batches).
Enqueue and run them through a separate callback head, namely
struct io_sq_data::park_task_work. As a nice bonus, we now precisely
control where it is run, which is much safer than guessing where it can
happen, as was the case before.
Reported-by: Jens Axboe <[email protected]>
Signed-off-by: Pavel Begunkov <[email protected]>
---
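[Note, not part of the commit message: the io_sqpoll_cancel_sync() flow is
now "queue a callback on a list the sq thread owns, wake the thread, wait
for a completion". Below is a rough userspace analogue of that handshake;
a mutex-protected list stands in for the kernel's cmpxchg-based one, and
all names are invented for illustration.]

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct work {
	struct work *next;
	void (*func)(struct work *);
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static struct work *park_work;	/* like sqd->park_task_work */
static bool stop;

struct cancel_work {
	struct work w;
	bool done;		/* plays the role of struct completion */
};

static void cancel_cb(struct work *w)
{
	struct cancel_work *cw = (struct cancel_work *)w;

	/* ... the actual cancellation would happen here ... */
	cw->done = true;
}

static void *sq_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!stop) {
		/* drain parked work at a well-defined point in the loop */
		while (park_work) {
			struct work *w = park_work;

			park_work = w->next;
			w->func(w);
			pthread_cond_broadcast(&wake);
		}
		pthread_cond_wait(&wake, &lock);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t thr;
	struct cancel_work cw = { .w.func = cancel_cb };

	pthread_create(&thr, NULL, sq_thread, NULL);

	/* like io_sqpoll_cancel_sync(): queue, wake the thread, wait */
	pthread_mutex_lock(&lock);
	cw.w.next = park_work;
	park_work = &cw.w;
	pthread_cond_broadcast(&wake);
	while (!cw.done)
		pthread_cond_wait(&wake, &lock);
	stop = true;
	pthread_cond_broadcast(&wake);
	pthread_mutex_unlock(&lock);

	pthread_join(thr, NULL);
	puts("cancellation completed");
	return 0;
}

Because the thread drains park_task_work only at known points in its own
loop, the cancellation no longer depends on the generic task_work
machinery making progress.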
fs/io_uring.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index f396063b4798..481b2ea85a50 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -274,6 +274,7 @@ struct io_sq_data {
unsigned long state;
struct completion exited;
+ struct callback_head *park_task_work;
};
#define IO_IOPOLL_BATCH 8
@@ -6724,6 +6725,7 @@ static int io_sq_thread(void *data)
cond_resched();
mutex_lock(&sqd->lock);
io_run_task_work();
+ io_run_task_work_head(&sqd->park_task_work);
timeout = jiffies + sqd->sq_thread_idle;
continue;
}
@@ -6778,6 +6780,7 @@ static int io_sq_thread(void *data)
}
finish_wait(&sqd->wait, &wait);
+ io_run_task_work_head(&sqd->park_task_work);
timeout = jiffies + sqd->sq_thread_idle;
}
@@ -6789,6 +6792,7 @@ static int io_sq_thread(void *data)
mutex_unlock(&sqd->lock);
io_run_task_work();
+ io_run_task_work_head(&sqd->park_task_work);
complete(&sqd->exited);
do_exit(0);
}
@@ -8886,7 +8890,7 @@ static void io_sqpoll_cancel_sync(struct io_ring_ctx *ctx)
if (task) {
init_completion(&work.completion);
init_task_work(&work.task_work, io_sqpoll_cancel_cb);
- WARN_ON_ONCE(task_work_add(task, &work.task_work, TWA_SIGNAL));
+ io_task_work_add_head(&sqd->park_task_work, &work.task_work);
wake_up_process(task);
}
io_sq_thread_unpark(sqd);
--
2.24.0