[PATCH 1/1] io_uring: actively cancel poll/timeouts on exit
From: Pavel Begunkov @ 2020-12-20 19:13 UTC
To: Jens Axboe, io-uring; +Cc: stable, Josef, Dmitry Kadashev
If io_ring_ctx_wait_and_kill() hasn't killed all requests on the first
attempt, new timeouts or requests enqueued for polling may appear. They
will never be cancelled by io_ring_exit_work() unless we specifically
handle that case, leaving the exit work hanging and locking up the
resources grabbed by io_uring.
Cc: <[email protected]> # 5.5+
Cc: Josef <[email protected]>
Reported-by: Dmitry Kadashev <[email protected]>
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 55 +++++++++++++++++++++++++++------------------------
1 file changed, 29 insertions(+), 26 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index fbf747803dbc..c1acc668fe96 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -8620,6 +8620,32 @@ static int io_remove_personalities(int id, void *p, void *data)
return 0;
}
+static void io_cancel_defer_files(struct io_ring_ctx *ctx,
+ struct task_struct *task,
+ struct files_struct *files)
+{
+ struct io_defer_entry *de = NULL;
+ LIST_HEAD(list);
+
+ spin_lock_irq(&ctx->completion_lock);
+ list_for_each_entry_reverse(de, &ctx->defer_list, list) {
+ if (io_match_task(de->req, task, files)) {
+ list_cut_position(&list, &ctx->defer_list, &de->list);
+ break;
+ }
+ }
+ spin_unlock_irq(&ctx->completion_lock);
+
+ while (!list_empty(&list)) {
+ de = list_first_entry(&list, struct io_defer_entry, list);
+ list_del_init(&de->list);
+ req_set_fail_links(de->req);
+ io_put_req(de->req);
+ io_req_complete(de->req, -ECANCELED);
+ kfree(de);
+ }
+}
+
static void io_ring_exit_work(struct work_struct *work)
{
struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
@@ -8633,6 +8659,8 @@ static void io_ring_exit_work(struct work_struct *work)
*/
do {
io_iopoll_try_reap_events(ctx);
+ io_poll_remove_all(ctx, NULL, NULL);
+ io_kill_timeouts(ctx, NULL, NULL);
} while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
io_ring_ctx_free(ctx);
}
@@ -8654,6 +8682,7 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
io_cqring_overflow_flush(ctx, true, NULL, NULL);
mutex_unlock(&ctx->uring_lock);
+ io_cancel_defer_files(ctx, NULL, NULL);
io_kill_timeouts(ctx, NULL, NULL);
io_poll_remove_all(ctx, NULL, NULL);
@@ -8716,32 +8745,6 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
return ret;
}
-static void io_cancel_defer_files(struct io_ring_ctx *ctx,
- struct task_struct *task,
- struct files_struct *files)
-{
- struct io_defer_entry *de = NULL;
- LIST_HEAD(list);
-
- spin_lock_irq(&ctx->completion_lock);
- list_for_each_entry_reverse(de, &ctx->defer_list, list) {
- if (io_match_task(de->req, task, files)) {
- list_cut_position(&list, &ctx->defer_list, &de->list);
- break;
- }
- }
- spin_unlock_irq(&ctx->completion_lock);
-
- while (!list_empty(&list)) {
- de = list_first_entry(&list, struct io_defer_entry, list);
- list_del_init(&de->list);
- req_set_fail_links(de->req);
- io_put_req(de->req);
- io_req_complete(de->req, -ECANCELED);
- kfree(de);
- }
-}
-
static void io_uring_cancel_files(struct io_ring_ctx *ctx,
struct task_struct *task,
struct files_struct *files)
--
2.24.0
Re: [PATCH 1/1] io_uring: actively cancel poll/timeouts on exit
From: Pavel Begunkov @ 2020-12-20 19:15 UTC
To: Jens Axboe, io-uring; +Cc: stable, Josef, Dmitry Kadashev
On 20/12/2020 19:13, Pavel Begunkov wrote:
> If io_ring_ctx_wait_and_kill() hasn't killed all requests on the first
> attempt, new timeouts or requests enqueued for polling may appear. They
> will never be cancelled by io_ring_exit_work() unless we specifically
> handle that case, leaving the exit work hanging and locking up the
> resources grabbed by io_uring.
Josef and Dmitry, it would be great to have your Tested-by: <>
> Cc: <[email protected]> # 5.5+
> Cc: Josef <[email protected]>
> Reported-by: Dmitry Kadashev <[email protected]>
> Signed-off-by: Pavel Begunkov <[email protected]>
> [...]
--
Pavel Begunkov
Re: [PATCH 1/1] io_uring: actively cancel poll/timeouts on exit
From: Josef @ 2020-12-21 8:31 UTC
To: Pavel Begunkov; +Cc: Jens Axboe, io-uring, stable, Dmitry Kadashev
> Josef and Dmitry, it would be great to have your Tested-by: <>
My bad, it doesn't work; it's related to this thread:
https://lore.kernel.org/io-uring/[email protected]/T/#me3b958c51320f999384d7a05a958237b29146486
--
Josef