* [PATCH v2] io-wq: forcefully cancel on io-wq destroy
From: Pavel Begunkov @ 2021-03-31 23:18 UTC
To: Jens Axboe, io-uring
[ 491.222908] INFO: task thread-exit:2490 blocked for more than 122 seconds.
[ 491.222957] Call Trace:
[ 491.222967] __schedule+0x36b/0x950
[ 491.222985] schedule+0x68/0xe0
[ 491.222994] schedule_timeout+0x209/0x2a0
[ 491.223003] ? tlb_flush_mmu+0x28/0x140
[ 491.223013] wait_for_completion+0x8b/0xf0
[ 491.223023] io_wq_destroy_manager+0x24/0x60
[ 491.223037] io_wq_put_and_exit+0x18/0x30
[ 491.223045] io_uring_clean_tctx+0x76/0xa0
[ 491.223061] __io_uring_files_cancel+0x1b9/0x2e0
[ 491.223068] ? blk_finish_plug+0x26/0x40
[ 491.223085] do_exit+0xc0/0xb40
[ 491.223099] ? syscall_trace_enter.isra.0+0x1a1/0x1e0
[ 491.223109] __x64_sys_exit+0x1b/0x20
[ 491.223117] do_syscall_64+0x38/0x50
[ 491.223131] entry_SYSCALL_64_after_hwframe+0x44/0xae
[ 491.223177] INFO: task iou-mgr-2490:2491 blocked for more than 122 seconds.
[ 491.223194] Call Trace:
[ 491.223198] __schedule+0x36b/0x950
[ 491.223206] ? pick_next_task_fair+0xcf/0x3e0
[ 491.223218] schedule+0x68/0xe0
[ 491.223225] schedule_timeout+0x209/0x2a0
[ 491.223236] wait_for_completion+0x8b/0xf0
[ 491.223246] io_wq_manager+0xf1/0x1d0
[ 491.223255] ? recalc_sigpending+0x1c/0x60
[ 491.223265] ? io_wq_cpu_online+0x40/0x40
[ 491.223272] ret_from_fork+0x22/0x30
When an io-wq worker exits and sees IO_WQ_BIT_EXIT, it tries not to cancel all
remaining requests but to execute them, so we may end up waiting on the exiting
task for a long time until someone pushes it along, e.g. with SIGKILL. Actively
cancel pending work items on io-wq destruction instead.

Note: io_run_cancel() is moved up without any changes.
Signed-off-by: Pavel Begunkov <[email protected]>
---
v2: fix broken last-minute change
fs/io-wq.c | 50 +++++++++++++++++++++++++++++++++++---------------
1 file changed, 35 insertions(+), 15 deletions(-)
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 7434eb40ca8c..45771bc06651 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -342,6 +342,20 @@ static void io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
 	spin_unlock(&wq->hash->wait.lock);
 }
 
+static struct io_wq_work *io_get_work_all(struct io_wqe *wqe)
+	__must_hold(wqe->lock)
+{
+	struct io_wq_work_list *list = &wqe->work_list;
+	struct io_wq_work_node *node = list->first;
+	int i;
+
+	list->first = list->last = NULL;
+	for (i = 0; i < IO_WQ_NR_HASH_BUCKETS; i++)
+		wqe->hash_tail[i] = NULL;
+
+	return node ? container_of(node, struct io_wq_work, list) : NULL;
+}
+
 static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
 	__must_hold(wqe->lock)
 {
@@ -410,6 +424,17 @@ static void io_assign_current_work(struct io_worker *worker,
 
 static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
 
+static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
+{
+	struct io_wq *wq = wqe->wq;
+
+	do {
+		work->flags |= IO_WQ_WORK_CANCEL;
+		wq->do_work(work);
+		work = wq->free_work(work);
+	} while (work);
+}
+
 static void io_worker_handle_work(struct io_worker *worker)
 	__releases(wqe->lock)
 {
@@ -518,11 +543,17 @@ static int io_wqe_worker(void *data)
 	}
 
 	if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
+		struct io_wq_work *work, *next;
+
 		raw_spin_lock_irq(&wqe->lock);
-		if (!wq_list_empty(&wqe->work_list))
-			io_worker_handle_work(worker);
-		else
-			raw_spin_unlock_irq(&wqe->lock);
+		work = io_get_work_all(wqe);
+		raw_spin_unlock_irq(&wqe->lock);
+
+		while (work) {
+			next = wq_next_work(work);
+			io_run_cancel(work, wqe);
+			work = next;
+		}
 	}
 
 	io_worker_exit(worker);
@@ -748,17 +779,6 @@ static int io_wq_manager(void *data)
 	do_exit(0);
 }
 
-static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
-{
-	struct io_wq *wq = wqe->wq;
-
-	do {
-		work->flags |= IO_WQ_WORK_CANCEL;
-		wq->do_work(work);
-		work = wq->free_work(work);
-	} while (work);
-}
-
 static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
 {
 	unsigned int hash;
--
2.24.0
* Re: [PATCH v2] io-wq: forcefully cancel on io-wq destroy
From: Jens Axboe @ 2021-04-01 1:17 UTC
To: Pavel Begunkov, io-uring
On 3/31/21 5:18 PM, Pavel Begunkov wrote:
> [ 491.222908] INFO: task thread-exit:2490 blocked for more than 122 seconds.
> [ 491.222957] Call Trace:
> [ 491.222967] __schedule+0x36b/0x950
> [ 491.222985] schedule+0x68/0xe0
> [ 491.222994] schedule_timeout+0x209/0x2a0
> [ 491.223003] ? tlb_flush_mmu+0x28/0x140
> [ 491.223013] wait_for_completion+0x8b/0xf0
> [ 491.223023] io_wq_destroy_manager+0x24/0x60
> [ 491.223037] io_wq_put_and_exit+0x18/0x30
> [ 491.223045] io_uring_clean_tctx+0x76/0xa0
> [ 491.223061] __io_uring_files_cancel+0x1b9/0x2e0
> [ 491.223068] ? blk_finish_plug+0x26/0x40
> [ 491.223085] do_exit+0xc0/0xb40
> [ 491.223099] ? syscall_trace_enter.isra.0+0x1a1/0x1e0
> [ 491.223109] __x64_sys_exit+0x1b/0x20
> [ 491.223117] do_syscall_64+0x38/0x50
> [ 491.223131] entry_SYSCALL_64_after_hwframe+0x44/0xae
> [ 491.223177] INFO: task iou-mgr-2490:2491 blocked for more than 122 seconds.
> [ 491.223194] Call Trace:
> [ 491.223198] __schedule+0x36b/0x950
> [ 491.223206] ? pick_next_task_fair+0xcf/0x3e0
> [ 491.223218] schedule+0x68/0xe0
> [ 491.223225] schedule_timeout+0x209/0x2a0
> [ 491.223236] wait_for_completion+0x8b/0xf0
> [ 491.223246] io_wq_manager+0xf1/0x1d0
> [ 491.223255] ? recalc_sigpending+0x1c/0x60
> [ 491.223265] ? io_wq_cpu_online+0x40/0x40
> [ 491.223272] ret_from_fork+0x22/0x30
>
> When an io-wq worker exits and sees IO_WQ_BIT_EXIT, it tries not to cancel
> all remaining requests but to execute them, so we may end up waiting on the
> exiting task for a long time until someone pushes it along, e.g. with
> SIGKILL. Actively cancel pending work items on io-wq destruction instead.
>
> Note: io_run_cancel() is moved up without any changes.
Just to pull some of the discussion in here - I don't think this is a
good idea as-is. At the very least, this should be gated on UNBOUND,
and just waiting for bounded requests while canceling unbounded ones.
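Completely untested, but something along these lines in the worker exit path
is what I have in mind, force-cancelling unbounded work while still running
bounded work to completion:

	while (work) {
		next = wq_next_work(work);

		if (work->flags & IO_WQ_WORK_UNBOUND) {
			/* unbounded (e.g. socket/pipe) work: force-cancel it */
			io_run_cancel(work, wqe);
		} else {
			/* bounded work: execute it (and any linked work) instead */
			struct io_wq_work *linked = work;

			do {
				wq->do_work(linked);
				linked = wq->free_work(linked);
			} while (linked);
		}
		work = next;
	}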
--
Jens Axboe
* Re: [PATCH v2] io-wq: forcefully cancel on io-wq destroy
From: Pavel Begunkov @ 2021-04-01 10:25 UTC
To: Jens Axboe, io-uring
On 01/04/2021 02:17, Jens Axboe wrote:
> On 3/31/21 5:18 PM, Pavel Begunkov wrote:
>> [ 491.222908] INFO: task thread-exit:2490 blocked for more than 122 seconds.
>> [ 491.222957] Call Trace:
>> [ 491.222967] __schedule+0x36b/0x950
>> [ 491.222985] schedule+0x68/0xe0
>> [ 491.222994] schedule_timeout+0x209/0x2a0
>> [ 491.223003] ? tlb_flush_mmu+0x28/0x140
>> [ 491.223013] wait_for_completion+0x8b/0xf0
>> [ 491.223023] io_wq_destroy_manager+0x24/0x60
>> [ 491.223037] io_wq_put_and_exit+0x18/0x30
>> [ 491.223045] io_uring_clean_tctx+0x76/0xa0
>> [ 491.223061] __io_uring_files_cancel+0x1b9/0x2e0
>> [ 491.223068] ? blk_finish_plug+0x26/0x40
>> [ 491.223085] do_exit+0xc0/0xb40
>> [ 491.223099] ? syscall_trace_enter.isra.0+0x1a1/0x1e0
>> [ 491.223109] __x64_sys_exit+0x1b/0x20
>> [ 491.223117] do_syscall_64+0x38/0x50
>> [ 491.223131] entry_SYSCALL_64_after_hwframe+0x44/0xae
>> [ 491.223177] INFO: task iou-mgr-2490:2491 blocked for more than 122 seconds.
>> [ 491.223194] Call Trace:
>> [ 491.223198] __schedule+0x36b/0x950
>> [ 491.223206] ? pick_next_task_fair+0xcf/0x3e0
>> [ 491.223218] schedule+0x68/0xe0
>> [ 491.223225] schedule_timeout+0x209/0x2a0
>> [ 491.223236] wait_for_completion+0x8b/0xf0
>> [ 491.223246] io_wq_manager+0xf1/0x1d0
>> [ 491.223255] ? recalc_sigpending+0x1c/0x60
>> [ 491.223265] ? io_wq_cpu_online+0x40/0x40
>> [ 491.223272] ret_from_fork+0x22/0x30
>>
>> When an io-wq worker exits and sees IO_WQ_BIT_EXIT, it tries not to cancel
>> all remaining requests but to execute them, so we may end up waiting on the
>> exiting task for a long time until someone pushes it along, e.g. with
>> SIGKILL. Actively cancel pending work items on io-wq destruction instead.
>>
>> Note: io_run_cancel() is moved up without any changes.
>
> Just to pull some of the discussion in here - I don't think this is a
> good idea as-is. At the very least, this should be gated on UNBOUND,
> and just waiting for bounded requests while canceling unbounded ones.
Right, and this may be unexpected for userspace as well, e.g. sockets/pipes.
Another approach would be to keep executing for some time and, if that doesn't
help, go and kill them all. Or a mixture of both. That would at least give
socket ops a chance to complete if the traffic is dynamic and not stuck
waiting forever.
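Not even compile-tested, but roughly what I mean for the worker exit path,
with HZ standing in for whatever grace period we would actually pick:

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		struct io_wq_work *work, *next;
		unsigned long timeout = jiffies + HZ;	/* placeholder */

		raw_spin_lock_irq(&wqe->lock);
		while (!wq_list_empty(&wqe->work_list) &&
		       time_before(jiffies, timeout)) {
			io_worker_handle_work(worker);	/* drops wqe->lock */
			raw_spin_lock_irq(&wqe->lock);
		}

		/* grace period is over, force-cancel whatever is left */
		work = io_get_work_all(wqe);
		raw_spin_unlock_irq(&wqe->lock);

		while (work) {
			next = wq_next_work(work);
			io_run_cancel(work, wqe);
			work = next;
		}
	}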
Though, as with the original problem, that still blocks do_exit() for some
time, which is not nice, so it may need to defer this final io-wq execution
to async context and let do_exit() proceed.
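Very rough sketch of that idea, with an exit_work field that doesn't exist in
struct io_wq today, added here purely for illustration:

static void io_wq_exit_work_fn(struct work_struct *work)
{
	struct io_wq *wq = container_of(work, struct io_wq, exit_work);

	/* same waiting the synchronous path does now, just not in do_exit() */
	io_wq_destroy_manager(wq);
	io_wq_put(wq);
}

void io_wq_put_and_exit(struct io_wq *wq)
{
	set_bit(IO_WQ_BIT_EXIT, &wq->state);

	/* punt the final drain/teardown so do_exit() can proceed */
	INIT_WORK(&wq->exit_work, io_wq_exit_work_fn);
	queue_work(system_unbound_wq, &wq->exit_work);
}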
--
Pavel Begunkov