From: Jens Axboe <[email protected]>
To: [email protected]
Cc: Pavel Begunkov <[email protected]>, Jens Axboe <[email protected]>
Subject: [PATCH 24/33] io_uring: kill sqo_dead and sqo submission halting
Date: Wed, 3 Mar 2021 17:26:51 -0700
Message-ID: <[email protected]>
In-Reply-To: <[email protected]>
From: Pavel Begunkov <[email protected]>
Now that the SQPOLL task no longer pokes into ->sqo_task, there is no need
to kill SQPOLL submission when the master task exits. Previously that was
necessary to avoid races between accessing sqo_task->files and removing them.
Signed-off-by: Pavel Begunkov <[email protected]>
[axboe: don't forget to enable SQPOLL before exit, if started disabled]
Signed-off-by: Jens Axboe <[email protected]>
---
fs/io_uring.c | 45 ++++++++-------------------------------------
1 file changed, 8 insertions(+), 37 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index bdaeda5eefd5..da90d877afd4 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -338,7 +338,6 @@ struct io_ring_ctx {
unsigned int drain_next: 1;
unsigned int eventfd_async: 1;
unsigned int restricted: 1;
- unsigned int sqo_dead: 1;
unsigned int sqo_exec: 1;
/*
@@ -1967,7 +1966,7 @@ static void __io_req_task_submit(struct io_kiocb *req)
/* ctx stays valid until unlock, even if we drop all ours ctx->refs */
mutex_lock(&ctx->uring_lock);
- if (!ctx->sqo_dead && !(current->flags & PF_EXITING) && !current->in_execve)
+ if (!(current->flags & PF_EXITING) && !current->in_execve)
__io_queue_sqe(req);
else
__io_req_task_cancel(req, -EFAULT);
@@ -6578,8 +6577,7 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
if (!list_empty(&ctx->iopoll_list))
io_do_iopoll(ctx, &nr_events, 0);
- if (to_submit && !ctx->sqo_dead &&
- likely(!percpu_ref_is_dying(&ctx->refs)))
+ if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)))
ret = io_submit_sqes(ctx, to_submit);
mutex_unlock(&ctx->uring_lock);
}
@@ -7818,7 +7816,7 @@ static int io_sq_thread_fork(struct io_sq_data *sqd, struct io_ring_ctx *ctx)
clear_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
reinit_completion(&sqd->completion);
- ctx->sqo_dead = ctx->sqo_exec = 0;
+ ctx->sqo_exec = 0;
sqd->task_pid = current->pid;
current->flags |= PF_IO_WORKER;
ret = io_wq_fork_thread(io_sq_thread, sqd);
@@ -8529,10 +8527,6 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
{
mutex_lock(&ctx->uring_lock);
percpu_ref_kill(&ctx->refs);
-
- if (WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) && !ctx->sqo_dead))
- ctx->sqo_dead = 1;
-
/* if force is set, the ring is going away. always drop after that */
ctx->cq_overflow_flushed = 1;
if (ctx->rings)
@@ -8692,19 +8686,6 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
}
}
-static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
-{
- mutex_lock(&ctx->uring_lock);
- ctx->sqo_dead = 1;
- if (ctx->flags & IORING_SETUP_R_DISABLED)
- io_sq_offload_start(ctx);
- mutex_unlock(&ctx->uring_lock);
-
- /* make sure callers enter the ring to get error */
- if (ctx->rings)
- io_ring_set_wakeup_flag(ctx);
-}
-
/*
* We need to iteratively cancel requests, in case a request has dependent
* hard links. These persist even for failure of cancelations, hence keep
@@ -8717,7 +8698,11 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
bool did_park = false;
if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
- io_disable_sqo_submit(ctx);
+ /* never started, nothing to cancel */
+ if (ctx->flags & IORING_SETUP_R_DISABLED) {
+ io_sq_offload_start(ctx);
+ return;
+ }
did_park = io_sq_thread_park(ctx->sq_data);
if (did_park) {
task = ctx->sq_data->thread;
@@ -8838,7 +8823,6 @@ static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
if (!sqd)
return;
- io_disable_sqo_submit(ctx);
if (!io_sq_thread_park(sqd))
return;
tctx = ctx->sq_data->thread->io_uring;
@@ -8883,7 +8867,6 @@ void __io_uring_task_cancel(void)
/* make sure overflow events are dropped */
atomic_inc(&tctx->in_idle);
- /* trigger io_disable_sqo_submit() */
if (tctx->sqpoll) {
struct file *file;
unsigned long index;
@@ -9014,22 +8997,14 @@ static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
do {
if (!io_sqring_full(ctx))
break;
-
prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
- if (unlikely(ctx->sqo_dead)) {
- ret = -EOWNERDEAD;
- goto out;
- }
-
if (!io_sqring_full(ctx))
break;
-
schedule();
} while (!signal_pending(current));
finish_wait(&ctx->sqo_sq_wait, &wait);
-out:
return ret;
}
@@ -9115,8 +9090,6 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
goto out;
}
ret = -EOWNERDEAD;
- if (unlikely(ctx->sqo_dead))
- goto out;
if (flags & IORING_ENTER_SQ_WAKEUP)
wake_up(&ctx->sq_data->wait);
if (flags & IORING_ENTER_SQ_WAIT) {
@@ -9488,7 +9461,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
*/
ret = io_uring_install_fd(ctx, file);
if (ret < 0) {
- io_disable_sqo_submit(ctx);
/* fput will clean it up */
fput(file);
return ret;
@@ -9497,7 +9469,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
return ret;
err:
- io_disable_sqo_submit(ctx);
io_ring_ctx_wait_and_kill(ctx);
return ret;
}
--
2.30.1
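
For reference, a minimal userspace sketch (not part of the patch, assumes
liburing) of the ring state this change touches: an SQPOLL ring created with
IORING_SETUP_R_DISABLED does not start submission until the application
enables the ring, which is why the cancel/exit path above now calls
io_sq_offload_start() for rings that were never enabled. Error handling is
trimmed, and SQPOLL may require elevated privileges on kernels of this era.

#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_params p = { 0 };
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	/* SQPOLL submission thread, but created in the disabled state */
	p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_R_DISABLED;
	p.sq_thread_idle = 1000;	/* ms of idle before the SQ thread sleeps */

	ret = io_uring_queue_init_params(8, &ring, &p);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %d\n", ret);
		return 1;
	}

	/*
	 * Nothing can be submitted until the ring is enabled; if the task
	 * exits before this point, the kernel still has to start and then
	 * stop the SQ thread cleanly (the case handled in this patch).
	 */
	ret = io_uring_enable_rings(&ring);
	if (ret < 0) {
		fprintf(stderr, "enable_rings: %d\n", ret);
		return 1;
	}

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	io_uring_submit(&ring);

	if (!io_uring_wait_cqe(&ring, &cqe))
		io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	return 0;
}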