From: Jens Axboe <[email protected]>
To: syzbot <[email protected]>,
[email protected], [email protected],
[email protected], [email protected],
[email protected], [email protected]
Subject: Re: possible deadlock in io_timeout_fn
Date: Mon, 10 Aug 2020 10:53:16 -0600
Message-ID: <[email protected]>
In-Reply-To: <[email protected]>

On 8/10/20 9:37 AM, syzbot wrote:
> Hello,
>
> syzbot found the following issue on:
>
> HEAD commit: 449dc8c9 Merge tag 'for-v5.9' of git://git.kernel.org/pub/..
> git tree: upstream
> console output: https://syzkaller.appspot.com/x/log.txt?x=11293dc6900000
> kernel config: https://syzkaller.appspot.com/x/.config?x=9d25235bf0162fbc
> dashboard link: https://syzkaller.appspot.com/bug?extid=ef4b654b49ed7ff049bf
> compiler: clang version 10.0.0 (https://github.com/llvm/llvm-project/ c2443155a0fb245c8f17f2c1c72b6ea391e86e81)
> syz repro: https://syzkaller.appspot.com/x/repro.syz?x=126b0f1a900000
> C reproducer: https://syzkaller.appspot.com/x/repro.c?x=13e32994900000
>
> The issue was bisected to:
>
> commit e62753e4e2926f249d088cc0517be5ed4efec6d6
> Author: Bijan Mottahedeh <[email protected]>
> Date: Sat May 23 04:31:18 2020 +0000
>
> io_uring: call statx directly

I don't think this commit is to blame; it's a generic issue with
needing to put the file table from the error/fail path.

Something like the below should fix it: if the completion lock is
held, punt the file table put to a safe context via task_work instead.
The patch looks bigger than it is, due to moving some of the generic
task_work handling functions up a bit.
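Roughly, the shape of the fix is the below (a simplified sketch, not
the literal patch; put_fs() is a hypothetical stand-in for the
open-coded fs_struct put that io_req_clean_work() otherwise does
inline):

	/*
	 * The completion lock may be held here with IRQs disabled
	 * (e.g. off the timeout/error path), and fs->lock is not IRQ
	 * safe. If so, punt the fs_struct put to task context via
	 * task_work; otherwise it's safe to drop it inline.
	 */
	if (req->flags & REQ_F_COMP_LOCKED) {
		init_task_work(&req->task_work, io_req_task_file_table_put);
		io_req_task_work_add(req, &req->task_work);
	} else {
		put_fs(req);	/* takes fs->lock directly */
	}

The actual patch: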
diff --git a/fs/io_uring.c b/fs/io_uring.c
index f9be665d1c5e..5df805d6251e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1108,10 +1108,16 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
}
}

-static void io_req_clean_work(struct io_kiocb *req)
+/*
+ * Returns true if we need to defer file table putting. This can only happen
+ * from the error path with REQ_F_COMP_LOCKED set.
+ */
+static bool io_req_clean_work(struct io_kiocb *req)
{
if (!(req->flags & REQ_F_WORK_INITIALIZED))
- return;
+ return false;
+
+ req->flags &= ~REQ_F_WORK_INITIALIZED;

if (req->work.mm) {
mmdrop(req->work.mm);
@@ -1124,6 +1130,9 @@ static void io_req_clean_work(struct io_kiocb *req)
if (req->work.fs) {
struct fs_struct *fs = req->work.fs;

+ if (req->flags & REQ_F_COMP_LOCKED)
+ return true;
+
spin_lock(&req->work.fs->lock);
if (--fs->users)
fs = NULL;
@@ -1132,7 +1141,8 @@ static void io_req_clean_work(struct io_kiocb *req)
free_fs_struct(fs);
req->work.fs = NULL;
}
- req->flags &= ~REQ_F_WORK_INITIALIZED;
+
+ return false;
}

static void io_prep_async_work(struct io_kiocb *req)
@@ -1544,7 +1554,7 @@ static inline void io_put_file(struct io_kiocb *req, struct file *file,
fput(file);
}

-static void io_dismantle_req(struct io_kiocb *req)
+static bool io_dismantle_req(struct io_kiocb *req)
{
io_clean_op(req);
@@ -1552,7 +1562,6 @@ static void io_dismantle_req(struct io_kiocb *req)
kfree(req->io);
if (req->file)
io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
- io_req_clean_work(req);

if (req->flags & REQ_F_INFLIGHT) {
struct io_ring_ctx *ctx = req->ctx;
@@ -1564,15 +1573,108 @@ static void io_dismantle_req(struct io_kiocb *req)
wake_up(&ctx->inflight_wait);
spin_unlock_irqrestore(&ctx->inflight_lock, flags);
}
+
+ return io_req_clean_work(req);
}

-static void __io_free_req(struct io_kiocb *req)
+static void __io_req_task_cancel(struct io_kiocb *req, int error)
{
- struct io_ring_ctx *ctx;
+ struct io_ring_ctx *ctx = req->ctx;
+
+ spin_lock_irq(&ctx->completion_lock);
+ io_cqring_fill_event(req, error);
+ io_commit_cqring(ctx);
+ spin_unlock_irq(&ctx->completion_lock);
+
+ io_cqring_ev_posted(ctx);
+ req_set_fail_links(req);
+ io_double_put_req(req);
+}
+
+static void io_req_task_cancel(struct callback_head *cb)
+{
+ struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+
+ __io_req_task_cancel(req, -ECANCELED);
+}
+
+static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
+{
+ struct task_struct *tsk = req->task;
+ struct io_ring_ctx *ctx = req->ctx;
+ int ret, notify = TWA_RESUME;
+
+ ret = __task_work_add(tsk, cb);
+ if (unlikely(ret))
+ return ret;
+
+ /*
+ * SQPOLL kernel thread doesn't need notification, just a wakeup.
+ * For any other work, use signaled wakeups if the task isn't
+ * running to avoid dependencies between tasks or threads. If
+ * the issuing task is currently waiting in the kernel on a thread,
+ * and same thread is waiting for a completion event, then we need
+ * to ensure that the issuing task processes task_work. TWA_SIGNAL
+ * is needed for that.
+ */
+ if (ctx->flags & IORING_SETUP_SQPOLL)
+ notify = 0;
+ else if (READ_ONCE(tsk->state) != TASK_RUNNING)
+ notify = TWA_SIGNAL;
+
+ __task_work_notify(tsk, notify);
+ wake_up_process(tsk);
+ return 0;
+}
+
+static void __io_req_task_submit(struct io_kiocb *req)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+
+ if (!__io_sq_thread_acquire_mm(ctx)) {
+ mutex_lock(&ctx->uring_lock);
+ __io_queue_sqe(req, NULL, NULL);
+ mutex_unlock(&ctx->uring_lock);
+ } else {
+ __io_req_task_cancel(req, -EFAULT);
+ }
+}
+
+static void io_req_task_submit(struct callback_head *cb)
+{
+ struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+
+ __io_req_task_submit(req);
+}
+
+
+static void __io_req_task_queue(struct io_kiocb *req, task_work_func_t func)
+{
+ int ret;
+
+ init_task_work(&req->task_work, func);
+
+ ret = io_req_task_work_add(req, &req->task_work);
+ if (unlikely(ret)) {
+ struct task_struct *tsk;
+
+ init_task_work(&req->task_work, io_req_task_cancel);
+ tsk = io_wq_get_task(req->ctx->io_wq);
+ task_work_add(tsk, &req->task_work, 0);
+ wake_up_process(tsk);
+ }
+}
+
+static void io_req_task_queue(struct io_kiocb *req)
+{
+ __io_req_task_queue(req, io_req_task_submit);
+}
+
+static void __io_free_req_finish(struct io_kiocb *req)
+{
+ struct io_ring_ctx *ctx = req->ctx;

- io_dismantle_req(req);
__io_put_req_task(req);
- ctx = req->ctx;
if (likely(!io_is_fallback_req(req)))
kmem_cache_free(req_cachep, req);
else
@@ -1580,6 +1682,29 @@ static void __io_free_req(struct io_kiocb *req)
percpu_ref_put(&ctx->refs);
}

+static void io_req_task_file_table_put(struct callback_head *cb)
+{
+ struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+ struct fs_struct *fs = req->work.fs;
+
+ spin_lock(&req->work.fs->lock);
+ if (--fs->users)
+ fs = NULL;
+ spin_unlock(&req->work.fs->lock);
+ if (fs)
+ free_fs_struct(fs);
+ req->work.fs = NULL;
+ __io_free_req_finish(req);
+}
+
+static void __io_free_req(struct io_kiocb *req)
+{
+ if (!io_dismantle_req(req))
+ __io_free_req_finish(req);
+ else
+ __io_req_task_queue(req, io_req_task_file_table_put);
+}
+
static bool io_link_cancel_timeout(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
@@ -1667,6 +1792,7 @@ static void __io_fail_links(struct io_kiocb *req)
trace_io_uring_fail_link(req, link);
io_cqring_fill_event(link, -ECANCELED);
+ link->flags |= REQ_F_COMP_LOCKED;
__io_double_put_req(link);
req->flags &= ~REQ_F_LINK_TIMEOUT;
}
@@ -1717,93 +1843,6 @@ static struct io_kiocb *io_req_find_next(struct io_kiocb *req)
return __io_req_find_next(req);
}

-static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
-{
- struct task_struct *tsk = req->task;
- struct io_ring_ctx *ctx = req->ctx;
- int ret, notify = TWA_RESUME;
-
- ret = __task_work_add(tsk, cb);
- if (unlikely(ret))
- return ret;
-
- /*
- * SQPOLL kernel thread doesn't need notification, just a wakeup.
- * For any other work, use signaled wakeups if the task isn't
- * running to avoid dependencies between tasks or threads. If
- * the issuing task is currently waiting in the kernel on a thread,
- * and same thread is waiting for a completion event, then we need
- * to ensure that the issuing task processes task_work. TWA_SIGNAL
- * is needed for that.
- */
- if (ctx->flags & IORING_SETUP_SQPOLL)
- notify = 0;
- else if (READ_ONCE(tsk->state) != TASK_RUNNING)
- notify = TWA_SIGNAL;
-
- __task_work_notify(tsk, notify);
- wake_up_process(tsk);
- return 0;
-}
-
-static void __io_req_task_cancel(struct io_kiocb *req, int error)
-{
- struct io_ring_ctx *ctx = req->ctx;
-
- spin_lock_irq(&ctx->completion_lock);
- io_cqring_fill_event(req, error);
- io_commit_cqring(ctx);
- spin_unlock_irq(&ctx->completion_lock);
-
- io_cqring_ev_posted(ctx);
- req_set_fail_links(req);
- io_double_put_req(req);
-}
-
-static void io_req_task_cancel(struct callback_head *cb)
-{
- struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
-
- __io_req_task_cancel(req, -ECANCELED);
-}
-
-static void __io_req_task_submit(struct io_kiocb *req)
-{
- struct io_ring_ctx *ctx = req->ctx;
-
- if (!__io_sq_thread_acquire_mm(ctx)) {
- mutex_lock(&ctx->uring_lock);
- __io_queue_sqe(req, NULL, NULL);
- mutex_unlock(&ctx->uring_lock);
- } else {
- __io_req_task_cancel(req, -EFAULT);
- }
-}
-
-static void io_req_task_submit(struct callback_head *cb)
-{
- struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
-
- __io_req_task_submit(req);
-}
-
-static void io_req_task_queue(struct io_kiocb *req)
-{
- int ret;
-
- init_task_work(&req->task_work, io_req_task_submit);
-
- ret = io_req_task_work_add(req, &req->task_work);
- if (unlikely(ret)) {
- struct task_struct *tsk;
-
- init_task_work(&req->task_work, io_req_task_cancel);
- tsk = io_wq_get_task(req->ctx->io_wq);
- task_work_add(tsk, &req->task_work, 0);
- wake_up_process(tsk);
- }
-}
-
static void io_queue_next(struct io_kiocb *req)
{
struct io_kiocb *nxt = io_req_find_next(req);
@@ -1872,7 +1911,7 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
req->flags &= ~REQ_F_TASK_PINNED;
}

- io_dismantle_req(req);
+ WARN_ON_ONCE(io_dismantle_req(req));
rb->reqs[rb->to_free++] = req;
if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
__io_req_free_batch_flush(req->ctx, rb);
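
One note on the batch free path: it now WARNs if io_dismantle_req()
asks for the deferred put rather than handling it. The deferral should
only ever be needed off the REQ_F_COMP_LOCKED error path, which never
goes through the batched free; if that assumption is ever violated,
the WARN_ON_ONCE() will flag it instead of us silently skipping the
fs put.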
--
Jens Axboe