* [PATCH 01/10] io_uring: fix refs underflow in io_iopoll_queue()
From: Pavel Begunkov @ 2020-06-28 9:52 UTC
To: Jens Axboe, io-uring
Now that io_complete_rw_common() puts a ref, the extra io_put_req()
in io_iopoll_queue() causes an underflow. Remove it.
[ 455.998620] refcount_t: underflow; use-after-free.
[ 455.998743] WARNING: CPU: 6 PID: 285394 at lib/refcount.c:28 refcount_warn_saturate+0xae/0xf0
[ 455.998772] CPU: 6 PID: 285394 Comm: read-write2 Tainted: G I E 5.8.0-rc2-00048-g1b1aa738f167-dirty #509
[ 455.998772] RIP: 0010:refcount_warn_saturate+0xae/0xf0
...
[ 455.998778] Call Trace:
[ 455.998778] io_put_req+0x44/0x50
[ 455.998778] io_iopoll_complete+0x245/0x370
[ 455.998779] io_iopoll_getevents+0x12f/0x1a0
[ 455.998779] io_iopoll_reap_events.part.0+0x5e/0xa0
[ 455.998780] io_ring_ctx_wait_and_kill+0x132/0x1c0
[ 455.998780] io_uring_release+0x20/0x30
[ 455.998780] __fput+0xcd/0x230
[ 455.998781] ____fput+0xe/0x10
[ 455.998781] task_work_run+0x67/0xa0
[ 455.998781] do_exit+0x35d/0xb70
[ 455.998782] do_group_exit+0x43/0xa0
[ 455.998783] get_signal+0x140/0x900
[ 455.998783] do_signal+0x37/0x780
[ 455.998784] __prepare_exit_to_usermode+0x126/0x1c0
[ 455.998785] __syscall_return_slowpath+0x3b/0x1c0
[ 455.998785] do_syscall_64+0x5f/0xa0
[ 455.998785] entry_SYSCALL_64_after_hwframe+0x44/0xa9
Fixes: a1d7c393c47 ("io_uring: enable READ/WRITE to use deferred completions")
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 1 -
1 file changed, 1 deletion(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index bf236ba10601..52441f2465fe 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1904,7 +1904,6 @@ static void io_iopoll_queue(struct list_head *again)
/* shouldn't happen unless io_uring is dying, cancel reqs */
if (unlikely(!current->mm)) {
io_complete_rw_common(&req->rw.kiocb, -EAGAIN, NULL);
- io_put_req(req);
continue;
}
--
2.24.0
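The failure mode in the trace above is the classic one-put-too-many: once the completion path started dropping a reference itself, the leftover put pushed the counter below zero. A minimal user-space sketch of the same bug, with plain C11 atomics standing in for the kernel's refcount_t (which additionally saturates and WARNs instead of wrapping); all names here are hypothetical:

	#include <stdatomic.h>
	#include <stdio.h>

	/* toy analogue of a refcounted request */
	struct toy_req {
		atomic_int refs;
	};

	static void toy_put(struct toy_req *req)
	{
		int old = atomic_fetch_sub(&req->refs, 1);

		if (old == 1) {
			printf("last ref dropped, free the request\n");
		} else if (old <= 0) {
			/* refcount_t would WARN and saturate here instead */
			printf("underflow; use-after-free\n");
		}
	}

	int main(void)
	{
		struct toy_req req = { .refs = 1 };

		toy_put(&req);	/* completion path drops its ref... */
		toy_put(&req);	/* ...so this second put underflows */
		return 0;
	}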
* [PATCH 02/10] io_uring: remove inflight batching in free_many()
From: Pavel Begunkov @ 2020-06-28 9:52 UTC
To: Jens Axboe, io-uring
io_free_req_many() is used only for iopoll requests, i.e. reads/writes,
hence there is no need to batch the inflight unhooking. For safety, it's
now done by io_dismantle_req(), which replaces __io_req_aux_free() and
looks more solid and cleaner.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 41 ++++++++---------------------------------
1 file changed, 8 insertions(+), 33 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 52441f2465fe..28a66e85ef9f 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1504,7 +1504,7 @@ static inline void io_put_file(struct io_kiocb *req, struct file *file,
fput(file);
}
-static void __io_req_aux_free(struct io_kiocb *req)
+static void io_dismantle_req(struct io_kiocb *req)
{
if (req->flags & REQ_F_NEED_CLEANUP)
io_cleanup_req(req);
@@ -1514,11 +1514,6 @@ static void __io_req_aux_free(struct io_kiocb *req)
io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
__io_put_req_task(req);
io_req_work_drop_env(req);
-}
-
-static void __io_free_req(struct io_kiocb *req)
-{
- __io_req_aux_free(req);
if (req->flags & REQ_F_INFLIGHT) {
struct io_ring_ctx *ctx = req->ctx;
@@ -1530,7 +1525,11 @@ static void __io_free_req(struct io_kiocb *req)
wake_up(&ctx->inflight_wait);
spin_unlock_irqrestore(&ctx->inflight_lock, flags);
}
+}
+static void __io_free_req(struct io_kiocb *req)
+{
+ io_dismantle_req(req);
percpu_ref_put(&req->ctx->refs);
if (likely(!io_is_fallback_req(req)))
kmem_cache_free(req_cachep, req);
@@ -1549,35 +1548,11 @@ static void io_free_req_many(struct io_ring_ctx *ctx, struct req_batch *rb)
if (!rb->to_free)
return;
if (rb->need_iter) {
- int i, inflight = 0;
- unsigned long flags;
-
- for (i = 0; i < rb->to_free; i++) {
- struct io_kiocb *req = rb->reqs[i];
-
- if (req->flags & REQ_F_INFLIGHT)
- inflight++;
- __io_req_aux_free(req);
- }
- if (!inflight)
- goto do_free;
-
- spin_lock_irqsave(&ctx->inflight_lock, flags);
- for (i = 0; i < rb->to_free; i++) {
- struct io_kiocb *req = rb->reqs[i];
-
- if (req->flags & REQ_F_INFLIGHT) {
- list_del(&req->inflight_entry);
- if (!--inflight)
- break;
- }
- }
- spin_unlock_irqrestore(&ctx->inflight_lock, flags);
+ int i;
- if (waitqueue_active(&ctx->inflight_wait))
- wake_up(&ctx->inflight_wait);
+ for (i = 0; i < rb->to_free; i++)
+ io_dismantle_req(rb->reqs[i]);
}
-do_free:
kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
percpu_ref_put_many(&ctx->refs, rb->to_free);
rb->to_free = rb->need_iter = 0;
--
2.24.0
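With the unhooking folded into io_dismantle_req(), every free path now does the same thing for an inflight request: take inflight_lock, list_del() the entry, and wake anyone waiting on inflight_wait. A user-space sketch of that unhook-and-wake shape, using pthreads with hypothetical names (a condvar broadcast with no waiters is a no-op, playing the role of the waitqueue_active() check):

	#include <pthread.h>

	struct node {
		struct node *prev, *next;	/* kernel-style circular list */
	};

	struct ctx {
		pthread_mutex_t inflight_lock;
		pthread_cond_t inflight_wait;
		struct node inflight_list;
	};

	static void unhook_inflight(struct ctx *ctx, struct node *req)
	{
		pthread_mutex_lock(&ctx->inflight_lock);
		/* list_del() */
		req->prev->next = req->next;
		req->next->prev = req->prev;
		/* wake a cancellation path waiting for the list to drain */
		pthread_cond_broadcast(&ctx->inflight_wait);
		pthread_mutex_unlock(&ctx->inflight_lock);
	}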
* [PATCH 03/10] io_uring: dismantle req early and remove need_iter
From: Pavel Begunkov @ 2020-06-28 9:52 UTC
To: Jens Axboe, io-uring
Every request in io_req_multi_free() has ->file set, so instead of
pointlessly deferring and counting reqs with a file, dismantle each
request in place and save it for batch deallocation.
This also saves us from potentially skipping io_cleanup_req(),
put_task(), etc., though that never actually happens, because ->file is
always there.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 14 +++-----------
1 file changed, 3 insertions(+), 11 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 28a66e85ef9f..f3c6506e9df1 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1540,22 +1540,16 @@ static void __io_free_req(struct io_kiocb *req)
struct req_batch {
void *reqs[IO_IOPOLL_BATCH];
int to_free;
- int need_iter;
};
static void io_free_req_many(struct io_ring_ctx *ctx, struct req_batch *rb)
{
if (!rb->to_free)
return;
- if (rb->need_iter) {
- int i;
- for (i = 0; i < rb->to_free; i++)
- io_dismantle_req(rb->reqs[i]);
- }
kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
percpu_ref_put_many(&ctx->refs, rb->to_free);
- rb->to_free = rb->need_iter = 0;
+ rb->to_free = 0;
}
static bool io_link_cancel_timeout(struct io_kiocb *req)
@@ -1846,9 +1840,7 @@ static inline bool io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
if ((req->flags & REQ_F_LINK_HEAD) || io_is_fallback_req(req))
return false;
- if (req->file || req->io)
- rb->need_iter++;
-
+ io_dismantle_req(req);
rb->reqs[rb->to_free++] = req;
if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
io_free_req_many(req->ctx, rb);
@@ -1900,7 +1892,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
/* order with ->result store in io_complete_rw_iopoll() */
smp_rmb();
- rb.to_free = rb.need_iter = 0;
+ rb.to_free = 0;
while (!list_empty(done)) {
int cflags = 0;
--
2.24.0
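The resulting batch is uniform: per-request teardown happens the moment a request joins the batch, and the flush only hands raw memory back in bulk. A standalone sketch of that shape, with free() standing in for kmem_cache_free_bulk() and all names hypothetical:

	#include <stdlib.h>

	#define BATCH 64

	struct req { int dummy; };

	/* per-item teardown: drop the file ref, cleanup, etc. */
	static void dismantle(struct req *req) { (void)req; }

	struct req_batch {
		struct req *reqs[BATCH];
		int to_free;
	};

	/* stand-in for kmem_cache_free_bulk(): memory only, no teardown */
	static void batch_flush(struct req_batch *rb)
	{
		for (int i = 0; i < rb->to_free; i++)
			free(rb->reqs[i]);
		rb->to_free = 0;
	}

	static void batch_add(struct req_batch *rb, struct req *req)
	{
		dismantle(req);			/* teardown in place... */
		rb->reqs[rb->to_free++] = req;	/* ...then park for bulk free */
		if (rb->to_free == BATCH)
			batch_flush(rb);
	}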
* [PATCH 04/10] io_uring: batch-free linked reqs as well
From: Pavel Begunkov @ 2020-06-28 9:52 UTC
To: Jens Axboe, io-uring
There is no reason not to batch deallocation of linked requests. Detach
the next request first, then handle the current one like everything
else in io_req_multi_free().
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 26 ++++++++++++++++----------
1 file changed, 16 insertions(+), 10 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index f3c6506e9df1..d9dd445e4e57 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1728,17 +1728,21 @@ static void io_req_task_queue(struct io_kiocb *req)
wake_up_process(tsk);
}
-static void io_free_req(struct io_kiocb *req)
+static void io_queue_next(struct io_kiocb *req)
{
struct io_kiocb *nxt = NULL;
io_req_find_next(req, &nxt);
- __io_free_req(req);
-
if (nxt)
io_req_task_queue(nxt);
}
+static void io_free_req(struct io_kiocb *req)
+{
+ io_queue_next(req);
+ __io_free_req(req);
+}
+
/*
* Drop reference to request, return next in chain (if there is one) if this
* was the last reference to this request.
@@ -1835,16 +1839,19 @@ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}
-static inline bool io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
+static inline void io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
{
- if ((req->flags & REQ_F_LINK_HEAD) || io_is_fallback_req(req))
- return false;
+ if (unlikely(io_is_fallback_req(req))) {
+ io_free_req(req);
+ return;
+ }
+ if (req->flags & REQ_F_LINK_HEAD)
+ io_queue_next(req);
io_dismantle_req(req);
rb->reqs[rb->to_free++] = req;
if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
io_free_req_many(req->ctx, rb);
- return true;
}
static int io_put_kbuf(struct io_kiocb *req)
@@ -1910,9 +1917,8 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
__io_cqring_fill_event(req, req->result, cflags);
(*nr_events)++;
- if (refcount_dec_and_test(&req->refs) &&
- !io_req_multi_free(&rb, req))
- io_free_req(req);
+ if (refcount_dec_and_test(&req->refs))
+ io_req_multi_free(&rb, req);
}
io_commit_cqring(ctx);
--
2.24.0
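The one ordering rule when linked requests go through the batch: the successor must be pulled out and queued before its predecessor's memory is recycled, since the link is only reachable through the request being freed. A sketch of just that rule, building on the batch sketch from the previous patch; the flags and link fields, F_LINK_HEAD, and execute() are assumptions added for illustration:

	#define F_LINK_HEAD 0x1

	/* assumed additions: req->flags, req->link, execute() hand-off */
	static void req_multi_free(struct req_batch *rb, struct req *req)
	{
		if (req->flags & F_LINK_HEAD) {
			struct req *nxt = req->link;

			req->link = NULL;
			if (nxt)
				execute(nxt);	/* queue the successor first... */
		}
		batch_add(rb, req);	/* ...then recycle req's memory */
	}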
* [PATCH 05/10] io_uring: cosmetic changes for batch free
From: Pavel Begunkov @ 2020-06-28 9:52 UTC
To: Jens Axboe, io-uring
Move all batch-free bits close to each other and rename them in a
consistent way.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 69 +++++++++++++++++++++++++++------------------------
1 file changed, 37 insertions(+), 32 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index d9dd445e4e57..f64cca727021 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1537,21 +1537,6 @@ static void __io_free_req(struct io_kiocb *req)
clear_bit_unlock(0, (unsigned long *) &req->ctx->fallback_req);
}
-struct req_batch {
- void *reqs[IO_IOPOLL_BATCH];
- int to_free;
-};
-
-static void io_free_req_many(struct io_ring_ctx *ctx, struct req_batch *rb)
-{
- if (!rb->to_free)
- return;
-
- kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
- percpu_ref_put_many(&ctx->refs, rb->to_free);
- rb->to_free = 0;
-}
-
static bool io_link_cancel_timeout(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
@@ -1743,6 +1728,41 @@ static void io_free_req(struct io_kiocb *req)
__io_free_req(req);
}
+struct req_batch {
+ void *reqs[IO_IOPOLL_BATCH];
+ int to_free;
+};
+
+static void __io_req_free_batch_flush(struct io_ring_ctx *ctx,
+ struct req_batch *rb)
+{
+ kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
+ percpu_ref_put_many(&ctx->refs, rb->to_free);
+ rb->to_free = 0;
+}
+
+static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
+ struct req_batch *rb)
+{
+ if (rb->to_free)
+ __io_req_free_batch_flush(ctx, rb);
+}
+
+static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
+{
+ if (unlikely(io_is_fallback_req(req))) {
+ io_free_req(req);
+ return;
+ }
+ if (req->flags & REQ_F_LINK_HEAD)
+ io_queue_next(req);
+
+ io_dismantle_req(req);
+ rb->reqs[rb->to_free++] = req;
+ if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
+ __io_req_free_batch_flush(req->ctx, rb);
+}
+
/*
* Drop reference to request, return next in chain (if there is one) if this
* was the last reference to this request.
@@ -1839,21 +1859,6 @@ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}
-static inline void io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
-{
- if (unlikely(io_is_fallback_req(req))) {
- io_free_req(req);
- return;
- }
- if (req->flags & REQ_F_LINK_HEAD)
- io_queue_next(req);
-
- io_dismantle_req(req);
- rb->reqs[rb->to_free++] = req;
- if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
- io_free_req_many(req->ctx, rb);
-}
-
static int io_put_kbuf(struct io_kiocb *req)
{
struct io_buffer *kbuf;
@@ -1918,13 +1923,13 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
(*nr_events)++;
if (refcount_dec_and_test(&req->refs))
- io_req_multi_free(&rb, req);
+ io_req_free_batch(&rb, req);
}
io_commit_cqring(ctx);
if (ctx->flags & IORING_SETUP_SQPOLL)
io_cqring_ev_posted(ctx);
- io_free_req_many(ctx, &rb);
+ io_req_free_batch_finish(ctx, &rb);
if (!list_empty(&again))
io_iopoll_queue(&again);
--
2.24.0
* [PATCH 06/10] io_uring: kill REQ_F_LINK_NEXT
From: Pavel Begunkov @ 2020-06-28 9:52 UTC
To: Jens Axboe, io-uring
After pulling nxt from a request, it's no longer a link's head, so
clear REQ_F_LINK_HEAD. The absence of this flag then also indicates
that there are no grabbed linked requests, so it takes over the role of
REQ_F_LINK_NEXT, which can be killed.
Linked timeouts still behave correctly, leaving the flag intact when
necessary.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 9 +--------
1 file changed, 1 insertion(+), 8 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index f64cca727021..b9f44c6b32f1 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -526,7 +526,6 @@ enum {
REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
REQ_F_LINK_HEAD_BIT,
- REQ_F_LINK_NEXT_BIT,
REQ_F_FAIL_LINK_BIT,
REQ_F_INFLIGHT_BIT,
REQ_F_CUR_POS_BIT,
@@ -565,8 +564,6 @@ enum {
/* head of a link */
REQ_F_LINK_HEAD = BIT(REQ_F_LINK_HEAD_BIT),
- /* already grabbed next link */
- REQ_F_LINK_NEXT = BIT(REQ_F_LINK_NEXT_BIT),
/* fail rest of links */
REQ_F_FAIL_LINK = BIT(REQ_F_FAIL_LINK_BIT),
/* on inflight list */
@@ -1559,10 +1556,6 @@ static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
struct io_ring_ctx *ctx = req->ctx;
bool wake_ev = false;
- /* Already got next link */
- if (req->flags & REQ_F_LINK_NEXT)
- return;
-
/*
* The list should never be empty when we are called here. But could
* potentially happen if the chain is messed up, check to be on the
@@ -1587,7 +1580,6 @@ static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
break;
}
- req->flags |= REQ_F_LINK_NEXT;
if (wake_ev)
io_cqring_ev_posted(ctx);
}
@@ -1628,6 +1620,7 @@ static void io_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
{
if (likely(!(req->flags & REQ_F_LINK_HEAD)))
return;
+ req->flags &= ~REQ_F_LINK_HEAD;
/*
* If LINK is set, we have dependent requests in this chain. If we
--
2.24.0
* [PATCH 07/10] io_uring: clean up req->result setting by rw
From: Pavel Begunkov @ 2020-06-28 9:52 UTC
To: Jens Axboe, io-uring
Assign req->result to io_size early in io_{read,write}(); that's
sufficient and makes the code more straightforward.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 9 ++-------
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index b9f44c6b32f1..5e0196393c4f 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2384,7 +2384,6 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
kiocb->ki_flags |= IOCB_HIPRI;
kiocb->ki_complete = io_complete_rw_iopoll;
- req->result = 0;
req->iopoll_completed = 0;
} else {
if (kiocb->ki_flags & IOCB_HIPRI)
@@ -2957,10 +2956,8 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
if (!force_nonblock)
kiocb->ki_flags &= ~IOCB_NOWAIT;
- req->result = 0;
io_size = ret;
- if (req->flags & REQ_F_LINK_HEAD)
- req->result = io_size;
+ req->result = io_size;
/* If the file doesn't support async, just async punt */
if (force_nonblock && !io_file_supports_async(req->file, READ))
@@ -3054,10 +3051,8 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
if (!force_nonblock)
req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
- req->result = 0;
io_size = ret;
- if (req->flags & REQ_F_LINK_HEAD)
- req->result = io_size;
+ req->result = io_size;
/* If the file doesn't support async, just async punt */
if (force_nonblock && !io_file_supports_async(req->file, WRITE))
--
2.24.0
* [PATCH 08/10] io_uring: fix missing wake_up io_rw_reissue()
From: Pavel Begunkov @ 2020-06-28 9:52 UTC
To: Jens Axboe, io-uring
Don't forget to wake up a process to which io_rw_reissue() added
task_work.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5e0196393c4f..79d5680219d1 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2172,6 +2172,7 @@ static bool io_rw_reissue(struct io_kiocb *req, long res)
ret = task_work_add(tsk, &req->task_work, true);
if (!ret)
return true;
+ wake_up_process(tsk);
#endif
return false;
}
--
2.24.0
* Re: [PATCH 08/10] io_uring: fix missing wake_up io_rw_reissue()
From: Jens Axboe @ 2020-06-28 14:12 UTC
To: Pavel Begunkov, io-uring
On 6/28/20 3:52 AM, Pavel Begunkov wrote:
> Don't forget to wake up a process to which io_rw_reissue() added
> task_work.
>
> Signed-off-by: Pavel Begunkov <[email protected]>
> ---
> fs/io_uring.c | 1 +
> 1 file changed, 1 insertion(+)
>
> diff --git a/fs/io_uring.c b/fs/io_uring.c
> index 5e0196393c4f..79d5680219d1 100644
> --- a/fs/io_uring.c
> +++ b/fs/io_uring.c
> @@ -2172,6 +2172,7 @@ static bool io_rw_reissue(struct io_kiocb *req, long res)
> ret = task_work_add(tsk, &req->task_work, true);
> if (!ret)
> return true;
> + wake_up_process(tsk);
> #endif
> return false;
> }
>
Actually, I spoke too soon, you'll want this wake_up_process() to be for
the success case. I've dropped this patch, please resend it.
--
Jens Axboe
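Presumably the resent version moves the wakeup onto the success path, along the lines of the following (a sketch of the fix Jens describes, not the actual v2):

	ret = task_work_add(tsk, &req->task_work, true);
	if (!ret) {
		wake_up_process(tsk);
		return true;
	}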
* Re: [PATCH 08/10] io_uring: fix missing wake_up io_rw_reissue()
From: Pavel Begunkov @ 2020-06-28 14:48 UTC
To: Jens Axboe, io-uring
On 28/06/2020 17:12, Jens Axboe wrote:
> On 6/28/20 3:52 AM, Pavel Begunkov wrote:
>> Don't forget to wake up a process to which io_rw_reissue() added
>> task_work.
>>
>> Signed-off-by: Pavel Begunkov <[email protected]>
>> ---
>> fs/io_uring.c | 1 +
>> 1 file changed, 1 insertion(+)
>>
>> diff --git a/fs/io_uring.c b/fs/io_uring.c
>> index 5e0196393c4f..79d5680219d1 100644
>> --- a/fs/io_uring.c
>> +++ b/fs/io_uring.c
>> @@ -2172,6 +2172,7 @@ static bool io_rw_reissue(struct io_kiocb *req, long res)
>> ret = task_work_add(tsk, &req->task_work, true);
>> if (!ret)
>> return true;
>> + wake_up_process(tsk);
>> #endif
>> return false;
>> }
>>
>
> Actually, I spoke too soon, you'll want this wake_up_process() to be for
> the success case. I've dropped this patch, please resend it.
Oh, a stupid mistake. Thanks for taking a look
--
Pavel Begunkov
* [PATCH 09/10] io_uring: do task_work_run() during iopoll
From: Pavel Begunkov @ 2020-06-28 9:52 UTC
To: Jens Axboe, io-uring
There are a lot of new users of task_work, and some task_work_add()
calls may happen while we are doing io polling. Make iopoll run
task_work_run() from time to time, so it doesn't keep polling for
requests that are just sitting there waiting on their task_work.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 79d5680219d1..75ec0d952cb5 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2052,6 +2052,8 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
*/
if (!(++iters & 7)) {
mutex_unlock(&ctx->uring_lock);
+ if (current->task_works)
+ task_work_run();
mutex_lock(&ctx->uring_lock);
}
--
2.24.0
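The (++iters & 7) test is zero on every 8th pass, so the poll loop drops uring_lock and drains pending task_work once per eight iterations rather than on every spin. The same periodic-maintenance shape in isolation, with hypothetical names:

	unsigned int iters = 0;

	while (keep_polling()) {
		poll_once();
		if (!(++iters & 7)) {		/* every 8th iteration */
			unlock();
			run_deferred_work();	/* e.g. task_work_run() */
			lock();
		}
	}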
* [PATCH 10/10] io_uring: fix iopoll -EAGAIN handling
From: Pavel Begunkov @ 2020-06-28 9:52 UTC
To: Jens Axboe, io-uring
req->iopoll() is not necessarily called by the task that submitted the
request. Because of that, it's dangerous to grab_env() and punt async
on -EAGAIN, as that may grab another task's mm and corrupt its memory.
Resubmit from the submitter task's context instead.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 11 ++++-------
1 file changed, 4 insertions(+), 7 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 75ec0d952cb5..f4b1ebc81949 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -892,6 +892,7 @@ enum io_mem_account {
ACCT_PINNED,
};
+static bool io_rw_reissue(struct io_kiocb *req, long res);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void io_put_req(struct io_kiocb *req);
static void io_double_put_req(struct io_kiocb *req);
@@ -1873,14 +1874,9 @@ static void io_iopoll_queue(struct list_head *again)
req = list_first_entry(again, struct io_kiocb, list);
list_del(&req->list);
- /* shouldn't happen unless io_uring is dying, cancel reqs */
- if (unlikely(!current->mm)) {
+ /* should have ->mm unless io_uring is dying, kill reqs then */
+ if (unlikely(!current->mm) || !io_rw_reissue(req, -EAGAIN))
io_complete_rw_common(&req->rw.kiocb, -EAGAIN, NULL);
- continue;
- }
-
- refcount_inc(&req->refs);
- io_queue_async_work(req);
} while (!list_empty(again));
}
@@ -2388,6 +2384,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
kiocb->ki_flags |= IOCB_HIPRI;
kiocb->ki_complete = io_complete_rw_iopoll;
req->iopoll_completed = 0;
+ io_get_req_task(req);
} else {
if (kiocb->ki_flags & IOCB_HIPRI)
return -EINVAL;
--
2.24.0
* Re: [PATCH 00/10] some fixing + refactoring batch-free
From: Jens Axboe @ 2020-06-28 14:09 UTC
To: Pavel Begunkov, io-uring
On 6/28/20 3:52 AM, Pavel Begunkov wrote:
> Firing away... All for 5.9, on top of the 5 previous patches fixing link
> task submission. I don't think there is much to discuss, so I didn't
> separate it into sub-patchsets. Let me know if I should.
Agree, this works fine as posted.
> Batching free is intended to be reused outside of iopoll, so
> it's a preparation, but it's nice by itself.
>
> [8/10] looks like it, but a double check would be nice.
Thanks, yes we need the wakeup there.
I have applied this on top, thanks Pavel!
--
Jens Axboe