* [PATCH 1/2] io_uring: allow finding next link independent of req reference count
2019-11-20 20:12 [PATCHSET v2 0/2] io_uring: close lookup gap for dependent work Jens Axboe
@ 2019-11-20 20:12 ` Jens Axboe
2019-11-20 20:12 ` [PATCH 2/2] io_uring: close lookup gap for dependent next work Jens Axboe
From: Jens Axboe @ 2019-11-20 20:12 UTC
To: io-uring; +Cc: asml.silence, Jens Axboe
We currently try to start the next link when we put the request, and
only if we were going to free the request. This means that the
optimization to continue executing requests from the same context often
fails, as we're not putting the final reference.

Add REQ_F_LINK_NEXT to track whether we have already grabbed the next
link, and allow io_uring to find the next request independent of the
request's reference count.
Signed-off-by: Jens Axboe <[email protected]>
---
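
As a side note for review, here is a minimal, self-contained sketch of
the idempotent "grab the next link once" pattern this patch introduces.
The types and names below are stand-ins for illustration, not the
kernel's struct io_kiocb or the actual helpers:

/* Stand-in request type, not the kernel struct io_kiocb */
struct req {
	unsigned int flags;
	struct req *next_link;
};

#define F_LINK_NEXT	8	/* already grabbed next link */

/*
 * Safe to call from any put path, any number of times: the flag makes
 * the lookup idempotent, so the dependent request is handed out exactly
 * once, independent of who drops the last reference.
 */
static void req_find_next(struct req *req, struct req **nxt)
{
	if (req->flags & F_LINK_NEXT)
		return;
	req->flags |= F_LINK_NEXT;
	*nxt = req->next_link;
}
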
 fs/io_uring.c | 20 +++++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 066b59ffb54e..132a890368bf 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -340,6 +340,7 @@ struct io_kiocb {
 #define REQ_F_NOWAIT		1	/* must not punt to workers */
 #define REQ_F_IOPOLL_COMPLETED	2	/* polled IO has completed */
 #define REQ_F_FIXED_FILE	4	/* ctx owns file */
+#define REQ_F_LINK_NEXT		8	/* already grabbed next link */
 #define REQ_F_IO_DRAIN		16	/* drain existing IO first */
 #define REQ_F_IO_DRAINED	32	/* drain done */
 #define REQ_F_LINK		64	/* linked sqes */
@@ -874,6 +875,10 @@ static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
 	struct io_kiocb *nxt;
 	bool wake_ev = false;
 
+	/* Already got next link */
+	if (req->flags & REQ_F_LINK_NEXT)
+		return;
+
 	/*
 	 * The list should never be empty when we are called here. But could
 	 * potentially happen if the chain is messed up, check to be on the
@@ -910,6 +915,7 @@ static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
 		break;
 	}
 
+	req->flags |= REQ_F_LINK_NEXT;
 	if (wake_ev)
 		io_cqring_ev_posted(ctx);
 }
@@ -946,12 +952,10 @@ static void io_fail_links(struct io_kiocb *req)
 	io_cqring_ev_posted(ctx);
 }
 
-static void io_free_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
+static void io_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
 {
-	if (likely(!(req->flags & REQ_F_LINK))) {
-		__io_free_req(req);
+	if (likely(!(req->flags & REQ_F_LINK)))
 		return;
-	}
 
 	/*
 	 * If LINK is set, we have dependent requests in this chain. If we
@@ -977,7 +981,11 @@ static void io_free_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
 	} else {
 		io_req_link_next(req, nxt);
 	}
+}
 
+static void io_free_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
+{
+	io_req_find_next(req, nxt);
 	__io_free_req(req);
 }
 
@@ -994,8 +1002,10 @@ static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
 {
 	struct io_kiocb *nxt = NULL;
 
+	io_req_find_next(req, &nxt);
+
 	if (refcount_dec_and_test(&req->refs))
-		io_free_req_find_next(req, &nxt);
+		__io_free_req(req);
 
 	if (nxt) {
 		if (nxtptr)
--
2.24.0
* [PATCH 2/2] io_uring: close lookup gap for dependent next work
2019-11-20 20:12 [PATCHSET v2 0/2] io_uring: close lookup gap for dependent work Jens Axboe
2019-11-20 20:12 ` [PATCH 1/2] io_uring: allow finding next link independent of req reference count Jens Axboe
@ 2019-11-20 20:12 ` Jens Axboe
From: Jens Axboe @ 2019-11-20 20:12 UTC
To: io-uring; +Cc: asml.silence, Jens Axboe
When we find new work to process within the work handler, we queue the
linked timeout before we have issued the new work. This can be
problematic for very short timeouts, as we have a window where the new
work isn't yet visible to lookups, so a timeout that fires in that
window cannot find the work it is meant to cancel.

Allow the work handler to store a callback function for this in the
work item, and flag it with IO_WQ_WORK_CB if the caller has done so. If
that flag is set, io-wq will invoke the callback once it has set up the
new work item.
Reported-by: Pavel Begunkov <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
---
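
For review, a minimal userspace sketch of the deferred-callback
mechanism, with stand-in types and names rather than the real io-wq
structures: the worker runs the stored callback only after the new work
item is in place, which is what closes the visibility window for short
timeouts:

struct work;

struct work_cb {
	void (*fn)(void *data);
	void *data;
};

struct work {
	struct work_cb cb;	/* the patch makes this a union with the list head */
	void (*func)(struct work **workptr);
	unsigned int flags;
};

#define WORK_CB	128

/* Worker side: invoke the callback once the work item is set up */
static void handle_work(struct work *work)
{
	if (work->flags & WORK_CB)
		work->cb.fn(work->cb.data);	/* e.g. arm the linked timeout */
	work->func(&work);
}
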
 fs/io-wq.c    |  3 +++
 fs/io-wq.h    | 12 +++++++++++-
 fs/io_uring.c | 14 ++++++++++++--
 3 files changed, 26 insertions(+), 3 deletions(-)

diff --git a/fs/io-wq.c b/fs/io-wq.c
index b4bc377dda61..2666384aaf44 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -427,6 +427,9 @@ static void io_worker_handle_work(struct io_worker *worker)
 		worker->cur_work = work;
 		spin_unlock_irq(&worker->lock);
 
+		if (work->flags & IO_WQ_WORK_CB)
+			work->cb.fn(work->cb.data);
+
 		if ((work->flags & IO_WQ_WORK_NEEDS_FILES) &&
 		    current->files != work->files) {
 			task_lock(current);
diff --git a/fs/io-wq.h b/fs/io-wq.h
index 4b29f922f80c..892989f3e41e 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -11,6 +11,7 @@ enum {
 	IO_WQ_WORK_NEEDS_FILES	= 16,
 	IO_WQ_WORK_UNBOUND	= 32,
 	IO_WQ_WORK_INTERNAL	= 64,
+	IO_WQ_WORK_CB		= 128,
 
 	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
 };
@@ -21,8 +22,17 @@ enum io_wq_cancel {
 	IO_WQ_CANCEL_NOTFOUND,	/* work not found */
 };
 
+struct io_wq_work;
+struct io_wq_work_cb {
+	void (*fn)(void *data);
+	void *data;
+};
+
 struct io_wq_work {
-	struct list_head list;
+	union {
+		struct list_head list;
+		struct io_wq_work_cb cb;
+	};
 	void (*func)(struct io_wq_work **);
 	unsigned flags;
 	struct files_struct *files;
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 132a890368bf..6175e2e195c0 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2664,6 +2664,13 @@ static int __io_submit_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
 	return 0;
 }
 
+static void io_link_work_cb(void *data)
+{
+	struct io_kiocb *link = data;
+
+	io_queue_linked_timeout(link);
+}
+
 static void io_wq_submit_work(struct io_wq_work **workptr)
 {
 	struct io_wq_work *work = *workptr;
@@ -2710,8 +2717,11 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
 		io_prep_async_work(nxt, &link);
 		*workptr = &nxt->work;
 
-		if (link)
-			io_queue_linked_timeout(link);
+		if (link) {
+			nxt->work.flags |= IO_WQ_WORK_CB;
+			nxt->work.cb.fn = io_link_work_cb;
+			nxt->work.cb.data = link;
+		}
 	}
 }
 
--
2.24.0