* [PATCH for-next 01/10] io_uring: fix multi ctx cancellation
From: Pavel Begunkov @ 2022-06-20 0:25 UTC
To: io-uring; +Cc: Jens Axboe, asml.silence
io_uring_try_cancel_requests() loops until there is nothing left to do
with the ring; however, there might be several rings, and they might have
dependencies between them, e.g. via poll requests.
Instead of cancelling rings one by one, try to cancel them all and only
then loop again if there is still potentially some work to do.
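The exit-time flow then looks roughly like the sketch below. This is
illustrative only; for_each_ring() is a hypothetical stand-in for walking
the task's tctx nodes, not a real helper:

	bool loop;

	do {
		loop = false;
		for_each_ring(ctx)	/* hypothetical: every ring the task uses */
			loop |= io_uring_try_cancel_requests(ctx, current, true);
		if (loop)
			cond_resched();	/* let dependent rings make progress */
	} while (loop);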
Signed-off-by: Pavel Begunkov <[email protected]>
---
io_uring/io_uring.c | 87 ++++++++++++++++++++++++---------------------
1 file changed, 46 insertions(+), 41 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 0f18a86f3f8c..2d1d4752b955 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -132,7 +132,7 @@ struct io_defer_entry {
#define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
#define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)
-static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
+static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
struct task_struct *task,
bool cancel_all);
@@ -2648,7 +2648,9 @@ static __cold void io_ring_exit_work(struct work_struct *work)
* as nobody else will be looking for them.
*/
do {
- io_uring_try_cancel_requests(ctx, NULL, true);
+ while (io_uring_try_cancel_requests(ctx, NULL, true))
+ cond_resched();
+
if (ctx->sq_data) {
struct io_sq_data *sqd = ctx->sq_data;
struct task_struct *tsk;
@@ -2806,53 +2808,48 @@ static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
return ret;
}
-static __cold void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
+static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
struct task_struct *task,
bool cancel_all)
{
struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
struct io_uring_task *tctx = task ? task->io_uring : NULL;
+ enum io_wq_cancel cret;
+ bool ret = false;
/* failed during ring init, it couldn't have issued any requests */
if (!ctx->rings)
- return;
-
- while (1) {
- enum io_wq_cancel cret;
- bool ret = false;
+ return false;
- if (!task) {
- ret |= io_uring_try_cancel_iowq(ctx);
- } else if (tctx && tctx->io_wq) {
- /*
- * Cancels requests of all rings, not only @ctx, but
- * it's fine as the task is in exit/exec.
- */
- cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
- &cancel, true);
- ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
- }
+ if (!task) {
+ ret |= io_uring_try_cancel_iowq(ctx);
+ } else if (tctx && tctx->io_wq) {
+ /*
+ * Cancels requests of all rings, not only @ctx, but
+ * it's fine as the task is in exit/exec.
+ */
+ cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
+ &cancel, true);
+ ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
+ }
- /* SQPOLL thread does its own polling */
- if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
- (ctx->sq_data && ctx->sq_data->thread == current)) {
- while (!wq_list_empty(&ctx->iopoll_list)) {
- io_iopoll_try_reap_events(ctx);
- ret = true;
- }
+ /* SQPOLL thread does its own polling */
+ if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
+ (ctx->sq_data && ctx->sq_data->thread == current)) {
+ while (!wq_list_empty(&ctx->iopoll_list)) {
+ io_iopoll_try_reap_events(ctx);
+ ret = true;
}
-
- ret |= io_cancel_defer_files(ctx, task, cancel_all);
- mutex_lock(&ctx->uring_lock);
- ret |= io_poll_remove_all(ctx, task, cancel_all);
- mutex_unlock(&ctx->uring_lock);
- ret |= io_kill_timeouts(ctx, task, cancel_all);
- if (task)
- ret |= io_run_task_work();
- if (!ret)
- break;
- cond_resched();
}
+
+ ret |= io_cancel_defer_files(ctx, task, cancel_all);
+ mutex_lock(&ctx->uring_lock);
+ ret |= io_poll_remove_all(ctx, task, cancel_all);
+ mutex_unlock(&ctx->uring_lock);
+ ret |= io_kill_timeouts(ctx, task, cancel_all);
+ if (task)
+ ret |= io_run_task_work();
+ return ret;
}
static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
@@ -2882,6 +2879,8 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
atomic_inc(&tctx->in_idle);
do {
+ bool loop = false;
+
io_uring_drop_tctx_refs(current);
/* read completions before cancelations */
inflight = tctx_inflight(tctx, !cancel_all);
@@ -2896,13 +2895,19 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
/* sqpoll task will cancel all its requests */
if (node->ctx->sq_data)
continue;
- io_uring_try_cancel_requests(node->ctx, current,
- cancel_all);
+ loop |= io_uring_try_cancel_requests(node->ctx,
+ current, cancel_all);
}
} else {
list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
- io_uring_try_cancel_requests(ctx, current,
- cancel_all);
+ loop |= io_uring_try_cancel_requests(ctx,
+ current,
+ cancel_all);
+ }
+
+ if (loop) {
+ cond_resched();
+ continue;
}
prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
--
2.36.1
* [PATCH for-next 02/10] io_uring: improve task exit timeout cancellations
From: Pavel Begunkov @ 2022-06-20 0:25 UTC
To: io-uring; +Cc: Jens Axboe, asml.silence
Don't spin trying to cancel timeouts that are reachable but not
cancellable, e.g. already executing.
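"Not cancellable" here means hrtimer_try_to_cancel() returns -1, i.e. the
timer callback is already running. A minimal sketch of the reworked helper,
assuming the timer sits in req->async_data as struct io_timeout_data
(simplified from the real io_kill_timeout()):

	static bool io_kill_timeout(struct io_kiocb *req, int status)
	{
		struct io_timeout_data *io = req->async_data;

		if (hrtimer_try_to_cancel(&io->timer) != -1) {
			/* disarmed: queue a CQE with @status, drop the request */
			io_req_tw_post_queue(req, status, 0);
			return true;
		}
		return false;	/* already firing, not counted as cancelled */
	}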
Signed-off-by: Pavel Begunkov <[email protected]>
---
io_uring/timeout.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index 557c637af158..a79a7d6ef1b3 100644
--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -49,7 +49,7 @@ static inline void io_put_req(struct io_kiocb *req)
}
}
-static void io_kill_timeout(struct io_kiocb *req, int status)
+static bool io_kill_timeout(struct io_kiocb *req, int status)
__must_hold(&req->ctx->completion_lock)
__must_hold(&req->ctx->timeout_lock)
{
@@ -64,7 +64,9 @@ static void io_kill_timeout(struct io_kiocb *req, int status)
atomic_read(&req->ctx->cq_timeouts) + 1);
list_del_init(&timeout->list);
io_req_tw_post_queue(req, status, 0);
+ return true;
}
+ return false;
}
__cold void io_flush_timeouts(struct io_ring_ctx *ctx)
@@ -620,10 +622,9 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
struct io_kiocb *req = cmd_to_io_kiocb(timeout);
- if (io_match_task(req, tsk, cancel_all)) {
- io_kill_timeout(req, -ECANCELED);
+ if (io_match_task(req, tsk, cancel_all) &&
+ io_kill_timeout(req, -ECANCELED))
canceled++;
- }
}
spin_unlock_irq(&ctx->timeout_lock);
io_commit_cqring(ctx);
--
2.36.1
* [PATCH for-next 03/10] io_uring: fix io_poll_remove_all clang warnings
From: Pavel Begunkov @ 2022-06-20 0:25 UTC
To: io-uring; +Cc: Jens Axboe, asml.silence, Nathan Chancellor
clang complains about bitwise operations on bools; add a bit more
verbosity to make it clearer that we want to call io_poll_remove_all_table()
twice, with different arguments.
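The diagnostic is most likely clang's -Wbitwise-instead-of-logical; a
standalone illustration with hypothetical helpers a() and b():

	bool a(void);
	bool b(void);

	bool both_old(void)
	{
		return a() | b();	/* clang: bitwise '|' with boolean operands */
	}

	bool both_new(void)
	{
		bool ret;

		ret = a();	/* both helpers are still always called */
		ret |= b();	/* compound assignment doesn't trigger the warning */
		return ret;
	}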
Reported-by: Nathan Chancellor <[email protected]>
Signed-off-by: Pavel Begunkov <[email protected]>
---
io_uring/poll.c | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/io_uring/poll.c b/io_uring/poll.c
index d4bfc6d945cf..9af6a34222a9 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -589,8 +589,11 @@ __cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
bool cancel_all)
__must_hold(&ctx->uring_lock)
{
- return io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all) |
- io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all);
+ bool ret;
+
+ ret = io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all);
+ ret |= io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all);
+ return ret;
}
static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
--
2.36.1
* [PATCH for-next 04/10] io_uring: hide eventfd assumptions in eventfd paths
From: Pavel Begunkov @ 2022-06-20 0:25 UTC
To: io-uring; +Cc: Jens Axboe, asml.silence
Some io_uring-eventfd users assume that there won't be spurious wakeups.
That assumption has to be honoured by all io_cqring_ev_posted() callers,
which is inconvenient and from time to time leads to problems, but it
should be maintained so as not to break userspace.
Instead of making the callers track whether a CQE was posted or not,
hide it inside io_eventfd_signal(). It saves the ->cached_cq_tail value it
saw last time and triggers the eventfd only when ->cached_cq_tail has
changed since then.
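For illustration, the userspace pattern being protected, as a rough
liburing-based sketch (error handling omitted):

	#include <stdint.h>
	#include <sys/eventfd.h>
	#include <unistd.h>
	#include <liburing.h>

	int main(void)
	{
		struct io_uring ring;
		uint64_t cnt;
		int efd;

		io_uring_queue_init(8, &ring, 0);
		efd = eventfd(0, 0);
		io_uring_register_eventfd(&ring, efd);

		/* assumption: the counter moving implies at least one new CQE */
		while (read(efd, &cnt, sizeof(cnt)) == sizeof(cnt)) {
			/* reap CQEs, e.g. with io_uring_peek_cqe() */
		}
		return 0;
	}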
Signed-off-by: Pavel Begunkov <[email protected]>
---
include/linux/io_uring_types.h | 2 ++
io_uring/io_uring.c | 44 ++++++++++++++++++++--------------
io_uring/timeout.c | 3 +--
3 files changed, 29 insertions(+), 20 deletions(-)
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 6bcd7bff6479..5987f8acca38 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -314,6 +314,8 @@ struct io_ring_ctx {
struct list_head defer_list;
unsigned sq_thread_idle;
+ /* protected by ->completion_lock */
+ unsigned evfd_last_cq_tail;
};
enum {
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 2d1d4752b955..ded42d884c49 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -473,6 +473,22 @@ static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
static void io_eventfd_signal(struct io_ring_ctx *ctx)
{
struct io_ev_fd *ev_fd;
+ bool skip;
+
+ spin_lock(&ctx->completion_lock);
+ /*
+ * Eventfd should only get triggered when at least one event has been
+ * posted. Some applications rely on the eventfd notification count only
+ * changing IFF a new CQE has been added to the CQ ring. There's no
+ * depedency on 1:1 relationship between how many times this function is
+ * called (and hence the eventfd count) and number of CQEs posted to the
+ * CQ ring.
+ */
+ skip = ctx->cached_cq_tail == ctx->evfd_last_cq_tail;
+ ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
+ spin_unlock(&ctx->completion_lock);
+ if (skip)
+ return;
rcu_read_lock();
/*
@@ -511,13 +527,6 @@ void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
io_eventfd_signal(ctx);
}
-/*
- * This should only get called when at least one event has been posted.
- * Some applications rely on the eventfd notification count only changing
- * IFF a new CQE has been added to the CQ ring. There's no depedency on
- * 1:1 relationship between how many times this function is called (and
- * hence the eventfd count) and number of CQEs posted to the CQ ring.
- */
void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
@@ -530,7 +539,7 @@ void io_cqring_ev_posted(struct io_ring_ctx *ctx)
/* Returns true if there are no backlogged entries after the flush */
static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
{
- bool all_flushed, posted;
+ bool all_flushed;
size_t cqe_size = sizeof(struct io_uring_cqe);
if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
@@ -539,7 +548,6 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
if (ctx->flags & IORING_SETUP_CQE32)
cqe_size <<= 1;
- posted = false;
spin_lock(&ctx->completion_lock);
while (!list_empty(&ctx->cq_overflow_list)) {
struct io_uring_cqe *cqe = io_get_cqe(ctx);
@@ -554,7 +562,6 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
else
io_account_cq_overflow(ctx);
- posted = true;
list_del(&ocqe->list);
kfree(ocqe);
}
@@ -567,8 +574,7 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
io_commit_cqring(ctx);
spin_unlock(&ctx->completion_lock);
- if (posted)
- io_cqring_ev_posted(ctx);
+ io_cqring_ev_posted(ctx);
return all_flushed;
}
@@ -758,8 +764,7 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx,
filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
io_commit_cqring(ctx);
spin_unlock(&ctx->completion_lock);
- if (filled)
- io_cqring_ev_posted(ctx);
+ io_cqring_ev_posted(ctx);
return filled;
}
@@ -940,14 +945,12 @@ __cold void io_free_req(struct io_kiocb *req)
static void __io_req_find_next_prep(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
- bool posted;
spin_lock(&ctx->completion_lock);
- posted = io_disarm_next(req);
+ io_disarm_next(req);
io_commit_cqring(ctx);
spin_unlock(&ctx->completion_lock);
- if (posted)
- io_cqring_ev_posted(ctx);
+ io_cqring_ev_posted(ctx);
}
static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
@@ -2428,6 +2431,11 @@ static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
kfree(ev_fd);
return ret;
}
+
+ spin_lock(&ctx->completion_lock);
+ ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
+ spin_unlock(&ctx->completion_lock);
+
ev_fd->eventfd_async = eventfd_async;
ctx->has_evfd = true;
rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index a79a7d6ef1b3..424b2fc858b8 100644
--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -629,7 +629,6 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
spin_unlock_irq(&ctx->timeout_lock);
io_commit_cqring(ctx);
spin_unlock(&ctx->completion_lock);
- if (canceled != 0)
- io_cqring_ev_posted(ctx);
+ io_cqring_ev_posted(ctx);
return canceled != 0;
}
--
2.36.1
* [PATCH for-next 05/10] io_uring: introduce locking helpers for CQE posting
From: Pavel Begunkov @ 2022-06-20 0:25 UTC
To: io-uring; +Cc: Jens Axboe, asml.silence
spin_lock(&ctx->completion_lock);
/* post CQEs */
io_commit_cqring(ctx);
spin_unlock(&ctx->completion_lock);
io_cqring_ev_posted(ctx);
We have many places repeating this sequence, and the three-function
unlock section is not ideal from a maintenance perspective and also makes
it harder to add new locking/sync tricks.
Introduce two helpers: io_cq_lock(), which is simple and only grabs
->completion_lock, and io_cq_unlock_post(), which encapsulates the
three-call section.
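With the helpers, the open-coded sequence above collapses to:

	io_cq_lock(ctx);
	/* post CQEs, e.g. io_fill_cqe_aux(ctx, user_data, res, cflags) */
	io_cq_unlock_post(ctx);

A static inline __io_cq_unlock_post() variant is used for the hot
__io_submit_flush_completions() path.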
Signed-off-by: Pavel Begunkov <[email protected]>
---
io_uring/io_uring.c | 57 +++++++++++++++++++++------------------------
io_uring/io_uring.h | 9 ++++++-
io_uring/timeout.c | 6 ++---
3 files changed, 36 insertions(+), 36 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index ded42d884c49..82a9e4e2a3e2 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -527,7 +527,7 @@ void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
io_eventfd_signal(ctx);
}
-void io_cqring_ev_posted(struct io_ring_ctx *ctx)
+static inline void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
ctx->has_evfd))
@@ -536,6 +536,19 @@ void io_cqring_ev_posted(struct io_ring_ctx *ctx)
io_cqring_wake(ctx);
}
+static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
+ __releases(ctx->completion_lock)
+{
+ io_commit_cqring(ctx);
+ spin_unlock(&ctx->completion_lock);
+ io_cqring_ev_posted(ctx);
+}
+
+void io_cq_unlock_post(struct io_ring_ctx *ctx)
+{
+ __io_cq_unlock_post(ctx);
+}
+
/* Returns true if there are no backlogged entries after the flush */
static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
{
@@ -548,7 +561,7 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
if (ctx->flags & IORING_SETUP_CQE32)
cqe_size <<= 1;
- spin_lock(&ctx->completion_lock);
+ io_cq_lock(ctx);
while (!list_empty(&ctx->cq_overflow_list)) {
struct io_uring_cqe *cqe = io_get_cqe(ctx);
struct io_overflow_cqe *ocqe;
@@ -572,9 +585,7 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
}
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- io_cqring_ev_posted(ctx);
+ io_cq_unlock_post(ctx);
return all_flushed;
}
@@ -760,11 +771,9 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx,
{
bool filled;
- spin_lock(&ctx->completion_lock);
+ io_cq_lock(ctx);
filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- io_cqring_ev_posted(ctx);
+ io_cq_unlock_post(ctx);
return filled;
}
@@ -810,11 +819,9 @@ void io_req_complete_post(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
- spin_lock(&ctx->completion_lock);
+ io_cq_lock(ctx);
__io_req_complete_post(req);
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- io_cqring_ev_posted(ctx);
+ io_cq_unlock_post(ctx);
}
inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags)
@@ -946,11 +953,9 @@ static void __io_req_find_next_prep(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
- spin_lock(&ctx->completion_lock);
+ io_cq_lock(ctx);
io_disarm_next(req);
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- io_cqring_ev_posted(ctx);
+ io_cq_unlock_post(ctx);
}
static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
@@ -984,13 +989,6 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked)
percpu_ref_put(&ctx->refs);
}
-static inline void ctx_commit_and_unlock(struct io_ring_ctx *ctx)
-{
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- io_cqring_ev_posted(ctx);
-}
-
static void handle_prev_tw_list(struct io_wq_work_node *node,
struct io_ring_ctx **ctx, bool *uring_locked)
{
@@ -1006,7 +1004,7 @@ static void handle_prev_tw_list(struct io_wq_work_node *node,
if (req->ctx != *ctx) {
if (unlikely(!*uring_locked && *ctx))
- ctx_commit_and_unlock(*ctx);
+ io_cq_unlock_post(*ctx);
ctx_flush_and_put(*ctx, uring_locked);
*ctx = req->ctx;
@@ -1014,7 +1012,7 @@ static void handle_prev_tw_list(struct io_wq_work_node *node,
*uring_locked = mutex_trylock(&(*ctx)->uring_lock);
percpu_ref_get(&(*ctx)->refs);
if (unlikely(!*uring_locked))
- spin_lock(&(*ctx)->completion_lock);
+ io_cq_lock(*ctx);
}
if (likely(*uring_locked)) {
req->io_task_work.func(req, uring_locked);
@@ -1026,7 +1024,7 @@ static void handle_prev_tw_list(struct io_wq_work_node *node,
} while (node);
if (unlikely(!*uring_locked))
- ctx_commit_and_unlock(*ctx);
+ io_cq_unlock_post(*ctx);
}
static void handle_tw_list(struct io_wq_work_node *node,
@@ -1261,10 +1259,7 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
if (!(req->flags & REQ_F_CQE_SKIP))
__io_fill_cqe_req(ctx, req);
}
-
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- io_cqring_ev_posted(ctx);
+ __io_cq_unlock_post(ctx);
io_free_batch_list(ctx, state->compl_reqs.first);
INIT_WQ_LIST(&state->compl_reqs);
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index bdc62727638b..738fb96575ab 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -24,7 +24,6 @@ void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
void io_req_complete_post(struct io_kiocb *req);
void __io_req_complete_post(struct io_kiocb *req);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
-void io_cqring_ev_posted(struct io_ring_ctx *ctx);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);
@@ -66,6 +65,14 @@ bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
#define io_for_each_link(pos, head) \
for (pos = (head); pos; pos = pos->link)
+static inline void io_cq_lock(struct io_ring_ctx *ctx)
+ __acquires(ctx->completion_lock)
+{
+ spin_lock(&ctx->completion_lock);
+}
+
+void io_cq_unlock_post(struct io_ring_ctx *ctx);
+
static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index 424b2fc858b8..7e2c341f9762 100644
--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -617,7 +617,7 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
struct io_timeout *timeout, *tmp;
int canceled = 0;
- spin_lock(&ctx->completion_lock);
+ io_cq_lock(ctx);
spin_lock_irq(&ctx->timeout_lock);
list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
struct io_kiocb *req = cmd_to_io_kiocb(timeout);
@@ -627,8 +627,6 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
canceled++;
}
spin_unlock_irq(&ctx->timeout_lock);
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- io_cqring_ev_posted(ctx);
+ io_cq_unlock_post(ctx);
return canceled != 0;
}
--
2.36.1
* [PATCH for-next 06/10] io_uring: add io_commit_cqring_flush()
From: Pavel Begunkov @ 2022-06-20 0:25 UTC
To: io-uring; +Cc: Jens Axboe, asml.silence
Since __io_commit_cqring_flush() users have moved to different files,
introduce an io_commit_cqring_flush() helper and encapsulate all the
flag-testing details inside it.
Signed-off-by: Pavel Begunkov <[email protected]>
---
io_uring/io_uring.c | 5 +----
io_uring/io_uring.h | 6 ++++++
io_uring/rw.c | 5 +----
3 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 82a9e4e2a3e2..0be942ca91c4 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -529,10 +529,7 @@ void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
static inline void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
- if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
- ctx->has_evfd))
- __io_commit_cqring_flush(ctx);
-
+ io_commit_cqring_flush(ctx);
io_cqring_wake(ctx);
}
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 738fb96575ab..afca7ff8956c 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -229,4 +229,10 @@ static inline void io_req_add_compl_list(struct io_kiocb *req)
wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}
+static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
+{
+ if (unlikely(ctx->off_timeout_used || ctx->drain_active || ctx->has_evfd))
+ __io_commit_cqring_flush(ctx);
+}
+
#endif
diff --git a/io_uring/rw.c b/io_uring/rw.c
index f5567d52d2af..5660b1c95641 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -1015,10 +1015,7 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
{
- if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
- ctx->has_evfd))
- __io_commit_cqring_flush(ctx);
-
+ io_commit_cqring_flush(ctx);
if (ctx->flags & IORING_SETUP_SQPOLL)
io_cqring_wake(ctx);
}
--
2.36.1
* [PATCH for-next 07/10] io_uring: opcode independent fixed buf import
From: Pavel Begunkov @ 2022-06-20 0:25 UTC
To: io-uring; +Cc: Jens Axboe, asml.silence
Fixed buffers are generic infrastructure; make io_import_fixed() opcode
agnostic.
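The importer's signature then carries the buffer description explicitly,
so the read/write path (and later any other opcode) can call it directly:

	static int io_import_fixed(int ddir, struct iov_iter *iter,
				   struct io_mapped_ubuf *imu,
				   u64 buf_addr, size_t len);

	/* rw.c call site: the rw-specific fields stay with the caller */
	ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len);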
Signed-off-by: Pavel Begunkov <[email protected]>
---
io_uring/rw.c | 21 +++++++--------------
1 file changed, 7 insertions(+), 14 deletions(-)
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 5660b1c95641..4e5d96040cdc 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -273,14 +273,15 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
return IOU_ISSUE_SKIP_COMPLETE;
}
-static int __io_import_fixed(struct io_kiocb *req, int ddir,
- struct iov_iter *iter, struct io_mapped_ubuf *imu)
+static int io_import_fixed(int ddir, struct iov_iter *iter,
+ struct io_mapped_ubuf *imu,
+ u64 buf_addr, size_t len)
{
- struct io_rw *rw = io_kiocb_to_cmd(req);
- size_t len = rw->len;
- u64 buf_end, buf_addr = rw->addr;
+ u64 buf_end;
size_t offset;
+ if (WARN_ON_ONCE(!imu))
+ return -EFAULT;
if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
return -EFAULT;
/* not inside the mapped region */
@@ -332,14 +333,6 @@ static int __io_import_fixed(struct io_kiocb *req, int ddir,
return 0;
}
-static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
- unsigned int issue_flags)
-{
- if (WARN_ON_ONCE(!req->imu))
- return -EFAULT;
- return __io_import_fixed(req, rw, iter, req->imu);
-}
-
#ifdef CONFIG_COMPAT
static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
unsigned int issue_flags)
@@ -426,7 +419,7 @@ static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
ssize_t ret;
if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
- ret = io_import_fixed(req, ddir, iter, issue_flags);
+ ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len);
if (ret)
return ERR_PTR(ret);
return NULL;
--
2.36.1
* [PATCH for-next 08/10] io_uring: move io_import_fixed()
From: Pavel Begunkov @ 2022-06-20 0:25 UTC
To: io-uring; +Cc: Jens Axboe, asml.silence
Move io_import_fixed() into rsrc.c where it belongs.
Signed-off-by: Pavel Begunkov <[email protected]>
---
io_uring/rsrc.c | 60 +++++++++++++++++++++++++++++++++++++++++++++++++
io_uring/rsrc.h | 3 +++
io_uring/rw.c | 60 -------------------------------------------------
3 files changed, 63 insertions(+), 60 deletions(-)
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index c10c512aa71b..3a2a5ef263f0 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -1307,3 +1307,63 @@ int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
io_rsrc_node_switch(ctx, NULL);
return ret;
}
+
+int io_import_fixed(int ddir, struct iov_iter *iter,
+ struct io_mapped_ubuf *imu,
+ u64 buf_addr, size_t len)
+{
+ u64 buf_end;
+ size_t offset;
+
+ if (WARN_ON_ONCE(!imu))
+ return -EFAULT;
+ if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
+ return -EFAULT;
+ /* not inside the mapped region */
+ if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
+ return -EFAULT;
+
+ /*
+ * May not be a start of buffer, set size appropriately
+ * and advance us to the beginning.
+ */
+ offset = buf_addr - imu->ubuf;
+ iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);
+
+ if (offset) {
+ /*
+ * Don't use iov_iter_advance() here, as it's really slow for
+ * using the latter parts of a big fixed buffer - it iterates
+ * over each segment manually. We can cheat a bit here, because
+ * we know that:
+ *
+ * 1) it's a BVEC iter, we set it up
+ * 2) all bvecs are PAGE_SIZE in size, except potentially the
+ * first and last bvec
+ *
+ * So just find our index, and adjust the iterator afterwards.
+ * If the offset is within the first bvec (or the whole first
+ * bvec, just use iov_iter_advance(). This makes it easier
+ * since we can just skip the first segment, which may not
+ * be PAGE_SIZE aligned.
+ */
+ const struct bio_vec *bvec = imu->bvec;
+
+ if (offset <= bvec->bv_len) {
+ iov_iter_advance(iter, offset);
+ } else {
+ unsigned long seg_skip;
+
+ /* skip first vec */
+ offset -= bvec->bv_len;
+ seg_skip = 1 + (offset >> PAGE_SHIFT);
+
+ iter->bvec = bvec + seg_skip;
+ iter->nr_segs -= seg_skip;
+ iter->count -= bvec->bv_len + offset;
+ iter->iov_offset = offset & ~PAGE_MASK;
+ }
+ }
+
+ return 0;
+}
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index 03f26516e994..87f58315b247 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -64,6 +64,9 @@ int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
void io_rsrc_node_switch(struct io_ring_ctx *ctx,
struct io_rsrc_data *data_to_kill);
+int io_import_fixed(int ddir, struct iov_iter *iter,
+ struct io_mapped_ubuf *imu,
+ u64 buf_addr, size_t len);
void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
int io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 4e5d96040cdc..9166d8166b82 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -273,66 +273,6 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
return IOU_ISSUE_SKIP_COMPLETE;
}
-static int io_import_fixed(int ddir, struct iov_iter *iter,
- struct io_mapped_ubuf *imu,
- u64 buf_addr, size_t len)
-{
- u64 buf_end;
- size_t offset;
-
- if (WARN_ON_ONCE(!imu))
- return -EFAULT;
- if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
- return -EFAULT;
- /* not inside the mapped region */
- if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
- return -EFAULT;
-
- /*
- * May not be a start of buffer, set size appropriately
- * and advance us to the beginning.
- */
- offset = buf_addr - imu->ubuf;
- iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);
-
- if (offset) {
- /*
- * Don't use iov_iter_advance() here, as it's really slow for
- * using the latter parts of a big fixed buffer - it iterates
- * over each segment manually. We can cheat a bit here, because
- * we know that:
- *
- * 1) it's a BVEC iter, we set it up
- * 2) all bvecs are PAGE_SIZE in size, except potentially the
- * first and last bvec
- *
- * So just find our index, and adjust the iterator afterwards.
- * If the offset is within the first bvec (or the whole first
- * bvec, just use iov_iter_advance(). This makes it easier
- * since we can just skip the first segment, which may not
- * be PAGE_SIZE aligned.
- */
- const struct bio_vec *bvec = imu->bvec;
-
- if (offset <= bvec->bv_len) {
- iov_iter_advance(iter, offset);
- } else {
- unsigned long seg_skip;
-
- /* skip first vec */
- offset -= bvec->bv_len;
- seg_skip = 1 + (offset >> PAGE_SHIFT);
-
- iter->bvec = bvec + seg_skip;
- iter->nr_segs -= seg_skip;
- iter->count -= bvec->bv_len + offset;
- iter->iov_offset = offset & ~PAGE_MASK;
- }
- }
-
- return 0;
-}
-
#ifdef CONFIG_COMPAT
static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
unsigned int issue_flags)
--
2.36.1
* [PATCH for-next 09/10] io_uring: consistent naming for inline completion
From: Pavel Begunkov @ 2022-06-20 0:26 UTC
To: io-uring; +Cc: Jens Axboe, asml.silence
Improve the naming of the inline/deferred completion helper so it's
consistent with its *_post counterpart. Add some comments and extra
lockdep annotations to ensure the locking is done right.
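As an illustration of what the added lockdep assertion catches (hypothetical
misuse, not code from this series):

	/* WRONG: deferring a completion without the ring mutex */
	io_req_complete_defer(req);		/* lockdep_assert_held() splats */

	/* RIGHT: only with ->uring_lock held, e.g. */
	mutex_lock(&ctx->uring_lock);
	io_req_complete_defer(req);
	mutex_unlock(&ctx->uring_lock);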
Signed-off-by: Pavel Begunkov <[email protected]>
---
io_uring/io_uring.c | 4 ++--
io_uring/io_uring.h | 10 +++++++++-
2 files changed, 11 insertions(+), 3 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 0be942ca91c4..afda42246d12 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1380,7 +1380,7 @@ void io_req_task_complete(struct io_kiocb *req, bool *locked)
}
if (*locked)
- io_req_add_compl_list(req);
+ io_req_complete_defer(req);
else
io_req_complete_post(req);
}
@@ -1648,7 +1648,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
if (ret == IOU_OK) {
if (issue_flags & IO_URING_F_COMPLETE_DEFER)
- io_req_add_compl_list(req);
+ io_req_complete_defer(req);
else
io_req_complete_post(req);
} else if (ret != IOU_ISSUE_SKIP_COMPLETE)
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index afca7ff8956c..7a00bbe85d35 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -222,10 +222,18 @@ static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
}
}
-static inline void io_req_add_compl_list(struct io_kiocb *req)
+/*
+ * Don't complete immediately but use deferred completion infrastructure.
+ * Protected by ->uring_lock and can only be used either with
+ * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
+ */
+static inline void io_req_complete_defer(struct io_kiocb *req)
+ __must_hold(&req->ctx->uring_lock)
{
struct io_submit_state *state = &req->ctx->submit_state;
+ lockdep_assert_held(&req->ctx->uring_lock);
+
wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}
--
2.36.1
* [PATCH for-next 10/10] io_uring: add a warn_once for poll_find
From: Pavel Begunkov @ 2022-06-20 0:26 UTC
To: io-uring; +Cc: Jens Axboe, asml.silence
io_poll_remove() expects poll_find() to search only for poll requests
and passes a flag for this. Just be a little extra cautious given the
many recent poll/cancellation changes, and add a WARN_ON_ONCE check that
we don't get an apoll'ed request.
Signed-off-by: Pavel Begunkov <[email protected]>
---
io_uring/poll.c | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 9af6a34222a9..8f4fff76d3b4 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -827,6 +827,11 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
}
found:
+ if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
if (poll_update->update_events || poll_update->update_user_data) {
/* only mask one event flags, keep behavior flags */
if (poll_update->update_events) {
--
2.36.1
* Re: [PATCH for-next 00/10] 5.20 patches
From: Jens Axboe @ 2022-06-20 11:52 UTC
To: io-uring, asml.silence
On Mon, 20 Jun 2022 01:25:51 +0100, Pavel Begunkov wrote:
> 1/10 fixes multi ring cancellation bugs
> 2-3 are cleanups
> 4-6 are a resend of the dropped patches
> 7-10 are rebased cleanups
>
> Pavel Begunkov (10):
> io_uring: fix multi ctx cancellation
> io_uring: improve task exit timeout cancellations
> io_uring: fix io_poll_remove_all clang warnings
> io_uring: hide eventfd assumptions in eventfd paths
> io_uring: introduce locking helpers for CQE posting
> io_uring: add io_commit_cqring_flush()
> io_uring: opcode independent fixed buf import
> io_uring: move io_import_fixed()
> io_uring: consistent naming for inline completion
> io_uring: add a warn_once for poll_find
>
> [...]
Applied, thanks!
[01/10] io_uring: fix multi ctx cancellation
commit: 45987e01342c884ff15f180e1c5f3bfc6d5ee50f
[02/10] io_uring: improve task exit timeout cancellations
commit: 23641c3094a7e57eb3a61544b76586a4e2980c2d
[03/10] io_uring: fix io_poll_remove_all clang warnings
commit: e67910197b4844701d17439ab867ab8a08425ce6
[04/10] io_uring: hide eventfd assumptions in eventfd paths
commit: 8ac9127be60bf7de7efcee71bba0fd08bb3573fd
[05/10] io_uring: introduce locking helpers for CQE posting
commit: d88cbb474bb52fcced34e5ebc47de4521f98713f
[06/10] io_uring: add io_commit_cqring_flush()
commit: fe435d183d95618149aedd19c4ccf141ff74b875
[07/10] io_uring: opcode independent fixed buf import
commit: a708de4e48daf9e667f9e9a983c3a432614202ba
[08/10] io_uring: move io_import_fixed()
commit: d9b631c2d3d437792d1cdb9576f79b809e4b4ada
[09/10] io_uring: consistent naming for inline completion
commit: f7605b87fdcf73a569c71ea74e346f239b48c7d3
[10/10] io_uring: add a warn_once for poll_find
commit: d0093035a00357ff45bc52ed8f2c40c40e1de8c5
Best regards,
--
Jens Axboe