* [PATCHSET next 0/5] Extend cancelation support
  From: Jens Axboe @ 2022-04-15 13:33 UTC
  To: io-uring

Hi,

We currently only support looking up and canceling requests based on the
user_data of the original request. Sometimes it can be useful to instead
key off the fd used in the original request, e.g. if a socket goes away.

Patch 1 is a cleanup spotted while doing this, patches 2 and 3 are prep
patches, patch 4 adds support for IORING_ASYNC_CANCEL_ALL, and finally
patch 5 adds support for IORING_ASYNC_CANCEL_FD.

If IORING_ASYNC_CANCEL_ALL is set, all requests matching the given criteria
are canceled. The return value is -ENOENT if none were found, or a positive
count of how many requests were found and canceled.

If IORING_ASYNC_CANCEL_FD is set, requests matching sqe->fd are canceled
rather than matching on sqe->addr for user_data.

v2:
- Add IORING_ASYNC_CANCEL_ALL
- Minor fixes for IORING_ASYNC_CANCEL_FD

-- 
Jens Axboe
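As a concrete illustration of the semantics above, a minimal userspace
sketch (not part of the series): it cancels every pending request submitted
with a given user_data. liburing is assumed only for ring setup and
submission; the SQE fields are filled in by hand, since liburing helper
names for these flags may differ by version.

#include <liburing.h>
#include <string.h>

#ifndef IORING_ASYNC_CANCEL_ALL
#define IORING_ASYNC_CANCEL_ALL	(1U << 0)	/* value from patch 4 below */
#endif

/* Cancel every pending request whose user_data equals 'key'. */
static int cancel_all_matching(struct io_uring *ring, __u64 key)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -EBUSY;
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_ASYNC_CANCEL;
	sqe->addr = key;				/* user_data of the target(s) */
	sqe->cancel_flags = IORING_ASYNC_CANCEL_ALL;	/* match all, not just the first */

	io_uring_submit(ring);
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	/* res is -ENOENT if nothing matched, else the number of requests canceled */
	ret = cqe->res;
	io_uring_cqe_seen(ring, cqe);
	return ret;
}

With IORING_ASYNC_CANCEL_ALL left clear, the same SQE keeps the old behavior
of canceling only the first match.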
* [PATCH 1/5] io_uring: remove dead 'poll_only' argument to io_poll_cancel()
  From: Jens Axboe @ 2022-04-15 13:33 UTC
  To: io-uring; +Cc: Jens Axboe

It's only called from one location, and it always passes in 'false'. Kill
the argument, and just pass in 'false' to io_poll_find().

Signed-off-by: Jens Axboe <[email protected]>
---
 fs/io_uring.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index d3fc0c5b4e82..878d30a31606 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6316,11 +6316,10 @@ static bool io_poll_disarm(struct io_kiocb *req)
 	return true;
 }
 
-static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr,
-			  bool poll_only)
+static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
 	__must_hold(&ctx->completion_lock)
 {
-	struct io_kiocb *req = io_poll_find(ctx, sqe_addr, poll_only);
+	struct io_kiocb *req = io_poll_find(ctx, sqe_addr, false);
 
 	if (!req)
 		return -ENOENT;
@@ -6808,7 +6807,7 @@ static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
 		return 0;
 
 	spin_lock(&ctx->completion_lock);
-	ret = io_poll_cancel(ctx, sqe_addr, false);
+	ret = io_poll_cancel(ctx, sqe_addr);
 	if (ret != -ENOENT)
 		goto out;
-- 
2.35.1
* [PATCH 2/5] io_uring: pass in struct io_cancel_data consistently
  From: Jens Axboe @ 2022-04-15 13:33 UTC
  To: io-uring; +Cc: Jens Axboe

In preparation for being able to not only key cancel off the user_data,
pass in the io_cancel_data struct.

Signed-off-by: Jens Axboe <[email protected]>
---
 fs/io_uring.c | 48 ++++++++++++++++++++++++++++--------------------
 1 file changed, 28 insertions(+), 20 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 878d30a31606..a45ab678a455 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6316,10 +6316,15 @@ static bool io_poll_disarm(struct io_kiocb *req)
 	return true;
 }
 
-static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
+struct io_cancel_data {
+	struct io_ring_ctx *ctx;
+	u64 user_data;
+};
+
+static int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
 	__must_hold(&ctx->completion_lock)
 {
-	struct io_kiocb *req = io_poll_find(ctx, sqe_addr, false);
+	struct io_kiocb *req = io_poll_find(ctx, cd->user_data, false);
 
 	if (!req)
 		return -ENOENT;
@@ -6752,11 +6757,6 @@ static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
 	return 0;
 }
 
-struct io_cancel_data {
-	struct io_ring_ctx *ctx;
-	u64 user_data;
-};
-
 static bool io_cancel_cb(struct io_wq_work *work, void *data)
 {
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
@@ -6765,17 +6765,16 @@ static bool io_cancel_cb(struct io_wq_work *work, void *data)
 	return req->ctx == cd->ctx && req->cqe.user_data == cd->user_data;
 }
 
-static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
-			       struct io_ring_ctx *ctx)
+static int io_async_cancel_one(struct io_uring_task *tctx,
+			       struct io_cancel_data *cd)
 {
-	struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, };
 	enum io_wq_cancel cancel_ret;
 	int ret = 0;
 
 	if (!tctx || !tctx->io_wq)
 		return -ENOENT;
 
-	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false);
+	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, false);
 	switch (cancel_ret) {
 	case IO_WQ_CANCEL_OK:
 		ret = 0;
@@ -6791,14 +6790,14 @@ static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
 	return ret;
 }
 
-static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
+static int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret;
 
 	WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current);
 
-	ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
+	ret = io_async_cancel_one(req->task->io_uring, cd);
 	/*
 	 * Fall-through even for -EALREADY, as we may have poll armed
 	 * that need unarming.
@@ -6807,12 +6806,12 @@ static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
 		return 0;
 
 	spin_lock(&ctx->completion_lock);
-	ret = io_poll_cancel(ctx, sqe_addr);
+	ret = io_poll_cancel(ctx, cd);
 	if (ret != -ENOENT)
 		goto out;
 
 	spin_lock_irq(&ctx->timeout_lock);
-	ret = io_timeout_cancel(ctx, sqe_addr);
+	ret = io_timeout_cancel(ctx, cd->user_data);
 	spin_unlock_irq(&ctx->timeout_lock);
 out:
 	spin_unlock(&ctx->completion_lock);
@@ -6837,11 +6836,14 @@ static int io_async_cancel_prep(struct io_kiocb *req,
 
 static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	u64 sqe_addr = req->cancel.addr;
+	struct io_cancel_data cd = {
+		.ctx = ctx,
+		.user_data = req->cancel.addr,
+	};
 	struct io_tctx_node *node;
 	int ret;
 
-	ret = io_try_cancel_userdata(req, sqe_addr);
+	ret = io_try_cancel(req, &cd);
 	if (ret != -ENOENT)
 		goto done;
@@ -6851,7 +6853,7 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
 		struct io_uring_task *tctx = node->task->io_uring;
 
-		ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
+		ret = io_async_cancel_one(tctx, &cd);
 		if (ret != -ENOENT)
 			break;
 	}
@@ -7455,8 +7457,14 @@ static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
 	int ret = -ENOENT;
 
 	if (prev) {
-		if (!(req->task->flags & PF_EXITING))
-			ret = io_try_cancel_userdata(req, prev->cqe.user_data);
+		if (!(req->task->flags & PF_EXITING)) {
+			struct io_cancel_data cd = {
+				.ctx = req->ctx,
+				.user_data = prev->cqe.user_data,
+			};
+
+			ret = io_try_cancel(req, &cd);
+		}
 		io_req_complete_post(req, ret ?: -ETIME, 0);
 		io_put_req(prev);
 	} else {
-- 
2.35.1
* [PATCH 3/5] io_uring: rename io_cancel_data->user_data to just 'data'
  From: Jens Axboe @ 2022-04-15 13:33 UTC
  To: io-uring; +Cc: Jens Axboe

In preparation for putting other data in there than just the user_data,
rename the member to just 'data'.

Signed-off-by: Jens Axboe <[email protected]>
---
 fs/io_uring.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index a45ab678a455..6dcf3ad7ee99 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6318,13 +6318,13 @@ static bool io_poll_disarm(struct io_kiocb *req)
 
 struct io_cancel_data {
 	struct io_ring_ctx *ctx;
-	u64 user_data;
+	u64 data;
 };
 
 static int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
 	__must_hold(&ctx->completion_lock)
 {
-	struct io_kiocb *req = io_poll_find(ctx, cd->user_data, false);
+	struct io_kiocb *req = io_poll_find(ctx, cd->data, false);
 
 	if (!req)
 		return -ENOENT;
@@ -6762,7 +6762,7 @@ static bool io_cancel_cb(struct io_wq_work *work, void *data)
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
 	struct io_cancel_data *cd = data;
 
-	return req->ctx == cd->ctx && req->cqe.user_data == cd->user_data;
+	return req->ctx == cd->ctx && req->cqe.user_data == cd->data;
 }
 
 static int io_async_cancel_one(struct io_uring_task *tctx,
@@ -6811,7 +6811,7 @@ static int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd)
 		goto out;
 
 	spin_lock_irq(&ctx->timeout_lock);
-	ret = io_timeout_cancel(ctx, cd->user_data);
+	ret = io_timeout_cancel(ctx, cd->data);
 	spin_unlock_irq(&ctx->timeout_lock);
 out:
 	spin_unlock(&ctx->completion_lock);
@@ -6837,8 +6837,8 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_cancel_data cd = {
-		.ctx = ctx,
-		.user_data = req->cancel.addr,
+		.ctx = ctx,
+		.data = req->cancel.addr,
 	};
 	struct io_tctx_node *node;
 	int ret;
@@ -7459,8 +7459,8 @@ static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
 	if (prev) {
 		if (!(req->task->flags & PF_EXITING)) {
 			struct io_cancel_data cd = {
-				.ctx = req->ctx,
-				.user_data = prev->cqe.user_data,
+				.ctx = req->ctx,
+				.data = prev->cqe.user_data,
 			};
 
 			ret = io_try_cancel(req, &cd);
-- 
2.35.1
* [PATCH 4/5] io_uring: add support for IORING_ASYNC_CANCEL_ALL
  From: Jens Axboe @ 2022-04-15 13:33 UTC
  To: io-uring; +Cc: Jens Axboe

The current cancelation looks up and cancels the first request it finds
based on the key passed in. Add a flag that allows canceling any request
that matches the key. It completes with either -ENOENT if none were found,
or res > 0 for the number of entries canceled.

Signed-off-by: Jens Axboe <[email protected]>
---
 fs/io_uring.c | 60 +++++++++++++++++++++++++++++----------
 include/uapi/linux/io_uring.h | 7 ++++
 2 files changed, 50 insertions(+), 17 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 6dcf3ad7ee99..c7e5d60fbbe5 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -568,6 +568,7 @@ struct io_sync {
 struct io_cancel {
 	struct file *file;
 	u64 addr;
+	u32 flags;
 };
 
 struct io_timeout {
@@ -6319,6 +6320,7 @@ static bool io_poll_disarm(struct io_kiocb *req)
 struct io_cancel_data {
 	struct io_ring_ctx *ctx;
 	u64 data;
+	u32 flags;
 };
 
 static int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
@@ -6774,7 +6776,8 @@ static int io_async_cancel_one(struct io_uring_task *tctx,
 	if (!tctx || !tctx->io_wq)
 		return -ENOENT;
 
-	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, false);
+	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd,
+				     cd->flags & IORING_ASYNC_CANCEL_ALL);
 	switch (cancel_ret) {
 	case IO_WQ_CANCEL_OK:
 		ret = 0;
@@ -6825,27 +6828,34 @@ static int io_async_cancel_prep(struct io_kiocb *req,
 		return -EINVAL;
 	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
 		return -EINVAL;
-	if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags ||
-	    sqe->splice_fd_in)
+	if (sqe->ioprio || sqe->off || sqe->len || sqe->splice_fd_in)
 		return -EINVAL;
 
 	req->cancel.addr = READ_ONCE(sqe->addr);
+	req->cancel.flags = READ_ONCE(sqe->cancel_flags);
+	if (req->cancel.flags & ~IORING_ASYNC_CANCEL_ALL)
+		return -EINVAL;
+
 	return 0;
 }
 
-static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
+static int __io_async_cancel(struct io_cancel_data *cd, struct io_kiocb *req,
+			     unsigned int issue_flags)
 {
-	struct io_ring_ctx *ctx = req->ctx;
-	struct io_cancel_data cd = {
-		.ctx = ctx,
-		.data = req->cancel.addr,
-	};
+	bool cancel_all = cd->flags & IORING_ASYNC_CANCEL_ALL;
+	struct io_ring_ctx *ctx = cd->ctx;
 	struct io_tctx_node *node;
-	int ret;
+	int ret, nr = 0;
 
-	ret = io_try_cancel(req, &cd);
-	if (ret != -ENOENT)
-		goto done;
+	do {
+		ret = io_try_cancel(req, cd);
+		if (ret == -ENOENT)
+			break;
+		if (!cancel_all)
+			return ret;
+		nr++;
+		io_run_task_work();
+	} while (1);
 
 	/* slow path, try all io-wq's */
 	io_ring_submit_lock(ctx, issue_flags);
@@ -6853,12 +6863,28 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
 		struct io_uring_task *tctx = node->task->io_uring;
 
-		ret = io_async_cancel_one(tctx, &cd);
-		if (ret != -ENOENT)
-			break;
+		ret = io_async_cancel_one(tctx, cd);
+		if (ret != -ENOENT) {
+			if (!cancel_all)
+				break;
+			nr++;
+			io_run_task_work();
+		}
 	}
 	io_ring_submit_unlock(ctx, issue_flags);
-done:
+	return cancel_all ? nr : ret;
+}
+
+static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
+{
+	struct io_cancel_data cd = {
+		.ctx = req->ctx,
+		.data = req->cancel.addr,
+		.flags = req->cancel.flags,
+	};
+	int ret;
+
+	ret = __io_async_cancel(&cd, req, issue_flags);
 	if (ret < 0)
 		req_set_fail(req);
 	io_req_complete_post(req, ret, 0);
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 1845cf7c80ba..476e58a2837f 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -187,6 +187,13 @@ enum {
 #define IORING_POLL_UPDATE_EVENTS	(1U << 1)
 #define IORING_POLL_UPDATE_USER_DATA	(1U << 2)
 
+/*
+ * ASYNC_CANCEL flags.
+ *
+ * IORING_ASYNC_CANCEL_ALL	Cancel all requests that match the given key
+ */
+#define IORING_ASYNC_CANCEL_ALL	(1U << 0)
+
 /*
  * IO completion data structure (Completion Queue Entry)
  */
-- 
2.35.1
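Since io_async_cancel_prep() above now fails unknown cancel_flags bits with
-EINVAL, and kernels without this patch rejected any non-zero
sqe->cancel_flags outright, userspace can probe for the feature. A
hypothetical sketch (not from the series), assuming liburing only for ring
setup, with the flag value taken from the uapi hunk above:

#include <liburing.h>
#include <string.h>

#ifndef IORING_ASYNC_CANCEL_ALL
#define IORING_ASYNC_CANCEL_ALL	(1U << 0)
#endif

/* Returns 1 if IORING_ASYNC_CANCEL_ALL is supported, 0 if not, negative on error. */
static int supports_cancel_all(struct io_uring *ring)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -EBUSY;
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_ASYNC_CANCEL;
	sqe->addr = 0;		/* assumes no in-flight request uses user_data 0 */
	sqe->cancel_flags = IORING_ASYNC_CANCEL_ALL;

	io_uring_submit(ring);
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	ret = cqe->res;
	io_uring_cqe_seen(ring, cqe);
	/* old kernels fail flag validation; patched kernels simply find no match */
	return ret == -EINVAL ? 0 : 1;
}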
* [PATCH 5/5] io_uring: allow IORING_OP_ASYNC_CANCEL with 'fd' key
  From: Jens Axboe @ 2022-04-15 13:33 UTC
  To: io-uring; +Cc: Jens Axboe

Currently sqe->addr must contain the user_data of the request being
canceled. Introduce the IORING_ASYNC_CANCEL_FD flag, which tells the kernel
that we're keying off the file fd instead for cancelation. This allows
canceling any request that a) uses a file, and b) was assigned the file
based on the value being passed in.

Signed-off-by: Jens Axboe <[email protected]>
---
 fs/io_uring.c | 65 ++++++++++++++++++++++++++++++-----
 include/uapi/linux/io_uring.h | 3 ++
 2 files changed, 60 insertions(+), 8 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index c7e5d60fbbe5..4a3dbdb36d0d 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -569,6 +569,7 @@ struct io_cancel {
 	struct file *file;
 	u64 addr;
 	u32 flags;
+	s32 fd;
 };
 
 struct io_timeout {
@@ -6307,6 +6308,26 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
 	return NULL;
 }
 
+static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
+					  struct file *file)
+	__must_hold(&ctx->completion_lock)
+{
+	struct io_kiocb *req;
+	int i;
+
+	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
+		struct hlist_head *list;
+
+		list = &ctx->cancel_hash[i];
+		hlist_for_each_entry(req, list, hash_node) {
+			if (req->file != file)
+				continue;
+			return req;
+		}
+	}
+	return NULL;
+}
+
 static bool io_poll_disarm(struct io_kiocb *req)
 	__must_hold(&ctx->completion_lock)
 {
@@ -6319,15 +6340,22 @@ static bool io_poll_disarm(struct io_kiocb *req)
 
 struct io_cancel_data {
 	struct io_ring_ctx *ctx;
-	u64 data;
+	union {
+		u64 data;
+		struct file *file;
+	};
 	u32 flags;
 };
 
 static int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
 	__must_hold(&ctx->completion_lock)
 {
-	struct io_kiocb *req = io_poll_find(ctx, cd->data, false);
+	struct io_kiocb *req;
 
+	if (cd->flags & IORING_ASYNC_CANCEL_FD)
+		req = io_poll_file_find(ctx, cd->file);
+	else
+		req = io_poll_find(ctx, cd->data, false);
 	if (!req)
 		return -ENOENT;
 	io_poll_cancel_req(req);
@@ -6764,7 +6792,11 @@ static bool io_cancel_cb(struct io_wq_work *work, void *data)
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
 	struct io_cancel_data *cd = data;
 
-	return req->ctx == cd->ctx && req->cqe.user_data == cd->data;
+	if (req->ctx != cd->ctx)
+		return false;
+	if (cd->flags & IORING_ASYNC_CANCEL_FD)
+		return req->file == cd->file;
+	return req->cqe.user_data == cd->data;
 }
 
 static int io_async_cancel_one(struct io_uring_task *tctx,
@@ -6813,9 +6845,11 @@ static int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd)
 	if (ret != -ENOENT)
 		goto out;
 
-	spin_lock_irq(&ctx->timeout_lock);
-	ret = io_timeout_cancel(ctx, cd->data);
-	spin_unlock_irq(&ctx->timeout_lock);
+	if (!(cd->flags & IORING_ASYNC_CANCEL_FD)) {
+		spin_lock_irq(&ctx->timeout_lock);
+		ret = io_timeout_cancel(ctx, cd->data);
+		spin_unlock_irq(&ctx->timeout_lock);
+	}
 out:
 	spin_unlock(&ctx->completion_lock);
 	return ret;
@@ -6826,15 +6860,17 @@ static int io_async_cancel_prep(struct io_kiocb *req,
 {
 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
-	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
+	if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
 		return -EINVAL;
 	if (sqe->ioprio || sqe->off || sqe->len || sqe->splice_fd_in)
 		return -EINVAL;
 
 	req->cancel.addr = READ_ONCE(sqe->addr);
 	req->cancel.flags = READ_ONCE(sqe->cancel_flags);
-	if (req->cancel.flags & ~IORING_ASYNC_CANCEL_ALL)
+	if (req->cancel.flags & ~(IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_FD))
 		return -EINVAL;
+	if (req->cancel.flags & IORING_ASYNC_CANCEL_FD)
+		req->cancel.fd = READ_ONCE(sqe->fd);
 
 	return 0;
 }
@@ -6884,7 +6920,20 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 	};
 	int ret;
 
+	if (cd.flags & IORING_ASYNC_CANCEL_FD) {
+		if (req->flags & REQ_F_FIXED_FILE)
+			req->file = io_file_get_fixed(req, req->cancel.fd, issue_flags);
+		else
+			req->file = io_file_get_normal(req, req->cancel.fd);
+		if (!req->file) {
+			ret = -EBADF;
+			goto done;
+		}
+		cd.file = req->file;
+	}
+
 	ret = __io_async_cancel(&cd, req, issue_flags);
+done:
 	if (ret < 0)
 		req_set_fail(req);
 	io_req_complete_post(req, ret, 0);
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 476e58a2837f..cc7fe82a1798 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -191,8 +191,11 @@ enum {
  * ASYNC_CANCEL flags.
  *
  * IORING_ASYNC_CANCEL_ALL	Cancel all requests that match the given key
+ * IORING_ASYNC_CANCEL_FD	Key off 'fd' for cancelation rather than the
+ *				request 'user_data'
 */
 #define IORING_ASYNC_CANCEL_ALL	(1U << 0)
+#define IORING_ASYNC_CANCEL_FD	(1U << 1)
 
 /*
  * IO completion data structure (Completion Queue Entry)
-- 
2.35.1
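Tying this back to the cover letter's "socket goes away" case, a
hypothetical userspace sketch (not from the series): sqe->fd carries the
key and IORING_ASYNC_CANCEL_FD selects fd-based matching, here combined
with IORING_ASYNC_CANCEL_ALL to drop everything still pending against the
socket. liburing is assumed only for ring setup; flag values are taken from
the uapi hunk above.

#include <liburing.h>
#include <string.h>

#ifndef IORING_ASYNC_CANCEL_ALL
#define IORING_ASYNC_CANCEL_ALL	(1U << 0)
#endif
#ifndef IORING_ASYNC_CANCEL_FD
#define IORING_ASYNC_CANCEL_FD	(1U << 1)
#endif

/* Cancel every pending request that was issued against 'sockfd'. */
static int cancel_all_on_fd(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -EBUSY;
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_ASYNC_CANCEL;
	sqe->fd = sockfd;	/* the cancel key is now the fd, not user_data */
	sqe->cancel_flags = IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_ALL;

	io_uring_submit(ring);
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	/* res is -EBADF for a bad fd, -ENOENT if nothing matched, else the count */
	ret = cqe->res;
	io_uring_cqe_seen(ring, cqe);
	return ret;
}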
* [PATCHSET v4 next 0/5] Extend cancelation support
  From: Jens Axboe @ 2022-04-18 16:43 UTC
  To: io-uring

Hi,

We currently only support looking up and canceling requests based on the
user_data of the original request. Sometimes it can be useful to instead
key off the fd used in the original request, e.g. if a socket goes away.

Patch 1 is a cleanup spotted while doing this, patch 2 is a prep patch,
patch 3 adds support for IORING_ASYNC_CANCEL_ALL, patch 4 adds support for
IORING_ASYNC_CANCEL_FD, and patch 5 adds support for matching any request
(useful with CANCEL_ALL).

If IORING_ASYNC_CANCEL_ALL is set, all requests matching the given criteria
are canceled. The return value is the number of requests canceled, 0 if
none were found, or any error encountered while canceling.

If IORING_ASYNC_CANCEL_FD is set, requests matching sqe->fd are canceled
rather than matching on sqe->addr for user_data.

If IORING_ASYNC_CANCEL_ANY is set, any request is matched rather than
keying off the fd or user_data.

There's some support in the liburing branch below:

https://git.kernel.dk/cgit/liburing/log/?h=cancel-fd-all

which also has various test cases.

v4:
- Minor cleanups
- Rebase on current tree
- Add IORING_ASYNC_CANCEL_ANY

-- 
Jens Axboe
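A hypothetical sketch against these v4 semantics: the IORING_ASYNC_CANCEL_ANY
definition comes from patch 5 of this series, which is not shown on this
page, so the flag is assumed to be provided by an io_uring.h that includes
the full series. Combined with IORING_ASYNC_CANCEL_ALL it cancels every
pending request on the ring, and the completion carries the count, 0, or a
negative error as described above.

#include <liburing.h>
#include <string.h>

/* Cancel everything pending on the ring, regardless of fd or user_data. */
static int cancel_everything(struct io_uring *ring)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -EBUSY;
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_ASYNC_CANCEL;
	sqe->cancel_flags = IORING_ASYNC_CANCEL_ANY | IORING_ASYNC_CANCEL_ALL;

	io_uring_submit(ring);
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	/* v4 semantics: number canceled, 0 if none matched, or a negative error */
	ret = cqe->res;
	io_uring_cqe_seen(ring, cqe);
	return ret;
}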
* [PATCH 2/5] io_uring: pass in struct io_cancel_data consistently
  From: Jens Axboe @ 2022-04-18 16:43 UTC
  To: io-uring; +Cc: Jens Axboe

In preparation for being able to not only key cancel off the user_data,
pass in the io_cancel_data struct for the various functions that deal with
request cancelation.

Signed-off-by: Jens Axboe <[email protected]>
---
 fs/io_uring.c | 76 +++++++++++++++++++++++++++++----------------
 1 file changed, 44 insertions(+), 32 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index c0f8c5b15f2f..eab464e0c323 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -971,6 +971,11 @@ struct io_defer_entry {
 	u32 seq;
 };
 
+struct io_cancel_data {
+	struct io_ring_ctx *ctx;
+	u64 data;
+};
+
 struct io_op_def {
 	/* needs req->file assigned */
 	unsigned needs_file : 1;
@@ -6254,16 +6259,16 @@ static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx,
 	return found;
 }
 
-static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
-				     bool poll_only)
+static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
+				     struct io_cancel_data *cd)
 	__must_hold(&ctx->completion_lock)
 {
 	struct hlist_head *list;
 	struct io_kiocb *req;
 
-	list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
+	list = &ctx->cancel_hash[hash_long(cd->data, ctx->cancel_hash_bits)];
 	hlist_for_each_entry(req, list, hash_node) {
-		if (sqe_addr != req->cqe.user_data)
+		if (cd->data != req->cqe.user_data)
 			continue;
 		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
 			continue;
@@ -6282,10 +6287,10 @@ static bool io_poll_disarm(struct io_kiocb *req)
 	return true;
 }
 
-static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
+static int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
 	__must_hold(&ctx->completion_lock)
 {
-	struct io_kiocb *req = io_poll_find(ctx, sqe_addr, false);
+	struct io_kiocb *req = io_poll_find(ctx, false, cd);
 
 	if (!req)
 		return -ENOENT;
@@ -6377,13 +6382,14 @@ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
 
 static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
 {
+	struct io_cancel_data cd = { .data = req->poll_update.old_user_data, };
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_kiocb *preq;
 	int ret2, ret = 0;
 	bool locked;
 
 	spin_lock(&ctx->completion_lock);
-	preq = io_poll_find(ctx, req->poll_update.old_user_data, true);
+	preq = io_poll_find(ctx, true, &cd);
 	if (!preq || !io_poll_disarm(preq)) {
 		spin_unlock(&ctx->completion_lock);
 		ret = preq ? -EALREADY : -ENOENT;
@@ -6443,7 +6449,7 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 }
 
 static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
-					   __u64 user_data)
+					   struct io_cancel_data *cd)
 	__must_hold(&ctx->timeout_lock)
 {
 	struct io_timeout_data *io;
@@ -6451,7 +6457,7 @@ static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
 	bool found = false;
 
 	list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
-		found = user_data == req->cqe.user_data;
+		found = cd->data == req->cqe.user_data;
 		if (found)
 			break;
 	}
@@ -6465,11 +6471,11 @@ static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
 	return req;
 }
 
-static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
+static int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
 	__must_hold(&ctx->completion_lock)
 	__must_hold(&ctx->timeout_lock)
 {
-	struct io_kiocb *req = io_timeout_extract(ctx, user_data);
+	struct io_kiocb *req = io_timeout_extract(ctx, cd);
 
 	if (IS_ERR(req))
 		return PTR_ERR(req);
@@ -6522,7 +6528,8 @@ static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
 			     struct timespec64 *ts, enum hrtimer_mode mode)
 	__must_hold(&ctx->timeout_lock)
 {
-	struct io_kiocb *req = io_timeout_extract(ctx, user_data);
+	struct io_cancel_data cd = { .data = user_data, };
+	struct io_kiocb *req = io_timeout_extract(ctx, &cd);
 	struct io_timeout_data *data;
 
 	if (IS_ERR(req))
@@ -6587,9 +6594,11 @@ static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
 	int ret;
 
 	if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE)) {
+		struct io_cancel_data cd = { .data = tr->addr, };
+
 		spin_lock(&ctx->completion_lock);
 		spin_lock_irq(&ctx->timeout_lock);
-		ret = io_timeout_cancel(ctx, tr->addr);
+		ret = io_timeout_cancel(ctx, &cd);
 		spin_unlock_irq(&ctx->timeout_lock);
 		spin_unlock(&ctx->completion_lock);
 	} else {
@@ -6718,30 +6727,24 @@ static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
 	return 0;
 }
 
-struct io_cancel_data {
-	struct io_ring_ctx *ctx;
-	u64 user_data;
-};
-
 static bool io_cancel_cb(struct io_wq_work *work, void *data)
 {
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
 	struct io_cancel_data *cd = data;
 
-	return req->ctx == cd->ctx && req->cqe.user_data == cd->user_data;
+	return req->ctx == cd->ctx && req->cqe.user_data == cd->data;
 }
 
-static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
-			       struct io_ring_ctx *ctx)
+static int io_async_cancel_one(struct io_uring_task *tctx,
+			       struct io_cancel_data *cd)
 {
-	struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, };
 	enum io_wq_cancel cancel_ret;
 	int ret = 0;
 
 	if (!tctx || !tctx->io_wq)
 		return -ENOENT;
 
-	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false);
+	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, false);
 	switch (cancel_ret) {
 	case IO_WQ_CANCEL_OK:
 		ret = 0;
@@ -6757,14 +6760,14 @@ static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
 	return ret;
 }
 
-static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
+static int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret;
 
 	WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current);
 
-	ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
+	ret = io_async_cancel_one(req->task->io_uring, cd);
 	/*
 	 * Fall-through even for -EALREADY, as we may have poll armed
 	 * that need unarming.
@@ -6773,12 +6776,12 @@ static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
 		return 0;
 
 	spin_lock(&ctx->completion_lock);
-	ret = io_poll_cancel(ctx, sqe_addr);
+	ret = io_poll_cancel(ctx, cd);
 	if (ret != -ENOENT)
 		goto out;
 
 	spin_lock_irq(&ctx->timeout_lock);
-	ret = io_timeout_cancel(ctx, sqe_addr);
+	ret = io_timeout_cancel(ctx, cd);
 	spin_unlock_irq(&ctx->timeout_lock);
 out:
 	spin_unlock(&ctx->completion_lock);
@@ -6803,11 +6806,14 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	u64 sqe_addr = req->cancel.addr;
+	struct io_cancel_data cd = {
+		.ctx = ctx,
+		.data = req->cancel.addr,
+	};
 	struct io_tctx_node *node;
 	int ret;
 
-	ret = io_try_cancel_userdata(req, sqe_addr);
+	ret = io_try_cancel(req, &cd);
 	if (ret != -ENOENT)
 		goto done;
@@ -6817,7 +6823,7 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
 		struct io_uring_task *tctx = node->task->io_uring;
 
-		ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
+		ret = io_async_cancel_one(tctx, &cd);
 		if (ret != -ENOENT)
 			break;
 	}
@@ -7419,8 +7425,14 @@ static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
 	int ret = -ENOENT;
 
 	if (prev) {
-		if (!(req->task->flags & PF_EXITING))
-			ret = io_try_cancel_userdata(req, prev->cqe.user_data);
+		if (!(req->task->flags & PF_EXITING)) {
+			struct io_cancel_data cd = {
+				.ctx = req->ctx,
+				.data = prev->cqe.user_data,
+			};
+
+			ret = io_try_cancel(req, &cd);
+		}
 		io_req_complete_post(req, ret ?: -ETIME, 0);
 		io_put_req(prev);
 	} else {
-- 
2.35.1