* [PATCHSET next 0/4] Allow cancelation based on fd
From: Jens Axboe @ 2022-04-14 20:24 UTC
To: io-uring
Hi,
We currently only support looking up and canceling requests based on
the user_data of the original request. Sometimes it can be useful to
instead key off the fd used in the original request, e.g. if a socket
goes away.
Patch 1 is a cleanup spotted while doing this, patches 2 and 3 are prep
patches, and patch 4 is the meat of it.
Patch 4 adds IORING_ASYNC_CANCEL_FD as an IORING_OP_ASYNC_CANCEL request
flag, which tells the kernel that we're looking up the request by fd
rather than by user_data.
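As a rough userspace sketch of the interface (illustrative only, not
part of this series; raw SQE fields are used since liburing had no prep
helper for this at the time), a single fd-keyed cancel looks something
like the below. The SQE fields match the uapi added in patch 4,
everything around them is assumed boilerplate:

#include <string.h>
#include <liburing.h>

/*
 * Hedged sketch: issue one IORING_ASYNC_CANCEL_FD request and return
 * the cancel request's CQE result. For clarity this assumes the next
 * CQE reaped is the cancel's own completion.
 */
static int cancel_by_fd(struct io_uring *ring, int fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -EBUSY;
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_ASYNC_CANCEL;
	sqe->addr = fd;		/* the fd to match, not a user_data */
	sqe->cancel_flags = IORING_ASYNC_CANCEL_FD;

	ret = io_uring_submit(ring);
	if (ret < 0)
		return ret;
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	ret = cqe->res;		/* 0, or -ENOENT/-EALREADY */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}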
--
Jens Axboe
* [PATCH 1/4] io_uring: remove dead 'poll_only' argument to io_poll_cancel()
From: Jens Axboe @ 2022-04-14 20:24 UTC
To: io-uring; +Cc: Jens Axboe
io_poll_cancel() is only called from one location, and that caller
always passes in 'false'. Kill the argument, and just pass 'false' to
io_poll_find() directly.
Signed-off-by: Jens Axboe <[email protected]>
---
fs/io_uring.c | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index d3fc0c5b4e82..878d30a31606 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6316,11 +6316,10 @@ static bool io_poll_disarm(struct io_kiocb *req)
return true;
}
-static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr,
- bool poll_only)
+static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
__must_hold(&ctx->completion_lock)
{
- struct io_kiocb *req = io_poll_find(ctx, sqe_addr, poll_only);
+ struct io_kiocb *req = io_poll_find(ctx, sqe_addr, false);
if (!req)
return -ENOENT;
@@ -6808,7 +6807,7 @@ static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
return 0;
spin_lock(&ctx->completion_lock);
- ret = io_poll_cancel(ctx, sqe_addr, false);
+ ret = io_poll_cancel(ctx, sqe_addr);
if (ret != -ENOENT)
goto out;
--
2.35.1
* [PATCH 2/4] io_uring: pass in struct io_cancel_data consistently
From: Jens Axboe @ 2022-04-14 20:24 UTC
To: io-uring; +Cc: Jens Axboe
In preparation for being able to key cancelation off more than just the
user_data, pass in the io_cancel_data struct consistently rather than a
bare u64.
Signed-off-by: Jens Axboe <[email protected]>
---
fs/io_uring.c | 49 +++++++++++++++++++++++++++++--------------------
1 file changed, 29 insertions(+), 20 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 878d30a31606..c3955b9709c6 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6316,10 +6316,15 @@ static bool io_poll_disarm(struct io_kiocb *req)
return true;
}
-static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
+struct io_cancel_data {
+ struct io_ring_ctx *ctx;
+ u64 user_data;
+};
+
+static int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
__must_hold(&ctx->completion_lock)
{
- struct io_kiocb *req = io_poll_find(ctx, sqe_addr, false);
+ struct io_kiocb *req = io_poll_find(ctx, cd->user_data, false);
if (!req)
return -ENOENT;
@@ -6752,11 +6757,6 @@ static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
return 0;
}
-struct io_cancel_data {
- struct io_ring_ctx *ctx;
- u64 user_data;
-};
-
static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
@@ -6765,17 +6765,16 @@ static bool io_cancel_cb(struct io_wq_work *work, void *data)
return req->ctx == cd->ctx && req->cqe.user_data == cd->user_data;
}
-static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
- struct io_ring_ctx *ctx)
+static int io_async_cancel_one(struct io_uring_task *tctx,
+ struct io_cancel_data *cd)
{
- struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, };
enum io_wq_cancel cancel_ret;
int ret = 0;
if (!tctx || !tctx->io_wq)
return -ENOENT;
- cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false);
+ cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, false);
switch (cancel_ret) {
case IO_WQ_CANCEL_OK:
ret = 0;
@@ -6791,14 +6790,15 @@ static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
return ret;
}
-static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
+static int io_try_cancel_userdata(struct io_kiocb *req,
+ struct io_cancel_data *cd)
{
struct io_ring_ctx *ctx = req->ctx;
int ret;
WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current);
- ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
+ ret = io_async_cancel_one(req->task->io_uring, cd);
/*
* Fall-through even for -EALREADY, as we may have poll armed
* that need unarming.
@@ -6807,12 +6807,12 @@ static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
return 0;
spin_lock(&ctx->completion_lock);
- ret = io_poll_cancel(ctx, sqe_addr);
+ ret = io_poll_cancel(ctx, cd);
if (ret != -ENOENT)
goto out;
spin_lock_irq(&ctx->timeout_lock);
- ret = io_timeout_cancel(ctx, sqe_addr);
+ ret = io_timeout_cancel(ctx, cd->user_data);
spin_unlock_irq(&ctx->timeout_lock);
out:
spin_unlock(&ctx->completion_lock);
@@ -6837,11 +6837,14 @@ static int io_async_cancel_prep(struct io_kiocb *req,
static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
- u64 sqe_addr = req->cancel.addr;
+ struct io_cancel_data cd = {
+ .ctx = ctx,
+ .user_data = req->cancel.addr,
+ };
struct io_tctx_node *node;
int ret;
- ret = io_try_cancel_userdata(req, sqe_addr);
+ ret = io_try_cancel_userdata(req, &cd);
if (ret != -ENOENT)
goto done;
@@ -6851,7 +6854,7 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
struct io_uring_task *tctx = node->task->io_uring;
- ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
+ ret = io_async_cancel_one(tctx, &cd);
if (ret != -ENOENT)
break;
}
@@ -7455,8 +7458,14 @@ static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
int ret = -ENOENT;
if (prev) {
- if (!(req->task->flags & PF_EXITING))
- ret = io_try_cancel_userdata(req, prev->cqe.user_data);
+ if (!(req->task->flags & PF_EXITING)) {
+ struct io_cancel_data cd = {
+ .ctx = req->ctx,
+ .user_data = prev->cqe.user_data,
+ };
+
+ ret = io_try_cancel_userdata(req, &cd);
+ }
io_req_complete_post(req, ret ?: -ETIME, 0);
io_put_req(prev);
} else {
--
2.35.1
* [PATCH 3/4] io_uring: rename io_cancel_data->user_data to just 'data'
From: Jens Axboe @ 2022-04-14 20:24 UTC
To: io-uring; +Cc: Jens Axboe
In preparation for putting data other than the user_data in there,
rename the member to just 'data'.
Signed-off-by: Jens Axboe <[email protected]>
---
fs/io_uring.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index c3955b9709c6..0ef8401b6552 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6318,13 +6318,13 @@ static bool io_poll_disarm(struct io_kiocb *req)
struct io_cancel_data {
struct io_ring_ctx *ctx;
- u64 user_data;
+ u64 data;
};
static int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
__must_hold(&ctx->completion_lock)
{
- struct io_kiocb *req = io_poll_find(ctx, cd->user_data, false);
+ struct io_kiocb *req = io_poll_find(ctx, cd->data, false);
if (!req)
return -ENOENT;
@@ -6762,7 +6762,7 @@ static bool io_cancel_cb(struct io_wq_work *work, void *data)
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
struct io_cancel_data *cd = data;
- return req->ctx == cd->ctx && req->cqe.user_data == cd->user_data;
+ return req->ctx == cd->ctx && req->cqe.user_data == cd->data;
}
static int io_async_cancel_one(struct io_uring_task *tctx,
@@ -6812,7 +6812,7 @@ static int io_try_cancel_userdata(struct io_kiocb *req,
goto out;
spin_lock_irq(&ctx->timeout_lock);
- ret = io_timeout_cancel(ctx, cd->user_data);
+ ret = io_timeout_cancel(ctx, cd->data);
spin_unlock_irq(&ctx->timeout_lock);
out:
spin_unlock(&ctx->completion_lock);
@@ -6838,8 +6838,8 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_cancel_data cd = {
- .ctx = ctx,
- .user_data = req->cancel.addr,
+ .ctx = ctx,
+ .data = req->cancel.addr,
};
struct io_tctx_node *node;
int ret;
@@ -7460,8 +7460,8 @@ static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
if (prev) {
if (!(req->task->flags & PF_EXITING)) {
struct io_cancel_data cd = {
- .ctx = req->ctx,
- .user_data = prev->cqe.user_data,
+ .ctx = req->ctx,
+ .data = prev->cqe.user_data,
};
ret = io_try_cancel_userdata(req, &cd);
--
2.35.1
* [PATCH 4/4] io_uring: allow IORING_OP_ASYNC_CANCEL with 'fd' key
From: Jens Axboe @ 2022-04-14 20:24 UTC
To: io-uring; +Cc: Jens Axboe
Currently sqe->addr must contain the user_data of the request being
canceled. Introduce the IORING_ASYNC_CANCEL_FD flag, which tells the
kernel to key the cancelation off the file fd instead. This allows
canceling any request that a) uses a file, and b) was assigned that
file from the fd value being passed in.
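Note that only a single matching request is canceled per
IORING_OP_ASYNC_CANCEL invocation: io_poll_fd_find() returns the first
hash entry that matches, and io_wq_cancel_cb() is called with 'false'
for cancel_all. A caller that wants to flush everything pending against
an fd can loop until -ENOENT. A hedged sketch, reusing the illustrative
cancel_by_fd() helper from the cover letter:

	int ret;

	do {
		ret = cancel_by_fd(ring, fd);
	} while (ret == 0);
	/*
	 * -ENOENT: no request keyed to 'fd' remains. -EALREADY: a
	 * match was found but is already executing, and will post
	 * its own completion rather than be canceled here.
	 */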
Signed-off-by: Jens Axboe <[email protected]>
---
fs/io_uring.c | 58 +++++++++++++++++++++++++++++------
include/uapi/linux/io_uring.h | 8 +++++
2 files changed, 56 insertions(+), 10 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 0ef8401b6552..c86a92a975b7 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -567,7 +567,8 @@ struct io_sync {
struct io_cancel {
struct file *file;
- u64 addr;
+ u64 data;
+ u32 flags;
};
struct io_timeout {
@@ -6306,6 +6307,25 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
return NULL;
}
+static struct io_kiocb *io_poll_fd_find(struct io_ring_ctx *ctx, int fd)
+ __must_hold(&ctx->completion_lock)
+{
+ struct io_kiocb *req;
+ int i;
+
+ for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
+ struct hlist_head *list;
+
+ list = &ctx->cancel_hash[i];
+ hlist_for_each_entry(req, list, hash_node) {
+ if (!req->file || fd != req->cqe.fd)
+ continue;
+ return req;
+ }
+ }
+ return NULL;
+}
+
static bool io_poll_disarm(struct io_kiocb *req)
__must_hold(&ctx->completion_lock)
{
@@ -6319,13 +6339,18 @@ static bool io_poll_disarm(struct io_kiocb *req)
struct io_cancel_data {
struct io_ring_ctx *ctx;
u64 data;
+ unsigned int flags;
};
static int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
__must_hold(&ctx->completion_lock)
{
- struct io_kiocb *req = io_poll_find(ctx, cd->data, false);
+ struct io_kiocb *req;
+ if (cd->flags & IORING_ASYNC_CANCEL_FD)
+ req = io_poll_fd_find(ctx, cd->data);
+ else
+ req = io_poll_find(ctx, cd->data, false);
if (!req)
return -ENOENT;
io_poll_cancel_req(req);
@@ -6762,7 +6787,11 @@ static bool io_cancel_cb(struct io_wq_work *work, void *data)
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
struct io_cancel_data *cd = data;
- return req->ctx == cd->ctx && req->cqe.user_data == cd->data;
+ if (req->ctx != cd->ctx)
+ return false;
+ if (cd->flags & IORING_ASYNC_CANCEL_FD)
+ return req->file && cd->data == req->cqe.fd;
+ return req->cqe.user_data == cd->data;
}
static int io_async_cancel_one(struct io_uring_task *tctx,
@@ -6811,9 +6840,11 @@ static int io_try_cancel_userdata(struct io_kiocb *req,
if (ret != -ENOENT)
goto out;
- spin_lock_irq(&ctx->timeout_lock);
- ret = io_timeout_cancel(ctx, cd->data);
- spin_unlock_irq(&ctx->timeout_lock);
+ if (!(cd->flags & IORING_ASYNC_CANCEL_FD)) {
+ spin_lock_irq(&ctx->timeout_lock);
+ ret = io_timeout_cancel(ctx, cd->data);
+ spin_unlock_irq(&ctx->timeout_lock);
+ }
out:
spin_unlock(&ctx->completion_lock);
return ret;
@@ -6826,11 +6857,17 @@ static int io_async_cancel_prep(struct io_kiocb *req,
return -EINVAL;
if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
return -EINVAL;
- if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags ||
- sqe->splice_fd_in)
+ if (sqe->ioprio || sqe->off || sqe->len || sqe->splice_fd_in)
+ return -EINVAL;
+
+ req->cancel.data = READ_ONCE(sqe->addr);
+ req->cancel.flags = READ_ONCE(sqe->cancel_flags);
+ if (req->cancel.flags & ~IORING_ASYNC_CANCEL_FD)
+ return -EINVAL;
+ else if ((req->cancel.flags & IORING_ASYNC_CANCEL_FD) &&
+ req->cancel.data > INT_MAX)
return -EINVAL;
- req->cancel.addr = READ_ONCE(sqe->addr);
return 0;
}
@@ -6839,7 +6876,8 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
struct io_ring_ctx *ctx = req->ctx;
struct io_cancel_data cd = {
.ctx = ctx,
- .data = req->cancel.addr,
+ .data = req->cancel.data,
+ .flags = req->cancel.flags,
};
struct io_tctx_node *node;
int ret;
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 1845cf7c80ba..806c473dde9f 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -187,6 +187,14 @@ enum {
#define IORING_POLL_UPDATE_EVENTS (1U << 1)
#define IORING_POLL_UPDATE_USER_DATA (1U << 2)
+/*
+ * ASYNC_CANCEL flags.
+ *
+ * IORING_ASYNC_CANCEL_FD Key off 'fd' for cancelation rather than the
+ * request 'user_data'
+ */
+#define IORING_ASYNC_CANCEL_FD (1U << 0)
+
/*
* IO completion data structure (Completion Queue Entry)
*/
--
2.35.1