public inbox for [email protected]
* [PATCHSET next 0/5] Extend cancelation support
From: Jens Axboe @ 2022-04-15 13:33 UTC (permalink / raw)
  To: io-uring

Hi,

We currently only support looking up and canceling requests based on
the user_data of the original request. Sometimes it can be useful to
instead key off the fd used in the original request, e.g. if a socket
goes away.

Patch 1 is a cleanup spotted while doing this, patches 2 and 3 are prep
patches, patch 4 adds support for IORING_ASYNC_CANCEL_ALL, and patch 5
adds support for IORING_ASYNC_CANCEL_FD.

If IORING_ASYNC_CANCEL_ALL is set, all requests matching the given
criteria are canceled. The return value is -ENOENT if none were found,
or a positive count of how many requests were found and canceled.

If IORING_ASYNC_CANCEL_FD is set, requests matching sqe->fd are
canceled rather than matching sqe->addr against the request user_data.
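
As a rough sketch (not part of the series), this is how the two modes
map onto the cancel SQE fields that io_async_cancel_prep() reads. The
helper name is made up, and the flag values are defined locally in case
the installed uapi header predates this series:

#include <linux/io_uring.h>
#include <string.h>

#ifndef IORING_ASYNC_CANCEL_ALL
#define IORING_ASYNC_CANCEL_ALL	(1U << 0)
#endif
#ifndef IORING_ASYNC_CANCEL_FD
#define IORING_ASYNC_CANCEL_FD	(1U << 1)
#endif

/* Fill a cancel SQE: key off 'fd' if CANCEL_FD is set, else off 'key' */
static void prep_async_cancel(struct io_uring_sqe *sqe, unsigned int flags,
			      __u64 key, int fd)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_ASYNC_CANCEL;
	sqe->cancel_flags = flags;
	if (flags & IORING_ASYNC_CANCEL_FD) {
		sqe->fd = fd;		/* matched against the request's file */
	} else {
		sqe->fd = -1;
		sqe->addr = key;	/* matched against the request's user_data */
	}
}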

v2:
- Add IORING_ASYNC_CANCEL_ALL
- Minor fixes for IORING_ASYNC_CANCEL_FD

-- 
Jens Axboe




* [PATCH 1/5] io_uring: remove dead 'poll_only' argument to io_poll_cancel()
From: Jens Axboe @ 2022-04-15 13:33 UTC (permalink / raw)
  To: io-uring; +Cc: Jens Axboe

It's only called from one location, and it always passes in 'false'.
Kill the argument, and just pass in 'false' to io_poll_find().

Signed-off-by: Jens Axboe <[email protected]>
---
 fs/io_uring.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index d3fc0c5b4e82..878d30a31606 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6316,11 +6316,10 @@ static bool io_poll_disarm(struct io_kiocb *req)
 	return true;
 }
 
-static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr,
-			  bool poll_only)
+static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
 	__must_hold(&ctx->completion_lock)
 {
-	struct io_kiocb *req = io_poll_find(ctx, sqe_addr, poll_only);
+	struct io_kiocb *req = io_poll_find(ctx, sqe_addr, false);
 
 	if (!req)
 		return -ENOENT;
@@ -6808,7 +6807,7 @@ static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
 		return 0;
 
 	spin_lock(&ctx->completion_lock);
-	ret = io_poll_cancel(ctx, sqe_addr, false);
+	ret = io_poll_cancel(ctx, sqe_addr);
 	if (ret != -ENOENT)
 		goto out;
 
-- 
2.35.1



* [PATCH 2/5] io_uring: pass in struct io_cancel_data consistently
From: Jens Axboe @ 2022-04-15 13:33 UTC (permalink / raw)
  To: io-uring; +Cc: Jens Axboe

In preparation for being able to key cancelation off more than just the
user_data, pass the io_cancel_data struct in consistently.

Signed-off-by: Jens Axboe <[email protected]>
---
 fs/io_uring.c | 48 ++++++++++++++++++++++++++++--------------------
 1 file changed, 28 insertions(+), 20 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 878d30a31606..a45ab678a455 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6316,10 +6316,15 @@ static bool io_poll_disarm(struct io_kiocb *req)
 	return true;
 }
 
-static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
+struct io_cancel_data {
+	struct io_ring_ctx *ctx;
+	u64 user_data;
+};
+
+static int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
 	__must_hold(&ctx->completion_lock)
 {
-	struct io_kiocb *req = io_poll_find(ctx, sqe_addr, false);
+	struct io_kiocb *req = io_poll_find(ctx, cd->user_data, false);
 
 	if (!req)
 		return -ENOENT;
@@ -6752,11 +6757,6 @@ static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
 	return 0;
 }
 
-struct io_cancel_data {
-	struct io_ring_ctx *ctx;
-	u64 user_data;
-};
-
 static bool io_cancel_cb(struct io_wq_work *work, void *data)
 {
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
@@ -6765,17 +6765,16 @@ static bool io_cancel_cb(struct io_wq_work *work, void *data)
 	return req->ctx == cd->ctx && req->cqe.user_data == cd->user_data;
 }
 
-static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
-			       struct io_ring_ctx *ctx)
+static int io_async_cancel_one(struct io_uring_task *tctx,
+			       struct io_cancel_data *cd)
 {
-	struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, };
 	enum io_wq_cancel cancel_ret;
 	int ret = 0;
 
 	if (!tctx || !tctx->io_wq)
 		return -ENOENT;
 
-	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false);
+	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, false);
 	switch (cancel_ret) {
 	case IO_WQ_CANCEL_OK:
 		ret = 0;
@@ -6791,14 +6790,14 @@ static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
 	return ret;
 }
 
-static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
+static int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret;
 
 	WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current);
 
-	ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
+	ret = io_async_cancel_one(req->task->io_uring, cd);
 	/*
 	 * Fall-through even for -EALREADY, as we may have poll armed
 	 * that need unarming.
@@ -6807,12 +6806,12 @@ static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
 		return 0;
 
 	spin_lock(&ctx->completion_lock);
-	ret = io_poll_cancel(ctx, sqe_addr);
+	ret = io_poll_cancel(ctx, cd);
 	if (ret != -ENOENT)
 		goto out;
 
 	spin_lock_irq(&ctx->timeout_lock);
-	ret = io_timeout_cancel(ctx, sqe_addr);
+	ret = io_timeout_cancel(ctx, cd->user_data);
 	spin_unlock_irq(&ctx->timeout_lock);
 out:
 	spin_unlock(&ctx->completion_lock);
@@ -6837,11 +6836,14 @@ static int io_async_cancel_prep(struct io_kiocb *req,
 static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	u64 sqe_addr = req->cancel.addr;
+	struct io_cancel_data cd = {
+		.ctx		= ctx,
+		.user_data	= req->cancel.addr,
+	};
 	struct io_tctx_node *node;
 	int ret;
 
-	ret = io_try_cancel_userdata(req, sqe_addr);
+	ret = io_try_cancel(req, &cd);
 	if (ret != -ENOENT)
 		goto done;
 
@@ -6851,7 +6853,7 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
 		struct io_uring_task *tctx = node->task->io_uring;
 
-		ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
+		ret = io_async_cancel_one(tctx, &cd);
 		if (ret != -ENOENT)
 			break;
 	}
@@ -7455,8 +7457,14 @@ static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
 	int ret = -ENOENT;
 
 	if (prev) {
-		if (!(req->task->flags & PF_EXITING))
-			ret = io_try_cancel_userdata(req, prev->cqe.user_data);
+		if (!(req->task->flags & PF_EXITING)) {
+			struct io_cancel_data cd = {
+				.ctx		= req->ctx,
+				.user_data	= prev->cqe.user_data,
+			};
+
+			ret = io_try_cancel(req, &cd);
+		}
 		io_req_complete_post(req, ret ?: -ETIME, 0);
 		io_put_req(prev);
 	} else {
-- 
2.35.1



* [PATCH 3/5] io_uring: rename io_cancel_data->user_data to just 'data'
From: Jens Axboe @ 2022-04-15 13:33 UTC (permalink / raw)
  To: io-uring; +Cc: Jens Axboe

In preparation for storing other data in there than just the user_data,
rename the member to just 'data'.

Signed-off-by: Jens Axboe <[email protected]>
---
 fs/io_uring.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index a45ab678a455..6dcf3ad7ee99 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6318,13 +6318,13 @@ static bool io_poll_disarm(struct io_kiocb *req)
 
 struct io_cancel_data {
 	struct io_ring_ctx *ctx;
-	u64 user_data;
+	u64 data;
 };
 
 static int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
 	__must_hold(&ctx->completion_lock)
 {
-	struct io_kiocb *req = io_poll_find(ctx, cd->user_data, false);
+	struct io_kiocb *req = io_poll_find(ctx, cd->data, false);
 
 	if (!req)
 		return -ENOENT;
@@ -6762,7 +6762,7 @@ static bool io_cancel_cb(struct io_wq_work *work, void *data)
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
 	struct io_cancel_data *cd = data;
 
-	return req->ctx == cd->ctx && req->cqe.user_data == cd->user_data;
+	return req->ctx == cd->ctx && req->cqe.user_data == cd->data;
 }
 
 static int io_async_cancel_one(struct io_uring_task *tctx,
@@ -6811,7 +6811,7 @@ static int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd)
 		goto out;
 
 	spin_lock_irq(&ctx->timeout_lock);
-	ret = io_timeout_cancel(ctx, cd->user_data);
+	ret = io_timeout_cancel(ctx, cd->data);
 	spin_unlock_irq(&ctx->timeout_lock);
 out:
 	spin_unlock(&ctx->completion_lock);
@@ -6837,8 +6837,8 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_cancel_data cd = {
-		.ctx		= ctx,
-		.user_data	= req->cancel.addr,
+		.ctx	= ctx,
+		.data	= req->cancel.addr,
 	};
 	struct io_tctx_node *node;
 	int ret;
@@ -7459,8 +7459,8 @@ static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
 	if (prev) {
 		if (!(req->task->flags & PF_EXITING)) {
 			struct io_cancel_data cd = {
-				.ctx		= req->ctx,
-				.user_data	= prev->cqe.user_data,
+				.ctx	= req->ctx,
+				.data	= prev->cqe.user_data,
 			};
 
 			ret = io_try_cancel(req, &cd);
-- 
2.35.1



* [PATCH 4/5] io_uring: add support for IORING_ASYNC_CANCEL_ALL
From: Jens Axboe @ 2022-04-15 13:33 UTC (permalink / raw)
  To: io-uring; +Cc: Jens Axboe

The current cancelation will look up and cancel the first request it
finds based on the key passed in. Add a flag that allows canceling any
request that matches the key. It completes with either -ENOENT if none
were found, or res > 0 for the number of entries canceled.
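
As a rough userspace illustration of the semantics (not part of the
patch): arm two poll requests sharing a user_data key, then cancel both
with one SQE and read the count back from the cancel's own CQE. liburing
is assumed for ring setup and for its io_uring_prep_cancel(sqe,
user_data, flags) helper; IORING_ASYNC_CANCEL_ALL is defined locally in
case the installed headers predate this series.

#include <liburing.h>
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#ifndef IORING_ASYNC_CANCEL_ALL
#define IORING_ASYNC_CANCEL_ALL	(1U << 0)
#endif

#define KEY	0x1234

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fds[2], i;

	if (pipe(fds) < 0 || io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* two polls that will never trigger, both keyed on KEY */
	for (i = 0; i < 2; i++) {
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_poll_add(sqe, fds[0], POLLIN);
		sqe->user_data = KEY;
	}
	io_uring_submit(&ring);

	/* one cancel request that matches both of them */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_cancel(sqe, (void *)(uintptr_t)KEY, IORING_ASYNC_CANCEL_ALL);
	sqe->user_data = 0xdead;
	io_uring_submit(&ring);

	/* expect two -ECANCELED completions plus the cancel's own CQE */
	for (i = 0; i < 3; i++) {
		if (io_uring_wait_cqe(&ring, &cqe))
			break;
		if (cqe->user_data == 0xdead)
			printf("cancel res %d (count canceled, or -ENOENT)\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}

Without the flag, only the first match is canceled and the cancel
completes with res = 0, i.e. the existing behavior.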

Signed-off-by: Jens Axboe <[email protected]>
---
 fs/io_uring.c                 | 60 +++++++++++++++++++++++++----------
 include/uapi/linux/io_uring.h |  7 ++++
 2 files changed, 50 insertions(+), 17 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 6dcf3ad7ee99..c7e5d60fbbe5 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -568,6 +568,7 @@ struct io_sync {
 struct io_cancel {
 	struct file			*file;
 	u64				addr;
+	u32				flags;
 };
 
 struct io_timeout {
@@ -6319,6 +6320,7 @@ static bool io_poll_disarm(struct io_kiocb *req)
 struct io_cancel_data {
 	struct io_ring_ctx *ctx;
 	u64 data;
+	u32 flags;
 };
 
 static int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
@@ -6774,7 +6776,8 @@ static int io_async_cancel_one(struct io_uring_task *tctx,
 	if (!tctx || !tctx->io_wq)
 		return -ENOENT;
 
-	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, false);
+	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd,
+					cd->flags & IORING_ASYNC_CANCEL_ALL);
 	switch (cancel_ret) {
 	case IO_WQ_CANCEL_OK:
 		ret = 0;
@@ -6825,27 +6828,34 @@ static int io_async_cancel_prep(struct io_kiocb *req,
 		return -EINVAL;
 	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
 		return -EINVAL;
-	if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags ||
-	    sqe->splice_fd_in)
+	if (sqe->ioprio || sqe->off || sqe->len || sqe->splice_fd_in)
 		return -EINVAL;
 
 	req->cancel.addr = READ_ONCE(sqe->addr);
+	req->cancel.flags = READ_ONCE(sqe->cancel_flags);
+	if (req->cancel.flags & ~IORING_ASYNC_CANCEL_ALL)
+		return -EINVAL;
+
 	return 0;
 }
 
-static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
+static int __io_async_cancel(struct io_cancel_data *cd, struct io_kiocb *req,
+			     unsigned int issue_flags)
 {
-	struct io_ring_ctx *ctx = req->ctx;
-	struct io_cancel_data cd = {
-		.ctx	= ctx,
-		.data	= req->cancel.addr,
-	};
+	bool cancel_all = cd->flags & IORING_ASYNC_CANCEL_ALL;
+	struct io_ring_ctx *ctx = cd->ctx;
 	struct io_tctx_node *node;
-	int ret;
+	int ret, nr = 0;
 
-	ret = io_try_cancel(req, &cd);
-	if (ret != -ENOENT)
-		goto done;
+	do {
+		ret = io_try_cancel(req, cd);
+		if (ret == -ENOENT)
+			break;
+		if (!cancel_all)
+			return ret;
+		nr++;
+		io_run_task_work();
+	} while (1);
 
 	/* slow path, try all io-wq's */
 	io_ring_submit_lock(ctx, issue_flags);
@@ -6853,12 +6863,28 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
 		struct io_uring_task *tctx = node->task->io_uring;
 
-		ret = io_async_cancel_one(tctx, &cd);
-		if (ret != -ENOENT)
-			break;
+		ret = io_async_cancel_one(tctx, cd);
+		if (ret != -ENOENT) {
+			if (!cancel_all)
+				break;
+			nr++;
+			io_run_task_work();
+		}
 	}
 	io_ring_submit_unlock(ctx, issue_flags);
-done:
+	return cancel_all ? nr : ret;
+}
+
+static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
+{
+	struct io_cancel_data cd = {
+		.ctx	= req->ctx,
+		.data	= req->cancel.addr,
+		.flags	= req->cancel.flags,
+	};
+	int ret;
+
+	ret = __io_async_cancel(&cd, req, issue_flags);
 	if (ret < 0)
 		req_set_fail(req);
 	io_req_complete_post(req, ret, 0);
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 1845cf7c80ba..476e58a2837f 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -187,6 +187,13 @@ enum {
 #define IORING_POLL_UPDATE_EVENTS	(1U << 1)
 #define IORING_POLL_UPDATE_USER_DATA	(1U << 2)
 
+/*
+ * ASYNC_CANCEL flags.
+ *
+ * IORING_ASYNC_CANCEL_ALL	Cancel all requests that match the given key
+ */
+#define IORING_ASYNC_CANCEL_ALL	(1U << 0)
+
 /*
  * IO completion data structure (Completion Queue Entry)
  */
-- 
2.35.1



* [PATCH 5/5] io_uring: allow IORING_OP_ASYNC_CANCEL with 'fd' key
From: Jens Axboe @ 2022-04-15 13:33 UTC (permalink / raw)
  To: io-uring; +Cc: Jens Axboe

Currently sqe->addr must contain the user_data of the request being
canceled. Introduce the IORING_ASYNC_CANCEL_FD flag, which tells the
kernel to key cancelation off the file fd instead. This allows canceling
any request that a) uses a file, and b) was assigned the file from the
fd value being passed in.
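
A rough userspace sketch (not part of the patch) of the intended use,
e.g. tearing down everything pending on a socket that has gone away.
liburing is assumed for ring setup, cancel_all_on_fd() and CANCEL_TAG
are made-up names, and the flag values are defined locally in case the
installed headers predate this series:

#include <errno.h>
#include <liburing.h>
#include <string.h>

#ifndef IORING_ASYNC_CANCEL_ALL
#define IORING_ASYNC_CANCEL_ALL	(1U << 0)
#endif
#ifndef IORING_ASYNC_CANCEL_FD
#define IORING_ASYNC_CANCEL_FD	(1U << 1)
#endif

#define CANCEL_TAG	((__u64) -1)

/* Cancel every pending request issued against a normal (non-fixed) fd */
static int cancel_all_on_fd(struct io_uring *ring, int fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	__u64 ud;
	int ret;

	if (!sqe)
		return -EBUSY;
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_ASYNC_CANCEL;
	sqe->fd = fd;		/* the key: matched against the request's file */
	sqe->cancel_flags = IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_ALL;
	sqe->user_data = CANCEL_TAG;
	io_uring_submit(ring);

	/* Reap until the cancel's own CQE shows up; the requests it kills
	 * complete with -ECANCELED along the way. */
	do {
		if (io_uring_wait_cqe(ring, &cqe))
			return -1;
		ud = cqe->user_data;
		ret = cqe->res;
		io_uring_cqe_seen(ring, cqe);
	} while (ud != CANCEL_TAG);

	/* number of requests canceled, -ENOENT if none matched, or -EBADF */
	return ret;
}

If the SQE also sets IOSQE_FIXED_FILE, the fd is treated as a fixed-file
index instead, matching the io_file_get_fixed() path added to
io_async_cancel() below.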

Signed-off-by: Jens Axboe <[email protected]>
---
 fs/io_uring.c                 | 65 ++++++++++++++++++++++++++++++-----
 include/uapi/linux/io_uring.h |  3 ++
 2 files changed, 60 insertions(+), 8 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index c7e5d60fbbe5..4a3dbdb36d0d 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -569,6 +569,7 @@ struct io_cancel {
 	struct file			*file;
 	u64				addr;
 	u32				flags;
+	s32				fd;
 };
 
 struct io_timeout {
@@ -6307,6 +6308,26 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
 	return NULL;
 }
 
+static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
+					  struct file *file)
+	__must_hold(&ctx->completion_lock)
+{
+	struct io_kiocb *req;
+	int i;
+
+	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
+		struct hlist_head *list;
+
+		list = &ctx->cancel_hash[i];
+		hlist_for_each_entry(req, list, hash_node) {
+			if (req->file != file)
+				continue;
+			return req;
+		}
+	}
+	return NULL;
+}
+
 static bool io_poll_disarm(struct io_kiocb *req)
 	__must_hold(&ctx->completion_lock)
 {
@@ -6319,15 +6340,22 @@ static bool io_poll_disarm(struct io_kiocb *req)
 
 struct io_cancel_data {
 	struct io_ring_ctx *ctx;
-	u64 data;
+	union {
+		u64 data;
+		struct file *file;
+	};
 	u32 flags;
 };
 
 static int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
 	__must_hold(&ctx->completion_lock)
 {
-	struct io_kiocb *req = io_poll_find(ctx, cd->data, false);
+	struct io_kiocb *req;
 
+	if (cd->flags & IORING_ASYNC_CANCEL_FD)
+		req = io_poll_file_find(ctx, cd->file);
+	else
+		req = io_poll_find(ctx, cd->data, false);
 	if (!req)
 		return -ENOENT;
 	io_poll_cancel_req(req);
@@ -6764,7 +6792,11 @@ static bool io_cancel_cb(struct io_wq_work *work, void *data)
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
 	struct io_cancel_data *cd = data;
 
-	return req->ctx == cd->ctx && req->cqe.user_data == cd->data;
+	if (req->ctx != cd->ctx)
+		return false;
+	if (cd->flags & IORING_ASYNC_CANCEL_FD)
+		return req->file == cd->file;
+	return req->cqe.user_data == cd->data;
 }
 
 static int io_async_cancel_one(struct io_uring_task *tctx,
@@ -6813,9 +6845,11 @@ static int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd)
 	if (ret != -ENOENT)
 		goto out;
 
-	spin_lock_irq(&ctx->timeout_lock);
-	ret = io_timeout_cancel(ctx, cd->data);
-	spin_unlock_irq(&ctx->timeout_lock);
+	if (!(cd->flags & IORING_ASYNC_CANCEL_FD)) {
+		spin_lock_irq(&ctx->timeout_lock);
+		ret = io_timeout_cancel(ctx, cd->data);
+		spin_unlock_irq(&ctx->timeout_lock);
+	}
 out:
 	spin_unlock(&ctx->completion_lock);
 	return ret;
@@ -6826,15 +6860,17 @@ static int io_async_cancel_prep(struct io_kiocb *req,
 {
 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
-	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
+	if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
 		return -EINVAL;
 	if (sqe->ioprio || sqe->off || sqe->len || sqe->splice_fd_in)
 		return -EINVAL;
 
 	req->cancel.addr = READ_ONCE(sqe->addr);
 	req->cancel.flags = READ_ONCE(sqe->cancel_flags);
-	if (req->cancel.flags & ~IORING_ASYNC_CANCEL_ALL)
+	if (req->cancel.flags & ~(IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_FD))
 		return -EINVAL;
+	if (req->cancel.flags & IORING_ASYNC_CANCEL_FD)
+		req->cancel.fd = READ_ONCE(sqe->fd);
 
 	return 0;
 }
@@ -6884,7 +6920,20 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 	};
 	int ret;
 
+	if (cd.flags & IORING_ASYNC_CANCEL_FD) {
+		if (req->flags & REQ_F_FIXED_FILE)
+			req->file = io_file_get_fixed(req, req->cancel.fd, issue_flags);
+		else
+			req->file = io_file_get_normal(req, req->cancel.fd);
+		if (!req->file) {
+			ret = -EBADF;
+			goto done;
+		}
+		cd.file = req->file;
+	}
+
 	ret = __io_async_cancel(&cd, req, issue_flags);
+done:
 	if (ret < 0)
 		req_set_fail(req);
 	io_req_complete_post(req, ret, 0);
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 476e58a2837f..cc7fe82a1798 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -191,8 +191,11 @@ enum {
  * ASYNC_CANCEL flags.
  *
  * IORING_ASYNC_CANCEL_ALL	Cancel all requests that match the given key
+ * IORING_ASYNC_CANCEL_FD	Key off 'fd' for cancelation rather than the
+ *				request 'user_data'
  */
 #define IORING_ASYNC_CANCEL_ALL	(1U << 0)
+#define IORING_ASYNC_CANCEL_FD	(1U << 1)
 
 /*
  * IO completion data structure (Completion Queue Entry)
-- 
2.35.1


