public inbox for [email protected]
* [PATCH V2] io_uring: cancelable uring_cmd
@ 2023-09-22 14:28 Ming Lei
  2023-09-22 15:02 ` Jens Axboe
  0 siblings, 1 reply; 3+ messages in thread
From: Ming Lei @ 2023-09-22 14:28 UTC (permalink / raw)
  To: Jens Axboe, io-uring, linux-block; +Cc: Ming Lei, Gabriel Krisman Bertazi

A uring_cmd may never complete on its own. In ublk, for example, a
uring cmd isn't completed until a new block request arrives from the
ublk block device.

Add cancelable uring_cmd to provide a mechanism for drivers to cancel
pending commands in their own way.

Add the io_uring_cmd_mark_cancelable() API for a driver to mark one
command as cancelable; io_uring will then cancel this command in
io_uring_cancel_generic(). The ->uring_cmd() callback is reused for
canceling the command in the driver's way, so the driver gets notified
of the cancellation by io_uring.

Also add io_uring_cmd_get_task() to help the driver's cancel handler
deal with the canceling.
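
As an illustration only (not part of this patch), a driver's
->uring_cmd() might use the two APIs roughly as below; the my_drv_*
helpers are hypothetical:

static int my_drv_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	if (issue_flags & IO_URING_F_CANCEL) {
		/*
		 * io_uring is being torn down: abort or complete the
		 * parked command in a driver-specific way, typically
		 * ending in io_uring_cmd_done()
		 */
		my_drv_cancel_cmd(cmd, issue_flags);
		return 0;
	}

	/* normal issue: mark cancelable, then park until backend I/O */
	io_uring_cmd_mark_cancelable(cmd, issue_flags);
	my_drv_queue_cmd(cmd);
	return -EIOCBQUEUED;
}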

Cc: Gabriel Krisman Bertazi <[email protected]>
Suggested-by: Jens Axboe <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
---

ublk patches:
	https://github.com/ming1/linux/commits/uring_exit_and_ublk

V2:
	- use ->uring_cmd() with IO_URING_F_CANCEL for canceling command

 include/linux/io_uring.h       | 15 +++++++++++
 include/linux/io_uring_types.h |  6 +++++
 include/uapi/linux/io_uring.h  |  7 +++--
 io_uring/io_uring.c            | 35 +++++++++++++++++++++++++
 io_uring/uring_cmd.c           | 48 ++++++++++++++++++++++++++++++++++
 5 files changed, 109 insertions(+), 2 deletions(-)

diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 106cdc55ff3b..205179a1c191 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -20,6 +20,9 @@ enum io_uring_cmd_flags {
 	IO_URING_F_SQE128		= (1 << 8),
 	IO_URING_F_CQE32		= (1 << 9),
 	IO_URING_F_IOPOLL		= (1 << 10),
+
+	/* set when io_uring wants to cancel one issued command */
+	IO_URING_F_CANCEL		= (1 << 11),
 };
 
 struct io_uring_cmd {
@@ -82,6 +85,9 @@ static inline void io_uring_free(struct task_struct *tsk)
 		__io_uring_free(tsk);
 }
 int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags);
+int io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
+		unsigned int issue_flags);
+struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd);
 #else
 static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
 			      struct iov_iter *iter, void *ioucmd)
@@ -122,6 +128,15 @@ static inline int io_uring_cmd_sock(struct io_uring_cmd *cmd,
 {
 	return -EOPNOTSUPP;
 }
+static inline int io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
+		unsigned int issue_flags)
+{
+	return -EOPNOTSUPP;
+}
+static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
+{
+	return NULL;
+}
 #endif
 
 #endif
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 13d19b9be9f4..1571db76bec1 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -265,6 +265,12 @@ struct io_ring_ctx {
 		 */
 		struct io_wq_work_list	iopoll_list;
 		bool			poll_multi_queue;
+
+		/*
+		 * Any cancelable uring_cmd is added to this list in
+		 * ->uring_cmd() by io_uring_cmd_mark_cancelable()
+		 */
+		struct hlist_head	cancelable_uring_cmd;
 	} ____cacheline_aligned_in_smp;
 
 	struct {
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 8e61f8b7c2ce..29a7a7e71f57 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -249,10 +249,13 @@ enum io_uring_op {
  * sqe->uring_cmd_flags
  * IORING_URING_CMD_FIXED	use registered buffer; pass this flag
  *				along with setting sqe->buf_index.
+ * IORING_URING_CMD_CANCELABLE	not for userspace
  * IORING_URING_CMD_POLLED	driver use only
  */
-#define IORING_URING_CMD_FIXED	(1U << 0)
-#define IORING_URING_CMD_POLLED	(1U << 31)
+#define IORING_URING_CMD_FIXED		(1U << 0)
+/* set by driver, and handled by io_uring to cancel this cmd */
+#define IORING_URING_CMD_CANCELABLE	(1U << 30)
+#define IORING_URING_CMD_POLLED		(1U << 31)
 
 
 /*
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 783ed0fff71b..a3135fd47a4e 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -3256,6 +3256,40 @@ static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
 	return ret;
 }
 
+static bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
+		struct task_struct *task, bool cancel_all)
+{
+	struct hlist_node *tmp;
+	struct io_kiocb *req;
+	bool ret = false;
+
+	mutex_lock(&ctx->uring_lock);
+	hlist_for_each_entry_safe(req, tmp, &ctx->cancelable_uring_cmd,
+			hash_node) {
+		struct io_uring_cmd *cmd = io_kiocb_to_cmd(req,
+				struct io_uring_cmd);
+		struct file *file = req->file;
+
+		if (WARN_ON_ONCE(!file->f_op->uring_cmd))
+			continue;
+
+		if (!cancel_all && req->task != task)
+			continue;
+
+		if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
+			/* ->sqe isn't available if no async data */
+			if (!req_has_async_data(req))
+				cmd->sqe = NULL;
+			file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL);
+			ret = true;
+		}
+	}
+	io_submit_flush_completions(ctx);
+	mutex_unlock(&ctx->uring_lock);
+
+	return ret;
+}
+
 static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
 						struct task_struct *task,
 						bool cancel_all)
@@ -3307,6 +3341,7 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
 	ret |= io_kill_timeouts(ctx, task, cancel_all);
 	if (task)
 		ret |= io_run_task_work() > 0;
+	ret |= io_uring_try_cancel_uring_cmd(ctx, task, cancel_all);
 	return ret;
 }
 
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index 537795fddc87..d6b200a0be33 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -13,6 +13,52 @@
 #include "rsrc.h"
 #include "uring_cmd.h"
 
+static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
+		unsigned int issue_flags)
+{
+	if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
+		struct io_kiocb *req = cmd_to_io_kiocb(cmd);
+		struct io_ring_ctx *ctx = req->ctx;
+
+		io_ring_submit_lock(ctx, issue_flags);
+		cmd->flags &= ~IORING_URING_CMD_CANCELABLE;
+		hlist_del(&req->hash_node);
+		io_ring_submit_unlock(ctx, issue_flags);
+	}
+}
+
+/*
+ * Mark this command as cancelable, then io_uring_try_cancel_uring_cmd()
+ * will try to cancel this issued command by calling ->uring_cmd() with
+ * IO_URING_F_CANCEL set in issue_flags.
+ *
+ * The command is guaranteed to not be done when ->uring_cmd() is called
+ * with IO_URING_F_CANCEL, but it is the driver's responsibility to deal
+ * with the race between io_uring canceling and normal completion.
+ */
+int io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
+		unsigned int issue_flags)
+{
+	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
+	struct io_ring_ctx *ctx = req->ctx;
+
+	io_ring_submit_lock(ctx, issue_flags);
+	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
+		cmd->flags |= IORING_URING_CMD_CANCELABLE;
+		hlist_add_head(&req->hash_node, &ctx->cancelable_uring_cmd);
+	}
+	io_ring_submit_unlock(ctx, issue_flags);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);
+
+struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
+{
+	return cmd_to_io_kiocb(cmd)->task;
+}
+EXPORT_SYMBOL_GPL(io_uring_cmd_get_task);
+
 static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
 {
 	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
@@ -56,6 +102,8 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
 {
 	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
 
+	io_uring_cmd_del_cancelable(ioucmd, issue_flags);
+
 	if (ret < 0)
 		req_set_fail(req);
 
-- 
2.41.0


* Re: [PATCH V2] io_uring: cancelable uring_cmd
  2023-09-22 14:28 [PATCH V2] io_uring: cancelable uring_cmd Ming Lei
@ 2023-09-22 15:02 ` Jens Axboe
  2023-09-22 15:24   ` Ming Lei
  0 siblings, 1 reply; 3+ messages in thread
From: Jens Axboe @ 2023-09-22 15:02 UTC (permalink / raw)
  To: Ming Lei, io-uring, linux-block; +Cc: Gabriel Krisman Bertazi

On 9/22/23 8:28 AM, Ming Lei wrote:
> A uring_cmd may never complete on its own. In ublk, for example, a
> uring cmd isn't completed until a new block request arrives from the
> ublk block device.
> 
> Add cancelable uring_cmd to provide a mechanism for drivers to cancel
> pending commands in their own way.
> 
> Add the io_uring_cmd_mark_cancelable() API for a driver to mark one
> command as cancelable; io_uring will then cancel this command in
> io_uring_cancel_generic(). The ->uring_cmd() callback is reused for
> canceling the command in the driver's way, so the driver gets notified
> of the cancellation by io_uring.
> 
> Also add io_uring_cmd_get_task() to help the driver's cancel handler
> deal with the canceling.

This looks better, a few comments:

> diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
> index 8e61f8b7c2ce..29a7a7e71f57 100644
> --- a/include/uapi/linux/io_uring.h
> +++ b/include/uapi/linux/io_uring.h
> @@ -249,10 +249,13 @@ enum io_uring_op {
>   * sqe->uring_cmd_flags
>   * IORING_URING_CMD_FIXED	use registered buffer; pass this flag
>   *				along with setting sqe->buf_index.
> + * IORING_URING_CMD_CANCELABLE	not for userspace
>   * IORING_URING_CMD_POLLED	driver use only
>   */
> -#define IORING_URING_CMD_FIXED	(1U << 0)
> -#define IORING_URING_CMD_POLLED	(1U << 31)
> +#define IORING_URING_CMD_FIXED		(1U << 0)
> +/* set by driver, and handled by io_uring to cancel this cmd */
> +#define IORING_URING_CMD_CANCELABLE	(1U << 30)
> +#define IORING_URING_CMD_POLLED		(1U << 31)

If IORING_URING_CMD_CANCELABLE isn't UAPI, why stuff it in here? Should we
have a split where we retain the upper 8 bits for internal use, or
something like that?

> diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
> index 783ed0fff71b..a3135fd47a4e 100644
> --- a/io_uring/io_uring.c
> +++ b/io_uring/io_uring.c
> @@ -3256,6 +3256,40 @@ static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
>  	return ret;
>  }
>  
> +static bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
> +		struct task_struct *task, bool cancel_all)
> +{
> +	struct hlist_node *tmp;
> +	struct io_kiocb *req;
> +	bool ret = false;
> +
> +	mutex_lock(&ctx->uring_lock);
> +	hlist_for_each_entry_safe(req, tmp, &ctx->cancelable_uring_cmd,
> +			hash_node) {
> +		struct io_uring_cmd *cmd = io_kiocb_to_cmd(req,
> +				struct io_uring_cmd);
> +		struct file *file = req->file;
> +
> +		if (WARN_ON_ONCE(!file->f_op->uring_cmd))
> +			continue;
> +
> +		if (!cancel_all && req->task != task)
> +			continue;
> +
> +		if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
> +			/* ->sqe isn't available if no async data */
> +			if (!req_has_async_data(req))
> +				cmd->sqe = NULL;
> +			file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL);
> +			ret = true;
> +		}
> +	}
> +	io_submit_flush_completions(ctx);
> +	mutex_unlock(&ctx->uring_lock);
> +
> +	return ret;
> +}

I think it'd be saner to drop uring_lock here, and then:

> @@ -3307,6 +3341,7 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
>  	ret |= io_kill_timeouts(ctx, task, cancel_all);
>  	if (task)
>  		ret |= io_run_task_work() > 0;
> +	ret |= io_uring_try_cancel_uring_cmd(ctx, task, cancel_all);
>  	return ret;
>  }

move this hunk into the uring_lock section. Also ensure that we do run
task_work for cancelation, should the uring_cmd side require that
(either now or eventually).
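
Concretely, that rework might look like this (just a sketch of the
suggestion, not the posted patch):

static bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
		struct task_struct *task, bool cancel_all)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool ret = false;

	/* caller now holds uring_lock for us */
	lockdep_assert_held(&ctx->uring_lock);

	hlist_for_each_entry_safe(req, tmp, &ctx->cancelable_uring_cmd,
			hash_node) {
		struct io_uring_cmd *cmd = io_kiocb_to_cmd(req,
				struct io_uring_cmd);
		struct file *file = req->file;

		if (WARN_ON_ONCE(!file->f_op->uring_cmd))
			continue;

		if (!cancel_all && req->task != task)
			continue;

		if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
			/* ->sqe isn't available if no async data */
			if (!req_has_async_data(req))
				cmd->sqe = NULL;
			file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL);
			ret = true;
		}
	}
	io_submit_flush_completions(ctx);
	return ret;
}

with the call in io_uring_try_cancel_requests() moved into the block
that already takes ctx->uring_lock for the IOPOLL case.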

> diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
> index 537795fddc87..d6b200a0be33 100644
> --- a/io_uring/uring_cmd.c
> +++ b/io_uring/uring_cmd.c
> @@ -13,6 +13,52 @@
>  #include "rsrc.h"
>  #include "uring_cmd.h"
>  
> +static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
> +		unsigned int issue_flags)
> +{
> +	if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
> +		struct io_kiocb *req = cmd_to_io_kiocb(cmd);
> +		struct io_ring_ctx *ctx = req->ctx;
> +
> +		io_ring_submit_lock(ctx, issue_flags);
> +		cmd->flags &= ~IORING_URING_CMD_CANCELABLE;
> +		hlist_del(&req->hash_node);
> +		io_ring_submit_unlock(ctx, issue_flags);
> +	}
> +}

static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE))
		return;

	io_ring_submit_lock(ctx, issue_flags);
	cmd->flags &= ~IORING_URING_CMD_CANCELABLE;
	hlist_del(&req->hash_node);
	io_ring_submit_unlock(ctx, issue_flags);
}

is cleaner imho. Minor nit.

> +
> +/*
> + * Mark this command as cancelable, then io_uring_try_cancel_uring_cmd()
> + * will try to cancel this issued command by calling ->uring_cmd() with
> + * IO_URING_F_CANCEL set in issue_flags.
> + *
> + * The command is guaranteed to not be done when ->uring_cmd() is called
> + * with IO_URING_F_CANCEL, but it is the driver's responsibility to deal
> + * with the race between io_uring canceling and normal completion.
> + */
> +int io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
> +		unsigned int issue_flags)
> +{
> +	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
> +	struct io_ring_ctx *ctx = req->ctx;
> +
> +	io_ring_submit_lock(ctx, issue_flags);
> +	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
> +		cmd->flags |= IORING_URING_CMD_CANCELABLE;
> +		hlist_add_head(&req->hash_node, &ctx->cancelable_uring_cmd);
> +	}
> +	io_ring_submit_unlock(ctx, issue_flags);
> +
> +	return 0;
> +}

A bit inconsistent here in terms of the locking. I'm assuming the
marking happens within issue, in which case it should be fine to do:

int io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
		cmd->flags |= IORING_URING_CMD_CANCELABLE;
		io_ring_submit_lock(ctx, issue_flags);
		hlist_add_head(&req->hash_node, &ctx->cancelable_uring_cmd);
		io_ring_submit_unlock(ctx, issue_flags);
	}

	return 0;
}

-- 
Jens Axboe


* Re: [PATCH V2] io_uring: cancelable uring_cmd
  2023-09-22 15:02 ` Jens Axboe
@ 2023-09-22 15:24   ` Ming Lei
  0 siblings, 0 replies; 3+ messages in thread
From: Ming Lei @ 2023-09-22 15:24 UTC (permalink / raw)
  To: Jens Axboe; +Cc: io-uring, linux-block, Gabriel Krisman Bertazi, ming.lei

On Fri, Sep 22, 2023 at 09:02:44AM -0600, Jens Axboe wrote:
> On 9/22/23 8:28 AM, Ming Lei wrote:
> > A uring_cmd may never complete on its own. In ublk, for example, a
> > uring cmd isn't completed until a new block request arrives from the
> > ublk block device.
> > 
> > Add cancelable uring_cmd to provide a mechanism for drivers to cancel
> > pending commands in their own way.
> > 
> > Add the io_uring_cmd_mark_cancelable() API for a driver to mark one
> > command as cancelable; io_uring will then cancel this command in
> > io_uring_cancel_generic(). The ->uring_cmd() callback is reused for
> > canceling the command in the driver's way, so the driver gets notified
> > of the cancellation by io_uring.
> > 
> > Also add io_uring_cmd_get_task() to help the driver's cancel handler
> > deal with the canceling.
> 
> This looks better, a few comments:
> 
> > diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
> > index 8e61f8b7c2ce..29a7a7e71f57 100644
> > --- a/include/uapi/linux/io_uring.h
> > +++ b/include/uapi/linux/io_uring.h
> > @@ -249,10 +249,13 @@ enum io_uring_op {
> >   * sqe->uring_cmd_flags
> >   * IORING_URING_CMD_FIXED	use registered buffer; pass this flag
> >   *				along with setting sqe->buf_index.
> > + * IORING_URING_CMD_CANCELABLE	not for userspace
> >   * IORING_URING_CMD_POLLED	driver use only
> >   */
> > -#define IORING_URING_CMD_FIXED	(1U << 0)
> > -#define IORING_URING_CMD_POLLED	(1U << 31)
> > +#define IORING_URING_CMD_FIXED		(1U << 0)
> > +/* set by driver, and handled by io_uring to cancel this cmd */
> > +#define IORING_URING_CMD_CANCELABLE	(1U << 30)
> > +#define IORING_URING_CMD_POLLED		(1U << 31)
> 
> If IORING_URING_CMD_CANCELABLE isn't UAPI, why stuff it in here? Should we
> have a split where we retain the upper 8 bits for internal use, or
> something like that?

Yeah, it is for internal use, the same as IORING_URING_CMD_POLLED.

I think we can retain the upper 8 bits for internal use, and move the
two definitions into a kernel header.
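
One way that split could look (a sketch, only to illustrate the idea):

/* include/uapi/linux/io_uring.h: flags userspace may actually set */
#define IORING_URING_CMD_FIXED		(1U << 0)
#define IORING_URING_CMD_MASK		IORING_URING_CMD_FIXED

/* include/linux/io_uring.h: top 8 bits reserved for kernel-internal use */
#define IORING_URING_CMD_CANCELABLE	(1U << 30)
#define IORING_URING_CMD_POLLED		(1U << 31)

with the prep path failing sqe->uring_cmd_flags & ~IORING_URING_CMD_MASK
with -EINVAL so userspace can never set the internal bits.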

> 
> > diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
> > index 783ed0fff71b..a3135fd47a4e 100644
> > --- a/io_uring/io_uring.c
> > +++ b/io_uring/io_uring.c
> > @@ -3256,6 +3256,40 @@ static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
> >  	return ret;
> >  }
> >  
> > +static bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
> > +		struct task_struct *task, bool cancel_all)
> > +{
> > +	struct hlist_node *tmp;
> > +	struct io_kiocb *req;
> > +	bool ret = false;
> > +
> > +	mutex_lock(&ctx->uring_lock);
> > +	hlist_for_each_entry_safe(req, tmp, &ctx->cancelable_uring_cmd,
> > +			hash_node) {
> > +		struct io_uring_cmd *cmd = io_kiocb_to_cmd(req,
> > +				struct io_uring_cmd);
> > +		struct file *file = req->file;
> > +
> > +		if (WARN_ON_ONCE(!file->f_op->uring_cmd))
> > +			continue;
> > +
> > +		if (!cancel_all && req->task != task)
> > +			continue;
> > +
> > +		if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
> > +			/* ->sqe isn't available if no async data */
> > +			if (!req_has_async_data(req))
> > +				cmd->sqe = NULL;
> > +			file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL);
> > +			ret = true;
> > +		}
> > +	}
> > +	io_submit_flush_completions(ctx);
> > +	mutex_unlock(&ctx->uring_lock);
> > +
> > +	return ret;
> > +}
> 
> I think it'd be saner to drop uring_lock here, and then:
> 
> > @@ -3307,6 +3341,7 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
> >  	ret |= io_kill_timeouts(ctx, task, cancel_all);
> >  	if (task)
> >  		ret |= io_run_task_work() > 0;
> > +	ret |= io_uring_try_cancel_uring_cmd(ctx, task, cancel_all);
> >  	return ret;
> >  }
> 
> move this hunk into the uring_lock section. Also ensure that we do run
> task_work for cancelation, should the uring_cmd side require that
> (either now or eventually).

OK, io_run_task_work() already runs the task_work there.

> 
> > diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
> > index 537795fddc87..d6b200a0be33 100644
> > --- a/io_uring/uring_cmd.c
> > +++ b/io_uring/uring_cmd.c
> > @@ -13,6 +13,52 @@
> >  #include "rsrc.h"
> >  #include "uring_cmd.h"
> >  
> > +static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
> > +		unsigned int issue_flags)
> > +{
> > +	if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
> > +		struct io_kiocb *req = cmd_to_io_kiocb(cmd);
> > +		struct io_ring_ctx *ctx = req->ctx;
> > +
> > +		io_ring_submit_lock(ctx, issue_flags);
> > +		cmd->flags &= ~IORING_URING_CMD_CANCELABLE;
> > +		hlist_del(&req->hash_node);
> > +		io_ring_submit_unlock(ctx, issue_flags);
> > +	}
> > +}
> 
> static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
> 		unsigned int issue_flags)
> {
> 	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
> 	struct io_ring_ctx *ctx = req->ctx;
> 
> 	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE))
> 		return;
> 
> 	io_ring_submit_lock(ctx, issue_flags);
> 	cmd->flags &= ~IORING_URING_CMD_CANCELABLE;
> 	hlist_del(&req->hash_node);
> 	io_ring_submit_unlock(ctx, issue_flags);
> }
> 
> is cleaner imho. Minor nit.
> 
> > +
> > +/*
> > + * Mark this command as cancelable, then io_uring_try_cancel_uring_cmd()
> > + * will try to cancel this issued command by calling ->uring_cmd() with
> > + * IO_URING_F_CANCEL set in issue_flags.
> > + *
> > + * The command is guaranteed to not be done when ->uring_cmd() is called
> > + * with IO_URING_F_CANCEL, but it is the driver's responsibility to deal
> > + * with the race between io_uring canceling and normal completion.
> > + */
> > +int io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
> > +		unsigned int issue_flags)
> > +{
> > +	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
> > +	struct io_ring_ctx *ctx = req->ctx;
> > +
> > +	io_ring_submit_lock(ctx, issue_flags);
> > +	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
> > +		cmd->flags |= IORING_URING_CMD_CANCELABLE;
> > +		hlist_add_head(&req->hash_node, &ctx->cancelable_uring_cmd);
> > +	}
> > +	io_ring_submit_unlock(ctx, issue_flags);
> > +
> > +	return 0;
> > +}
> 
> A bit inconsistent here in terms of the locking. I'm assuming the
> marking happens within issue, in which case it should be fine to do:
> 
> int io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
> 		unsigned int issue_flags)
> {
> 	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
> 	struct io_ring_ctx *ctx = req->ctx;
> 
> 	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
> 		cmd->flags |= IORING_URING_CMD_CANCELABLE;
> 		io_ring_submit_lock(ctx, issue_flags);
> 		hlist_add_head(&req->hash_node, &ctx->cancelable_uring_cmd);
> 		io_ring_submit_unlock(ctx, issue_flags);
> 	}

OK, the flag test and clear can be moved outside the lock in both functions.
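
Applied to the del side, that would give something like (sketch):

static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE))
		return;

	/* per the above, the flag test/clear doesn't need the lock */
	cmd->flags &= ~IORING_URING_CMD_CANCELABLE;

	io_ring_submit_lock(ctx, issue_flags);
	hlist_del(&req->hash_node);
	io_ring_submit_unlock(ctx, issue_flags);
}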


Thanks,
Ming

