From: Kanchan Joshi <[email protected]>
To: [email protected], [email protected],
	[email protected]
Cc: [email protected], [email protected], [email protected],
	[email protected], [email protected], [email protected],
	[email protected]
Subject: [RFC 13/13] nvme: Add async passthru polling support
Date: Mon, 20 Dec 2021 19:47:34 +0530
Message-ID: <[email protected]>
In-Reply-To: <[email protected]>

From: Pankaj Raghav <[email protected]>

io_uring already supports completion polling for reads and writes.
This patch extends that support to uring_cmd passthru. A previously
unused flag in struct io_uring_cmd indicates whether the completion
should be polled. If polling is not enabled on the device side, the
submission falls back to a regular, non-polled request.

Signed-off-by: Pankaj Raghav <[email protected]>
---
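Note for reviewers (not part of the commit message): below is a minimal
userspace sketch of how a polled passthru submission might be driven. It
assumes liburing, a ring created with IORING_SETUP_IOPOLL, and the
IORING_OP_URING_CMD opcode introduced earlier in this series; the
passthru command payload setup (patch 02/13) is elided.

	#include <liburing.h>
	#include <string.h>

	static int submit_polled_passthru(int nvme_char_fd)
	{
		struct io_uring ring;
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;
		int ret;

		/* IOPOLL ring: completions are reaped by polling the device
		 * (f_op->iopoll) instead of waiting for an interrupt. */
		ret = io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL);
		if (ret < 0)
			return ret;

		sqe = io_uring_get_sqe(&ring);	/* never NULL on a fresh ring */
		memset(sqe, 0, sizeof(*sqe));
		sqe->opcode = IORING_OP_URING_CMD;	/* added by this series */
		sqe->fd = nvme_char_fd;		/* the /dev/ngXnY char device */
		/* ... passthru command payload as defined in patch 02/13 ... */

		io_uring_submit(&ring);

		/* On an IOPOLL ring this spins in io_iopoll_check(); that loop
		 * is where this patch hooks f_op->iopoll for uring_cmd. */
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (!ret)
			io_uring_cqe_seen(&ring, cqe);
		io_uring_queue_exit(&ring);
		return ret;
	}

If the underlying namespace has no poll queues, the request transparently
falls back to IRQ completion; the io_iopoll_check() hunk below runs any
queued task-work so such completions still make progress.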
 block/blk-mq.c                |  3 +-
 drivers/nvme/host/core.c      |  1 +
 drivers/nvme/host/ioctl.c     | 79 ++++++++++++++++++++++++++++++++++-
 drivers/nvme/host/multipath.c |  1 +
 drivers/nvme/host/nvme.h      |  4 ++
 fs/io_uring.c                 | 45 ++++++++++++++++++--
 include/linux/blk-mq.h        |  1 +
 include/linux/io_uring.h      | 10 ++++-
 8 files changed, 135 insertions(+), 9 deletions(-)
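
At a glance, the submission/completion decision this patch adds, condensed
from the ioctl.c hunks below (abbreviated for review, not the literal code):

	/* Submission (nvme_submit_user_cmd): tag the bio for polling only
	 * when the ring is IOPOLL and the request maps to a poll queue. */
	if (bio && is_polling_enabled(ioucmd, req)) {
		ioucmd->bio = bio;
		bio->bi_opf |= REQ_POLLED;
	}

	/* Completion (nvme_end_async_pt): a polled request completes inline,
	 * since ->iopoll runs in the submitting task; anything else defers
	 * to task-work as before. */
	if (is_polling_enabled(ioucmd, req))
		nvme_pt_task_cb(ioucmd);
	else
		io_uring_cmd_complete_in_task(ioucmd, nvme_pt_task_cb);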

diff --git a/block/blk-mq.c b/block/blk-mq.c
index c77991688bfd..acfa55c96a43 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1193,7 +1193,7 @@ void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
-static bool blk_rq_is_poll(struct request *rq)
+bool blk_rq_is_poll(struct request *rq)
 {
 	if (!rq->mq_hctx)
 		return false;
@@ -1203,6 +1203,7 @@ static bool blk_rq_is_poll(struct request *rq)
 		return false;
 	return true;
 }
+EXPORT_SYMBOL_GPL(blk_rq_is_poll);
 
 static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
 {
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 5199adf7ae92..f0697cbe2bf1 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3676,6 +3676,7 @@ static const struct file_operations nvme_ns_chr_fops = {
 	.unlocked_ioctl	= nvme_ns_chr_ioctl,
 	.compat_ioctl	= compat_ptr_ioctl,
 	.async_cmd	= nvme_ns_chr_async_cmd,
+	.iopoll		= nvme_iopoll,
 };
 
 static int nvme_add_ns_cdev(struct nvme_ns *ns)
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index bdaf8f317aa8..ce2fe94df3ad 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -31,6 +31,12 @@ struct nvme_uring_cmd {
 	void __user *meta_buffer;
 };
 
+static inline bool is_polling_enabled(struct io_uring_cmd *ioucmd,
+				      struct request *req)
+{
+	return (ioucmd->flags & URING_CMD_POLLED) && blk_rq_is_poll(req);
+}
+
 static struct nvme_uring_cmd *nvme_uring_cmd(struct io_uring_cmd *ioucmd)
 {
 	return (struct nvme_uring_cmd *)&ioucmd->pdu;
@@ -76,8 +82,16 @@ static void nvme_end_async_pt(struct request *req, blk_status_t err)
 
 	cmd->req = req;
 	req->bio = bio;
-	/* this takes care of setting up task-work */
-	io_uring_cmd_complete_in_task(ioucmd, nvme_pt_task_cb);
+
+	/* The IO can be completed immediately when the completion
+	 * callback runs in the same task context as the poller.
+	 */
+	if (is_polling_enabled(ioucmd, req)) {
+		nvme_pt_task_cb(ioucmd);
+	} else {
+		/* this takes care of setting up task-work */
+		io_uring_cmd_complete_in_task(ioucmd, nvme_pt_task_cb);
+	}
 }
 
 static void nvme_setup_uring_cmd_data(struct request *rq,
@@ -183,6 +197,12 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 		}
 	}
 	if (ioucmd) { /* async dispatch */
+
+		if (bio && is_polling_enabled(ioucmd, req)) {
+			ioucmd->bio = bio;
+			bio->bi_opf |= REQ_POLLED;
+		}
+
 		nvme_setup_uring_cmd_data(req, ioucmd, meta, meta_buffer,
 				meta_len, write);
 		blk_execute_rq_nowait(req, 0, nvme_end_async_pt);
@@ -496,6 +516,32 @@ int nvme_ns_chr_async_cmd(struct io_uring_cmd *ioucmd,
 	return nvme_ns_async_ioctl(ns, ioucmd);
 }
 
+int nvme_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
+		unsigned int flags)
+{
+	struct bio *bio = NULL;
+	struct nvme_ns *ns = NULL;
+	struct request_queue *q = NULL;
+	int ret = 0;
+
+	rcu_read_lock();
+	bio = READ_ONCE(kiocb->private);
+	ns = container_of(file_inode(kiocb->ki_filp)->i_cdev, struct nvme_ns,
+			  cdev);
+	q = ns->queue;
+
+	/*
+	 * bio and driver_cb share a union in struct io_uring_cmd. When there
+	 * are no poll queues, driver_cb is used for the IRQ callback while
+	 * polling is still attempted from the io_uring side. Check
+	 * QUEUE_FLAG_POLL and return 0 for non-polled queues.
+	 */
+	if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio && bio->bi_bdev)
+		ret = bio_poll(bio, iob, flags);
+	rcu_read_unlock();
+	return ret;
+}
+
 #ifdef CONFIG_NVME_MULTIPATH
 static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
 		void __user *argp, struct nvme_ns_head *head, int srcu_idx)
@@ -577,6 +623,35 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
 	srcu_read_unlock(&head->srcu, srcu_idx);
 	return ret;
 }
+
+int nvme_ns_head_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
+			unsigned int flags)
+{
+	struct bio *bio = NULL;
+	struct request_queue *q = NULL;
+	struct cdev *cdev = file_inode(kiocb->ki_filp)->i_cdev;
+	struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
+	int srcu_idx = srcu_read_lock(&head->srcu);
+	struct nvme_ns *ns = nvme_find_path(head);
+	int ret = -EWOULDBLOCK;
+
+	if (ns) {
+		bio = READ_ONCE(kiocb->private);
+		q = ns->queue;
+		/*
+		 * bio and driver_cb share a union in struct io_uring_cmd;
+		 * see the comment in nvme_iopoll(). Check QUEUE_FLAG_POLL
+		 * so we do not attempt to poll a queue that has no poll
+		 * queues configured.
+		 */
+		if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio &&
+		    bio->bi_bdev)
+			ret = bio_poll(bio, iob, flags);
+	}
+
+	srcu_read_unlock(&head->srcu, srcu_idx);
+	return ret;
+}
 #endif /* CONFIG_NVME_MULTIPATH */
 
 static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 1e59c8e06622..df91b2953932 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -424,6 +424,7 @@ static const struct file_operations nvme_ns_head_chr_fops = {
 	.unlocked_ioctl	= nvme_ns_head_chr_ioctl,
 	.compat_ioctl	= compat_ptr_ioctl,
 	.async_cmd	= nvme_ns_head_chr_async_cmd,
+	.iopoll		= nvme_ns_head_iopoll,
 };
 
 static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 56a7cc8421fc..730ada8a3e8e 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -752,8 +752,12 @@ long nvme_dev_ioctl(struct file *file, unsigned int cmd,
 		unsigned long arg);
 int nvme_ns_chr_async_cmd(struct io_uring_cmd *ucmd,
 		enum io_uring_cmd_flags flags);
+int nvme_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
+		unsigned int flags);
 int nvme_ns_head_chr_async_cmd(struct io_uring_cmd *ucmd,
 		enum io_uring_cmd_flags flags);
+int nvme_ns_head_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
+			unsigned int flags);
 int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);
 
 extern const struct attribute_group *nvme_ns_id_attr_groups[];
diff --git a/fs/io_uring.c b/fs/io_uring.c
index f77dde1bdc75..ae2e7666622e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2655,7 +2655,20 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 		if (READ_ONCE(req->iopoll_completed))
 			break;
 
-		ret = kiocb->ki_filp->f_op->iopoll(kiocb, &iob, poll_flags);
+		if (req->opcode == IORING_OP_URING_CMD ||
+		    req->opcode == IORING_OP_URING_CMD_FIXED) {
+			/* uring_cmd has no embedded kiocb; build one on stack */
+			struct kiocb kiocb_uring_cmd;
+
+			kiocb_uring_cmd.private = req->uring_cmd.bio;
+			kiocb_uring_cmd.ki_filp = req->uring_cmd.file;
+			ret = req->uring_cmd.file->f_op->iopoll(&kiocb_uring_cmd,
+			      &iob, poll_flags);
+		} else {
+			ret = kiocb->ki_filp->f_op->iopoll(kiocb, &iob,
+							   poll_flags);
+		}
+
 		if (unlikely(ret < 0))
 			return ret;
 		else if (ret)
@@ -2768,6 +2781,15 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
 			    wq_list_empty(&ctx->iopoll_list))
 				break;
 		}
+
+		/*
+		 * The completion callback may have been queued as task-work
+		 * in this very task; run it here, as polling from this task
+		 * would otherwise never let that callback complete.
+		 */
+		if (current->task_works)
+			io_run_task_work();
+
 		ret = io_do_iopoll(ctx, !min);
 		if (ret < 0)
 			break;
@@ -4122,6 +4144,14 @@ static int io_linkat(struct io_kiocb *req, unsigned int issue_flags)
 	return 0;
 }
 
+static void io_complete_uring_cmd_iopoll(struct io_kiocb *req, long res)
+{
+	WRITE_ONCE(req->result, res);
+	/* order with io_iopoll_complete() checking ->result */
+	smp_wmb();
+	WRITE_ONCE(req->iopoll_completed, 1);
+}
+
 /*
  * Called by consumers of io_uring_cmd, if they originally returned
  * -EIOCBQUEUED upon receiving the command.
@@ -4132,7 +4162,11 @@ void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret)
 
 	if (ret < 0)
 		req_set_fail(req);
-	io_req_complete(req, ret);
+
+	if (req->uring_cmd.flags & URING_CMD_POLLED)
+		io_complete_uring_cmd_iopoll(req, ret);
+	else
+		io_req_complete(req, ret);
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_done);
 
@@ -4147,8 +4181,11 @@ static int io_uring_cmd_prep(struct io_kiocb *req,
 		return -EOPNOTSUPP;
 
 	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
-		printk_once(KERN_WARNING "io_uring: iopoll not supported!\n");
-		return -EOPNOTSUPP;
+		req->uring_cmd.flags = URING_CMD_POLLED;
+		req->uring_cmd.bio = NULL;
+		req->iopoll_completed = 0;
+	} else {
+		req->uring_cmd.flags = 0;
 	}
 
 	cmd->op = READ_ONCE(csqe->op);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index e35a5d835b1f..2233ccf41c19 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -933,6 +933,7 @@ int blk_rq_map_kern(struct request_queue *, struct request *, void *,
 int blk_rq_append_bio(struct request *rq, struct bio *bio);
 void blk_execute_rq_nowait(struct request *rq, bool at_head,
 		rq_end_io_fn *end_io);
+bool blk_rq_is_poll(struct request *rq);
 blk_status_t blk_execute_rq(struct request *rq, bool at_head);
 
 struct req_iterator {
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 07732bc850af..bbc9c4ea19c3 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -6,6 +6,7 @@
 #include <linux/xarray.h>
 
 enum {
+	URING_CMD_POLLED = (1 << 0),
 	URING_CMD_FIXEDBUFS = (1 << 1),
 };
 /*
@@ -17,8 +18,13 @@ struct io_uring_cmd {
 	__u16		op;
 	__u16		flags;
 	__u32		len;
-	/* used if driver requires update in task context*/
-	void (*driver_cb)(struct io_uring_cmd *cmd);
+	union {
+		void *bio;	/* used for polling-based completion */
+
+		/* used when the driver needs task context (IRQ completion) */
+		void (*driver_cb)(struct io_uring_cmd *cmd);
+	};
+
 	__u64		pdu[5];	/* 40 bytes available inline for free use */
 };
 
-- 
2.25.1

