From: Kanchan Joshi <[email protected]>
To: [email protected], [email protected], [email protected], [email protected]
Cc: [email protected], [email protected],
	[email protected], [email protected],
	[email protected], [email protected],
	Kanchan Joshi <[email protected]>,
	Anuj Gupta <[email protected]>
Subject: [RFC PATCH 06/12] pci: implement register/unregister functionality
Date: Sat, 29 Apr 2023 15:09:19 +0530
Message-ID: <[email protected]>
In-Reply-To: <[email protected]>

Implement the register callback. It checks whether any raw queue is
available to be attached and, if one is found, returns its qid. During
queue registration, the per-queue iod array and command-id bitmap are
also preallocated.

The unregister callback does the opposite: it frees those resources and
returns the queue to the pool of available raw queues.
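
A minimal sketch of the intended call flow, as seen through the f_op
plumbing added earlier in this series (error handling abbreviated; the
ns pointer stands for whatever the caller passes as the data argument):

	qid = nvme_pci_register_queue(ns);	/* claim a raw queue */
	if (qid < 0)
		return qid;			/* none free, or setup failed */
	/* ... issue commands against this qid (later patches) ... */
	nvme_pci_unregister_queue(ns, qid);	/* return it to the pool */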

Signed-off-by: Kanchan Joshi <[email protected]>
Signed-off-by: Anuj Gupta <[email protected]>
---
 drivers/nvme/host/pci.c | 154 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 154 insertions(+)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d366a76cc304..b4498e198e8a 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -116,6 +116,15 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
 static void nvme_delete_io_queues(struct nvme_dev *dev);
 static void nvme_update_attrs(struct nvme_dev *dev);
 
+enum {
+	Q_FREE,
+	Q_ALLOC,
+};
+
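+/*
+ * Per-controller bookkeeping for raw queues: nr_free counts the queues
+ * still available, q_state[] tracks each raw queue as Q_FREE/Q_ALLOC.
+ */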
+struct rawq_info {
+	int nr_free;
+	int q_state[];
+};
+
 /*
  * Represents an NVM Express device.  Each nvme_dev is a PCI function.
  */
@@ -164,6 +173,8 @@ struct nvme_dev {
 	unsigned int nr_write_queues;
 	unsigned int nr_poll_queues;
 	unsigned int nr_raw_queues;
+	struct mutex rawq_lock;
+	struct rawq_info *rawqi;
 };
 
 static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
@@ -195,6 +206,10 @@ struct nvme_queue {
 	struct nvme_dev *dev;
 	spinlock_t sq_lock;
 	void *sq_cmds;
+	 /* only used for raw queues: */
+	unsigned long *cmdid_bmp;
+	spinlock_t cmdid_lock;
+	struct nvme_iod *iod;
 	 /* only used for poll queues: */
 	spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
 	struct nvme_completion *cqes;
@@ -1661,6 +1676,141 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled,
 	return result;
 }
 
+static int setup_rawq_info(struct nvme_dev *dev, int nr_rawq)
+{
+	struct rawq_info *rawqi;
+	size_t size = struct_size(rawqi, q_state, nr_rawq);
+
+	/* kzalloc() zeroes q_state[], so every queue starts out Q_FREE */
+	rawqi = kzalloc(size, GFP_KERNEL);
+	if (rawqi == NULL)
+		return -ENOMEM;
+	rawqi->nr_free = nr_rawq;
+	dev->rawqi = rawqi;
+	return 0;
+}
+
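+/*
+ * Claim a free raw queue and return its qid, or a negative errno if
+ * raw queues are not configured or all of them are in use.
+ */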
+static int nvme_pci_get_rawq(struct nvme_dev *dev)
+{
+	int i, qid, nr_rawq;
+	struct rawq_info *rawqi = NULL;
+	int ret = -EINVAL;
+
+	nr_rawq = dev->nr_raw_queues;
+	if (!nr_rawq)
+		return ret;
+
+	mutex_lock(&dev->rawq_lock);
+	if (dev->rawqi == NULL) {
+		ret = setup_rawq_info(dev, nr_rawq);
+		if (ret)
+			goto unlock;
+	}
+	rawqi = dev->rawqi;
+	if (rawqi->nr_free == 0) {
+		ret = -EBUSY;
+		goto unlock;
+	}
+	for (i = 0; i < nr_rawq; i++) {
+		if (rawqi->q_state[i] == Q_FREE) {
+			rawqi->q_state[i] = Q_ALLOC;
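+			/* raw queues occupy the tail of the allocated qid range */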
+			qid = dev->nr_allocated_queues - nr_rawq - i;
+			rawqi->nr_free--;
+			ret = qid;
+			goto unlock;
+		}
+	}
+unlock:
+	mutex_unlock(&dev->rawq_lock);
+	return ret;
+}
+
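+/*
+ * Return a claimed raw queue to the pool and release its command-id
+ * bitmap and iod array. The pointers are cleared so that a repeated
+ * unregister cannot double-free them.
+ */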
+static int nvme_pci_put_rawq(struct nvme_dev *dev, int qid)
+{
+	int i, nr_rawq;
+	struct rawq_info *rawqi = NULL;
+	struct nvme_queue *nvmeq;
+
+	nr_rawq = dev->nr_raw_queues;
+	if (!nr_rawq || dev->rawqi == NULL)
+		return -EINVAL;
+
+	i = dev->nr_allocated_queues - nr_rawq - qid;
+	if (i < 0 || i >= nr_rawq)
+		return -EINVAL;
+	mutex_lock(&dev->rawq_lock);
+	rawqi = dev->rawqi;
+	if (rawqi->q_state[i] == Q_ALLOC) {
+		rawqi->q_state[i] = Q_FREE;
+		rawqi->nr_free++;
+	}
+	mutex_unlock(&dev->rawq_lock);
+	nvmeq = &dev->queues[qid];
+	kfree(nvmeq->cmdid_bmp);
+	nvmeq->cmdid_bmp = NULL;
+	kfree(nvmeq->iod);
+	nvmeq->iod = NULL;
+	return 0;
+}
+
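+/* one bit per command slot, used to track in-flight rawq command ids */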
+static int nvme_pci_alloc_cmdid_bmp(struct nvme_queue *nvmeq)
+{
+	int size = BITS_TO_LONGS(nvmeq->q_depth) * sizeof(unsigned long);
+
+	if (!test_bit(NVMEQ_RAW, &nvmeq->flags))
+		return -EINVAL;
+	nvmeq->cmdid_bmp = kzalloc(size, GFP_KERNEL);
+	if (!nvmeq->cmdid_bmp)
+		return -ENOMEM;
+	spin_lock_init(&nvmeq->cmdid_lock);
+	return 0;
+}
+
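+/* an NVMe queue of depth N holds at most N - 1 outstanding commands */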
+static int nvme_pci_alloc_iod_array(struct nvme_queue *nvmeq)
+{
+	if (!test_bit(NVMEQ_RAW, &nvmeq->flags))
+		return -EINVAL;
+	nvmeq->iod = kcalloc(nvmeq->q_depth - 1, sizeof(struct nvme_iod),
+				 GFP_KERNEL);
+	if (!nvmeq->iod)
+		return -ENOMEM;
+	return 0;
+}
+
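+/* allocate all per-queue state needed to drive a queue in raw mode */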
+static int nvme_pci_setup_rawq(struct nvme_queue *nvmeq)
+{
+	int ret;
+
+	ret = nvme_pci_alloc_cmdid_bmp(nvmeq);
+	if (ret)
+		return ret;
+	ret = nvme_pci_alloc_iod_array(nvmeq);
+	if (ret) {
+		kfree(nvmeq->cmdid_bmp);
+		nvmeq->cmdid_bmp = NULL;
+		return ret;
+	}
+	return 0;
+}
+
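+/* ->register_queue callback, wired into nvme_mq_ops below */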
+static int nvme_pci_register_queue(void *data)
+{
+	struct nvme_ns *ns = (struct nvme_ns *) data;
+	struct nvme_dev *dev = to_nvme_dev(ns->ctrl);
+	int qid, ret;
+
+	qid = nvme_pci_get_rawq(dev);
+	if (qid > 0) {
+		/* setup command-id bitmap and iod array */
+		ret = nvme_pci_setup_rawq(&dev->queues[qid]);
+		if (ret < 0) {
+			/* setup failed; hand the queue back to the pool */
+			nvme_pci_put_rawq(dev, qid);
+			qid = ret;
+		}
+	}
+	return qid;
+}
+
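+/* ->unregister_queue callback, the inverse of nvme_pci_register_queue() */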
+static int nvme_pci_unregister_queue(void *data, int qid)
+{
+	struct nvme_ns *ns = (struct nvme_ns *) data;
+	struct nvme_dev *dev = to_nvme_dev(ns->ctrl);
+
+	return nvme_pci_put_rawq(dev, qid);
+}
+
 static const struct blk_mq_ops nvme_mq_admin_ops = {
 	.queue_rq	= nvme_queue_rq,
 	.complete	= nvme_pci_complete_rq,
@@ -1679,6 +1829,8 @@ static const struct blk_mq_ops nvme_mq_ops = {
 	.map_queues	= nvme_pci_map_queues,
 	.timeout	= nvme_timeout,
 	.poll		= nvme_poll,
+	.register_queue	= nvme_pci_register_queue,
+	.unregister_queue = nvme_pci_unregister_queue,
 };
 
 static void nvme_dev_remove_admin(struct nvme_dev *dev)
@@ -2698,6 +2850,7 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
 	nvme_free_tagset(dev);
 	put_device(dev->dev);
 	kfree(dev->queues);
+	kfree(dev->rawqi);
 	kfree(dev);
 }
 
@@ -2938,6 +3091,7 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
 		return ERR_PTR(-ENOMEM);
 	INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
 	mutex_init(&dev->shutdown_lock);
+	mutex_init(&dev->rawq_lock);
 
 	dev->nr_write_queues = write_queues;
 	dev->nr_poll_queues = poll_queues;
-- 
2.25.1


Thread overview: 15+ messages
2023-04-29  9:39 ` [RFC PATCH 00/12] io_uring attached nvme queue Kanchan Joshi
2023-04-29  9:39   ` [RFC PATCH 01/12] nvme: refactor nvme_alloc_io_tag_set Kanchan Joshi
2023-04-29  9:39   ` [RFC PATCH 02/12] pci: enable "raw_queues = N" module parameter Kanchan Joshi
2023-04-29  9:39   ` [RFC PATCH 03/12] fs, block: interface to register/unregister the raw-queue Kanchan Joshi
2023-04-29  9:39   ` [RFC PATCH 04/12] io_uring, fs: plumb support to register/unregister raw-queue Kanchan Joshi
2023-04-29  9:39   ` [RFC PATCH 05/12] nvme: wire-up register/unregister queue f_op callback Kanchan Joshi
2023-04-29  9:39   ` [RFC PATCH 06/12] pci: implement register/unregister functionality Kanchan Joshi [this message]
2023-04-29  9:39   ` [RFC PATCH 07/12] io_uring: support for using registered queue in uring-cmd Kanchan Joshi
2023-04-29  9:39   ` [RFC PATCH 08/12] block: add mq_ops to submit and complete commands from raw-queue Kanchan Joshi
2023-04-29  9:39   ` [RFC PATCH 09/12] nvme: carve out a helper to prepare nvme_command from ioucmd->cmd Kanchan Joshi
2023-04-29  9:39   ` [RFC PATCH 10/12] nvme: submission/completion of uring_cmd to/from the registered queue Kanchan Joshi
2023-04-29  9:39   ` [RFC PATCH 11/12] pci: modify nvme_setup_prp_simple parameters Kanchan Joshi
2023-04-29  9:39   ` [RFC PATCH 12/12] pci: implement submission/completion for rawq commands Kanchan Joshi
2023-04-29 17:17 ` [RFC PATCH 00/12] io_uring attached nvme queue Jens Axboe
2023-05-01 11:36   ` Kanchan Joshi
