From: Pavel Begunkov <asml.silence@gmail.com>
To: io-uring@vger.kernel.org, linux-block@vger.kernel.org,
linux-nvme@lists.infradead.org
Cc: linux-fsdevel@vger.kernel.org, Keith Busch <kbusch@kernel.org>,
David Wei <dw@davidwei.uk>,
Vishal Verma <vishal1.verma@intel.com>,
asml.silence@gmail.com
Subject: [RFC 06/12] nvme-pci: add support for user passed dma vectors
Date: Fri, 27 Jun 2025 16:10:33 +0100
Message-ID: <0803e60c420ad80570abd736a1549fffaeb6435d.1751035820.git.asml.silence@gmail.com>
In-Reply-To: <cover.1751035820.git.asml.silence@gmail.com>

Implement the ->get_dma_device blk-mq callback and add BIO_DMAVEC handling.
If the driver sees BIO_DMAVEC, instead of mapping pages it directly
populates the PRP list with the provided DMA addresses.
Suggested-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
drivers/nvme/host/pci.c | 158 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 158 insertions(+)
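
For readers unfamiliar with NVMe PRP lists, here is a minimal standalone
model of the walk that nvme_dma_premapped() performs below. CTRL_PAGE_SIZE,
struct demo_dmavec and every demo_* name are illustrative assumptions and
not part of the patch; descriptor-pool allocation, the single-entry PRP2
case and error handling are deliberately left out.

#include <stdint.h>
#include <stdio.h>

#define CTRL_PAGE_SIZE 4096

struct demo_dmavec {
        uint64_t addr;  /* premapped bus address of this vector */
        int len;        /* length of this vector in bytes */
};

static void demo_prp_walk(const struct demo_dmavec *dmav, int length)
{
        uint64_t dma_addr = dmav->addr;
        int dma_len = dmav->len;
        int offset = dma_addr & (CTRL_PAGE_SIZE - 1);

        /* PRP1 may start at any offset inside a controller page */
        printf("PRP1: 0x%llx\n", (unsigned long long)dma_addr);

        length -= CTRL_PAGE_SIZE - offset;
        if (length <= 0)
                return;

        dma_len -= CTRL_PAGE_SIZE - offset;
        if (dma_len) {
                dma_addr += CTRL_PAGE_SIZE - offset;
        } else {
                dmav++;
                dma_addr = dmav->addr;
                dma_len = dmav->len;
        }

        /* every further PRP entry covers exactly one controller page */
        for (;;) {
                printf("PRP:  0x%llx\n", (unsigned long long)dma_addr);
                dma_addr += CTRL_PAGE_SIZE;
                dma_len -= CTRL_PAGE_SIZE;
                length -= CTRL_PAGE_SIZE;
                if (length <= 0)
                        break;
                if (dma_len > 0)
                        continue;
                dmav++;
                dma_addr = dmav->addr;
                dma_len = dmav->len;
        }
}

int main(void)
{
        /* two premapped vectors, 12KiB total, the first starting mid-page */
        struct demo_dmavec vecs[] = {
                { .addr = 0x10000800, .len = 0x1800 },
                { .addr = 0x20000000, .len = 0x1800 },
        };

        demo_prp_walk(vecs, 0x3000);
        return 0;
}

For this 12KiB payload starting 0x800 bytes into a page the model prints
PRP1 plus three page-sized entries, which is the shape of the list the loop
in the patch builds.
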
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8ff12e415cb5..44a6366f2d9a 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -637,11 +637,59 @@ static void nvme_free_descriptors(struct nvme_queue *nvmeq, struct request *req)
}
}
+static void nvme_sync_dma(struct nvme_dev *nvme_dev, struct request *req,
+ enum dma_data_direction dir)
+{
+ bool for_cpu = dir == DMA_FROM_DEVICE;
+ struct device *dev = nvme_dev->dev;
+ struct bio *bio = req->bio;
+ int offset, length;
+ struct dmavec *dmav;
+
+ if (!dma_dev_need_sync(dev))
+ return;
+
+ offset = bio->bi_iter.bi_bvec_done;
+ length = blk_rq_payload_bytes(req);
+ dmav = &bio->bi_dmavec[bio->bi_iter.bi_idx];
+
+ while (length) {
+ u64 dma_addr = dmav->addr + offset;
+ int dma_len = min(dmav->len - offset, length);
+
+ if (for_cpu)
+ __dma_sync_single_for_cpu(dev, dma_addr, dma_len, dir);
+ else
+ __dma_sync_single_for_device(dev, dma_addr,
+ dma_len, dir);
+
+ length -= dma_len;
+ /* the offset only applies to the first vector, then move on */
+ offset = 0;
+ dmav++;
+ }
+}
+
+static void nvme_unmap_premapped_data(struct nvme_dev *dev,
+ struct nvme_queue *nvmeq,
+ struct request *req)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+ if (rq_data_dir(req) == READ)
+ nvme_sync_dma(dev, req, DMA_FROM_DEVICE);
+
+ if (!iod->dma_len)
+ nvme_free_descriptors(nvmeq, req);
+}
+
static void nvme_unmap_data(struct nvme_dev *dev, struct nvme_queue *nvmeq,
struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ if (req->bio && bio_flagged(req->bio, BIO_DMAVEC)) {
+ nvme_unmap_premapped_data(dev, nvmeq, req);
+ return;
+ }
+
if (iod->dma_len) {
dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
rq_dma_dir(req));
@@ -846,6 +894,104 @@ static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
return BLK_STS_OK;
}
+static blk_status_t nvme_dma_premapped(struct nvme_dev *dev, struct request *req,
+ struct nvme_queue *nvmeq,
+ struct nvme_rw_command *cmnd)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ int length = blk_rq_payload_bytes(req);
+ u64 dma_addr, first_dma_addr;
+ struct bio *bio = req->bio;
+ int dma_len, offset;
+ struct dmavec *dmav;
+ dma_addr_t prp_dma;
+ __le64 *prp_list;
+ int i;
+
+ if (rq_data_dir(req) == WRITE)
+ nvme_sync_dma(dev, req, DMA_TO_DEVICE);
+
+ offset = bio->bi_iter.bi_bvec_done;
+ dmav = &bio->bi_dmavec[bio->bi_iter.bi_idx];
+ dma_addr = dmav->addr + offset;
+ dma_len = dmav->len - offset;
+ first_dma_addr = dma_addr;
+ offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
+
+ length -= (NVME_CTRL_PAGE_SIZE - offset);
+ if (length <= 0) {
+ iod->first_dma = 0;
+ goto done;
+ }
+
+ dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
+ if (dma_len) {
+ dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
+ } else {
+ dmav++;
+ dma_addr = dmav->addr;
+ dma_len = dmav->len;
+ }
+
+ if (length <= NVME_CTRL_PAGE_SIZE) {
+ iod->first_dma = dma_addr;
+ goto done;
+ }
+
+ if (DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE) <=
+ NVME_SMALL_POOL_SIZE / sizeof(__le64))
+ iod->flags |= IOD_SMALL_DESCRIPTOR;
+
+ prp_list = dma_pool_alloc(nvme_dma_pool(nvmeq, iod), GFP_ATOMIC,
+ &prp_dma);
+ if (!prp_list)
+ return BLK_STS_RESOURCE;
+
+ iod->descriptors[iod->nr_descriptors++] = prp_list;
+ iod->first_dma = prp_dma;
+ i = 0;
+ for (;;) {
+ if (i == NVME_CTRL_PAGE_SIZE >> 3) {
+ __le64 *old_prp_list = prp_list;
+
+ prp_list = dma_pool_alloc(nvmeq->descriptor_pools.large,
+ GFP_ATOMIC, &prp_dma);
+ if (!prp_list)
+ goto free_prps;
+ iod->descriptors[iod->nr_descriptors++] = prp_list;
+ prp_list[0] = old_prp_list[i - 1];
+ old_prp_list[i - 1] = cpu_to_le64(prp_dma);
+ i = 1;
+ }
+
+ prp_list[i++] = cpu_to_le64(dma_addr);
+ dma_len -= NVME_CTRL_PAGE_SIZE;
+ dma_addr += NVME_CTRL_PAGE_SIZE;
+ length -= NVME_CTRL_PAGE_SIZE;
+ if (length <= 0)
+ break;
+ if (dma_len > 0)
+ continue;
+ if (unlikely(dma_len < 0))
+ goto bad_sgl;
+ dmav++;
+ dma_addr = dmav->addr;
+ dma_len = dmav->len;
+ }
+done:
+ cmnd->dptr.prp1 = cpu_to_le64(first_dma_addr);
+ cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
+ return BLK_STS_OK;
+free_prps:
+ nvme_free_descriptors(nvmeq, req);
+ return BLK_STS_RESOURCE;
+bad_sgl:
+ WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents),
+ "Invalid SGL for payload:%d nents:%d\n",
+ blk_rq_payload_bytes(req), iod->sgt.nents);
+ return BLK_STS_IOERR;
+}
+
static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
struct nvme_command *cmnd)
{
@@ -854,6 +1000,9 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
blk_status_t ret = BLK_STS_RESOURCE;
int rc;
+ if (req->bio && bio_flagged(req->bio, BIO_DMAVEC))
+ return nvme_dma_premapped(dev, req, nvmeq, &cmnd->rw);
+
if (blk_rq_nr_phys_segments(req) == 1) {
struct bio_vec bv = req_bvec(req);
@@ -1874,6 +2023,14 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
return result;
}
+static struct device *nvme_pci_get_dma_device(struct request_queue *q)
+{
+ struct nvme_ns *ns = q->queuedata;
+ struct nvme_dev *dev = to_nvme_dev(ns->ctrl);
+
+ return dev->dev;
+}
+
static const struct blk_mq_ops nvme_mq_admin_ops = {
.queue_rq = nvme_queue_rq,
.complete = nvme_pci_complete_rq,
@@ -1892,6 +2049,7 @@ static const struct blk_mq_ops nvme_mq_ops = {
.map_queues = nvme_pci_map_queues,
.timeout = nvme_timeout,
.poll = nvme_poll,
+ .get_dma_device = nvme_pci_get_dma_device,
};
static void nvme_dev_remove_admin(struct nvme_dev *dev)
--
2.49.0
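
As a closing note, a rough sketch of how an upper layer could consume the
new callback when premapping a buffer; demo_resolve_dma_dev() and its NULL
fallback are assumptions for illustration, only blk_mq_ops::get_dma_device
itself comes from this series.

#include <linux/blk-mq.h>

/* Resolve the struct device to DMA-map against for a given queue. */
static struct device *demo_resolve_dma_dev(struct request_queue *q)
{
        if (q->mq_ops && q->mq_ops->get_dma_device)
                return q->mq_ops->get_dma_device(q);

        return NULL;    /* caller falls back to regular page mapping */
}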