public inbox for [email protected]
From: Christoph Hellwig <[email protected]>
To: Kanchan Joshi <[email protected]>
Cc: [email protected], [email protected], [email protected],
	[email protected], [email protected],
	[email protected], [email protected],
	Anuj Gupta <[email protected]>
Subject: Re: [PATCH for-next v10 6/7] block: extend functionality to map bvec iterator
Date: Wed, 28 Sep 2022 19:40:39 +0200	[thread overview]
Message-ID: <[email protected]> (raw)
In-Reply-To: <[email protected]>

On Tue, Sep 27, 2022 at 11:06:09PM +0530, Kanchan Joshi wrote:
> Extend blk_rq_map_user_iov so that it can handle a bvec iterator.
> It maps the pages from the bvec iterator into a bio and places the
> bio into the request.
> 
> This helper will be used by nvme for the uring-passthrough path when
> IO is done using pre-mapped buffers.

Can we avoid duplicating some of the checks?  Something like the below
incremental patch.  Note that this now also allows the copy path for
all kinds of iov_iters, but as the copy from/to iter code is safe and
the sanity check applied only to the map path, that should be fine.
It's best split into a prep patch, though.
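
To spell out the new contract: blk_rq_map_user_bvec returns -EREMOTEIO
for a queue limits mismatch, and the caller treats that as "fall back
to copying" rather than a hard failure.  A sketch of the resulting
control flow, distilled from the patch below:

	ret = blk_rq_map_user_bvec(rq, iter);
	if (!ret)
		return 0;		/* mapped zero-copy */
	if (ret != -EREMOTEIO)
		goto fail;		/* hard error */
	/* fall back to copying the data on limits mismatches */
	copy = true;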

---
diff --git a/block/blk-map.c b/block/blk-map.c
index a1aa8dacb02bc..c51de30767403 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -549,26 +549,16 @@ int blk_rq_append_bio(struct request *rq, struct bio *bio)
 EXPORT_SYMBOL(blk_rq_append_bio);
 
 /* Prepare bio for passthrough IO given ITER_BVEC iter */
-static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter,
-				bool *copy)
+static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
 {
 	struct request_queue *q = rq->q;
-	size_t nr_iter, nr_segs, i;
-	struct bio *bio = NULL;
-	struct bio_vec *bv, *bvecs, *bvprvp = NULL;
+	size_t nr_iter = iov_iter_count(iter);
+	size_t nr_segs = iter->nr_segs;
+	struct bio_vec *bvecs, *bvprvp = NULL;
 	struct queue_limits *lim = &q->limits;
 	unsigned int nsegs = 0, bytes = 0;
-	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
-
-	/* see if we need to copy pages due to any weird situation */
-	if (blk_queue_may_bounce(q))
-		goto out_copy;
-	else if (iov_iter_alignment(iter) & align)
-		goto out_copy;
-	/* virt-alignment gap is checked anyway down, so avoid extra loop here */
-
-	nr_iter = iov_iter_count(iter);
-	nr_segs = iter->nr_segs;
+	struct bio *bio;
+	size_t i;
 
 	if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
 		return -EINVAL;
@@ -586,14 +576,15 @@ static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter,
 	/* loop to perform a bunch of sanity checks */
 	bvecs = (struct bio_vec *)iter->bvec;
 	for (i = 0; i < nr_segs; i++) {
-		bv = &bvecs[i];
+		struct bio_vec *bv = &bvecs[i];
+
 		/*
 		 * If the queue doesn't support SG gaps and adding this
 		 * offset would create a gap, fallback to copy.
 		 */
 		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
 			bio_map_put(bio);
-			goto out_copy;
+			return -EREMOTEIO;
 		}
 		/* check full condition */
 		if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
@@ -611,9 +602,6 @@ static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter,
 put_bio:
 	bio_map_put(bio);
 	return -EINVAL;
-out_copy:
-	*copy = true;
-	return 0;
 }
 
 /**
@@ -635,33 +623,35 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 			struct rq_map_data *map_data,
 			const struct iov_iter *iter, gfp_t gfp_mask)
 {
-	bool copy = false;
+	bool copy = false, map_bvec = false;
 	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
 	struct bio *bio = NULL;
 	struct iov_iter i;
 	int ret = -EINVAL;
 
-	if (iov_iter_is_bvec(iter)) {
-		ret = blk_rq_map_user_bvec(rq, iter, &copy);
-		if (ret != 0)
-			goto fail;
-		if (copy)
-			goto do_copy;
-		return ret;
-	}
-	if (!iter_is_iovec(iter))
-		goto fail;
-
 	if (map_data)
 		copy = true;
 	else if (blk_queue_may_bounce(q))
 		copy = true;
 	else if (iov_iter_alignment(iter) & align)
 		copy = true;
+	else if (iov_iter_is_bvec(iter))
+		map_bvec = true;
+	else if (!iter_is_iovec(iter))
+		copy = true;
 	else if (queue_virt_boundary(q))
 		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
 
-do_copy:
+	if (map_bvec) {
+		ret = blk_rq_map_user_bvec(rq, iter);
+		if (!ret)
+			return 0;
+		if (ret != -EREMOTEIO)
+			goto fail;
+		/* fall back to copying the data on limits mismatches */
+		copy = true;
+	}
+
 	i = *iter;
 	do {
 		if (copy)

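For reference, a hypothetical caller on the nvme uring-passthrough side
(not part of this patch, variable names are illustrative) would feed its
pre-mapped buffers through the same entry point:

	struct iov_iter iter;
	int ret;

	/* bvecs/nr_bvecs/total_len describe buffers pinned up front */
	iov_iter_bvec(&iter, WRITE, bvecs, nr_bvecs, total_len);

	/*
	 * Picks the zero-copy bvec path when the queue limits allow it,
	 * and transparently copies otherwise.
	 */
	ret = blk_rq_map_user_iov(rq->q, rq, NULL, &iter, GFP_KERNEL);
	if (ret)
		return ret;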
