From: Caleb Sander Mateos <csander@purestorage.com>
To: Keith Busch <kbusch@kernel.org>, Jens Axboe <axboe@kernel.dk>,
Christoph Hellwig <hch@lst.de>, Sagi Grimberg <sagi@grimberg.me>,
Pavel Begunkov <asml.silence@gmail.com>
Cc: Chaitanya Kulkarni <kch@nvidia.com>,
linux-nvme@lists.infradead.org, io-uring@vger.kernel.org,
linux-kernel@vger.kernel.org,
Caleb Sander Mateos <csander@purestorage.com>
Subject: [PATCH v4 2/3] nvme/ioctl: move blk_mq_free_request() out of nvme_map_user_request()
Date: Fri, 28 Mar 2025 09:46:46 -0600
Message-ID: <20250328154647.2590171-3-csander@purestorage.com>
In-Reply-To: <20250328154647.2590171-1-csander@purestorage.com>

The callers of nvme_map_user_request() (nvme_submit_user_cmd() and
nvme_uring_cmd_io()) allocate the request, so have them free it if
nvme_map_user_request() fails.

Signed-off-by: Caleb Sander Mateos <csander@purestorage.com>
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
---
drivers/nvme/host/ioctl.c | 31 +++++++++++++++++--------------
1 file changed, 17 insertions(+), 14 deletions(-)
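
A minimal user-space sketch of the ownership rule this patch adopts, for
readers skimming the diff: the function that allocates a resource is the
one that frees it when a helper fails, rather than the helper freeing a
request it did not allocate. All names below are illustrative stand-ins
(plain malloc()/free() in place of blk_mq_alloc_request()/
blk_mq_free_request()); this is not code from this series:

#include <stdlib.h>

struct request { int mapped; };

/* Helper analogous to nvme_map_user_request() after this patch: it can
 * fail, but it never frees the request it was handed. */
static int map_request(struct request *req, int fail)
{
	if (fail)
		return -22;	/* stands in for -EINVAL */
	req->mapped = 1;
	return 0;
}

/* Caller analogous to nvme_submit_user_cmd(): it allocated the request,
 * so every failure path (and the synchronous completion path) frees it. */
static int submit(int fail)
{
	struct request *req = malloc(sizeof(*req));
	int ret;

	if (!req)
		return -12;	/* stands in for -ENOMEM */

	ret = map_request(req, fail);
	if (ret)
		goto out_free_req;

	/* ... execute the request; the sync path frees it afterwards ... */

out_free_req:
	free(req);
	return ret;
}

int main(void)
{
	return (submit(0) == 0 && submit(1) == -22) ? 0 : 1;
}

The kernel paths follow the same shape: nvme_map_user_request() now only
reports failure, and both callers jump to an out_free_req label that
calls blk_mq_free_request().
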
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 0634e24eac97..42dfd29ed39e 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -127,41 +127,39 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
 	int ret;
 
 	if (!nvme_ctrl_sgl_supported(ctrl))
 		dev_warn_once(ctrl->device, "using unchecked data buffer\n");
 	if (has_metadata) {
-		if (!supports_metadata) {
-			ret = -EINVAL;
-			goto out;
-		}
+		if (!supports_metadata)
+			return -EINVAL;
+
 		if (!nvme_ctrl_meta_sgl_supported(ctrl))
 			dev_warn_once(ctrl->device,
 				      "using unchecked metadata buffer\n");
 	}
 
 	if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
 		struct iov_iter iter;
 
 		/* fixedbufs is only for non-vectored io */
-		if (flags & NVME_IOCTL_VEC) {
-			ret = -EINVAL;
-			goto out;
-		}
+		if (flags & NVME_IOCTL_VEC)
+			return -EINVAL;
+
 		ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
 				rq_data_dir(req), &iter, ioucmd,
 				iou_issue_flags);
 		if (ret < 0)
-			goto out;
+			return ret;
 		ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
 	} else {
 		ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
 				bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
 				0, rq_data_dir(req));
 	}
 
 	if (ret)
-		goto out;
+		return ret;
 
 	bio = req->bio;
 	if (bdev)
 		bio_set_dev(bio, bdev);
 
@@ -174,12 +172,10 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
 	return ret;
 
 out_unmap:
 	if (bio)
 		blk_rq_unmap_user(bio);
-out:
-	blk_mq_free_request(req);
 	return ret;
 }
 
 static int nvme_submit_user_cmd(struct request_queue *q,
 		struct nvme_command *cmd, u64 ubuffer, unsigned bufflen,
@@ -200,11 +196,11 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 	req->timeout = timeout;
 	if (ubuffer && bufflen) {
 		ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
 				meta_len, NULL, flags, 0);
 		if (ret)
-			return ret;
+			goto out_free_req;
 	}
 
 	bio = req->bio;
 	ctrl = nvme_req(req)->ctrl;
 
@@ -216,11 +212,14 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 		blk_rq_unmap_user(bio);
 	blk_mq_free_request(req);
 
 	if (effects)
 		nvme_passthru_end(ctrl, ns, effects, cmd, ret);
+	return ret;
 
+out_free_req:
+	blk_mq_free_request(req);
 	return ret;
 }
 
 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 {
@@ -520,20 +519,24 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	if (d.data_len) {
 		ret = nvme_map_user_request(req, d.addr,
 			d.data_len, nvme_to_user_ptr(d.metadata),
 			d.metadata_len, ioucmd, vec, issue_flags);
 		if (ret)
-			return ret;
+			goto out_free_req;
 	}
 
 	/* to free bio on completion, as req->bio will be null at that time */
 	pdu->bio = req->bio;
 	pdu->req = req;
 	req->end_io_data = ioucmd;
 	req->end_io = nvme_uring_cmd_end_io;
 	blk_execute_rq_nowait(req, false);
 	return -EIOCBQUEUED;
+
+out_free_req:
+	blk_mq_free_request(req);
+	return ret;
 }
 
 static bool is_ctrl_ioctl(unsigned int cmd)
 {
 	if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
--
2.45.2