* [PATCH 1/4] io_uring/cmd: rename struct uring_cache to io_uring_cmd_data
@ 2024-11-20 16:02 Mark Harmstone
2024-11-20 16:02 ` [PATCH 2/4] io_uring: use sizeof for io_issue_defs[IORING_OP_URING_CMD].async_size Mark Harmstone
` (2 more replies)
0 siblings, 3 replies; 7+ messages in thread
From: Mark Harmstone @ 2024-11-20 16:02 UTC (permalink / raw)
To: linux-btrfs, io-uring; +Cc: Jens Axboe
From: Jens Axboe <[email protected]>
In preparation for making this more generically available for
->uring_cmd() usage that needs stable command data, rename it and move
it to the public header include/linux/io_uring/cmd.h instead.
Signed-off-by: Jens Axboe <[email protected]>
---
include/linux/io_uring/cmd.h | 4 ++++
io_uring/io_uring.c | 2 +-
io_uring/uring_cmd.c | 10 +++++-----
io_uring/uring_cmd.h | 4 ----
4 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
index c189d36ad55e..24cff2b9b9d4 100644
--- a/include/linux/io_uring/cmd.h
+++ b/include/linux/io_uring/cmd.h
@@ -18,6 +18,10 @@ struct io_uring_cmd {
u8 pdu[32]; /* available inline for free use */
};
+struct io_uring_cmd_data {
+ struct io_uring_sqe sqes[2];
+};
+
static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
{
return sqe->cmd;
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index b2736e3491b8..8ae6bf746fcc 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -315,7 +315,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
ret |= io_alloc_cache_init(&ctx->rw_cache, IO_ALLOC_CACHE_MAX,
sizeof(struct io_async_rw));
ret |= io_alloc_cache_init(&ctx->uring_cache, IO_ALLOC_CACHE_MAX,
- sizeof(struct uring_cache));
+ sizeof(struct io_uring_cmd_data));
spin_lock_init(&ctx->msg_lock);
ret |= io_alloc_cache_init(&ctx->msg_cache, IO_ALLOC_CACHE_MAX,
sizeof(struct io_kiocb));
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index e2e8485932d6..eefc203a1214 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -16,10 +16,10 @@
#include "rsrc.h"
#include "uring_cmd.h"
-static struct uring_cache *io_uring_async_get(struct io_kiocb *req)
+static struct io_uring_cmd_data *io_uring_async_get(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
- struct uring_cache *cache;
+ struct io_uring_cmd_data *cache;
cache = io_alloc_cache_get(&ctx->uring_cache);
if (cache) {
@@ -35,7 +35,7 @@ static struct uring_cache *io_uring_async_get(struct io_kiocb *req)
static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
- struct uring_cache *cache = req->async_data;
+ struct io_uring_cmd_data *cache = req->async_data;
if (issue_flags & IO_URING_F_UNLOCKED)
return;
@@ -183,7 +183,7 @@ static int io_uring_cmd_prep_setup(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
- struct uring_cache *cache;
+ struct io_uring_cmd_data *cache;
cache = io_uring_async_get(req);
if (unlikely(!cache))
@@ -256,7 +256,7 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
ret = file->f_op->uring_cmd(ioucmd, issue_flags);
if (ret == -EAGAIN) {
- struct uring_cache *cache = req->async_data;
+ struct io_uring_cmd_data *cache = req->async_data;
if (ioucmd->sqe != (void *) cache)
memcpy(cache, ioucmd->sqe, uring_sqe_size(req->ctx));
diff --git a/io_uring/uring_cmd.h b/io_uring/uring_cmd.h
index a361f98664d2..515823ca68b8 100644
--- a/io_uring/uring_cmd.h
+++ b/io_uring/uring_cmd.h
@@ -1,9 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-struct uring_cache {
- struct io_uring_sqe sqes[2];
-};
-
int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags);
int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
--
2.45.2
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH 2/4] io_uring: use sizeof for io_issue_defs[IORING_OP_URING_CMD].async_size
2024-11-20 16:02 [PATCH 1/4] io_uring/cmd: rename struct uring_cache to io_uring_cmd_data Mark Harmstone
@ 2024-11-20 16:02 ` Mark Harmstone
2024-11-20 16:49 ` Jens Axboe
2024-11-20 16:02 ` [PATCH 3/4] io_uring/cmd: add per-op data to struct io_uring_cmd_data Mark Harmstone
2024-11-20 16:02 ` [PATCH 4/4] btrfs: don't read from userspace twice in btrfs_uring_encoded_read() Mark Harmstone
2 siblings, 1 reply; 7+ messages in thread
From: Mark Harmstone @ 2024-11-20 16:02 UTC (permalink / raw)
To: linux-btrfs, io-uring; +Cc: Mark Harmstone
Correct the value of io_issue_defs[IORING_OP_URING_CMD].async_size so
that it is derived directly from the size of struct io_uring_cmd_data
via sizeof, rather than being hard-coded as 2 * sizeof(struct
io_uring_sqe).
Signed-off-by: Mark Harmstone <[email protected]>
---
io_uring/opdef.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index a2be3bbca5ff..c7746f67cc65 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -7,6 +7,7 @@
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/io_uring.h>
+#include <linux/io_uring/cmd.h>
#include "io_uring.h"
#include "opdef.h"
@@ -414,7 +415,7 @@ const struct io_issue_def io_issue_defs[] = {
.plug = 1,
.iopoll = 1,
.iopoll_queue = 1,
- .async_size = 2 * sizeof(struct io_uring_sqe),
+ .async_size = sizeof(struct io_uring_cmd_data),
.prep = io_uring_cmd_prep,
.issue = io_uring_cmd,
},
--
2.45.2
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH 3/4] io_uring/cmd: add per-op data to struct io_uring_cmd_data
2024-11-20 16:02 [PATCH 1/4] io_uring/cmd: rename struct uring_cache to io_uring_cmd_data Mark Harmstone
2024-11-20 16:02 ` [PATCH 2/4] io_uring: use sizeof for io_issue_defs[IORING_OP_URING_CMD].async_size Mark Harmstone
@ 2024-11-20 16:02 ` Mark Harmstone
2024-11-20 16:02 ` [PATCH 4/4] btrfs: don't read from userspace twice in btrfs_uring_encoded_read() Mark Harmstone
2 siblings, 0 replies; 7+ messages in thread
From: Mark Harmstone @ 2024-11-20 16:02 UTC (permalink / raw)
To: linux-btrfs, io-uring; +Cc: Jens Axboe
From: Jens Axboe <[email protected]>
In case an op handler for ->uring_cmd() needs stable storage for user
data, it can allocate io_uring_cmd_data->op_data and use it for the
duration of the request. When the request gets cleaned up, uring_cmd
will free it automatically.
Signed-off-by: Jens Axboe <[email protected]>
---
include/linux/io_uring/cmd.h | 1 +
io_uring/uring_cmd.c | 13 +++++++++++--
2 files changed, 12 insertions(+), 2 deletions(-)
diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
index 24cff2b9b9d4..3df6636ec3a3 100644
--- a/include/linux/io_uring/cmd.h
+++ b/include/linux/io_uring/cmd.h
@@ -20,6 +20,7 @@ struct io_uring_cmd {
struct io_uring_cmd_data {
struct io_uring_sqe sqes[2];
+ void *op_data;
};
static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index eefc203a1214..019d6f49ff20 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -23,12 +23,16 @@ static struct io_uring_cmd_data *io_uring_async_get(struct io_kiocb *req)
cache = io_alloc_cache_get(&ctx->uring_cache);
if (cache) {
+ cache->op_data = NULL;
req->flags |= REQ_F_ASYNC_DATA;
req->async_data = cache;
return cache;
}
- if (!io_alloc_async_data(req))
- return req->async_data;
+ if (!io_alloc_async_data(req)) {
+ cache = req->async_data;
+ cache->op_data = NULL;
+ return cache;
+ }
return NULL;
}
@@ -37,6 +41,11 @@ static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
struct io_uring_cmd_data *cache = req->async_data;
+ if (cache->op_data) {
+ kfree(cache->op_data);
+ cache->op_data = NULL;
+ }
+
if (issue_flags & IO_URING_F_UNLOCKED)
return;
if (io_alloc_cache_put(&req->ctx->uring_cache, cache)) {
--
2.45.2
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH 4/4] btrfs: don't read from userspace twice in btrfs_uring_encoded_read()
2024-11-20 16:02 [PATCH 1/4] io_uring/cmd: rename struct uring_cache to io_uring_cmd_data Mark Harmstone
2024-11-20 16:02 ` [PATCH 2/4] io_uring: use sizeof for io_issue_defs[IORING_OP_URING_CMD].async_size Mark Harmstone
2024-11-20 16:02 ` [PATCH 3/4] io_uring/cmd: add per-op data to struct io_uring_cmd_data Mark Harmstone
@ 2024-11-20 16:02 ` Mark Harmstone
2024-11-20 16:07 ` Mark Harmstone
2024-11-20 16:53 ` Jens Axboe
2 siblings, 2 replies; 7+ messages in thread
From: Mark Harmstone @ 2024-11-20 16:02 UTC (permalink / raw)
To: linux-btrfs, io-uring; +Cc: Mark Harmstone
If we return -EAGAIN the first time because we need to block,
btrfs_uring_encoded_read() will get called twice. Take a copy of args
the first time, to prevent userspace from messing around with it.
Signed-off-by: Mark Harmstone <[email protected]>
---
fs/btrfs/ioctl.c | 74 ++++++++++++++++++++++++++++++++----------------
1 file changed, 49 insertions(+), 25 deletions(-)
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 488dcd022dea..97f7812cbf7c 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -4873,7 +4873,7 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
{
size_t copy_end_kernel = offsetofend(struct btrfs_ioctl_encoded_io_args, flags);
size_t copy_end;
- struct btrfs_ioctl_encoded_io_args args = { 0 };
+ struct btrfs_ioctl_encoded_io_args *args;
int ret;
u64 disk_bytenr, disk_io_size;
struct file *file;
@@ -4888,6 +4888,9 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
struct extent_state *cached_state = NULL;
u64 start, lockend;
void __user *sqe_addr;
+ struct io_kiocb *req = cmd_to_io_kiocb(cmd);
+ struct io_uring_cmd_data *data = req->async_data;
+ bool need_copy = false;
if (!capable(CAP_SYS_ADMIN)) {
ret = -EPERM;
@@ -4899,34 +4902,55 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
io_tree = &inode->io_tree;
sqe_addr = u64_to_user_ptr(READ_ONCE(cmd->sqe->addr));
+ if (!data->op_data) {
+ data->op_data = kzalloc(sizeof(*args), GFP_NOFS);
+ if (!data->op_data) {
+ ret = -ENOMEM;
+ goto out_acct;
+ }
+
+ need_copy = true;
+ }
+
+ args = data->op_data;
+
if (issue_flags & IO_URING_F_COMPAT) {
#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
- struct btrfs_ioctl_encoded_io_args_32 args32;
-
copy_end = offsetofend(struct btrfs_ioctl_encoded_io_args_32, flags);
- if (copy_from_user(&args32, sqe_addr, copy_end)) {
- ret = -EFAULT;
- goto out_acct;
+
+ if (need_copy) {
+ struct btrfs_ioctl_encoded_io_args_32 args32;
+
+ if (copy_from_user(&args32, sqe_addr, copy_end)) {
+ ret = -EFAULT;
+ goto out_acct;
+ }
+
+ args->iov = compat_ptr(args32.iov);
+ args->iovcnt = args32.iovcnt;
+ args->offset = args32.offset;
+ args->flags = args32.flags;
}
- args.iov = compat_ptr(args32.iov);
- args.iovcnt = args32.iovcnt;
- args.offset = args32.offset;
- args.flags = args32.flags;
#else
return -ENOTTY;
#endif
} else {
copy_end = copy_end_kernel;
- if (copy_from_user(&args, sqe_addr, copy_end)) {
- ret = -EFAULT;
- goto out_acct;
+
+ if (need_copy) {
+ if (copy_from_user(args, sqe_addr, copy_end)) {
+ ret = -EFAULT;
+ goto out_acct;
+ }
}
}
- if (args.flags != 0)
- return -EINVAL;
+ if (args->flags != 0) {
+ ret = -EINVAL;
+ goto out_acct;
+ }
- ret = import_iovec(ITER_DEST, args.iov, args.iovcnt, ARRAY_SIZE(iovstack),
+ ret = import_iovec(ITER_DEST, args->iov, args->iovcnt, ARRAY_SIZE(iovstack),
&iov, &iter);
if (ret < 0)
goto out_acct;
@@ -4936,8 +4960,8 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
goto out_free;
}
- pos = args.offset;
- ret = rw_verify_area(READ, file, &pos, args.len);
+ pos = args->offset;
+ ret = rw_verify_area(READ, file, &pos, args->len);
if (ret < 0)
goto out_free;
@@ -4950,15 +4974,15 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
start = ALIGN_DOWN(pos, fs_info->sectorsize);
lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;
- ret = btrfs_encoded_read(&kiocb, &iter, &args, &cached_state,
+ ret = btrfs_encoded_read(&kiocb, &iter, args, &cached_state,
&disk_bytenr, &disk_io_size);
if (ret < 0 && ret != -EIOCBQUEUED)
goto out_free;
file_accessed(file);
- if (copy_to_user(sqe_addr + copy_end, (const char *)&args + copy_end_kernel,
- sizeof(args) - copy_end_kernel)) {
+ if (copy_to_user(sqe_addr + copy_end, (const char *)args + copy_end_kernel,
+ sizeof(*args) - copy_end_kernel)) {
if (ret == -EIOCBQUEUED) {
unlock_extent(io_tree, start, lockend, &cached_state);
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
@@ -4975,7 +4999,7 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
* undo this.
*/
if (!iov) {
- iov = kmemdup(iovstack, sizeof(struct iovec) * args.iovcnt,
+ iov = kmemdup(iovstack, sizeof(struct iovec) * args->iovcnt,
GFP_NOFS);
if (!iov) {
unlock_extent(io_tree, start, lockend, &cached_state);
@@ -4988,13 +5012,13 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
count = min_t(u64, iov_iter_count(&iter), disk_io_size);
/* Match ioctl by not returning past EOF if uncompressed. */
- if (!args.compression)
- count = min_t(u64, count, args.len);
+ if (!args->compression)
+ count = min_t(u64, count, args->len);
ret = btrfs_uring_read_extent(&kiocb, &iter, start, lockend,
cached_state, disk_bytenr,
disk_io_size, count,
- args.compression, iov, cmd);
+ args->compression, iov, cmd);
goto out_acct;
}
--
2.45.2
^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [PATCH 4/4] btrfs: don't read from userspace twice in btrfs_uring_encoded_read()
2024-11-20 16:02 ` [PATCH 4/4] btrfs: don't read from userspace twice in btrfs_uring_encoded_read() Mark Harmstone
@ 2024-11-20 16:07 ` Mark Harmstone
2024-11-20 16:53 ` Jens Axboe
1 sibling, 0 replies; 7+ messages in thread
From: Mark Harmstone @ 2024-11-20 16:07 UTC (permalink / raw)
To: [email protected], [email protected]
Could someone please apply my kmemdup patch from last week so that this
applies cleanly? "btrfs: use kmemdup in btrfs_uring_encoded_read"
On 20/11/24 16:02, Mark Harmstone wrote:
> If we return -EAGAIN the first time because we need to block,
> btrfs_uring_encoded_read() will get called twice. Take a copy of args
> the first time, to prevent userspace from messing around with it.
>
> Signed-off-by: Mark Harmstone <[email protected]>
> ---
> fs/btrfs/ioctl.c | 74 ++++++++++++++++++++++++++++++++----------------
> 1 file changed, 49 insertions(+), 25 deletions(-)
>
> diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
> index 488dcd022dea..97f7812cbf7c 100644
> --- a/fs/btrfs/ioctl.c
> +++ b/fs/btrfs/ioctl.c
> @@ -4873,7 +4873,7 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
> {
> size_t copy_end_kernel = offsetofend(struct btrfs_ioctl_encoded_io_args, flags);
> size_t copy_end;
> - struct btrfs_ioctl_encoded_io_args args = { 0 };
> + struct btrfs_ioctl_encoded_io_args *args;
> int ret;
> u64 disk_bytenr, disk_io_size;
> struct file *file;
> @@ -4888,6 +4888,9 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
> struct extent_state *cached_state = NULL;
> u64 start, lockend;
> void __user *sqe_addr;
> + struct io_kiocb *req = cmd_to_io_kiocb(cmd);
> + struct io_uring_cmd_data *data = req->async_data;
> + bool need_copy = false;
>
> if (!capable(CAP_SYS_ADMIN)) {
> ret = -EPERM;
> @@ -4899,34 +4902,55 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
> io_tree = &inode->io_tree;
> sqe_addr = u64_to_user_ptr(READ_ONCE(cmd->sqe->addr));
>
> + if (!data->op_data) {
> + data->op_data = kzalloc(sizeof(*args), GFP_NOFS);
> + if (!data->op_data) {
> + ret = -ENOMEM;
> + goto out_acct;
> + }
> +
> + need_copy = true;
> + }
> +
> + args = data->op_data;
> +
> if (issue_flags & IO_URING_F_COMPAT) {
> #if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
> - struct btrfs_ioctl_encoded_io_args_32 args32;
> -
> copy_end = offsetofend(struct btrfs_ioctl_encoded_io_args_32, flags);
> - if (copy_from_user(&args32, sqe_addr, copy_end)) {
> - ret = -EFAULT;
> - goto out_acct;
> +
> + if (need_copy) {
> + struct btrfs_ioctl_encoded_io_args_32 args32;
> +
> + if (copy_from_user(&args32, sqe_addr, copy_end)) {
> + ret = -EFAULT;
> + goto out_acct;
> + }
> +
> + args->iov = compat_ptr(args32.iov);
> + args->iovcnt = args32.iovcnt;
> + args->offset = args32.offset;
> + args->flags = args32.flags;
> }
> - args.iov = compat_ptr(args32.iov);
> - args.iovcnt = args32.iovcnt;
> - args.offset = args32.offset;
> - args.flags = args32.flags;
> #else
> return -ENOTTY;
> #endif
> } else {
> copy_end = copy_end_kernel;
> - if (copy_from_user(&args, sqe_addr, copy_end)) {
> - ret = -EFAULT;
> - goto out_acct;
> +
> + if (need_copy) {
> + if (copy_from_user(args, sqe_addr, copy_end)) {
> + ret = -EFAULT;
> + goto out_acct;
> + }
> }
> }
>
> - if (args.flags != 0)
> - return -EINVAL;
> + if (args->flags != 0) {
> + ret = -EINVAL;
> + goto out_acct;
> + }
>
> - ret = import_iovec(ITER_DEST, args.iov, args.iovcnt, ARRAY_SIZE(iovstack),
> + ret = import_iovec(ITER_DEST, args->iov, args->iovcnt, ARRAY_SIZE(iovstack),
> &iov, &iter);
> if (ret < 0)
> goto out_acct;
> @@ -4936,8 +4960,8 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
> goto out_free;
> }
>
> - pos = args.offset;
> - ret = rw_verify_area(READ, file, &pos, args.len);
> + pos = args->offset;
> + ret = rw_verify_area(READ, file, &pos, args->len);
> if (ret < 0)
> goto out_free;
>
> @@ -4950,15 +4974,15 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
> start = ALIGN_DOWN(pos, fs_info->sectorsize);
> lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;
>
> - ret = btrfs_encoded_read(&kiocb, &iter, &args, &cached_state,
> + ret = btrfs_encoded_read(&kiocb, &iter, args, &cached_state,
> &disk_bytenr, &disk_io_size);
> if (ret < 0 && ret != -EIOCBQUEUED)
> goto out_free;
>
> file_accessed(file);
>
> - if (copy_to_user(sqe_addr + copy_end, (const char *)&args + copy_end_kernel,
> - sizeof(args) - copy_end_kernel)) {
> + if (copy_to_user(sqe_addr + copy_end, (const char *)args + copy_end_kernel,
> + sizeof(*args) - copy_end_kernel)) {
> if (ret == -EIOCBQUEUED) {
> unlock_extent(io_tree, start, lockend, &cached_state);
> btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
> @@ -4975,7 +4999,7 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
> * undo this.
> */
> if (!iov) {
> - iov = kmemdup(iovstack, sizeof(struct iovec) * args.iovcnt,
> + iov = kmemdup(iovstack, sizeof(struct iovec) * args->iovcnt,
> GFP_NOFS);
> if (!iov) {
> unlock_extent(io_tree, start, lockend, &cached_state);
> @@ -4988,13 +5012,13 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
> count = min_t(u64, iov_iter_count(&iter), disk_io_size);
>
> /* Match ioctl by not returning past EOF if uncompressed. */
> - if (!args.compression)
> - count = min_t(u64, count, args.len);
> + if (!args->compression)
> + count = min_t(u64, count, args->len);
>
> ret = btrfs_uring_read_extent(&kiocb, &iter, start, lockend,
> cached_state, disk_bytenr,
> disk_io_size, count,
> - args.compression, iov, cmd);
> + args->compression, iov, cmd);
>
> goto out_acct;
> }
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH 2/4] io_uring: use sizeof for io_issue_defs[IORING_OP_URING_CMD].async_size
2024-11-20 16:02 ` [PATCH 2/4] io_uring: use sizeof for io_issue_defs[IORING_OP_URING_CMD].async_size Mark Harmstone
@ 2024-11-20 16:49 ` Jens Axboe
0 siblings, 0 replies; 7+ messages in thread
From: Jens Axboe @ 2024-11-20 16:49 UTC (permalink / raw)
To: Mark Harmstone, linux-btrfs, io-uring
On 11/20/24 9:02 AM, Mark Harmstone wrote:
> Correct the value of io_issue_defs[IORING_OP_URING_CMD].async_size so
> that it is derived from the size of the struct rather than being
> calculated.
You should probably just fold this with patch 1, where the type is
changed.
--
Jens Axboe
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH 4/4] btrfs: don't read from userspace twice in btrfs_uring_encoded_read()
2024-11-20 16:02 ` [PATCH 4/4] btrfs: don't read from userspace twice in btrfs_uring_encoded_read() Mark Harmstone
2024-11-20 16:07 ` Mark Harmstone
@ 2024-11-20 16:53 ` Jens Axboe
1 sibling, 0 replies; 7+ messages in thread
From: Jens Axboe @ 2024-11-20 16:53 UTC (permalink / raw)
To: Mark Harmstone, linux-btrfs, io-uring
On 11/20/24 9:02 AM, Mark Harmstone wrote:
> If we return -EAGAIN the first time because we need to block,
> btrfs_uring_encoded_read() will get called twice. Take a copy of args
> the first time, to prevent userspace from messing around with it.
>
> Signed-off-by: Mark Harmstone <[email protected]>
> ---
> fs/btrfs/ioctl.c | 74 ++++++++++++++++++++++++++++++++----------------
> 1 file changed, 49 insertions(+), 25 deletions(-)
>
> diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
> index 488dcd022dea..97f7812cbf7c 100644
> --- a/fs/btrfs/ioctl.c
> +++ b/fs/btrfs/ioctl.c
> @@ -4873,7 +4873,7 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
> {
> size_t copy_end_kernel = offsetofend(struct btrfs_ioctl_encoded_io_args, flags);
> size_t copy_end;
> - struct btrfs_ioctl_encoded_io_args args = { 0 };
> + struct btrfs_ioctl_encoded_io_args *args;
> int ret;
> u64 disk_bytenr, disk_io_size;
> struct file *file;
> @@ -4888,6 +4888,9 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
> struct extent_state *cached_state = NULL;
> u64 start, lockend;
> void __user *sqe_addr;
> + struct io_kiocb *req = cmd_to_io_kiocb(cmd);
> + struct io_uring_cmd_data *data = req->async_data;
> + bool need_copy = false;
>
> if (!capable(CAP_SYS_ADMIN)) {
> ret = -EPERM;
> @@ -4899,34 +4902,55 @@ static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue
> io_tree = &inode->io_tree;
> sqe_addr = u64_to_user_ptr(READ_ONCE(cmd->sqe->addr));
>
> + if (!data->op_data) {
> + data->op_data = kzalloc(sizeof(*args), GFP_NOFS);
> + if (!data->op_data) {
> + ret = -ENOMEM;
> + goto out_acct;
> + }
> +
> + need_copy = true;
> + }
I'd probably get rid of this need_copy variable and just do the copy
here? Might look cleaner with an btrfs_alloc_copy_foo() helper, as you
could just do:
ret = btrfs_alloc_copy_foo(...);
if (unlikely(ret))
return ret;
and hide all that ugly compat business outside the meat of this
function.
More of a style thing, the change itself looks fine.
--
Jens Axboe
^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2024-11-20 16:53 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2024-11-20 16:02 [PATCH 1/4] io_uring/cmd: rename struct uring_cache to io_uring_cmd_data Mark Harmstone
2024-11-20 16:02 ` [PATCH 2/4] io_uring: use sizeof for io_issue_defs[IORING_OP_URING_CMD].async_size Mark Harmstone
2024-11-20 16:49 ` Jens Axboe
2024-11-20 16:02 ` [PATCH 3/4] io_uring/cmd: add per-op data to struct io_uring_cmd_data Mark Harmstone
2024-11-20 16:02 ` [PATCH 4/4] btrfs: don't read from userspace twice in btrfs_uring_encoded_read() Mark Harmstone
2024-11-20 16:07 ` Mark Harmstone
2024-11-20 16:53 ` Jens Axboe
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox