* [PATCH V3 01/16] io_uring: increase io_kiocb->flags into 64bit
2023-03-14 12:57 [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD Ming Lei
@ 2023-03-14 12:57 ` Ming Lei
2023-03-14 12:57 ` [PATCH V3 02/16] io_uring: add IORING_OP_FUSED_CMD Ming Lei
` (18 subsequent siblings)
19 siblings, 0 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-14 12:57 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-block
Cc: Miklos Szeredi, ZiyangZhang, Xiaoguang Wang, Bernd Schubert,
Pavel Begunkov, Ming Lei
The 32bit io_kiocb->flags has been used up, so extend it to 64bit.
Signed-off-by: Ming Lei <[email protected]>
---
include/linux/io_uring_types.h | 65 +++++++++++++++++-----------------
io_uring/io_uring.c | 2 +-
2 files changed, 34 insertions(+), 33 deletions(-)
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 00689c12f6ab..f7b05552cc31 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -408,68 +408,68 @@ enum {
enum {
/* ctx owns file */
- REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT),
+ REQ_F_FIXED_FILE = BIT_ULL(REQ_F_FIXED_FILE_BIT),
/* drain existing IO first */
- REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT),
+ REQ_F_IO_DRAIN = BIT_ULL(REQ_F_IO_DRAIN_BIT),
/* linked sqes */
- REQ_F_LINK = BIT(REQ_F_LINK_BIT),
+ REQ_F_LINK = BIT_ULL(REQ_F_LINK_BIT),
/* doesn't sever on completion < 0 */
- REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT),
+ REQ_F_HARDLINK = BIT_ULL(REQ_F_HARDLINK_BIT),
/* IOSQE_ASYNC */
- REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),
+ REQ_F_FORCE_ASYNC = BIT_ULL(REQ_F_FORCE_ASYNC_BIT),
/* IOSQE_BUFFER_SELECT */
- REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT),
+ REQ_F_BUFFER_SELECT = BIT_ULL(REQ_F_BUFFER_SELECT_BIT),
/* IOSQE_CQE_SKIP_SUCCESS */
- REQ_F_CQE_SKIP = BIT(REQ_F_CQE_SKIP_BIT),
+ REQ_F_CQE_SKIP = BIT_ULL(REQ_F_CQE_SKIP_BIT),
/* fail rest of links */
- REQ_F_FAIL = BIT(REQ_F_FAIL_BIT),
+ REQ_F_FAIL = BIT_ULL(REQ_F_FAIL_BIT),
/* on inflight list, should be cancelled and waited on exit reliably */
- REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
+ REQ_F_INFLIGHT = BIT_ULL(REQ_F_INFLIGHT_BIT),
/* read/write uses file position */
- REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
+ REQ_F_CUR_POS = BIT_ULL(REQ_F_CUR_POS_BIT),
/* must not punt to workers */
- REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
+ REQ_F_NOWAIT = BIT_ULL(REQ_F_NOWAIT_BIT),
/* has or had linked timeout */
- REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
+ REQ_F_LINK_TIMEOUT = BIT_ULL(REQ_F_LINK_TIMEOUT_BIT),
/* needs cleanup */
- REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
+ REQ_F_NEED_CLEANUP = BIT_ULL(REQ_F_NEED_CLEANUP_BIT),
/* already went through poll handler */
- REQ_F_POLLED = BIT(REQ_F_POLLED_BIT),
+ REQ_F_POLLED = BIT_ULL(REQ_F_POLLED_BIT),
/* buffer already selected */
- REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
+ REQ_F_BUFFER_SELECTED = BIT_ULL(REQ_F_BUFFER_SELECTED_BIT),
/* buffer selected from ring, needs commit */
- REQ_F_BUFFER_RING = BIT(REQ_F_BUFFER_RING_BIT),
+ REQ_F_BUFFER_RING = BIT_ULL(REQ_F_BUFFER_RING_BIT),
/* caller should reissue async */
- REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT),
+ REQ_F_REISSUE = BIT_ULL(REQ_F_REISSUE_BIT),
/* supports async reads/writes */
- REQ_F_SUPPORT_NOWAIT = BIT(REQ_F_SUPPORT_NOWAIT_BIT),
+ REQ_F_SUPPORT_NOWAIT = BIT_ULL(REQ_F_SUPPORT_NOWAIT_BIT),
/* regular file */
- REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
+ REQ_F_ISREG = BIT_ULL(REQ_F_ISREG_BIT),
/* has creds assigned */
- REQ_F_CREDS = BIT(REQ_F_CREDS_BIT),
+ REQ_F_CREDS = BIT_ULL(REQ_F_CREDS_BIT),
/* skip refcounting if not set */
- REQ_F_REFCOUNT = BIT(REQ_F_REFCOUNT_BIT),
+ REQ_F_REFCOUNT = BIT_ULL(REQ_F_REFCOUNT_BIT),
/* there is a linked timeout that has to be armed */
- REQ_F_ARM_LTIMEOUT = BIT(REQ_F_ARM_LTIMEOUT_BIT),
+ REQ_F_ARM_LTIMEOUT = BIT_ULL(REQ_F_ARM_LTIMEOUT_BIT),
/* ->async_data allocated */
- REQ_F_ASYNC_DATA = BIT(REQ_F_ASYNC_DATA_BIT),
+ REQ_F_ASYNC_DATA = BIT_ULL(REQ_F_ASYNC_DATA_BIT),
/* don't post CQEs while failing linked requests */
- REQ_F_SKIP_LINK_CQES = BIT(REQ_F_SKIP_LINK_CQES_BIT),
+ REQ_F_SKIP_LINK_CQES = BIT_ULL(REQ_F_SKIP_LINK_CQES_BIT),
/* single poll may be active */
- REQ_F_SINGLE_POLL = BIT(REQ_F_SINGLE_POLL_BIT),
+ REQ_F_SINGLE_POLL = BIT_ULL(REQ_F_SINGLE_POLL_BIT),
/* double poll may active */
- REQ_F_DOUBLE_POLL = BIT(REQ_F_DOUBLE_POLL_BIT),
+ REQ_F_DOUBLE_POLL = BIT_ULL(REQ_F_DOUBLE_POLL_BIT),
/* request has already done partial IO */
- REQ_F_PARTIAL_IO = BIT(REQ_F_PARTIAL_IO_BIT),
+ REQ_F_PARTIAL_IO = BIT_ULL(REQ_F_PARTIAL_IO_BIT),
/* fast poll multishot mode */
- REQ_F_APOLL_MULTISHOT = BIT(REQ_F_APOLL_MULTISHOT_BIT),
+ REQ_F_APOLL_MULTISHOT = BIT_ULL(REQ_F_APOLL_MULTISHOT_BIT),
/* ->extra1 and ->extra2 are initialised */
- REQ_F_CQE32_INIT = BIT(REQ_F_CQE32_INIT_BIT),
+ REQ_F_CQE32_INIT = BIT_ULL(REQ_F_CQE32_INIT_BIT),
/* recvmsg special flag, clear EPOLLIN */
- REQ_F_CLEAR_POLLIN = BIT(REQ_F_CLEAR_POLLIN_BIT),
+ REQ_F_CLEAR_POLLIN = BIT_ULL(REQ_F_CLEAR_POLLIN_BIT),
/* hashed into ->cancel_hash_locked, protected by ->uring_lock */
- REQ_F_HASH_LOCKED = BIT(REQ_F_HASH_LOCKED_BIT),
+ REQ_F_HASH_LOCKED = BIT_ULL(REQ_F_HASH_LOCKED_BIT),
};
typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);
@@ -530,7 +530,8 @@ struct io_kiocb {
* and after selection it points to the buffer ID itself.
*/
u16 buf_index;
- unsigned int flags;
+ u32 __pad;
+ u64 flags;
struct io_cqe cqe;
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 722624b6d0dc..acd8959afe91 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -4418,7 +4418,7 @@ static int __init io_uring_init(void)
BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8));
BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS);
- BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));
+ BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(u64));
BUILD_BUG_ON(sizeof(atomic_t) != sizeof(u32));
--
2.39.2
^ permalink raw reply related [flat|nested] 49+ messages in thread
* [PATCH V3 02/16] io_uring: add IORING_OP_FUSED_CMD
2023-03-14 12:57 [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD Ming Lei
2023-03-14 12:57 ` [PATCH V3 01/16] io_uring: increase io_kiocb->flags into 64bit Ming Lei
@ 2023-03-14 12:57 ` Ming Lei
2023-03-18 14:31 ` Jens Axboe
2023-03-14 12:57 ` [PATCH V3 03/16] io_uring: support OP_READ/OP_WRITE for fused slave request Ming Lei
` (17 subsequent siblings)
19 siblings, 1 reply; 49+ messages in thread
From: Ming Lei @ 2023-03-14 12:57 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-block
Cc: Miklos Szeredi, ZiyangZhang, Xiaoguang Wang, Bernd Schubert,
Pavel Begunkov, Ming Lei
Add IORING_OP_FUSED_CMD, it is one special URING_CMD, which has to
be SQE128. The 1st SQE(master) is one 64byte URING_CMD, and the 2nd
64byte SQE(slave) is another normal 64byte OP. For any OP which needs
to support slave OP, io_issue_defs[op].fused_slave has to be set as 1,
and its ->issue() needs to retrieve buffer from master request's
fused_cmd_kbuf.
Follows the key points of the design/implementation:
1) The master uring command produces and provides immutable command
buffer(struct io_uring_bvec_buf) to the slave request, and the slave
OP can retrieve any part of this buffer by sqe->addr and sqe->len.
2) Master command is always completed after the slave request is
completed.
- Before slave request is submitted, the buffer ownership is
transferred to slave request. After slave request is completed,
the buffer ownership is returned back to master request.
- This way also guarantees correct SQE order since the master
request uses slave request's LINK flag.
3) Master request is always completed by driver, so that driver
can know when the buffer is done with the slave request.
The motivation is for supporting zero copy for fuse/ublk, in which
the device holds IO request buffer, and IO handling is often normal
IO OP(fs, net, ..). With IORING_OP_FUSED_CMD, we can implement this kind
of zero copy easily & reliably.
Signed-off-by: Ming Lei <[email protected]>
---
include/linux/io_uring.h | 49 ++++++-
include/linux/io_uring_types.h | 15 ++
include/uapi/linux/io_uring.h | 1 +
io_uring/Makefile | 2 +-
io_uring/fused_cmd.c | 245 +++++++++++++++++++++++++++++++++
io_uring/fused_cmd.h | 11 ++
io_uring/io_uring.c | 26 +++-
io_uring/io_uring.h | 3 +
io_uring/opdef.c | 12 ++
io_uring/opdef.h | 2 +
10 files changed, 360 insertions(+), 6 deletions(-)
create mode 100644 io_uring/fused_cmd.c
create mode 100644 io_uring/fused_cmd.h
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 934e5dd4ccc0..f3a23e00b47c 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -4,6 +4,7 @@
#include <linux/sched.h>
#include <linux/xarray.h>
+#include <linux/bvec.h>
#include <uapi/linux/io_uring.h>
enum io_uring_cmd_flags {
@@ -20,6 +21,26 @@ enum io_uring_cmd_flags {
IO_URING_F_SQE128 = (1 << 8),
IO_URING_F_CQE32 = (1 << 9),
IO_URING_F_IOPOLL = (1 << 10),
+
+ /* for FUSED_CMD only */
+ IO_URING_F_FUSED_WRITE = (1 << 11), /* slave writes to buffer */
+ IO_URING_F_FUSED_READ = (1 << 12), /* slave reads from buffer */
+ /* driver incapable of FUSED_CMD should fail cmd when seeing F_FUSED */
+ IO_URING_F_FUSED = IO_URING_F_FUSED_WRITE |
+ IO_URING_F_FUSED_READ,
+};
+
+union io_uring_fused_cmd_data {
+ /*
+ * In case of slave request IOSQE_CQE_SKIP_SUCCESS, return slave
+ * result via master command; otherwise we simply return success
+ * if buffer is provided, and slave request will return its result
+ * via its CQE
+ */
+ s32 slave_res;
+
+ /* fused cmd private, driver do not touch it */
+ struct io_kiocb *__slave;
};
struct io_uring_cmd {
@@ -33,10 +54,31 @@ struct io_uring_cmd {
};
u32 cmd_op;
u32 flags;
- u8 pdu[32]; /* available inline for free use */
+
+ /* for fused command, the available pdu is a bit less */
+ union {
+ struct {
+ union io_uring_fused_cmd_data data;
+ u8 pdu[24]; /* available inline for free use */
+ } fused;
+ u8 pdu[32]; /* available inline for free use */
+ };
+};
+
+struct io_uring_bvec_buf {
+ unsigned long len;
+ unsigned int nr_bvecs;
+
+ /* offset in the 1st bvec */
+ unsigned int offset;
+ struct bio_vec *bvec;
+ struct bio_vec __bvec[];
};
#if defined(CONFIG_IO_URING)
+void io_fused_cmd_provide_kbuf(struct io_uring_cmd *ioucmd, bool locked,
+ const struct io_uring_bvec_buf *imu,
+ void (*complete_tw_cb)(struct io_uring_cmd *));
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
struct iov_iter *iter, void *ioucmd);
void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2);
@@ -66,6 +108,11 @@ static inline void io_uring_free(struct task_struct *tsk)
__io_uring_free(tsk);
}
#else
+static inline void io_fused_cmd_provide_kbuf(struct io_uring_cmd *ioucmd,
+ bool locked, const struct io_uring_bvec_buf *fused_cmd_kbuf,
+ unsigned int len, void (*complete_tw_cb)(struct io_uring_cmd *))
+{
+}
static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
struct iov_iter *iter, void *ioucmd)
{
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index f7b05552cc31..0b3294067dfc 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -401,6 +401,7 @@ enum {
/* keep async read/write and isreg together and in order */
REQ_F_SUPPORT_NOWAIT_BIT,
REQ_F_ISREG_BIT,
+ REQ_F_FUSED_SLAVE_BIT,
/* not a real bit, just to check we're not overflowing the space */
__REQ_F_LAST_BIT,
@@ -470,6 +471,8 @@ enum {
REQ_F_CLEAR_POLLIN = BIT_ULL(REQ_F_CLEAR_POLLIN_BIT),
/* hashed into ->cancel_hash_locked, protected by ->uring_lock */
REQ_F_HASH_LOCKED = BIT_ULL(REQ_F_HASH_LOCKED_BIT),
+ /* slave request in fused cmd, won't be one uring cmd */
+ REQ_F_FUSED_SLAVE = BIT_ULL(REQ_F_FUSED_SLAVE_BIT),
};
typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);
@@ -552,6 +555,18 @@ struct io_kiocb {
* REQ_F_BUFFER_RING is set.
*/
struct io_buffer_list *buf_list;
+
+ /*
+ * store kernel (sub)buffer of fused master request which OP
+ * is IORING_OP_FUSED_CMD
+ */
+ const struct io_uring_bvec_buf *fused_cmd_kbuf;
+
+ /*
+ * store fused command master request for fuse slave request,
+ * which uses fuse master's kernel buffer for handling this OP
+ */
+ struct io_kiocb *fused_master_req;
};
union {
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 709de6d4feb2..f07d005ee898 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -223,6 +223,7 @@ enum io_uring_op {
IORING_OP_URING_CMD,
IORING_OP_SEND_ZC,
IORING_OP_SENDMSG_ZC,
+ IORING_OP_FUSED_CMD,
/* this goes last, obviously */
IORING_OP_LAST,
diff --git a/io_uring/Makefile b/io_uring/Makefile
index 8cc8e5387a75..5301077e61c5 100644
--- a/io_uring/Makefile
+++ b/io_uring/Makefile
@@ -7,5 +7,5 @@ obj-$(CONFIG_IO_URING) += io_uring.o xattr.o nop.o fs.o splice.o \
openclose.o uring_cmd.o epoll.o \
statx.o net.o msg_ring.o timeout.o \
sqpoll.o fdinfo.o tctx.o poll.o \
- cancel.o kbuf.o rsrc.o rw.o opdef.o notif.o
+ cancel.o kbuf.o rsrc.o rw.o opdef.o notif.o fused_cmd.o
obj-$(CONFIG_IO_WQ) += io-wq.o
diff --git a/io_uring/fused_cmd.c b/io_uring/fused_cmd.c
new file mode 100644
index 000000000000..7efcad590516
--- /dev/null
+++ b/io_uring/fused_cmd.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/io_uring.h>
+
+#include <uapi/linux/io_uring.h>
+
+#include "io_uring.h"
+#include "opdef.h"
+#include "rsrc.h"
+#include "uring_cmd.h"
+#include "fused_cmd.h"
+
+static bool io_fused_slave_valid(const struct io_uring_sqe *sqe, u8 op)
+{
+ unsigned int sqe_flags = READ_ONCE(sqe->flags);
+
+ if (op == IORING_OP_FUSED_CMD || op == IORING_OP_URING_CMD)
+ return false;
+
+ if (sqe_flags & REQ_F_BUFFER_SELECT)
+ return false;
+
+ if (!io_issue_defs[op].fused_slave)
+ return false;
+
+ return true;
+}
+
+static inline void io_fused_cmd_update_link_flags(struct io_kiocb *req,
+ const struct io_kiocb *slave)
+{
+ /*
+ * We have to keep slave SQE in order, so update master link flags
+ * with slave request's given master command isn't completed until
+ * the slave request is done
+ */
+ if (slave->flags & (REQ_F_LINK | REQ_F_HARDLINK))
+ req->flags |= REQ_F_LINK;
+}
+
+int io_fused_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ __must_hold(&req->ctx->uring_lock)
+{
+ struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
+ const struct io_uring_sqe *slave_sqe = sqe + 1;
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_kiocb *slave;
+ u8 slave_op;
+ int ret;
+
+ if (unlikely(!(ctx->flags & IORING_SETUP_SQE128)))
+ return -EINVAL;
+
+ if (unlikely(sqe->__pad1))
+ return -EINVAL;
+
+ ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
+ if (unlikely(ioucmd->flags))
+ return -EINVAL;
+
+ slave_op = READ_ONCE(slave_sqe->opcode);
+ if (unlikely(!io_fused_slave_valid(slave_sqe, slave_op)))
+ return -EINVAL;
+
+ ioucmd->cmd = sqe->cmd;
+ ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
+ req->fused_cmd_kbuf = NULL;
+
+ /* take one extra reference for the slave request */
+ io_get_task_refs(1);
+
+ ret = -ENOMEM;
+ if (unlikely(!io_alloc_req(ctx, &slave)))
+ goto fail;
+
+ ret = io_init_slave_req(ctx, slave, slave_sqe);
+ if (unlikely(ret))
+ goto fail_free_req;
+
+ /*
+ * The slave request won't be linked to io_uring submission link list,
+ * so it can't be handled by IORING_OP_LINK_TIMEOUT, however, we can do
+ * that on master command directly
+ */
+ io_fused_cmd_update_link_flags(req, slave);
+
+ ioucmd->fused.data.__slave = slave;
+
+ return 0;
+
+fail_free_req:
+ io_free_req(slave);
+fail:
+ current->io_uring->cached_refs += 1;
+ return ret;
+}
+
+static inline bool io_fused_slave_write_to_buf(u8 op)
+{
+ switch (op) {
+ case IORING_OP_READ:
+ case IORING_OP_READV:
+ case IORING_OP_READ_FIXED:
+ case IORING_OP_RECVMSG:
+ case IORING_OP_RECV:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+int io_fused_cmd(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
+ const struct io_kiocb *slave = ioucmd->fused.data.__slave;
+ int ret = -EINVAL;
+
+ /*
+ * Pass buffer direction for driver to validate if the read/write
+ * is legal
+ */
+ if (io_fused_slave_write_to_buf(slave->opcode))
+ issue_flags |= IO_URING_F_FUSED_WRITE;
+ else
+ issue_flags |= IO_URING_F_FUSED_READ;
+
+ ret = io_uring_cmd(req, issue_flags);
+ if (ret != IOU_ISSUE_SKIP_COMPLETE)
+ io_free_req(ioucmd->fused.data.__slave);
+
+ return ret;
+}
+
+int io_import_kbuf_for_slave(unsigned long buf_off, unsigned int len, int dir,
+ struct iov_iter *iter, struct io_kiocb *slave)
+{
+ struct io_kiocb *req = slave->fused_master_req;
+ const struct io_uring_bvec_buf *kbuf;
+ unsigned long offset;
+
+ if (unlikely(!(slave->flags & REQ_F_FUSED_SLAVE) || !req))
+ return -EINVAL;
+
+ if (unlikely(!req->fused_cmd_kbuf))
+ return -EINVAL;
+
+ /* req->fused_cmd_kbuf is immutable */
+ kbuf = req->fused_cmd_kbuf;
+ offset = kbuf->offset;
+
+ if (!kbuf->bvec)
+ return -EINVAL;
+
+ if (unlikely(buf_off > kbuf->len))
+ return -EFAULT;
+
+ if (unlikely(len > kbuf->len - buf_off))
+ return -EFAULT;
+
+ /* don't use io_import_fixed which doesn't support multipage bvec */
+ offset += buf_off;
+ iov_iter_bvec(iter, dir, kbuf->bvec, kbuf->nr_bvecs, offset + len);
+
+ if (offset)
+ iov_iter_advance(iter, offset);
+
+ return 0;
+}
+
+/*
+ * Called when slave request is completed,
+ *
+ * Return back ownership of the fused_cmd kbuf to master request, and
+ * notify master request.
+ */
+void io_fused_cmd_return_kbuf(struct io_kiocb *slave)
+{
+ struct io_kiocb *req = slave->fused_master_req;
+ struct io_uring_cmd *ioucmd;
+
+ if (unlikely(!req || !(slave->flags & REQ_F_FUSED_SLAVE)))
+ return;
+
+ /* return back the buffer */
+ slave->fused_master_req = NULL;
+ ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
+ ioucmd->fused.data.__slave = NULL;
+
+ /*
+ * If slave OP skips CQE, return the result via master command; or
+ * if slave request is failed, REQ_F_CQE_SKIP will be cleared, return
+ * result too
+ */
+ if ((slave->flags & REQ_F_CQE_SKIP) || slave->cqe.res < 0)
+ ioucmd->fused.data.slave_res = slave->cqe.res;
+ else
+ ioucmd->fused.data.slave_res = 0;
+ io_uring_cmd_complete_in_task(ioucmd, ioucmd->task_work_cb);
+}
+
+/*
+ * This API needs to be called when master command has prepared
+ * FUSED_CMD buffer, and offset/len in ->fused.data is for retrieving
+ * sub-buffer in the command buffer, which is often figured out by
+ * command payload data.
+ *
+ * Master command is always completed after the slave request
+ * is completed, so driver has to set completion callback for
+ * getting notification.
+ *
+ * Ownership of the fused_cmd kbuf is transferred to slave request.
+ */
+void io_fused_cmd_provide_kbuf(struct io_uring_cmd *ioucmd, bool locked,
+ const struct io_uring_bvec_buf *fused_cmd_kbuf,
+ void (*complete_tw_cb)(struct io_uring_cmd *))
+{
+ struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+ struct io_kiocb *slave = ioucmd->fused.data.__slave;
+
+ if (WARN_ON_ONCE(unlikely(!slave ||
+ !(slave->flags & REQ_F_FUSED_SLAVE))))
+ return;
+
+ /*
+ * Once the fused slave request is completed, the driver will
+ * be notified by callback of complete_tw_cb
+ */
+ ioucmd->task_work_cb = complete_tw_cb;
+
+ /* now we get the buffer */
+ req->fused_cmd_kbuf = fused_cmd_kbuf;
+ slave->fused_master_req = req;
+
+ trace_io_uring_submit_sqe(slave, true);
+ if (locked)
+ io_req_task_submit(slave, &locked);
+ else
+ io_req_task_queue(slave);
+}
+EXPORT_SYMBOL_GPL(io_fused_cmd_provide_kbuf);
diff --git a/io_uring/fused_cmd.h b/io_uring/fused_cmd.h
new file mode 100644
index 000000000000..86ad87d1b0ec
--- /dev/null
+++ b/io_uring/fused_cmd.h
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef IOU_FUSED_CMD_H
+#define IOU_FUSED_CMD_H
+
+int io_fused_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_fused_cmd(struct io_kiocb *req, unsigned int issue_flags);
+void io_fused_cmd_return_kbuf(struct io_kiocb *slave);
+int io_import_kbuf_for_slave(unsigned long buf, unsigned int len, int dir,
+ struct iov_iter *iter, struct io_kiocb *slave);
+
+#endif
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index acd8959afe91..c8d1aab2ac4c 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -91,6 +91,7 @@
#include "cancel.h"
#include "net.h"
#include "notif.h"
+#include "fused_cmd.h"
#include "timeout.h"
#include "poll.h"
@@ -110,7 +111,7 @@
#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
- REQ_F_ASYNC_DATA)
+ REQ_F_ASYNC_DATA | REQ_F_FUSED_SLAVE)
#define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | REQ_F_LINK | REQ_F_HARDLINK |\
IO_REQ_CLEAN_FLAGS)
@@ -964,6 +965,9 @@ static void __io_req_complete_post(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
+ if (req->flags & REQ_F_FUSED_SLAVE)
+ io_fused_cmd_return_kbuf(req);
+
io_cq_lock(ctx);
if (!(req->flags & REQ_F_CQE_SKIP))
io_fill_cqe_req(ctx, req);
@@ -1848,6 +1852,8 @@ static void io_clean_op(struct io_kiocb *req)
spin_lock(&req->ctx->completion_lock);
io_put_kbuf_comp(req);
spin_unlock(&req->ctx->completion_lock);
+ } else if (req->flags & REQ_F_FUSED_SLAVE) {
+ io_fused_cmd_return_kbuf(req);
}
if (req->flags & REQ_F_NEED_CLEANUP) {
@@ -2156,8 +2162,8 @@ static void io_init_req_drain(struct io_kiocb *req)
}
}
-static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
+static inline int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ const struct io_uring_sqe *sqe, bool slave)
__must_hold(&ctx->uring_lock)
{
const struct io_issue_def *def;
@@ -2210,6 +2216,12 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
}
}
+ if (slave) {
+ if (!def->fused_slave)
+ return -EINVAL;
+ req->flags |= REQ_F_FUSED_SLAVE;
+ }
+
if (!def->ioprio && sqe->ioprio)
return -EINVAL;
if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
@@ -2250,6 +2262,12 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
return def->prep(req, sqe);
}
+int io_init_slave_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ const struct io_uring_sqe *sqe)
+{
+ return io_init_req(ctx, req, sqe, true);
+}
+
static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe,
struct io_kiocb *req, int ret)
{
@@ -2294,7 +2312,7 @@ static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
struct io_submit_link *link = &ctx->submit_state.link;
int ret;
- ret = io_init_req(ctx, req, sqe);
+ ret = io_init_req(ctx, req, sqe, false);
if (unlikely(ret))
return io_submit_fail_init(sqe, req, ret);
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 2711865f1e19..637e12e4fb9f 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -78,6 +78,9 @@ bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
bool cancel_all);
+int io_init_slave_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ const struct io_uring_sqe *sqe);
+
#define io_lockdep_assert_cq_locked(ctx) \
do { \
if (ctx->flags & IORING_SETUP_IOPOLL) { \
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index cca7c5b55208..63b90e8e65f8 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -33,6 +33,7 @@
#include "poll.h"
#include "cancel.h"
#include "rw.h"
+#include "fused_cmd.h"
static int io_no_issue(struct io_kiocb *req, unsigned int issue_flags)
{
@@ -428,6 +429,12 @@ const struct io_issue_def io_issue_defs[] = {
.prep = io_eopnotsupp_prep,
#endif
},
+ [IORING_OP_FUSED_CMD] = {
+ .needs_file = 1,
+ .plug = 1,
+ .prep = io_fused_cmd_prep,
+ .issue = io_fused_cmd,
+ },
};
@@ -648,6 +655,11 @@ const struct io_cold_def io_cold_defs[] = {
.fail = io_sendrecv_fail,
#endif
},
+ [IORING_OP_FUSED_CMD] = {
+ .name = "FUSED_CMD",
+ .async_size = uring_cmd_pdu_size(1),
+ .prep_async = io_uring_cmd_prep_async,
+ },
};
const char *io_uring_get_opcode(u8 opcode)
diff --git a/io_uring/opdef.h b/io_uring/opdef.h
index c22c8696e749..306f6fc48ed4 100644
--- a/io_uring/opdef.h
+++ b/io_uring/opdef.h
@@ -29,6 +29,8 @@ struct io_issue_def {
unsigned iopoll_queue : 1;
/* opcode specific path will handle ->async_data allocation if needed */
unsigned manual_alloc : 1;
+ /* can be slave op of fused command */
+ unsigned fused_slave : 1;
int (*issue)(struct io_kiocb *, unsigned int);
int (*prep)(struct io_kiocb *, const struct io_uring_sqe *);
--
2.39.2
^ permalink raw reply related [flat|nested] 49+ messages in thread
* Re: [PATCH V3 02/16] io_uring: add IORING_OP_FUSED_CMD
2023-03-14 12:57 ` [PATCH V3 02/16] io_uring: add IORING_OP_FUSED_CMD Ming Lei
@ 2023-03-18 14:31 ` Jens Axboe
2023-03-18 15:24 ` Ming Lei
0 siblings, 1 reply; 49+ messages in thread
From: Jens Axboe @ 2023-03-18 14:31 UTC (permalink / raw)
To: Ming Lei, io-uring, linux-block
Cc: Miklos Szeredi, ZiyangZhang, Xiaoguang Wang, Bernd Schubert,
Pavel Begunkov
On 3/14/23 6:57 AM, Ming Lei wrote:
> Add IORING_OP_FUSED_CMD, it is one special URING_CMD, which has to
> be SQE128. The 1st SQE(master) is one 64byte URING_CMD, and the 2nd
> 64byte SQE(slave) is another normal 64byte OP. For any OP which needs
> to support slave OP, io_issue_defs[op].fused_slave has to be set as 1,
> and its ->issue() needs to retrieve buffer from master request's
> fused_cmd_kbuf.
Since we'd be introducing this as a new concept, probably makes sense to
name it something other than master/slave. What about primary and
secondary? Producer/consumer?
> +static inline bool io_fused_slave_write_to_buf(u8 op)
> +{
> + switch (op) {
> + case IORING_OP_READ:
> + case IORING_OP_READV:
> + case IORING_OP_READ_FIXED:
> + case IORING_OP_RECVMSG:
> + case IORING_OP_RECV:
> + return 1;
> + default:
> + return 0;
> + }
> +}
Maybe add a data direction bit to the hot opdef part? Any command that
has fused support should ensure that it is set correctly.
> +int io_import_kbuf_for_slave(unsigned long buf_off, unsigned int len, int dir,
> + struct iov_iter *iter, struct io_kiocb *slave)
> +{
The kbuf naming should probably also change, as it kind of overlaps with
the kbufs we already have and which are not really related.
--
Jens Axboe
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 02/16] io_uring: add IORING_OP_FUSED_CMD
2023-03-18 14:31 ` Jens Axboe
@ 2023-03-18 15:24 ` Ming Lei
2023-03-18 16:00 ` Jens Axboe
2023-03-18 16:13 ` Ming Lei
0 siblings, 2 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-18 15:24 UTC (permalink / raw)
To: Jens Axboe
Cc: io-uring, linux-block, Miklos Szeredi, ZiyangZhang,
Xiaoguang Wang, Bernd Schubert, Pavel Begunkov, ming.lei
On Sat, Mar 18, 2023 at 08:31:44AM -0600, Jens Axboe wrote:
> On 3/14/23 6:57 AM, Ming Lei wrote:
> > Add IORING_OP_FUSED_CMD, it is one special URING_CMD, which has to
> > be SQE128. The 1st SQE(master) is one 64byte URING_CMD, and the 2nd
> > 64byte SQE(slave) is another normal 64byte OP. For any OP which needs
> > to support slave OP, io_issue_defs[op].fused_slave has to be set as 1,
> > and its ->issue() needs to retrieve buffer from master request's
> > fused_cmd_kbuf.
>
> Since we'd be introducing this as a new concept, probably makes sense to
> name it something other than master/slave. What about primary and
> secondary? Producer/consumer?
Either of the two looks fine for me, and I will take secondary in next
version if no one objects.
>
> > +static inline bool io_fused_slave_write_to_buf(u8 op)
> > +{
> > + switch (op) {
> > + case IORING_OP_READ:
> > + case IORING_OP_READV:
> > + case IORING_OP_READ_FIXED:
> > + case IORING_OP_RECVMSG:
> > + case IORING_OP_RECV:
> > + return 1;
> > + default:
> > + return 0;
> > + }
> > +}
>
> Maybe add a data direction bit to the hot opdef part? Any command that
> has fused support should ensure that it is set correctly.
Good idea!
>
> > +int io_import_kbuf_for_slave(unsigned long buf_off, unsigned int len, int dir,
> > + struct iov_iter *iter, struct io_kiocb *slave)
> > +{
>
> The kbuf naming should probably also change, as it kind of overlaps with
> the kbufs we already have and which are not really related.
How about _bvec_buf_ or simply _buf_?
Thanks,
Ming
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 02/16] io_uring: add IORING_OP_FUSED_CMD
2023-03-18 15:24 ` Ming Lei
@ 2023-03-18 16:00 ` Jens Axboe
2023-03-18 16:13 ` Ming Lei
1 sibling, 0 replies; 49+ messages in thread
From: Jens Axboe @ 2023-03-18 16:00 UTC (permalink / raw)
To: Ming Lei
Cc: io-uring, linux-block, Miklos Szeredi, ZiyangZhang,
Xiaoguang Wang, Bernd Schubert, Pavel Begunkov
On 3/18/23 9:24 AM, Ming Lei wrote:
>>> +int io_import_kbuf_for_slave(unsigned long buf_off, unsigned int len, int dir,
>>> + struct iov_iter *iter, struct io_kiocb *slave)
>>> +{
>>
>> The kbuf naming should probably also change, as it kind of overlaps with
>> the kbufs we already have and which are not really related.
>
> How about _bvec_buf_ or simply _buf_?
Either one is fine, buf probably good enough and makes it a bit shorter.
--
Jens Axboe
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 02/16] io_uring: add IORING_OP_FUSED_CMD
2023-03-18 15:24 ` Ming Lei
2023-03-18 16:00 ` Jens Axboe
@ 2023-03-18 16:13 ` Ming Lei
1 sibling, 0 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-18 16:13 UTC (permalink / raw)
To: Jens Axboe
Cc: io-uring, linux-block, Miklos Szeredi, ZiyangZhang,
Xiaoguang Wang, Bernd Schubert, Pavel Begunkov, ming.lei
On Sat, Mar 18, 2023 at 11:24:07PM +0800, Ming Lei wrote:
> On Sat, Mar 18, 2023 at 08:31:44AM -0600, Jens Axboe wrote:
> > On 3/14/23 6:57 AM, Ming Lei wrote:
> > > Add IORING_OP_FUSED_CMD, it is one special URING_CMD, which has to
> > > be SQE128. The 1st SQE(master) is one 64byte URING_CMD, and the 2nd
> > > 64byte SQE(slave) is another normal 64byte OP. For any OP which needs
> > > to support slave OP, io_issue_defs[op].fused_slave has to be set as 1,
> > > and its ->issue() needs to retrieve buffer from master request's
> > > fused_cmd_kbuf.
> >
> > Since we'd be introducing this as a new concept, probably makes sense to
> > name it something other than master/slave. What about primary and
> > secondary? Producer/consumer?
>
> Either of the two looks fine for me, and I will take secondary in next
> version if no one objects.
Thinking of further, probably master/slave is still better since slave
OP can be thought as part of master command, and it does serve for
master command.
That said master command not only provides buffer reference to slave OP,
but also requires slave OP to consume the buffer reference and complete the OP.
> > How about _bvec_buf_ or simply _buf_?
> Either one is fine, buf probably good enough and makes it a bit shorter.
OK.
Thanks,
Ming
^ permalink raw reply [flat|nested] 49+ messages in thread
* [PATCH V3 03/16] io_uring: support OP_READ/OP_WRITE for fused slave request
2023-03-14 12:57 [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD Ming Lei
2023-03-14 12:57 ` [PATCH V3 01/16] io_uring: increase io_kiocb->flags into 64bit Ming Lei
2023-03-14 12:57 ` [PATCH V3 02/16] io_uring: add IORING_OP_FUSED_CMD Ming Lei
@ 2023-03-14 12:57 ` Ming Lei
2023-03-14 12:57 ` [PATCH V3 04/16] io_uring: support OP_SEND_ZC/OP_RECV " Ming Lei
` (16 subsequent siblings)
19 siblings, 0 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-14 12:57 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-block
Cc: Miklos Szeredi, ZiyangZhang, Xiaoguang Wang, Bernd Schubert,
Pavel Begunkov, Ming Lei
Start to allow fused slave request to support OP_READ/OP_WRITE, and
the buffer can be retrieved from master request.
Once the slave request is completed, the master buffer will be returned
back.
Signed-off-by: Ming Lei <[email protected]>
---
io_uring/opdef.c | 2 ++
io_uring/rw.c | 20 ++++++++++++++++++++
2 files changed, 22 insertions(+)
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index 63b90e8e65f8..f044629e5475 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -235,6 +235,7 @@ const struct io_issue_def io_issue_defs[] = {
.ioprio = 1,
.iopoll = 1,
.iopoll_queue = 1,
+ .fused_slave = 1,
.prep = io_prep_rw,
.issue = io_read,
},
@@ -248,6 +249,7 @@ const struct io_issue_def io_issue_defs[] = {
.ioprio = 1,
.iopoll = 1,
.iopoll_queue = 1,
+ .fused_slave = 1,
.prep = io_prep_rw,
.issue = io_write,
},
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 4c233910e200..36d31a943317 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -19,6 +19,7 @@
#include "kbuf.h"
#include "rsrc.h"
#include "rw.h"
+#include "fused_cmd.h"
struct io_rw {
/* NOTE: kiocb has the file as the first member, so don't do it here */
@@ -371,6 +372,17 @@ static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
size_t sqe_len;
ssize_t ret;
+ /*
+ * SLAVE OP passes buffer offset from sqe->addr actually, since
+ * the fused cmd kbuf's mapped start address is zero.
+ */
+ if (req->flags & REQ_F_FUSED_SLAVE) {
+ ret = io_import_kbuf_for_slave(rw->addr, rw->len, ddir, iter, req);
+ if (ret)
+ return ERR_PTR(ret);
+ return NULL;
+ }
+
if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len);
if (ret)
@@ -428,11 +440,19 @@ static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
*/
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
{
+ struct io_kiocb *req = cmd_to_io_kiocb(rw);
struct kiocb *kiocb = &rw->kiocb;
struct file *file = kiocb->ki_filp;
ssize_t ret = 0;
loff_t *ppos;
+ /*
+ * Fused slave req hasn't user buffer, so ->read/->write can't
+ * be supported
+ */
+ if (req->flags & REQ_F_FUSED_SLAVE)
+ return -EOPNOTSUPP;
+
/*
* Don't support polled IO through this interface, and we can't
* support non-blocking either. For the latter, this just causes
--
2.39.2
^ permalink raw reply related [flat|nested] 49+ messages in thread
* [PATCH V3 04/16] io_uring: support OP_SEND_ZC/OP_RECV for fused slave request
2023-03-14 12:57 [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD Ming Lei
` (2 preceding siblings ...)
2023-03-14 12:57 ` [PATCH V3 03/16] io_uring: support OP_READ/OP_WRITE for fused slave request Ming Lei
@ 2023-03-14 12:57 ` Ming Lei
2023-03-14 12:57 ` [PATCH V3 05/16] block: ublk_drv: mark device as LIVE before adding disk Ming Lei
` (15 subsequent siblings)
19 siblings, 0 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-14 12:57 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-block
Cc: Miklos Szeredi, ZiyangZhang, Xiaoguang Wang, Bernd Schubert,
Pavel Begunkov, Ming Lei
Allow the fused slave request to support OP_SEND_ZC/OP_RECV; the buffer
can be retrieved from the master request.
Once the slave request is completed, the master buffer will be
returned.
Signed-off-by: Ming Lei <[email protected]>
---
io_uring/net.c | 30 ++++++++++++++++++++++++++++--
io_uring/opdef.c | 3 +++
2 files changed, 31 insertions(+), 2 deletions(-)
diff --git a/io_uring/net.c b/io_uring/net.c
index b7f190ca528e..78fb2d08ecd0 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -16,6 +16,7 @@
#include "net.h"
#include "notif.h"
#include "rsrc.h"
+#include "fused_cmd.h"
#if defined(CONFIG_NET)
struct io_shutdown {
@@ -68,6 +69,13 @@ struct io_sr_msg {
struct io_kiocb *notif;
};
+#define user_ptr_to_u64(x) ( \
+{ \
+ typecheck(void __user *, (x)); \
+ (u64)(unsigned long)(x); \
+} \
+)
+
static inline bool io_check_multishot(struct io_kiocb *req,
unsigned int issue_flags)
{
@@ -378,7 +386,11 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
if (unlikely(!sock))
return -ENOTSOCK;
- ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &msg.msg_iter);
+ if (!(req->flags & REQ_F_FUSED_SLAVE))
+ ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &msg.msg_iter);
+ else
+ ret = io_import_kbuf_for_slave(user_ptr_to_u64(sr->buf),
+ sr->len, ITER_SOURCE, &msg.msg_iter, req);
if (unlikely(ret))
return ret;
@@ -869,7 +881,11 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
sr->buf = buf;
}
- ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
+ if (!(req->flags & REQ_F_FUSED_SLAVE))
+ ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
+ else
+ ret = io_import_kbuf_for_slave(user_ptr_to_u64(sr->buf),
+ sr->len, ITER_DEST, &msg.msg_iter, req);
if (unlikely(ret))
goto out_free;
@@ -983,6 +999,9 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
unsigned idx = READ_ONCE(sqe->buf_index);
+ if (req->flags & REQ_F_FUSED_SLAVE)
+ return -EINVAL;
+
if (unlikely(idx >= ctx->nr_user_bufs))
return -EFAULT;
idx = array_index_nospec(idx, ctx->nr_user_bufs);
@@ -1119,8 +1138,15 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
if (unlikely(ret))
return ret;
msg.sg_from_iter = io_sg_from_iter;
+ } else if (req->flags & REQ_F_FUSED_SLAVE) {
+ ret = io_import_kbuf_for_slave(user_ptr_to_u64(zc->buf),
+ zc->len, ITER_SOURCE, &msg.msg_iter, req);
+ if (unlikely(ret))
+ return ret;
+ msg.sg_from_iter = io_sg_from_iter;
} else {
io_notif_set_extended(zc->notif);
+
ret = import_ubuf(ITER_SOURCE, zc->buf, zc->len, &msg.msg_iter);
if (unlikely(ret))
return ret;
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index f044629e5475..0a9d39a9db16 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -271,6 +271,7 @@ const struct io_issue_def io_issue_defs[] = {
.audit_skip = 1,
.ioprio = 1,
.manual_alloc = 1,
+ .fused_slave = 1,
#if defined(CONFIG_NET)
.prep = io_sendmsg_prep,
.issue = io_send,
@@ -285,6 +286,7 @@ const struct io_issue_def io_issue_defs[] = {
.buffer_select = 1,
.audit_skip = 1,
.ioprio = 1,
+ .fused_slave = 1,
#if defined(CONFIG_NET)
.prep = io_recvmsg_prep,
.issue = io_recv,
@@ -411,6 +413,7 @@ const struct io_issue_def io_issue_defs[] = {
.audit_skip = 1,
.ioprio = 1,
.manual_alloc = 1,
+ .fused_slave = 1,
#if defined(CONFIG_NET)
.prep = io_send_zc_prep,
.issue = io_send_zc,
--
2.39.2
^ permalink raw reply related [flat|nested] 49+ messages in thread
* [PATCH V3 05/16] block: ublk_drv: mark device as LIVE before adding disk
2023-03-14 12:57 [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD Ming Lei
` (3 preceding siblings ...)
2023-03-14 12:57 ` [PATCH V3 04/16] io_uring: support OP_SEND_ZC/OP_RECV " Ming Lei
@ 2023-03-14 12:57 ` Ming Lei
2023-03-14 12:57 ` [PATCH V3 06/16] block: ublk_drv: add common exit handling Ming Lei
` (14 subsequent siblings)
19 siblings, 0 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-14 12:57 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-block
Cc: Miklos Szeredi, ZiyangZhang, Xiaoguang Wang, Bernd Schubert,
Pavel Begunkov, Ming Lei
IO can be started before add_disk() returns, such as reading the partition table,
in which case the monitor work has to run to guarantee forward progress.
So mark the device as LIVE before adding the disk, and change it to
DEAD if add_disk() fails.
Reviewed-by: Ziyang Zhang <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
---
drivers/block/ublk_drv.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index d1d1c8d606c8..fb5a557afde8 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -1602,17 +1602,18 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
get_device(&ub->cdev_dev);
+ ub->dev_info.state = UBLK_S_DEV_LIVE;
ret = add_disk(disk);
if (ret) {
/*
* Has to drop the reference since ->free_disk won't be
* called in case of add_disk failure.
*/
+ ub->dev_info.state = UBLK_S_DEV_DEAD;
ublk_put_device(ub);
goto out_put_disk;
}
set_bit(UB_STATE_USED, &ub->state);
- ub->dev_info.state = UBLK_S_DEV_LIVE;
out_put_disk:
if (ret)
put_disk(disk);
--
2.39.2
^ permalink raw reply related [flat|nested] 49+ messages in thread
* [PATCH V3 06/16] block: ublk_drv: add common exit handling
2023-03-14 12:57 [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD Ming Lei
` (4 preceding siblings ...)
2023-03-14 12:57 ` [PATCH V3 05/16] block: ublk_drv: mark device as LIVE before adding disk Ming Lei
@ 2023-03-14 12:57 ` Ming Lei
2023-03-14 12:57 ` [PATCH V3 07/16] block: ublk_drv: don't consider flush request in map/unmap io Ming Lei
` (13 subsequent siblings)
19 siblings, 0 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-14 12:57 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-block
Cc: Miklos Szeredi, ZiyangZhang, Xiaoguang Wang, Bernd Schubert,
Pavel Begunkov, Ming Lei
Simplify exit handling a bit, and prepare for supporting fused command.
Reviewed-by: Ziyang Zhang <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
---
drivers/block/ublk_drv.c | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index fb5a557afde8..64821755f415 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -655,14 +655,15 @@ static void ublk_complete_rq(struct request *req)
struct ublk_queue *ubq = req->mq_hctx->driver_data;
struct ublk_io *io = &ubq->ios[req->tag];
unsigned int unmapped_bytes;
+ int res = BLK_STS_OK;
/* failed read IO if nothing is read */
if (!io->res && req_op(req) == REQ_OP_READ)
io->res = -EIO;
if (io->res < 0) {
- blk_mq_end_request(req, errno_to_blk_status(io->res));
- return;
+ res = errno_to_blk_status(io->res);
+ goto exit;
}
/*
@@ -671,10 +672,8 @@ static void ublk_complete_rq(struct request *req)
*
* Both the two needn't unmap.
*/
- if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE) {
- blk_mq_end_request(req, BLK_STS_OK);
- return;
- }
+ if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE)
+ goto exit;
/* for READ request, writing data in iod->addr to rq buffers */
unmapped_bytes = ublk_unmap_io(ubq, req, io);
@@ -691,6 +690,10 @@ static void ublk_complete_rq(struct request *req)
blk_mq_requeue_request(req, true);
else
__blk_mq_end_request(req, BLK_STS_OK);
+
+ return;
+exit:
+ blk_mq_end_request(req, res);
}
/*
--
2.39.2
^ permalink raw reply related [flat|nested] 49+ messages in thread
* [PATCH V3 07/16] block: ublk_drv: don't consider flush request in map/unmap io
2023-03-14 12:57 [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD Ming Lei
` (5 preceding siblings ...)
2023-03-14 12:57 ` [PATCH V3 06/16] block: ublk_drv: add common exit handling Ming Lei
@ 2023-03-14 12:57 ` Ming Lei
2023-03-14 12:57 ` [PATCH V3 08/16] block: ublk_drv: add two helpers to clean up map/unmap request Ming Lei
` (12 subsequent siblings)
19 siblings, 0 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-14 12:57 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-block
Cc: Miklos Szeredi, ZiyangZhang, Xiaoguang Wang, Bernd Schubert,
Pavel Begunkov, Ming Lei
A request with REQ_OP_FLUSH never carries data, so don't consider it
in either ublk_map_io() or ublk_unmap_io().
Reviewed-by: Ziyang Zhang <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
---
drivers/block/ublk_drv.c | 10 +++-------
1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 64821755f415..5b13a58b424b 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -529,15 +529,13 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
struct ublk_io *io)
{
const unsigned int rq_bytes = blk_rq_bytes(req);
+
/*
* no zero copy, we delay copy WRITE request data into ublksrv
* context and the big benefit is that pinning pages in current
* context is pretty fast, see ublk_pin_user_pages
*/
- if (req_op(req) != REQ_OP_WRITE && req_op(req) != REQ_OP_FLUSH)
- return rq_bytes;
-
- if (ublk_rq_has_data(req)) {
+ if (ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE) {
struct ublk_map_data data = {
.ubq = ubq,
.rq = req,
@@ -772,9 +770,7 @@ static inline void __ublk_rq_task_work(struct request *req)
return;
}
- if (ublk_need_get_data(ubq) &&
- (req_op(req) == REQ_OP_WRITE ||
- req_op(req) == REQ_OP_FLUSH)) {
+ if (ublk_need_get_data(ubq) && (req_op(req) == REQ_OP_WRITE)) {
/*
* We have not handled UBLK_IO_NEED_GET_DATA command yet,
* so immepdately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
--
2.39.2
^ permalink raw reply related [flat|nested] 49+ messages in thread
* [PATCH V3 08/16] block: ublk_drv: add two helpers to clean up map/unmap request
2023-03-14 12:57 [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD Ming Lei
` (6 preceding siblings ...)
2023-03-14 12:57 ` [PATCH V3 07/16] block: ublk_drv: don't consider flush request in map/unmap io Ming Lei
@ 2023-03-14 12:57 ` Ming Lei
2023-03-14 12:57 ` [PATCH V3 09/16] block: ublk_drv: clean up several helpers Ming Lei
` (11 subsequent siblings)
19 siblings, 0 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-14 12:57 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-block
Cc: Miklos Szeredi, ZiyangZhang, Xiaoguang Wang, Bernd Schubert,
Pavel Begunkov, Ming Lei
Add two helpers for checking whether map/unmap is needed, since we may have
passthrough requests which need map or unmap in the future, such as for
supporting report zones.
Meantime don't mark ublk_copy_user_pages as inline since this function
is a bit fat now.
Reviewed-by: Ziyang Zhang <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
---
drivers/block/ublk_drv.c | 19 ++++++++++++++-----
1 file changed, 14 insertions(+), 5 deletions(-)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 5b13a58b424b..469e15057d7a 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -488,8 +488,7 @@ static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data,
return done;
}
-static inline int ublk_copy_user_pages(struct ublk_map_data *data,
- bool to_vm)
+static int ublk_copy_user_pages(struct ublk_map_data *data, bool to_vm)
{
const unsigned int gup_flags = to_vm ? FOLL_WRITE : 0;
const unsigned long start_vm = data->io->addr;
@@ -525,6 +524,16 @@ static inline int ublk_copy_user_pages(struct ublk_map_data *data,
return done;
}
+static inline bool ublk_need_map_req(const struct request *req)
+{
+ return ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE;
+}
+
+static inline bool ublk_need_unmap_req(const struct request *req)
+{
+ return ublk_rq_has_data(req) && req_op(req) == REQ_OP_READ;
+}
+
static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
struct ublk_io *io)
{
@@ -535,7 +544,7 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
* context and the big benefit is that pinning pages in current
* context is pretty fast, see ublk_pin_user_pages
*/
- if (ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE) {
+ if (ublk_need_map_req(req)) {
struct ublk_map_data data = {
.ubq = ubq,
.rq = req,
@@ -556,7 +565,7 @@ static int ublk_unmap_io(const struct ublk_queue *ubq,
{
const unsigned int rq_bytes = blk_rq_bytes(req);
- if (req_op(req) == REQ_OP_READ && ublk_rq_has_data(req)) {
+ if (ublk_need_unmap_req(req)) {
struct ublk_map_data data = {
.ubq = ubq,
.rq = req,
@@ -770,7 +779,7 @@ static inline void __ublk_rq_task_work(struct request *req)
return;
}
- if (ublk_need_get_data(ubq) && (req_op(req) == REQ_OP_WRITE)) {
+ if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) {
/*
* We have not handled UBLK_IO_NEED_GET_DATA command yet,
* so immepdately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
--
2.39.2
^ permalink raw reply related [flat|nested] 49+ messages in thread
* [PATCH V3 09/16] block: ublk_drv: clean up several helpers
2023-03-14 12:57 [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD Ming Lei
` (7 preceding siblings ...)
2023-03-14 12:57 ` [PATCH V3 08/16] block: ublk_drv: add two helpers to clean up map/unmap request Ming Lei
@ 2023-03-14 12:57 ` Ming Lei
2023-03-14 12:57 ` [PATCH V3 10/16] block: ublk_drv: cleanup 'struct ublk_map_data' Ming Lei
` (10 subsequent siblings)
19 siblings, 0 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-14 12:57 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-block
Cc: Miklos Szeredi, ZiyangZhang, Xiaoguang Wang, Bernd Schubert,
Pavel Begunkov, Ming Lei
Convert the following pattern in several helpers
if (Z)
return true
return false
into:
return Z;
Reviewed-by: Ziyang Zhang <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
---
drivers/block/ublk_drv.c | 18 +++++-------------
1 file changed, 5 insertions(+), 13 deletions(-)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 469e15057d7a..0ae12b2bf89a 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -298,9 +298,7 @@ static inline bool ublk_can_use_task_work(const struct ublk_queue *ubq)
static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
{
- if (ubq->flags & UBLK_F_NEED_GET_DATA)
- return true;
- return false;
+ return ubq->flags & UBLK_F_NEED_GET_DATA;
}
static struct ublk_device *ublk_get_device(struct ublk_device *ub)
@@ -349,25 +347,19 @@ static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
static inline bool ublk_queue_can_use_recovery_reissue(
struct ublk_queue *ubq)
{
- if ((ubq->flags & UBLK_F_USER_RECOVERY) &&
- (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE))
- return true;
- return false;
+ return (ubq->flags & UBLK_F_USER_RECOVERY) &&
+ (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE);
}
static inline bool ublk_queue_can_use_recovery(
struct ublk_queue *ubq)
{
- if (ubq->flags & UBLK_F_USER_RECOVERY)
- return true;
- return false;
+ return ubq->flags & UBLK_F_USER_RECOVERY;
}
static inline bool ublk_can_use_recovery(struct ublk_device *ub)
{
- if (ub->dev_info.flags & UBLK_F_USER_RECOVERY)
- return true;
- return false;
+ return ub->dev_info.flags & UBLK_F_USER_RECOVERY;
}
static void ublk_free_disk(struct gendisk *disk)
--
2.39.2
^ permalink raw reply related [flat|nested] 49+ messages in thread
* [PATCH V3 10/16] block: ublk_drv: cleanup 'struct ublk_map_data'
2023-03-14 12:57 [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD Ming Lei
` (8 preceding siblings ...)
2023-03-14 12:57 ` [PATCH V3 09/16] block: ublk_drv: clean up several helpers Ming Lei
@ 2023-03-14 12:57 ` Ming Lei
2023-03-14 12:57 ` [PATCH V3 11/16] block: ublk_drv: cleanup ublk_copy_user_pages Ming Lei
` (9 subsequent siblings)
19 siblings, 0 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-14 12:57 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-block
Cc: Miklos Szeredi, ZiyangZhang, Xiaoguang Wang, Bernd Schubert,
Pavel Begunkov, Ming Lei
'struct ublk_map_data' is passed to ublk_copy_user_pages()
for copying data between a userspace buffer and request pages.
What matters here is the userspace buffer address/len and the 'struct request',
so replace the ->io field with the user buffer address, and rename max_bytes
to len.
Meantime remove 'ubq' field from ublk_map_data, since it isn't used
any more.
Then code becomes more readable.
Reviewed-by: Ziyang Zhang <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
---
drivers/block/ublk_drv.c | 27 ++++++++++++---------------
1 file changed, 12 insertions(+), 15 deletions(-)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 0ae12b2bf89a..e4e45a60349b 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -420,10 +420,9 @@ static const struct block_device_operations ub_fops = {
#define UBLK_MAX_PIN_PAGES 32
struct ublk_map_data {
- const struct ublk_queue *ubq;
const struct request *rq;
- const struct ublk_io *io;
- unsigned max_bytes;
+ unsigned long ubuf;
+ unsigned int len;
};
struct ublk_io_iter {
@@ -483,14 +482,14 @@ static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data,
static int ublk_copy_user_pages(struct ublk_map_data *data, bool to_vm)
{
const unsigned int gup_flags = to_vm ? FOLL_WRITE : 0;
- const unsigned long start_vm = data->io->addr;
+ const unsigned long start_vm = data->ubuf;
unsigned int done = 0;
struct ublk_io_iter iter = {
.pg_off = start_vm & (PAGE_SIZE - 1),
.bio = data->rq->bio,
.iter = data->rq->bio->bi_iter,
};
- const unsigned int nr_pages = round_up(data->max_bytes +
+ const unsigned int nr_pages = round_up(data->len +
(start_vm & (PAGE_SIZE - 1)), PAGE_SIZE) >> PAGE_SHIFT;
while (done < nr_pages) {
@@ -503,13 +502,13 @@ static int ublk_copy_user_pages(struct ublk_map_data *data, bool to_vm)
iter.pages);
if (iter.nr_pages <= 0)
return done == 0 ? iter.nr_pages : done;
- len = ublk_copy_io_pages(&iter, data->max_bytes, to_vm);
+ len = ublk_copy_io_pages(&iter, data->len, to_vm);
for (i = 0; i < iter.nr_pages; i++) {
if (to_vm)
set_page_dirty(iter.pages[i]);
put_page(iter.pages[i]);
}
- data->max_bytes -= len;
+ data->len -= len;
done += iter.nr_pages;
}
@@ -538,15 +537,14 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
*/
if (ublk_need_map_req(req)) {
struct ublk_map_data data = {
- .ubq = ubq,
.rq = req,
- .io = io,
- .max_bytes = rq_bytes,
+ .ubuf = io->addr,
+ .len = rq_bytes,
};
ublk_copy_user_pages(&data, true);
- return rq_bytes - data.max_bytes;
+ return rq_bytes - data.len;
}
return rq_bytes;
}
@@ -559,17 +557,16 @@ static int ublk_unmap_io(const struct ublk_queue *ubq,
if (ublk_need_unmap_req(req)) {
struct ublk_map_data data = {
- .ubq = ubq,
.rq = req,
- .io = io,
- .max_bytes = io->res,
+ .ubuf = io->addr,
+ .len = io->res,
};
WARN_ON_ONCE(io->res > rq_bytes);
ublk_copy_user_pages(&data, false);
- return io->res - data.max_bytes;
+ return io->res - data.len;
}
return rq_bytes;
}
--
2.39.2
^ permalink raw reply related [flat|nested] 49+ messages in thread
* [PATCH V3 11/16] block: ublk_drv: cleanup ublk_copy_user_pages
2023-03-14 12:57 [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD Ming Lei
` (9 preceding siblings ...)
2023-03-14 12:57 ` [PATCH V3 10/16] block: ublk_drv: cleanup 'struct ublk_map_data' Ming Lei
@ 2023-03-14 12:57 ` Ming Lei
2023-03-14 12:57 ` [PATCH V3 12/16] block: ublk_drv: grab request reference when the request is handled by userspace Ming Lei
` (8 subsequent siblings)
19 siblings, 0 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-14 12:57 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-block
Cc: Miklos Szeredi, ZiyangZhang, Xiaoguang Wang, Bernd Schubert,
Pavel Begunkov, Ming Lei
Clean up ublk_copy_user_pages() by using iov iter, and code
gets simplified a lot and becomes much more readable than before.
Signed-off-by: Ming Lei <[email protected]>
---
drivers/block/ublk_drv.c | 112 +++++++++++++++++----------------------
1 file changed, 49 insertions(+), 63 deletions(-)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index e4e45a60349b..814084dc6cbb 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -419,49 +419,39 @@ static const struct block_device_operations ub_fops = {
#define UBLK_MAX_PIN_PAGES 32
-struct ublk_map_data {
- const struct request *rq;
- unsigned long ubuf;
- unsigned int len;
-};
-
struct ublk_io_iter {
struct page *pages[UBLK_MAX_PIN_PAGES];
- unsigned pg_off; /* offset in the 1st page in pages */
- int nr_pages; /* how many page pointers in pages */
struct bio *bio;
struct bvec_iter iter;
};
-static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data,
- unsigned max_bytes, bool to_vm)
+/* return how many pages are copied */
+static void ublk_copy_io_pages(struct ublk_io_iter *data,
+ size_t total, size_t pg_off, int dir)
{
- const unsigned total = min_t(unsigned, max_bytes,
- PAGE_SIZE - data->pg_off +
- ((data->nr_pages - 1) << PAGE_SHIFT));
unsigned done = 0;
unsigned pg_idx = 0;
while (done < total) {
struct bio_vec bv = bio_iter_iovec(data->bio, data->iter);
- const unsigned int bytes = min3(bv.bv_len, total - done,
- (unsigned)(PAGE_SIZE - data->pg_off));
+ unsigned int bytes = min3(bv.bv_len, (unsigned)total - done,
+ (unsigned)(PAGE_SIZE - pg_off));
void *bv_buf = bvec_kmap_local(&bv);
void *pg_buf = kmap_local_page(data->pages[pg_idx]);
- if (to_vm)
- memcpy(pg_buf + data->pg_off, bv_buf, bytes);
+ if (dir == ITER_DEST)
+ memcpy(pg_buf + pg_off, bv_buf, bytes);
else
- memcpy(bv_buf, pg_buf + data->pg_off, bytes);
+ memcpy(bv_buf, pg_buf + pg_off, bytes);
kunmap_local(pg_buf);
kunmap_local(bv_buf);
/* advance page array */
- data->pg_off += bytes;
- if (data->pg_off == PAGE_SIZE) {
+ pg_off += bytes;
+ if (pg_off == PAGE_SIZE) {
pg_idx += 1;
- data->pg_off = 0;
+ pg_off = 0;
}
done += bytes;
@@ -475,41 +465,40 @@ static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data,
data->iter = data->bio->bi_iter;
}
}
-
- return done;
}
-static int ublk_copy_user_pages(struct ublk_map_data *data, bool to_vm)
+/*
+ * Copy data between request pages and io_iter, and 'offset'
+ * is the start point of linear offset of request.
+ */
+static size_t ublk_copy_user_pages(const struct request *req,
+ struct iov_iter *uiter, int dir)
{
- const unsigned int gup_flags = to_vm ? FOLL_WRITE : 0;
- const unsigned long start_vm = data->ubuf;
- unsigned int done = 0;
struct ublk_io_iter iter = {
- .pg_off = start_vm & (PAGE_SIZE - 1),
- .bio = data->rq->bio,
- .iter = data->rq->bio->bi_iter,
+ .bio = req->bio,
+ .iter = req->bio->bi_iter,
};
- const unsigned int nr_pages = round_up(data->len +
- (start_vm & (PAGE_SIZE - 1)), PAGE_SIZE) >> PAGE_SHIFT;
-
- while (done < nr_pages) {
- const unsigned to_pin = min_t(unsigned, UBLK_MAX_PIN_PAGES,
- nr_pages - done);
- unsigned i, len;
-
- iter.nr_pages = get_user_pages_fast(start_vm +
- (done << PAGE_SHIFT), to_pin, gup_flags,
- iter.pages);
- if (iter.nr_pages <= 0)
- return done == 0 ? iter.nr_pages : done;
- len = ublk_copy_io_pages(&iter, data->len, to_vm);
- for (i = 0; i < iter.nr_pages; i++) {
- if (to_vm)
+ size_t done = 0;
+
+ while (iov_iter_count(uiter) && iter.bio) {
+ unsigned nr_pages;
+ size_t len, off;
+ int i;
+
+ len = iov_iter_get_pages2(uiter, iter.pages,
+ iov_iter_count(uiter),
+ UBLK_MAX_PIN_PAGES, &off);
+ if (len <= 0)
+ return done;
+
+ ublk_copy_io_pages(&iter, len, off, dir);
+ nr_pages = DIV_ROUND_UP(len + off, PAGE_SIZE);
+ for (i = 0; i < nr_pages; i++) {
+ if (dir == ITER_DEST)
set_page_dirty(iter.pages[i]);
put_page(iter.pages[i]);
}
- data->len -= len;
- done += iter.nr_pages;
+ done += len;
}
return done;
@@ -536,15 +525,14 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
* context is pretty fast, see ublk_pin_user_pages
*/
if (ublk_need_map_req(req)) {
- struct ublk_map_data data = {
- .rq = req,
- .ubuf = io->addr,
- .len = rq_bytes,
- };
+ struct iov_iter iter;
+ struct iovec iov;
+ const int dir = ITER_DEST;
- ublk_copy_user_pages(&data, true);
+ import_single_range(dir, u64_to_user_ptr(io->addr), rq_bytes,
+ &iov, &iter);
- return rq_bytes - data.len;
+ return ublk_copy_user_pages(req, &iter, dir);
}
return rq_bytes;
}
@@ -556,17 +544,15 @@ static int ublk_unmap_io(const struct ublk_queue *ubq,
const unsigned int rq_bytes = blk_rq_bytes(req);
if (ublk_need_unmap_req(req)) {
- struct ublk_map_data data = {
- .rq = req,
- .ubuf = io->addr,
- .len = io->res,
- };
+ struct iov_iter iter;
+ struct iovec iov;
+ const int dir = ITER_SOURCE;
WARN_ON_ONCE(io->res > rq_bytes);
- ublk_copy_user_pages(&data, false);
-
- return io->res - data.len;
+ import_single_range(dir, u64_to_user_ptr(io->addr), io->res,
+ &iov, &iter);
+ return ublk_copy_user_pages(req, &iter, dir);
}
return rq_bytes;
}
--
2.39.2
^ permalink raw reply related [flat|nested] 49+ messages in thread
* [PATCH V3 12/16] block: ublk_drv: grab request reference when the request is handled by userspace
2023-03-14 12:57 [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD Ming Lei
` (10 preceding siblings ...)
2023-03-14 12:57 ` [PATCH V3 11/16] block: ublk_drv: cleanup ublk_copy_user_pages Ming Lei
@ 2023-03-14 12:57 ` Ming Lei
2023-03-14 12:57 ` [PATCH V3 13/16] block: ublk_drv: support to copy any part of request pages Ming Lei
` (7 subsequent siblings)
19 siblings, 0 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-14 12:57 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-block
Cc: Miklos Szeredi, ZiyangZhang, Xiaoguang Wang, Bernd Schubert,
Pavel Begunkov, Ming Lei
Add a reference counter to the request pdu data, and hold this reference
for the request's lifetime. This approach is always safe. In theory, the ublk
request won't be completed until all fused commands are done; however, this
is userspace, and an application can submit fused commands at will.
Prepare for supporting zero copy, which needs to retrieve request buffer
by fused command, so we have to guarantee:
- the fused command can't succeed unless the request isn't queued
- when any fused command is successful, this request can't be freed
until all fused commands on this request are done.
Signed-off-by: Ming Lei <[email protected]>
---
drivers/block/ublk_drv.c | 67 ++++++++++++++++++++++++++++++++++++++--
1 file changed, 64 insertions(+), 3 deletions(-)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 814084dc6cbb..0e036a579b9b 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -43,6 +43,7 @@
#include <asm/page.h>
#include <linux/task_work.h>
#include <linux/namei.h>
+#include <linux/kref.h>
#include <uapi/linux/ublk_cmd.h>
#define UBLK_MINORS (1U << MINORBITS)
@@ -62,6 +63,17 @@
struct ublk_rq_data {
struct llist_node node;
struct callback_head work;
+
+ /*
+ * Only for applying fused command to support zero copy:
+ *
+ * - if there is any fused command aiming at this request, not complete
+ * request until all fused commands are done
+ *
+ * - fused command has to fail unless this reference is grabbed
+ * successfully
+ */
+ struct kref ref;
};
struct ublk_uring_cmd_pdu {
@@ -180,6 +192,9 @@ struct ublk_params_header {
__u32 types;
};
+static inline void __ublk_complete_rq(struct request *req);
+static void ublk_complete_rq(struct kref *ref);
+
static dev_t ublk_chr_devt;
static struct class *ublk_chr_class;
@@ -288,6 +303,35 @@ static int ublk_apply_params(struct ublk_device *ub)
return 0;
}
+static inline bool ublk_support_zc(const struct ublk_queue *ubq)
+{
+ return ubq->flags & UBLK_F_SUPPORT_ZERO_COPY;
+}
+
+static inline bool ublk_get_req_ref(const struct ublk_queue *ubq,
+ struct request *req)
+{
+ if (ublk_support_zc(ubq)) {
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+ return kref_get_unless_zero(&data->ref);
+ }
+
+ return true;
+}
+
+static inline void ublk_put_req_ref(const struct ublk_queue *ubq,
+ struct request *req)
+{
+ if (ublk_support_zc(ubq)) {
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+ kref_put(&data->ref, ublk_complete_rq);
+ } else {
+ __ublk_complete_rq(req);
+ }
+}
+
static inline bool ublk_can_use_task_work(const struct ublk_queue *ubq)
{
if (IS_BUILTIN(CONFIG_BLK_DEV_UBLK) &&
@@ -632,13 +676,19 @@ static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
}
/* todo: handle partial completion */
-static void ublk_complete_rq(struct request *req)
+static inline void __ublk_complete_rq(struct request *req)
{
struct ublk_queue *ubq = req->mq_hctx->driver_data;
struct ublk_io *io = &ubq->ios[req->tag];
unsigned int unmapped_bytes;
int res = BLK_STS_OK;
+ /* called from ublk_abort_queue() code path */
+ if (io->flags & UBLK_IO_FLAG_ABORTED) {
+ res = BLK_STS_IOERR;
+ goto exit;
+ }
+
/* failed read IO if nothing is read */
if (!io->res && req_op(req) == REQ_OP_READ)
io->res = -EIO;
@@ -678,6 +728,15 @@ static void ublk_complete_rq(struct request *req)
blk_mq_end_request(req, res);
}
+static void ublk_complete_rq(struct kref *ref)
+{
+ struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data,
+ ref);
+ struct request *req = blk_mq_rq_from_pdu(data);
+
+ __ublk_complete_rq(req);
+}
+
/*
* Since __ublk_rq_task_work always fails requests immediately during
* exiting, __ublk_fail_req() is only called from abort context during
@@ -696,7 +755,7 @@ static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
if (ublk_queue_can_use_recovery_reissue(ubq))
blk_mq_requeue_request(req, false);
else
- blk_mq_end_request(req, BLK_STS_IOERR);
+ ublk_put_req_ref(ubq, req);
}
}
@@ -732,6 +791,7 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
static inline void __ublk_rq_task_work(struct request *req)
{
struct ublk_queue *ubq = req->mq_hctx->driver_data;
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
int tag = req->tag;
struct ublk_io *io = &ubq->ios[tag];
unsigned int mapped_bytes;
@@ -803,6 +863,7 @@ static inline void __ublk_rq_task_work(struct request *req)
mapped_bytes >> 9;
}
+ kref_init(&data->ref);
ubq_complete_io_cmd(io, UBLK_IO_RES_OK);
}
@@ -1013,7 +1074,7 @@ static void ublk_commit_completion(struct ublk_device *ub,
req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);
if (req && likely(!blk_should_fake_timeout(req->q)))
- ublk_complete_rq(req);
+ ublk_put_req_ref(ubq, req);
}
/*
--
2.39.2
^ permalink raw reply related [flat|nested] 49+ messages in thread
* [PATCH V3 13/16] block: ublk_drv: support to copy any part of request pages
2023-03-14 12:57 [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD Ming Lei
` (11 preceding siblings ...)
2023-03-14 12:57 ` [PATCH V3 12/16] block: ublk_drv: grab request reference when the request is handled by userspace Ming Lei
@ 2023-03-14 12:57 ` Ming Lei
2023-03-14 12:57 ` [PATCH V3 14/16] block: ublk_drv: add read()/write() support for ublk char device Ming Lei
` (6 subsequent siblings)
19 siblings, 0 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-14 12:57 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-block
Cc: Miklos Szeredi, ZiyangZhang, Xiaoguang Wang, Bernd Schubert,
Pavel Begunkov, Ming Lei
Add 'offset' to 'struct ublk_map_data', so that ublk_copy_user_pages()
can be used to copy any sub-buffer(linear mapped) of the request.
Signed-off-by: Ming Lei <[email protected]>
---
drivers/block/ublk_drv.c | 26 ++++++++++++++++++++++----
1 file changed, 22 insertions(+), 4 deletions(-)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 0e036a579b9b..f79af40d1a8c 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -511,19 +511,37 @@ static void ublk_copy_io_pages(struct ublk_io_iter *data,
}
}
+static bool ublk_advance_io_iter(struct ublk_io_iter *iter, unsigned int offset)
+{
+ struct bio *bio = iter->bio;
+
+ for_each_bio(bio) {
+ if (bio->bi_iter.bi_size > offset) {
+ iter->bio = bio;
+ iter->iter = bio->bi_iter;
+ bio_advance_iter(iter->bio, &iter->iter, offset);
+ return true;
+ }
+ offset -= bio->bi_iter.bi_size;
+ }
+ return false;
+}
+
/*
* Copy data between request pages and io_iter, and 'offset'
* is the start point of linear offset of request.
*/
static size_t ublk_copy_user_pages(const struct request *req,
- struct iov_iter *uiter, int dir)
+ unsigned offset, struct iov_iter *uiter, int dir)
{
struct ublk_io_iter iter = {
.bio = req->bio,
- .iter = req->bio->bi_iter,
};
size_t done = 0;
+ if (!ublk_advance_io_iter(&iter, offset))
+ return 0;
+
while (iov_iter_count(uiter) && iter.bio) {
unsigned nr_pages;
size_t len, off;
@@ -576,7 +594,7 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
import_single_range(dir, u64_to_user_ptr(io->addr), rq_bytes,
&iov, &iter);
- return ublk_copy_user_pages(req, &iter, dir);
+ return ublk_copy_user_pages(req, 0, &iter, dir);
}
return rq_bytes;
}
@@ -596,7 +614,7 @@ static int ublk_unmap_io(const struct ublk_queue *ubq,
import_single_range(dir, u64_to_user_ptr(io->addr), io->res,
&iov, &iter);
- return ublk_copy_user_pages(req, &iter, dir);
+ return ublk_copy_user_pages(req, 0, &iter, dir);
}
return rq_bytes;
}
--
2.39.2
^ permalink raw reply related [flat|nested] 49+ messages in thread
* [PATCH V3 14/16] block: ublk_drv: add read()/write() support for ublk char device
2023-03-14 12:57 [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD Ming Lei
` (12 preceding siblings ...)
2023-03-14 12:57 ` [PATCH V3 13/16] block: ublk_drv: support to copy any part of request pages Ming Lei
@ 2023-03-14 12:57 ` Ming Lei
2023-03-14 12:57 ` [PATCH V3 15/16] block: ublk_drv: don't check buffer in case of zero copy Ming Lei
` (5 subsequent siblings)
19 siblings, 0 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-14 12:57 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-block
Cc: Miklos Szeredi, ZiyangZhang, Xiaoguang Wang, Bernd Schubert,
Pavel Begunkov, Ming Lei
We are going to support zero copy by fused uring command, so userspace
can't read from or write to the io buffer any more, which is not
flexible for applications:
1) some targets need to zero buffer explicitly, such as when reading
unmapped qcow2 cluster
2) some targets need to support passthrough command, such as zoned
report zones, and still need to read/write the io buffer
Support pread()/pwrite() on ublk char device for reading/writing request
io buffer, so ublk server can handle the above cases easily.
This also can help to make zero copy becoming the primary option, and
non-zero-copy will become legacy code path since the added read()/write()
can cover non-zero-copy feature.
Signed-off-by: Ming Lei <[email protected]>
---
drivers/block/ublk_drv.c | 131 ++++++++++++++++++++++++++++++++++
include/uapi/linux/ublk_cmd.h | 31 +++++++-
2 files changed, 161 insertions(+), 1 deletion(-)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index f79af40d1a8c..a31dc9b460b7 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -1318,6 +1318,36 @@ static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
ublk_queue_cmd(ubq, req);
}
+static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
+ struct ublk_queue *ubq, int tag, size_t offset)
+{
+ struct request *req;
+
+ if (!ublk_support_zc(ubq))
+ return NULL;
+
+ req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
+ if (!req)
+ return NULL;
+
+ if (!ublk_get_req_ref(ubq, req))
+ return NULL;
+
+ if (unlikely(!blk_mq_request_started(req) || req->tag != tag))
+ goto fail_put;
+
+ if (!ublk_rq_has_data(req))
+ goto fail_put;
+
+ if (offset > blk_rq_bytes(req))
+ goto fail_put;
+
+ return req;
+fail_put:
+ ublk_put_req_ref(ubq, req);
+ return NULL;
+}
+
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
struct ublksrv_io_cmd *ub_cmd = (struct ublksrv_io_cmd *)cmd->cmd;
@@ -1419,11 +1449,112 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
return -EIOCBQUEUED;
}
+static inline bool ublk_check_ubuf_dir(const struct request *req,
+ int ubuf_dir)
+{
+ /* copy ubuf to request pages */
+ if (req_op(req) == REQ_OP_READ && ubuf_dir == ITER_SOURCE)
+ return true;
+
+ /* copy request pages to ubuf */
+ if (req_op(req) == REQ_OP_WRITE && ubuf_dir == ITER_DEST)
+ return true;
+
+ return false;
+}
+
+static struct request *ublk_check_and_get_req(struct kiocb *iocb,
+ struct iov_iter *iter, size_t *off, int dir)
+{
+ struct ublk_device *ub = iocb->ki_filp->private_data;
+ struct ublk_queue *ubq;
+ struct request *req;
+ size_t buf_off;
+ u16 tag, q_id;
+
+ if (!ub)
+ return ERR_PTR(-EACCES);
+
+ if (!user_backed_iter(iter))
+ return ERR_PTR(-EACCES);
+
+ if (ub->dev_info.state == UBLK_S_DEV_DEAD)
+ return ERR_PTR(-EACCES);
+
+ tag = ublk_pos_to_tag(iocb->ki_pos);
+ q_id = ublk_pos_to_hwq(iocb->ki_pos);
+ buf_off = ublk_pos_to_buf_offset(iocb->ki_pos);
+
+ if (q_id >= ub->dev_info.nr_hw_queues)
+ return ERR_PTR(-EINVAL);
+
+ ubq = ublk_get_queue(ub, q_id);
+ if (!ubq)
+ return ERR_PTR(-EINVAL);
+
+ if (tag >= ubq->q_depth)
+ return ERR_PTR(-EINVAL);
+
+ req = __ublk_check_and_get_req(ub, ubq, tag, buf_off);
+ if (!req)
+ return ERR_PTR(-EINVAL);
+
+ if (!req->mq_hctx || !req->mq_hctx->driver_data)
+ goto fail;
+
+ if (!ublk_check_ubuf_dir(req, dir))
+ goto fail;
+
+ *off = buf_off;
+ return req;
+fail:
+ ublk_put_req_ref(ubq, req);
+ return ERR_PTR(-EACCES);
+}
+
+static ssize_t ublk_ch_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct ublk_queue *ubq;
+ struct request *req;
+ size_t buf_off;
+ size_t ret;
+
+ req = ublk_check_and_get_req(iocb, to, &buf_off, ITER_DEST);
+ if (unlikely(IS_ERR(req)))
+ return PTR_ERR(req);
+
+ ret = ublk_copy_user_pages(req, buf_off, to, ITER_DEST);
+ ubq = req->mq_hctx->driver_data;
+ ublk_put_req_ref(ubq, req);
+
+ return ret;
+}
+
+static ssize_t ublk_ch_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct ublk_queue *ubq;
+ struct request *req;
+ size_t buf_off;
+ size_t ret;
+
+ req = ublk_check_and_get_req(iocb, from, &buf_off, ITER_SOURCE);
+ if (unlikely(IS_ERR(req)))
+ return PTR_ERR(req);
+
+ ret = ublk_copy_user_pages(req, buf_off, from, ITER_SOURCE);
+ ubq = req->mq_hctx->driver_data;
+ ublk_put_req_ref(ubq, req);
+
+ return ret;
+}
+
static const struct file_operations ublk_ch_fops = {
.owner = THIS_MODULE,
.open = ublk_ch_open,
.release = ublk_ch_release,
.llseek = no_llseek,
+ .read_iter = ublk_ch_read_iter,
+ .write_iter = ublk_ch_write_iter,
.uring_cmd = ublk_ch_uring_cmd,
.mmap = ublk_ch_mmap,
};
diff --git a/include/uapi/linux/ublk_cmd.h b/include/uapi/linux/ublk_cmd.h
index f6238ccc7800..d1a6b3dc0327 100644
--- a/include/uapi/linux/ublk_cmd.h
+++ b/include/uapi/linux/ublk_cmd.h
@@ -54,7 +54,36 @@
#define UBLKSRV_IO_BUF_OFFSET 0x80000000
/* tag bit is 12bit, so at most 4096 IOs for each queue */
-#define UBLK_MAX_QUEUE_DEPTH 4096
+#define UBLK_TAG_BITS 12
+#define UBLK_MAX_QUEUE_DEPTH (1U << UBLK_TAG_BITS)
+
+/* used for locating each io buffer for pread()/pwrite() on char device */
+#define UBLK_BUFS_SIZE_BITS 42
+#define UBLK_BUFS_SIZE_MASK ((1ULL << UBLK_BUFS_SIZE_BITS) - 1)
+#define UBLK_BUF_SIZE_BITS (UBLK_BUFS_SIZE_BITS - UBLK_TAG_BITS)
+#define UBLK_BUF_MAX_SIZE (1ULL << UBLK_BUF_SIZE_BITS)
+
+static inline __u16 ublk_pos_to_hwq(__u64 pos)
+{
+ return pos >> UBLK_BUFS_SIZE_BITS;
+}
+
+static inline __u32 ublk_pos_to_buf_offset(__u64 pos)
+{
+ return (pos & UBLK_BUFS_SIZE_MASK) & (UBLK_BUF_MAX_SIZE - 1);
+}
+
+static inline __u16 ublk_pos_to_tag(__u64 pos)
+{
+ return (pos & UBLK_BUFS_SIZE_MASK) >> UBLK_BUF_SIZE_BITS;
+}
+
+/* offset of single buffer, which has to be < UBLK_BUF_MAX_SIZE */
+static inline __u64 ublk_pos(__u16 q_id, __u16 tag, __u32 offset)
+{
+ return (((__u64)q_id) << UBLK_BUFS_SIZE_BITS) |
+ ((((__u64)tag) << UBLK_BUF_SIZE_BITS) + offset);
+}
/*
* zero copy requires 4k block size, and can remap ublk driver's io
--
2.39.2
^ permalink raw reply related [flat|nested] 49+ messages in thread
* [PATCH V3 15/16] block: ublk_drv: don't check buffer in case of zero copy
2023-03-14 12:57 [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD Ming Lei
` (13 preceding siblings ...)
2023-03-14 12:57 ` [PATCH V3 14/16] block: ublk_drv: add read()/write() support for ublk char device Ming Lei
@ 2023-03-14 12:57 ` Ming Lei
2023-03-14 12:57 ` [PATCH V3 16/16] block: ublk_drv: apply io_uring FUSED_CMD for supporting " Ming Lei
` (4 subsequent siblings)
19 siblings, 0 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-14 12:57 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-block
Cc: Miklos Szeredi, ZiyangZhang, Xiaoguang Wang, Bernd Schubert,
Pavel Begunkov, Ming Lei
In case of zero copy, the ublk server doesn't need to pre-allocate an IO
buffer and provide it to the driver any more.
Meanwhile, the buffer is no longer set in case of zero copy, and
userspace can use pread()/pwrite() to read from/write to the io request
buffer, which is easier & simpler from the userspace viewpoint.
Signed-off-by: Ming Lei <[email protected]>
---
drivers/block/ublk_drv.c | 23 ++++++++++++++---------
1 file changed, 14 insertions(+), 9 deletions(-)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index a31dc9b460b7..e77eca0a45bb 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -1406,25 +1406,30 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
goto out;
/* FETCH_RQ has to provide IO buffer if NEED GET DATA is not enabled */
- if (!ub_cmd->addr && !ublk_need_get_data(ubq))
- goto out;
+ if (!ublk_support_zc(ubq)) {
+ if (!ub_cmd->addr && !ublk_need_get_data(ubq))
+ goto out;
+ io->addr = ub_cmd->addr;
+ }
io->cmd = cmd;
io->flags |= UBLK_IO_FLAG_ACTIVE;
- io->addr = ub_cmd->addr;
-
ublk_mark_io_ready(ub, ubq);
break;
case UBLK_IO_COMMIT_AND_FETCH_REQ:
req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
+
+ if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
+ goto out;
/*
* COMMIT_AND_FETCH_REQ has to provide IO buffer if NEED GET DATA is
* not enabled or it is Read IO.
*/
- if (!ub_cmd->addr && (!ublk_need_get_data(ubq) || req_op(req) == REQ_OP_READ))
- goto out;
- if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
- goto out;
- io->addr = ub_cmd->addr;
+ if (!ublk_support_zc(ubq)) {
+ if (!ub_cmd->addr && (!ublk_need_get_data(ubq) ||
+ req_op(req) == REQ_OP_READ))
+ goto out;
+ io->addr = ub_cmd->addr;
+ }
io->flags |= UBLK_IO_FLAG_ACTIVE;
io->cmd = cmd;
ublk_commit_completion(ub, ub_cmd);
--
2.39.2
^ permalink raw reply related [flat|nested] 49+ messages in thread
* [PATCH V3 16/16] block: ublk_drv: apply io_uring FUSED_CMD for supporting zero copy
2023-03-14 12:57 [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD Ming Lei
` (14 preceding siblings ...)
2023-03-14 12:57 ` [PATCH V3 15/16] block: ublk_drv: don't check buffer in case of zero copy Ming Lei
@ 2023-03-14 12:57 ` Ming Lei
2023-03-16 3:13 ` [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD Xiaoguang Wang
` (3 subsequent siblings)
19 siblings, 0 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-14 12:57 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-block
Cc: Miklos Szeredi, ZiyangZhang, Xiaoguang Wang, Bernd Schubert,
Pavel Begunkov, Ming Lei
Apply io_uring fused command for supporting zero copy:
1) init the fused cmd buffer(io_mapped_buf) in ublk_map_io(),
and deinit it in ublk_unmap_io(), and this buffer is immutable,
so it is just fine to retrieve it from concurrent fused command.
2) add sub-command opcode of UBLK_IO_FUSED_SUBMIT_IO for retrieving
this fused cmd(zero copy) buffer
3) call io_fused_cmd_provide_kbuf() to provide buffer to slave
request; meantime set up the complete callback via this API; once
the slave request is completed, the complete callback is called
for freeing the buffer and completing the uring fused command
Also request reference is held during fused command lifetime, and
this way guarantees that request buffer won't be freed until
fused commands are done.
Signed-off-by: Ming Lei <[email protected]>
---
drivers/block/ublk_drv.c | 191 ++++++++++++++++++++++++++++++++--
include/uapi/linux/ublk_cmd.h | 6 +-
2 files changed, 185 insertions(+), 12 deletions(-)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index e77eca0a45bb..e0879db6220f 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -74,10 +74,15 @@ struct ublk_rq_data {
* successfully
*/
struct kref ref;
+ bool allocated_bvec;
+ struct io_uring_bvec_buf buf[0];
};
struct ublk_uring_cmd_pdu {
- struct ublk_queue *ubq;
+ union {
+ struct ublk_queue *ubq;
+ struct request *req;
+ };
};
/*
@@ -566,6 +571,69 @@ static size_t ublk_copy_user_pages(const struct request *req,
return done;
}
+/*
+ * The built command buffer is immutable, so it is fine to feed it to
+ * concurrent io_uring fused commands
+ */
+static int ublk_init_zero_copy_buffer(struct request *rq)
+{
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
+ struct io_uring_bvec_buf *imu = data->buf;
+ struct req_iterator rq_iter;
+ unsigned int nr_bvecs = 0;
+ struct bio_vec *bvec;
+ unsigned int offset;
+ struct bio_vec bv;
+
+ if (!ublk_rq_has_data(rq))
+ goto exit;
+
+ rq_for_each_bvec(bv, rq, rq_iter)
+ nr_bvecs++;
+
+ if (!nr_bvecs)
+ goto exit;
+
+ if (rq->bio != rq->biotail) {
+ int idx = 0;
+
+ bvec = kvmalloc_array(sizeof(struct bio_vec), nr_bvecs,
+ GFP_NOIO);
+ if (!bvec)
+ return -ENOMEM;
+
+ offset = 0;
+ rq_for_each_bvec(bv, rq, rq_iter)
+ bvec[idx++] = bv;
+ data->allocated_bvec = true;
+ } else {
+ struct bio *bio = rq->bio;
+
+ offset = bio->bi_iter.bi_bvec_done;
+ bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
+ }
+ imu->bvec = bvec;
+ imu->nr_bvecs = nr_bvecs;
+ imu->offset = offset;
+ imu->len = blk_rq_bytes(rq);
+
+ return 0;
+exit:
+ imu->bvec = NULL;
+ return 0;
+}
+
+static void ublk_deinit_zero_copy_buffer(struct request *rq)
+{
+ struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
+ struct io_uring_bvec_buf *imu = data->buf;
+
+ if (data->allocated_bvec) {
+ kvfree(imu->bvec);
+ data->allocated_bvec = false;
+ }
+}
+
static inline bool ublk_need_map_req(const struct request *req)
{
return ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE;
@@ -576,11 +644,23 @@ static inline bool ublk_need_unmap_req(const struct request *req)
return ublk_rq_has_data(req) && req_op(req) == REQ_OP_READ;
}
-static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
+static int ublk_map_io(const struct ublk_queue *ubq, struct request *req,
struct ublk_io *io)
{
const unsigned int rq_bytes = blk_rq_bytes(req);
+ if (ublk_support_zc(ubq)) {
+ int ret = ublk_init_zero_copy_buffer(req);
+
+ /*
+ * The only failure is -ENOMEM for allocating fused cmd
+ * buffer, return zero so that we can requeue this req.
+ */
+ if (unlikely(ret))
+ return 0;
+ return rq_bytes;
+ }
+
/*
* no zero copy, we delay copy WRITE request data into ublksrv
* context and the big benefit is that pinning pages in current
@@ -600,11 +680,17 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
}
static int ublk_unmap_io(const struct ublk_queue *ubq,
- const struct request *req,
+ struct request *req,
struct ublk_io *io)
{
const unsigned int rq_bytes = blk_rq_bytes(req);
+ if (ublk_support_zc(ubq)) {
+ ublk_deinit_zero_copy_buffer(req);
+
+ return rq_bytes;
+ }
+
if (ublk_need_unmap_req(req)) {
struct iov_iter iter;
struct iovec iov;
@@ -688,6 +774,12 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
}
+static inline struct ublk_uring_cmd_pdu *ublk_get_uring_fused_cmd_pdu(
+ struct io_uring_cmd *ioucmd)
+{
+ return (struct ublk_uring_cmd_pdu *)&ioucmd->fused.pdu;
+}
+
static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
{
return ubq->ubq_daemon->flags & PF_EXITING;
@@ -743,6 +835,7 @@ static inline void __ublk_complete_rq(struct request *req)
return;
exit:
+ ublk_deinit_zero_copy_buffer(req);
blk_mq_end_request(req, res);
}
@@ -1348,6 +1441,67 @@ static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
return NULL;
}
+static void ublk_fused_cmd_done_cb(struct io_uring_cmd *cmd)
+{
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_fused_cmd_pdu(cmd);
+ struct request *req = pdu->req;
+ struct ublk_queue *ubq = req->mq_hctx->driver_data;
+
+ ublk_put_req_ref(ubq, req);
+ io_uring_cmd_done(cmd, cmd->fused.data.slave_res, 0);
+}
+
+static inline bool ublk_check_fused_buf_dir(const struct request *req,
+ unsigned int flags)
+{
+ flags &= IO_URING_F_FUSED;
+
+ if (req_op(req) == REQ_OP_READ && flags == IO_URING_F_FUSED_WRITE)
+ return true;
+
+ if (req_op(req) == REQ_OP_WRITE && flags == IO_URING_F_FUSED_READ)
+ return true;
+
+ return false;
+}
+
+static int ublk_handle_fused_cmd(struct io_uring_cmd *cmd,
+ struct ublk_queue *ubq, int tag, unsigned int issue_flags)
+{
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_fused_cmd_pdu(cmd);
+ struct ublk_device *ub = cmd->file->private_data;
+ struct ublk_rq_data *data;
+ struct request *req;
+
+ if (!ub)
+ return -EPERM;
+
+ if (!(issue_flags & IO_URING_F_FUSED))
+ goto exit;
+
+ req = __ublk_check_and_get_req(ub, ubq, tag, 0);
+ if (!req)
+ goto exit;
+
+ pr_devel("%s: qid %d tag %u request bytes %u, issue flags %x\n",
+ __func__, tag, ubq->q_id, blk_rq_bytes(req),
+ issue_flags);
+
+ if (!ublk_check_fused_buf_dir(req, issue_flags))
+ goto exit_put_ref;
+
+ pdu->req = req;
+ data = blk_mq_rq_to_pdu(req);
+ io_fused_cmd_provide_kbuf(cmd, !(issue_flags & IO_URING_F_UNLOCKED),
+ data->buf, ublk_fused_cmd_done_cb);
+ return -EIOCBQUEUED;
+
+exit_put_ref:
+ ublk_put_req_ref(ubq, req);
+exit:
+ return -EINVAL;
+}
+
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
struct ublksrv_io_cmd *ub_cmd = (struct ublksrv_io_cmd *)cmd->cmd;
@@ -1363,6 +1517,10 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
__func__, cmd->cmd_op, ub_cmd->q_id, tag,
ub_cmd->result);
+ if ((issue_flags & IO_URING_F_FUSED) &&
+ cmd_op != UBLK_IO_FUSED_SUBMIT_IO)
+ return -EOPNOTSUPP;
+
if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
goto out;
@@ -1370,7 +1528,12 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
if (!ubq || ub_cmd->q_id != ubq->q_id)
goto out;
- if (ubq->ubq_daemon && ubq->ubq_daemon != current)
+ /*
+ * The fused command reads the io buffer data structure only, so it
+ * is fine to be issued from other context.
+ */
+ if ((ubq->ubq_daemon && ubq->ubq_daemon != current) &&
+ (cmd_op != UBLK_IO_FUSED_SUBMIT_IO))
goto out;
if (tag >= ubq->q_depth)
@@ -1393,6 +1556,9 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
goto out;
switch (cmd_op) {
+ case UBLK_IO_FUSED_SUBMIT_IO:
+ return ublk_handle_fused_cmd(cmd, ubq, tag, issue_flags);
+
case UBLK_IO_FETCH_REQ:
/* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
if (ublk_queue_ready(ubq)) {
@@ -1722,11 +1888,14 @@ static void ublk_align_max_io_size(struct ublk_device *ub)
static int ublk_add_tag_set(struct ublk_device *ub)
{
+ int zc = !!(ub->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY);
+ struct ublk_rq_data *data;
+
ub->tag_set.ops = &ublk_mq_ops;
ub->tag_set.nr_hw_queues = ub->dev_info.nr_hw_queues;
ub->tag_set.queue_depth = ub->dev_info.queue_depth;
ub->tag_set.numa_node = NUMA_NO_NODE;
- ub->tag_set.cmd_size = sizeof(struct ublk_rq_data);
+ ub->tag_set.cmd_size = struct_size(data, buf, zc);
ub->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ub->tag_set.driver_data = ub;
return blk_mq_alloc_tag_set(&ub->tag_set);
@@ -1942,12 +2111,18 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
*/
ub->dev_info.flags &= UBLK_F_ALL;
+ /*
+ * NEED_GET_DATA doesn't make sense any more in case that
+ * ZERO_COPY is requested. Another reason is that userspace
+ * can read/write io request buffer by pread()/pwrite() with
+ * each io buffer's position.
+ */
+ if (ub->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY)
+ ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA;
+
if (!IS_BUILTIN(CONFIG_BLK_DEV_UBLK))
ub->dev_info.flags |= UBLK_F_URING_CMD_COMP_IN_TASK;
- /* We are not ready to support zero copy */
- ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;
-
ub->dev_info.nr_hw_queues = min_t(unsigned int,
ub->dev_info.nr_hw_queues, nr_cpu_ids);
ublk_align_max_io_size(ub);
diff --git a/include/uapi/linux/ublk_cmd.h b/include/uapi/linux/ublk_cmd.h
index d1a6b3dc0327..c4f3465399cf 100644
--- a/include/uapi/linux/ublk_cmd.h
+++ b/include/uapi/linux/ublk_cmd.h
@@ -44,6 +44,7 @@
#define UBLK_IO_FETCH_REQ 0x20
#define UBLK_IO_COMMIT_AND_FETCH_REQ 0x21
#define UBLK_IO_NEED_GET_DATA 0x22
+#define UBLK_IO_FUSED_SUBMIT_IO 0x23
/* only ABORT means that no re-fetch */
#define UBLK_IO_RES_OK 0
@@ -85,10 +86,7 @@ static inline __u64 ublk_pos(__u16 q_id, __u16 tag, __u32 offset)
((((__u64)tag) << UBLK_BUF_SIZE_BITS) + offset);
}
-/*
- * zero copy requires 4k block size, and can remap ublk driver's io
- * request into ublksrv's vm space
- */
+/* io_uring fused command based zero copy */
#define UBLK_F_SUPPORT_ZERO_COPY (1ULL << 0)
/*
--
2.39.2
^ permalink raw reply related [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-14 12:57 [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD Ming Lei
` (15 preceding siblings ...)
2023-03-14 12:57 ` [PATCH V3 16/16] block: ublk_drv: apply io_uring FUSED_CMD for supporting " Ming Lei
@ 2023-03-16 3:13 ` Xiaoguang Wang
2023-03-16 3:56 ` Ming Lei
2023-03-18 16:23 ` Pavel Begunkov
2023-03-17 8:14 ` Ming Lei
` (2 subsequent siblings)
19 siblings, 2 replies; 49+ messages in thread
From: Xiaoguang Wang @ 2023-03-16 3:13 UTC (permalink / raw)
To: Ming Lei, Jens Axboe, io-uring, linux-block
Cc: Miklos Szeredi, ZiyangZhang, Bernd Schubert, Pavel Begunkov
hi,
> Hello,
>
> Add IORING_OP_FUSED_CMD, it is one special URING_CMD, which has to
> be SQE128. The 1st SQE(master) is one 64byte URING_CMD, and the 2nd
> 64byte SQE(slave) is another normal 64byte OP. For any OP which needs
> to support slave OP, io_issue_defs[op].fused_slave needs to be set as 1,
> and its ->issue() can retrieve/import buffer from master request's
> fused_cmd_kbuf. The slave OP is actually submitted from kernel, part of
> this idea is from Xiaoguang's ublk ebpf patchset, but this patchset
> submits slave OP just like normal OP issued from userspace, that said,
> SQE order is kept, and batching handling is done too.
Thanks for this great work, seems that we're now in the right direction
to support ublk zero copy, I believe this feature will improve io throughput
greatly and reduce ublk's cpu resource usage.
I have gone through your 2th patch, and have some little concerns here:
Say we have one ublk loop target device, but it has 4 backend files,
every file will carry 25% of device capacity and it's implemented in stripped
way, then for every io request, current implementation will need issed 4
fused_cmd, right? 4 slave sqes are necessary, but it would be better to
have just one master sqe, so I wonder whether we can have another
method. The key point is to let io_uring support register various kernel
memory objects, which come from kernel, such as ITER_BVEC or
ITER_KVEC. so how about below actions:
1. add a new infrastructure in io_uring, which will support to register
various kernel memory objects in it, this new infrastructure could be
maintained in a xarray structure, every memory objects in it will have
a unique id. This registration could be done in a ublk uring cmd, io_uring
offers registration interface.
2. then any sqe can use these memory objects freely, so long as it
passes above unique id in sqe properly.
Above are just rough ideas, just for your reference.
And the current zero-copy method only supports raw data redirection; if
ublk targets need to crc, compress, or encrypt raw io requests' pages,
then we'll still need to copy the block layer's io data to the userspace daemon.
In that way, ebpf may give a help :) we directly operate block layer's
io data in ebpf prog, doing crc or compress, encrypt, still does not need
to copy to userspace daemon. But as you said before, ebpf may not
support complicated user io logic, a much long way to go...
Regards,
Xiaoguang Wang
>
> Please see detailed design in commit log of the 2th patch, and one big
> point is how to handle buffer ownership.
>
> With this way, it is easy to support zero copy for ublk/fuse device.
>
> Basically userspace can specify any sub-buffer of the ublk block request
> buffer from the fused command just by setting 'offset/len'
> in the slave SQE for running slave OP. This way is flexible to implement
> io mapping: mirror, stripped, ...
>
> The 3th & 4th patches enable fused slave support for the following OPs:
>
> OP_READ/OP_WRITE
> OP_SEND/OP_RECV/OP_SEND_ZC
>
> The other ublk patches cleans ublk driver and implement fused command
> for supporting zero copy.
>
> Follows userspace code:
>
> https://github.com/ming1/ubdsrv/tree/fused-cmd-zc-v2
>
> All three(loop, nbd and qcow2) ublk targets have supported zero copy by passing:
>
> ublk add -t [loop|nbd|qcow2] -z ....
>
> Basic fs mount/kernel building and builtin test are done, and also not
> observe regression on xfstest test over ublk-loop with zero copy.
>
> Also add liburing test case for covering fused command based on miniublk
> of blktest:
>
> https://github.com/ming1/liburing/commits/fused_cmd_miniublk
>
> Performance improvement is obvious on memory bandwidth
> related workloads, such as, 1~2X improvement on 64K/512K BS
> IO test on loop with ramfs backing file.
>
> Any comments are welcome!
>
> V3:
> - fix build warning reported by kernel test robot
> - drop patch for checking fused flags on existed drivers with
> ->uring_command(), which isn't necessary, since we do not do that
> when adding new ioctl or uring command
> - inline io_init_rq() for core code, so just export io_init_slave_req
> - return result of failed slave request unconditionally since REQ_F_CQE_SKIP
> will be cleared
> - pass xfstest over ublk-loop
>
> V2:
> - don't resue io_mapped_ubuf (io_uring)
> - remove REQ_F_FUSED_MASTER_BIT (io_uring)
> - fix compile warning (io_uring)
> - rebase on v6.3-rc1 (io_uring)
> - grabbing io request reference when handling fused command
> - simplify ublk_copy_user_pages() by iov iterator
> - add read()/write() for userspace to read/write ublk io buffer, so
> that some corner cases(read zero, passthrough request(report zones)) can
> be handled easily in case of zero copy; this way also helps to switch to
> zero copy completely
> - misc cleanup
>
>
> Ming Lei (16):
> io_uring: increase io_kiocb->flags into 64bit
> io_uring: add IORING_OP_FUSED_CMD
> io_uring: support OP_READ/OP_WRITE for fused slave request
> io_uring: support OP_SEND_ZC/OP_RECV for fused slave request
> block: ublk_drv: mark device as LIVE before adding disk
> block: ublk_drv: add common exit handling
> block: ublk_drv: don't consider flush request in map/unmap io
> block: ublk_drv: add two helpers to clean up map/unmap request
> block: ublk_drv: clean up several helpers
> block: ublk_drv: cleanup 'struct ublk_map_data'
> block: ublk_drv: cleanup ublk_copy_user_pages
> block: ublk_drv: grab request reference when the request is handled by
> userspace
> block: ublk_drv: support to copy any part of request pages
> block: ublk_drv: add read()/write() support for ublk char device
> block: ublk_drv: don't check buffer in case of zero copy
> block: ublk_drv: apply io_uring FUSED_CMD for supporting zero copy
>
> drivers/block/ublk_drv.c | 602 ++++++++++++++++++++++++++-------
> include/linux/io_uring.h | 49 ++-
> include/linux/io_uring_types.h | 80 +++--
> include/uapi/linux/io_uring.h | 1 +
> include/uapi/linux/ublk_cmd.h | 37 +-
> io_uring/Makefile | 2 +-
> io_uring/fused_cmd.c | 245 ++++++++++++++
> io_uring/fused_cmd.h | 11 +
> io_uring/io_uring.c | 28 +-
> io_uring/io_uring.h | 3 +
> io_uring/net.c | 30 +-
> io_uring/opdef.c | 17 +
> io_uring/opdef.h | 2 +
> io_uring/rw.c | 20 ++
> 14 files changed, 967 insertions(+), 160 deletions(-)
> create mode 100644 io_uring/fused_cmd.c
> create mode 100644 io_uring/fused_cmd.h
>
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-16 3:13 ` [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD Xiaoguang Wang
@ 2023-03-16 3:56 ` Ming Lei
2023-03-18 16:23 ` Pavel Begunkov
1 sibling, 0 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-16 3:56 UTC (permalink / raw)
To: Xiaoguang Wang
Cc: Jens Axboe, io-uring, linux-block, Miklos Szeredi, ZiyangZhang,
Bernd Schubert, Pavel Begunkov, ming.lei
On Thu, Mar 16, 2023 at 11:13:39AM +0800, Xiaoguang Wang wrote:
> hi,
>
> > Hello,
> >
> > Add IORING_OP_FUSED_CMD, it is one special URING_CMD, which has to
> > be SQE128. The 1st SQE(master) is one 64byte URING_CMD, and the 2nd
> > 64byte SQE(slave) is another normal 64byte OP. For any OP which needs
> > to support slave OP, io_issue_defs[op].fused_slave needs to be set as 1,
> > and its ->issue() can retrieve/import buffer from master request's
> > fused_cmd_kbuf. The slave OP is actually submitted from kernel, part of
> > this idea is from Xiaoguang's ublk ebpf patchset, but this patchset
> > submits slave OP just like normal OP issued from userspace, that said,
> > SQE order is kept, and batching handling is done too.
> Thanks for this great work, seems that we're now in the right direction
> to support ublk zero copy, I believe this feature will improve io throughput
> greatly and reduce ublk's cpu resource usage.
>
> I have gone through your 2th patch, and have some little concerns here:
> Say we have one ublk loop target device, but it has 4 backend files,
> every file will carry 25% of device capacity and it's implemented in stripped
> way, then for every io request, current implementation will need issed 4
> fused_cmd, right? 4 slave sqes are necessary, but it would be better to
> have just one master sqe, so I wonder whether we can have another
Yeah, the current approach needs 4 fused commands with 4 slave requests,
but from the user viewpoint it is just 4 128byte SQEs.
It is pretty lightweight to handle the master command, just calling io_fused_cmd_provide_kbuf()
for providing the buffer, so IMO it is fine to submit 4 fused commands to handle a
single striped IO.
> method. The key point is to let io_uring support register various kernel
> memory objects, which come from kernel, such as ITER_BVEC or
> ITER_KVEC. so how about below actions:
> 1. add a new infrastructure in io_uring, which will support to register
> various kernel memory objects in it, this new infrastructure could be
> maintained in a xarray structure, every memory objects in it will have
> a unique id. This registration could be done in a ublk uring cmd, io_uring
> offers registration interface.
> 2. then any sqe can use these memory objects freely, so long as it
> passes above unique id in sqe properly.
> Above are just rough ideas, just for your reference.
I'd rather not add more complexity from the beginning, and IMO probably it
could be the most simple & generic way to handle it by single fused command,
at least the buffer lifetime/ownership won't cross multiple OPs.
Registering per-io buffer isn't free, Pavel actually mentioned the
idea, basically:
1) one OP is for registering buffer
2) another OP is for un-registering buffer
Then we still need 3+ OPs(SQEs) for handling a single IO, not to mention the buffer
has to be stored in a global(per-ctx) data structure, and you have to pay the
cost to read/write that global data structure in the IO fast path. In the case of
4 striped underlying devices, you still need 6 64byte SQEs for handling a single io.
But in future if we don't have other better candidates and fused command can't
scale well, we can extend it or add new OPs for improving the multiple underlying
devices, but so far, not see the problem.
>
> And current zero-copy method only supports raw data redirection, if
Yeah.
> ublk targets need to crc, compress, encrypt raw io requests' pages,
> then we'll still need to copy block layer's io data to userspace daemon.
Yes, zero copy can't cover all cases, that is why I add the read/write
interface to support other cases, see patch 14, then userspace can
do whatever they like.
Actually once zero copy is accepted, I'd suggest to mark the non-zc code path
as legacy, since the copy can be done explicitly in userspace by the added
read()/write(). And ublk driver can get simplified & cleaned, same with
userspace implementation.
> In that way, ebpf may give a help :) we directly operate block layer's
> io data in ebpf prog, doing crc or compress, encrypt, still does not need
> to copy to userspace daemon. But as you said before, ebpf may not
> support complicated user io logic, a much long way to go...
Of course, there can be lots of work for future improvement, and ebpf is
really one great weapon, but let's start effectively with something
reliable & simple.
thanks,
Ming
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-16 3:13 ` [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD Xiaoguang Wang
2023-03-16 3:56 ` Ming Lei
@ 2023-03-18 16:23 ` Pavel Begunkov
2023-03-18 16:39 ` Ming Lei
` (2 more replies)
1 sibling, 3 replies; 49+ messages in thread
From: Pavel Begunkov @ 2023-03-18 16:23 UTC (permalink / raw)
To: Xiaoguang Wang, Ming Lei, Jens Axboe, io-uring, linux-block
Cc: Miklos Szeredi, ZiyangZhang, Bernd Schubert
On 3/16/23 03:13, Xiaoguang Wang wrote:
>> Add IORING_OP_FUSED_CMD, it is one special URING_CMD, which has to
>> be SQE128. The 1st SQE(master) is one 64byte URING_CMD, and the 2nd
>> 64byte SQE(slave) is another normal 64byte OP. For any OP which needs
>> to support slave OP, io_issue_defs[op].fused_slave needs to be set as 1,
>> and its ->issue() can retrieve/import buffer from master request's
>> fused_cmd_kbuf. The slave OP is actually submitted from kernel, part of
>> this idea is from Xiaoguang's ublk ebpf patchset, but this patchset
>> submits slave OP just like normal OP issued from userspace, that said,
>> SQE order is kept, and batching handling is done too.
> Thanks for this great work, seems that we're now in the right direction
> to support ublk zero copy, I believe this feature will improve io throughput
> greatly and reduce ublk's cpu resource usage.
>
> I have gone through your 2th patch, and have some little concerns here:
> Say we have one ublk loop target device, but it has 4 backend files,
> every file will carry 25% of device capacity and it's implemented in stripped
> way, then for every io request, current implementation will need issed 4
> fused_cmd, right? 4 slave sqes are necessary, but it would be better to
> have just one master sqe, so I wonder whether we can have another
> method. The key point is to let io_uring support register various kernel
> memory objects, which come from kernel, such as ITER_BVEC or
> ITER_KVEC. so how about below actions:
> 1. add a new infrastructure in io_uring, which will support to register
> various kernel memory objects in it, this new infrastructure could be
> maintained in a xarray structure, every memory objects in it will have
> a unique id. This registration could be done in a ublk uring cmd, io_uring
> offers registration interface.
> 2. then any sqe can use these memory objects freely, so long as it
> passes above unique id in sqe properly.
> Above are just rough ideas, just for your reference.
It precisely hints on what I proposed a bit earlier, that makes
me not alone thinking that it's a good idea to have a design allowing
1) multiple ops using a buffer and 2) to limiting it to one single
submission because the userspace might want to preprocess a part
of the data, multiplex it or on the opposite divide. I was mostly
coming from non ublk cases, and one example would be such zc recv,
parsing the app level headers and redirecting the rest of the data
somewhere.
I haven't got a chance to work on it but will return to it in
a week. The discussion was here:
https://lore.kernel.org/all/[email protected]/
--
Pavel Begunkov
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-18 16:23 ` Pavel Begunkov
@ 2023-03-18 16:39 ` Ming Lei
2023-03-21 9:17 ` Ziyang Zhang
2023-03-25 14:15 ` Ming Lei
2 siblings, 0 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-18 16:39 UTC (permalink / raw)
To: Pavel Begunkov
Cc: Xiaoguang Wang, Jens Axboe, io-uring, linux-block, Miklos Szeredi,
ZiyangZhang, Bernd Schubert, ming.lei
On Sat, Mar 18, 2023 at 04:23:35PM +0000, Pavel Begunkov wrote:
> On 3/16/23 03:13, Xiaoguang Wang wrote:
> > > Add IORING_OP_FUSED_CMD, it is one special URING_CMD, which has to
> > > be SQE128. The 1st SQE(master) is one 64byte URING_CMD, and the 2nd
> > > 64byte SQE(slave) is another normal 64byte OP. For any OP which needs
> > > to support slave OP, io_issue_defs[op].fused_slave needs to be set as 1,
> > > and its ->issue() can retrieve/import buffer from master request's
> > > fused_cmd_kbuf. The slave OP is actually submitted from kernel, part of
> > > this idea is from Xiaoguang's ublk ebpf patchset, but this patchset
> > > submits slave OP just like normal OP issued from userspace, that said,
> > > SQE order is kept, and batching handling is done too.
> > Thanks for this great work, seems that we're now in the right direction
> > to support ublk zero copy, I believe this feature will improve io throughput
> > greatly and reduce ublk's cpu resource usage.
> >
> > I have gone through your 2th patch, and have some little concerns here:
> > Say we have one ublk loop target device, but it has 4 backend files,
> > every file will carry 25% of device capacity and it's implemented in stripped
> > way, then for every io request, current implementation will need issed 4
> > fused_cmd, right? 4 slave sqes are necessary, but it would be better to
> > have just one master sqe, so I wonder whether we can have another
> > method. The key point is to let io_uring support register various kernel
> > memory objects, which come from kernel, such as ITER_BVEC or
> > ITER_KVEC. so how about below actions:
> > 1. add a new infrastructure in io_uring, which will support to register
> > various kernel memory objects in it, this new infrastructure could be
> > maintained in a xarray structure, every memory objects in it will have
> > a unique id. This registration could be done in a ublk uring cmd, io_uring
> > offers registration interface.
> > 2. then any sqe can use these memory objects freely, so long as it
> > passes above unique id in sqe properly.
> > Above are just rough ideas, just for your reference.
>
> It precisely hints on what I proposed a bit earlier, that makes
> me not alone thinking that it's a good idea to have a design allowing
> 1) multiple ops using a buffer and
Firstly fused command does cover this case, io_fused_cmd_provide_kbuf()
is very cheap, which just passes buffer reference.
Secondly, your original suggestion is to wire the per-io buffer with
context fixed buffer, which basically has to add two OPs:
1) one for registering buffer
2) another one for un-registering buffer
So one usual such IO may have to takes 3+ SQEs, which won't be efficient for
single or even double submission cases since the cost of touching global
context fixed buffer can't be ignored.
> 2) to limiting it to one single
> submission because the userspace might want to preprocess a part
> of the data, multiplex it or on the opposite divide.
Unfortunately ublk has to support multiple submissions, and there can
be lots of such use cases: logical volume manager (mirror, striped),
distributed network storage, ...
Thanks,
Ming
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-18 16:23 ` Pavel Begunkov
2023-03-18 16:39 ` Ming Lei
@ 2023-03-21 9:17 ` Ziyang Zhang
2023-03-27 16:04 ` Pavel Begunkov
2023-03-28 0:53 ` Ming Lei
2023-03-25 14:15 ` Ming Lei
2 siblings, 2 replies; 49+ messages in thread
From: Ziyang Zhang @ 2023-03-21 9:17 UTC (permalink / raw)
To: Pavel Begunkov, Ming Lei
Cc: Miklos Szeredi, Bernd Schubert, Jens Axboe, Xiaoguang Wang,
io-uring, linux-block
On 2023/3/19 00:23, Pavel Begunkov wrote:
> On 3/16/23 03:13, Xiaoguang Wang wrote:
>>> Add IORING_OP_FUSED_CMD, it is one special URING_CMD, which has to
>>> be SQE128. The 1st SQE(master) is one 64byte URING_CMD, and the 2nd
>>> 64byte SQE(slave) is another normal 64byte OP. For any OP which needs
>>> to support slave OP, io_issue_defs[op].fused_slave needs to be set as 1,
>>> and its ->issue() can retrieve/import buffer from master request's
>>> fused_cmd_kbuf. The slave OP is actually submitted from kernel, part of
>>> this idea is from Xiaoguang's ublk ebpf patchset, but this patchset
>>> submits slave OP just like normal OP issued from userspace, that said,
>>> SQE order is kept, and batching handling is done too.
>> Thanks for this great work, seems that we're now in the right direction
>> to support ublk zero copy, I believe this feature will improve io throughput
>> greatly and reduce ublk's cpu resource usage.
>>
>> I have gone through your 2th patch, and have some little concerns here:
>> Say we have one ublk loop target device, but it has 4 backend files,
>> every file will carry 25% of device capacity and it's implemented in stripped
>> way, then for every io request, current implementation will need issed 4
>> fused_cmd, right? 4 slave sqes are necessary, but it would be better to
>> have just one master sqe, so I wonder whether we can have another
>> method. The key point is to let io_uring support register various kernel
>> memory objects, which come from kernel, such as ITER_BVEC or
>> ITER_KVEC. so how about below actions:
>> 1. add a new infrastructure in io_uring, which will support to register
>> various kernel memory objects in it, this new infrastructure could be
>> maintained in a xarray structure, every memory objects in it will have
>> a unique id. This registration could be done in a ublk uring cmd, io_uring
>> offers registration interface.
>> 2. then any sqe can use these memory objects freely, so long as it
>> passes above unique id in sqe properly.
>> Above are just rough ideas, just for your reference.
>
> It precisely hints on what I proposed a bit earlier, that makes
> me not alone thinking that it's a good idea to have a design allowing
> 1) multiple ops using a buffer and 2) to limiting it to one single
> submission because the userspace might want to preprocess a part
> of the data, multiplex it or on the opposite divide. I was mostly
> coming from non ublk cases, and one example would be such zc recv,
> parsing the app level headers and redirecting the rest of the data
> somewhere.
>
> I haven't got a chance to work on it but will return to it in
> a week. The discussion was here:
>
> https://lore.kernel.org/all/[email protected]/
>
Hi Pavel and all,
I think it is a good idea to register some kernel objects(such as bvec)
in io_uring and return a cookie(such as buf_idx) for READ/WRITE/SEND/RECV sqes.
There are some ways to register user's buffer such as IORING_OP_PROVIDE_BUFFERS
and IORING_REGISTER_PBUF_RING but there is not a way to register kernel buffer(bvec).
I do not think reusing splice is a good idea because splice should run in io-wq.
If we have a big sq depth there may be lots of io-wqs. Then lots of context switches
may lower the IO performance, especially for small IO sizes.
Here are some rough ideas:
(1) design a new OPCODE such as IORING_REGISTER_KOBJ to register kernel objects in
io_uring or
(2) reuse uring-cmd. We can send uring-cmd to drivers(opcode may be CMD_REGISTER_KBUF)
and let drivers call io_uring_provide_kbuf() to register kbuf. io_uring_provide_kbuf()
is a new function provided by io_uring for drivers.
(3) let the driver call io_uring_provide_kbuf() directly. For ublk, this function is called
before io_uring_cmd_done().
Regards,
Zhang
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-21 9:17 ` Ziyang Zhang
@ 2023-03-27 16:04 ` Pavel Begunkov
2023-03-28 1:01 ` Ming Lei
2023-03-28 0:53 ` Ming Lei
1 sibling, 1 reply; 49+ messages in thread
From: Pavel Begunkov @ 2023-03-27 16:04 UTC (permalink / raw)
To: Ziyang Zhang, Ming Lei
Cc: Miklos Szeredi, Bernd Schubert, Jens Axboe, Xiaoguang Wang,
io-uring, linux-block
On 3/21/23 09:17, Ziyang Zhang wrote:
> On 2023/3/19 00:23, Pavel Begunkov wrote:
>> On 3/16/23 03:13, Xiaoguang Wang wrote:
>>>> Add IORING_OP_FUSED_CMD, it is one special URING_CMD, which has to
>>>> be SQE128. The 1st SQE(master) is one 64byte URING_CMD, and the 2nd
>>>> 64byte SQE(slave) is another normal 64byte OP. For any OP which needs
>>>> to support slave OP, io_issue_defs[op].fused_slave needs to be set as 1,
>>>> and its ->issue() can retrieve/import buffer from master request's
>>>> fused_cmd_kbuf. The slave OP is actually submitted from kernel, part of
>>>> this idea is from Xiaoguang's ublk ebpf patchset, but this patchset
>>>> submits slave OP just like normal OP issued from userspace, that said,
>>>> SQE order is kept, and batching handling is done too.
>>> Thanks for this great work, seems that we're now in the right direction
>>> to support ublk zero copy, I believe this feature will improve io throughput
>>> greatly and reduce ublk's cpu resource usage.
>>>
>>> I have gone through your 2th patch, and have some little concerns here:
>>> Say we have one ublk loop target device, but it has 4 backend files,
>>> every file will carry 25% of device capacity and it's implemented in stripped
>>> way, then for every io request, current implementation will need issed 4
>>> fused_cmd, right? 4 slave sqes are necessary, but it would be better to
>>> have just one master sqe, so I wonder whether we can have another
>>> method. The key point is to let io_uring support register various kernel
>>> memory objects, which come from kernel, such as ITER_BVEC or
>>> ITER_KVEC. so how about below actions:
>>> 1. add a new infrastructure in io_uring, which will support to register
>>> various kernel memory objects in it, this new infrastructure could be
>>> maintained in a xarray structure, every memory objects in it will have
>>> a unique id. This registration could be done in a ublk uring cmd, io_uring
>>> offers registration interface.
>>> 2. then any sqe can use these memory objects freely, so long as it
>>> passes above unique id in sqe properly.
>>> Above are just rough ideas, just for your reference.
>>
>> It precisely hints on what I proposed a bit earlier, that makes
>> me not alone thinking that it's a good idea to have a design allowing
>> 1) multiple ops using a buffer and 2) to limiting it to one single
>> submission because the userspace might want to preprocess a part
>> of the data, multiplex it or on the opposite divide. I was mostly
>> coming from non ublk cases, and one example would be such zc recv,
>> parsing the app level headers and redirecting the rest of the data
>> somewhere.
>>
>> I haven't got a chance to work on it but will return to it in
>> a week. The discussion was here:
>>
>> https://lore.kernel.org/all/[email protected]/
>>
>
> Hi Pavel and all,
>
> I think it is a good idea to register some kernel objects(such as bvec)
> in io_uring and return a cookie(such as buf_idx) for READ/WRITE/SEND/RECV sqes.
> There are some ways to register user's buffer such as IORING_OP_PROVIDE_BUFFERS
> and IORING_REGISTER_PBUF_RING but there is not a way to register kernel buffer(bvec).
>
> I do not think reusing splice is a good idea because splice should run in io-wq.
The reason why I disabled inline splice execution is because do_splice()
and below the stack doesn't support nowait well enough, which is not a
problem when we hook directly under the ->splice_read() callback and
operate only with one file at a time at the io_uring level.
> If we have a big sq depth there may be lots of io-wqs. Then lots of context switch
> may lower the IO performance especially for small IO size.
>
> Here are some rough ideas:
> (1) design a new OPCODE such as IORING_REGISTER_KOBJ to register kernel objects in
> io_uring or
> (2) reuse uring-cmd. We can send uring-cmd to drivers(opcode may be CMD_REGISTER_KBUF)
> and let drivers call io_uring_provide_kbuf() to register kbuf. io_uring_provide_kbuf()
> is a new function provided by io_uring for drivers.
> (3) let the driver call io_uring_provide_kbuf() directly. For ublk, this function is called
> before io_uring_cmd_done().
--
Pavel Begunkov
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-27 16:04 ` Pavel Begunkov
@ 2023-03-28 1:01 ` Ming Lei
2023-03-28 11:01 ` Pavel Begunkov
0 siblings, 1 reply; 49+ messages in thread
From: Ming Lei @ 2023-03-28 1:01 UTC (permalink / raw)
To: Pavel Begunkov
Cc: Ziyang Zhang, Miklos Szeredi, Bernd Schubert, Jens Axboe,
Xiaoguang Wang, io-uring, linux-block, ming.lei
On Mon, Mar 27, 2023 at 05:04:01PM +0100, Pavel Begunkov wrote:
> On 3/21/23 09:17, Ziyang Zhang wrote:
> > On 2023/3/19 00:23, Pavel Begunkov wrote:
> > > On 3/16/23 03:13, Xiaoguang Wang wrote:
> > > > > Add IORING_OP_FUSED_CMD, it is one special URING_CMD, which has to
> > > > > be SQE128. The 1st SQE(master) is one 64byte URING_CMD, and the 2nd
> > > > > 64byte SQE(slave) is another normal 64byte OP. For any OP which needs
> > > > > to support slave OP, io_issue_defs[op].fused_slave needs to be set as 1,
> > > > > and its ->issue() can retrieve/import buffer from master request's
> > > > > fused_cmd_kbuf. The slave OP is actually submitted from kernel, part of
> > > > > this idea is from Xiaoguang's ublk ebpf patchset, but this patchset
> > > > > submits slave OP just like normal OP issued from userspace, that said,
> > > > > SQE order is kept, and batching handling is done too.
> > > > Thanks for this great work, seems that we're now in the right direction
> > > > to support ublk zero copy, I believe this feature will improve io throughput
> > > > greatly and reduce ublk's cpu resource usage.
> > > >
> > > > I have gone through your 2th patch, and have some little concerns here:
> > > > Say we have one ublk loop target device, but it has 4 backend files,
> > > > every file will carry 25% of device capacity and it's implemented in stripped
> > > > way, then for every io request, current implementation will need issed 4
> > > > fused_cmd, right? 4 slave sqes are necessary, but it would be better to
> > > > have just one master sqe, so I wonder whether we can have another
> > > > method. The key point is to let io_uring support register various kernel
> > > > memory objects, which come from kernel, such as ITER_BVEC or
> > > > ITER_KVEC. so how about below actions:
> > > > 1. add a new infrastructure in io_uring, which will support to register
> > > > various kernel memory objects in it, this new infrastructure could be
> > > > maintained in a xarray structure, every memory objects in it will have
> > > > a unique id. This registration could be done in a ublk uring cmd, io_uring
> > > > offers registration interface.
> > > > 2. then any sqe can use these memory objects freely, so long as it
> > > > passes above unique id in sqe properly.
> > > > Above are just rough ideas, just for your reference.
> > >
> > > It precisely hints on what I proposed a bit earlier, that makes
> > > me not alone thinking that it's a good idea to have a design allowing
> > > 1) multiple ops using a buffer and 2) to limiting it to one single
> > > submission because the userspace might want to preprocess a part
> > > of the data, multiplex it or on the opposite divide. I was mostly
> > > coming from non ublk cases, and one example would be such zc recv,
> > > parsing the app level headers and redirecting the rest of the data
> > > somewhere.
> > >
> > > I haven't got a chance to work on it but will return to it in
> > > a week. The discussion was here:
> > >
> > > https://lore.kernel.org/all/[email protected]/
> > >
> >
> > Hi Pavel and all,
> >
> > I think it is a good idea to register some kernel objects(such as bvec)
> > in io_uring and return a cookie(such as buf_idx) for READ/WRITE/SEND/RECV sqes.
> > There are some ways to register user's buffer such as IORING_OP_PROVIDE_BUFFERS
> > and IORING_REGISTER_PBUF_RING but there is not a way to register kernel buffer(bvec).
> >
> > I do not think reusing splice is a good idea because splice should run in io-wq.
>
> The reason why I disabled inline splice execution is because do_splice()
> and below the stack doesn't support nowait well enough, which is not a
> problem when we hook directly under the ->splice_read() callback and
> operate only with one file at a time at the io_uring level.
I believe I have explained several times[1][2] that it isn't a good solution for ublk
zero copy.
But if you insist on reusing splice for this feature, please share your code and
I'm happy to give an review.
[1] https://lore.kernel.org/linux-block/ZB8B8cr1%[email protected]/T/#m1bfa358524b6af94731bcd5be28056f9f4408ecf
[2] https://github.com/ming1/linux/blob/my_v6.3-io_uring_fuse_cmd_v4/Documentation/block/ublk.rst#zero-copy
Thanks,
Ming
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-28 1:01 ` Ming Lei
@ 2023-03-28 11:01 ` Pavel Begunkov
0 siblings, 0 replies; 49+ messages in thread
From: Pavel Begunkov @ 2023-03-28 11:01 UTC (permalink / raw)
To: Ming Lei
Cc: Ziyang Zhang, Miklos Szeredi, Bernd Schubert, Jens Axboe,
Xiaoguang Wang, io-uring, linux-block
On 3/28/23 02:01, Ming Lei wrote:
> On Mon, Mar 27, 2023 at 05:04:01PM +0100, Pavel Begunkov wrote:
>> On 3/21/23 09:17, Ziyang Zhang wrote:
>>> On 2023/3/19 00:23, Pavel Begunkov wrote:
>>>> On 3/16/23 03:13, Xiaoguang Wang wrote:
>>>>>> Add IORING_OP_FUSED_CMD, it is one special URING_CMD, which has to
>>>>>> be SQE128. The 1st SQE(master) is one 64byte URING_CMD, and the 2nd
>>>>>> 64byte SQE(slave) is another normal 64byte OP. For any OP which needs
>>>>>> to support slave OP, io_issue_defs[op].fused_slave needs to be set as 1,
>>>>>> and its ->issue() can retrieve/import buffer from master request's
>>>>>> fused_cmd_kbuf. The slave OP is actually submitted from kernel, part of
>>>>>> this idea is from Xiaoguang's ublk ebpf patchset, but this patchset
>>>>>> submits slave OP just like normal OP issued from userspace, that said,
>>>>>> SQE order is kept, and batching handling is done too.
>>>>> Thanks for this great work, seems that we're now in the right direction
>>>>> to support ublk zero copy, I believe this feature will improve io throughput
>>>>> greatly and reduce ublk's cpu resource usage.
>>>>>
>>>>> I have gone through your 2th patch, and have some little concerns here:
>>>>> Say we have one ublk loop target device, but it has 4 backend files,
>>>>> every file will carry 25% of device capacity and it's implemented in stripped
>>>>> way, then for every io request, current implementation will need issed 4
>>>>> fused_cmd, right? 4 slave sqes are necessary, but it would be better to
>>>>> have just one master sqe, so I wonder whether we can have another
>>>>> method. The key point is to let io_uring support register various kernel
>>>>> memory objects, which come from kernel, such as ITER_BVEC or
>>>>> ITER_KVEC. so how about below actions:
>>>>> 1. add a new infrastructure in io_uring, which will support to register
>>>>> various kernel memory objects in it, this new infrastructure could be
>>>>> maintained in a xarray structure, every memory objects in it will have
>>>>> a unique id. This registration could be done in a ublk uring cmd, io_uring
>>>>> offers registration interface.
>>>>> 2. then any sqe can use these memory objects freely, so long as it
>>>>> passes above unique id in sqe properly.
>>>>> Above are just rough ideas, just for your reference.
>>>>
>>>> It precisely hints on what I proposed a bit earlier, that makes
>>>> me not alone thinking that it's a good idea to have a design allowing
>>>> 1) multiple ops using a buffer and 2) to limiting it to one single
>>>> submission because the userspace might want to preprocess a part
>>>> of the data, multiplex it or on the opposite divide. I was mostly
>>>> coming from non ublk cases, and one example would be such zc recv,
>>>> parsing the app level headers and redirecting the rest of the data
>>>> somewhere.
>>>>
>>>> I haven't got a chance to work on it but will return to it in
>>>> a week. The discussion was here:
>>>>
>>>> https://lore.kernel.org/all/[email protected]/
>>>>
>>>
>>> Hi Pavel and all,
>>>
>>> I think it is a good idea to register some kernel objects(such as bvec)
>>> in io_uring and return a cookie(such as buf_idx) for READ/WRITE/SEND/RECV sqes.
>>> There are some ways to register user's buffer such as IORING_OP_PROVIDE_BUFFERS
>>> and IORING_REGISTER_PBUF_RING but there is not a way to register kernel buffer(bvec).
>>>
>>> I do not think reusing splice is a good idea because splice should run in io-wq.
>>
>> The reason why I disabled inline splice execution is because do_splice()
>> and below the stack doesn't support nowait well enough, which is not a
>> problem when we hook directly under the ->splice_read() callback and
>> operate only with one file at a time at the io_uring level.
>
> I believe I have explained several times[1][2] it isn't good solution for ublk
> zero copy.
>
> But if you insist on reusing splice for this feature, please share your code and
> I'm happy to give an review.
Absolutely, I was not available the last week, will be catching up to
all that and prototyping it. Let me just note again that my point was
not in internally using splice bits but rather in having a different
uapi, i.e. mediating with the io_uring's registered buffers.
> [1] https://lore.kernel.org/linux-block/ZB8B8cr1%[email protected]/T/#m1bfa358524b6af94731bcd5be28056f9f4408ecf
> [2] https://github.com/ming1/linux/blob/my_v6.3-io_uring_fuse_cmd_v4/Documentation/block/ublk.rst#zero-copy
--
Pavel Begunkov
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-21 9:17 ` Ziyang Zhang
2023-03-27 16:04 ` Pavel Begunkov
@ 2023-03-28 0:53 ` Ming Lei
2023-03-29 6:57 ` Ziyang Zhang
1 sibling, 1 reply; 49+ messages in thread
From: Ming Lei @ 2023-03-28 0:53 UTC (permalink / raw)
To: Ziyang Zhang
Cc: Pavel Begunkov, Miklos Szeredi, Bernd Schubert, Jens Axboe,
Xiaoguang Wang, io-uring, linux-block, ming.lei
Hi Ziyang,
On Tue, Mar 21, 2023 at 05:17:56PM +0800, Ziyang Zhang wrote:
> On 2023/3/19 00:23, Pavel Begunkov wrote:
> > On 3/16/23 03:13, Xiaoguang Wang wrote:
> >>> Add IORING_OP_FUSED_CMD, it is one special URING_CMD, which has to
> >>> be SQE128. The 1st SQE(master) is one 64byte URING_CMD, and the 2nd
> >>> 64byte SQE(slave) is another normal 64byte OP. For any OP which needs
> >>> to support slave OP, io_issue_defs[op].fused_slave needs to be set as 1,
> >>> and its ->issue() can retrieve/import buffer from master request's
> >>> fused_cmd_kbuf. The slave OP is actually submitted from kernel, part of
> >>> this idea is from Xiaoguang's ublk ebpf patchset, but this patchset
> >>> submits slave OP just like normal OP issued from userspace, that said,
> >>> SQE order is kept, and batching handling is done too.
> >> Thanks for this great work, seems that we're now in the right direction
> >> to support ublk zero copy, I believe this feature will improve io throughput
> >> greatly and reduce ublk's cpu resource usage.
> >>
> >> I have gone through your 2nd patch, and have some small concerns here:
> >> Say we have one ublk loop target device, but it has 4 backend files,
> >> every file will carry 25% of device capacity and it's implemented in striped
> >> way, then for every io request, the current implementation will need to issue 4
> >> fused_cmd, right? 4 slave sqes are necessary, but it would be better to
> >> have just one master sqe, so I wonder whether we can have another
> >> method. The key point is to let io_uring support register various kernel
> >> memory objects, which come from kernel, such as ITER_BVEC or
> >> ITER_KVEC. so how about below actions:
> >> 1. add a new infrastructure in io_uring, which will support to register
> >> various kernel memory objects in it, this new infrastructure could be
> >> maintained in a xarray structure, every memory objects in it will have
> >> a unique id. This registration could be done in a ublk uring cmd, io_uring
> >> offers registration interface.
> >> 2. then any sqe can use these memory objects freely, so long as it
> >> passes above unique id in sqe properly.
> >> Above are just rough ideas, just for your reference.
> >
> > It precisely hints on what I proposed a bit earlier, that makes
> > me not alone thinking that it's a good idea to have a design allowing
> > 1) multiple ops using a buffer and 2) to limiting it to one single
> > submission because the userspace might want to preprocess a part
> > of the data, multiplex it or on the opposite divide. I was mostly
> > coming from non ublk cases, and one example would be such zc recv,
> > parsing the app level headers and redirecting the rest of the data
> > somewhere.
> >
> > I haven't got a chance to work on it but will return to it in
> > a week. The discussion was here:
> >
> > https://lore.kernel.org/all/[email protected]/
> >
>
> Hi Pavel and all,
>
> I think it is a good idea to register some kernel objects(such as bvec)
> in io_uring and return a cookie(such as buf_idx) for READ/WRITE/SEND/RECV sqes.
> There are some ways to register user's buffer such as IORING_OP_PROVIDE_BUFFERS
> and IORING_REGISTER_PBUF_RING but there is not a way to register kernel buffer(bvec).
>
> I do not think reusing splice is a good idea because splice should run in io-wq.
> If we have a big sq depth there may be lots of io-wqs. Then lots of context switch
> may lower the IO performance especially for small IO size.
Agree, not only is it hard for splice to guarantee correctness of buffer lifetime,
but also it is much less efficient to support the feature in one very ugly way, not
to mention that Linus objects to extending splice wrt. the buffer direction issue, see the reasoning
in my document:
https://github.com/ming1/linux/blob/my_v6.3-io_uring_fuse_cmd_v4/Documentation/block/ublk.rst#zero-copy
>
> Here are some rough ideas:
> (1) design a new OPCODE such as IORING_REGISTER_KOBJ to register kernel objects in
> io_uring or
> (2) reuse uring-cmd. We can send uring-cmd to drivers(opcode may be CMD_REGISTER_KBUF)
> and let drivers call io_uring_provide_kbuf() to register kbuf. io_uring_provide_kbuf()
> is a new function provided by io_uring for drivers.
> (3) let the driver call io_uring_provide_kbuf() directly. For ublk, this function is called
> before io_uring_cmd_done().
Can you explain a bit which use cases you are trying to address by
registering kernel io buffer unmapped to userspace?
The buffer (the request buffer, represented by bvec) is just bvecs, basically only
physical pages available, and the userspace does not have mapping(virtual address)
on this buffer and can't read/write the buffer, so I don't think it makes sense
to register the buffer somewhere for userspace, does it?
That said the buffer should only be used by kernel, such as io_uring normal OPs.
It is basically invisible for userspace,
However, Xiaoguang's BPF might be one perfect supplement here[1], such as:
- add one generic io_uring BPF OP, which can run one specified registered BPF
program by passing bpf_prog_id
- link this BPF OP as slave request of fused command, then the ebpf prog can do
whatever on the kernel pages if kernel mapping & buffer read/write is allowed
for ebpf prog, and results can be returned into user via any bpf mapping(s)
- then userspace can decide how to handle the result from bpf mapping(s), such as,
submit another fused command to handle IO with part of the kernel buffer.
Also the buffer is io buffer, and its lifetime is pretty short, and register/
unregister introduces unnecessary cost in fast io path for any approach.
Finally it is pretty easy to extend fused command[2] for supporting this kind of
interface[2], but at least you need to share your use case first.
[1] https://lwn.net/Articles/927356/
[2] https://lore.kernel.org/linux-block/[email protected]/T/#m0b8d0dcca5024765cef0439ef1d8ca3f7b38bd1c
Thanks,
Ming
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-28 0:53 ` Ming Lei
@ 2023-03-29 6:57 ` Ziyang Zhang
2023-03-29 8:52 ` Ming Lei
0 siblings, 1 reply; 49+ messages in thread
From: Ziyang Zhang @ 2023-03-29 6:57 UTC (permalink / raw)
To: Ming Lei
Cc: Pavel Begunkov, Miklos Szeredi, Bernd Schubert, Jens Axboe,
Xiaoguang Wang, io-uring, linux-block
On 2023/3/28 08:53, Ming Lei wrote:
> Hi Ziyang,
>
> On Tue, Mar 21, 2023 at 05:17:56PM +0800, Ziyang Zhang wrote:
>> On 2023/3/19 00:23, Pavel Begunkov wrote:
>>> On 3/16/23 03:13, Xiaoguang Wang wrote:
>>>>> Add IORING_OP_FUSED_CMD, it is one special URING_CMD, which has to
>>>>> be SQE128. The 1st SQE(master) is one 64byte URING_CMD, and the 2nd
>>>>> 64byte SQE(slave) is another normal 64byte OP. For any OP which needs
>>>>> to support slave OP, io_issue_defs[op].fused_slave needs to be set as 1,
>>>>> and its ->issue() can retrieve/import buffer from master request's
>>>>> fused_cmd_kbuf. The slave OP is actually submitted from kernel, part of
>>>>> this idea is from Xiaoguang's ublk ebpf patchset, but this patchset
>>>>> submits slave OP just like normal OP issued from userspace, that said,
>>>>> SQE order is kept, and batching handling is done too.
>>>> Thanks for this great work, seems that we're now in the right direction
>>>> to support ublk zero copy, I believe this feature will improve io throughput
>>>> greatly and reduce ublk's cpu resource usage.
>>>>
>>>> I have gone through your 2th patch, and have some little concerns here:
>>>> Say we have one ublk loop target device, but it has 4 backend files,
>>>> every file will carry 25% of device capacity and it's implemented in stripped
>>>> way, then for every io request, current implementation will need issed 4
>>>> fused_cmd, right? 4 slave sqes are necessary, but it would be better to
>>>> have just one master sqe, so I wonder whether we can have another
>>>> method. The key point is to let io_uring support register various kernel
>>>> memory objects, which come from kernel, such as ITER_BVEC or
>>>> ITER_KVEC. so how about below actions:
>>>> 1. add a new infrastructure in io_uring, which will support to register
>>>> various kernel memory objects in it, this new infrastructure could be
>>>> maintained in a xarray structure, every memory objects in it will have
>>>> a unique id. This registration could be done in a ublk uring cmd, io_uring
>>>> offers registration interface.
>>>> 2. then any sqe can use these memory objects freely, so long as it
>>>> passes above unique id in sqe properly.
>>>> Above are just rough ideas, just for your reference.
>>>
>>> It precisely hints on what I proposed a bit earlier, that makes
>>> me not alone thinking that it's a good idea to have a design allowing
>>> 1) multiple ops using a buffer and 2) to limiting it to one single
>>> submission because the userspace might want to preprocess a part
>>> of the data, multiplex it or on the opposite divide. I was mostly
>>> coming from non ublk cases, and one example would be such zc recv,
>>> parsing the app level headers and redirecting the rest of the data
>>> somewhere.
>>>
>>> I haven't got a chance to work on it but will return to it in
>>> a week. The discussion was here:
>>>
>>> https://lore.kernel.org/all/[email protected]/
>>>
>>
>> Hi Pavel and all,
>>
>> I think it is a good idea to register some kernel objects(such as bvec)
>> in io_uring and return a cookie(such as buf_idx) for READ/WRITE/SEND/RECV sqes.
>> There are some ways to register user's buffer such as IORING_OP_PROVIDE_BUFFERS
>> and IORING_REGISTER_PBUF_RING but there is not a way to register kernel buffer(bvec).
>>
>> I do not think reusing splice is a good idea because splice should run in io-wq.
>> If we have a big sq depth there may be lots of io-wqs. Then lots of context switch
>> may lower the IO performance especially for small IO size.
>
> Agree, not only it is hard for splice to guarantee correctness of buffer lifetime,
> but also it is much less efficient to support the feature in one very ugly way, not
> mention Linus objects to extend splice wrt. buffer direction issue, see the reasoning
> in my document:
>
> https://github.com/ming1/linux/blob/my_v6.3-io_uring_fuse_cmd_v4/Documentation/block/ublk.rst#zero-copy
>
>>
>> Here are some rough ideas:
>> (1) design a new OPCODE such as IORING_REGISTER_KOBJ to register kernel objects in
>> io_uring or
>> (2) reuse uring-cmd. We can send uring-cmd to drivers(opcode may be CMD_REGISTER_KBUF)
>> and let drivers call io_uring_provide_kbuf() to register kbuf. io_uring_provide_kbuf()
>> is a new function provided by io_uring for drivers.
>> (3) let the driver call io_uring_provide_kbuf() directly. For ublk, this function is called
>> before io_uring_cmd_done().
>
> Can you explain a bit which use cases you are trying to address by
> registering kernel io buffer unmapped to userspace?
Hi Ming,
Sorry there is no specific use case. In our product, we have to calculate cksum
or compress data before sending IO to remote backend. So Xiaoguang's EBPF might
be the final solution... :) But I'd rather start here...
I think you, Pavel and I all have the same basic idea: register the kernel object
(bvec) first then incoming sqes can use it. But I think fused-cmd is too specific
(hack) to ublk so other users of io_uring may not benefit from it.
What if we design a general way which allows io_uring to register kernel objects
(such as bvec) just like IORING_OP_PROVIDE_BUFFERS or IORING_REGISTER_PBUF_RING?
Pavel said that registration replaces fuse master cmd. And I think so too.
>
> The buffer(request buffer, represented by bvec) are just bvecs, basically only
> physical pages available, and the userspace does not have mapping(virtual address)
> on this buffer and can't read/write the buffer, so I don't think it makes sense
> to register the buffer somewhere for userspace, does it?
The userspace does not touch these registered kernel bvecs, but reference it id.
For example, we can set "sqe->kobj_id" so this sqe can import this bvec as its
RW buffer just like IORING_OP_PROVIDE_BUFFERS.
There is a limitation on fused-cmd: the secondary sqe has to be primary+1 or be linked.
But with registration way we allow multiple OPs reference the kernel bvecs. However
we have to deal with buffer ownership/lifetime carefully.
>
> That said the buffer should only be used by kernel, such as io_uring normal OPs.
> It is basically invisible for userspace,
>
> However, Xiaoguang's BPF might be one perfect supplement here[1], such as:
>
> - add one generic io_uring BPF OP, which can run one specified registered BPF
> program by passing bpf_prog_id
>
> - link this BPF OP as slave request of fused command, then the ebpf prog can do
> whatever on the kernel pages if kernel mapping & buffer read/write is allowed
> for ebpf prog, and results can be returned into user via any bpf mapping(s)
In Xiaoguang's ublk-EBPF design, we almost avoid userspace code/logic while
handling ublk io. So mix fused-cmd with ublk-EBPF may be a bad idea.
>
> - then userspace can decide how to handle the result from bpf mapping(s), such as,
> submit another fused command to handle IO with part of the kernel buffer.
>
> Also the buffer is io buffer, and its lifetime is pretty short, and register/
> unregister introduces unnecessary cost in fast io path for any approach.
I'm not sure the io buffer has a short lifetime in our product. :P In our product
we can first issue a very big request with a big io buffer. Then the backend
can parse&split it into pieces and distribute each piece to a specific socket_fd
representing a storage node. This big io buffer may have long lifetime.
Regards,
Zhang
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-29 6:57 ` Ziyang Zhang
@ 2023-03-29 8:52 ` Ming Lei
0 siblings, 0 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-29 8:52 UTC (permalink / raw)
To: Ziyang Zhang
Cc: Pavel Begunkov, Miklos Szeredi, Bernd Schubert, Jens Axboe,
Xiaoguang Wang, io-uring, linux-block, ming.lei
On Wed, Mar 29, 2023 at 02:57:38PM +0800, Ziyang Zhang wrote:
> On 2023/3/28 08:53, Ming Lei wrote:
> > Hi Ziyang,
> >
> > On Tue, Mar 21, 2023 at 05:17:56PM +0800, Ziyang Zhang wrote:
> >> On 2023/3/19 00:23, Pavel Begunkov wrote:
> >>> On 3/16/23 03:13, Xiaoguang Wang wrote:
> >>>>> Add IORING_OP_FUSED_CMD, it is one special URING_CMD, which has to
> >>>>> be SQE128. The 1st SQE(master) is one 64byte URING_CMD, and the 2nd
> >>>>> 64byte SQE(slave) is another normal 64byte OP. For any OP which needs
> >>>>> to support slave OP, io_issue_defs[op].fused_slave needs to be set as 1,
> >>>>> and its ->issue() can retrieve/import buffer from master request's
> >>>>> fused_cmd_kbuf. The slave OP is actually submitted from kernel, part of
> >>>>> this idea is from Xiaoguang's ublk ebpf patchset, but this patchset
> >>>>> submits slave OP just like normal OP issued from userspace, that said,
> >>>>> SQE order is kept, and batching handling is done too.
> >>>> Thanks for this great work, seems that we're now in the right direction
> >>>> to support ublk zero copy, I believe this feature will improve io throughput
> >>>> greatly and reduce ublk's cpu resource usage.
> >>>>
> >>>> I have gone through your 2th patch, and have some little concerns here:
> >>>> Say we have one ublk loop target device, but it has 4 backend files,
> >>>> every file will carry 25% of device capacity and it's implemented in stripped
> >>>> way, then for every io request, current implementation will need issed 4
> >>>> fused_cmd, right? 4 slave sqes are necessary, but it would be better to
> >>>> have just one master sqe, so I wonder whether we can have another
> >>>> method. The key point is to let io_uring support register various kernel
> >>>> memory objects, which come from kernel, such as ITER_BVEC or
> >>>> ITER_KVEC. so how about below actions:
> >>>> 1. add a new infrastructure in io_uring, which will support to register
> >>>> various kernel memory objects in it, this new infrastructure could be
> >>>> maintained in a xarray structure, every memory objects in it will have
> >>>> a unique id. This registration could be done in a ublk uring cmd, io_uring
> >>>> offers registration interface.
> >>>> 2. then any sqe can use these memory objects freely, so long as it
> >>>> passes above unique id in sqe properly.
> >>>> Above are just rough ideas, just for your reference.
> >>>
> >>> It precisely hints on what I proposed a bit earlier, that makes
> >>> me not alone thinking that it's a good idea to have a design allowing
> >>> 1) multiple ops using a buffer and 2) to limiting it to one single
> >>> submission because the userspace might want to preprocess a part
> >>> of the data, multiplex it or on the opposite divide. I was mostly
> >>> coming from non ublk cases, and one example would be such zc recv,
> >>> parsing the app level headers and redirecting the rest of the data
> >>> somewhere.
> >>>
> >>> I haven't got a chance to work on it but will return to it in
> >>> a week. The discussion was here:
> >>>
> >>> https://lore.kernel.org/all/[email protected]/
> >>>
> >>
> >> Hi Pavel and all,
> >>
> >> I think it is a good idea to register some kernel objects(such as bvec)
> >> in io_uring and return a cookie(such as buf_idx) for READ/WRITE/SEND/RECV sqes.
> >> There are some ways to register user's buffer such as IORING_OP_PROVIDE_BUFFERS
> >> and IORING_REGISTER_PBUF_RING but there is not a way to register kernel buffer(bvec).
> >>
> >> I do not think reusing splice is a good idea because splice should run in io-wq.
> >> If we have a big sq depth there may be lots of io-wqs. Then lots of context switch
> >> may lower the IO performance especially for small IO size.
> >
> > Agree, not only it is hard for splice to guarantee correctness of buffer lifetime,
> > but also it is much less efficient to support the feature in one very ugly way, not
> > mention Linus objects to extend splice wrt. buffer direction issue, see the reasoning
> > in my document:
> >
> > https://github.com/ming1/linux/blob/my_v6.3-io_uring_fuse_cmd_v4/Documentation/block/ublk.rst#zero-copy
> >
> >>
> >> Here are some rough ideas:
> >> (1) design a new OPCODE such as IORING_REGISTER_KOBJ to register kernel objects in
> >> io_uring or
> >> (2) reuse uring-cmd. We can send uring-cmd to drivers(opcode may be CMD_REGISTER_KBUF)
> >> and let drivers call io_uring_provide_kbuf() to register kbuf. io_uring_provide_kbuf()
> >> is a new function provided by io_uring for drivers.
> >> (3) let the driver call io_uring_provide_kbuf() directly. For ublk, this function is called
> >> before io_uring_cmd_done().
> >
> > Can you explain a bit which use cases you are trying to address by
> > registering kernel io buffer unmapped to userspace?
>
> Hi Ming,
>
> Sorry there is no specific use case. In our product, we have to calculate cksum
> or compress data before sending IO to remote backend. So Xiaoguang's EBPF might
> be the final solution... :) But I'd rather to start here...
If checksum calculation and compression are done in userspace, the current zero
copy can't help you because the fused command is for sharing ublk client
io buffer to io_uring OPs only. And userspace has to rely on data copy
for checksum & compression.
ebpf could help you, but that is still one big project, not sure if
current prog is allowed to get kernel mapping of pages and read/write
via the kernel mapping.
>
> I think you, Pavel and I all have the same basic idea: register the kernel object
> (bvec) first then incoming sqes can use it. But I think fused-cmd is too specific
> (hack) to ublk so other users of io_uring may not benefit from it.
fused command is actually one generic interface:
1) create relationship between primary command and secondary requests,
the current interface does support to setup 1:N relationship, and just
needs multiple secondary reqs following the primary command. If you
think following SQEs isn't flexible, you still can send multiple fused
requests with same primary cmd to relax the usage of following SQEs.
2) based on the above relationship, lots of thing can be done, sharing
buffer is just one function, it could be other kind of resource sharing.
The 'sharing' can be implemented as plugin way, such as passing
uring_command flags for specifying which kind of plugin is used.
I have re-organized code in my local repo in the above way.
> What if we design a general way which allows io_uring to register kernel objects
> (such as bvec) just like IORING_OP_PROVIDE_BUFFERS or IORING_REGISTER_PBUF_RING?
> Pavel said that registration replaces fuse master cmd. And I think so too.
The buffer belongs to device, not io_uring context. And the registration
isn't necessary, and not sure it is doable:
1) userspace hasn't buffer mapping, so can't use the buffer, you can't
calculate checksum and compress data by this registration
2) you just want to use the register id to build the relationship between
primary command and secondary OPs, but fused command can do it(see above)
because we want to solve buffer lifetime easily, fused command has same
lifetime with the buffer reference
3) not sure if the buffer registration is doable:
- only 1 sqe flags is left, how to distinguish normal fixed buffer
with this kind of registration?
- the buffer belongs to device, if you register it in userspace, you
have to unregister it in userspace since only userspace knows
when the buffer isn't needed. Then this buffer lifetime will cross
multiple OPs, what if the userspace is killed before unregistration.
So what is your real requirement for the buffer registration? I believe
fused command can solve requests relationship building(primary cmd vs.
secondary requests), which seems your only concern about buffer
registration.
>
> >
> > The buffer(request buffer, represented by bvec) are just bvecs, basically only
> > physical pages available, and the userspace does not have mapping(virtual address)
> > on this buffer and can't read/write the buffer, so I don't think it makes sense
> > to register the buffer somewhere for userspace, does it?
>
> The userspace does not touch these registered kernel bvecs, but reference it id.
> For example, we can set "sqe->kobj_id" so this sqe can import this bvec as its
> RW buffer just like IORING_OP_PROVIDE_BUFFERS.
>
> There is limitation on fused-cmd: secondary sqe has to be primary+1 or be linked.
> But with registration way we allow multiple OPs reference the kernel bvecs.
The interface in V5 actually starts to supports to 1:N relation between primary cmd
and secondary requests, but just implements 1:1 so far. It isn't hard to do 1:N.
Actually you can reach same purpose by sending multiple fused requests with same
primary req, and there shouldn't be performance effect since the primary command
handling is pretty thin(passing buffer reference).
> However
> we have to deal with buffer ownership/lifetime carefully.
That is one fundamental problem. If buffer is allowed to cross multiple
OPs, it can be hard to solve the lifetime issue. Not to mention it is less efficient
to add one extra buffer un-registration in the fast io path.
>
> >
> > That said the buffer should only be used by kernel, such as io_uring normal OPs.
> > It is basically invisible for userspace,
> >
> > However, Xiaoguang's BPF might be one perfect supplement here[1], such as:
> >
> > - add one generic io_uring BPF OP, which can run one specified registered BPF
> > program by passing bpf_prog_id
> >
> > - link this BPF OP as slave request of fused command, then the ebpf prog can do
> > whatever on the kernel pages if kernel mapping & buffer read/write is allowed
> > for ebpf prog, and results can be returned into user via any bpf mapping(s)
>
> In Xiaoguang's ublk-EBPF design, we almost avoid userspace code/logic while
> handling ublk io. So mix fused-cmd with ublk-EBPF may be a bad idea.
What I meant is to add io_uring generic ebpf OP, that isn't ublk dedicated ebpf.
The generic io_uring ebpf OP is for supporting encryption, checksum, or
simple packet parsing, sort of thing, because the bvec buffer doesn't
have userspace mapping, and we want to avoid to copy data to userspace for
calculating checksum, encryption, ...
>
> >
> > - then userspace can decide how to handle the result from bpf mapping(s), such as,
> > submit another fused command to handle IO with part of the kernel buffer.
> >
> > Also the buffer is io buffer, and its lifetime is pretty short, and register/
> > unregister introduces unnecessary cost in fast io path for any approach.
>
> I'm not sure the io buffer has short lifetime in our product. :P In our product
> we can first issue a very big request with a big io buffer. Then the backend
> can parse&split it into pieces and distribute each piece to a specific socket_fd
> representing a storage node. This big io buffer may have long lifetime.
The short just means it is in fast io path, not like io_uring fixed buffer which
needs to register just once. IO handling is really fast, otherwise it isn't necessary
to consider zero copy at all.
So we do care about the performance effect of any unnecessary operation (such
as buffer unregistration).
Thanks,
Ming
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-18 16:23 ` Pavel Begunkov
2023-03-18 16:39 ` Ming Lei
2023-03-21 9:17 ` Ziyang Zhang
@ 2023-03-25 14:15 ` Ming Lei
2 siblings, 0 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-25 14:15 UTC (permalink / raw)
To: Pavel Begunkov
Cc: Xiaoguang Wang, Jens Axboe, io-uring, linux-block, Miklos Szeredi,
ZiyangZhang, Bernd Schubert, ming.lei
On Sat, Mar 18, 2023 at 04:23:35PM +0000, Pavel Begunkov wrote:
> On 3/16/23 03:13, Xiaoguang Wang wrote:
> > > Add IORING_OP_FUSED_CMD, it is one special URING_CMD, which has to
> > > be SQE128. The 1st SQE(master) is one 64byte URING_CMD, and the 2nd
> > > 64byte SQE(slave) is another normal 64byte OP. For any OP which needs
> > > to support slave OP, io_issue_defs[op].fused_slave needs to be set as 1,
> > > and its ->issue() can retrieve/import buffer from master request's
> > > fused_cmd_kbuf. The slave OP is actually submitted from kernel, part of
> > > this idea is from Xiaoguang's ublk ebpf patchset, but this patchset
> > > submits slave OP just like normal OP issued from userspace, that said,
> > > SQE order is kept, and batching handling is done too.
> > Thanks for this great work, seems that we're now in the right direction
> > to support ublk zero copy, I believe this feature will improve io throughput
> > greatly and reduce ublk's cpu resource usage.
> >
> > I have gone through your 2th patch, and have some little concerns here:
> > Say we have one ublk loop target device, but it has 4 backend files,
> > every file will carry 25% of device capacity and it's implemented in stripped
> > way, then for every io request, current implementation will need issed 4
> > fused_cmd, right? 4 slave sqes are necessary, but it would be better to
> > have just one master sqe, so I wonder whether we can have another
> > method. The key point is to let io_uring support register various kernel
> > memory objects, which come from kernel, such as ITER_BVEC or
> > ITER_KVEC. so how about below actions:
> > 1. add a new infrastructure in io_uring, which will support to register
> > various kernel memory objects in it, this new infrastructure could be
> > maintained in a xarray structure, every memory objects in it will have
> > a unique id. This registration could be done in a ublk uring cmd, io_uring
> > offers registration interface.
> > 2. then any sqe can use these memory objects freely, so long as it
> > passes above unique id in sqe properly.
> > Above are just rough ideas, just for your reference.
>
> It precisely hints on what I proposed a bit earlier, that makes
> me not alone thinking that it's a good idea to have a design allowing
> 1) multiple ops using a buffer and 2) to limiting it to one single
> submission because the userspace might want to preprocess a part
> of the data, multiplex it or on the opposite divide. I was mostly
> coming from non ublk cases, and one example would be such zc recv,
> parsing the app level headers and redirecting the rest of the data
> somewhere.
Just get some time to think about zc recv.
Firstly I understand the buffer shouldn't be provided from userspace unlike
storage, given network recv can happen any time, and NIC driver has to put
received data into kernel socket recv buffer first. But if yes for some special recv
case, the use case is totally different with ublk, and impossible to share
any code with ublk.
So here suppose the zc recv means to export socket recv buffer to userspace
just like the implementation in lwn doc [1].
[1] https://lwn.net/Articles/752188/
But how does userspace pre-process this kernel buffer? mmap is expensive,
and copy won't be one option. Or the data is just simply forwarded to
somewhere(special case)?
If yes, it can become a bit similar with ublk's case in which
the device io buffer needn't to be modified and just simply forwarded to
FS or socket in most of cases. Then it could be possible to extend fused
for supporting it given the buffer lifetime model is useful for generic zero
copy, such as:
- send fused command(A) to just register buffer(socket recv buffer) with one
empty buffer index, then return the buffer index to userspace via CQE(
IORING_CQE_F_MORE), but not complete this fused command(A); but it
requires socket FS to implement ->uring_command() for providing recv
buffer.
- after getting recv SQE, userspace can use the registered buffer to
do whatever, but direct access on buffer is one problem, since it is
simply pages which have to be mapped for handling from userspace
- after userspace handles everything(includes net send over this buffer) on
the recv buffer, send another fused command or new OP to ask kernel to
release buffer by completing fused command(A). However, one corner case
is that this fuse command needs to be completed automatically when
io_uring exits since app is dead at that time.
It should be easy to extend fused command in above way(slave less)
since V4 starts to support normal 64byte SQE, and we have enough uring
command flags.
But not sure if that is what you need. If not, please explain a bit
your exact requirement.
>
> I haven't got a chance to work on it but will return to it in
> a week. The discussion was here:
>
> https://lore.kernel.org/all/[email protected]/
Thanks,
Ming
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-14 12:57 [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD Ming Lei
` (16 preceding siblings ...)
2023-03-16 3:13 ` [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD Xiaoguang Wang
@ 2023-03-17 8:14 ` Ming Lei
2023-03-18 12:59 ` Jens Axboe
2023-03-18 16:09 ` Jens Axboe
2023-03-21 15:56 ` Ming Lei
19 siblings, 1 reply; 49+ messages in thread
From: Ming Lei @ 2023-03-17 8:14 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-block
Cc: Miklos Szeredi, ZiyangZhang, Xiaoguang Wang, Bernd Schubert,
Pavel Begunkov, ming.lei
On Tue, Mar 14, 2023 at 08:57:11PM +0800, Ming Lei wrote:
> Hello,
>
> Add IORING_OP_FUSED_CMD, it is one special URING_CMD, which has to
> be SQE128. The 1st SQE(master) is one 64byte URING_CMD, and the 2nd
> 64byte SQE(slave) is another normal 64byte OP. For any OP which needs
> to support slave OP, io_issue_defs[op].fused_slave needs to be set as 1,
> and its ->issue() can retrieve/import buffer from master request's
> fused_cmd_kbuf. The slave OP is actually submitted from kernel, part of
> this idea is from Xiaoguang's ublk ebpf patchset, but this patchset
> submits slave OP just like normal OP issued from userspace, that said,
> SQE order is kept, and batching handling is done too.
>
> Please see detailed design in commit log of the 2th patch, and one big
> point is how to handle buffer ownership.
>
> With this way, it is easy to support zero copy for ublk/fuse device.
>
> Basically userspace can specify any sub-buffer of the ublk block request
> buffer from the fused command just by setting 'offset/len'
> in the slave SQE for running slave OP. This way is flexible to implement
> io mapping: mirror, stripped, ...
>
> The 3th & 4th patches enable fused slave support for the following OPs:
>
> OP_READ/OP_WRITE
> OP_SEND/OP_RECV/OP_SEND_ZC
>
> The other ublk patches cleans ublk driver and implement fused command
> for supporting zero copy.
>
> Follows userspace code:
>
> https://github.com/ming1/ubdsrv/tree/fused-cmd-zc-v2
>
> All three(loop, nbd and qcow2) ublk targets have supported zero copy by passing:
>
> ublk add -t [loop|nbd|qcow2] -z ....
>
> Basic fs mount/kernel building and builtin test are done, and also not
> observe regression on xfstest test over ublk-loop with zero copy.
>
> Also add liburing test case for covering fused command based on miniublk
> of blktest:
>
> https://github.com/ming1/liburing/commits/fused_cmd_miniublk
>
> Performance improvement is obvious on memory bandwidth
> related workloads, such as, 1~2X improvement on 64K/512K BS
> IO test on loop with ramfs backing file.
>
> Any comments are welcome!
>
> V3:
> - fix build warning reported by kernel test robot
> - drop patch for checking fused flags on existed drivers with
> ->uring_command(), which isn't necessary, since we do not do that
> when adding new ioctl or uring command
> - inline io_init_rq() for core code, so just export io_init_slave_req
> - return result of failed slave request unconditionally since REQ_F_CQE_SKIP
> will be cleared
> - pass xfstest over ublk-loop
Hello Jens and Guys,
I have been working on io_uring zero copy support for ublk/fuse for a while, and
I would appreciate it if you could share any thoughts on this patchset or approach.
Thanks,
Ming
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-17 8:14 ` Ming Lei
@ 2023-03-18 12:59 ` Jens Axboe
2023-03-18 13:35 ` Ming Lei
0 siblings, 1 reply; 49+ messages in thread
From: Jens Axboe @ 2023-03-18 12:59 UTC (permalink / raw)
To: Ming Lei, io-uring, linux-block
Cc: Miklos Szeredi, ZiyangZhang, Xiaoguang Wang, Bernd Schubert,
Pavel Begunkov
On 3/17/23 2:14?AM, Ming Lei wrote:
> On Tue, Mar 14, 2023 at 08:57:11PM +0800, Ming Lei wrote:
>> Hello,
>>
>> Add IORING_OP_FUSED_CMD, it is one special URING_CMD, which has to
>> be SQE128. The 1st SQE(master) is one 64byte URING_CMD, and the 2nd
>> 64byte SQE(slave) is another normal 64byte OP. For any OP which needs
>> to support slave OP, io_issue_defs[op].fused_slave needs to be set as 1,
>> and its ->issue() can retrieve/import buffer from master request's
>> fused_cmd_kbuf. The slave OP is actually submitted from kernel, part of
>> this idea is from Xiaoguang's ublk ebpf patchset, but this patchset
>> submits slave OP just like normal OP issued from userspace, that said,
>> SQE order is kept, and batching handling is done too.
>>
>> Please see detailed design in commit log of the 2th patch, and one big
>> point is how to handle buffer ownership.
>>
>> With this way, it is easy to support zero copy for ublk/fuse device.
>>
>> Basically userspace can specify any sub-buffer of the ublk block request
>> buffer from the fused command just by setting 'offset/len'
>> in the slave SQE for running slave OP. This way is flexible to implement
>> io mapping: mirror, stripped, ...
>>
>> The 3th & 4th patches enable fused slave support for the following OPs:
>>
>> OP_READ/OP_WRITE
>> OP_SEND/OP_RECV/OP_SEND_ZC
>>
>> The other ublk patches cleans ublk driver and implement fused command
>> for supporting zero copy.
>>
>> Follows userspace code:
>>
>> https://github.com/ming1/ubdsrv/tree/fused-cmd-zc-v2
>>
>> All three(loop, nbd and qcow2) ublk targets have supported zero copy by passing:
>>
>> ublk add -t [loop|nbd|qcow2] -z ....
>>
>> Basic fs mount/kernel building and builtin test are done, and also not
>> observe regression on xfstest test over ublk-loop with zero copy.
>>
>> Also add liburing test case for covering fused command based on miniublk
>> of blktest:
>>
>> https://github.com/ming1/liburing/commits/fused_cmd_miniublk
>>
>> Performance improvement is obvious on memory bandwidth
>> related workloads, such as, 1~2X improvement on 64K/512K BS
>> IO test on loop with ramfs backing file.
>>
>> Any comments are welcome!
>>
>> V3:
>> - fix build warning reported by kernel test robot
>> - drop patch for checking fused flags on existed drivers with
>> ->uring_command(), which isn't necessary, since we do not do that
>> when adding new ioctl or uring command
>> - inline io_init_rq() for core code, so just export io_init_slave_req
>> - return result of failed slave request unconditionally since REQ_F_CQE_SKIP
>> will be cleared
>> - pass xfstest over ublk-loop
>
> Hello Jens and Guys,
>
> I have been working on io_uring zero copy support for ublk/fuse for a while, and
> I appreciate you may share any thoughts on this patchset or approach?
I'm a bit split on this one, as I really like (and want) the feature.
ublk has become popular pretty quickly, and it makes a LOT of sense to
support zero copy for it. At the same time, I'm not really a huge fan of
the fused commands... They seem too specialized to be useful for other
things, and it'd be a shame to do something like that only for it later
to be replaced by a generic solution. And then we're stuck with
supporting fused commands forever, not sure I like that prospect.
Both Pavel and Xiaoguang voiced similar concerns, and I think it may be
worth spending a bit more time on figuring out if splice can help us
here. David Howells currently has a lot going on in that area too.
So while I'd love to see this feature get queued up right now, I also
don't want to prematurely do so. Can we split out the fixes from this
series into a separate series that we can queue up now? That would also
help shrink the patchset, which is always a win for review.
--
Jens Axboe
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-18 12:59 ` Jens Axboe
@ 2023-03-18 13:35 ` Ming Lei
2023-03-18 14:36 ` Jens Axboe
2023-03-18 16:51 ` Pavel Begunkov
0 siblings, 2 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-18 13:35 UTC (permalink / raw)
To: Jens Axboe
Cc: io-uring, linux-block, Miklos Szeredi, ZiyangZhang,
Xiaoguang Wang, Bernd Schubert, Pavel Begunkov, ming.lei
Hi Jens,
Thanks for the response!
On Sat, Mar 18, 2023 at 06:59:41AM -0600, Jens Axboe wrote:
> On 3/17/23 2:14?AM, Ming Lei wrote:
> > On Tue, Mar 14, 2023 at 08:57:11PM +0800, Ming Lei wrote:
> >> Hello,
> >>
> >> Add IORING_OP_FUSED_CMD, it is one special URING_CMD, which has to
> >> be SQE128. The 1st SQE(master) is one 64byte URING_CMD, and the 2nd
> >> 64byte SQE(slave) is another normal 64byte OP. For any OP which needs
> >> to support slave OP, io_issue_defs[op].fused_slave needs to be set as 1,
> >> and its ->issue() can retrieve/import buffer from master request's
> >> fused_cmd_kbuf. The slave OP is actually submitted from kernel, part of
> >> this idea is from Xiaoguang's ublk ebpf patchset, but this patchset
> >> submits slave OP just like normal OP issued from userspace, that said,
> >> SQE order is kept, and batching handling is done too.
> >>
> >> Please see detailed design in commit log of the 2th patch, and one big
> >> point is how to handle buffer ownership.
> >>
> >> With this way, it is easy to support zero copy for ublk/fuse device.
> >>
> >> Basically userspace can specify any sub-buffer of the ublk block request
> >> buffer from the fused command just by setting 'offset/len'
> >> in the slave SQE for running slave OP. This way is flexible to implement
> >> io mapping: mirror, stripped, ...
> >>
> >> The 3th & 4th patches enable fused slave support for the following OPs:
> >>
> >> OP_READ/OP_WRITE
> >> OP_SEND/OP_RECV/OP_SEND_ZC
> >>
> >> The other ublk patches cleans ublk driver and implement fused command
> >> for supporting zero copy.
> >>
> >> Follows userspace code:
> >>
> >> https://github.com/ming1/ubdsrv/tree/fused-cmd-zc-v2
> >>
> >> All three(loop, nbd and qcow2) ublk targets have supported zero copy by passing:
> >>
> >> ublk add -t [loop|nbd|qcow2] -z ....
> >>
> >> Basic fs mount/kernel building and builtin test are done, and also not
> >> observe regression on xfstest test over ublk-loop with zero copy.
> >>
> >> Also add liburing test case for covering fused command based on miniublk
> >> of blktest:
> >>
> >> https://github.com/ming1/liburing/commits/fused_cmd_miniublk
> >>
> >> Performance improvement is obvious on memory bandwidth
> >> related workloads, such as, 1~2X improvement on 64K/512K BS
> >> IO test on loop with ramfs backing file.
> >>
> >> Any comments are welcome!
> >>
> >> V3:
> >> - fix build warning reported by kernel test robot
> >> - drop patch for checking fused flags on existed drivers with
> >> ->uring_command(), which isn't necessary, since we do not do that
> >> when adding new ioctl or uring command
> >> - inline io_init_rq() for core code, so just export io_init_slave_req
> >> - return result of failed slave request unconditionally since REQ_F_CQE_SKIP
> >> will be cleared
> >> - pass xfstest over ublk-loop
> >
> > Hello Jens and Guys,
> >
> > I have been working on io_uring zero copy support for ublk/fuse for a while, and
> > I appreciate you may share any thoughts on this patchset or approach?
>
> I'm a bit split on this one, as I really like (and want) the feature.
> ublk has become popular pretty quickly, and it makes a LOT of sense to
> support zero copy for it. At the same time, I'm not really a huge fan of
> the fused commands... They seem too specialized to be useful for other
> things, and it'd be a shame to do something like that only for it later
> to be replaced by a generic solution. And then we're stuck with
> supporting fused commands forever, not sure I like that prospect.
>
> Both Pavel and Xiaoguang voiced similar concerns, and I think it may be
> worth spending a bit more time on figuring out if splice can help us
> here. David Howells currently has a lot going on in that area too.
IMO, splice (->splice_read()) can help much less in this use case, and
I can't see any improvement David Howells has made in this area that would help:
1) we need to pass reference of the whole buffer from driver to io_uring,
which is missed in splice, which just deals with page reference; for
passing whole buffer reference, we have to apply per buffer pipe to
solve the problem, and this way is expensive since the pipe can't
be freed until all buffers are consumed.
2) reference can't outlive the whole buffer, and splice still misses
mechanism to provide such guarantee; splice can just make sure that
page won't be gone if page reference is grabbed, but here we care
more the whole buffer & its (shared)references lifetime
3) current ->splice_read() misses capability to provide writeable
reference to spliced page[2]; either we have to pass new flags
to ->splice_read() or passing back new pipe buf flags, unfortunately
Linus thought it isn't good to extend pipe/splice for such a purpose,
and I now agree with Linus.
I believe that Pavel has realized this point[3] too, and here the only
value of using a pipe is to reuse ->splice_read(); however, the above
points show that ->splice_read() isn't good for this purpose.
[1] https://lore.kernel.org/linux-block/ZAk5%[email protected]/
[2] https://lore.kernel.org/linux-block/CAJfpeguQ3xn2-6svkkVXJ88tiVfcDd-eKi1evzzfvu305fMoyw@mail.gmail.com/
[3] https://lore.kernel.org/linux-block/[email protected]/
>
> So while I'd love to see this feature get queued up right now, I also
> don't want to prematurely do so. Can we split out the fixes from this
> series into a separate series that we can queue up now? That would also
> help shrink the patchset, which is always a win for review.
There is only one fix(patch 5), and the real part is actually the 1st 4
patches.
I will separate patch 5 from the whole patchset and send out soon, and will
post out this patchset v4 by improving document for explaining how fused
command solves this problem in one safe & efficient way.
thanks,
Ming
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-18 13:35 ` Ming Lei
@ 2023-03-18 14:36 ` Jens Axboe
2023-03-18 15:06 ` Ming Lei
2023-03-18 16:51 ` Pavel Begunkov
1 sibling, 1 reply; 49+ messages in thread
From: Jens Axboe @ 2023-03-18 14:36 UTC (permalink / raw)
To: Ming Lei
Cc: io-uring, linux-block, Miklos Szeredi, ZiyangZhang,
Xiaoguang Wang, Bernd Schubert, Pavel Begunkov
On 3/18/23 7:35?AM, Ming Lei wrote:
> Hi Jens,
>
> Thanks for the response!
>
> On Sat, Mar 18, 2023 at 06:59:41AM -0600, Jens Axboe wrote:
>> On 3/17/23 2:14?AM, Ming Lei wrote:
>>> On Tue, Mar 14, 2023 at 08:57:11PM +0800, Ming Lei wrote:
>>>> Hello,
>>>>
>>>> Add IORING_OP_FUSED_CMD, it is one special URING_CMD, which has to
>>>> be SQE128. The 1st SQE(master) is one 64byte URING_CMD, and the 2nd
>>>> 64byte SQE(slave) is another normal 64byte OP. For any OP which needs
>>>> to support slave OP, io_issue_defs[op].fused_slave needs to be set as 1,
>>>> and its ->issue() can retrieve/import buffer from master request's
>>>> fused_cmd_kbuf. The slave OP is actually submitted from kernel, part of
>>>> this idea is from Xiaoguang's ublk ebpf patchset, but this patchset
>>>> submits slave OP just like normal OP issued from userspace, that said,
>>>> SQE order is kept, and batching handling is done too.
>>>>
>>>> Please see detailed design in commit log of the 2th patch, and one big
>>>> point is how to handle buffer ownership.
>>>>
>>>> With this way, it is easy to support zero copy for ublk/fuse device.
>>>>
>>>> Basically userspace can specify any sub-buffer of the ublk block request
>>>> buffer from the fused command just by setting 'offset/len'
>>>> in the slave SQE for running slave OP. This way is flexible to implement
>>>> io mapping: mirror, stripped, ...
>>>>
>>>> The 3th & 4th patches enable fused slave support for the following OPs:
>>>>
>>>> OP_READ/OP_WRITE
>>>> OP_SEND/OP_RECV/OP_SEND_ZC
>>>>
>>>> The other ublk patches cleans ublk driver and implement fused command
>>>> for supporting zero copy.
>>>>
>>>> Follows userspace code:
>>>>
>>>> https://github.com/ming1/ubdsrv/tree/fused-cmd-zc-v2
>>>>
>>>> All three(loop, nbd and qcow2) ublk targets have supported zero copy by passing:
>>>>
>>>> ublk add -t [loop|nbd|qcow2] -z ....
>>>>
>>>> Basic fs mount/kernel building and builtin test are done, and also not
>>>> observe regression on xfstest test over ublk-loop with zero copy.
>>>>
>>>> Also add liburing test case for covering fused command based on miniublk
>>>> of blktest:
>>>>
>>>> https://github.com/ming1/liburing/commits/fused_cmd_miniublk
>>>>
>>>> Performance improvement is obvious on memory bandwidth
>>>> related workloads, such as, 1~2X improvement on 64K/512K BS
>>>> IO test on loop with ramfs backing file.
>>>>
>>>> Any comments are welcome!
>>>>
>>>> V3:
>>>> - fix build warning reported by kernel test robot
>>>> - drop patch for checking fused flags on existed drivers with
>>>> ->uring_command(), which isn't necessary, since we do not do that
>>>> when adding new ioctl or uring command
>>>> - inline io_init_rq() for core code, so just export io_init_slave_req
>>>> - return result of failed slave request unconditionally since REQ_F_CQE_SKIP
>>>> will be cleared
>>>> - pass xfstest over ublk-loop
>>>
>>> Hello Jens and Guys,
>>>
>>> I have been working on io_uring zero copy support for ublk/fuse for a while, and
>>> I appreciate you may share any thoughts on this patchset or approach?
>>
>> I'm a bit split on this one, as I really like (and want) the feature.
>> ublk has become popular pretty quickly, and it makes a LOT of sense to
>> support zero copy for it. At the same time, I'm not really a huge fan of
>> the fused commands... They seem too specialized to be useful for other
>> things, and it'd be a shame to do something like that only for it later
>> to be replaced by a generic solution. And then we're stuck with
>> supporting fused commands forever, not sure I like that prospect.
>>
>> Both Pavel and Xiaoguang voiced similar concerns, and I think it may be
>> worth spending a bit more time on figuring out if splice can help us
>> here. David Howells currently has a lot going on in that area too.
>
> IMO, splice(->splice_read()) can help much less in this use case, and
> I can't see improvement David Howells has done in this area:
>
> 1) we need to pass reference of the whole buffer from driver to io_uring,
> which is missed in splice, which just deals with page reference; for
> passing whole buffer reference, we have to apply per buffer pipe to
> solve the problem, and this way is expensive since the pipe can't
> be freed until all buffers are consumed.
>
> 2) reference can't outlive the whole buffer, and splice still misses
> mechanism to provide such guarantee; splice can just make sure that
> page won't be gone if page reference is grabbed, but here we care
> more the whole buffer & its (shared)references lifetime
>
> 3) current ->splice_read() misses capability to provide writeable
> reference to spliced page[2]; either we have to pass new flags
> to ->splice_read() or passing back new pipe buf flags, unfortunately
> Linus thought it isn't good to extend pipe/splice for such purpose,
> and now I agree with Linus now.
>
> I believe that Pavel has realized this point[3] too, and here the only
> of value of using pipe is to reuse ->splice_read(), however, the above
> points show that ->splice_read() isn't good at this purpose.
>
>
> [1] https://lore.kernel.org/linux-block/ZAk5%[email protected]/
> [2] https://lore.kernel.org/linux-block/CAJfpeguQ3xn2-6svkkVXJ88tiVfcDd-eKi1evzzfvu305fMoyw@mail.gmail.com/
> [3] https://lore.kernel.org/linux-block/[email protected]/
splice is just one idea, but I do wonder if there's a way to express
this relationship (and buffer handovers) that doesn't involve needing
these odd kind of fused commands where they must be submitted as one big
sqe, but really are two normal ones. BPF is obviously one way, and maybe
we'll do BPF with io_uring at some point, but it makes things rather
more complicated to use and I'd prefer to avoid it if we can.
I'll take a closer look at the patches.
>> So while I'd love to see this feature get queued up right now, I also
>> don't want to prematurely do so. Can we split out the fixes from this
>> series into a separate series that we can queue up now? That would also
>> help shrink the patchset, which is always a win for review.
>
> There is only one fix(patch 5), and the real part is actually the 1st 4
> patches.
>
> I will separate patch 5 from the whole patchset and send out soon, and will
> post out this patchset v4 by improving document for explaining how fused
> command solves this problem in one safe & efficient way.
Thanks, did get that one now and applied it.
--
Jens Axboe
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-18 14:36 ` Jens Axboe
@ 2023-03-18 15:06 ` Ming Lei
0 siblings, 0 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-18 15:06 UTC (permalink / raw)
To: Jens Axboe
Cc: io-uring, linux-block, Miklos Szeredi, ZiyangZhang,
Xiaoguang Wang, Bernd Schubert, Pavel Begunkov, ming.lei
On Sat, Mar 18, 2023 at 08:36:37AM -0600, Jens Axboe wrote:
> On 3/18/23 7:35?AM, Ming Lei wrote:
> > Hi Jens,
> >
> > Thanks for the response!
> >
> > On Sat, Mar 18, 2023 at 06:59:41AM -0600, Jens Axboe wrote:
> >> On 3/17/23 2:14?AM, Ming Lei wrote:
> >>> On Tue, Mar 14, 2023 at 08:57:11PM +0800, Ming Lei wrote:
> >>>> Hello,
> >>>>
> >>>> Add IORING_OP_FUSED_CMD, it is one special URING_CMD, which has to
> >>>> be SQE128. The 1st SQE(master) is one 64byte URING_CMD, and the 2nd
> >>>> 64byte SQE(slave) is another normal 64byte OP. For any OP which needs
> >>>> to support slave OP, io_issue_defs[op].fused_slave needs to be set as 1,
> >>>> and its ->issue() can retrieve/import buffer from master request's
> >>>> fused_cmd_kbuf. The slave OP is actually submitted from kernel, part of
> >>>> this idea is from Xiaoguang's ublk ebpf patchset, but this patchset
> >>>> submits slave OP just like normal OP issued from userspace, that said,
> >>>> SQE order is kept, and batching handling is done too.
> >>>>
> >>>> Please see detailed design in commit log of the 2th patch, and one big
> >>>> point is how to handle buffer ownership.
> >>>>
> >>>> With this way, it is easy to support zero copy for ublk/fuse device.
> >>>>
> >>>> Basically userspace can specify any sub-buffer of the ublk block request
> >>>> buffer from the fused command just by setting 'offset/len'
> >>>> in the slave SQE for running slave OP. This way is flexible to implement
> >>>> io mapping: mirror, stripped, ...
> >>>>
> >>>> The 3th & 4th patches enable fused slave support for the following OPs:
> >>>>
> >>>> OP_READ/OP_WRITE
> >>>> OP_SEND/OP_RECV/OP_SEND_ZC
> >>>>
> >>>> The other ublk patches cleans ublk driver and implement fused command
> >>>> for supporting zero copy.
> >>>>
> >>>> Follows userspace code:
> >>>>
> >>>> https://github.com/ming1/ubdsrv/tree/fused-cmd-zc-v2
> >>>>
> >>>> All three(loop, nbd and qcow2) ublk targets have supported zero copy by passing:
> >>>>
> >>>> ublk add -t [loop|nbd|qcow2] -z ....
> >>>>
> >>>> Basic fs mount/kernel building and builtin test are done, and also not
> >>>> observe regression on xfstest test over ublk-loop with zero copy.
> >>>>
> >>>> Also add liburing test case for covering fused command based on miniublk
> >>>> of blktest:
> >>>>
> >>>> https://github.com/ming1/liburing/commits/fused_cmd_miniublk
> >>>>
> >>>> Performance improvement is obvious on memory bandwidth
> >>>> related workloads, such as, 1~2X improvement on 64K/512K BS
> >>>> IO test on loop with ramfs backing file.
> >>>>
> >>>> Any comments are welcome!
> >>>>
> >>>> V3:
> >>>> - fix build warning reported by kernel test robot
> >>>> - drop patch for checking fused flags on existed drivers with
> >>>> ->uring_command(), which isn't necessary, since we do not do that
> >>>> when adding new ioctl or uring command
> >>>> - inline io_init_rq() for core code, so just export io_init_slave_req
> >>>> - return result of failed slave request unconditionally since REQ_F_CQE_SKIP
> >>>> will be cleared
> >>>> - pass xfstest over ublk-loop
> >>>
> >>> Hello Jens and Guys,
> >>>
> >>> I have been working on io_uring zero copy support for ublk/fuse for a while, and
> >>> I appreciate you may share any thoughts on this patchset or approach?
> >>
> >> I'm a bit split on this one, as I really like (and want) the feature.
> >> ublk has become popular pretty quickly, and it makes a LOT of sense to
> >> support zero copy for it. At the same time, I'm not really a huge fan of
> >> the fused commands... They seem too specialized to be useful for other
> >> things, and it'd be a shame to do something like that only for it later
> >> to be replaced by a generic solution. And then we're stuck with
> >> supporting fused commands forever, not sure I like that prospect.
> >>
> >> Both Pavel and Xiaoguang voiced similar concerns, and I think it may be
> >> worth spending a bit more time on figuring out if splice can help us
> >> here. David Howells currently has a lot going on in that area too.
> >
> > IMO, splice(->splice_read()) can help much less in this use case, and
> > I can't see improvement David Howells has done in this area:
> >
> > 1) we need to pass reference of the whole buffer from driver to io_uring,
> > which is missed in splice, which just deals with page reference; for
> > passing whole buffer reference, we have to apply per buffer pipe to
> > solve the problem, and this way is expensive since the pipe can't
> > be freed until all buffers are consumed.
> >
> > 2) reference can't outlive the whole buffer, and splice still misses
> > mechanism to provide such guarantee; splice can just make sure that
> > page won't be gone if page reference is grabbed, but here we care
> > more the whole buffer & its (shared)references lifetime
> >
> > 3) current ->splice_read() misses capability to provide writeable
> > reference to spliced page[2]; either we have to pass new flags
> > to ->splice_read() or passing back new pipe buf flags, unfortunately
> > Linus thought it isn't good to extend pipe/splice for such purpose,
> > and now I agree with Linus now.
> >
> > I believe that Pavel has realized this point[3] too, and here the only
> > of value of using pipe is to reuse ->splice_read(), however, the above
> > points show that ->splice_read() isn't good at this purpose.
> >
> >
> > [1] https://lore.kernel.org/linux-block/ZAk5%[email protected]/
> > [2] https://lore.kernel.org/linux-block/CAJfpeguQ3xn2-6svkkVXJ88tiVfcDd-eKi1evzzfvu305fMoyw@mail.gmail.com/
> > [3] https://lore.kernel.org/linux-block/[email protected]/
>
> splice is just one idea, but I do wonder if there's a way to express
> this relationship (and buffer handovers) that doesn't involve needing
> these odd kind of fused commands where they must be submitted as one big
> sqe, but really are two normal ones. BPF is obviously one way, and maybe
The problem can't be solved in a single normal SQE, so it requires either
two normal SQEs or a single big one.
I thought of using two SQEs:
1) the 1st SQE has to be the uring command for providing buffer by
reference
2) the 2nd one consumes the buffer reference, so it depends on 1st SQE
3) the 1st SQE has to be completed after the 2nd one is done because
the reference (io_uring_bvec_buf) can't outlive the value (buffer, or bvec),
so the existing IO_LINK can't handle this problem simply
That is why I take a single big SQE, which can:
1) meet the buffer & its reference lifetime requirement
2) handle the dependence between the uring command providing the buffer and
the normal OP which consumes the provided buffer reference
> we'll do BPF with io_uring at some point, but it makes things rather
> more complicated to use and I'd prefer to avoid it if we can.
Agree.
>
> I'll take a closer look at the patches.
Thanks!
--
Ming
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-18 13:35 ` Ming Lei
2023-03-18 14:36 ` Jens Axboe
@ 2023-03-18 16:51 ` Pavel Begunkov
2023-03-18 23:42 ` Ming Lei
1 sibling, 1 reply; 49+ messages in thread
From: Pavel Begunkov @ 2023-03-18 16:51 UTC (permalink / raw)
To: Ming Lei, Jens Axboe
Cc: io-uring, linux-block, Miklos Szeredi, ZiyangZhang,
Xiaoguang Wang, Bernd Schubert
On 3/18/23 13:35, Ming Lei wrote:
> Hi Jens,
>
> Thanks for the response!
>
> On Sat, Mar 18, 2023 at 06:59:41AM -0600, Jens Axboe wrote:
>> On 3/17/23 2:14?AM, Ming Lei wrote:
>>> On Tue, Mar 14, 2023 at 08:57:11PM +0800, Ming Lei wrote:
>>>> Hello,
>>>>
>>>> Add IORING_OP_FUSED_CMD, it is one special URING_CMD, which has to
>>>> be SQE128. The 1st SQE(master) is one 64byte URING_CMD, and the 2nd
>>>> 64byte SQE(slave) is another normal 64byte OP. For any OP which needs
>>>> to support slave OP, io_issue_defs[op].fused_slave needs to be set as 1,
>>>> and its ->issue() can retrieve/import buffer from master request's
>>>> fused_cmd_kbuf. The slave OP is actually submitted from kernel, part of
>>>> this idea is from Xiaoguang's ublk ebpf patchset, but this patchset
>>>> submits slave OP just like normal OP issued from userspace, that said,
>>>> SQE order is kept, and batching handling is done too.
>>>>
>>>> Please see detailed design in commit log of the 2th patch, and one big
>>>> point is how to handle buffer ownership.
>>>>
>>>> With this way, it is easy to support zero copy for ublk/fuse device.
>>>>
>>>> Basically userspace can specify any sub-buffer of the ublk block request
>>>> buffer from the fused command just by setting 'offset/len'
>>>> in the slave SQE for running slave OP. This way is flexible to implement
>>>> io mapping: mirror, stripped, ...
>>>>
>>>> The 3th & 4th patches enable fused slave support for the following OPs:
>>>>
>>>> OP_READ/OP_WRITE
>>>> OP_SEND/OP_RECV/OP_SEND_ZC
>>>>
>>>> The other ublk patches cleans ublk driver and implement fused command
>>>> for supporting zero copy.
>>>>
>>>> Follows userspace code:
>>>>
>>>> https://github.com/ming1/ubdsrv/tree/fused-cmd-zc-v2
>>>>
>>>> All three(loop, nbd and qcow2) ublk targets have supported zero copy by passing:
>>>>
>>>> ublk add -t [loop|nbd|qcow2] -z ....
>>>>
>>>> Basic fs mount/kernel building and builtin test are done, and also not
>>>> observe regression on xfstest test over ublk-loop with zero copy.
>>>>
>>>> Also add liburing test case for covering fused command based on miniublk
>>>> of blktest:
>>>>
>>>> https://github.com/ming1/liburing/commits/fused_cmd_miniublk
>>>>
>>>> Performance improvement is obvious on memory bandwidth
>>>> related workloads, such as, 1~2X improvement on 64K/512K BS
>>>> IO test on loop with ramfs backing file.
>>>>
>>>> Any comments are welcome!
>>>>
>>>> V3:
>>>> - fix build warning reported by kernel test robot
>>>> - drop patch for checking fused flags on existed drivers with
>>>> ->uring_command(), which isn't necessary, since we do not do that
>>>> when adding new ioctl or uring command
>>>> - inline io_init_rq() for core code, so just export io_init_slave_req
>>>> - return result of failed slave request unconditionally since REQ_F_CQE_SKIP
>>>> will be cleared
>>>> - pass xfstest over ublk-loop
>>>
>>> Hello Jens and Guys,
>>>
>>> I have been working on io_uring zero copy support for ublk/fuse for a while, and
>>> I appreciate you may share any thoughts on this patchset or approach?
>>
>> I'm a bit split on this one, as I really like (and want) the feature.
>> ublk has become popular pretty quickly, and it makes a LOT of sense to
>> support zero copy for it. At the same time, I'm not really a huge fan of
>> the fused commands... They seem too specialized to be useful for other
>> things, and it'd be a shame to do something like that only for it later
>> to be replaced by a generic solution. And then we're stuck with
>> supporting fused commands forever, not sure I like that prospect.
>>
>> Both Pavel and Xiaoguang voiced similar concerns, and I think it may be
>> worth spending a bit more time on figuring out if splice can help us
>> here. David Howells currently has a lot going on in that area too.
>
> IMO, splice(->splice_read()) can help much less in this use case, and
> I can't see improvement David Howells has done in this area:
Let me correct a misunderstanding I've seen a couple of times
from people. Apart from the general idea of providing buffers, it's
not that bound to splice. Yes, I reused splicing guts for that
half-made POC, but we can add a new callback that would do it a
bit nicer, i.e. better consolidating returned buffers. Would
probably be even better to have both of them falling back to
splice so it can cover more cases. The core of it is mediating
buffers through io_uring's registered buffer table, which
decouples all the components from each other.
> 1) we need to pass reference of the whole buffer from driver to io_uring,
> which is missed in splice, which just deals with page reference; for
> passing whole buffer reference, we have to apply per buffer pipe to
> solve the problem, and this way is expensive since the pipe can't
> be freed until all buffers are consumed.
>
> 2) reference can't outlive the whole buffer, and splice still misses
> mechanism to provide such guarantee; splice can just make sure that
> page won't be gone if page reference is grabbed, but here we care
> more the whole buffer & its (shared)references lifetime
>
> 3) current ->splice_read() misses capability to provide writeable
> reference to spliced page[2]; either we have to pass new flags
> to ->splice_read() or passing back new pipe buf flags, unfortunately
> Linus thought it isn't good to extend pipe/splice for such purpose,
> and I agree with Linus now.
It might be a non-workable option if we're thinking about splice(2)
and pipes, but pipes and ->splice_read() are just internal details,
an execution mechanism, and it's hidden from the userspace.
I guess someone might make a point that we don't want any changes
to the splice code even if it doesn't affect splice(2) userspace
users, but that's rather a part of development process.
> I believe that Pavel has realized this point[3] too, and here the only
> value of using pipe is to reuse ->splice_read(), however, the above
> points show that ->splice_read() isn't good at this purpose.
But agree that, ->splice_read() doesn't support the reverse
direction, i.e. a file (e.g. ublk) provides buffers for
someone to write into it, that would need to be extended
in some way.
> [1] https://lore.kernel.org/linux-block/ZAk5%[email protected]/
Oops, missed this one
> [2] https://lore.kernel.org/linux-block/CAJfpeguQ3xn2-6svkkVXJ88tiVfcDd-eKi1evzzfvu305fMoyw@mail.gmail.com/
Miklos said that it's better to signal the owner of buffer about
completion, IIUC the way I was proposing, i.e. calling ->release
when io_uring removes the buffer and all io_uring requests using
it complete, should do exactly that.
> [3] https://lore.kernel.org/linux-block/[email protected]/
>
>>
>> So while I'd love to see this feature get queued up right now, I also
>> don't want to prematurely do so. Can we split out the fixes from this
>> series into a separate series that we can queue up now? That would also
>> help shrink the patchset, which is always a win for review.
>
> There is only one fix(patch 5), and the real part is actually the 1st 4
> patches.
>
> I will separate patch 5 from the whole patchset and send out soon, and will
> post out this patchset v4 by improving document for explaining how fused
> command solves this problem in one safe & efficient way.
--
Pavel Begunkov
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-18 16:51 ` Pavel Begunkov
@ 2023-03-18 23:42 ` Ming Lei
2023-03-19 0:17 ` Ming Lei
2023-03-28 10:55 ` Pavel Begunkov
0 siblings, 2 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-18 23:42 UTC (permalink / raw)
To: Pavel Begunkov
Cc: Jens Axboe, io-uring, linux-block, Miklos Szeredi, ZiyangZhang,
Xiaoguang Wang, Bernd Schubert, ming.lei
On Sat, Mar 18, 2023 at 04:51:14PM +0000, Pavel Begunkov wrote:
> On 3/18/23 13:35, Ming Lei wrote:
> > Hi Jens,
> >
> > Thanks for the response!
> >
> > On Sat, Mar 18, 2023 at 06:59:41AM -0600, Jens Axboe wrote:
> > > On 3/17/23 2:14?AM, Ming Lei wrote:
> > > > On Tue, Mar 14, 2023 at 08:57:11PM +0800, Ming Lei wrote:
> > > > > Hello,
> > > > >
> > > > > Add IORING_OP_FUSED_CMD, it is one special URING_CMD, which has to
> > > > > be SQE128. The 1st SQE(master) is one 64byte URING_CMD, and the 2nd
> > > > > 64byte SQE(slave) is another normal 64byte OP. For any OP which needs
> > > > > to support slave OP, io_issue_defs[op].fused_slave needs to be set as 1,
> > > > > and its ->issue() can retrieve/import buffer from master request's
> > > > > fused_cmd_kbuf. The slave OP is actually submitted from kernel, part of
> > > > > this idea is from Xiaoguang's ublk ebpf patchset, but this patchset
> > > > > submits slave OP just like normal OP issued from userspace, that said,
> > > > > SQE order is kept, and batching handling is done too.
> > > > >
> > > > > Please see detailed design in commit log of the 2nd patch, and one big
> > > > > point is how to handle buffer ownership.
> > > > >
> > > > > With this way, it is easy to support zero copy for ublk/fuse device.
> > > > >
> > > > > Basically userspace can specify any sub-buffer of the ublk block request
> > > > > buffer from the fused command just by setting 'offset/len'
> > > > > in the slave SQE for running slave OP. This way is flexible to implement
> > > > > io mapping: mirror, striped, ...
> > > > >
> > > > > The 3rd & 4th patches enable fused slave support for the following OPs:
> > > > >
> > > > > OP_READ/OP_WRITE
> > > > > OP_SEND/OP_RECV/OP_SEND_ZC
> > > > >
> > > > > The other ublk patches cleans ublk driver and implement fused command
> > > > > for supporting zero copy.
> > > > >
> > > > > Follows userspace code:
> > > > >
> > > > > https://github.com/ming1/ubdsrv/tree/fused-cmd-zc-v2
> > > > >
> > > > > All three(loop, nbd and qcow2) ublk targets have supported zero copy by passing:
> > > > >
> > > > > ublk add -t [loop|nbd|qcow2] -z ....
> > > > >
> > > > > Basic fs mount/kernel building and builtin test are done, and also not
> > > > > observe regression on xfstest test over ublk-loop with zero copy.
> > > > >
> > > > > Also add liburing test case for covering fused command based on miniublk
> > > > > of blktest:
> > > > >
> > > > > https://github.com/ming1/liburing/commits/fused_cmd_miniublk
> > > > >
> > > > > Performance improvement is obvious on memory bandwidth
> > > > > related workloads, such as, 1~2X improvement on 64K/512K BS
> > > > > IO test on loop with ramfs backing file.
> > > > >
> > > > > Any comments are welcome!
> > > > >
> > > > > V3:
> > > > > - fix build warning reported by kernel test robot
> > > > > - drop patch for checking fused flags on existed drivers with
> > > > > ->uring_command(), which isn't necessary, since we do not do that
> > > > > when adding new ioctl or uring command
> > > > > - inline io_init_rq() for core code, so just export io_init_slave_req
> > > > > - return result of failed slave request unconditionally since REQ_F_CQE_SKIP
> > > > > will be cleared
> > > > > - pass xfstest over ublk-loop
> > > >
> > > > Hello Jens and Guys,
> > > >
> > > > I have been working on io_uring zero copy support for ublk/fuse for a while, and
> > > > I appreciate you may share any thoughts on this patchset or approach?
> > >
> > > I'm a bit split on this one, as I really like (and want) the feature.
> > > ublk has become popular pretty quickly, and it makes a LOT of sense to
> > > support zero copy for it. At the same time, I'm not really a huge fan of
> > > the fused commands... They seem too specialized to be useful for other
> > > things, and it'd be a shame to do something like that only for it later
> > > to be replaced by a generic solution. And then we're stuck with
> > > supporting fused commands forever, not sure I like that prospect.
> > >
> > > Both Pavel and Xiaoguang voiced similar concerns, and I think it may be
> > > worth spending a bit more time on figuring out if splice can help us
> > > here. David Howells currently has a lot going on in that area too.
> >
> > IMO, splice(->splice_read()) can help much less in this use case, and
> > I can't see improvement David Howells has done in this area:
>
> Let me correct a misunderstanding I've seen a couple of times
> from people. Apart from the general idea of providing buffers, it's
> not that bound to splice. Yes, I reused splicing guts for that
> half-made POC, but we can add a new callback that would do it a
> bit nicer, i.e. better consolidating returned buffers. Would
->release() is for releasing pipe-buffer(page), instead of the whole buffer(reference).
> probably be even better to have both of them falling back to
> splice so it can cover more cases. The core of it is mediating
> buffers through io_uring's registered buffer table, which
> decouples all the components from each other.
For using pipe buffer's ->release() to release the whole buffer's
reference, you have to allocate one pipe for each fixed buffer, and add
pipe buffer to it, and keep each pipe buffer into the pipe
until it is consumed, since ->release() needs to be called when
unregistering buffer(all IOs are completed)
It(allocating/free pipe node, and populating it with each page) is
really inefficient for handling one single IO.
So re-using splice for this purpose is still bad, not to mention splice
can't support writeable spliced page.
Wiring device io buffer with context registered buffer table looks like
another approach, however:
1) two uring command OPs for registering/unregistering this buffer in io fast
path has to be added since only userspace can know when buffer(reference)
isn't needed
2) userspace becomes more complicated, 3+ OPs are required for handling one
single device IO
3) buffer reference crosses multiple OPs, for cleanup the registered buffer,
we have to store the device file & "buffer key" in each buffer(such as io_uring_bvec_buf)
for unregistering buffer
4) here the case is totally different with io_mapped_ubuf which isn't
related to any specific file, and just belong to io_uring context; however,
the device io buffer belongs to device(file) actually, so in theory it is wrong
to put it into context's registered buffer table, and supposed to put into
per-file buffer table which isn't supported by io_uring, or it becomes hard to
implement multiple-device io buffer in single context since 'file + buffer key'
has to be used to retrieve this buffer, probably xarray has to be
relied, but
- here the index is (file, buffer key) if the table is per-context, current
xarray only supports index with type of 'unsigned long', so looks not doable
- or per-file xarray has to be used, then the implementation becomes more complicated
- write to xarray has to be done two times in fast io path, so another factor which
hurts performance.
>
> > 1) we need to pass reference of the whole buffer from driver to io_uring,
> > which is missed in splice, which just deals with page reference; for
> > passing whole buffer reference, we have to apply per buffer pipe to
> > solve the problem, and this way is expensive since the pipe can't
> > be freed until all buffers are consumed.
> >
> > 2) reference can't outlive the whole buffer, and splice still misses
> > mechanism to provide such guarantee; splice can just make sure that
> > page won't be gone if page reference is grabbed, but here we care
> > more the whole buffer & its (shared)references lifetime
> >
> > 3) current ->splice_read() misses capability to provide writeable
> > reference to spliced page[2]; either we have to pass new flags
> > to ->splice_read() or passing back new pipe buf flags, unfortunately
> > Linus thought it isn't good to extend pipe/splice for such purpose,
> > and now I agree with Linus now.
>
> It might be a non-workable option if we're thinking about splice(2)
> and pipes, but pipes and ->splice_read() are just internal details,
> an execution mechanism, and it's hidden from the userspace.
both pipe and ->splice_read() are really exposed to userspace, and are
used in other non-io_uring situations, so any change can not break
existed splice/pipe usage, maybe I misunderstand your point?
>
> I guess someone might make a point that we don't want any changes
> to the splice code even if it doesn't affect splice(2) userspace
> users, but that's rather a part of development process.
> > I believe that Pavel has realized this point[3] too, and here the only
> > of value of using pipe is to reuse ->splice_read(), however, the above
> > points show that ->splice_read() isn't good at this purpose.
>
> But agree that, ->splice_read() doesn't support the revers
> direction, i.e. a file (e.g. ublk) provides buffers for
> someone to write into it, that would need to be extended
> in some way.
Linus has objected[1] explicitly to extend it in this way:
There's no point trying to deal with "if unexpectedly doing crazy
things". If a sink writes the data, the sink is so unbelievably buggy
that it's not even funny.
[1] https://lore.kernel.org/linux-block/CAHk-=wgJsi7t7YYpuo6ewXGnHz2nmj67iWR6KPGoz5TBu34mWQ@mail.gmail.com/
That is also the reason why fuse can only support write zero copy via splice
for 10+ years.
>
> > [1] https://lore.kernel.org/linux-block/ZAk5%[email protected]/
>
> Oops, missed this one
>
> > [2] https://lore.kernel.org/linux-block/CAJfpeguQ3xn2-6svkkVXJ88tiVfcDd-eKi1evzzfvu305fMoyw@mail.gmail.com/
>
> Miklos said that it's better to signal the owner of buffer about
> completion, IIUC the way I was proposing, i.e. calling ->release
> when io_uring removes the buffer and all io_uring requests using
> it complete, should do exactly that.
->release() just for acking the page consumption, what the ublk needs is
to drop the whole buffer(represented by bvec) reference when the buffer isn't
used by normal OPs, actually similar with fuse's case, because buffer
reference can't outlive the buffer itself(represented by bvec).
Yeah, probably releasing whole buffer reference can be done by ->release() in
very complicated way, but the whole pipe & pipe buffer has to be kept in
the whole IO lifetime for calling each pipe buffer's ->release(), so you have to
allocate one pipe when registering this buffer, and release it when un-registering
it. Much less efficient.
In short, splice can't help us for meeting ublk/fuse requirement.
Thanks,
Ming
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-18 23:42 ` Ming Lei
@ 2023-03-19 0:17 ` Ming Lei
2023-03-28 10:55 ` Pavel Begunkov
1 sibling, 0 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-19 0:17 UTC (permalink / raw)
To: Pavel Begunkov
Cc: Jens Axboe, io-uring, linux-block, Miklos Szeredi, ZiyangZhang,
Xiaoguang Wang, Bernd Schubert, ming.lei
On Sun, Mar 19, 2023 at 07:42:26AM +0800, Ming Lei wrote:
> On Sat, Mar 18, 2023 at 04:51:14PM +0000, Pavel Begunkov wrote:
> > On 3/18/23 13:35, Ming Lei wrote:
> > > Hi Jens,
> > >
> > > Thanks for the response!
> > >
> > > On Sat, Mar 18, 2023 at 06:59:41AM -0600, Jens Axboe wrote:
> > > > On 3/17/23 2:14?AM, Ming Lei wrote:
> > > > > On Tue, Mar 14, 2023 at 08:57:11PM +0800, Ming Lei wrote:
> > > > > > Hello,
> > > > > >
> > > > > > Add IORING_OP_FUSED_CMD, it is one special URING_CMD, which has to
> > > > > > be SQE128. The 1st SQE(master) is one 64byte URING_CMD, and the 2nd
> > > > > > 64byte SQE(slave) is another normal 64byte OP. For any OP which needs
> > > > > > to support slave OP, io_issue_defs[op].fused_slave needs to be set as 1,
> > > > > > and its ->issue() can retrieve/import buffer from master request's
> > > > > > fused_cmd_kbuf. The slave OP is actually submitted from kernel, part of
> > > > > > this idea is from Xiaoguang's ublk ebpf patchset, but this patchset
> > > > > > submits slave OP just like normal OP issued from userspace, that said,
> > > > > > SQE order is kept, and batching handling is done too.
> > > > > >
> > > > > > Please see detailed design in commit log of the 2th patch, and one big
> > > > > > point is how to handle buffer ownership.
> > > > > >
> > > > > > With this way, it is easy to support zero copy for ublk/fuse device.
> > > > > >
> > > > > > Basically userspace can specify any sub-buffer of the ublk block request
> > > > > > buffer from the fused command just by setting 'offset/len'
> > > > > > in the slave SQE for running slave OP. This way is flexible to implement
> > > > > > io mapping: mirror, stripped, ...
> > > > > >
> > > > > > The 3th & 4th patches enable fused slave support for the following OPs:
> > > > > >
> > > > > > OP_READ/OP_WRITE
> > > > > > OP_SEND/OP_RECV/OP_SEND_ZC
> > > > > >
> > > > > > The other ublk patches cleans ublk driver and implement fused command
> > > > > > for supporting zero copy.
> > > > > >
> > > > > > Follows userspace code:
> > > > > >
> > > > > > https://github.com/ming1/ubdsrv/tree/fused-cmd-zc-v2
> > > > > >
> > > > > > All three(loop, nbd and qcow2) ublk targets have supported zero copy by passing:
> > > > > >
> > > > > > ublk add -t [loop|nbd|qcow2] -z ....
> > > > > >
> > > > > > Basic fs mount/kernel building and builtin test are done, and also not
> > > > > > observe regression on xfstest test over ublk-loop with zero copy.
> > > > > >
> > > > > > Also add liburing test case for covering fused command based on miniublk
> > > > > > of blktest:
> > > > > >
> > > > > > https://github.com/ming1/liburing/commits/fused_cmd_miniublk
> > > > > >
> > > > > > Performance improvement is obvious on memory bandwidth
> > > > > > related workloads, such as, 1~2X improvement on 64K/512K BS
> > > > > > IO test on loop with ramfs backing file.
> > > > > >
> > > > > > Any comments are welcome!
> > > > > >
> > > > > > V3:
> > > > > > - fix build warning reported by kernel test robot
> > > > > > - drop patch for checking fused flags on existed drivers with
> > > > > > ->uring_command(), which isn't necessary, since we do not do that
> > > > > > when adding new ioctl or uring command
> > > > > > - inline io_init_rq() for core code, so just export io_init_slave_req
> > > > > > - return result of failed slave request unconditionally since REQ_F_CQE_SKIP
> > > > > > will be cleared
> > > > > > - pass xfstest over ublk-loop
> > > > >
> > > > > Hello Jens and Guys,
> > > > >
> > > > > I have been working on io_uring zero copy support for ublk/fuse for a while, and
> > > > > I appreciate you may share any thoughts on this patchset or approach?
> > > >
> > > > I'm a bit split on this one, as I really like (and want) the feature.
> > > > ublk has become popular pretty quickly, and it makes a LOT of sense to
> > > > support zero copy for it. At the same time, I'm not really a huge fan of
> > > > the fused commands... They seem too specialized to be useful for other
> > > > things, and it'd be a shame to do something like that only for it later
> > > > to be replaced by a generic solution. And then we're stuck with
> > > > supporting fused commands forever, not sure I like that prospect.
> > > >
> > > > Both Pavel and Xiaoguang voiced similar concerns, and I think it may be
> > > > worth spending a bit more time on figuring out if splice can help us
> > > > here. David Howells currently has a lot going on in that area too.
> > >
> > > IMO, splice(->splice_read()) can help much less in this use case, and
> > > I can't see improvement David Howells has done in this area:
> >
> > Let me correct a misunderstanding I've seen a couple of times
> > from people. Apart from the general idea of providing buffers, it's
> > not that bound to splice. Yes, I reused splicing guts for that
> > half-made POC, but we can add a new callback that would do it a
> > bit nicer, i.e. better consolidating returned buffers. Would
>
> ->release() is for releasing pipe-buffer(page), instead of the whole buffer(reference).
>
> > probably be even better to have both of them falling back to
> > splice so it can cover more cases. The core of it is mediating
> > buffers through io_uring's registered buffer table, which
> > decouples all the components from each other.
>
> For using pipe buffer's ->release() to release the whole buffer's
> reference, you have to allocate one pipe for each fixed buffer, and add
> pipe buffer to it, and keep each pipe buffer into the pipe
> until it is consumed, since ->release() needs to be called when
> unregistering buffer(all IOs are completed)
>
> It(allocating/free pipe node, and populating it with each page) is
> really inefficient for handling one single IO.
>
> So re-using splice for this purpose is still bad not mention splice
> can't support writeable spliced page.
>
> Wiring device io buffer with context registered buffer table looks like
> another approach, however:
>
> 1) two uring command OPs for registering/unregistering this buffer in io fast
> path has to be added since only userspace can know when buffer(reference)
> isn't needed
>
> 2) userspace becomes more complicated, 3+ OPs are required for handling one
> single device IO
>
> 3) buffer reference crosses multiple OPs, for cleanup the registered buffer,
> we have to store the device file & "buffer key" in each buffer(such as io_uring_bvec_buf)
> for unregistering buffer
Follows another problem or complexity here:
- normal usage when handling one application(ublk/fuse) IO:
register device io buffer (file, buffer key)
OP1 consumes the buffer reference and submits IO
OP2 consumes the buffer reference and submits IO
...
unregister device io buffer(file, buffer key) after all above OPs are completed
- for avoiding devil userspace(we are allowed for unprivileged user) to consume
buffer after buffer is un-registered, each OP has to grab the buffer(reference)'s
reference or check if the buffer is stale in its io code path; which has to be added
to current OP code path
- so the decoupling purpose may _not_ be supported actually, also the current
fixed buffer interface does not support this kind of buffer retrieving via (xarray, (file, key))
thanks,
Ming
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-18 23:42 ` Ming Lei
2023-03-19 0:17 ` Ming Lei
@ 2023-03-28 10:55 ` Pavel Begunkov
2023-03-28 13:01 ` Ming Lei
1 sibling, 1 reply; 49+ messages in thread
From: Pavel Begunkov @ 2023-03-28 10:55 UTC (permalink / raw)
To: Ming Lei
Cc: Jens Axboe, io-uring, linux-block, Miklos Szeredi, ZiyangZhang,
Xiaoguang Wang, Bernd Schubert
On 3/18/23 23:42, Ming Lei wrote:
> On Sat, Mar 18, 2023 at 04:51:14PM +0000, Pavel Begunkov wrote:
>> On 3/18/23 13:35, Ming Lei wrote:
>>> On Sat, Mar 18, 2023 at 06:59:41AM -0600, Jens Axboe wrote:
>>>> On 3/17/23 2:14?AM, Ming Lei wrote:
>>>>> On Tue, Mar 14, 2023 at 08:57:11PM +0800, Ming Lei wrote:
[...]
>>> IMO, splice(->splice_read()) can help much less in this use case, and
>>> I can't see improvement David Howells has done in this area:
>>
>> Let me correct a misunderstanding I've seen a couple of times
>> from people. Apart from the general idea of providing buffers, it's
>> not that bound to splice. Yes, I reused splicing guts for that
>> half-made POC, but we can add a new callback that would do it a
>> bit nicer, i.e. better consolidating returned buffers. Would
>
> ->release() is for releasing pipe-buffer(page), instead of the whole buffer(reference).
>> probably be even better to have both of them falling back to
>> splice so it can cover more cases. The core of it is mediating
>> buffers through io_uring's registered buffer table, which
>> decouples all the components from each other.
>
> For using pipe buffer's ->release() to release the whole buffer's
> reference, you have to allocate one pipe for each fixed buffer, and add
> pipe buffer to it, and keep each pipe buffer into the pipe
> until it is consumed, since ->release() needs to be called when
> unregistering buffer(all IOs are completed)
What I'm saying is that I'm more concerned about the uapi,
whether internally it's ->splice_read(). I think ->splice_read()
has its merit in a hybrid approach, but for simplicity let's say
we don't use it and there is a new f_op callback, or it's
returned by cmd requests.
> It(allocating/free pipe node, and populating it with each page) is
> really inefficient for handling one single IO.
It doesn't need pipe node allocation. We'd need to allocate
space for pages, but again, there is a good io_uring infra
for it without any single additional lock taken in most cases.
> So re-using splice for this purpose is still bad not mention splice
> can't support writeable spliced page.
>
> Wiring device io buffer with context registered buffer table looks like
> another approach, however:
>
> 1) two uring command OPs for registering/unregistering this buffer in io fast
> path has to be added since only userspace can know when buffer(reference)
> isn't needed
Yes, that's a good point. Registration replaces fuse master cmd, so it's
one extra request for unregister, which might be fine.
> 2) userspace becomes more complicated, 3+ OPs are required for handling one
> single device IO
>
> 3) buffer reference crosses multiple OPs, for cleanup the registered buffer,
> we have to store the device file & "buffer key" in each buffer(such as io_uring_bvec_buf)
> for unregistering buffer
It should not necessarily be a file.
> 4) here the case is totally different with io_mapped_ubuf which isn't
> related to any specific file, and just belong to io_uring context; however,
> the device io buffer belongs to device(file) actually, so in theory it is wrong
> to put it into context's registered buffer table, and supposed to put into
Not at all, it doesn't belong to io_uring but rather to the user space,
without a file, right, but io_uring still only borrowing it.
As for keeping files, I predict that it'll be there anyway in some time,
some p2pdma experiments, dma preregistration, all required having a file
attached to the buffer.
> per-file buffer table which isn't supported by io_uring, or it becomes hard to
> implement multiple-device io buffer in single context since 'file + buffer key'
> has to be used to retrieve this buffer, probably xarray has to be
> relied, but
I was proposing to give slot selection to the userspace, perhaps with
optional auto index allocation as it's done with registered files.
> - here the index is (file, buffer key) if the table is per-context, current
> xarray only supports index with type of 'unsigned long', so looks not doable
> - or per-file xarray has to be used, then the implementation becomes more complicated
> - write to xarray has to be done two times in fast io path, so another factor which
> hurts performance.
>
>>
>>> 1) we need to pass reference of the whole buffer from driver to io_uring,
>>> which is missed in splice, which just deals with page reference; for
>>> passing whole buffer reference, we have to apply per buffer pipe to
>>> solve the problem, and this way is expensive since the pipe can't
>>> be freed until all buffers are consumed.
>>>
>>> 2) reference can't outlive the whole buffer, and splice still misses
>>> mechanism to provide such guarantee; splice can just make sure that
>>> page won't be gone if page reference is grabbed, but here we care
>>> more the whole buffer & its (shared)references lifetime
>>>
>>> 3) current ->splice_read() misses capability to provide writeable
>>> reference to spliced page[2]; either we have to pass new flags
>>> to ->splice_read() or passing back new pipe buf flags, unfortunately
>>> Linus thought it isn't good to extend pipe/splice for such purpose,
>>> and now I agree with Linus now.
>>
>> It might be a non-workable option if we're thinking about splice(2)
>> and pipes, but pipes and ->splice_read() are just internal details,
>> an execution mechanism, and it's hidden from the userspace.
>
> both pipe and ->splice_read() are really exposed to userspace, and are
> used in other non-io_uring situations, so any change can not break
> existed splice/pipe usage, maybe I misunderstand your point?
Oh, I meant reusing some of splice bits but not changing splice(2).
E.g. a kernel internal flag which is not allowed to be passed into
splice(2).
>> I guess someone might make a point that we don't want any changes
>> to the splice code even if it doesn't affect splice(2) userspace
>> users, but that's rather a part of development process.
>>> I believe that Pavel has realized this point[3] too, and here the only
>>> of value of using pipe is to reuse ->splice_read(), however, the above
>>> points show that ->splice_read() isn't good at this purpose.
>>
>> But agree that, ->splice_read() doesn't support the revers
>> direction, i.e. a file (e.g. ublk) provides buffers for
>> someone to write into it, that would need to be extended
>> in some way.
>
> Linus has objected[1] explicitly to extend it in this way:
>
> There's no point trying to deal with "if unexpectedly doing crazy
> things". If a sink writes the data, the sink is so unbelievably buggy
> that it's not even funny.
As far as I can see, Linus doesn't like there that the semantics
is not clear. "sink writes data" and writing to pages provided
by ->splice_read() don't sound right indeed.
I might be wrong but it appears that the semantics was ublk
lending an "empty" buffer to another file, which will fill it
in and return back the data by calling some sort of ->release
callback, then ublk consumes the data.
> [1] https://lore.kernel.org/linux-block/CAHk-=wgJsi7t7YYpuo6ewXGnHz2nmj67iWR6KPGoz5TBu34mWQ@mail.gmail.com/
>
> That is also the reason why fuse can only support write zero copy via splice
> for 10+ years.
>
>>
>>> [1] https://lore.kernel.org/linux-block/ZAk5%[email protected]/
>>
>> Oops, missed this one
>>
>>> [2] https://lore.kernel.org/linux-block/CAJfpeguQ3xn2-6svkkVXJ88tiVfcDd-eKi1evzzfvu305fMoyw@mail.gmail.com/
>>
>> Miklos said that it's better to signal the owner of buffer about
>> completion, IIUC the way I was proposing, i.e. calling ->release
>> when io_uring removes the buffer and all io_uring requests using
>> it complete, should do exactly that.
>
> ->release() just for acking the page consumption, what the ublk needs is
> to drop the whole buffer(represented by bvec) reference when the buffer isn't
> used by normal OPs, actually similar with fuse's case, because buffer
> reference can't outlive the buffer itself(represented by bvec).
>
> Yeah, probably releasing whole buffer reference can be done by ->release() in
> very complicated way, but the whole pipe & pipe buffer has to be kept in
> the whole IO lifetime for calling each pipe buffer's ->release(), so you have to
> allocate one pipe when registering this buffer, and release it when un-registering
> it. Much less efficient.
As per noted above, We don't necessarily have to stick with splice_read()
and pipe callbacks.
>
> In short, splice can't help us for meeting ublk/fuse requirement.
--
Pavel Begunkov
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-28 10:55 ` Pavel Begunkov
@ 2023-03-28 13:01 ` Ming Lei
2023-03-29 6:59 ` Ziyang Zhang
2023-03-29 10:43 ` Pavel Begunkov
0 siblings, 2 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-28 13:01 UTC (permalink / raw)
To: Pavel Begunkov
Cc: Jens Axboe, io-uring, linux-block, Miklos Szeredi, ZiyangZhang,
Xiaoguang Wang, Bernd Schubert, ming.lei
On Tue, Mar 28, 2023 at 11:55:38AM +0100, Pavel Begunkov wrote:
> On 3/18/23 23:42, Ming Lei wrote:
> > On Sat, Mar 18, 2023 at 04:51:14PM +0000, Pavel Begunkov wrote:
> > > On 3/18/23 13:35, Ming Lei wrote:
> > > > On Sat, Mar 18, 2023 at 06:59:41AM -0600, Jens Axboe wrote:
> > > > > On 3/17/23 2:14?AM, Ming Lei wrote:
> > > > > > On Tue, Mar 14, 2023 at 08:57:11PM +0800, Ming Lei wrote:
> [...]
> > > > IMO, splice(->splice_read()) can help much less in this use case, and
> > > > I can't see improvement David Howells has done in this area:
> > >
> > > Let me correct a misunderstanding I've seen a couple of times
> > > from people. Apart from the general idea of providing buffers, it's
> > > not that bound to splice. Yes, I reused splicing guts for that
> > > half-made POC, but we can add a new callback that would do it a
> > > bit nicer, i.e. better consolidating returned buffers. Would
> >
> > ->release() is for releasing pipe-buffer(page), instead of the whole buffer(reference).
> > > probably be even better to have both of them falling back to
> > > splice so it can cover more cases. The core of it is mediating
> > > buffers through io_uring's registered buffer table, which
> > > decouples all the components from each other.
> >
> > For using pipe buffer's ->release() to release the whole buffer's
> > reference, you have to allocate one pipe for each fixed buffer, and add
> > pipe buffer to it, and keep each pipe buffer into the pipe
> > until it is consumed, since ->release() needs to be called when
> > unregistering buffer(all IOs are completed)
>
> What I'm saying is that I'm more concerned about the uapi,
> rather than whether internally it's ->splice_read(). I think ->splice_read()
> has its merit in a hybrid approach, but for simplicity let's say
> we don't use it and there is a new f_op callback, or it's
> returned by cmd requests.
OK, then forget splice if you add new callback, isn't that what this
patchset(just reuse ->uring_cmd()) is doing?
>
> > It(allocating/free pipe node, and populating it with each page) is
> > really inefficient for handling one single IO.
>
> It doesn't need pipe node allocation. We'd need to allocate
> space for pages, but again, there is a good io_uring infra
> for it without any single additional lock taken in most cases.
Then it is same with this patchset.
>
>
> > So re-using splice for this purpose is still bad not mention splice
> > can't support writeable spliced page.
> >
> > Wiring device io buffer with context registered buffer table looks like
> > another approach, however:
> >
> > 1) two uring command OPs for registering/unregistering this buffer in io fast
> > path has to be added since only userspace can know when buffer(reference)
> > isn't needed
>
> Yes, that's a good point. Registration replaces fuse master cmd, so it's
> one extra request for unregister, which might be fine.
Unfortunately I don't think this way is good, the problem is that buffer
only has physical pages, and doesn't have userspace mapping, so why bother
to export it to userspace?
As I replied to Ziyang, the current fused command can be extended to
this way easily, but I don't know why we need to use the buffer registration,
given userspace can't read/write the buffer, and fused command can cover
it just fine.
>
> > 2) userspace becomes more complicated, 3+ OPs are required for handling one
> > single device IO
> >
> > 3) buffer reference crosses multiple OPs, for cleanup the registered buffer,
> > we have to store the device file & "buffer key" in each buffer(such as io_uring_bvec_buf)
> > for unregistering buffer
>
> It should not necessarily be a file.
At least in ublk's case, from io_uring viewpoint, the buffer is owned by
ublk device, so we need the device node or file for releasing the
buffer.
>
> > 4) here the case is totally different with io_mapped_ubuf which isn't
> > related to any specific file, and just belong to io_uring context; however,
> > the device io buffer belongs to device(file) actually, so in theory it is wrong
> > to put it into context's registered buffer table, and supposed to put into
>
> Not at all, it doesn't belong to io_uring but rather to the user space,
> without a file, right, but io_uring still only borrowing it.
How can one such buffer be owned by userspace? What if the userspace is
killed? If you think userspace can grab the buffer reference, that still
needs userspace to release the buffer, but that is unreliable, and
io_uring has to cover the buffer cleanup in case of userspace exit abnormally.
Because buffer lifetime is crossing multiple OPs if you implement buffer
register/unregister OPs. And there isn't such issue for fused command
which has same lifetime with the buffer.
>
> As for keeping files, I predict that it'll be there anyway in some time,
> some p2pdma experiments, dma preregistration, all required having a file
> attached to the buffer.
>
> > per-file buffer table which isn't supported by io_uring, or it becomes hard to
> > implement multiple-device io buffer in single context since 'file + buffer key'
> > has to be used to retrieve this buffer, probably xarray has to be
> > relied, but
>
> I was proposing to give slot selection to the userspace, perhaps with
> optional auto index allocation as it's done with registered files.
As I mentioned above, it doesn't make sense to export buffer to
userspace which can't touch any data of the buffer at all.
>
> > - here the index is (file, buffer key) if the table is per-context, current
> > xarray only supports index with type of 'unsigned long', so looks not doable
> > - or per-file xarray has to be used, then the implementation becomes more complicated
> > - write to xarray has to be done two times in fast io path, so another factor which
> > hurts performance.
> >
> > >
> > > > 1) we need to pass reference of the whole buffer from driver to io_uring,
> > > > which is missed in splice, which just deals with page reference; for
> > > > passing whole buffer reference, we have to apply per buffer pipe to
> > > > solve the problem, and this way is expensive since the pipe can't
> > > > be freed until all buffers are consumed.
> > > >
> > > > 2) reference can't outlive the whole buffer, and splice still misses
> > > > mechanism to provide such guarantee; splice can just make sure that
> > > > page won't be gone if page reference is grabbed, but here we care
> > > > more about the whole buffer & its (shared)references lifetime
> > > >
> > > > 3) current ->splice_read() misses capability to provide writeable
> > > > reference to spliced page[2]; either we have to pass new flags
> > > > to ->splice_read() or passing back new pipe buf flags, unfortunately
> > > > Linus thought it isn't good to extend pipe/splice for such purpose,
> > > > and I agree with Linus now.
> > >
> > > It might be a non-workable option if we're thinking about splice(2)
> > > and pipes, but pipes and ->splice_read() are just internal details,
> > > an execution mechanism, and it's hidden from the userspace.
> >
> > both pipe and ->splice_read() are really exposed to userspace, and are
> > used in other non-io_uring situations, so any change can not break
> > existed splice/pipe usage, maybe I misunderstand your point?
>
> Oh, I meant reusing some of splice bits but not changing splice(2).
> E.g. a kernel internal flag which is not allowed to be passed into
> splice(2).
>
>
> > > I guess someone might make a point that we don't want any changes
> > > to the splice code even if it doesn't affect splice(2) userspace
> > > users, but that's rather a part of development process.
> > > > I believe that Pavel has realized this point[3] too, and here the only
> > > > of value of using pipe is to reuse ->splice_read(), however, the above
> > > > points show that ->splice_read() isn't good at this purpose.
> > >
> > > But agree that, ->splice_read() doesn't support the reverse
> > > direction, i.e. a file (e.g. ublk) provides buffers for
> > > someone to write into it, that would need to be extended
> > > in some way.
> >
> > Linus has objected[1] explicitly to extend it in this way:
> >
> > There's no point trying to deal with "if unexpectedly doing crazy
> > things". If a sink writes the data, the sink is so unbelievably buggy
> > that it's not even funny.
>
> As far as I can see, Linus doesn't like that the semantics
> there is not clear. "sink writes data" and writing to pages provided
> by ->splice_read() don't sound right indeed.
>
> I might be wrong but it appears that the semantics was ublk
> lending an "empty" buffer to another file, which will fill it
> in and return back the data by calling some sort of ->release
> callback, then ublk consumes the data.
Yes, that is exactly what fused command is doing.
>
>
> > [1] https://lore.kernel.org/linux-block/CAHk-=wgJsi7t7YYpuo6ewXGnHz2nmj67iWR6KPGoz5TBu34mWQ@mail.gmail.com/
> >
> > That is also the reason why fuse can only support write zero copy via splice
> > for 10+ years.
> >
> > >
> > > > [1] https://lore.kernel.org/linux-block/ZAk5%[email protected]/
> > >
> > > Oops, missed this one
> > >
> > > > [2] https://lore.kernel.org/linux-block/CAJfpeguQ3xn2-6svkkVXJ88tiVfcDd-eKi1evzzfvu305fMoyw@mail.gmail.com/
> > >
> > > Miklos said that it's better to signal the owner of buffer about
> > > completion, IIUC the way I was proposing, i.e. calling ->release
> > > when io_uring removes the buffer and all io_uring requests using
> > > it complete, should do exactly that.
> >
> > ->release() just for acking the page consumption, what the ublk needs is
> > to drop the whole buffer(represented by bvec) reference when the buffer isn't
> > used by normal OPs, actually similar with fuse's case, because buffer
> > reference can't outlive the buffer itself(represented by bvec).
> >
> > Yeah, probably releasing whole buffer reference can be done by ->release() in
> > very complicated way, but the whole pipe & pipe buffer has to be kept in
> > the whole IO lifetime for calling each pipe buffer's ->release(), so you have to
> > allocate one pipe when registering this buffer, and release it when un-registering
> > it. Much less efficient.
>
> As noted above, we don't necessarily have to stick with splice_read()
> and pipe callbacks.
As I mentioned, it is basically what fused command is doing.
Thanks,
Ming
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-28 13:01 ` Ming Lei
@ 2023-03-29 6:59 ` Ziyang Zhang
2023-03-29 10:43 ` Pavel Begunkov
1 sibling, 0 replies; 49+ messages in thread
From: Ziyang Zhang @ 2023-03-29 6:59 UTC (permalink / raw)
To: Ming Lei, Pavel Begunkov
Cc: Jens Axboe, io-uring, linux-block, Miklos Szeredi, Xiaoguang Wang,
Bernd Schubert
On 2023/3/28 21:01, Ming Lei wrote:
[...]
>>
>>
>>> So re-using splice for this purpose is still bad not mention splice
>>> can't support writeable spliced page.
>>>
>>> Wiring device io buffer with context registered buffer table looks like
>>> another approach, however:
>>>
>>> 1) two uring command OPs for registering/unregistering this buffer in io fast
>>> path has to be added since only userspace can know when buffer(reference)
>>> isn't needed
>>
>> Yes, that's a good point. Registration replaces fuse master cmd, so it's
>> one extra request for unregister, which might be fine.
>
> Unfortunately I don't think this way is good, the problem is that buffer
> only has physical pages, and doesn't have userspace mapping, so why bother
> to export it to userspace?
>
> As I replied to Ziyang, the current fused command can be extended to
> this way easily, but I don't know why we need to use the buffer registration,
> given userspace can't read/write the buffer, and fused command can cover
> it just fine.
>
Hi Ming, I have replied to you in another email.
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-28 13:01 ` Ming Lei
2023-03-29 6:59 ` Ziyang Zhang
@ 2023-03-29 10:43 ` Pavel Begunkov
2023-03-29 11:55 ` Ming Lei
1 sibling, 1 reply; 49+ messages in thread
From: Pavel Begunkov @ 2023-03-29 10:43 UTC (permalink / raw)
To: Ming Lei
Cc: Jens Axboe, io-uring, linux-block, Miklos Szeredi, ZiyangZhang,
Xiaoguang Wang, Bernd Schubert
On 3/28/23 14:01, Ming Lei wrote:
> On Tue, Mar 28, 2023 at 11:55:38AM +0100, Pavel Begunkov wrote:
>> On 3/18/23 23:42, Ming Lei wrote:
>>> On Sat, Mar 18, 2023 at 04:51:14PM +0000, Pavel Begunkov wrote:
>>>> On 3/18/23 13:35, Ming Lei wrote:
>>>>> On Sat, Mar 18, 2023 at 06:59:41AM -0600, Jens Axboe wrote:
>>>>>> On 3/17/23 2:14?AM, Ming Lei wrote:
>>>>>>> On Tue, Mar 14, 2023 at 08:57:11PM +0800, Ming Lei wrote:
>> [...]
>>>>> IMO, splice(->splice_read()) can help much less in this use case, and
>>>>> I can't see improvement David Howells has done in this area:
>>>>
>>>> Let me correct a misunderstanding I've seen a couple of times
>>>> from people. Apart from the general idea of providing buffers, it's
>>>> not that bound to splice. Yes, I reused splicing guts for that
>>>> half-made POC, but we can add a new callback that would do it a
>>>> bit nicer, i.e. better consolidating returned buffers. Would
>>>
>>> ->release() is for releasing pipe-buffer(page), instead of the whole buffer(reference).
>>>> probably be even better to have both of them falling back to
>>>> splice so it can cover more cases. The core of it is mediating
>>>> buffers through io_uring's registered buffer table, which
>>>> decouples all the components from each other.
>>>
>>> For using pipe buffer's ->release() to release the whole buffer's
>>> reference, you have to allocate one pipe for each fixed buffer, and add
>>> pipe buffer to it, and keep each pipe buffer into the pipe
>>> until it is consumed, since ->release() needs to be called when
>>> unregistering buffer(all IOs are completed)
>>
>> What I'm saying is that I'm more concerned about the uapi,
>> rather than whether internally it's ->splice_read(). I think ->splice_read()
>> has its merit in a hybrid approach, but for simplicity let's say
>> we don't use it and there is a new f_op callback, or it's
>> returned by cmd requests.
>
> OK, then forget splice if you add new callback, isn't that what this
> patchset(just reuse ->uring_cmd()) is doing?
It certainly similar in many aspects! And it's also similar to
splicing with pipes, just instead of pipes there is io_uring and,
of course, semantics changes. The idea is to decouple requests from
each other with a different uapi.
>>> It(allocating/free pipe node, and populating it with each page) is
>>> really inefficient for handling one single IO.
>>
>> It doesn't need pipe node allocation. We'd need to allocate
>> space for pages, but again, there is a good io_uring infra
>> for it without any single additional lock taken in most cases.
>
> Then it is same with this patchset.
>
>>
>>
>>> So re-using splice for this purpose is still bad not mention splice
>>> can't support writeable spliced page.
>>>
>>> Wiring device io buffer with context registered buffer table looks like
>>> another approach, however:
>>>
>>> 1) two uring command OPs for registering/unregistering this buffer in io fast
>>> path has to be added since only userspace can know when buffer(reference)
>>> isn't needed
>>
>> Yes, that's a good point. Registration replaces fuse master cmd, so it's
>> one extra request for unregister, which might be fine.
>
> Unfortunately I don't think this way is good, the problem is that buffer
> only has physical pages, and doesn't have userspace mapping, so why bother
> to export it to userspace?
>
> As I replied to Ziyang, the current fused command can be extended to
> this way easily, but I don't know why we need to use the buffer registration,
> given userspace can't read/write the buffer, and fused command can cover
> it just fine.
I probably mentioned it before, but that's where we need a new memcpy
io_uring request type, to partially copy it, e.g. headers. I think people
mentioned memcpy before in general, and it will also be used for DMA driven
copies if Keith returns back to experiments.
Apart from it and things like broadcasting, sending different chunks to
different places and so, there is a typical problem what to do when the
second operation fails but the data has already been received, mostly
relevant to sockets / streams.
>>> 2) userspace becomes more complicated, 3+ OPs are required for handling one
>>> single device IO
>>>
>>> 3) buffer reference crosses multiple OPs, for cleanup the registered buffer,
>>> we have to store the device file & "buffer key" in each buffer(such as io_uring_bvec_buf)
>>> for unregistering buffer
>>
>> It should not necessarily be a file.
>
> At least in ublk's case, from io_uring viewpoint, the buffer is owned by
> ublk device, so we need the device node or file for releasing the
> buffer.
For example, io_uring has a lightweight way to pin the context
(pcpu refcount). I haven't looked into ublk code, it's hard for
me to argue about it.
>>> 4) here the case is totally different with io_mapped_ubuf which isn't
>>> related to any specific file, and just belong to io_uring context; however,
>>> the device io buffer belongs to device(file) actually, so in theory it is wrong
>>> to put it into context's registered buffer table, and supposed to put into
>>
>> Not at all, it doesn't belong to io_uring but rather to the user space,
>> without a file, right, but io_uring still only borrowing it.
>
> How can one such buffer be owned by userspace? What if the userspace is
> killed? If you think userspace can grab the buffer reference, that still
> needs userspace to release the buffer, but that is unreliable, and
> io_uring has to cover the buffer cleanup in case of userspace exit abnormally.
Conceptually userspace owns buffers and io_uring is share / borrowing it.
Probably, I misunderstood and you were talking about refcounting or something
else. Can you elaborate? As for references, io_uring pins normal buffers
and so holds additional refs.
> Because buffer lifetime is crossing multiple OPs if you implement buffer
> register/unregister OPs. And there isn't such issue for fused command
> which has same lifetime with the buffer.
>
>>
>> As for keeping files, I predict that it'll be there anyway in some time,
>> some p2pdma experiments, dma preregistration, all required having a file
>> attached to the buffer.
>>
>>> per-file buffer table which isn't supported by io_uring, or it becomes hard to
>>> implement multiple-device io buffer in single context since 'file + buffer key'
>>> has to be used to retrieve this buffer, probably xarray has to be
>>> relied, but
>>
>> I was proposing to give slot selection to the userspace, perhaps with
>> optional auto index allocation as it's done with registered files.
>
> As I mentioned above, it doesn't make sense to export buffer to
> userspace which can't touch any data of the buffer at all.
replied above.
>>> - here the index is (file, buffer key) if the table is per-context, current
>>> xarray only supports index with type of 'unsigned long', so looks not doable
>>> - or per-file xarray has to be used, then the implementation becomes more complicated
>>> - write to xarray has to be done two times in fast io path, so another factor which
>>> hurts performance.
>>>
>>>>
[...]
--
Pavel Begunkov
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-29 10:43 ` Pavel Begunkov
@ 2023-03-29 11:55 ` Ming Lei
0 siblings, 0 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-29 11:55 UTC (permalink / raw)
To: Pavel Begunkov
Cc: Jens Axboe, io-uring, linux-block, Miklos Szeredi, ZiyangZhang,
Xiaoguang Wang, Bernd Schubert, ming.lei
On Wed, Mar 29, 2023 at 11:43:41AM +0100, Pavel Begunkov wrote:
> On 3/28/23 14:01, Ming Lei wrote:
> > On Tue, Mar 28, 2023 at 11:55:38AM +0100, Pavel Begunkov wrote:
> > > On 3/18/23 23:42, Ming Lei wrote:
> > > > On Sat, Mar 18, 2023 at 04:51:14PM +0000, Pavel Begunkov wrote:
> > > > > On 3/18/23 13:35, Ming Lei wrote:
> > > > > > On Sat, Mar 18, 2023 at 06:59:41AM -0600, Jens Axboe wrote:
> > > > > > > On 3/17/23 2:14?AM, Ming Lei wrote:
> > > > > > > > On Tue, Mar 14, 2023 at 08:57:11PM +0800, Ming Lei wrote:
> > > [...]
> > > > > > IMO, splice(->splice_read()) can help much less in this use case, and
> > > > > > I can't see improvement David Howells has done in this area:
> > > > >
> > > > > Let me correct a misunderstanding I've seen a couple of times
> > > > > from people. Apart from the general idea of providing buffers, it's
> > > > > not that bound to splice. Yes, I reused splicing guts for that
> > > > > half-made POC, but we can add a new callback that would do it a
> > > > > bit nicer, i.e. better consolidating returned buffers. Would
> > > >
> > > > ->release() is for releasing pipe-buffer(page), instead of the whole buffer(reference).
> > > > > probably be even better to have both of them falling back to
> > > > > splice so it can cover more cases. The core of it is mediating
> > > > > buffers through io_uring's registered buffer table, which
> > > > > decouples all the components from each other.
> > > >
> > > > For using pipe buffer's ->release() to release the whole buffer's
> > > > reference, you have to allocate one pipe for each fixed buffer, and add
> > > > pipe buffer to it, and keep each pipe buffer into the pipe
> > > > until it is consumed, since ->release() needs to be called when
> > > > unregistering buffer(all IOs are completed)
> > >
> > > What I'm saying is that I'm more concerned about the uapi,
> > > rather than whether internally it's ->splice_read(). I think ->splice_read()
> > > has its merit in a hybrid approach, but for simplicity let's say
> > > we don't use it and there is a new f_op callback, or it's
> > > returned by cmd requests.
> >
> > OK, then forget splice if you add new callback, isn't that what this
> > patchset(just reuse ->uring_cmd()) is doing?
>
> It certainly similar in many aspects! And it's also similar to
> splicing with pipes, just instead of pipes there is io_uring and,
It is definitely different from pipe/splice, which works on page lifetime, but
here we need to focus on the whole buffer lifetime.
> of course, semantics changes. The idea is to decouple requests from
> each other with a different uapi.
The only difference is that buffer registration can use current ->buffer_idx
interface, and fused command uses normal uapi interface by passing
buffer offset/len via sqe->addr & sqe->len to locate buffer in primary
command. That should be the decouple. But not sure if the difference matters.
Even though I am not sure if it is doable, because:
- only 1 sqe flag is left, and how to differentiate this buffer
registration with normal fixed buffer
- unregister buffer OP may not be called because of task exit
abnormally, so io_uring has to take care of the cleanup, so
file/command data needs to be saved somewhere for the cleanup,
since buffer belongs to device, both register and unregister should
call into device via uring command, see details below
Also there are other performance effects from buffer registration:
1) one extra OP of unregister is needed in io code path
2) boundary in buffer register & OPs & buffer unregister have to be
linked since there are dependencies among the three(register, OPs,
unregister)
>
> > > > It(allocating/free pipe node, and populating it with each page) is
> > > > really inefficient for handling one single IO.
> > >
> > > It doesn't need pipe node allocation. We'd need to allocate
> > > space for pages, but again, there is a good io_uring infra
> > > for it without any single additional lock taken in most cases.
> >
> > Then it is same with this patchset.
> >
> > >
> > >
> > > > So re-using splice for this purpose is still bad not mention splice
> > > > can't support writeable spliced page.
> > > >
> > > > Wiring device io buffer with context registered buffer table looks like
> > > > another approach, however:
> > > >
> > > > 1) two uring command OPs for registering/unregistering this buffer in io fast
> > > > path has to be added since only userspace can know when buffer(reference)
> > > > isn't needed
> > >
> > > Yes, that's a good point. Registration replaces fuse master cmd, so it's
> > > one extra request for unregister, which might be fine.
> >
> > Unfortunately I don't think this way is good, the problem is that buffer
> > only has physical pages, and doesn't have userspace mapping, so why bother
> > to export it to userspace?
> >
> > As I replied to Ziyang, the current fused command can be extended to
> > this way easily, but I don't know why we need to use the buffer registration,
> > given userspace can't read/write the buffer, and fused command can cover
> > it just fine.
>
> I probably mentioned it before, but that's where we need a new memcpy
> io_uring request type, to partially copy it, e.g. headers. I think people
> mentioned memcpy before in general, and it will also be used for DMA driven
> copies if Keith returns back to experiments.
>
> Apart from it and things like broadcasting, sending different chunks to
> different places and so, there is a typical problem what to do when the
> second operation fails but the data has already been received, mostly
> relevant to sockets / streams.
OK, but the new copy OP can work with both fused command and buffer registration
if it is involved, and buffer register isn't a must given the interface
needs to support plain offset/len way.
>
> > > > 2) userspace becomes more complicated, 3+ OPs are required for handling one
> > > > single device IO
> > > >
> > > > 3) buffer reference crosses multiple OPs, for cleanup the registered buffer,
> > > > we have to store the device file & "buffer key" in each buffer(such as io_uring_bvec_buf)
> > > > for unregistering buffer
> > >
> > > It should not necessarily be a file.
> >
> > At least in ublk's case, from io_uring viewpoint, the buffer is owned by
> > ublk device, so we need the device node or file for releasing the
> > buffer.
>
> For example, io_uring has a lightweight way to pin the context
> (pcpu refcount). I haven't looked into ublk code, it's hard for
> me to argue about it.
The buffer(generic bio/bvec pages) is originated from generic application which
submits IO to /dev/ublkbN(block device), or page cache, and io_uring borrows the
buffer via uring command on /dev/ublkbcN(pair device of /dev/ublkbN).
>
> > > > 4) here the case is totally different with io_mapped_ubuf which isn't
> > > > related to any specific file, and just belong to io_uring context; however,
> > > > the device io buffer belongs to device(file) actually, so in theory it is wrong
> > > > to put it into context's registered buffer table, and supposed to put into
> > >
> > > Not at all, it doesn't belong to io_uring but rather to the user space,
> > > without a file, right, but io_uring still only borrowing it.
> >
> > How can one such buffer be owned by userspace? What if the userspace is
> > killed? If you think userspace can grab the buffer reference, that still
> > needs userspace to release the buffer, but that is unreliable, and
> > io_uring has to cover the buffer cleanup in case of userspace exit abnormally.
>
> Conceptually userspace owns buffers and io_uring is share / borrowing it.
> Probably, I misunderstood and you was talking about refcounting or something
> else. Can you elaborate? As for references, io_uring pins normal buffers
> and so holds additional refs.
We need one uring command on /dev/ublkcN to get the buffer, and the
buffer needs to be return back after we run OPs with the buffer.
Follows difference between the two approaches:
1) fused command
- the buffer lifetime is same with the primary command which is
completed after all secondary OPs are completed with the buffer,
so driver gets notified after we use the buffer which belongs to
/dev/ublkcN
2) buffer registration
- one uring command to get the buffer from /dev/ublkcN and register it
into io_uring
- submit OPs with the buffer
- unregister the buffer after all above OPs are done, which still needs
one uring command on /dev/ulkbcN
That is why I mentioned it is hard to handle buffer cleanup after
userspace exits abnormally with buffer registration.
>
> > Because buffer lifetime is crossing multiple OPs if you implement buffer
> > register/unregister OPs. And there isn't such issue for fused command
> > which has same lifetime with the buffer.
> >
> > >
> > > As for keeping files, I predict that it'll be there anyway in some time,
> > > some p2pdma experiments, dma preregistration, all required having a file
> > > attached to the buffer.
> > >
> > > > per-file buffer table which isn't supported by io_uring, or it becomes hard to
> > > > implement multiple-device io buffer in single context since 'file + buffer key'
> > > > has to be used to retrieve this buffer, probably xarray has to be
> > > > relied, but
> > >
> > > I was proposing to give slot selection to the userspace, perhaps with
> > > optional auto index allocation as it's done with registered files.
> >
> > As I mentioned above, it doesn't make sense to export buffer to
> > userspace which can't touch any data of the buffer at all.
>
> replied above.
buffer register isn't a must for new io_uring memcpy OP, we can just
copy with offset/len & the "buffer".
Thanks,
Ming
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-14 12:57 [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD Ming Lei
` (17 preceding siblings ...)
2023-03-17 8:14 ` Ming Lei
@ 2023-03-18 16:09 ` Jens Axboe
2023-03-18 17:01 ` Ming Lei
2023-03-21 15:56 ` Ming Lei
19 siblings, 1 reply; 49+ messages in thread
From: Jens Axboe @ 2023-03-18 16:09 UTC (permalink / raw)
To: Ming Lei, io-uring, linux-block
Cc: Miklos Szeredi, ZiyangZhang, Xiaoguang Wang, Bernd Schubert,
Pavel Begunkov
On 3/14/23 6:57?AM, Ming Lei wrote:
> Basically userspace can specify any sub-buffer of the ublk block request
> buffer from the fused command just by setting 'offset/len'
> in the slave SQE for running slave OP. This way is flexible to implement
> io mapping: mirror, striped, ...
>
> The 3rd & 4th patches enable fused slave support for the following OPs:
>
> OP_READ/OP_WRITE
> OP_SEND/OP_RECV/OP_SEND_ZC
>
> The other ublk patches cleans ublk driver and implement fused command
> for supporting zero copy.
>
> Follows userspace code:
>
> https://github.com/ming1/ubdsrv/tree/fused-cmd-zc-v2
Ran some quick testing here with qcow2. This is just done on my laptop
in kvm, so take them with a grain of salt, results may be better
elsewhere.
Baseline:
64k reads 98-100K IOPS 6-6.1GB/sec (ublk 100%, io_uring 9%)
4k reads 670-680K IOPS 2.6GB/sec (ublk 65%, io_uring 44%)
and with zerocopy enabled:
64k reads 184K IOPS 11.5GB/sec (ublk 91%, io_uring 12%)
4k reads 730K IOPS 2.8GB/sec (ublk 73%, io_uring 48%)
and with zerocopy and using SINGLE_ISSUER|COOP_TASKRUN for the ring:
64k reads 205K IOPS 12.8GB/sec (ublk 91%, io_uring 12%)
4k reads 730K IOPS 2.8GB/sec (ublk 66%, io_uring 42%)
Don't put too much into the CPU utilization numbers, they are just
indicative and not super accurate. But overall a nice win for larger
block sizes with zero copy. We seem to be IOPS limited on this
particular setup, which is most likely why 4k isn't showing any major
wins here. Eg running 8k with zero copy, I get the same IOPS limit, just
obviously doubling the bandwidth of the 4k run:
IOPS=732.26K, BW=5.72GiB/s, IOS/call=32/32
IOPS=733.38K, BW=5.73GiB/s, IOS/call=32/32
I also tried using DEFER_TASKRUN, but it stalls on setup. Most likely
something trivial, didn't poke any further at that.
--
Jens Axboe
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-18 16:09 ` Jens Axboe
@ 2023-03-18 17:01 ` Ming Lei
0 siblings, 0 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-18 17:01 UTC (permalink / raw)
To: Jens Axboe
Cc: io-uring, linux-block, Miklos Szeredi, ZiyangZhang,
Xiaoguang Wang, Bernd Schubert, Pavel Begunkov, ming.lei
On Sat, Mar 18, 2023 at 10:09:52AM -0600, Jens Axboe wrote:
> On 3/14/23 6:57 AM, Ming Lei wrote:
> > Basically userspace can specify any sub-buffer of the ublk block request
> > buffer from the fused command just by setting 'offset/len'
> > in the slave SQE for running slave OP. This way is flexible to implement
> > io mapping: mirror, striped, ...
> >
> > The 3rd & 4th patches enable fused slave support for the following OPs:
> >
> > OP_READ/OP_WRITE
> > OP_SEND/OP_RECV/OP_SEND_ZC
> >
> > The other ublk patches clean up the ublk driver and implement the fused
> > command for supporting zero copy.
> >
> > Follows userspace code:
> >
> > https://github.com/ming1/ubdsrv/tree/fused-cmd-zc-v2
>
> Ran some quick testing here with qcow2. This is just done on my laptop
> in kvm, so take them with a grain of salt, results may be better
> elsewhere.
>
> Baseline:
>
> 64k reads 98-100K IOPS 6-6.1GB/sec (ublk 100%, io_uring 9%)
> 4k reads 670-680K IOPS 2.6GB/sec (ublk 65%, io_uring 44%)
>
> and with zerocopy enabled:
>
> 64k reads 184K IOPS 11.5GB/sec (ublk 91%, io_uring 12%)
> 4k reads 730K IOPS 2.8GB/sec (ublk 73%, io_uring 48%)
There are other ways to observe the boost:
1) loop over file in tmpfs
- 1~2X in my test
2) nbd with local nbd server(nbdkit memory )
- less than 1X in my test
3) null
- which won't call into fused command, but can evaluate page copy cost
- 5+X in my test
>
> and with zerocopy and using SINGLE_ISSUER|COOP_TASKRUN for the ring:
>
> 64k reads 205K IOPS 12.8GB/sec (ublk 91%, io_uring 12%)
> 4k reads 730K IOPS 2.8GB/sec (ublk 66%, io_uring 42%)
Looks like SINGLE_ISSUER|COOP_TASKRUN can get a ~10% improvement; will look into it.
Thanks,
Ming
^ permalink raw reply [flat|nested] 49+ messages in thread
* Re: [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD
2023-03-14 12:57 [PATCH V3 00/16] io_uring/ublk: add IORING_OP_FUSED_CMD Ming Lei
` (18 preceding siblings ...)
2023-03-18 16:09 ` Jens Axboe
@ 2023-03-21 15:56 ` Ming Lei
19 siblings, 0 replies; 49+ messages in thread
From: Ming Lei @ 2023-03-21 15:56 UTC (permalink / raw)
To: Jens Axboe, io-uring, linux-block
Cc: Miklos Szeredi, ZiyangZhang, Xiaoguang Wang, Bernd Schubert,
Pavel Begunkov, Stefan Hajnoczi
On Tue, Mar 14, 2023 at 08:57:11PM +0800, Ming Lei wrote:
> Hello,
>
> Add IORING_OP_FUSED_CMD, it is one special URING_CMD, which has to
> be SQE128. The 1st SQE(master) is one 64byte URING_CMD, and the 2nd
> 64byte SQE(slave) is another normal 64byte OP. For any OP which needs
> to support slave OP, io_issue_defs[op].fused_slave needs to be set as 1,
> and its ->issue() can retrieve/import buffer from master request's
> fused_cmd_kbuf. The slave OP is actually submitted from kernel, part of
> this idea is from Xiaoguang's ublk ebpf patchset, but this patchset
> submits slave OP just like normal OP issued from userspace, that said,
> SQE order is kept, and batching handling is done too.
>
> Please see detailed design in commit log of the 2nd patch, and one big
> point is how to handle buffer ownership.
>
> With this way, it is easy to support zero copy for ublk/fuse device.
>
> Basically userspace can specify any sub-buffer of the ublk block request
> buffer from the fused command just by setting 'offset/len'
> in the slave SQE for running slave OP. This way is flexible to implement
> io mapping: mirror, striped, ...
>
> The 3rd & 4th patches enable fused slave support for the following OPs:
>
> OP_READ/OP_WRITE
> OP_SEND/OP_RECV/OP_SEND_ZC
>
> The other ublk patches clean up the ublk driver and implement the fused
> command for supporting zero copy.
>
> Follows userspace code:
>
> https://github.com/ming1/ubdsrv/tree/fused-cmd-zc-v2
>
> All three(loop, nbd and qcow2) ublk targets have supported zero copy by passing:
>
> ublk add -t [loop|nbd|qcow2] -z ....
>
> Basic fs mount/kernel building and builtin test are done, and also not
> observe regression on xfstest test over ublk-loop with zero copy.
>
> Also add liburing test case for covering fused command based on miniublk
> of blktest:
>
> https://github.com/ming1/liburing/commits/fused_cmd_miniublk
>
> Performance improvement is obvious on memory bandwidth
> related workloads, such as, 1~2X improvement on 64K/512K BS
> IO test on loop with ramfs backing file.
>
> Any comments are welcome!
>
> V3:
> - fix build warning reported by kernel test robot
> - drop patch for checking fused flags on existed drivers with
> ->uring_command(), which isn't necessary, since we do not do that
> when adding new ioctl or uring command
> - inline io_init_rq() for core code, so just export io_init_slave_req
> - return result of failed slave request unconditionally since REQ_F_CQE_SKIP
> will be cleared
> - pass xfstest over ublk-loop
BTW, I just wrote a ublk zero copy document, which describes the technical requirements
for this feature and explains why splice isn't a good fit and how the fused command solves it;
feel free to refer to it when working on a candidate approach.
https://github.com/ming1/linux/blob/my_v6.3-io_uring_fuse_cmd_v4/Documentation/block/ublk.rst#zero-copy
Which will be included in V4.
Thanks,
Ming
^ permalink raw reply [flat|nested] 49+ messages in thread