From: Joanne Koong <joannelkoong@gmail.com>
To: miklos@szeredi.hu, axboe@kernel.dk
Cc: bschubert@ddn.com, asml.silence@gmail.com,
io-uring@vger.kernel.org, csander@purestorage.com,
xiaobing.li@samsung.com, linux-fsdevel@vger.kernel.org
Subject: [PATCH v1 29/30] fuse: add zero-copy over io-uring
Date: Tue, 2 Dec 2025 16:35:24 -0800 [thread overview]
Message-ID: <20251203003526.2889477-30-joannelkoong@gmail.com> (raw)
In-Reply-To: <20251203003526.2889477-1-joannelkoong@gmail.com>
Implement zero-copy data transfer for fuse over io-uring, eliminating
memory copies between kernel and userspace for read/write operations.
This is only allowed on privileged servers and requires the server to
preregister the following:
a) a sparse buffer corresponding to the queue depth
b) a fixed buffer at index queue_depth (the tail of the buffers)
c) a kernel-managed buffer ring
The sparse buffer is where the client's pages reside. The fixed buffer
at the tail is where the headers (struct fuse_uring_req_header) are
placed. The kernel-managed buffer ring is where any non-zero-copied args
reside (e.g. out headers).
Benchmarks with bs=1M showed approximately the following differences in
throughput:
direct randreads: ~20% increase (~2100 MB/s -> ~2600 MB/s)
buffered randreads: ~25% increase (~1900 MB/s -> ~2400 MB/s)
direct randwrites: no difference (~750 MB/s)
buffered randwrites: ~10% increase (~950 MB/s -> ~1050 MB/s)
The benchmark was run using fio on the passthrough_hp server:
fio --name=test_run --ioengine=sync --rw=rand{read,write} --bs=1M
--size=1G --numjobs=2 --ramp_time=30 --group_reporting=1
Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
---
fs/fuse/dev.c | 7 +-
fs/fuse/dev_uring.c | 191 ++++++++++++++++++++++++++++++++------
fs/fuse/dev_uring_i.h | 12 +++
fs/fuse/fuse_dev_i.h | 1 +
include/uapi/linux/fuse.h | 5 +-
5 files changed, 187 insertions(+), 29 deletions(-)
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 7d39c80da554..0e9c9d006118 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1229,8 +1229,11 @@ int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
for (i = 0; !err && i < numargs; i++) {
struct fuse_arg *arg = &args[i];
- if (i == numargs - 1 && argpages)
- err = fuse_copy_folios(cs, arg->size, zeroing);
+ if (i == numargs - 1 && argpages) {
+ if (cs->skip_folio_copy)
+ return 0;
+ return fuse_copy_folios(cs, arg->size, zeroing);
+ }
else
err = fuse_copy_one(cs, arg->value, arg->size);
}
diff --git a/fs/fuse/dev_uring.c b/fs/fuse/dev_uring.c
index 3600892ba837..02846203960f 100644
--- a/fs/fuse/dev_uring.c
+++ b/fs/fuse/dev_uring.c
@@ -89,12 +89,19 @@ static void fuse_uring_flush_bg(struct fuse_ring_queue *queue)
}
}
+static bool can_zero_copy_req(struct fuse_ring_ent *ent, struct fuse_req *req)
+{
+ return ent->queue->use_zero_copy &&
+ (req->args->in_pages || req->args->out_pages);
+}
+
static void fuse_uring_req_end(struct fuse_ring_ent *ent, struct fuse_req *req,
- int error)
+ int error, unsigned issue_flags)
{
struct fuse_ring_queue *queue = ent->queue;
struct fuse_ring *ring = queue->ring;
struct fuse_conn *fc = ring->fc;
+ int err;
lockdep_assert_not_held(&queue->lock);
spin_lock(&queue->lock);
@@ -109,6 +116,13 @@ static void fuse_uring_req_end(struct fuse_ring_ent *ent, struct fuse_req *req,
spin_unlock(&queue->lock);
+ if (ent->zero_copied) {
+ err = io_buffer_unregister(ent->queue->ring_ctx,
+ ent->zero_copy_buf_id, issue_flags);
+ WARN_ON_ONCE(err);
+ ent->zero_copied = false;
+ }
+
if (error)
req->out.h.error = error;
@@ -198,6 +212,31 @@ bool fuse_uring_request_expired(struct fuse_conn *fc)
return false;
}
+static void fuse_uring_zero_copy_teardown(struct fuse_ring_ent *ent,
+ unsigned int issue_flags)
+{
+ struct fuse_ring_queue *queue = ent->queue;
+
+ spin_lock(&queue->lock);
+
+ if (queue->ring_killed) {
+ spin_unlock(&queue->lock);
+ return;
+ }
+
+ if (!percpu_ref_tryget_live(&queue->ring_ctx->refs)) {
+ spin_unlock(&queue->lock);
+ return;
+ }
+
+ spin_unlock(&queue->lock);
+
+ io_buffer_unregister(queue->ring_ctx, ent->zero_copy_buf_id,
+ issue_flags);
+
+ percpu_ref_put(&queue->ring_ctx->refs);
+}
+
static void fuse_uring_teardown_buffers(struct fuse_ring_queue *queue,
unsigned int issue_flags)
{
@@ -322,9 +361,12 @@ static void io_ring_killed(void *priv)
static int fuse_uring_buf_ring_setup(struct io_uring_cmd *cmd,
struct fuse_ring_queue *queue,
+ bool zero_copy,
unsigned int issue_flags)
{
struct io_ring_ctx *ring_ctx = cmd_to_io_kiocb(cmd)->ctx;
+ const struct fuse_uring_cmd_req *cmd_req;
+ u16 headers_index;
int err;
err = io_uring_buf_ring_pin(ring_ctx, FUSE_URING_RINGBUF_GROUP,
@@ -342,8 +384,24 @@ static int fuse_uring_buf_ring_setup(struct io_uring_cmd *cmd,
if (err)
goto error;
- err = io_uring_cmd_import_fixed_index(cmd,
- FUSE_URING_FIXED_HEADERS_INDEX,
+ if (zero_copy) {
+ err = -EINVAL;
+ if (!capable(CAP_SYS_ADMIN))
+ goto error;
+
+ queue->use_zero_copy = true;
+
+ cmd_req = io_uring_sqe_cmd(cmd->sqe);
+ queue->depth = READ_ONCE(cmd_req->init.queue_depth);
+ if (!queue->depth)
+ goto error;
+
+ headers_index = queue->depth;
+ } else {
+ headers_index = FUSE_URING_FIXED_HEADERS_INDEX;
+ }
+
+ err = io_uring_cmd_import_fixed_index(cmd, headers_index,
ITER_DEST, &queue->headers_iter,
issue_flags);
if (err) {
@@ -367,7 +425,8 @@ static int fuse_uring_buf_ring_setup(struct io_uring_cmd *cmd,
static struct fuse_ring_queue *
fuse_uring_create_queue(struct io_uring_cmd *cmd, struct fuse_ring *ring,
- int qid, bool use_bufring, unsigned int issue_flags)
+ int qid, bool use_bufring, bool zero_copy,
+ unsigned int issue_flags)
{
struct fuse_conn *fc = ring->fc;
struct fuse_ring_queue *queue;
@@ -399,12 +458,13 @@ fuse_uring_create_queue(struct io_uring_cmd *cmd, struct fuse_ring *ring,
fuse_pqueue_init(&queue->fpq);
if (use_bufring) {
- err = fuse_uring_buf_ring_setup(cmd, queue, issue_flags);
- if (err) {
- kfree(pq);
- kfree(queue);
- return ERR_PTR(err);
- }
+ err = fuse_uring_buf_ring_setup(cmd, queue, zero_copy,
+ issue_flags);
+ if (err)
+ goto cleanup;
+ } else if (zero_copy) {
+ err = -EINVAL;
+ goto cleanup;
}
spin_lock(&fc->lock);
@@ -422,6 +482,11 @@ fuse_uring_create_queue(struct io_uring_cmd *cmd, struct fuse_ring *ring,
spin_unlock(&fc->lock);
return queue;
+
+cleanup:
+ kfree(pq);
+ kfree(queue);
+ return ERR_PTR(err);
}
static void fuse_uring_stop_fuse_req_end(struct fuse_req *req)
@@ -466,6 +531,9 @@ static void fuse_uring_entry_teardown(struct fuse_ring_ent *ent)
if (req)
fuse_uring_stop_fuse_req_end(req);
+
+ if (ent->zero_copied)
+ fuse_uring_zero_copy_teardown(ent, IO_URING_F_UNLOCKED);
}
static void fuse_uring_stop_list_entries(struct list_head *head,
@@ -831,6 +899,7 @@ static int setup_fuse_copy_state(struct fuse_copy_state *cs,
cs->is_kaddr = true;
cs->len = ent->payload_kvec.iov_len;
cs->kaddr = ent->payload_kvec.iov_base;
+ cs->skip_folio_copy = can_zero_copy_req(ent, req);
}
cs->is_uring = true;
@@ -863,11 +932,56 @@ static int fuse_uring_copy_from_ring(struct fuse_ring *ring,
return err;
}
+
+static int fuse_uring_set_up_zero_copy(struct fuse_ring_ent *ent,
+ struct fuse_req *req,
+ unsigned issue_flags)
+{
+ struct fuse_args_pages *ap;
+ size_t total_bytes = 0;
+ u16 buf_index;
+ struct bio_vec *bvs;
+ int err, ddir, i;
+
+ buf_index = ent->zero_copy_buf_id;
+
+ /* out_pages indicates a read, in_pages indicates a write */
+ ddir = req->args->out_pages ? ITER_DEST : ITER_SOURCE;
+
+ ap = container_of(req->args, typeof(*ap), args);
+
+ /*
+ * We can avoid having to allocate the bvs array when folios and
+ * descriptors are represented by bvecs in fuse
+ */
+ bvs = kcalloc(ap->num_folios, sizeof(*bvs), GFP_KERNEL_ACCOUNT);
+ if (!bvs)
+ return -ENOMEM;
+
+ for (i = 0; i < ap->num_folios; i++) {
+ total_bytes += ap->descs[i].length;
+ bvs[i].bv_page = folio_page(ap->folios[i], 0);
+ bvs[i].bv_offset = ap->descs[i].offset;
+ bvs[i].bv_len = ap->descs[i].length;
+ }
+
+ err = io_buffer_register_bvec(ent->queue->ring_ctx, bvs, ap->num_folios,
+ total_bytes, ddir, buf_index, issue_flags);
+ kfree(bvs);
+ if (err)
+ return err;
+
+ ent->zero_copied = true;
+
+ return 0;
+}
+
/*
* Copy data from the req to the ring buffer
*/
static int fuse_uring_args_to_ring(struct fuse_ring *ring, struct fuse_req *req,
- struct fuse_ring_ent *ent)
+ struct fuse_ring_ent *ent,
+ unsigned int issue_flags)
{
struct fuse_copy_state cs;
struct fuse_args *args = req->args;
@@ -900,6 +1014,11 @@ static int fuse_uring_args_to_ring(struct fuse_ring *ring, struct fuse_req *req,
num_args--;
}
+ if (can_zero_copy_req(ent, req)) {
+ err = fuse_uring_set_up_zero_copy(ent, req, issue_flags);
+ if (err)
+ return err;
+ }
/* copy the payload */
err = fuse_copy_args(&cs, num_args, args->in_pages,
(struct fuse_arg *)in_args, 0);
@@ -910,12 +1029,17 @@ static int fuse_uring_args_to_ring(struct fuse_ring *ring, struct fuse_req *req,
}
ent_in_out.payload_sz = cs.ring.copied_sz;
+ if (cs.skip_folio_copy && args->in_pages)
+ ent_in_out.payload_sz +=
+ args->in_args[args->in_numargs - 1].size;
+
return copy_header_to_ring(ent, FUSE_URING_HEADER_RING_ENT,
&ent_in_out, sizeof(ent_in_out));
}
static int fuse_uring_copy_to_ring(struct fuse_ring_ent *ent,
- struct fuse_req *req)
+ struct fuse_req *req,
+ unsigned int issue_flags)
{
struct fuse_ring_queue *queue = ent->queue;
struct fuse_ring *ring = queue->ring;
@@ -933,7 +1057,7 @@ static int fuse_uring_copy_to_ring(struct fuse_ring_ent *ent,
return err;
/* copy the request */
- err = fuse_uring_args_to_ring(ring, req, ent);
+ err = fuse_uring_args_to_ring(ring, req, ent, issue_flags);
if (unlikely(err)) {
pr_info_ratelimited("Copy to ring failed: %d\n", err);
return err;
@@ -944,11 +1068,20 @@ static int fuse_uring_copy_to_ring(struct fuse_ring_ent *ent,
sizeof(req->in.h));
}
-static bool fuse_uring_req_has_payload(struct fuse_req *req)
+static bool fuse_uring_req_has_copyable_payload(struct fuse_ring_ent *ent,
+ struct fuse_req *req)
{
struct fuse_args *args = req->args;
- return args->in_numargs > 1 || args->out_numargs;
+ if (!can_zero_copy_req(ent, req))
+ return args->in_numargs > 1 || args->out_numargs;
+
+ if ((args->in_numargs > 1) && (!args->in_pages || args->in_numargs > 2))
+ return true;
+ if (args->out_numargs && (!args->out_pages || args->out_numargs > 1))
+ return true;
+
+ return false;
}
static int fuse_uring_select_buffer(struct fuse_ring_ent *ent,
@@ -1014,7 +1147,7 @@ static int fuse_uring_next_req_update_buffer(struct fuse_ring_ent *ent,
ent->headers_iter.data_source = false;
buffer_selected = ent->payload_kvec.iov_base != 0;
- has_payload = fuse_uring_req_has_payload(req);
+ has_payload = fuse_uring_req_has_copyable_payload(ent, req);
if (has_payload && !buffer_selected)
return fuse_uring_select_buffer(ent, issue_flags);
@@ -1040,22 +1173,23 @@ static int fuse_uring_prep_buffer(struct fuse_ring_ent *ent,
ent->headers_iter.data_source = false;
/* no payload to copy, can skip selecting a buffer */
- if (!fuse_uring_req_has_payload(req))
+ if (!fuse_uring_req_has_copyable_payload(ent, req))
return 0;
return fuse_uring_select_buffer(ent, issue_flags);
}
static int fuse_uring_prepare_send(struct fuse_ring_ent *ent,
- struct fuse_req *req)
+ struct fuse_req *req,
+ unsigned int issue_flags)
{
int err;
- err = fuse_uring_copy_to_ring(ent, req);
+ err = fuse_uring_copy_to_ring(ent, req, issue_flags);
if (!err)
set_bit(FR_SENT, &req->flags);
else
- fuse_uring_req_end(ent, req, err);
+ fuse_uring_req_end(ent, req, err, issue_flags);
return err;
}
@@ -1158,7 +1292,7 @@ static void fuse_uring_commit(struct fuse_ring_ent *ent, struct fuse_req *req,
err = fuse_uring_copy_from_ring(ring, req, ent);
out:
- fuse_uring_req_end(ent, req, err);
+ fuse_uring_req_end(ent, req, err, issue_flags);
}
/*
@@ -1181,7 +1315,7 @@ static bool fuse_uring_get_next_fuse_req(struct fuse_ring_ent *ent,
spin_unlock(&queue->lock);
if (req) {
- err = fuse_uring_prepare_send(ent, req);
+ err = fuse_uring_prepare_send(ent, req, issue_flags);
if (err)
goto retry;
}
@@ -1284,7 +1418,7 @@ static int fuse_uring_commit_fetch(struct io_uring_cmd *cmd, int issue_flags,
err = fuse_uring_prep_buffer(ent, req, ITER_SOURCE, issue_flags);
if (WARN_ON_ONCE(err))
- fuse_uring_req_end(ent, req, err);
+ fuse_uring_req_end(ent, req, err, issue_flags);
else
fuse_uring_commit(ent, req, issue_flags);
@@ -1409,6 +1543,9 @@ fuse_uring_create_ring_ent(struct io_uring_cmd *cmd,
buf_index = READ_ONCE(cmd->sqe->buf_index);
+ if (queue->use_zero_copy)
+ ent->zero_copy_buf_id = buf_index;
+
/* set up the headers */
ent->headers_iter = queue->headers_iter;
iov_iter_advance(&ent->headers_iter, buf_index * header_size);
@@ -1459,6 +1596,7 @@ static int fuse_uring_register(struct io_uring_cmd *cmd,
{
const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe_cmd(cmd->sqe);
bool use_bufring = READ_ONCE(cmd_req->init.use_bufring);
+ bool zero_copy = READ_ONCE(cmd_req->init.zero_copy);
struct fuse_ring *ring = smp_load_acquire(&fc->ring);
struct fuse_ring_queue *queue;
struct fuse_ring_ent *ent;
@@ -1480,11 +1618,12 @@ static int fuse_uring_register(struct io_uring_cmd *cmd,
queue = ring->queues[qid];
if (!queue) {
queue = fuse_uring_create_queue(cmd, ring, qid, use_bufring,
- issue_flags);
+ zero_copy, issue_flags);
if (IS_ERR(queue))
return PTR_ERR(queue);
} else {
- if (queue->use_bufring != use_bufring)
+ if ((queue->use_bufring != use_bufring) ||
+ (queue->use_zero_copy != zero_copy))
return -EINVAL;
}
@@ -1587,7 +1726,7 @@ static void fuse_uring_send_in_task(struct io_tw_req tw_req, io_tw_token_t tw)
int err;
if (!tw.cancel) {
- err = fuse_uring_prepare_send(ent, ent->fuse_req);
+ err = fuse_uring_prepare_send(ent, ent->fuse_req, issue_flags);
if (err) {
if (!fuse_uring_get_next_fuse_req(ent, queue,
issue_flags))
diff --git a/fs/fuse/dev_uring_i.h b/fs/fuse/dev_uring_i.h
index a8a849c3497e..3398b43fb1df 100644
--- a/fs/fuse/dev_uring_i.h
+++ b/fs/fuse/dev_uring_i.h
@@ -56,6 +56,11 @@ struct fuse_ring_ent {
* the buffer when done with it
*/
unsigned int ringbuf_buf_id;
+
+ /* True if the request's pages are being zero-copied */
+ bool zero_copied;
+ /* Buf id for this ent's zero-copied pages */
+ unsigned int zero_copy_buf_id;
};
};
@@ -128,6 +133,13 @@ struct fuse_ring_queue {
struct iov_iter headers_iter;
/* synchronized by the queue lock */
struct io_buffer_list *bufring;
+ /*
+ * True if zero copy should be used for payloads. This is only enabled
+ * on privileged servers. Kernel-managed ring buffers must be enabled
+ * in order to use zero copy.
+ */
+ bool use_zero_copy : 1;
+ unsigned int depth;
};
/**
diff --git a/fs/fuse/fuse_dev_i.h b/fs/fuse/fuse_dev_i.h
index aa1d25421054..67b5bed451fe 100644
--- a/fs/fuse/fuse_dev_i.h
+++ b/fs/fuse/fuse_dev_i.h
@@ -39,6 +39,7 @@ struct fuse_copy_state {
bool is_uring:1;
/* if set, use kaddr; otherwise use pg */
bool is_kaddr:1;
+ bool skip_folio_copy:1;
struct {
unsigned int copied_sz; /* copied size into the user buffer */
} ring;
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 3041177e3dd8..c98ea7a4ddde 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -243,6 +243,7 @@
*
* 7.46
* - add fuse_uring_cmd_req use_bufring
+ * - add fuse_uring_cmd_req zero_copy and queue_depth
*/
#ifndef _LINUX_FUSE_H
@@ -1312,10 +1313,12 @@ struct fuse_uring_cmd_req {
union {
struct {
bool use_bufring;
+ bool zero_copy;
+ uint16_t queue_depth;
} init;
};
- uint8_t padding[5];
+ uint8_t padding[2];
};
#endif /* _LINUX_FUSE_H */
--
2.47.3
next prev parent reply other threads:[~2025-12-03 0:37 UTC|newest]
Thread overview: 60+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-12-03 0:34 [PATCH v1 00/30] fuse/io-uring: add kernel-managed buffer rings and zero-copy Joanne Koong
2025-12-03 0:34 ` [PATCH v1 01/30] io_uring/kbuf: refactor io_buf_pbuf_register() logic into generic helpers Joanne Koong
2025-12-03 0:34 ` [PATCH v1 02/30] io_uring/kbuf: rename io_unregister_pbuf_ring() to io_unregister_buf_ring() Joanne Koong
2025-12-03 0:34 ` [PATCH v1 03/30] io_uring/kbuf: add support for kernel-managed buffer rings Joanne Koong
2025-12-03 0:34 ` [PATCH v1 04/30] io_uring/kbuf: add mmap " Joanne Koong
2025-12-03 0:35 ` [PATCH v1 05/30] io_uring/kbuf: support kernel-managed buffer rings in buffer selection Joanne Koong
2025-12-03 0:35 ` [PATCH v1 06/30] io_uring/kbuf: add buffer ring pinning/unpinning Joanne Koong
2025-12-03 4:13 ` Caleb Sander Mateos
2025-12-04 18:41 ` Joanne Koong
2025-12-03 0:35 ` [PATCH v1 07/30] io_uring/rsrc: add fixed buffer table pinning/unpinning Joanne Koong
2025-12-03 4:49 ` Caleb Sander Mateos
2025-12-03 22:52 ` Joanne Koong
2025-12-04 1:24 ` Caleb Sander Mateos
2025-12-04 20:07 ` Joanne Koong
2025-12-10 3:35 ` Caleb Sander Mateos
2025-12-13 6:07 ` Joanne Koong
2025-12-03 0:35 ` [PATCH v1 08/30] io_uring/kbuf: add recycling for pinned kernel managed buffer rings Joanne Koong
2025-12-03 0:35 ` [PATCH v1 09/30] io_uring: add io_uring_cmd_import_fixed_index() Joanne Koong
2025-12-03 21:43 ` Caleb Sander Mateos
2025-12-04 18:56 ` Joanne Koong
2025-12-05 16:56 ` Caleb Sander Mateos
2025-12-05 23:28 ` Joanne Koong
2025-12-11 2:57 ` Caleb Sander Mateos
2025-12-03 0:35 ` [PATCH v1 10/30] io_uring/kbuf: add io_uring_is_kmbuf_ring() Joanne Koong
2025-12-03 0:35 ` [PATCH v1 11/30] io_uring/kbuf: return buffer id in buffer selection Joanne Koong
2025-12-03 21:53 ` Caleb Sander Mateos
2025-12-04 19:22 ` Joanne Koong
2025-12-04 21:57 ` Caleb Sander Mateos
2025-12-03 0:35 ` [PATCH v1 12/30] io_uring/kbuf: export io_ring_buffer_select() Joanne Koong
2025-12-03 0:35 ` [PATCH v1 13/30] io_uring/cmd: set selected buffer index in __io_uring_cmd_done() Joanne Koong
2025-12-03 0:35 ` [PATCH v1 14/30] io_uring: add release callback for ring death Joanne Koong
2025-12-03 22:25 ` Caleb Sander Mateos
2025-12-03 22:54 ` Joanne Koong
2025-12-03 0:35 ` [PATCH v1 15/30] fuse: refactor io-uring logic for getting next fuse request Joanne Koong
2025-12-03 0:35 ` [PATCH v1 16/30] fuse: refactor io-uring header copying to ring Joanne Koong
2025-12-03 0:35 ` [PATCH v1 17/30] fuse: refactor io-uring header copying from ring Joanne Koong
2025-12-03 0:35 ` [PATCH v1 18/30] fuse: use enum types for header copying Joanne Koong
2025-12-03 0:35 ` [PATCH v1 19/30] fuse: refactor setting up copy state for payload copying Joanne Koong
2025-12-03 0:35 ` [PATCH v1 20/30] fuse: support buffer copying for kernel addresses Joanne Koong
2025-12-03 0:35 ` [PATCH v1 21/30] fuse: add io-uring kernel-managed buffer ring Joanne Koong
2025-12-03 0:35 ` [PATCH v1 22/30] io_uring/rsrc: refactor io_buffer_register_bvec()/io_buffer_unregister_bvec() Joanne Koong
2025-12-07 8:33 ` Caleb Sander Mateos
2025-12-13 5:11 ` Joanne Koong
2025-12-16 3:07 ` Caleb Sander Mateos
2025-12-03 0:35 ` [PATCH v1 23/30] io_uring/rsrc: split io_buffer_register_request() logic Joanne Koong
2025-12-07 8:41 ` Caleb Sander Mateos
2025-12-13 5:24 ` Joanne Koong
2025-12-15 17:09 ` Caleb Sander Mateos
2025-12-03 0:35 ` [PATCH v1 24/30] io_uring/rsrc: Allow buffer release callback to be optional Joanne Koong
2025-12-07 8:42 ` Caleb Sander Mateos
2025-12-03 0:35 ` [PATCH v1 25/30] io_uring/rsrc: add io_buffer_register_bvec() Joanne Koong
2025-12-03 0:35 ` [PATCH v1 26/30] io_uring/rsrc: export io_buffer_unregister Joanne Koong
2025-12-03 0:35 ` [PATCH v1 27/30] fuse: rename fuse_set_zero_arg0() to fuse_zero_in_arg0() Joanne Koong
2025-12-03 0:35 ` [PATCH v1 28/30] fuse: enforce op header for every payload reply Joanne Koong
2025-12-03 0:35 ` Joanne Koong [this message]
2025-12-03 0:35 ` [PATCH v1 30/30] docs: fuse: add io-uring bufring and zero-copy documentation Joanne Koong
2025-12-13 7:52 ` Askar Safin
2025-12-15 3:18 ` Joanne Koong
2025-12-13 9:14 ` [PATCH v1 00/30] fuse/io-uring: add kernel-managed buffer rings and zero-copy Askar Safin
2025-12-15 3:24 ` Joanne Koong
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20251203003526.2889477-30-joannelkoong@gmail.com \
--to=joannelkoong@gmail.com \
--cc=asml.silence@gmail.com \
--cc=axboe@kernel.dk \
--cc=bschubert@ddn.com \
--cc=csander@purestorage.com \
--cc=io-uring@vger.kernel.org \
--cc=linux-fsdevel@vger.kernel.org \
--cc=miklos@szeredi.hu \
--cc=xiaobing.li@samsung.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox