From: Bernd Schubert <[email protected]>
To: Miklos Szeredi <[email protected]>
Cc: Jens Axboe <[email protected]>,
Pavel Begunkov <[email protected]>,
[email protected], [email protected],
Joanne Koong <[email protected]>,
Josef Bacik <[email protected]>,
Amir Goldstein <[email protected]>,
Ming Lei <[email protected]>, David Wei <[email protected]>,
[email protected], Bernd Schubert <[email protected]>
Subject: [PATCH RFC v7 11/16] fuse: {uring} Allow to queue fg requests through io-uring
Date: Wed, 27 Nov 2024 14:40:28 +0100
Message-ID: <20241127-fuse-uring-for-6-10-rfc4-v7-11-934b3a69baca@ddn.com>
In-Reply-To: <[email protected]>
This prepares queueing and sending foreground requests through
io-uring.
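
Once every ring queue has at least one entry registered, the ring is
marked ready and the connection's fuse_iqueue_ops are switched to
fuse_io_uring_ops. Foreground requests are then queued to the ring
queue corresponding to the submitting task's CPU and, if a ring entry
is available, handed to the fuse server from the ring task context via
io_uring_cmd_complete_in_task(). Forget and interrupt messages keep
going through the /dev/fuse queues for now.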
Signed-off-by: Bernd Schubert <[email protected]>
---
fs/fuse/dev.c | 5 +-
fs/fuse/dev_uring.c | 159 ++++++++++++++++++++++++++++++++++++++++++++++++++
fs/fuse/dev_uring_i.h | 8 +++
fs/fuse/fuse_dev_i.h | 5 ++
4 files changed, 175 insertions(+), 2 deletions(-)
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index ed85a5f2d6c360b8401b174bc97cc135d87e90d9..c53deb690cc9c7958741cd144fcad166b5721e11 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -237,7 +237,8 @@ __releases(fiq->lock)
spin_unlock(&fiq->lock);
}
-static void fuse_dev_queue_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *forget)
+void fuse_dev_queue_forget(struct fuse_iqueue *fiq,
+ struct fuse_forget_link *forget)
{
spin_lock(&fiq->lock);
if (fiq->connected) {
@@ -250,7 +251,7 @@ static void fuse_dev_queue_forget(struct fuse_iqueue *fiq, struct fuse_forget_li
}
}
-static void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
+void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
spin_lock(&fiq->lock);
if (list_empty(&req->intr_entry)) {
diff --git a/fs/fuse/dev_uring.c b/fs/fuse/dev_uring.c
index b5bb78b5f5902c8b87f3e196baaa05640380046d..b1c56ccf828ec2d4cd921906fb42901fefcc6cc5 100644
--- a/fs/fuse/dev_uring.c
+++ b/fs/fuse/dev_uring.c
@@ -24,6 +24,12 @@ bool fuse_uring_enabled(void)
return enable_uring;
}
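+/* Per-command context, stored in the io_uring_cmd pdu area (cmd->pdu) */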
+struct fuse_uring_cmd_pdu {
+ struct fuse_ring_ent *ring_ent;
+};
+
+const struct fuse_iqueue_ops fuse_io_uring_ops;
+
static void fuse_uring_req_end(struct fuse_ring_ent *ring_ent, bool set_err,
int error)
{
@@ -773,6 +779,31 @@ static int fuse_uring_commit_fetch(struct io_uring_cmd *cmd, int issue_flags,
return 0;
}
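+/*
+ * Check whether every queue of the ring has at least one ring entry
+ * available, i.e. the fuse server has registered entries on all queues.
+ * Only then is the ring ready to take over request queueing.
+ */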
+static bool is_ring_ready(struct fuse_ring *ring, int current_qid)
+{
+ int qid;
+ struct fuse_ring_queue *queue;
+ bool ready = true;
+
+ for (qid = 0; qid < ring->nr_queues && ready; qid++) {
+ if (current_qid == qid)
+ continue;
+
+ queue = ring->queues[qid];
+ if (!queue) {
+ ready = false;
+ break;
+ }
+
+ spin_lock(&queue->lock);
+ if (list_empty(&queue->ent_avail_queue))
+ ready = false;
+ spin_unlock(&queue->lock);
+ }
+
+ return ready;
+}
+
/*
* fuse_uring_req_fetch command handling
*/
@@ -781,11 +812,23 @@ static void _fuse_uring_fetch(struct fuse_ring_ent *ring_ent,
unsigned int issue_flags)
{
struct fuse_ring_queue *queue = ring_ent->queue;
+ struct fuse_ring *ring = queue->ring;
+ struct fuse_conn *fc = ring->fc;
+ struct fuse_iqueue *fiq = &fc->iq;
spin_lock(&queue->lock);
fuse_uring_ent_avail(ring_ent, queue);
ring_ent->cmd = cmd;
spin_unlock(&queue->lock);
+
+ if (!ring->ready) {
+ bool ready = is_ring_ready(ring, queue->qid);
+
+ if (ready) {
+ WRITE_ONCE(ring->ready, true);
+ fiq->ops = &fuse_io_uring_ops;
+ }
+ }
}
/*
@@ -945,3 +988,119 @@ int fuse_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
return -EIOCBQUEUED;
}
+
+/*
+ * This prepares and sends the ring request in fuse-uring task context.
+ * User buffers are not mapped yet - the application does not have permission
+ * to write to them - this has to be executed in ring task context.
+ */
+static void
+fuse_uring_send_req_in_task(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ struct fuse_uring_cmd_pdu *pdu = (struct fuse_uring_cmd_pdu *)cmd->pdu;
+ struct fuse_ring_ent *ring_ent = pdu->ring_ent;
+ struct fuse_ring_queue *queue = ring_ent->queue;
+ int err;
+
+ BUILD_BUG_ON(sizeof(*pdu) > sizeof(cmd->pdu));
+
+ err = fuse_uring_prepare_send(ring_ent);
+ if (err)
+ goto err;
+
+ io_uring_cmd_done(cmd, 0, 0, issue_flags);
+
+ spin_lock(&queue->lock);
+ ring_ent->state = FRRS_USERSPACE;
+ list_move(&ring_ent->list, &queue->ent_in_userspace);
+ spin_unlock(&queue->lock);
+ return;
+err:
+ fuse_uring_next_fuse_req(ring_ent, queue);
+}
+
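+/*
+ * Select the ring queue for the submitting task. Queues are per CPU,
+ * so the CPU the task is currently running on is used as queue id.
+ */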
+static struct fuse_ring_queue *fuse_uring_task_to_queue(struct fuse_ring *ring)
+{
+ unsigned int qid;
+ struct fuse_ring_queue *queue;
+
+ qid = task_cpu(current);
+
+ if (WARN_ONCE(qid >= ring->nr_queues,
+ "Core number (%u) exceeds nr ueues (%zu)\n", qid,
+ ring->nr_queues))
+ qid = 0;
+
+ queue = ring->queues[qid];
+ if (WARN_ONCE(!queue, "Missing queue for qid %u\n", qid))
+ return NULL;
+
+ return queue;
+}
+
+/* queue a fuse request and send it if a ring entry is available */
+void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req)
+{
+ struct fuse_conn *fc = req->fm->fc;
+ struct fuse_ring *ring = fc->ring;
+ struct fuse_ring_queue *queue;
+ struct fuse_ring_ent *ring_ent = NULL;
+ int err;
+
+ err = -EINVAL;
+ queue = fuse_uring_task_to_queue(ring);
+ if (!queue)
+ goto err;
+
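+ /* notify_reply requests already have their unique id set */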
+ if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
+ req->in.h.unique = fuse_get_unique(fiq);
+ spin_lock(&queue->lock);
+ err = -ENOTCONN;
+ if (unlikely(queue->stopped))
+ goto err_unlock;
+
+ if (!list_empty(&queue->ent_avail_queue)) {
+ ring_ent = list_first_entry(&queue->ent_avail_queue,
+ struct fuse_ring_ent, list);
+
+ fuse_uring_add_req_to_ring_ent(ring_ent, req);
+ } else {
+ list_add_tail(&req->list, &queue->fuse_req_queue);
+ }
+ spin_unlock(&queue->lock);
+
+ if (ring_ent) {
+ struct io_uring_cmd *cmd = ring_ent->cmd;
+ struct fuse_uring_cmd_pdu *pdu =
+ (struct fuse_uring_cmd_pdu *)cmd->pdu;
+
+ err = -EIO;
+ if (WARN_ON_ONCE(ring_ent->state != FRRS_FUSE_REQ))
+ goto err;
+
+ pdu->ring_ent = ring_ent;
+ io_uring_cmd_complete_in_task(cmd, fuse_uring_send_req_in_task);
+ }
+
+ return;
+
+err_unlock:
+ spin_unlock(&queue->lock);
+err:
+ req->out.h.error = err;
+ clear_bit(FR_PENDING, &req->flags);
+ fuse_request_end(req);
+}
+
+const struct fuse_iqueue_ops fuse_io_uring_ops = {
+ /* should be sent over io-uring as an enhancement */
+ .send_forget = fuse_dev_queue_forget,
+
+ /*
+ * could be sent over io-uring, but interrupts should be rare,
+ * no need to make the code complex
+ */
+ .send_interrupt = fuse_dev_queue_interrupt,
+ .send_req = fuse_uring_queue_fuse_req,
+};
diff --git a/fs/fuse/dev_uring_i.h b/fs/fuse/dev_uring_i.h
index 52259714ffc59a38b21f834ae5e317fe818863dc..19867d27894f9d985e224111ea586c82b4b4cfe8 100644
--- a/fs/fuse/dev_uring_i.h
+++ b/fs/fuse/dev_uring_i.h
@@ -119,6 +119,8 @@ struct fuse_ring {
unsigned long teardown_time;
atomic_t queue_refs;
+
+ bool ready;
};
bool fuse_uring_enabled(void);
@@ -126,6 +128,7 @@ void fuse_uring_destruct(struct fuse_conn *fc);
void fuse_uring_stop_queues(struct fuse_ring *ring);
void fuse_uring_abort_end_requests(struct fuse_ring *ring);
int fuse_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
+void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req);
static inline void fuse_uring_abort(struct fuse_conn *fc)
{
@@ -149,6 +152,11 @@ static inline void fuse_uring_wait_stopped_queues(struct fuse_conn *fc)
atomic_read(&ring->queue_refs) == 0);
}
+static inline bool fuse_uring_ready(struct fuse_conn *fc)
+{
+ return fc->ring && fc->ring->ready;
+}
+
#else /* CONFIG_FUSE_IO_URING */
struct fuse_ring;
diff --git a/fs/fuse/fuse_dev_i.h b/fs/fuse/fuse_dev_i.h
index d7bf72dabd84c3896d1447380649e2f4d20b0643..1d1c1e9848fba8dae46651e28809f73e165e74fe 100644
--- a/fs/fuse/fuse_dev_i.h
+++ b/fs/fuse/fuse_dev_i.h
@@ -17,6 +17,8 @@ struct fuse_arg;
struct fuse_args;
struct fuse_pqueue;
struct fuse_req;
+struct fuse_iqueue;
+struct fuse_forget_link;
struct fuse_copy_state {
int write;
@@ -58,6 +60,9 @@ int fuse_copy_args(struct fuse_copy_state *cs, unsigned int numargs,
int zeroing);
int fuse_copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
unsigned int nbytes);
+void fuse_dev_queue_forget(struct fuse_iqueue *fiq,
+ struct fuse_forget_link *forget);
+void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req);
#endif
--
2.43.0
Thread overview: 37+ messages
2024-11-27 13:40 [PATCH RFC v7 00/16] fuse: fuse-over-io-uring Bernd Schubert
2024-11-27 13:40 ` [PATCH RFC v7 01/16] fuse: rename to fuse_dev_end_requests and make non-static Bernd Schubert
2024-11-28 0:19 ` Joanne Koong
2024-11-27 13:40 ` [PATCH RFC v7 02/16] fuse: Move fuse_get_dev to header file Bernd Schubert
2024-11-28 0:20 ` Joanne Koong
2024-11-27 13:40 ` [PATCH RFC v7 03/16] fuse: Move request bits Bernd Schubert
2024-11-28 0:21 ` Joanne Koong
2024-11-27 13:40 ` [PATCH RFC v7 04/16] fuse: Add fuse-io-uring design documentation Bernd Schubert
2024-12-03 12:30 ` Pavel Begunkov
2024-11-27 13:40 ` [PATCH RFC v7 05/16] fuse: make args->in_args[0] to be always the header Bernd Schubert
2024-11-28 0:27 ` Joanne Koong
2024-11-27 13:40 ` [PATCH RFC v7 06/16] fuse: {uring} Handle SQEs - register commands Bernd Schubert
2024-11-28 2:23 ` Joanne Koong
2024-11-28 18:20 ` Bernd Schubert
2024-12-03 13:24 ` Pavel Begunkov
2024-12-03 13:49 ` Bernd Schubert
2024-12-03 14:16 ` Pavel Begunkov
2024-12-03 13:38 ` Pavel Begunkov
2024-11-27 13:40 ` [PATCH RFC v7 07/16] fuse: Make fuse_copy non static Bernd Schubert
2024-11-27 13:40 ` [PATCH RFC v7 08/16] fuse: Add fuse-io-uring handling into fuse_copy Bernd Schubert
2024-11-27 13:40 ` [PATCH RFC v7 09/16] fuse: {uring} Add uring sqe commit and fetch support Bernd Schubert
2024-12-03 13:47 ` Pavel Begunkov
2024-11-27 13:40 ` [PATCH RFC v7 10/16] fuse: {uring} Handle teardown of ring entries Bernd Schubert
2024-11-27 13:40 ` Bernd Schubert [this message]
2024-12-03 14:09 ` [PATCH RFC v7 11/16] fuse: {uring} Allow to queue fg requests through io-uring Pavel Begunkov
2024-12-03 22:46 ` Bernd Schubert
2024-11-27 13:40 ` [PATCH RFC v7 12/16] fuse: {uring} Allow to queue bg " Bernd Schubert
2024-11-27 13:40 ` [PATCH RFC v7 13/16] io_uring/cmd: let cmds to know about dying task Bernd Schubert
2024-12-03 12:15 ` Pavel Begunkov
2024-12-03 12:15 ` Bernd Schubert
2024-11-27 13:40 ` [PATCH RFC v7 14/16] fuse: {uring} Handle IO_URING_F_TASK_DEAD Bernd Schubert
2024-12-03 12:20 ` Pavel Begunkov
2024-11-27 13:40 ` [PATCH RFC v7 15/16] fuse: {io-uring} Prevent mount point hang on fuse-server termination Bernd Schubert
2024-11-27 13:40 ` [PATCH RFC v7 16/16] fuse: enable fuse-over-io-uring Bernd Schubert
2024-11-27 13:45 ` [PATCH RFC v7 00/16] fuse: fuse-over-io-uring Bernd Schubert
2024-12-03 14:24 ` Pavel Begunkov
2024-12-03 14:32 ` Bernd Schubert