From: Pavel Begunkov <[email protected]>
To: Jens Axboe <[email protected]>,
[email protected], [email protected]
Subject: [PATCH v3 2/6] io_uring: place io_submit_state into ctx
Date: Sat, 1 Feb 2020 01:15:51 +0300
Message-ID: <fe5b88db210d7925308b42bd67ebf0efa269d378.1580508735.git.asml.silence@gmail.com>
In-Reply-To: <[email protected]>
io_submit_state is used only during submission and with ctx->uring_lock
held, so only one instance is in use at a time. Move it into
struct io_ring_ctx, so it:
- doesn't consume on-stack memory
- persists across io_uring_enter
- is available without being passed through the call stack
The last point is particularly useful for letting opcode handlers manage
their resources themselves, as splice would. It also lays the groundwork
for further optimisations (see the illustrative sketch below).
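For illustration only (io_example_grab_file() is a made-up name and not
part of this patch), a handler could now reach the cached state straight
from req->ctx, provided ctx->uring_lock is held as on the submission path:

	/*
	 * Hypothetical sketch: reuse the cached file reference kept in
	 * the per-ring submit state instead of re-resolving the fd.
	 * Caller must hold ctx->uring_lock.
	 */
	static struct file *io_example_grab_file(struct io_kiocb *req, int fd)
	{
		struct io_submit_state *state = &req->ctx->submit_state;

		/* fd matches the cached file and refs remain: reuse it */
		if (state->file && state->fd == fd &&
		    state->used_refs < state->has_refs) {
			state->used_refs++;
			return state->file;
		}

		/* otherwise fall back to a regular lookup */
		return NULL;
	}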
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 75 +++++++++++++++++++++++++++------------------------
1 file changed, 40 insertions(+), 35 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 6f3998e6475a..6109969709ff 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -197,6 +197,27 @@ struct fixed_file_data {
struct completion done;
};
+#define IO_PLUG_THRESHOLD 2
+#define IO_IOPOLL_BATCH 8
+
+struct io_submit_state {
+ /*
+ * io_kiocb alloc cache
+ */
+ void *reqs[IO_IOPOLL_BATCH];
+ unsigned int free_reqs;
+ unsigned int cur_req;
+
+ /*
+ * File reference cache
+ */
+ struct file *file;
+ unsigned int fd;
+ unsigned int has_refs;
+ unsigned int used_refs;
+ unsigned int ios_left;
+};
+
struct io_ring_ctx {
struct {
struct percpu_ref refs;
@@ -310,6 +331,9 @@ struct io_ring_ctx {
spinlock_t inflight_lock;
struct list_head inflight_list;
} ____cacheline_aligned_in_smp;
+
+ /* protected by uring_lock */
+ struct io_submit_state submit_state;
};
/*
@@ -575,27 +599,6 @@ struct io_kiocb {
struct io_wq_work work;
};
-#define IO_PLUG_THRESHOLD 2
-#define IO_IOPOLL_BATCH 8
-
-struct io_submit_state {
- /*
- * io_kiocb alloc cache
- */
- void *reqs[IO_IOPOLL_BATCH];
- unsigned int free_reqs;
- unsigned int cur_req;
-
- /*
- * File reference cache
- */
- struct file *file;
- unsigned int fd;
- unsigned int has_refs;
- unsigned int used_refs;
- unsigned int ios_left;
-};
-
struct io_op_def {
/* needs req->io allocated for deferral/async */
unsigned async_ctx : 1;
@@ -1158,11 +1161,11 @@ static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
return NULL;
}
-static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
- struct io_submit_state *state)
+static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx)
{
gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
struct io_kiocb *req;
+ struct io_submit_state *state = &ctx->submit_state;
if (!state->free_reqs) {
size_t sz;
@@ -4475,10 +4478,10 @@ static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
return table->files[index & IORING_FILE_TABLE_MASK];
}
-static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
+static int io_req_set_file(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_ring_ctx *ctx = req->ctx;
+ struct io_submit_state *state = &ctx->submit_state;
unsigned flags;
int fd;
@@ -4717,7 +4720,7 @@ static inline void io_queue_link_head(struct io_kiocb *req)
IOSQE_IO_HARDLINK | IOSQE_ASYNC)
static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- struct io_submit_state *state, struct io_kiocb **link)
+ struct io_kiocb **link)
{
const struct cred *old_creds = NULL;
struct io_ring_ctx *ctx = req->ctx;
@@ -4748,7 +4751,7 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
req->flags |= sqe_flags & (IOSQE_IO_DRAIN|IOSQE_IO_HARDLINK|
IOSQE_ASYNC);
- ret = io_req_set_file(state, req, sqe);
+ ret = io_req_set_file(req, sqe);
if (unlikely(ret)) {
err_req:
io_cqring_add_event(req, ret);
@@ -4823,8 +4826,10 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
/*
* Batched submission is done, ensure local IO is flushed out.
*/
-static void io_submit_state_end(struct io_submit_state *state)
+static void io_submit_end(struct io_ring_ctx *ctx)
{
+ struct io_submit_state *state = &ctx->submit_state;
+
io_file_put(state);
if (state->free_reqs)
kmem_cache_free_bulk(req_cachep, state->free_reqs,
@@ -4834,9 +4839,10 @@ static void io_submit_state_end(struct io_submit_state *state)
/*
* Start submission side cache.
*/
-static void io_submit_state_start(struct io_submit_state *state,
- unsigned int max_ios)
+static void io_submit_start(struct io_ring_ctx *ctx, unsigned int max_ios)
{
+ struct io_submit_state *state = &ctx->submit_state;
+
state->free_reqs = 0;
state->file = NULL;
state->ios_left = max_ios;
@@ -4903,7 +4909,6 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
struct mm_struct **mm, bool async)
{
struct blk_plug plug;
- struct io_submit_state state;
struct io_kiocb *link = NULL;
int i, submitted = 0;
bool mm_fault = false;
@@ -4921,7 +4926,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
if (!percpu_ref_tryget_many(&ctx->refs, nr))
return -EAGAIN;
- io_submit_state_start(&state, nr);
+ io_submit_start(ctx, nr);
if (nr > IO_PLUG_THRESHOLD)
blk_start_plug(&plug);
@@ -4932,7 +4937,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
const struct io_uring_sqe *sqe;
struct io_kiocb *req;
- req = io_get_req(ctx, &state);
+ req = io_get_req(ctx);
if (unlikely(!req)) {
if (!submitted)
submitted = -EAGAIN;
@@ -4965,7 +4970,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
req->needs_fixed_file = async;
trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
true, async);
- if (!io_submit_sqe(req, sqe, &state, &link))
+ if (!io_submit_sqe(req, sqe, &link))
break;
}
@@ -4977,7 +4982,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
if (link)
io_queue_link_head(link);
- io_submit_state_end(&state);
+ io_submit_end(ctx);
if (nr > IO_PLUG_THRESHOLD)
blk_finish_plug(&plug);
--
2.24.0
Thread overview (12+ messages):
2020-01-31 22:15 [PATCH v3 0/6] add persistent submission state Pavel Begunkov
2020-01-31 22:15 ` [PATCH v3 1/6] io_uring: always pass non-null io_submit_state Pavel Begunkov
2020-01-31 22:15 ` Pavel Begunkov [this message]
2020-01-31 22:15 ` [PATCH v3 3/6] io_uring: move ring_fd into io_submit_state Pavel Begunkov
2020-01-31 22:15 ` [PATCH v3 4/6] io_uring: move *link " Pavel Begunkov
2020-01-31 22:15 ` [PATCH v3 5/6] io_uring: persistent req bulk allocation cache Pavel Begunkov
2020-01-31 22:15 ` [PATCH v3 6/6] io_uring: optimise " Pavel Begunkov
2020-01-31 22:22 ` [PATCH v3 0/6] add persistent submission state Jens Axboe
2020-01-31 22:32 ` Pavel Begunkov
2020-02-01 0:31 ` Pavel Begunkov
2020-02-01 2:10 ` Jens Axboe
2020-02-01 11:42 ` Pavel Begunkov