From: Jens Axboe <[email protected]>
To: [email protected]
Cc: [email protected], Jens Axboe <[email protected]>
Subject: [PATCH 6/6] io_uring/futex: enable use of the allocation caches for futex_q
Date: Fri, 9 Jun 2023 12:31:25 -0600
Message-ID: <[email protected]>
In-Reply-To: <[email protected]>
We're under the ctx uring_lock for both the issue and the completion path
anyway, so wire up an allocation cache for futex_q entries and recycle
them rather than hitting the allocator on every request.
Signed-off-by: Jens Axboe <[email protected]>
---
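A condensed sketch of the recycle pattern this patch wires up, for anyone
skimming the diff below. Illustrative only, not part of the patch:
get_ifd()/put_ifd() are hypothetical names for what the patch implements
as io_alloc_ifd() and the cache put in io_futex_complete(); the
io_alloc_cache_*() helpers and struct io_cache_entry are the existing
io_uring allocation-cache API used elsewhere in this file.

	/*
	 * One allocation doubles as the live futex_q while a wait is
	 * armed and as a cache entry while idle, via the union in
	 * io_futex_data.
	 */
	struct io_futex_data {
		union {
			struct futex_q q;
			struct io_cache_entry cache;
		};
	};

	/* issue path, ctx->uring_lock held: prefer a recycled entry */
	static struct io_futex_data *get_ifd(struct io_ring_ctx *ctx)
	{
		struct io_cache_entry *entry;

		entry = io_alloc_cache_get(&ctx->futex_cache);
		if (entry)
			return container_of(entry, struct io_futex_data, cache);
		return kmalloc(sizeof(struct io_futex_data), GFP_NOWAIT);
	}

	/* completion path, ctx->uring_lock held: recycle, or free if
	 * the cache is already full */
	static void put_ifd(struct io_ring_ctx *ctx, struct io_futex_data *ifd)
	{
		if (!io_alloc_cache_put(&ctx->futex_cache, &ifd->cache))
			kfree(ifd);
	}

Both paths already run with ctx->uring_lock held, so the cache needs no
locking of its own; that is what makes the recycling essentially free.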
 include/linux/io_uring_types.h |  1 +
 io_uring/futex.c               | 65 +++++++++++++++++++++++++++++++++++++++++-------
 io_uring/futex.h               |  8 +++++
 io_uring/io_uring.c            |  2 ++
 4 files changed, 63 insertions(+), 13 deletions(-)
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index d796b578c129..a7f03d8d879f 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -274,6 +274,7 @@ struct io_ring_ctx {
unsigned int locked_free_nr;
struct hlist_head futex_list;
+ struct io_alloc_cache futex_cache;
const struct cred *sq_creds; /* cred used for __io_sq_thread() */
struct io_sq_data *sq_data; /* if using sq thread polling */
diff --git a/io_uring/futex.c b/io_uring/futex.c
index a1d50145927a..e0707723c689 100644
--- a/io_uring/futex.c
+++ b/io_uring/futex.c
@@ -9,6 +9,7 @@
#include "../kernel/futex/futex.h"
#include "io_uring.h"
+#include "rsrc.h"
#include "futex.h"
struct io_futex {
@@ -22,22 +23,48 @@ struct io_futex {
ktime_t timeout;
};
+struct io_futex_data {
+ union {
+ struct futex_q q;
+ struct io_cache_entry cache;
+ };
+};
+
+void io_futex_cache_init(struct io_ring_ctx *ctx)
+{
+ io_alloc_cache_init(&ctx->futex_cache, IO_NODE_ALLOC_CACHE_MAX,
+ sizeof(struct io_futex_data));
+}
+
+static void io_futex_cache_entry_free(struct io_cache_entry *entry)
+{
+ kfree(container_of(entry, struct io_futex_data, cache));
+}
+
+void io_futex_cache_free(struct io_ring_ctx *ctx)
+{
+ io_alloc_cache_free(&ctx->futex_cache, io_futex_cache_entry_free);
+}
+
static void io_futex_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
+ struct io_futex_data *ifd = req->async_data;
struct io_ring_ctx *ctx = req->ctx;
- kfree(req->async_data);
io_tw_lock(ctx, ts);
+ if (!io_alloc_cache_put(&ctx->futex_cache, &ifd->cache))
+ kfree(ifd);
+ req->async_data = NULL;
hlist_del_init(&req->hash_node);
io_req_task_complete(req, ts);
}
static bool __io_futex_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
- struct futex_q *q = req->async_data;
+ struct io_futex_data *ifd = req->async_data;
/* futex wake already done or in progress */
- if (!futex_unqueue(q))
+ if (!futex_unqueue(&ifd->q))
return false;
hlist_del_init(&req->hash_node);
@@ -133,12 +160,23 @@ static void io_futex_wake_fn(struct wake_q_head *wake_q, struct futex_q *q)
io_req_task_work_add(req);
}
+static struct io_futex_data *io_alloc_ifd(struct io_ring_ctx *ctx)
+{
+ struct io_cache_entry *entry;
+
+ entry = io_alloc_cache_get(&ctx->futex_cache);
+ if (entry)
+ return container_of(entry, struct io_futex_data, cache);
+
+ return kmalloc(sizeof(struct io_futex_data), GFP_NOWAIT);
+}
+
int io_futex_wait(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_futex *iof = io_kiocb_to_cmd(req, struct io_futex);
struct io_ring_ctx *ctx = req->ctx;
+ struct io_futex_data *ifd;
unsigned int flags = 0;
- struct futex_q *q;
int ret;
if (!futex_op_to_flags(FUTEX_WAIT, iof->futex_flags, &flags)) {
@@ -146,23 +184,24 @@ int io_futex_wait(struct io_kiocb *req, unsigned int issue_flags)
goto done;
}
- q = kmalloc(sizeof(*q), GFP_NOWAIT);
- if (!q) {
+ io_ring_submit_lock(ctx, issue_flags);
+ ifd = io_alloc_ifd(ctx);
+ if (!ifd) {
+ io_ring_submit_unlock(ctx, issue_flags);
ret = -ENOMEM;
goto done;
}
- req->async_data = q;
- *q = futex_q_init;
- q->bitset = iof->futex_mask;
- q->wake = io_futex_wake_fn;
- q->wake_data = req;
+ req->async_data = ifd;
+ ifd->q = futex_q_init;
+ ifd->q.bitset = iof->futex_mask;
+ ifd->q.wake = io_futex_wake_fn;
+ ifd->q.wake_data = req;
- io_ring_submit_lock(ctx, issue_flags);
hlist_add_head(&req->hash_node, &ctx->futex_list);
io_ring_submit_unlock(ctx, issue_flags);
- ret = futex_queue_wait(q, iof->uaddr, flags, iof->futex_val);
+ ret = futex_queue_wait(&ifd->q, iof->uaddr, flags, iof->futex_val);
if (ret)
goto done;
diff --git a/io_uring/futex.h b/io_uring/futex.h
index 16add2c069cc..e60d0abaf676 100644
--- a/io_uring/futex.h
+++ b/io_uring/futex.h
@@ -11,6 +11,8 @@ int io_futex_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
unsigned int issue_flags);
bool io_futex_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
bool cancel_all);
+void io_futex_cache_init(struct io_ring_ctx *ctx);
+void io_futex_cache_free(struct io_ring_ctx *ctx);
#else
static inline int io_futex_cancel(struct io_ring_ctx *ctx,
struct io_cancel_data *cd,
@@ -23,4 +25,10 @@ static inline bool io_futex_remove_all(struct io_ring_ctx *ctx,
{
return false;
}
+static inline void io_futex_cache_init(struct io_ring_ctx *ctx)
+{
+}
+static inline void io_futex_cache_free(struct io_ring_ctx *ctx)
+{
+}
#endif
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 8270f37c312d..7db2a139d110 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -318,6 +318,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
sizeof(struct async_poll));
io_alloc_cache_init(&ctx->netmsg_cache, IO_ALLOC_CACHE_MAX,
sizeof(struct io_async_msghdr));
+ io_futex_cache_init(ctx);
init_completion(&ctx->ref_comp);
xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
mutex_init(&ctx->uring_lock);
@@ -2917,6 +2918,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
io_eventfd_unregister(ctx);
io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free);
io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
+ io_futex_cache_free(ctx);
io_destroy_buffers(ctx);
mutex_unlock(&ctx->uring_lock);
if (ctx->sq_creds)
--
2.39.2