From: Hao Xu <[email protected]>
To: Pavel Begunkov <[email protected]>, [email protected]
Cc: Jens Axboe <[email protected]>
Subject: Re: [PATCH for-next v2 25/25] io_uring: mutex locked poll hashing
Date: Wed, 15 Jun 2022 20:53:21 +0800
Message-ID: <[email protected]>
In-Reply-To: <b3250f21371e91e43ff488bc695240630cb21667.1655213915.git.asml.silence@gmail.com>
On 6/14/22 22:37, Pavel Begunkov wrote:
> Currently we do two extra spin lock/unlock pairs to add a poll/apoll
> request to the cancellation hash table and remove it from there.
>
> On the submission side we often already hold ->uring_lock and tw
> completion is likely to hold it as well. Add a second cancellation hash
> table protected by ->uring_lock. Out of concern for latency, because of
> the need to have the mutex locked on the completion side, use the new
> table only in the following cases:
>
> 1) IORING_SETUP_SINGLE_ISSUER: only one task grabs uring_lock, so there
> is no contention and so the main tw handler will always end up
> grabbing it before calling into callbacks.
This statement doesn't seem to be true: an io-worker may grab the
uring_lock as well, and that's why the spot [1] I marked below is
needed, right? Or am I missing something?
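
For instance, an io-worker issuing with IO_URING_F_UNLOCKED set takes
the mutex through something like the helper below (a sketch from my
reading of io_uring.c, not part of this patch):

	static void io_ring_submit_lock(struct io_ring_ctx *ctx,
					unsigned int issue_flags)
	{
		/* io-wq issues with IO_URING_F_UNLOCKED, so it locks here */
		if (issue_flags & IO_URING_F_UNLOCKED)
			mutex_lock(&ctx->uring_lock);
	}

so "only one task grabs uring_lock" doesn't seem to cover the io-wq
path.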
>
> 2) IORING_SETUP_SQPOLL: same as with single issuer, only one task is
> using ->uring_lock.
same as above.
>
> 3) apoll: we normally grab the lock on the completion side anyway to
> execute the request, so it's free.
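For reference, my understanding of the insert-side selection (that hunk
isn't quoted below, so this is a sketch of how I read the patch, not
verbatim):

	/* sketch: plain poll picks its hash table at issue time */
	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
	    (ctx->flags & (IORING_SETUP_SQPOLL | IORING_SETUP_SINGLE_ISSUER)))
		req->flags |= REQ_F_HASH_LOCKED;   /* ->cancel_table_locked */
	else
		req->flags &= ~REQ_F_HASH_LOCKED;  /* spinlock-backed table */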
>
> Signed-off-by: Pavel Begunkov <[email protected]>
> ---
> io_uring/io_uring.c | 9 +++-
> io_uring/io_uring_types.h | 4 ++
> io_uring/poll.c | 111 ++++++++++++++++++++++++++++++--------
> 3 files changed, 102 insertions(+), 22 deletions(-)
>
> diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
> index 4bead16e57f7..1395176bc2ea 100644
> --- a/io_uring/io_uring.c
> +++ b/io_uring/io_uring.c
> @@ -731,6 +731,8 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
> hash_bits = clamp(hash_bits, 1, 8);
> if (io_alloc_hash_table(&ctx->cancel_table, hash_bits))
> goto err;
> + if (io_alloc_hash_table(&ctx->cancel_table_locked, hash_bits))
> + goto err;
>
> ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
> if (!ctx->dummy_ubuf)
> @@ -773,6 +775,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
> err:
> kfree(ctx->dummy_ubuf);
> kfree(ctx->cancel_table.hbs);
> + kfree(ctx->cancel_table_locked.hbs);
> kfree(ctx->io_bl);
> xa_destroy(&ctx->io_bl_xa);
> kfree(ctx);
> @@ -3056,6 +3059,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
> if (ctx->hash_map)
> io_wq_put_hash(ctx->hash_map);
> kfree(ctx->cancel_table.hbs);
> + kfree(ctx->cancel_table_locked.hbs);
> kfree(ctx->dummy_ubuf);
> kfree(ctx->io_bl);
> xa_destroy(&ctx->io_bl_xa);
> @@ -3217,12 +3221,13 @@ static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
> __io_cqring_overflow_flush(ctx, true);
> xa_for_each(&ctx->personalities, index, creds)
> io_unregister_personality(ctx, index);
> + if (ctx->rings)
> + io_poll_remove_all(ctx, NULL, true);
> mutex_unlock(&ctx->uring_lock);
>
> /* failed during ring init, it couldn't have issued any requests */
> if (ctx->rings) {
> io_kill_timeouts(ctx, NULL, true);
> - io_poll_remove_all(ctx, NULL, true);
> /* if we failed setting up the ctx, we might not have any rings */
> io_iopoll_try_reap_events(ctx);
> }
> @@ -3347,7 +3352,9 @@ static __cold void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
> }
>
> ret |= io_cancel_defer_files(ctx, task, cancel_all);
> + mutex_lock(&ctx->uring_lock);
> ret |= io_poll_remove_all(ctx, task, cancel_all);
> + mutex_unlock(&ctx->uring_lock);
> ret |= io_kill_timeouts(ctx, task, cancel_all);
> if (task)
> ret |= io_run_task_work();
> diff --git a/io_uring/io_uring_types.h b/io_uring/io_uring_types.h
> index ce2fbe6749bb..557b8e7719c9 100644
> --- a/io_uring/io_uring_types.h
> +++ b/io_uring/io_uring_types.h
> @@ -189,6 +189,7 @@ struct io_ring_ctx {
> struct xarray io_bl_xa;
> struct list_head io_buffers_cache;
>
> + struct io_hash_table cancel_table_locked;
> struct list_head cq_overflow_list;
> struct list_head apoll_cache;
> struct xarray personalities;
> @@ -323,6 +324,7 @@ enum {
> /* keep async read/write and isreg together and in order */
> REQ_F_SUPPORT_NOWAIT_BIT,
> REQ_F_ISREG_BIT,
> + REQ_F_HASH_LOCKED_BIT,
>
> /* not a real bit, just to check we're not overflowing the space */
> __REQ_F_LAST_BIT,
> @@ -388,6 +390,8 @@ enum {
> REQ_F_APOLL_MULTISHOT = BIT(REQ_F_APOLL_MULTISHOT_BIT),
> /* recvmsg special flag, clear EPOLLIN */
> REQ_F_CLEAR_POLLIN = BIT(REQ_F_CLEAR_POLLIN_BIT),
> + /* hashed into ->cancel_hash_locked */
> + REQ_F_HASH_LOCKED = BIT(REQ_F_HASH_LOCKED_BIT),
> };
>
> typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);
> diff --git a/io_uring/poll.c b/io_uring/poll.c
> index 07157da1c2cb..d20484c1cbb7 100644
> --- a/io_uring/poll.c
> +++ b/io_uring/poll.c
> @@ -93,6 +93,26 @@ static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
> spin_unlock(lock);
> }
>
> +static void io_poll_req_insert_locked(struct io_kiocb *req)
> +{
> + struct io_hash_table *table = &req->ctx->cancel_table_locked;
> + u32 index = hash_long(req->cqe.user_data, table->hash_bits);
> +
> + hlist_add_head(&req->hash_node, &table->hbs[index].list);
> +}
> +
> +static void io_poll_tw_hash_eject(struct io_kiocb *req, bool *locked)
> +{
> + struct io_ring_ctx *ctx = req->ctx;
> +
> + if (req->flags & REQ_F_HASH_LOCKED) {
> + io_tw_lock(ctx, locked);
[1]
> + hash_del(&req->hash_node);
> + } else {
> + io_poll_req_delete(req, ctx);
> + }
> +}
> +
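
For context on [1]: io_tw_lock() only pays for the mutex when the tw
batch doesn't already hold it, i.e. when the tw runner's trylock failed
because someone else (e.g. an io-worker) got there first. From my
reading of io_uring.c it is just:

	static void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
	{
		/* grab ->uring_lock unless this tw batch already holds it */
		if (!*locked) {
			mutex_lock(&ctx->uring_lock);
			*locked = true;
		}
	}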