public inbox for [email protected]
 help / color / mirror / Atom feed
From: Gabriel Krisman Bertazi <[email protected]>
To: Pavel Begunkov <[email protected]>
Cc: [email protected], Jens Axboe <[email protected]>,
	[email protected]
Subject: Re: [PATCH 10/11] io_uring/rsrc: cache struct io_rsrc_node
Date: Fri, 31 Mar 2023 11:09:32 -0300	[thread overview]
Message-ID: <[email protected]> (raw)
In-Reply-To: <7f5eb1b89e8dcf93739607c79bbf7aec1784cbbe.1680187408.git.asml.silence@gmail.com> (Pavel Begunkov's message of "Thu, 30 Mar 2023 15:53:28 +0100")

Pavel Begunkov <[email protected]> writes:

> Add allocation cache for struct io_rsrc_node, it's always allocated and
> put under ->uring_lock, so it doesn't need any extra synchronisation
> around caches.

Hi Pavel,

I'm curious whether you considered using kmem_cache instead of the custom
cache for this case?  I'm wondering whether this produces a visible
difference in performance in your benchmark.

> Signed-off-by: Pavel Begunkov <[email protected]>
> ---
>  include/linux/io_uring_types.h |  1 +
>  io_uring/io_uring.c            | 11 +++++++++--
>  io_uring/rsrc.c                | 23 +++++++++++++++--------
>  io_uring/rsrc.h                |  5 ++++-
>  4 files changed, 29 insertions(+), 11 deletions(-)
>
> diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
> index 47496059e13a..5d772e36e7fc 100644
> --- a/include/linux/io_uring_types.h
> +++ b/include/linux/io_uring_types.h
> @@ -332,6 +332,7 @@ struct io_ring_ctx {
>  
>  	/* protected by ->uring_lock */
>  	struct list_head		rsrc_ref_list;
> +	struct io_alloc_cache		rsrc_node_cache;
>  
>  	struct list_head		io_buffers_pages;
>  
> diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
> index 8c3886a4ca96..beedaf403284 100644
> --- a/io_uring/io_uring.c
> +++ b/io_uring/io_uring.c
> @@ -310,6 +310,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
>  	INIT_LIST_HEAD(&ctx->sqd_list);
>  	INIT_LIST_HEAD(&ctx->cq_overflow_list);
>  	INIT_LIST_HEAD(&ctx->io_buffers_cache);
> +	io_alloc_cache_init(&ctx->rsrc_node_cache, sizeof(struct io_rsrc_node));
>  	io_alloc_cache_init(&ctx->apoll_cache, sizeof(struct async_poll));
>  	io_alloc_cache_init(&ctx->netmsg_cache, sizeof(struct io_async_msghdr));
>  	init_completion(&ctx->ref_comp);
> @@ -2791,6 +2792,11 @@ static void io_req_caches_free(struct io_ring_ctx *ctx)
>  	mutex_unlock(&ctx->uring_lock);
>  }
>  
> +void io_rsrc_node_cache_free(struct io_cache_entry *entry)
> +{
> +	kfree(container_of(entry, struct io_rsrc_node, cache));
> +}
> +
>  static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
>  {
>  	io_sq_thread_finish(ctx);
> @@ -2816,9 +2822,9 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
>  
>  	/* there are no registered resources left, nobody uses it */
>  	if (ctx->rsrc_node)
> -		io_rsrc_node_destroy(ctx->rsrc_node);
> +		io_rsrc_node_destroy(ctx, ctx->rsrc_node);
>  	if (ctx->rsrc_backup_node)
> -		io_rsrc_node_destroy(ctx->rsrc_backup_node);
> +		io_rsrc_node_destroy(ctx, ctx->rsrc_backup_node);
>  
>  	WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
>  
> @@ -2830,6 +2836,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
>  #endif
>  	WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
>  
> +	io_alloc_cache_free(&ctx->rsrc_node_cache, io_rsrc_node_cache_free);
>  	if (ctx->mm_account) {
>  		mmdrop(ctx->mm_account);
>  		ctx->mm_account = NULL;
> diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
> index 0f4e245dee1b..345631091d80 100644
> --- a/io_uring/rsrc.c
> +++ b/io_uring/rsrc.c
> @@ -164,7 +164,7 @@ static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
>  		kfree(prsrc);
>  	}
>  
> -	io_rsrc_node_destroy(ref_node);
> +	io_rsrc_node_destroy(rsrc_data->ctx, ref_node);
>  	if (atomic_dec_and_test(&rsrc_data->refs))
>  		complete(&rsrc_data->done);
>  }
> @@ -175,9 +175,10 @@ void io_wait_rsrc_data(struct io_rsrc_data *data)
>  		wait_for_completion(&data->done);
>  }
>  
> -void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
> +void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
>  {
> -	kfree(ref_node);
> +	if (!io_alloc_cache_put(&ctx->rsrc_node_cache, &node->cache))
> +		kfree(node);
>  }
>  
>  void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
> @@ -198,13 +199,19 @@ void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
>  	}
>  }
>  
> -static struct io_rsrc_node *io_rsrc_node_alloc(void)
> +static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
>  {
>  	struct io_rsrc_node *ref_node;
> +	struct io_cache_entry *entry;
>  
> -	ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
> -	if (!ref_node)
> -		return NULL;
> +	entry = io_alloc_cache_get(&ctx->rsrc_node_cache);
> +	if (entry) {
> +		ref_node = container_of(entry, struct io_rsrc_node, cache);
> +	} else {
> +		ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
> +		if (!ref_node)
> +			return NULL;
> +	}
>  
>  	ref_node->refs = 1;
>  	INIT_LIST_HEAD(&ref_node->node);
> @@ -243,7 +250,7 @@ int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
>  {
>  	if (ctx->rsrc_backup_node)
>  		return 0;
> -	ctx->rsrc_backup_node = io_rsrc_node_alloc();
> +	ctx->rsrc_backup_node = io_rsrc_node_alloc(ctx);
>  	return ctx->rsrc_backup_node ? 0 : -ENOMEM;
>  }
>  
> diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
> index 17293ab90f64..d1555eaae81a 100644
> --- a/io_uring/rsrc.h
> +++ b/io_uring/rsrc.h
> @@ -4,6 +4,8 @@
>  
>  #include <net/af_unix.h>
>  
> +#include "alloc_cache.h"
> +
>  #define IO_RSRC_TAG_TABLE_SHIFT	(PAGE_SHIFT - 3)
>  #define IO_RSRC_TAG_TABLE_MAX	(1U << IO_RSRC_TAG_TABLE_SHIFT)
>  #define IO_RSRC_TAG_TABLE_MASK	(IO_RSRC_TAG_TABLE_MAX - 1)
> @@ -37,6 +39,7 @@ struct io_rsrc_data {
>  };
>  
>  struct io_rsrc_node {
> +	struct io_cache_entry		cache;
>  	int refs;
>  	struct list_head		node;
>  	struct io_rsrc_data		*rsrc_data;
> @@ -65,7 +68,7 @@ void io_rsrc_put_tw(struct callback_head *cb);
>  void io_rsrc_node_ref_zero(struct io_rsrc_node *node);
>  void io_rsrc_put_work(struct work_struct *work);
>  void io_wait_rsrc_data(struct io_rsrc_data *data);
> -void io_rsrc_node_destroy(struct io_rsrc_node *ref_node);
> +void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *ref_node);
>  int io_rsrc_node_switch_start(struct io_ring_ctx *ctx);
>  int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
>  			  struct io_rsrc_node *node, void *rsrc);

-- 
Gabriel Krisman Bertazi

  reply	other threads:[~2023-03-31 14:09 UTC|newest]

Thread overview: 23+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-03-30 14:53 [RFC 00/11] optimise registered buffer/file updates Pavel Begunkov
2023-03-30 14:53 ` [PATCH 01/11] io_uring/rsrc: use non-pcpu refcounts for nodes Pavel Begunkov
2023-03-30 14:53 ` [PATCH 02/11] io_uring/rsrc: keep cached refs per node Pavel Begunkov
2023-03-30 14:53 ` [PATCH 03/11] io_uring: don't put nodes under spinlocks Pavel Begunkov
2023-03-30 14:53 ` [PATCH 04/11] io_uring: io_free_req() via tw Pavel Begunkov
2023-03-30 14:53 ` [PATCH 05/11] io_uring/rsrc: protect node refs with uring_lock Pavel Begunkov
2023-03-30 14:53 ` [PATCH 06/11] io_uring/rsrc: kill rsrc_ref_lock Pavel Begunkov
2023-03-30 14:53 ` [PATCH 07/11] io_uring/rsrc: rename rsrc_list Pavel Begunkov
2023-03-30 14:53 ` [PATCH 08/11] io_uring/rsrc: optimise io_rsrc_put allocation Pavel Begunkov
2023-03-30 14:53 ` [PATCH 09/11] io_uring/rsrc: don't offload node free Pavel Begunkov
2023-03-30 14:53 ` [PATCH 10/11] io_uring/rsrc: cache struct io_rsrc_node Pavel Begunkov
2023-03-31 14:09   ` Gabriel Krisman Bertazi [this message]
2023-03-31 16:27     ` Pavel Begunkov
2023-04-01  0:04       ` Gabriel Krisman Bertazi
2023-04-04 13:21         ` Pavel Begunkov
2023-04-04 15:48           ` Gabriel Krisman Bertazi
2023-04-04 15:52             ` Jens Axboe
2023-04-04 16:53               ` Gabriel Krisman Bertazi
2023-04-04 18:26                 ` Pavel Begunkov
2023-03-30 14:53 ` [PATCH 11/11] io_uring/rsrc: add lockdep sanity checks Pavel Begunkov
2023-03-31 13:35 ` [RFC 00/11] optimise registered buffer/file updates Gabriel Krisman Bertazi
2023-03-31 16:21   ` Pavel Begunkov
2023-03-31 15:18 ` Jens Axboe

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    [email protected] \
    [email protected] \
    [email protected] \
    [email protected] \
    [email protected] \
    [email protected] \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox