From: Jens Axboe <[email protected]>
To: [email protected]
Cc: [email protected], Jens Axboe <[email protected]>
Subject: [PATCH 1/5] bio: add allocation cache abstraction
Date: Tue, 10 Aug 2021 10:37:24 -0600
Message-ID: <[email protected]>
In-Reply-To: <[email protected]>

Add a set of helpers that encapsulate bio allocation, recycling bios
as needed. The caller must provide any locking that is required. The
primary intended use case is polled IO from io_uring, which needs no
external locking.

The implementation is deliberately simple: it keeps a count of bios in
the cache and enforces a maximum of 512 with a slack of 64. If the
count exceeds max + slack, slack bios are dropped.

The cache is intended to be per-task, and the user must supply the
storage for it. Since io_uring will be the only user for now, provide a
hook there that returns the cache; stub it out to return NULL initially.

Signed-off-by: Jens Axboe <[email protected]>
---
 block/bio.c         | 123 ++++++++++++++++++++++++++++++++++++++++----
 include/linux/bio.h |  24 +++++++--
 2 files changed, 131 insertions(+), 16 deletions(-)
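
For reference, a minimal usage sketch of the new API (hypothetical
caller, not part of this patch; assumes a per-task, single-threaded
context such as io_uring polled IO, so no locking is shown):

	struct bio_alloc_cache cache;
	struct bio *bio;

	bio_alloc_cache_init(&cache);

	/*
	 * A hit pops a recycled bio off the free list; a miss falls
	 * back to bio_alloc() (or bio_alloc_bioset() when a bio_set
	 * is given). NULL is returned if nr_vecs exceeds
	 * BIO_INLINE_VECS, and the caller must then allocate normally.
	 */
	bio = bio_cache_get(&cache, GFP_KERNEL, 0, NULL);

	/* ... set up, submit, and complete the bio ... */

	/*
	 * Recycle instead of freeing. Once the cache grows past
	 * ALLOC_CACHE_MAX + ALLOC_CACHE_SLACK entries, the put side
	 * prunes ALLOC_CACHE_SLACK bios.
	 */
	bio_cache_put(&cache, bio);

	/* Tear down, freeing anything still sitting in the cache. */
	bio_alloc_cache_destroy(&cache);

Note that bio_cache_put() only recycles on the final reference drop,
matching bio_put() semantics; a put on a still-referenced bio just
drops the reference.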

diff --git a/block/bio.c b/block/bio.c
index 1fab762e079b..e3680702aeae 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -238,6 +238,35 @@ static void bio_free(struct bio *bio)
 	}
 }
 
+static inline void __bio_init(struct bio *bio)
+{
+	bio->bi_next = NULL;
+	bio->bi_bdev = NULL;
+	bio->bi_opf = 0;
+	bio->bi_flags = bio->bi_ioprio = bio->bi_write_hint = 0;
+	bio->bi_status = 0;
+	bio->bi_iter.bi_sector = 0;
+	bio->bi_iter.bi_size = 0;
+	bio->bi_iter.bi_idx = 0;
+	bio->bi_iter.bi_bvec_done = 0;
+	bio->bi_end_io = NULL;
+	bio->bi_private = NULL;
+#ifdef CONFIG_BLK_CGROUP
+	bio->bi_blkg = NULL;
+	bio->bi_issue.value = 0;
+#ifdef CONFIG_BLK_CGROUP_IOCOST
+	bio->bi_iocost_cost = 0;
+#endif
+#endif
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+	bio->bi_crypt_context = NULL;
+#endif
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+	bio->bi_integrity = NULL;
+#endif
+	bio->bi_vcnt = 0;
+}
+
 /*
  * Users of this function have their own bio allocation. Subsequently,
  * they must remember to pair any call to bio_init() with bio_uninit()
@@ -246,7 +275,7 @@ static void bio_free(struct bio *bio)
 void bio_init(struct bio *bio, struct bio_vec *table,
 	      unsigned short max_vecs)
 {
-	memset(bio, 0, sizeof(*bio));
+	__bio_init(bio);
 	atomic_set(&bio->__bi_remaining, 1);
 	atomic_set(&bio->__bi_cnt, 1);
 
@@ -591,6 +620,19 @@ void guard_bio_eod(struct bio *bio)
 	bio_truncate(bio, maxsector << 9);
 }
 
+static bool __bio_put(struct bio *bio)
+{
+	if (!bio_flagged(bio, BIO_REFFED))
+		return true;
+
+	BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));
+
+	/*
+	 * last put frees it
+	 */
+	return atomic_dec_and_test(&bio->__bi_cnt);
+}
+
 /**
  * bio_put - release a reference to a bio
  * @bio:   bio to release reference to
@@ -601,17 +643,8 @@ void guard_bio_eod(struct bio *bio)
  **/
 void bio_put(struct bio *bio)
 {
-	if (!bio_flagged(bio, BIO_REFFED))
+	if (__bio_put(bio))
 		bio_free(bio);
-	else {
-		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));
-
-		/*
-		 * last put frees it
-		 */
-		if (atomic_dec_and_test(&bio->__bi_cnt))
-			bio_free(bio);
-	}
 }
 EXPORT_SYMBOL(bio_put);
 
@@ -1595,6 +1628,74 @@ int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
 }
 EXPORT_SYMBOL(bioset_init_from_src);
 
+void bio_alloc_cache_init(struct bio_alloc_cache *cache)
+{
+	bio_list_init(&cache->free_list);
+	cache->nr = 0;
+}
+
+static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
+				  unsigned int nr)
+{
+	struct bio *bio;
+	unsigned int i;
+
+	i = 0;
+	while ((bio = bio_list_pop(&cache->free_list)) != NULL) {
+		cache->nr--;
+		bio_free(bio);
+		if (++i == nr)
+			break;
+	}
+}
+
+void bio_alloc_cache_destroy(struct bio_alloc_cache *cache)
+{
+	bio_alloc_cache_prune(cache, -1U);
+}
+
+struct bio *bio_cache_get(struct bio_alloc_cache *cache, gfp_t gfp,
+			  unsigned short nr_vecs, struct bio_set *bs)
+{
+	struct bio *bio;
+
+	if (nr_vecs > BIO_INLINE_VECS)
+		return NULL;
+	if (bio_list_empty(&cache->free_list)) {
+alloc:
+		if (bs)
+			return bio_alloc_bioset(gfp, nr_vecs, bs);
+		else
+			return bio_alloc(gfp, nr_vecs);
+	}
+
+	bio = bio_list_peek(&cache->free_list);
+	if (bs && bio->bi_pool != bs)
+		goto alloc;
+	bio_list_del_head(&cache->free_list, bio);
+	cache->nr--;
+	bio_init(bio, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs);
+	return bio;
+}
+
+#define ALLOC_CACHE_MAX		512
+#define ALLOC_CACHE_SLACK	 64
+
+void bio_cache_put(struct bio_alloc_cache *cache, struct bio *bio)
+{
+	if (unlikely(!__bio_put(bio)))
+		return;
+	if (cache) {
+		bio_uninit(bio);
+		bio_list_add_head(&cache->free_list, bio);
+		cache->nr++;
+		if (cache->nr > ALLOC_CACHE_MAX + ALLOC_CACHE_SLACK)
+			bio_alloc_cache_prune(cache, ALLOC_CACHE_SLACK);
+	} else {
+		bio_free(bio);
+	}
+}
+
 static int __init init_bio(void)
 {
 	int i;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 2203b686e1f0..c351aa88d137 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -652,18 +652,22 @@ static inline struct bio *bio_list_peek(struct bio_list *bl)
 	return bl->head;
 }
 
-static inline struct bio *bio_list_pop(struct bio_list *bl)
+static inline void bio_list_del_head(struct bio_list *bl, struct bio *head)
 {
-	struct bio *bio = bl->head;
-
-	if (bio) {
+	if (head) {
 		bl->head = bl->head->bi_next;
 		if (!bl->head)
 			bl->tail = NULL;
 
-		bio->bi_next = NULL;
+		head->bi_next = NULL;
 	}
+}
 
+static inline struct bio *bio_list_pop(struct bio_list *bl)
+{
+	struct bio *bio = bl->head;
+
+	bio_list_del_head(bl, bio);
 	return bio;
 }
 
@@ -676,6 +680,16 @@ static inline struct bio *bio_list_get(struct bio_list *bl)
 	return bio;
 }
 
+struct bio_alloc_cache {
+	struct bio_list		free_list;
+	unsigned int		nr;
+};
+
+void bio_alloc_cache_init(struct bio_alloc_cache *);
+void bio_alloc_cache_destroy(struct bio_alloc_cache *);
+struct bio *bio_cache_get(struct bio_alloc_cache *, gfp_t, unsigned short, struct bio_set *bs);
+void bio_cache_put(struct bio_alloc_cache *, struct bio *);
+
 /*
  * Increment chain count for the bio. Make sure the CHAIN flag update
  * is visible before the raised count.
-- 
2.32.0


Thread overview: 14+ messages
2021-08-10 16:37 [PATCHSET v3 0/5] Enable bio recycling for polled IO Jens Axboe
2021-08-10 16:37 ` Jens Axboe [this message]
2021-08-11  8:34   ` [PATCH 1/5] bio: add allocation cache abstraction Christoph Hellwig
2021-08-10 16:37 ` [PATCH 2/5] io_uring: use kiocb->private to hold rw_len Jens Axboe
2021-08-11 11:40   ` Christoph Hellwig
2021-08-10 16:37 ` [PATCH 3/5] fs: add ki_bio_cache pointer to struct kiocb Jens Axboe
2021-08-10 16:37 ` [PATCH 4/5] io_uring: wire up bio allocation cache Jens Axboe
2021-08-10 16:37 ` [PATCH 5/5] block: enable use of " Jens Axboe
2021-08-10 16:44 ` [PATCHSET v3 0/5] Enable bio recycling for polled IO Jens Axboe
2021-08-11  8:26 ` Christoph Hellwig
2021-08-11 11:13   ` Christoph Hellwig
2021-08-11 15:05     ` Jens Axboe
2021-08-11 15:08       ` Christoph Hellwig
2021-08-11 15:06     ` Ming Lei
