From: Jonathan Lemon <[email protected]>
To: <[email protected]>
Subject: [RFC v1 7/9] page_pool: add page allocation and free hooks.
Date: Fri, 7 Oct 2022 14:17:11 -0700
Message-ID: <[email protected]>
In-Reply-To: <[email protected]>
In order to allow for user-allocated page backing, add hooks to the
page pool so pages can be obtained from and released to a user-supplied
provider instead of the system page allocator.
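For illustration only (not part of this patch), a provider could plug
into these hooks roughly as sketched below. struct my_provider,
my_alloc_page(), my_put_page() and my_create_pool() are all
hypothetical; the sketch assumes a fixed set of pre-pinned, order-0
user pages and reuses init_arg as the callback argument, as the new
helpers do:

	#include <net/page_pool.h>

	/*
	 * Hypothetical provider backed by a fixed set of user-registered,
	 * order-0 pages.  Purely illustrative; not part of this patch.
	 */
	struct my_provider {
		struct page **pages;	/* pre-pinned user pages */
		unsigned int free;	/* index of next free slot */
		unsigned int count;
	};

	static struct page *my_alloc_page(void *arg, int nid, gfp_t gfp,
					  unsigned int order)
	{
		struct my_provider *mp = arg;

		/* this toy provider only serves order-0 pages */
		if (order || mp->free == mp->count)
			return NULL;
		return mp->pages[mp->free++];
	}

	static void my_put_page(void *arg, struct page *page)
	{
		struct my_provider *mp = arg;

		/* push the page back onto the provider's free stack */
		mp->pages[--mp->free] = page;
	}

	static struct page_pool *my_create_pool(struct my_provider *mp)
	{
		struct page_pool_params pp_params = {
			.order		= 0,
			.nid		= NUMA_NO_NODE,
			.pool_size	= 256,
			.alloc_pages	= my_alloc_page,
			.put_page	= my_put_page,
			.init_arg	= mp,	/* handed back as 'arg' above */
		};

		return page_pool_create(&pp_params);
	}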
skbs are marked with skb_mark_for_recycle() if they contain pages
belonging to a page pool, and put_page() will deliver the pages back
to the pool instead of freeing them to the system page allocator.
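A rough sketch of the receive side follows (again not part of this
patch; my_rx_one() and my_napi_build_skb() are hypothetical stand-ins
for the driver's own RX skb construction, and error handling is kept
minimal):

	/*
	 * Hypothetical RX completion path using such a pool.
	 */
	static struct sk_buff *my_rx_one(struct page_pool *pool, unsigned int len)
	{
		struct page *page;
		struct sk_buff *skb;

		/* may be served by the provider's alloc hook rather than the buddy allocator */
		page = page_pool_dev_alloc_pages(pool);
		if (!page)
			return NULL;

		skb = my_napi_build_skb(page, len);	/* hypothetical helper */
		if (!skb) {
			page_pool_put_full_page(pool, page, true);
			return NULL;
		}

		/* the last reference on these pages now returns them to the pool/provider */
		skb_mark_for_recycle(skb);
		return skb;
	}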
Signed-off-by: Jonathan Lemon <[email protected]>
---
include/net/page_pool.h | 6 ++++++
net/core/page_pool.c | 41 ++++++++++++++++++++++++++++++++++-------
2 files changed, 40 insertions(+), 7 deletions(-)
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 813c93499f20..85c8423f9a7e 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -82,6 +82,12 @@ struct page_pool_params {
unsigned int offset; /* DMA addr offset */
void (*init_callback)(struct page *page, void *arg);
void *init_arg;
+ struct page *(*alloc_pages)(void *arg, int nid, gfp_t gfp,
+ unsigned int order);
+ unsigned long (*alloc_bulk)(void *arg, gfp_t gfp, int nid,
+ unsigned long nr_pages,
+ struct page **page_array);
+ void (*put_page)(void *arg, struct page *page);
};
#ifdef CONFIG_PAGE_POOL_STATS
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 9b203d8660e4..21c6ee97bc7f 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -342,19 +342,47 @@ static void page_pool_clear_pp_info(struct page *page)
page->pp = NULL;
}
+/* hooks to either page provider or system page allocator */
+static void page_pool_mm_put_page(struct page_pool *pool, struct page *page)
+{
+ if (pool->p.put_page)
+ return pool->p.put_page(pool->p.init_arg, page);
+ put_page(page);
+}
+
+static unsigned long page_pool_mm_alloc_bulk(struct page_pool *pool,
+ gfp_t gfp,
+ unsigned long nr_pages)
+{
+ if (pool->p.alloc_bulk)
+ return pool->p.alloc_bulk(pool->p.init_arg, gfp,
+ pool->p.nid, nr_pages,
+ pool->alloc.cache);
+ return alloc_pages_bulk_array_node(gfp, pool->p.nid,
+ nr_pages, pool->alloc.cache);
+}
+
+static struct page *page_pool_mm_alloc(struct page_pool *pool, gfp_t gfp)
+{
+ if (pool->p.alloc_pages)
+ return pool->p.alloc_pages(pool->p.init_arg, pool->p.nid,
+ gfp, pool->p.order);
+ return alloc_pages_node(pool->p.nid, gfp, pool->p.order);
+}
+
static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
gfp_t gfp)
{
struct page *page;
gfp |= __GFP_COMP;
- page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
+ page = page_pool_mm_alloc(pool, gfp);
if (unlikely(!page))
return NULL;
if ((pool->p.flags & PP_FLAG_DMA_MAP) &&
unlikely(!page_pool_dma_map(pool, page))) {
- put_page(page);
+ page_pool_mm_put_page(pool, page);
return NULL;
}
@@ -389,8 +417,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */
memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);
- nr_pages = alloc_pages_bulk_array_node(gfp, pool->p.nid, bulk,
- pool->alloc.cache);
+ nr_pages = page_pool_mm_alloc_bulk(pool, gfp, bulk);
if (unlikely(!nr_pages))
return NULL;
@@ -401,7 +428,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
page = pool->alloc.cache[i];
if ((pp_flags & PP_FLAG_DMA_MAP) &&
unlikely(!page_pool_dma_map(pool, page))) {
- put_page(page);
+ page_pool_mm_put_page(pool, page);
continue;
}
@@ -501,7 +528,7 @@ static void page_pool_return_page(struct page_pool *pool, struct page *page)
{
page_pool_release_page(pool, page);
- put_page(page);
+ page_pool_mm_put_page(pool, page);
/* An optimization would be to call __free_pages(page, pool->p.order)
* knowing page is not part of page-cache (thus avoiding a
* __page_cache_release() call).
@@ -593,7 +620,7 @@ __page_pool_put_page(struct page_pool *pool, struct page *page,
recycle_stat_inc(pool, released_refcnt);
/* Do not replace this with page_pool_return_page() */
page_pool_release_page(pool, page);
- put_page(page);
+ page_pool_mm_put_page(pool, page);
return NULL;
}
--
2.30.2