From: David Wei <[email protected]>
To: [email protected], [email protected]
Cc: Jens Axboe <[email protected]>,
	Pavel Begunkov <[email protected]>,
	Jakub Kicinski <[email protected]>, Paolo Abeni <[email protected]>,
	"David S. Miller" <[email protected]>,
	Eric Dumazet <[email protected]>,
	Jesper Dangaard Brouer <[email protected]>,
	David Ahern <[email protected]>,
	Mina Almasry <[email protected]>,
	Stanislav Fomichev <[email protected]>,
	Joe Damato <[email protected]>,
	Pedro Tammela <[email protected]>
Subject: [PATCH net-next v10 06/22] net: page_pool: create hooks for custom memory providers
Date: Wed,  8 Jan 2025 14:06:27 -0800
Message-ID: <[email protected]>
In-Reply-To: <[email protected]>

From: Jakub Kicinski <[email protected]>

A spin-off from Jakub's original page pool memory providers patch,
which allows extending page pools with custom allocators. One such
provider is devmem TCP; another is the io_uring zerocopy support
added in the following patches.
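
A provider implements the four hooks in struct memory_provider_ops
and publishes them in a const table; the table must live in kernel
rodata, which page_pool_init() now enforces via is_kernel_rodata().
A minimal sketch of such a provider (all my_mp_* names here are
hypothetical, for illustration only):

	static int my_mp_init(struct page_pool *pool)
	{
		/* take a reference on the provider state that the
		 * binder stashed in pool->mp_priv
		 */
		return pool->mp_priv ? 0 : -EINVAL;
	}

	static void my_mp_destroy(struct page_pool *pool)
	{
		/* drop the reference taken in ->init() */
	}

	static netmem_ref my_mp_alloc_netmems(struct page_pool *pool,
					      gfp_t gfp)
	{
		/* hand out provider-owned net_iovs; 0 means failure */
		return 0;
	}

	static bool my_mp_release_netmem(struct page_pool *pool,
					 netmem_ref netmem)
	{
		/* returning false stops the page pool from
		 * put_page()ing memory the provider still owns
		 */
		return false;
	}

	static const struct memory_provider_ops my_mp_ops = {
		.init		= my_mp_init,
		.destroy	= my_mp_destroy,
		.alloc_netmems	= my_mp_alloc_netmems,
		.release_netmem	= my_mp_release_netmem,
	};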

Suggested-by: Jakub Kicinski <[email protected]>
Signed-off-by: Pavel Begunkov <[email protected]>
Signed-off-by: David Wei <[email protected]>
---
 include/net/page_pool/memory_provider.h | 20 ++++++++++++++++++++
 include/net/page_pool/types.h           |  4 ++++
 net/core/devmem.c                       | 15 ++++++++++++++-
 net/core/page_pool.c                    | 23 +++++++++++++++--------
 4 files changed, 53 insertions(+), 9 deletions(-)
 create mode 100644 include/net/page_pool/memory_provider.h

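Binding a provider to an rx queue follows the pattern the devmem
hunks below adopt: fill mp_params under rtnl_lock() and restart the
queue. A rough sketch, again with hypothetical my_* names:

	static int my_mp_bind(struct net_device *dev, u32 rxq_idx,
			      void *my_state)
	{
		struct netdev_rx_queue *rxq;
		int err;

		rxq = __netif_get_rx_queue(dev, rxq_idx);
		if (rxq->mp_params.mp_ops)
			return -EEXIST;

		rxq->mp_params.mp_priv = my_state;
		rxq->mp_params.mp_ops = &my_mp_ops;

		err = netdev_rx_queue_restart(dev, rxq_idx);
		if (err) {
			rxq->mp_params.mp_priv = NULL;
			rxq->mp_params.mp_ops = NULL;
		}
		return err;
	}
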
diff --git a/include/net/page_pool/memory_provider.h b/include/net/page_pool/memory_provider.h
new file mode 100644
index 000000000000..79412a8714fa
--- /dev/null
+++ b/include/net/page_pool/memory_provider.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * page_pool/memory_provider.h
+ *	Author:	Pavel Begunkov <[email protected]>
+ *	Author:	David Wei <[email protected]>
+ */
+#ifndef _NET_PAGE_POOL_MEMORY_PROVIDER_H
+#define _NET_PAGE_POOL_MEMORY_PROVIDER_H
+
+#include <net/netmem.h>
+#include <net/page_pool/types.h>
+
+struct memory_provider_ops {
+	netmem_ref (*alloc_netmems)(struct page_pool *pool, gfp_t gfp);
+	bool (*release_netmem)(struct page_pool *pool, netmem_ref netmem);
+	int (*init)(struct page_pool *pool);
+	void (*destroy)(struct page_pool *pool);
+};
+
+#endif
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index ed4cd114180a..88f65c3e2ad9 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -152,8 +152,11 @@ struct page_pool_stats {
  */
 #define PAGE_POOL_FRAG_GROUP_ALIGN	(4 * sizeof(long))
 
+struct memory_provider_ops;
+
 struct pp_memory_provider_params {
 	void *mp_priv;
+	const struct memory_provider_ops *mp_ops;
 };
 
 struct page_pool {
@@ -216,6 +219,7 @@ struct page_pool {
 	struct ptr_ring ring;
 
 	void *mp_priv;
+	const struct memory_provider_ops *mp_ops;
 
 #ifdef CONFIG_PAGE_POOL_STATS
 	/* recycle stats are per-cpu to avoid locking */
diff --git a/net/core/devmem.c b/net/core/devmem.c
index c250db6993d3..48833c1dcbd4 100644
--- a/net/core/devmem.c
+++ b/net/core/devmem.c
@@ -15,6 +15,7 @@
 #include <net/netdev_queues.h>
 #include <net/netdev_rx_queue.h>
 #include <net/page_pool/helpers.h>
+#include <net/page_pool/memory_provider.h>
 #include <trace/events/page_pool.h>
 
 #include "devmem.h"
@@ -26,6 +27,8 @@
 /* Protected by rtnl_lock() */
 static DEFINE_XARRAY_FLAGS(net_devmem_dmabuf_bindings, XA_FLAGS_ALLOC1);
 
+static const struct memory_provider_ops dmabuf_devmem_ops;
+
 static void net_devmem_dmabuf_free_chunk_owner(struct gen_pool *genpool,
 					       struct gen_pool_chunk *chunk,
 					       void *not_used)
@@ -117,6 +120,7 @@ void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
 		WARN_ON(rxq->mp_params.mp_priv != binding);
 
 		rxq->mp_params.mp_priv = NULL;
+		rxq->mp_params.mp_ops = NULL;
 
 		rxq_idx = get_netdev_rx_queue_index(rxq);
 
@@ -142,7 +146,7 @@ int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
 	}
 
 	rxq = __netif_get_rx_queue(dev, rxq_idx);
-	if (rxq->mp_params.mp_priv) {
+	if (rxq->mp_params.mp_ops) {
 		NL_SET_ERR_MSG(extack, "designated queue already memory provider bound");
 		return -EEXIST;
 	}
@@ -160,6 +164,7 @@ int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
 		return err;
 
 	rxq->mp_params.mp_priv = binding;
+	rxq->mp_params.mp_ops = &dmabuf_devmem_ops;
 
 	err = netdev_rx_queue_restart(dev, rxq_idx);
 	if (err)
@@ -169,6 +174,7 @@ int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
 
 err_xa_erase:
 	rxq->mp_params.mp_priv = NULL;
+	rxq->mp_params.mp_ops = NULL;
 	xa_erase(&binding->bound_rxqs, xa_idx);
 
 	return err;
@@ -388,3 +394,10 @@ bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem)
 	/* We don't want the page pool put_page()ing our net_iovs. */
 	return false;
 }
+
+static const struct memory_provider_ops dmabuf_devmem_ops = {
+	.init			= mp_dmabuf_devmem_init,
+	.destroy		= mp_dmabuf_devmem_destroy,
+	.alloc_netmems		= mp_dmabuf_devmem_alloc_netmems,
+	.release_netmem		= mp_dmabuf_devmem_release_page,
+};
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index fc3a04823087..0c5da8c056ec 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -13,6 +13,7 @@
 
 #include <net/netdev_rx_queue.h>
 #include <net/page_pool/helpers.h>
+#include <net/page_pool/memory_provider.h>
 #include <net/xdp.h>
 
 #include <linux/dma-direction.h>
@@ -285,13 +286,19 @@ static int page_pool_init(struct page_pool *pool,
 		rxq = __netif_get_rx_queue(pool->slow.netdev,
 					   pool->slow.queue_idx);
 		pool->mp_priv = rxq->mp_params.mp_priv;
+		pool->mp_ops = rxq->mp_params.mp_ops;
 	}
 
-	if (pool->mp_priv) {
+	if (pool->mp_ops) {
 		if (!pool->dma_map || !pool->dma_sync)
 			return -EOPNOTSUPP;
 
-		err = mp_dmabuf_devmem_init(pool);
+		if (WARN_ON(!is_kernel_rodata((unsigned long)pool->mp_ops))) {
+			err = -EFAULT;
+			goto free_ptr_ring;
+		}
+
+		err = pool->mp_ops->init(pool);
 		if (err) {
 			pr_warn("%s() mem-provider init failed %d\n", __func__,
 				err);
@@ -588,8 +595,8 @@ netmem_ref page_pool_alloc_netmems(struct page_pool *pool, gfp_t gfp)
 		return netmem;
 
 	/* Slow-path: cache empty, do real allocation */
-	if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_priv)
-		netmem = mp_dmabuf_devmem_alloc_netmems(pool, gfp);
+	if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_ops)
+		netmem = pool->mp_ops->alloc_netmems(pool, gfp);
 	else
 		netmem = __page_pool_alloc_pages_slow(pool, gfp);
 	return netmem;
@@ -696,8 +703,8 @@ void page_pool_return_page(struct page_pool *pool, netmem_ref netmem)
 	bool put;
 
 	put = true;
-	if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_priv)
-		put = mp_dmabuf_devmem_release_page(pool, netmem);
+	if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_ops)
+		put = pool->mp_ops->release_netmem(pool, netmem);
 	else
 		__page_pool_release_page_dma(pool, netmem);
 
@@ -1065,8 +1072,8 @@ static void __page_pool_destroy(struct page_pool *pool)
 	page_pool_unlist(pool);
 	page_pool_uninit(pool);
 
-	if (pool->mp_priv) {
-		mp_dmabuf_devmem_destroy(pool);
+	if (pool->mp_ops) {
+		pool->mp_ops->destroy(pool);
 		static_branch_dec(&page_pool_mem_providers);
 	}
 
-- 
2.43.5

