From: Pavel Begunkov <asml.silence@gmail.com>
To: Jakub Kicinski <kuba@kernel.org>, netdev@vger.kernel.org
Cc: asml.silence@gmail.com, io-uring@vger.kernel.org,
Eric Dumazet <edumazet@google.com>,
Willem de Bruijn <willemb@google.com>,
Paolo Abeni <pabeni@redhat.com>,
andrew+netdev@lunn.ch, horms@kernel.org, davem@davemloft.net,
sdf@fomichev.me, almasrymina@google.com, dw@davidwei.uk,
michael.chan@broadcom.com, dtatulea@nvidia.com,
ap420073@gmail.com
Subject: [RFC v1 21/22] net: parametrise mp open with a queue config
Date: Mon, 28 Jul 2025 12:04:25 +0100
Message-ID: <ca874424e226417fa174ac015ee62cc0e3092400.1753694914.git.asml.silence@gmail.com>
In-Reply-To: <cover.1753694913.git.asml.silence@gmail.com>

Allow memory providers to pass a queue config when opening a queue. The
next patch will use it to let zcrx pass a custom rx buffer length. As
netdev_rx_queue_restart() has many callers, passing a NULL qcfg is
allowed, in which case the function falls back to the default
configuration.
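
For illustration, a caller that wants a non-default buffer length could
end up looking roughly like the sketch below. This is only a sketch:
example_open_zcrx_queue() is a made-up helper, and the rx_buf_len field
name is assumed from the netdev_queue_config changes earlier in this
series.

	/*
	 * Hypothetical caller: shows how a memory provider might pass a
	 * custom rx buffer length via the new qcfg argument. A real caller
	 * would likely start from the device's current defaults before
	 * overriding the fields it cares about.
	 */
	static int example_open_zcrx_queue(struct net_device *dev,
					   unsigned int rxq_idx,
					   struct pp_memory_provider_params *mp_param,
					   unsigned int buf_len)
	{
		struct netdev_queue_config qcfg = {};

		/* Override only the rx buffer length for this queue. */
		qcfg.rx_buf_len = buf_len;

		/* NULL keeps the old behaviour; &qcfg applies the override. */
		return net_mp_open_rxq(dev, rxq_idx, mp_param, &qcfg);
	}
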
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
include/net/page_pool/memory_provider.h | 4 +-
io_uring/zcrx.c | 2 +-
net/core/netdev_rx_queue.c | 50 +++++++++++++++++--------
3 files changed, 39 insertions(+), 17 deletions(-)
diff --git a/include/net/page_pool/memory_provider.h b/include/net/page_pool/memory_provider.h
index ada4f968960a..c08ba208f67d 100644
--- a/include/net/page_pool/memory_provider.h
+++ b/include/net/page_pool/memory_provider.h
@@ -5,6 +5,7 @@
#include <net/netmem.h>
#include <net/page_pool/types.h>
+struct netdev_queue_config;
struct netdev_rx_queue;
struct netlink_ext_ack;
struct sk_buff;
@@ -24,7 +25,8 @@ void net_mp_niov_set_page_pool(struct page_pool *pool, struct net_iov *niov);
void net_mp_niov_clear_page_pool(struct net_iov *niov);
int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
- struct pp_memory_provider_params *p);
+ struct pp_memory_provider_params *p,
+ struct netdev_queue_config *qcfg);
int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
const struct pp_memory_provider_params *p,
struct netlink_ext_ack *extack);
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index 985c7386e24b..a00243e10164 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -595,7 +595,7 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
mp_param.mp_ops = &io_uring_pp_zc_ops;
mp_param.mp_priv = ifq;
- ret = net_mp_open_rxq(ifq->netdev, reg.if_rxq, &mp_param);
+ ret = net_mp_open_rxq(ifq->netdev, reg.if_rxq, &mp_param, NULL);
if (ret)
goto err;
ifq->if_rxq = reg.if_rxq;
diff --git a/net/core/netdev_rx_queue.c b/net/core/netdev_rx_queue.c
index 7c691eb1a48b..0dbfdb5f5b91 100644
--- a/net/core/netdev_rx_queue.c
+++ b/net/core/netdev_rx_queue.c
@@ -10,12 +10,14 @@
#include "dev.h"
#include "page_pool_priv.h"
-int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx,
- struct netlink_ext_ack *extack)
+static int netdev_rx_queue_restart_cfg(struct net_device *dev,
+ unsigned int rxq_idx,
+ struct netlink_ext_ack *extack,
+ struct netdev_queue_config *qcfg)
{
struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);
const struct netdev_queue_mgmt_ops *qops = dev->queue_mgmt_ops;
- struct netdev_queue_config qcfg;
+ struct netdev_queue_config tmp_qcfg;
void *new_mem, *old_mem;
int err;
@@ -35,15 +37,18 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx,
goto err_free_new_mem;
}
- netdev_queue_config(dev, rxq_idx, &qcfg);
+ if (!qcfg) {
+ qcfg = &tmp_qcfg;
+ netdev_queue_config(dev, rxq_idx, qcfg);
+ }
if (qops->ndo_queue_cfg_validate) {
- err = qops->ndo_queue_cfg_validate(dev, rxq_idx, &qcfg, extack);
+ err = qops->ndo_queue_cfg_validate(dev, rxq_idx, qcfg, extack);
if (err)
goto err_free_old_mem;
}
- err = qops->ndo_queue_mem_alloc(dev, &qcfg, new_mem, rxq_idx);
+ err = qops->ndo_queue_mem_alloc(dev, qcfg, new_mem, rxq_idx);
if (err)
goto err_free_old_mem;
@@ -56,7 +61,7 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx,
if (err)
goto err_free_new_queue_mem;
- err = qops->ndo_queue_start(dev, &qcfg, new_mem, rxq_idx);
+ err = qops->ndo_queue_start(dev, qcfg, new_mem, rxq_idx);
if (err)
goto err_start_queue;
} else {
@@ -71,7 +76,7 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx,
return 0;
err_start_queue:
- __netdev_queue_config(dev, rxq_idx, &qcfg, false);
+ __netdev_queue_config(dev, rxq_idx, qcfg, false);
/* Restarting the queue with old_mem should be successful as we haven't
* changed any of the queue configuration, and there is not much we can
* do to recover from a failure here.
@@ -79,7 +84,7 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx,
* WARN if we fail to recover the old rx queue, and at least free
* old_mem so we don't also leak that.
*/
- if (qops->ndo_queue_start(dev, &qcfg, old_mem, rxq_idx)) {
+ if (qops->ndo_queue_start(dev, qcfg, old_mem, rxq_idx)) {
WARN(1,
"Failed to restart old queue in error path. RX queue %d may be unhealthy.",
rxq_idx);
@@ -97,11 +102,18 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx,
return err;
}
+
+int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx,
+ struct netlink_ext_ack *extack)
+{
+ return netdev_rx_queue_restart_cfg(dev, rxq_idx, extack, NULL);
+}
EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL");
-int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
- const struct pp_memory_provider_params *p,
- struct netlink_ext_ack *extack)
+static int __net_mp_open_rxq_cfg(struct net_device *dev, unsigned int rxq_idx,
+ const struct pp_memory_provider_params *p,
+ struct netlink_ext_ack *extack,
+ struct netdev_queue_config *qcfg)
{
struct netdev_rx_queue *rxq;
int ret;
@@ -143,7 +155,7 @@ int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
#endif
rxq->mp_params = *p;
- ret = netdev_rx_queue_restart(dev, rxq_idx, extack);
+ ret = netdev_rx_queue_restart_cfg(dev, rxq_idx, extack, qcfg);
if (ret) {
rxq->mp_params.mp_ops = NULL;
rxq->mp_params.mp_priv = NULL;
@@ -151,13 +163,21 @@ int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
return ret;
}
+int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
+ const struct pp_memory_provider_params *p,
+ struct netlink_ext_ack *extack)
+{
+ return __net_mp_open_rxq_cfg(dev, rxq_idx, p, extack, NULL);
+}
+
int net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
- struct pp_memory_provider_params *p)
+ struct pp_memory_provider_params *p,
+ struct netdev_queue_config *qcfg)
{
int ret;
netdev_lock(dev);
- ret = __net_mp_open_rxq(dev, rxq_idx, p, NULL);
+ ret = __net_mp_open_rxq_cfg(dev, rxq_idx, p, NULL, qcfg);
netdev_unlock(dev);
return ret;
}
--
2.49.0