From: "Michael S. Tsirkin" <mst@redhat.com>
To: Mina Almasry <almasrymina@google.com>
Cc: netdev@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-doc@vger.kernel.org, io-uring@vger.kernel.org,
virtualization@lists.linux.dev, kvm@vger.kernel.org,
linux-kselftest@vger.kernel.org,
"Donald Hunter" <donald.hunter@gmail.com>,
"Jakub Kicinski" <kuba@kernel.org>,
"David S. Miller" <davem@davemloft.net>,
"Eric Dumazet" <edumazet@google.com>,
"Paolo Abeni" <pabeni@redhat.com>,
"Simon Horman" <horms@kernel.org>,
"Jonathan Corbet" <corbet@lwn.net>,
"Andrew Lunn" <andrew+netdev@lunn.ch>,
"Jeroen de Borst" <jeroendb@google.com>,
"Harshitha Ramamurthy" <hramamurthy@google.com>,
"Kuniyuki Iwashima" <kuniyu@amazon.com>,
"Willem de Bruijn" <willemb@google.com>,
"Jens Axboe" <axboe@kernel.dk>,
"Pavel Begunkov" <asml.silence@gmail.com>,
"David Ahern" <dsahern@kernel.org>,
"Neal Cardwell" <ncardwell@google.com>,
"Stefan Hajnoczi" <stefanha@redhat.com>,
"Stefano Garzarella" <sgarzare@redhat.com>,
"Jason Wang" <jasowang@redhat.com>,
"Xuan Zhuo" <xuanzhuo@linux.alibaba.com>,
"Eugenio Pérez" <eperezma@redhat.com>,
"Shuah Khan" <shuah@kernel.org>,
sdf@fomichev.me, dw@davidwei.uk,
"Jamal Hadi Salim" <jhs@mojatatu.com>,
"Victor Nogueira" <victor@mojatatu.com>,
"Pedro Tammela" <pctammela@mojatatu.com>,
"Samiullah Khawaja" <skhawaja@google.com>,
"Kaiyuan Zhang" <kaiyuanz@google.com>
Subject: Re: [PATCH net-next v10 4/9] net: devmem: Implement TX path
Date: Wed, 23 Apr 2025 14:24:42 -0400 [thread overview]
Message-ID: <20250423140931-mutt-send-email-mst@kernel.org> (raw)
In-Reply-To: <20250423031117.907681-5-almasrymina@google.com>
some nits

On Wed, Apr 23, 2025 at 03:11:11AM +0000, Mina Almasry wrote:
> @@ -189,43 +200,44 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
> }
>
> binding->dev = dev;
> -
> - err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
> - binding, xa_limit_32b, &id_alloc_next,
> - GFP_KERNEL);
> - if (err < 0)
> - goto err_free_binding;
> -
> xa_init_flags(&binding->bound_rxqs, XA_FLAGS_ALLOC);
> -
> refcount_set(&binding->ref, 1);
> -
> binding->dmabuf = dmabuf;
>
given you keep iterating, don't tweak whitespace in the same patch;
it will make the review a tiny bit easier.
> binding->attachment = dma_buf_attach(binding->dmabuf, dev->dev.parent);
> if (IS_ERR(binding->attachment)) {
> err = PTR_ERR(binding->attachment);
> NL_SET_ERR_MSG(extack, "Failed to bind dmabuf to device");
> - goto err_free_id;
> + goto err_free_binding;
> }
>
> binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment,
> - DMA_FROM_DEVICE);
> + direction);
> if (IS_ERR(binding->sgt)) {
> err = PTR_ERR(binding->sgt);
> NL_SET_ERR_MSG(extack, "Failed to map dmabuf attachment");
> goto err_detach;
> }
>
> + if (direction == DMA_TO_DEVICE) {
> + binding->tx_vec = kvmalloc_array(dmabuf->size / PAGE_SIZE,
> + sizeof(struct net_iov *),
> + GFP_KERNEL);
> + if (!binding->tx_vec) {
> + err = -ENOMEM;
> + goto err_unmap;
> + }
> + }
> +
> /* For simplicity we expect to make PAGE_SIZE allocations, but the
> * binding can be much more flexible than that. We may be able to
> * allocate MTU sized chunks here. Leave that for future work...
> */
> - binding->chunk_pool =
> - gen_pool_create(PAGE_SHIFT, dev_to_node(&dev->dev));
> + binding->chunk_pool = gen_pool_create(PAGE_SHIFT,
> + dev_to_node(&dev->dev));
> if (!binding->chunk_pool) {
> err = -ENOMEM;
> - goto err_unmap;
> + goto err_tx_vec;
> }
>
> virtual = 0;
> @@ -270,24 +282,34 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
> niov->owner = &owner->area;
> page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov),
> net_devmem_get_dma_addr(niov));
> + if (direction == DMA_TO_DEVICE)
> + binding->tx_vec[owner->area.base_virtual / PAGE_SIZE + i] = niov;
> }
>
> virtual += len;
> }
>
> + err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
> + binding, xa_limit_32b, &id_alloc_next,
> + GFP_KERNEL);
> + if (err < 0)
> + goto err_free_id;
> +
> return binding;
>
> +err_free_id:
> + xa_erase(&net_devmem_dmabuf_bindings, binding->id);
> err_free_chunks:
> gen_pool_for_each_chunk(binding->chunk_pool,
> net_devmem_dmabuf_free_chunk_owner, NULL);
> gen_pool_destroy(binding->chunk_pool);
> +err_tx_vec:
> + kvfree(binding->tx_vec);
> err_unmap:
> dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
> DMA_FROM_DEVICE);
> err_detach:
> dma_buf_detach(dmabuf, binding->attachment);
> -err_free_id:
> - xa_erase(&net_devmem_dmabuf_bindings, binding->id);
> err_free_binding:
> kfree(binding);
> err_put_dmabuf:
> @@ -295,6 +317,21 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
> return ERR_PTR(err);
> }
>
> +struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id)
> +{
> + struct net_devmem_dmabuf_binding *binding;
> +
> + rcu_read_lock();
> + binding = xa_load(&net_devmem_dmabuf_bindings, id);
> + if (binding) {
> + if (!net_devmem_dmabuf_binding_get(binding))
> + binding = NULL;
> + }
> + rcu_read_unlock();
> +
> + return binding;
> +}
> +
> void net_devmem_get_net_iov(struct net_iov *niov)
> {
> net_devmem_dmabuf_binding_get(net_devmem_iov_binding(niov));
> @@ -305,6 +342,53 @@ void net_devmem_put_net_iov(struct net_iov *niov)
> net_devmem_dmabuf_binding_put(net_devmem_iov_binding(niov));
> }
>
> +struct net_devmem_dmabuf_binding *net_devmem_get_binding(struct sock *sk,
> + unsigned int dmabuf_id)
> +{
> + struct net_devmem_dmabuf_binding *binding;
> + struct dst_entry *dst = __sk_dst_get(sk);
> + int err = 0;
> +
> + binding = net_devmem_lookup_dmabuf(dmabuf_id);
why not initialize binding together with the declaration?
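i.e. something like this (untested, just to illustrate what I mean,
names taken from the quoted code):

	struct net_devmem_dmabuf_binding *binding =
		net_devmem_lookup_dmabuf(dmabuf_id);
	struct dst_entry *dst = __sk_dst_get(sk);
	int err = 0;
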
> + if (!binding || !binding->tx_vec) {
> + err = -EINVAL;
> + goto out_err;
> + }
> +
> + /* The dma-addrs in this binding are only reachable to the corresponding
> + * net_device.
> + */
> + if (!dst || !dst->dev || dst->dev->ifindex != binding->dev->ifindex) {
> + err = -ENODEV;
> + goto out_err;
> + }
> +
> + return binding;
> +
> +out_err:
> + if (binding)
> + net_devmem_dmabuf_binding_put(binding);
> +
> + return ERR_PTR(err);
> +}
> +
> +struct net_iov *
> +net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding,
> + size_t virt_addr, size_t *off, size_t *size)
> +{
> + size_t idx;
> +
> + if (virt_addr >= binding->dmabuf->size)
> + return NULL;
> +
> + idx = virt_addr / PAGE_SIZE;
init this where it's declared, or where it's used?
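e.g. either (untested sketch):

	size_t idx = virt_addr / PAGE_SIZE;

or drop the local entirely and index at the point of use:

	return binding->tx_vec[virt_addr / PAGE_SIZE];
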
> +
> + *off = virt_addr % PAGE_SIZE;
> + *size = PAGE_SIZE - *off;
> +
> + return binding->tx_vec[idx];
> +}
> +
> /*** "Dmabuf devmem memory provider" ***/
>
> int mp_dmabuf_devmem_init(struct page_pool *pool)
--
MST