From: Jonathan Lemon <[email protected]>
To: <[email protected]>
Cc: <[email protected]>
Subject: [RFC PATCH v3 07/15] io_uring: Allocate zctap device buffers and dma map them.
Date: Wed, 2 Nov 2022 16:32:36 -0700
Message-ID: <[email protected]>
In-Reply-To: <[email protected]>
The goal is to register a memory region with the device and
later specify the desired packet buffer size. The code currently
assumes a buffer size of PAGE_SIZE.
Create the desired number of zctap buffers and DMA map them
to the target device, recording the dma address for later use.
Hold a page reference while the page is dma mapped.
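For reference, the per-buffer state implied by the accesses in the
diff below (buf->page, buf->dma, buf->refcount) would be declared
roughly as follows; this is a sketch only, the exact layout and
location of the real definition live elsewhere in the series:

	/* Sketch of the per-buffer state used below; the actual
	 * declaration is introduced elsewhere in this series.
	 */
	struct io_zctap_buf {
		dma_addr_t	dma;		/* recorded at dma map time */
		struct page	*page;		/* reference held while mapped */
		atomic_t	refcount;	/* zeroed when the buffer is mapped */
	};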
Change the freelist from an array of page pointers to an array of
u16 indices into the device buffer list.
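For illustration, an allocation would pop an index off the freelist
and translate it to a buffer along these lines (ifr_get_buf is a
hypothetical name; the real get/put helpers arrive in patch 08/15):

	/* Sketch: the freelist now holds u16 indices into ifr->buf
	 * rather than page pointers.
	 */
	static struct io_zctap_buf *ifr_get_buf(struct ifq_region *ifr)
	{
		if (!ifr->free_count)
			return NULL;
		return &ifr->buf[ifr->freelist[--ifr->free_count]];
	}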
Signed-off-by: Jonathan Lemon <[email protected]>
---
io_uring/zctap.c | 78 ++++++++++++++++++++++++++++++++++++++----------
1 file changed, 63 insertions(+), 15 deletions(-)
diff --git a/io_uring/zctap.c b/io_uring/zctap.c
index c088655ade22..9f892e9ed8f2 100644
--- a/io_uring/zctap.c
+++ b/io_uring/zctap.c
@@ -18,11 +18,14 @@
#define NR_ZCTAP_IFQS 1
struct ifq_region {
+ struct io_zctap_ifq *ifq; /* only for delayed_work */
struct io_mapped_ubuf *imu;
int free_count;
int nr_pages;
u16 id;
- struct page *freelist[];
+
+ struct io_zctap_buf *buf;
+ u16 freelist[];
};
typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
@@ -60,49 +63,85 @@ bool io_zctap_put_page(struct io_zctap_ifq *ifq, struct page *page)
}
EXPORT_SYMBOL(io_zctap_put_page);
+static inline struct device *
+netdev2device(struct net_device *dev)
+{
+ return dev->dev.parent; /* from SET_NETDEV_DEV() */
+}
+
static void io_remove_ifq_region(struct ifq_region *ifr)
{
- struct io_mapped_ubuf *imu;
- struct page *page;
+ struct device *device = netdev2device(ifr->ifq->dev);
+ struct io_zctap_buf *buf;
int i;
- imu = ifr->imu;
for (i = 0; i < ifr->nr_pages; i++) {
- page = imu->bvec[i].bv_page;
-
- ClearPagePrivate(page);
- set_page_private(page, 0);
+ buf = &ifr->buf[i];
+ set_page_private(buf->page, 0);
+ ClearPagePrivate(buf->page);
+ dma_unmap_page_attrs(device, buf->dma, PAGE_SIZE,
+ DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ put_page(buf->page);
}
+ kvfree(ifr->buf);
kvfree(ifr);
}
-static int io_zctap_map_region(struct ifq_region *ifr)
+static int io_zctap_map_region(struct ifq_region *ifr, struct device *device)
{
struct io_mapped_ubuf *imu;
+ struct io_zctap_buf *buf;
struct page *page;
+ dma_addr_t addr;
+ int i, err;
u64 info;
- int i;
imu = ifr->imu;
for (i = 0; i < ifr->nr_pages; i++) {
page = imu->bvec[i].bv_page;
- if (PagePrivate(page))
+
+ if (PagePrivate(page)) {
+ err = -EEXIST;
goto out;
+ }
+
SetPagePrivate(page);
info = zctap_mk_page_info(ifr->id, i);
zctap_set_page_info(page, info);
- ifr->freelist[i] = page;
+
+ buf = &ifr->buf[i];
+ addr = dma_map_page_attrs(device, page, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(device, addr)) {
+ set_page_private(page, 0);
+ ClearPagePrivate(page);
+ err = -ENOMEM;
+ goto out;
+ }
+ buf->dma = addr;
+ buf->page = page;
+ atomic_set(&buf->refcount, 0);
+ get_page(page);
+
+ ifr->freelist[i] = i;
}
return 0;
out:
while (i--) {
page = imu->bvec[i].bv_page;
- ClearPagePrivate(page);
set_page_private(page, 0);
+ ClearPagePrivate(page);
+ buf = &ifr->buf[i];
+ dma_unmap_page_attrs(device, buf->dma, PAGE_SIZE,
+ DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ put_page(page);
}
- return -EEXIST;
+ return err;
}
int io_provide_ifq_region(struct io_zctap_ifq *ifq, u16 id)
@@ -131,13 +170,22 @@ int io_provide_ifq_region(struct io_zctap_ifq *ifq, u16 id)
if (!ifr)
return -ENOMEM;
+ ifr->buf = kvmalloc_array(nr_pages, sizeof(*ifr->buf), GFP_KERNEL);
+ if (!ifr->buf) {
+ kvfree(ifr);
+ return -ENOMEM;
+ }
+
ifr->nr_pages = nr_pages;
ifr->imu = imu;
ifr->free_count = nr_pages;
ifr->id = id;
- err = io_zctap_map_region(ifr);
+ ifr->ifq = ifq; /* XXX */
+
+ err = io_zctap_map_region(ifr, netdev2device(ifq->dev));
if (err) {
+ kvfree(ifr->buf);
kvfree(ifr);
return err;
}
--
2.30.2
Thread overview: 16+ messages in thread
2022-11-02 23:32 [RFC PATCH v3 00/15] zero-copy RX for io_uring Jonathan Lemon
2022-11-02 23:32 ` [RFC PATCH v3 01/15] io_uring: add zctap ifq definition Jonathan Lemon
2022-11-02 23:32 ` [RFC PATCH v3 02/15] netdevice: add SETUP_ZCTAP to the netdev_bpf structure Jonathan Lemon
2022-11-02 23:32 ` [RFC PATCH v3 03/15] io_uring: add register ifq opcode Jonathan Lemon
2022-11-02 23:32 ` [RFC PATCH v3 04/15] io_uring: create a zctap region for a mapped buffer Jonathan Lemon
2022-11-02 23:32 ` [RFC PATCH v3 05/15] io_uring: mark pages in ifq region with zctap information Jonathan Lemon
2022-11-02 23:32 ` [RFC PATCH v3 06/15] io_uring: Provide driver API for zctap packet buffers Jonathan Lemon
2022-11-02 23:32 ` [RFC PATCH v3 07/15] io_uring: Allocate zctap device buffers and dma map them Jonathan Lemon [this message]
2022-11-02 23:32 ` [RFC PATCH v3 08/15] io_uring: Add zctap buffer get/put functions and refcounting Jonathan Lemon
2022-11-02 23:32 ` [RFC PATCH v3 09/15] skbuff: Introduce SKBFL_FIXED_FRAG and skb_fixed() Jonathan Lemon
2022-11-02 23:32 ` [RFC PATCH v3 10/15] io_uring: Allocate a uarg for use by the ifq RX Jonathan Lemon
2022-11-02 23:32 ` [RFC PATCH v3 11/15] io_uring: Define the zctap iov[] returned to the user Jonathan Lemon
2022-11-02 23:32 ` [RFC PATCH v3 12/15] io_uring: add OP_RECV_ZC command Jonathan Lemon
2022-11-02 23:32 ` [RFC PATCH v3 13/15] io_uring: Make remove_ifq_region a delayed work call Jonathan Lemon
2022-11-02 23:32 ` [RFC PATCH v3 14/15] io_uring: Add a buffer caching mechanism for zctap Jonathan Lemon
2022-11-02 23:32 ` [RFC PATCH v3 15/15] io_uring: Notify the application as the fillq is drained Jonathan Lemon