From: Jonathan Lemon <[email protected]>
To: <[email protected]>
Cc: <[email protected]>
Subject: [PATCH v1 10/15] io_uring: Allocate a uarg for use by the ifq RX
Date: Mon, 7 Nov 2022 21:05:16 -0800
Message-ID: <[email protected]>
In-Reply-To: <[email protected]>
Create a static uarg, attached to the zerocopy RX skbs, and add a
callback that handles freeing the skb.
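(For context, and not part of this diff: the RX path is expected to
attach the uarg with the stock skb_zcopy_init() helper. A minimal
sketch follows; io_zctap_attach_uarg() is a hypothetical name used
only for illustration.)

static void io_zctap_attach_uarg(struct io_zctap_ifq *ifq,
				 struct sk_buff *skb)
{
	/* stores uarg in shinfo->destructor_arg and ORs uarg->flags
	 * (plus SKBFL_ZEROCOPY_FRAG) into shinfo->flags, marking the
	 * skb as zerocopy
	 */
	skb_zcopy_init(skb, ifq->uarg);
}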
Since the skb is marked as zerocopy, it bypasses the default network
skb fragment destructor and runs our callback instead. The callback
handles our buffer refcounts and releases the ZC buffers back to the
freelist.
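(To recap the scheme from the refcounting patch earlier in this
series: a single atomic carries both counts, with user references
biased by IO_ZCTAP_UREF and kernel references kept in the low 16
bits. The helpers below are purely illustrative and not part of
this patch.)

/* illustrative decomposition of buf->refcount */
static inline unsigned int buf_krefs(const struct io_zctap_buf *buf)
{
	return atomic_read(&buf->refcount) & IO_ZCTAP_KREF_MASK;
}

static inline unsigned int buf_urefs(const struct io_zctap_buf *buf)
{
	return atomic_read(&buf->refcount) / IO_ZCTAP_UREF;
}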
Add the io_zctap_put_page() implementation, which releases the
fragments. It may also be called by drivers during cleanup, as in
the sketch below.
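(For example, a driver tearing down an RX ring could hand its
remaining pages back this way; the ring structure and field names
here are hypothetical.)

static void drv_free_rx_ring(struct drv_rx_ring *ring)
{
	int i;

	for (i = 0; i < ring->count; i++) {
		struct page *page = ring->pages[i];

		if (!page)
			continue;
		/* io_zctap_put_page() returns false for pages that
		 * do not belong to the ifq region; fall back to a
		 * plain put_page() for those
		 */
		if (!io_zctap_put_page(ring->ifq, page))
			put_page(page);
		ring->pages[i] = NULL;
	}
}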
Signed-off-by: Jonathan Lemon <[email protected]>
---
io_uring/zctap.c | 64 ++++++++++++++++++++++++++++++++++++++++++------
1 file changed, 56 insertions(+), 8 deletions(-)
diff --git a/io_uring/zctap.c b/io_uring/zctap.c
index 0da9e6510f36..10d74b8f7cef 100644
--- a/io_uring/zctap.c
+++ b/io_uring/zctap.c
@@ -30,6 +30,12 @@ struct ifq_region {
u16 freelist[];
};
+/* XXX get around not having "struct ubuf_info" defined in io_uring_types.h */
+struct io_zctap_ifq_priv {
+ struct io_zctap_ifq ifq;
+ struct ubuf_info uarg;
+};
+
typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
static void zctap_set_page_info(struct page *page, u64 info)
@@ -52,6 +58,16 @@ static u16 zctap_page_id(const struct page *page)
return zctap_page_info(page) & 0xffff;
}
+static bool zctap_page_magic(const struct page *page)
+{
+ return (zctap_page_info(page) >> 48) == 0xface;
+}
+
+static bool zctap_page_ours(struct page *page)
+{
+ return PagePrivate(page) && zctap_page_magic(page);
+}
+
/* driver bias cannot be larger than this */
#define IO_ZCTAP_UREF 0x10000
#define IO_ZCTAP_KREF_MASK (IO_ZCTAP_UREF - 1)
@@ -70,7 +86,9 @@ static bool io_zctap_put_buf_uref(struct io_zctap_buf *buf)
return atomic_sub_and_test(IO_ZCTAP_UREF, &buf->refcount);
}
-/* gets a user-supplied buffer from the fill queue */
+/* gets a user-supplied buffer from the fill queue
+ * note: may drain N entries, but still have no usable buffers
+ */
static struct io_zctap_buf *io_zctap_get_buffer(struct io_zctap_ifq *ifq,
u16 *buf_pgid)
{
@@ -185,9 +203,19 @@ void io_zctap_put_buf_refs(struct io_zctap_ifq *ifq, struct io_zctap_buf *buf,
}
EXPORT_SYMBOL(io_zctap_put_buf_refs);
+/* could be called by the stack as it drops/recycles the skbs */
bool io_zctap_put_page(struct io_zctap_ifq *ifq, struct page *page)
{
- return false;
+ struct ifq_region *ifr;
+ u16 pgid;
+
+ if (!zctap_page_ours(page))
+ return false;
+
+ ifr = ifq->region; /* only one */
+ pgid = zctap_page_id(page);
+ io_zctap_put_buf(ifq, &ifr->buf[pgid]);
+ return true;
}
EXPORT_SYMBOL(io_zctap_put_page);
@@ -351,17 +379,35 @@ static int io_close_zctap_ifq(struct io_zctap_ifq *ifq, u16 queue_id)
return __io_queue_mgmt(ifq->dev, NULL, queue_id);
}
+static void io_zctap_ifq_callback(struct sk_buff *skb, struct ubuf_info *uarg,
+ bool success)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ struct io_zctap_ifq_priv *priv;
+ struct page *page;
+ int i;
+
+ priv = container_of(uarg, struct io_zctap_ifq_priv, uarg);
+
+ for (i = 0; i < shinfo->nr_frags; i++) {
+ page = skb_frag_page(&shinfo->frags[i]);
+ if (!io_zctap_put_page(&priv->ifq, page))
+ __skb_frag_unref(&shinfo->frags[i], skb->pp_recycle);
+ }
+}
+
static struct io_zctap_ifq *io_zctap_ifq_alloc(struct io_ring_ctx *ctx)
{
- struct io_zctap_ifq *ifq;
+ struct io_zctap_ifq_priv *priv;
- ifq = kzalloc(sizeof(*ifq), GFP_KERNEL);
- if (!ifq)
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return NULL;
- ifq->ctx = ctx;
- ifq->queue_id = -1;
- return ifq;
+ priv->ifq.ctx = ctx;
+ priv->ifq.queue_id = -1;
+ priv->ifq.uarg = &priv->uarg;
+ return &priv->ifq;
}
static void io_zctap_ifq_free(struct io_zctap_ifq *ifq)
@@ -399,6 +445,8 @@ int io_register_ifq(struct io_ring_ctx *ctx,
return -ENOMEM;
ifq->fill_bgid = req.fill_bgid;
+ ifq->uarg->callback = io_zctap_ifq_callback;
+ ifq->uarg->flags = SKBFL_ALL_ZEROCOPY | SKBFL_FIXED_FRAG;
err = -ENODEV;
ifq->dev = dev_get_by_index(&init_net, req.ifindex);
--
2.30.2