From: Jens Axboe <[email protected]>
To: [email protected]
Cc: [email protected], [email protected],
Jens Axboe <[email protected]>
Subject: [PATCH 7/9] io_uring/poll: pull ownership handling into poll.h
Date: Mon, 3 Feb 2025 09:23:45 -0700
Message-ID: <[email protected]>
In-Reply-To: <[email protected]>

In preparation for using this from outside of poll.c (the epoll wait
support added later in this series), rather than duplicating the
functionality, just make it generically available to io_uring opcodes.

Note: this must be used carefully, as it cannot be used by opcodes that
can themselves trigger poll logic.
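
As a rough sketch of the intended usage pattern (the wakeup handler
below is hypothetical and not part of this series; it assumes the
opcode stashed the request in wait->private and set up
->io_task_work.func when arming the wait entry):

	static int io_sketch_wake(struct wait_queue_entry *wait, unsigned mode,
				  int sync, void *key)
	{
		struct io_kiocb *req = wait->private;

		/*
		 * Only the side that wins the 0 -> 1 transition on the ref
		 * part of ->poll_refs owns the request; anyone else must
		 * leave it alone, the current owner will process it.
		 */
		if (!io_poll_get_ownership(req))
			return 1;

		/* we own the request, safe to queue completion task_work */
		io_req_task_work_add(req);
		return 1;
	}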
Signed-off-by: Jens Axboe <[email protected]>
---
 io_uring/poll.c | 30 +-----------------------------
 io_uring/poll.h | 31 +++++++++++++++++++++++++++++++
 2 files changed, 32 insertions(+), 29 deletions(-)

diff --git a/io_uring/poll.c b/io_uring/poll.c
index bb1c0cd4f809..5e44ac562491 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -41,16 +41,6 @@ struct io_poll_table {
 	__poll_t result_mask;
 };
 
-#define IO_POLL_CANCEL_FLAG	BIT(31)
-#define IO_POLL_RETRY_FLAG	BIT(30)
-#define IO_POLL_REF_MASK	GENMASK(29, 0)
-
-/*
- * We usually have 1-2 refs taken, 128 is more than enough and we want to
- * maximise the margin between this amount and the moment when it overflows.
- */
-#define IO_POLL_REF_BIAS	128
-
 #define IO_WQE_F_DOUBLE		1
 
 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
@@ -70,7 +60,7 @@ static inline bool wqe_is_double(struct wait_queue_entry *wqe)
 	return priv & IO_WQE_F_DOUBLE;
 }
 
-static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
+bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
 {
 	int v;
 
@@ -85,24 +75,6 @@ static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
 	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
 }
 
-/*
- * If refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. We can
- * bump it and acquire ownership. It's disallowed to modify requests while not
- * owning it, that prevents from races for enqueueing task_work's and b/w
- * arming poll and wakeups.
- */
-static inline bool io_poll_get_ownership(struct io_kiocb *req)
-{
-	if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
-		return io_poll_get_ownership_slowpath(req);
-	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
-}
-
-static void io_poll_mark_cancelled(struct io_kiocb *req)
-{
-	atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
-}
-
 static struct io_poll *io_poll_get_double(struct io_kiocb *req)
 {
 	/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
diff --git a/io_uring/poll.h b/io_uring/poll.h
index 04ede93113dc..2f416cd3be13 100644
--- a/io_uring/poll.h
+++ b/io_uring/poll.h
@@ -21,6 +21,18 @@ struct async_poll {
 	struct io_poll *double_poll;
 };
 
+#define IO_POLL_CANCEL_FLAG	BIT(31)
+#define IO_POLL_RETRY_FLAG	BIT(30)
+#define IO_POLL_REF_MASK	GENMASK(29, 0)
+
+bool io_poll_get_ownership_slowpath(struct io_kiocb *req);
+
+/*
+ * We usually have 1-2 refs taken, 128 is more than enough and we want to
+ * maximise the margin between this amount and the moment when it overflows.
+ */
+#define IO_POLL_REF_BIAS	128
+
 /*
  * Must only be called inside issue_flags & IO_URING_F_MULTISHOT, or
  * potentially other cases where we already "own" this poll request.
@@ -30,6 +42,25 @@ static inline void io_poll_multishot_retry(struct io_kiocb *req)
 	atomic_inc(&req->poll_refs);
 }
 
+/*
+ * If refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. We can
+ * bump it and acquire ownership. It's disallowed to modify requests while not
+ * owning it, that prevents from races for enqueueing task_work's and b/w
+ * arming poll and wakeups.
+ */
+static inline bool io_poll_get_ownership(struct io_kiocb *req)
+{
+	if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
+		return io_poll_get_ownership_slowpath(req);
+	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
+}
+
+static inline void io_poll_mark_cancelled(struct io_kiocb *req)
+{
+	atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
+}
+
+
 int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_poll_add(struct io_kiocb *req, unsigned int issue_flags);
 
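
For reference, the ->poll_refs layout the defines moved into poll.h
describe (illustration only, not part of the patch):

	/*
	 *    bit 31          bit 30          bits 29..0
	 * +---------------+---------------+--------------------+
	 * | CANCEL_FLAG   | RETRY_FLAG    | ownership refcount |
	 * +---------------+---------------+--------------------+
	 *
	 * io_poll_get_ownership() takes ownership iff the refcount part
	 * was zero before its atomic_fetch_inc(). Once the refcount
	 * reaches IO_POLL_REF_BIAS (128), the fast path punts to
	 * io_poll_get_ownership_slowpath(), long before the 30-bit
	 * field could overflow into the flag bits.
	 */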
--
2.47.2