From: Pavel Begunkov <[email protected]>
To: [email protected]
Cc: Jens Axboe <[email protected]>, [email protected]
Subject: [PATCH for-next v3 07/16] io_uring: pass poll_find lock back
Date: Thu, 16 Jun 2022 10:22:03 +0100
Message-ID: <dae1dc5749aa34367812ecf62f82fd3f053aae44.1655371007.git.asml.silence@gmail.com>
In-Reply-To: <[email protected]>

Instead of relying on implicit knowledge of what is locked or not after
io_poll_find() and co. return, pass back a pointer to the locked bucket,
if any. If it is set, the caller must unlock the spinlock.
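
For illustration, the resulting caller-side contract is sketched below,
along the lines of io_poll_cancel() in the diff: the find helper takes the
bucket spinlock and, when it returns a request, leaves that lock held and
reports which bucket via *out_bucket, so the caller unlocks exactly when
the pointer is non-NULL:

	struct io_hash_bucket *bucket;
	struct io_kiocb *req;

	req = io_poll_find(ctx, false, cd, &bucket);
	if (req)
		io_poll_cancel_req(req);	/* still under bucket->lock */
	if (bucket)
		spin_unlock(&bucket->lock);	/* set iff the helper kept the lock */
	return req ? 0 : -ENOENT;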

Signed-off-by: Pavel Begunkov <[email protected]>
---
 io_uring/poll.c | 46 ++++++++++++++++++++++++++--------------------
 1 file changed, 26 insertions(+), 20 deletions(-)

diff --git a/io_uring/poll.c b/io_uring/poll.c
index 7f6b16f687b0..7fc4aafcca95 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -559,12 +559,15 @@ __cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
 }
 
 static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
-				     struct io_cancel_data *cd)
+				     struct io_cancel_data *cd,
+				     struct io_hash_bucket **out_bucket)
 {
 	struct io_kiocb *req;
 	u32 index = hash_long(cd->data, ctx->cancel_hash_bits);
 	struct io_hash_bucket *hb = &ctx->cancel_hash[index];
 
+	*out_bucket = NULL;
+
 	spin_lock(&hb->lock);
 	hlist_for_each_entry(req, &hb->list, hash_node) {
 		if (cd->data != req->cqe.user_data)
@@ -576,6 +579,7 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
 				continue;
 			req->work.cancel_seq = cd->seq;
 		}
+		*out_bucket = hb;
 		return req;
 	}
 	spin_unlock(&hb->lock);
@@ -583,11 +587,14 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
 }
 
 static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
-					  struct io_cancel_data *cd)
+					  struct io_cancel_data *cd,
+					  struct io_hash_bucket **out_bucket)
 {
 	struct io_kiocb *req;
 	int i;
 
+	*out_bucket = NULL;
+
 	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
 		struct io_hash_bucket *hb = &ctx->cancel_hash[i];
 
@@ -599,6 +606,7 @@ static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
 			if (cd->seq == req->work.cancel_seq)
 				continue;
 			req->work.cancel_seq = cd->seq;
+			*out_bucket = hb;
 			return req;
 		}
 		spin_unlock(&hb->lock);
@@ -617,23 +625,19 @@ static bool io_poll_disarm(struct io_kiocb *req)
 
 int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
 {
+	struct io_hash_bucket *bucket;
 	struct io_kiocb *req;
-	u32 index;
-	spinlock_t *lock;
 
 	if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY))
-		req = io_poll_file_find(ctx, cd);
+		req = io_poll_file_find(ctx, cd, &bucket);
 	else
-		req = io_poll_find(ctx, false, cd);
-	if (!req) {
-		return -ENOENT;
-	} else {
-		index = hash_long(req->cqe.user_data, ctx->cancel_hash_bits);
-		lock = &ctx->cancel_hash[index].lock;
-	}
-	io_poll_cancel_req(req);
-	spin_unlock(lock);
-	return 0;
+		req = io_poll_find(ctx, false, cd, &bucket);
+
+	if (req)
+		io_poll_cancel_req(req);
+	if (bucket)
+		spin_unlock(&bucket->lock);
+	return req ? 0 : -ENOENT;
 }
 
 static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
@@ -726,19 +730,21 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_poll_update *poll_update = io_kiocb_to_cmd(req);
 	struct io_cancel_data cd = { .data = poll_update->old_user_data, };
 	struct io_ring_ctx *ctx = req->ctx;
-	u32 index = hash_long(cd.data, ctx->cancel_hash_bits);
-	spinlock_t *lock = &ctx->cancel_hash[index].lock;
+	struct io_hash_bucket *bucket;
 	struct io_kiocb *preq;
 	int ret2, ret = 0;
 	bool locked;
 
-	preq = io_poll_find(ctx, true, &cd);
+	preq = io_poll_find(ctx, true, &cd, &bucket);
+	if (preq)
+		ret2 = io_poll_disarm(preq);
+	if (bucket)
+		spin_unlock(&bucket->lock);
+
 	if (!preq) {
 		ret = -ENOENT;
 		goto out;
 	}
-	ret2 = io_poll_disarm(preq);
-	spin_unlock(lock);
 	if (!ret2) {
 		ret = -EALREADY;
 		goto out;
-- 
2.36.1


Thread overview: 21+ messages
2022-06-16  9:21 [PATCH for-next v3 00/16] 5.20 cleanups and poll optimisations Pavel Begunkov
2022-06-16  9:21 ` [PATCH for-next v3 01/16] io_uring: rw: delegate sync completions to core io_uring Pavel Begunkov
2022-06-16  9:21 ` [PATCH for-next v3 02/16] io_uring: kill REQ_F_COMPLETE_INLINE Pavel Begunkov
2022-06-16  9:21 ` [PATCH for-next v3 03/16] io_uring: refactor io_req_task_complete() Pavel Begunkov
2022-06-16  9:22 ` [PATCH for-next v3 04/16] io_uring: don't inline io_put_kbuf Pavel Begunkov
2022-06-16  9:22 ` [PATCH for-next v3 05/16] io_uring: poll: remove unnecessary req->ref set Pavel Begunkov
2022-06-16  9:22 ` [PATCH for-next v3 06/16] io_uring: switch cancel_hash to use per entry spinlock Pavel Begunkov
2022-06-16  9:22 ` Pavel Begunkov [this message]
2022-06-16  9:22 ` [PATCH for-next v3 08/16] io_uring: clean up io_try_cancel Pavel Begunkov
2022-06-16  9:22 ` [PATCH for-next v3 09/16] io_uring: limit the number of cancellation buckets Pavel Begunkov
2022-06-16  9:22 ` [PATCH for-next v3 10/16] io_uring: clean up io_ring_ctx_alloc Pavel Begunkov
2022-06-16  9:22 ` [PATCH for-next v3 11/16] io_uring: use state completion infra for poll reqs Pavel Begunkov
2022-06-16  9:22 ` [PATCH for-next v3 12/16] io_uring: add IORING_SETUP_SINGLE_ISSUER Pavel Begunkov
2022-06-16  9:22 ` [PATCH for-next v3 13/16] io_uring: pass hash table into poll_find Pavel Begunkov
2022-06-16  9:22 ` [PATCH for-next v3 14/16] io_uring: introduce a struct for hash table Pavel Begunkov
2022-06-16  9:22 ` [PATCH for-next v3 15/16] io_uring: propagate locking state to poll cancel Pavel Begunkov
2022-06-16  9:22 ` [PATCH for-next v3 16/16] io_uring: mutex locked poll hashing Pavel Begunkov
2022-06-17 15:35   ` Nathan Chancellor
2022-06-18 12:07     ` Pavel Begunkov
2022-06-16 13:18 ` [PATCH for-next v3 00/16] 5.20 cleanups and poll optimisations Jens Axboe
2022-06-16 15:58 ` Hao Xu
