* [PATCH v2] io_uring: make ASYNC_CANCEL work with poll and timeout
@ 2019-11-10 2:55 Jens Axboe
0 siblings, 0 replies; only message in thread
From: Jens Axboe @ 2019-11-10 2:55 UTC (permalink / raw)
To: io-uring; +Cc: Hrvoje Zeba
It's a little confusing that we have multiple types of command
cancellation opcodes now that we have a generic one. Make the generic
one work with POLL_ADD and TIMEOUT commands as well, that makes for an
easier to use API for the application. The fact that they currently
don't is a bit confusing.
Add a helper that takes care of it, so we can use it from both
IORING_OP_ASYNC_CANCEL and from the linked timeout cancellation.
Reported-by: Hrvoje Zeba <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
---
Changes since v1:
- Add a generic helper that we can use from both IORING_OP_ASYNC_CANCEL
and from the linked timeout handler. This makes it work as expected
on linked timeouts, too.
diff --git a/fs/io_uring.c b/fs/io_uring.c
index a2548a6dd195..1d5a892841e9 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1957,6 +1957,20 @@ static void io_poll_remove_all(struct io_ring_ctx *ctx)
spin_unlock_irq(&ctx->completion_lock);
}
+static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
+{
+ struct io_kiocb *req;
+
+ list_for_each_entry(req, &ctx->cancel_list, list) {
+ if (req->user_data != sqe_addr)
+ continue;
+ io_poll_remove_one(req);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
/*
* Find a running poll command that matches one specified in sqe->addr,
* and remove it if found.
@@ -1964,8 +1978,7 @@ static void io_poll_remove_all(struct io_ring_ctx *ctx)
static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_ring_ctx *ctx = req->ctx;
- struct io_kiocb *poll_req, *next;
- int ret = -ENOENT;
+ int ret;
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
@@ -1974,13 +1987,7 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -EINVAL;
spin_lock_irq(&ctx->completion_lock);
- list_for_each_entry_safe(poll_req, next, &ctx->cancel_list, list) {
- if (READ_ONCE(sqe->addr) == poll_req->user_data) {
- io_poll_remove_one(poll_req);
- ret = 0;
- break;
- }
- }
+ ret = io_poll_cancel(ctx, READ_ONCE(sqe->addr));
spin_unlock_irq(&ctx->completion_lock);
io_cqring_add_event(req, ret);
@@ -2200,6 +2207,31 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
+static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
+{
+ struct io_kiocb *req;
+ int ret = -ENOENT;
+
+ list_for_each_entry(req, &ctx->timeout_list, list) {
+ if (user_data == req->user_data) {
+ list_del_init(&req->list);
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret == -ENOENT)
+ return ret;
+
+ ret = hrtimer_try_to_cancel(&req->timeout.timer);
+ if (ret == -1)
+ return -EALREADY;
+
+ io_cqring_fill_event(req, -ECANCELED);
+ io_put_req(req);
+ return 0;
+}
+
/*
* Remove or update an existing timeout command
*/
@@ -2207,10 +2239,8 @@ static int io_timeout_remove(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
struct io_ring_ctx *ctx = req->ctx;
- struct io_kiocb *treq;
- int ret = -ENOENT;
- __u64 user_data;
unsigned flags;
+ int ret;
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
@@ -2220,42 +2250,15 @@ static int io_timeout_remove(struct io_kiocb *req,
if (flags)
return -EINVAL;
- user_data = READ_ONCE(sqe->addr);
spin_lock_irq(&ctx->completion_lock);
- list_for_each_entry(treq, &ctx->timeout_list, list) {
- if (user_data == treq->user_data) {
- list_del_init(&treq->list);
- ret = 0;
- break;
- }
- }
+ ret = io_timeout_cancel(ctx, READ_ONCE(sqe->addr));
- /* didn't find timeout */
- if (ret) {
-fill_ev:
- io_cqring_fill_event(req, ret);
- io_commit_cqring(ctx);
- spin_unlock_irq(&ctx->completion_lock);
- io_cqring_ev_posted(ctx);
- if (req->flags & REQ_F_LINK)
- req->flags |= REQ_F_FAIL_LINK;
- io_put_req(req);
- return 0;
- }
-
- ret = hrtimer_try_to_cancel(&treq->timeout.timer);
- if (ret == -1) {
- ret = -EBUSY;
- goto fill_ev;
- }
-
- io_cqring_fill_event(req, 0);
- io_cqring_fill_event(treq, -ECANCELED);
+ io_cqring_fill_event(req, ret);
io_commit_cqring(ctx);
spin_unlock_irq(&ctx->completion_lock);
io_cqring_ev_posted(ctx);
-
- io_put_req(treq);
+ if (ret < 0 && req->flags & REQ_F_LINK)
+ req->flags |= REQ_F_FAIL_LINK;
io_put_req(req);
return 0;
}
@@ -2372,12 +2375,38 @@ static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
return ret;
}
+static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
+ struct io_kiocb *req, __u64 sqe_addr,
+ struct io_kiocb **nxt)
+{
+ int ret;
+
+ ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
+ if (ret != -ENOENT) {
+ spin_lock_irq(&ctx->completion_lock);
+ goto done;
+ }
+
+ spin_lock_irq(&ctx->completion_lock);
+ ret = io_timeout_cancel(ctx, sqe_addr);
+ if (ret != -ENOENT)
+ goto done;
+ ret = io_poll_cancel(ctx, sqe_addr);
+done:
+ io_cqring_fill_event(req, ret);
+ io_commit_cqring(ctx);
+ spin_unlock_irq(&ctx->completion_lock);
+ io_cqring_ev_posted(ctx);
+
+ if (ret < 0 && (req->flags & REQ_F_LINK))
+ req->flags |= REQ_F_FAIL_LINK;
+ io_put_req_find_next(req, nxt);
+}
+
static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
struct io_kiocb **nxt)
{
struct io_ring_ctx *ctx = req->ctx;
- void *sqe_addr;
- int ret;
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
@@ -2385,13 +2414,7 @@ static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
sqe->cancel_flags)
return -EINVAL;
- sqe_addr = (void *) (unsigned long) READ_ONCE(sqe->addr);
- ret = io_async_cancel_one(ctx, sqe_addr);
-
- if (ret < 0 && (req->flags & REQ_F_LINK))
- req->flags |= REQ_F_FAIL_LINK;
- io_cqring_add_event(req, ret);
- io_put_req_find_next(req, nxt);
+ io_async_find_and_cancel(ctx, req, READ_ONCE(sqe->addr), NULL);
return 0;
}
@@ -2653,7 +2676,6 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *prev = NULL;
unsigned long flags;
- int ret = -ETIME;
spin_lock_irqsave(&ctx->completion_lock, flags);
@@ -2669,12 +2691,11 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
spin_unlock_irqrestore(&ctx->completion_lock, flags);
if (prev) {
- void *user_data = (void *) (unsigned long) prev->user_data;
- ret = io_async_cancel_one(ctx, user_data);
+ io_async_find_and_cancel(ctx, req, prev->user_data, NULL);
+ } else {
+ io_cqring_add_event(req, -ETIME);
+ io_put_req(req);
}
-
- io_cqring_add_event(req, ret);
- io_put_req(req);
return HRTIMER_NORESTART;
}
--
Jens Axboe
^ permalink raw reply related [flat|nested] only message in thread
only message in thread, other threads:[~2019-11-10 2:55 UTC | newest]
Thread overview: (only message) (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2019-11-10 2:55 [PATCH v2] io_uring: make ASYNC_CANCEL work with poll and timeout Jens Axboe
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox