From: Pavel Begunkov <[email protected]>
To: Jens Axboe <[email protected]>, [email protected]
Cc: [email protected], [email protected]
Subject: Re: [PATCH 2/3] io_uring: pass in io_kiocb to fill/add CQ handlers
Date: Thu, 7 Nov 2019 12:53:47 +0300	[thread overview]
Message-ID: <[email protected]> (raw)
In-Reply-To: <[email protected]>


On 07/11/2019 02:53, Jens Axboe wrote:
> This is in preparation for handling CQ ring overflow a bit smarter. We
> should not have any functional changes in this patch. Most of the changes
> are fairly straightforward; the only ones that stick out a bit are the
> ones that change:
> 
> __io_free_req()
> 
> to a double io_put_req(). If the request hasn't been submitted yet, we
> know it's safe to simply ignore references and free it. But let's clean
> these up too, as later patches will depend on the caller doing the right
> thing if the completion logging grabs a reference to the request.
> 
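
To restate the model being relied on here, a minimal sketch (simplified,
not the actual fs/io_uring.c definitions): each request starts with two
references, one for submission and one for completion, so dropping both
via io_put_req() frees a never-submitted request just as __io_free_req()
did, while staying correct if the completion side later takes a
reference of its own.

	struct io_kiocb {
		refcount_t	refs;	/* starts at 2: submit + complete */
		/* ... */
	};

	static void io_put_req(struct io_kiocb *req)
	{
		/* free only when the last reference is dropped */
		if (refcount_dec_and_test(&req->refs))
			io_free_req(req);
	}
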
> Signed-off-by: Jens Axboe <[email protected]>
> ---
>  fs/io_uring.c | 90 +++++++++++++++++++++++++--------------------------
>  1 file changed, 45 insertions(+), 45 deletions(-)
> 
> diff --git a/fs/io_uring.c b/fs/io_uring.c
> index 36ca7bc38ebf..fb621a564dcf 100644
> --- a/fs/io_uring.c
> +++ b/fs/io_uring.c
> @@ -369,8 +369,7 @@ struct io_submit_state {
>  };
>  
>  static void io_wq_submit_work(struct io_wq_work **workptr);
> -static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
> -				 long res);
> +static void io_cqring_fill_event(struct io_kiocb *req, long res);
>  static void __io_free_req(struct io_kiocb *req);
>  static void io_put_req(struct io_kiocb *req, struct io_kiocb **nxtptr);
>  
> @@ -535,8 +534,8 @@ static void io_kill_timeout(struct io_kiocb *req)
>  	if (ret != -1) {
>  		atomic_inc(&req->ctx->cq_timeouts);
>  		list_del_init(&req->list);
> -		io_cqring_fill_event(req->ctx, req->user_data, 0);
> -		__io_free_req(req);
> +		io_cqring_fill_event(req, 0);
> +		io_put_req(req, NULL);
>  	}
>  }
>  
> @@ -588,12 +587,12 @@ static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
>  	return &rings->cqes[tail & ctx->cq_mask];
>  }
>  
> -static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
> -				 long res)
> +static void io_cqring_fill_event(struct io_kiocb *req, long res)
>  {
> +	struct io_ring_ctx *ctx = req->ctx;
>  	struct io_uring_cqe *cqe;
>  
> -	trace_io_uring_complete(ctx, ki_user_data, res);
> +	trace_io_uring_complete(ctx, req->user_data, res);
>  
>  	/*
>  	 * If we can't get a cq entry, userspace overflowed the
> @@ -602,7 +601,7 @@ static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
>  	 */
>  	cqe = io_get_cqring(ctx);
>  	if (cqe) {
> -		WRITE_ONCE(cqe->user_data, ki_user_data);
> +		WRITE_ONCE(cqe->user_data, req->user_data);
>  		WRITE_ONCE(cqe->res, res);
>  		WRITE_ONCE(cqe->flags, 0);
>  	} else {
> @@ -621,13 +620,13 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
>  		eventfd_signal(ctx->cq_ev_fd, 1);
>  }
>  
> -static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 user_data,
> -				long res)
> +static void io_cqring_add_event(struct io_kiocb *req, long res)
>  {
> +	struct io_ring_ctx *ctx = req->ctx;
>  	unsigned long flags;
>  
>  	spin_lock_irqsave(&ctx->completion_lock, flags);
> -	io_cqring_fill_event(ctx, user_data, res);
> +	io_cqring_fill_event(req, res);
>  	io_commit_cqring(ctx);
>  	spin_unlock_irqrestore(&ctx->completion_lock, flags);
>  
> @@ -721,10 +720,10 @@ static void io_link_cancel_timeout(struct io_ring_ctx *ctx,
>  
>  	ret = hrtimer_try_to_cancel(&req->timeout.timer);
>  	if (ret != -1) {
> -		io_cqring_fill_event(ctx, req->user_data, -ECANCELED);
> +		io_cqring_fill_event(req, -ECANCELED);
>  		io_commit_cqring(ctx);
>  		req->flags &= ~REQ_F_LINK;
> -		__io_free_req(req);
> +		io_put_req(req, NULL);
>  	}
>  }
>  
> @@ -804,8 +803,10 @@ static void io_fail_links(struct io_kiocb *req)
>  		    link->submit.sqe->opcode == IORING_OP_LINK_TIMEOUT) {
>  			io_link_cancel_timeout(ctx, link);
>  		} else {
> -			io_cqring_fill_event(ctx, link->user_data, -ECANCELED);
> -			__io_free_req(link);
> +			io_cqring_fill_event(link, -ECANCELED);
> +			/* drop both submit and complete references */
> +			io_put_req(link, NULL);
> +			io_put_req(link, NULL);

io_put_req() -> ... -> io_free_req() -> io_fail_links() -> io_put_req()

It shouldn't recurse any deeper than that, but it would probably be
better to avoid the cycle altogether.
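
Roughly the shape of that cycle, with the real signatures trimmed down
to the relevant parts:

	static void io_free_req(struct io_kiocb *req);
	static void io_fail_links(struct io_kiocb *req);

	static void io_put_req(struct io_kiocb *req)
	{
		if (refcount_dec_and_test(&req->refs))
			io_free_req(req);
	}

	static void io_free_req(struct io_kiocb *req)
	{
		if (req->flags & REQ_F_LINK)
			io_fail_links(req);	/* calls io_put_req() on each link */
		/* ... actual freeing ... */
	}

As said, the depth stays bounded, but draining the link list iteratively
would make that explicit rather than implicit.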


>  		}
>  	}
>  
> @@ -891,7 +892,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
>  		req = list_first_entry(done, struct io_kiocb, list);
>  		list_del(&req->list);
>  
> -		io_cqring_fill_event(ctx, req->user_data, req->result);
> +		io_cqring_fill_event(req, req->result);
>  		(*nr_events)++;
>  
>  		if (refcount_dec_and_test(&req->refs)) {
> @@ -1087,7 +1088,7 @@ static void io_complete_rw_common(struct kiocb *kiocb, long res)
>  
>  	if ((req->flags & REQ_F_LINK) && res != req->result)
>  		req->flags |= REQ_F_FAIL_LINK;
> -	io_cqring_add_event(req->ctx, req->user_data, res);
> +	io_cqring_add_event(req, res);
>  }
>  
>  static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
> @@ -1588,15 +1589,14 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
>  /*
>   * IORING_OP_NOP just posts a completion event, nothing else.
>   */
> -static int io_nop(struct io_kiocb *req, u64 user_data)
> +static int io_nop(struct io_kiocb *req)
>  {
>  	struct io_ring_ctx *ctx = req->ctx;
> -	long err = 0;
>  
>  	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
>  		return -EINVAL;
>  
> -	io_cqring_add_event(ctx, user_data, err);
> +	io_cqring_add_event(req, 0);
>  	io_put_req(req, NULL);
>  	return 0;
>  }
> @@ -1643,7 +1643,7 @@ static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
>  
>  	if (ret < 0 && (req->flags & REQ_F_LINK))
>  		req->flags |= REQ_F_FAIL_LINK;
> -	io_cqring_add_event(req->ctx, sqe->user_data, ret);
> +	io_cqring_add_event(req, ret);
>  	io_put_req(req, nxt);
>  	return 0;
>  }
> @@ -1690,7 +1690,7 @@ static int io_sync_file_range(struct io_kiocb *req,
>  
>  	if (ret < 0 && (req->flags & REQ_F_LINK))
>  		req->flags |= REQ_F_FAIL_LINK;
> -	io_cqring_add_event(req->ctx, sqe->user_data, ret);
> +	io_cqring_add_event(req, ret);
>  	io_put_req(req, nxt);
>  	return 0;
>  }
> @@ -1726,7 +1726,7 @@ static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
>  			return ret;
>  	}
>  
> -	io_cqring_add_event(req->ctx, sqe->user_data, ret);
> +	io_cqring_add_event(req, ret);
>  	if (ret < 0 && (req->flags & REQ_F_LINK))
>  		req->flags |= REQ_F_FAIL_LINK;
>  	io_put_req(req, nxt);
> @@ -1782,7 +1782,7 @@ static int io_accept(struct io_kiocb *req, const struct io_uring_sqe *sqe,
>  	}
>  	if (ret < 0 && (req->flags & REQ_F_LINK))
>  		req->flags |= REQ_F_FAIL_LINK;
> -	io_cqring_add_event(req->ctx, sqe->user_data, ret);
> +	io_cqring_add_event(req, ret);
>  	io_put_req(req, nxt);
>  	return 0;
>  #else
> @@ -1843,7 +1843,7 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
>  	}
>  	spin_unlock_irq(&ctx->completion_lock);
>  
> -	io_cqring_add_event(req->ctx, sqe->user_data, ret);
> +	io_cqring_add_event(req, ret);
>  	if (ret < 0 && (req->flags & REQ_F_LINK))
>  		req->flags |= REQ_F_FAIL_LINK;
>  	io_put_req(req, NULL);
> @@ -1854,7 +1854,7 @@ static void io_poll_complete(struct io_ring_ctx *ctx, struct io_kiocb *req,
>  			     __poll_t mask)
>  {
>  	req->poll.done = true;
> -	io_cqring_fill_event(ctx, req->user_data, mangle_poll(mask));
> +	io_cqring_fill_event(req, mangle_poll(mask));
>  	io_commit_cqring(ctx);
>  }
>  
> @@ -2048,7 +2048,7 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
>  		list_del_init(&req->list);
>  	}
>  
> -	io_cqring_fill_event(ctx, req->user_data, -ETIME);
> +	io_cqring_fill_event(req, -ETIME);
>  	io_commit_cqring(ctx);
>  	spin_unlock_irqrestore(&ctx->completion_lock, flags);
>  
> @@ -2092,7 +2092,7 @@ static int io_timeout_remove(struct io_kiocb *req,
>  	/* didn't find timeout */
>  	if (ret) {
>  fill_ev:
> -		io_cqring_fill_event(ctx, req->user_data, ret);
> +		io_cqring_fill_event(req, ret);
>  		io_commit_cqring(ctx);
>  		spin_unlock_irq(&ctx->completion_lock);
>  		io_cqring_ev_posted(ctx);
> @@ -2108,8 +2108,8 @@ static int io_timeout_remove(struct io_kiocb *req,
>  		goto fill_ev;
>  	}
>  
> -	io_cqring_fill_event(ctx, req->user_data, 0);
> -	io_cqring_fill_event(ctx, treq->user_data, -ECANCELED);
> +	io_cqring_fill_event(req, 0);
> +	io_cqring_fill_event(treq, -ECANCELED);
>  	io_commit_cqring(ctx);
>  	spin_unlock_irq(&ctx->completion_lock);
>  	io_cqring_ev_posted(ctx);
> @@ -2249,7 +2249,7 @@ static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
>  
>  	if (ret < 0 && (req->flags & REQ_F_LINK))
>  		req->flags |= REQ_F_FAIL_LINK;
> -	io_cqring_add_event(req->ctx, sqe->user_data, ret);
> +	io_cqring_add_event(req, ret);
>  	io_put_req(req, nxt);
>  	return 0;
>  }
> @@ -2288,12 +2288,10 @@ static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
>  	int ret, opcode;
>  	struct sqe_submit *s = &req->submit;
>  
> -	req->user_data = READ_ONCE(s->sqe->user_data);
> -
>  	opcode = READ_ONCE(s->sqe->opcode);
>  	switch (opcode) {
>  	case IORING_OP_NOP:
> -		ret = io_nop(req, req->user_data);
> +		ret = io_nop(req);
>  		break;
>  	case IORING_OP_READV:
>  		if (unlikely(s->sqe->buf_index))
> @@ -2402,7 +2400,7 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
>  	if (ret) {
>  		if (req->flags & REQ_F_LINK)
>  			req->flags |= REQ_F_FAIL_LINK;
> -		io_cqring_add_event(ctx, sqe->user_data, ret);
> +		io_cqring_add_event(req, ret);
>  		io_put_req(req, NULL);
>  	}
>  
> @@ -2530,7 +2528,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
>  	if (prev)
>  		ret = io_async_cancel_one(ctx, (void *) prev->user_data);
>  
> -	io_cqring_add_event(ctx, req->user_data, ret);
> +	io_cqring_add_event(req, ret);
>  	io_put_req(req, NULL);
>  	return HRTIMER_NORESTART;
>  }
> @@ -2573,7 +2571,7 @@ static int io_queue_linked_timeout(struct io_kiocb *req, struct io_kiocb *nxt)
>  		 * failed by the regular submission path.
>  		 */
>  		list_del(&nxt->list);
> -		io_cqring_fill_event(ctx, nxt->user_data, ret);
> +		io_cqring_fill_event(nxt, ret);
>  		trace_io_uring_fail_link(req, nxt);
>  		io_commit_cqring(ctx);
>  		io_put_req(nxt, NULL);
> @@ -2646,7 +2644,7 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req)
>  
>  	/* and drop final reference, if we failed */
>  	if (ret) {
> -		io_cqring_add_event(ctx, req->user_data, ret);
> +		io_cqring_add_event(req, ret);
>  		if (req->flags & REQ_F_LINK)
>  			req->flags |= REQ_F_FAIL_LINK;
>  		io_put_req(req, NULL);
> @@ -2662,7 +2660,7 @@ static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req)
>  	ret = io_req_defer(ctx, req);
>  	if (ret) {
>  		if (ret != -EIOCBQUEUED) {
> -			io_cqring_add_event(ctx, req->submit.sqe->user_data, ret);
> +			io_cqring_add_event(req, ret);
>  			io_free_req(req, NULL);
>  		}
>  		return 0;
> @@ -2689,8 +2687,8 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
>  	ret = io_req_defer(ctx, req);
>  	if (ret) {
>  		if (ret != -EIOCBQUEUED) {
> -			io_cqring_add_event(ctx, req->submit.sqe->user_data, ret);
> -			io_free_req(req, NULL);
> +			io_cqring_add_event(req, ret);
> +			io_put_req(req, NULL);
>  			__io_free_req(shadow);
>  			return 0;
>  		}
> @@ -2723,6 +2721,8 @@ static void io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
>  	struct sqe_submit *s = &req->submit;
>  	int ret;
>  
> +	req->user_data = s->sqe->user_data;
> +
>  	/* enforce forwards compatibility on users */
>  	if (unlikely(s->sqe->flags & ~SQE_VALID_FLAGS)) {
>  		ret = -EINVAL;
> @@ -2732,13 +2732,13 @@ static void io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
>  	ret = io_req_set_file(ctx, state, req);
>  	if (unlikely(ret)) {
>  err_req:
> -		io_cqring_add_event(ctx, s->sqe->user_data, ret);
> -		io_free_req(req, NULL);
> +		io_cqring_add_event(req, ret);
> +		/* drop both submit and complete references */
> +		io_put_req(req, NULL);
> +		io_put_req(req, NULL);
>  		return;
>  	}
>  
> -	req->user_data = s->sqe->user_data;
> -
>  	/*
>  	 * If we already have a head request, queue this one for async
>  	 * submittal once the head completes. If we don't have a head but
> 

-- 
Pavel Begunkov

