public inbox for [email protected]
From: Pavel Begunkov <[email protected]>
To: Jens Axboe <[email protected]>, [email protected]
Subject: Re: [PATCH 3/9] io_uring: split poll and poll update structures
Date: Tue, 13 Apr 2021 18:14:28 +0100	[thread overview]
Message-ID: <[email protected]> (raw)
In-Reply-To: <b2f74d64ffebb57a648f791681af086c7211e3a4.1618278933.git.asml.silence@gmail.com>

On 13/04/2021 02:58, Pavel Begunkov wrote:
> struct io_poll_iocb became pretty nasty after gaining the update fields.
> Split them out, so we have more clarity.

I think we should move update into IORING_OP_POLL_REMOVE before it's
too late. That would give us better struct layouts and wouldn't get in
the way of POLL_ADD, which should be the more popular of the two.
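
Roughly what I have in mind, as an untested sketch (the sqe field
assignments and reusing the IORING_POLL_UPDATE_* flags in sqe->len are
just assumptions, not a final layout; flags == 0 would keep plain
remove behaviour, since io_poll_update() from this patch already treats
"no update flags" as a cancel):

static int io_poll_remove_prep(struct io_kiocb *req,
			       const struct io_uring_sqe *sqe)
{
	struct io_poll_update *upd = &req->poll_update;
	u32 flags, events;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;

	flags = READ_ONCE(sqe->len);
	if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA))
		return -EINVAL;

	/* which poll request to act on */
	upd->old_user_data = READ_ONCE(sqe->addr);
	upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
	upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
	if (upd->update_events) {
		events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
		events = swahw32(events);
#endif
		upd->events = demangle_poll(events) |
				(events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
	}
	if (upd->update_user_data)
		upd->new_user_data = READ_ONCE(sqe->off);
	return 0;
}

That keeps io_poll_add_prep() free of any update handling, and
io_poll_update() from this patch could be driven from the remove
opcode instead of POLL_ADD.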

Another thing: it looks like IORING_OP_POLL_REMOVE may cancel apoll,
which doesn't sound great. IMHO we should limit it to poll requests
only; it's rather confusing otherwise. Note: it can't be used reliably
from userspace anyway, so we probably don't need to care about the
change in behaviour.
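
Something like the below is what I mean (untested sketch; the poll_only
argument is made up for illustration). POLL_REMOVE/poll update would
pass poll_only=true, while io_poll_cancel() on the async cancel path
(IIRC) keeps passing false so apoll can still be cancelled there:

static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
				     bool poll_only)
{
	struct hlist_head *list;
	struct io_kiocb *req;

	list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
	hlist_for_each_entry(req, list, hash_node) {
		if (sqe_addr != req->user_data)
			continue;
		/* skip internally armed (apoll) entries, match only POLL_ADD */
		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
			continue;
		return req;
	}
	return NULL;
}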

> 
> Signed-off-by: Pavel Begunkov <[email protected]>
> ---
>  fs/io_uring.c | 55 ++++++++++++++++++++++++++++++---------------------
>  1 file changed, 32 insertions(+), 23 deletions(-)
> 
> diff --git a/fs/io_uring.c b/fs/io_uring.c
> index 429ee5fd9044..a0f207e62e32 100644
> --- a/fs/io_uring.c
> +++ b/fs/io_uring.c
> @@ -490,15 +490,16 @@ struct io_poll_iocb {
>  	__poll_t			events;
>  	bool				done;
>  	bool				canceled;
> +	struct wait_queue_entry		wait;
> +};
> +
> +struct io_poll_update {
> +	struct file			*file;
> +	u64				old_user_data;
> +	u64				new_user_data;
> +	__poll_t			events;
>  	bool				update_events;
>  	bool				update_user_data;
> -	union {
> -		struct wait_queue_entry	wait;
> -		struct {
> -			u64		old_user_data;
> -			u64		new_user_data;
> -		};
> -	};
>  };
>  
>  struct io_poll_remove {
> @@ -715,6 +716,7 @@ enum {
>  	REQ_F_COMPLETE_INLINE_BIT,
>  	REQ_F_REISSUE_BIT,
>  	REQ_F_DONT_REISSUE_BIT,
> +	REQ_F_POLL_UPDATE_BIT,
>  	/* keep async read/write and isreg together and in order */
>  	REQ_F_ASYNC_READ_BIT,
>  	REQ_F_ASYNC_WRITE_BIT,
> @@ -762,6 +764,8 @@ enum {
>  	REQ_F_REISSUE		= BIT(REQ_F_REISSUE_BIT),
>  	/* don't attempt request reissue, see io_rw_reissue() */
>  	REQ_F_DONT_REISSUE	= BIT(REQ_F_DONT_REISSUE_BIT),
> +	/* switches between poll and poll update */
> +	REQ_F_POLL_UPDATE	= BIT(REQ_F_POLL_UPDATE_BIT),
>  	/* supports async reads */
>  	REQ_F_ASYNC_READ	= BIT(REQ_F_ASYNC_READ_BIT),
>  	/* supports async writes */
> @@ -791,6 +795,7 @@ struct io_kiocb {
>  		struct file		*file;
>  		struct io_rw		rw;
>  		struct io_poll_iocb	poll;
> +		struct io_poll_update	poll_update;
>  		struct io_poll_remove	poll_remove;
>  		struct io_accept	accept;
>  		struct io_sync		sync;
> @@ -4989,7 +4994,6 @@ static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
>  	poll->head = NULL;
>  	poll->done = false;
>  	poll->canceled = false;
> -	poll->update_events = poll->update_user_data = false;
>  #define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
>  	/* mask in events that we always want/need */
>  	poll->events = events | IO_POLL_UNMASK;
> @@ -5366,7 +5370,6 @@ static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
>  
>  static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
>  {
> -	struct io_poll_iocb *poll = &req->poll;
>  	u32 events, flags;
>  
>  	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
> @@ -5383,20 +5386,26 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
>  #endif
>  	if (!(flags & IORING_POLL_ADD_MULTI))
>  		events |= EPOLLONESHOT;
> -	poll->update_events = poll->update_user_data = false;
> +	events = demangle_poll(events) |
> +				(events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
>  
>  	if (flags & (IORING_POLL_UPDATE_EVENTS|IORING_POLL_UPDATE_USER_DATA)) {
> -		poll->old_user_data = READ_ONCE(sqe->addr);
> -		poll->update_events = flags & IORING_POLL_UPDATE_EVENTS;
> -		poll->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
> -		if (poll->update_user_data)
> -			poll->new_user_data = READ_ONCE(sqe->off);
> +		struct io_poll_update *poll_upd = &req->poll_update;
> +
> +		req->flags |= REQ_F_POLL_UPDATE;
> +		poll_upd->events = events;
> +		poll_upd->old_user_data = READ_ONCE(sqe->addr);
> +		poll_upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
> +		poll_upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
> +		if (poll_upd->update_user_data)
> +			poll_upd->new_user_data = READ_ONCE(sqe->off);
>  	} else {
> +		struct io_poll_iocb *poll = &req->poll;
> +
> +		poll->events = events;
>  		if (sqe->off || sqe->addr)
>  			return -EINVAL;
>  	}
> -	poll->events = demangle_poll(events) |
> -				(events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
>  	return 0;
>  }
>  
> @@ -5434,7 +5443,7 @@ static int io_poll_update(struct io_kiocb *req)
>  	int ret;
>  
>  	spin_lock_irq(&ctx->completion_lock);
> -	preq = io_poll_find(ctx, req->poll.old_user_data);
> +	preq = io_poll_find(ctx, req->poll_update.old_user_data);
>  	if (!preq) {
>  		ret = -ENOENT;
>  		goto err;
> @@ -5464,13 +5473,13 @@ static int io_poll_update(struct io_kiocb *req)
>  		return 0;
>  	}
>  	/* only mask one event flags, keep behavior flags */
> -	if (req->poll.update_events) {
> +	if (req->poll_update.update_events) {
>  		preq->poll.events &= ~0xffff;
> -		preq->poll.events |= req->poll.events & 0xffff;
> +		preq->poll.events |= req->poll_update.events & 0xffff;
>  		preq->poll.events |= IO_POLL_UNMASK;
>  	}
> -	if (req->poll.update_user_data)
> -		preq->user_data = req->poll.new_user_data;
> +	if (req->poll_update.update_user_data)
> +		preq->user_data = req->poll_update.new_user_data;
>  
>  	spin_unlock_irq(&ctx->completion_lock);
>  
> @@ -5489,7 +5498,7 @@ static int io_poll_update(struct io_kiocb *req)
>  
>  static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
>  {
> -	if (!req->poll.update_events && !req->poll.update_user_data)
> +	if (!(req->flags & REQ_F_POLL_UPDATE))
>  		return __io_poll_add(req);
>  	return io_poll_update(req);
>  }
> 

-- 
Pavel Begunkov

Thread overview: 12+ messages
2021-04-13  1:58 [PATCH 5.13 0/9] another 5.13 pack Pavel Begunkov
2021-04-13  1:58 ` [PATCH 1/9] io_uring: fix leaking reg files on exit Pavel Begunkov
2021-04-13  1:58 ` [PATCH 2/9] io_uring: fix uninit old data for poll event upd Pavel Begunkov
2021-04-13  1:58 ` [PATCH 3/9] io_uring: split poll and poll update structures Pavel Begunkov
2021-04-13 17:14   ` Pavel Begunkov [this message]
2021-04-13  1:58 ` [PATCH 4/9] io_uring: add timeout completion_lock annotation Pavel Begunkov
2021-04-13  1:58 ` [PATCH 5/9] io_uring: refactor hrtimer_try_to_cancel uses Pavel Begunkov
2021-04-13  1:58 ` [PATCH 6/9] io_uring: clean up io_poll_remove_waitqs() Pavel Begunkov
2021-04-13  1:58 ` [PATCH 7/9] io_uring: don't fail overflow on in_idle Pavel Begunkov
2021-04-13  1:58 ` [PATCH 8/9] io_uring: skip futile iopoll iterations Pavel Begunkov
2021-04-13  1:58 ` [PATCH 9/9] io_uring: inline io_iopoll_getevents() Pavel Begunkov
2021-04-13 15:38 ` [PATCH 5.13 0/9] another 5.13 pack Jens Axboe
