On 05/02/2020 22:07, Pavel Begunkov wrote:
> Save the sqe for the head of a link, so it doesn't go through the
> switch in io_req_defer_prep() nor allocate an async context in
> advance.
>
> Also, it fixes a potential memleak from double-preparing head
> requests, e.g. a prep in io_submit_sqe() and then another in
> io_req_defer(), which leaks the iovec for vectored reads/writes.

Looking through -rc1, I remembered that Jens has already fixed this,
so that paragraph can be struck out.

>
> Signed-off-by: Pavel Begunkov
> ---
>  fs/io_uring.c | 19 ++++++++++---------
>  1 file changed, 10 insertions(+), 9 deletions(-)
>
> diff --git a/fs/io_uring.c b/fs/io_uring.c
> index f00c2c9c67c0..e18056af5672 100644
> --- a/fs/io_uring.c
> +++ b/fs/io_uring.c
> @@ -4721,20 +4721,22 @@ static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
>  	}
>  }
> 
> -static inline void io_queue_link_head(struct io_kiocb *req)
> +static inline void io_queue_link_head(struct io_kiocb *req,
> +				      const struct io_uring_sqe *sqe)
>  {
>  	if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
>  		io_cqring_add_event(req, -ECANCELED);
>  		io_double_put_req(req);
>  	} else
> -		io_queue_sqe(req, NULL);
> +		io_queue_sqe(req, sqe);
>  }
> 
>  #define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK|	\
>  				IOSQE_IO_HARDLINK | IOSQE_ASYNC)
> 
>  static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
> -			  struct io_submit_state *state, struct io_kiocb **link)
> +			  struct io_submit_state *state, struct io_kiocb **link,
> +			  const struct io_uring_sqe **link_sqe)
>  {
>  	const struct cred *old_creds = NULL;
>  	struct io_ring_ctx *ctx = req->ctx;
> @@ -4812,7 +4814,7 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
> 
>  		/* last request of a link, enqueue the link */
>  		if (!(sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK))) {
> -			io_queue_link_head(head);
> +			io_queue_link_head(head, *link_sqe);
>  			*link = NULL;
>  		}
>  	} else {
> @@ -4823,10 +4825,8 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
>  		if (sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
>  			req->flags |= REQ_F_LINK;
>  			INIT_LIST_HEAD(&req->link_list);
> -			ret = io_req_defer_prep(req, sqe);
> -			if (ret)
> -				req->flags |= REQ_F_FAIL_LINK;
>  			*link = req;
> +			*link_sqe = sqe;
>  		} else {
>  			io_queue_sqe(req, sqe);
>  		}
> @@ -4924,6 +4924,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
>  	struct io_kiocb *link = NULL;
>  	int i, submitted = 0;
>  	bool mm_fault = false;
> +	const struct io_uring_sqe *link_sqe = NULL;
> 
>  	/* if we have a backlog and couldn't flush it all, return BUSY */
>  	if (test_bit(0, &ctx->sq_check_overflow)) {
> @@ -4983,7 +4984,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
>  		req->needs_fixed_file = async;
>  		trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
>  						true, async);
> -		if (!io_submit_sqe(req, sqe, statep, &link))
> +		if (!io_submit_sqe(req, sqe, statep, &link, &link_sqe))
>  			break;
>  	}
> 
> @@ -4993,7 +4994,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
>  		percpu_ref_put_many(&ctx->refs, nr - ref_used);
>  	}
>  	if (link)
> -		io_queue_link_head(link);
> +		io_queue_link_head(link, link_sqe);
>  	if (statep)
>  		io_submit_state_end(&state);
> 

--
Pavel Begunkov
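
For anyone skimming the thread, a minimal userspace sketch of the idea;
the names (struct req, prepare(), submit_link_head()) are simplified
stand-ins of my own, not the kernel's structures. The point is that an
unconditional prepare() run twice on the same request leaks the first
allocation, while remembering the sqe and preparing once at queue time
cannot:

	#include <stdlib.h>
	#include <stdio.h>

	/* Simplified stand-ins, not the kernel types. */
	struct sqe {
		int opcode;
	};

	struct req {
		const struct sqe *saved_sqe;	/* remembered for the link head */
		void *iov;			/* allocated by prepare() */
	};

	/*
	 * Allocates unconditionally, like async-context setup for
	 * vectored reads/writes: a second call on the same request
	 * would leak the first iov -- the double-prepare bug.
	 */
	static int prepare(struct req *r, const struct sqe *s)
	{
		(void)s;
		r->iov = malloc(64);
		return r->iov ? 0 : -1;
	}

	/* The patch in miniature: at submit time a link head only
	 * remembers its sqe -- no prepare(), no allocation. */
	static void submit_link_head(struct req *r, const struct sqe *s)
	{
		r->saved_sqe = s;
	}

	/* ...and prepare() runs exactly once, at queue time. */
	static int queue_link_head(struct req *r)
	{
		return prepare(r, r->saved_sqe);
	}

	int main(void)
	{
		struct sqe s = { .opcode = 1 };
		struct req r = { NULL, NULL };

		submit_link_head(&r, &s);
		if (queue_link_head(&r) == 0)
			puts("prepared once, no leak");
		free(r.iov);
		return 0;
	}

In the old flow, prep work ran once in io_submit_sqe() and could run
again in io_req_defer(); saving the sqe drops the first call entirely,
so there is nothing left to leak.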