From: Pavel Begunkov <[email protected]>
To: [email protected]
Cc: Jens Axboe <[email protected]>, [email protected]
Subject: [PATCH 5/8] io_uring: optimise read/write iov state storing
Date: Thu, 14 Oct 2021 16:10:16 +0100
Message-ID: <5c5e7ffd7dc25fc35075c70411ba99df72f237fa.1634144845.git.asml.silence@gmail.com>
In-Reply-To: <[email protected]>
Currently io_read() and io_write() keep separate pointers to an iter and
to struct iov_iter_state, which is not great for register spilling and
requires more on-stack copies. The two always live together, either
on-stack or in req->async_data, so use struct io_rw_state and keep a
single pointer to it, making all of the state accessible through one
pointer.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 79 ++++++++++++++++++++++++---------------------------
1 file changed, 37 insertions(+), 42 deletions(-)
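Note: to illustrate the pattern outside the kernel, below is a minimal
user-space sketch (hypothetical stand-in types, not the kernel's
iov_iter) of how grouping the iterator and its saved state into one
struct lets the save/restore cycle run off a single pointer:

	#include <stddef.h>

	/* hypothetical stand-ins for iov_iter / iov_iter_state */
	struct iter       { size_t off; };
	struct iter_state { size_t saved_off; };

	/* analogue of struct io_rw_state: all rw state behind one pointer */
	struct rw_state {
		struct iter       iter;
		struct iter_state iter_state;
	};

	static void save(struct rw_state *s)
	{
		s->iter_state.saved_off = s->iter.off;
	}

	static void restore(struct rw_state *s)
	{
		s->iter.off = s->iter_state.saved_off;
	}

	int main(void)
	{
		struct rw_state __s = {0}, *s = &__s;	/* one pointer, not two */

		save(s);		/* snapshot before attempting I/O */
		s->iter.off += 4096;	/* partial progress */
		restore(s);		/* roll back for a retry */
		return s->iter.off == 0 ? 0 : 1;
	}

With iter and iter_state as separate locals, the compiler has to keep
two live pointers across the retry loop; with the struct, it keeps one
and reaches every field at a fixed offset from it.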
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3447243805d9..248ef7b09268 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -694,9 +694,9 @@ struct io_async_msghdr {
};
struct io_rw_state {
- struct iovec fast_iov[UIO_FASTIOV];
struct iov_iter iter;
struct iov_iter_state iter_state;
+ struct iovec fast_iov[UIO_FASTIOV];
};
struct io_async_rw {
@@ -3259,8 +3259,7 @@ static inline bool io_alloc_async_data(struct io_kiocb *req)
}
static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
- const struct iovec *fast_iov,
- struct iov_iter *iter, bool force)
+ struct io_rw_state *s, bool force)
{
if (!force && !io_op_defs[req->opcode].needs_async_setup)
return 0;
@@ -3272,7 +3271,7 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
return -ENOMEM;
}
- io_req_map_rw(req, iovec, fast_iov, iter);
+ io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
iorw = req->async_data;
/* we've copied and mapped the iter, ensure state is saved */
iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
@@ -3394,33 +3393,33 @@ static bool need_read_all(struct io_kiocb *req)
static int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
- struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
+ struct io_rw_state __s, *s;
+ struct iovec *iovec;
struct kiocb *kiocb = &req->rw.kiocb;
- struct iov_iter __iter, *iter = &__iter;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
- struct iov_iter_state __state, *state;
struct io_async_rw *rw;
ssize_t ret, ret2;
if (req_has_async_data(req)) {
rw = req->async_data;
- iter = &rw->s.iter;
- state = &rw->s.iter_state;
+ s = &rw->s;
/*
* We come here from an earlier attempt, restore our state to
* match in case it doesn't. It's cheap enough that we don't
* need to make this conditional.
*/
- iov_iter_restore(iter, state);
+ iov_iter_restore(&s->iter, &s->iter_state);
iovec = NULL;
} else {
- ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
+ s = &__s;
+ iovec = s->fast_iov;
+ ret = io_import_iovec(READ, req, &iovec, &s->iter, !force_nonblock);
if (ret < 0)
return ret;
- state = &__state;
- iov_iter_save_state(iter, state);
+
+ iov_iter_save_state(&s->iter, &s->iter_state);
}
- req->result = iov_iter_count(iter);
+ req->result = iov_iter_count(&s->iter);
/* Ensure we clear previously set non-block flag */
if (!force_nonblock)
@@ -3430,7 +3429,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
/* If the file doesn't support async, just async punt */
if (force_nonblock && !io_file_supports_nowait(req, READ)) {
- ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
+ ret = io_setup_async_rw(req, iovec, s, true);
return ret ?: -EAGAIN;
}
@@ -3440,7 +3439,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
return ret;
}
- ret = io_iter_do_read(req, iter);
+ ret = io_iter_do_read(req, &s->iter);
if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
req->flags &= ~REQ_F_REISSUE;
@@ -3464,22 +3463,19 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
* untouched in case of error. Restore it and we'll advance it
* manually if we need to.
*/
- iov_iter_restore(iter, state);
+ iov_iter_restore(&s->iter, &s->iter_state);
- ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
+ ret2 = io_setup_async_rw(req, iovec, s, true);
if (ret2)
return ret2;
iovec = NULL;
rw = req->async_data;
+ s = &rw->s;
/*
* Now use our persistent iterator and state, if we aren't already.
* We've restored and mapped the iter to match.
*/
- if (iter != &rw->s.iter) {
- iter = &rw->s.iter;
- state = &rw->s.iter_state;
- }
do {
/*
@@ -3487,11 +3483,11 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
* above or inside this loop. Advance the iter by the bytes
* that were consumed.
*/
- iov_iter_advance(iter, ret);
- if (!iov_iter_count(iter))
+ iov_iter_advance(&s->iter, ret);
+ if (!iov_iter_count(&s->iter))
break;
rw->bytes_done += ret;
- iov_iter_save_state(iter, state);
+ iov_iter_save_state(&s->iter, &s->iter_state);
/* if we can retry, do so with the callbacks armed */
if (!io_rw_should_retry(req)) {
@@ -3505,12 +3501,12 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
* desired page gets unlocked. We can also get a partial read
* here, and if we do, then just retry at the new offset.
*/
- ret = io_iter_do_read(req, iter);
+ ret = io_iter_do_read(req, &s->iter);
if (ret == -EIOCBQUEUED)
return 0;
/* we got some bytes, but not all. retry. */
kiocb->ki_flags &= ~IOCB_WAITQ;
- iov_iter_restore(iter, state);
+ iov_iter_restore(&s->iter, &s->iter_state);
} while (ret > 0);
done:
kiocb_done(kiocb, ret, issue_flags);
@@ -3530,28 +3526,27 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
static int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
- struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
+ struct io_rw_state __s, *s;
+ struct io_async_rw *rw;
+ struct iovec *iovec;
struct kiocb *kiocb = &req->rw.kiocb;
- struct iov_iter __iter, *iter = &__iter;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
- struct iov_iter_state __state, *state;
- struct io_async_rw *rw;
ssize_t ret, ret2;
if (req_has_async_data(req)) {
rw = req->async_data;
- iter = &rw->s.iter;
- state = &rw->s.iter_state;
- iov_iter_restore(iter, state);
+ s = &rw->s;
+ iov_iter_restore(&s->iter, &s->iter_state);
iovec = NULL;
} else {
- ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
+ s = &__s;
+ iovec = s->fast_iov;
+ ret = io_import_iovec(WRITE, req, &iovec, &s->iter, !force_nonblock);
if (ret < 0)
return ret;
- state = &__state;
- iov_iter_save_state(iter, state);
+ iov_iter_save_state(&s->iter, &s->iter_state);
}
- req->result = iov_iter_count(iter);
+ req->result = iov_iter_count(&s->iter);
/* Ensure we clear previously set non-block flag */
if (!force_nonblock)
@@ -3587,9 +3582,9 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
kiocb->ki_flags |= IOCB_WRITE;
if (req->file->f_op->write_iter)
- ret2 = call_write_iter(req->file, kiocb, iter);
+ ret2 = call_write_iter(req->file, kiocb, &s->iter);
else if (req->file->f_op->write)
- ret2 = loop_rw_iter(WRITE, req, iter);
+ ret2 = loop_rw_iter(WRITE, req, &s->iter);
else
ret2 = -EINVAL;
@@ -3615,8 +3610,8 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
kiocb_done(kiocb, ret2, issue_flags);
} else {
copy_iov:
- iov_iter_restore(iter, state);
- ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
+ iov_iter_restore(&s->iter, &s->iter_state);
+ ret = io_setup_async_rw(req, iovec, s, false);
return ret ?: -EAGAIN;
}
out_free:
--
2.33.0
Thread overview: 14+ messages
2021-10-14 15:10 [PATCH for-next 0/8] read/write cleanup Pavel Begunkov
2021-10-14 15:10 ` [PATCH 1/8] io_uring: consistent typing for issue_flags Pavel Begunkov
2021-10-14 15:10 ` [PATCH 2/8] io_uring: prioritise read success path over fails Pavel Begunkov
2021-10-14 15:10 ` [PATCH 3/8] io_uring: optimise rw comletion handlers Pavel Begunkov
2021-10-14 15:10 ` [PATCH 4/8] io_uring: encapsulate rw state Pavel Begunkov
2021-10-18 6:06 ` Hao Xu
2021-10-14 15:10 ` Pavel Begunkov [this message]
2021-10-14 15:10 ` [PATCH 6/8] io_uring: optimise io_import_iovec nonblock passing Pavel Begunkov
2021-10-14 15:10 ` [PATCH 7/8] io_uring: clean up io_import_iovec Pavel Begunkov
2021-10-14 15:10 ` [PATCH 8/8] io_uring: rearrange io_read()/write() Pavel Begunkov
2021-10-16 22:52 ` Noah Goldstein
2021-10-16 23:25 ` Pavel Begunkov
2021-10-17 1:35 ` Noah Goldstein
2021-10-14 18:17 ` [PATCH for-next 0/8] read/write cleanup Jens Axboe