From edb629fc246ef146ad4e25bc51fd3f5db797b2be Mon Sep 17 00:00:00 2001
From: Andres Freund
Date: Sun, 23 Feb 2020 22:22:33 -0800
Subject: [PATCH v1 1/2] WIP: io_uring: Deduplicate request prep.

Signed-off-by: Andres Freund
---
 fs/io_uring.c | 192 +++++++++++++------------------------------------
 1 file changed, 49 insertions(+), 143 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index de650df9ac53..9a8fda8b28c9 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4116,31 +4116,24 @@ static int io_files_update(struct io_kiocb *req, bool force_nonblock)
 	return 0;
 }
 
-static int io_req_defer_prep(struct io_kiocb *req,
-			     const struct io_uring_sqe *sqe)
+static inline int io_req_prep(u8 opcode, struct io_kiocb *req,
+			      const struct io_uring_sqe *sqe,
+			      bool force_nonblock)
 {
 	ssize_t ret = 0;
 
-	if (io_op_defs[req->opcode].file_table) {
-		ret = io_grab_files(req);
-		if (unlikely(ret))
-			return ret;
-	}
-
-	io_req_work_grab_env(req, &io_op_defs[req->opcode]);
-
-	switch (req->opcode) {
+	switch (opcode) {
 	case IORING_OP_NOP:
 		break;
 	case IORING_OP_READV:
 	case IORING_OP_READ_FIXED:
 	case IORING_OP_READ:
-		ret = io_read_prep(req, sqe, true);
+		ret = io_read_prep(req, sqe, force_nonblock);
 		break;
 	case IORING_OP_WRITEV:
 	case IORING_OP_WRITE_FIXED:
 	case IORING_OP_WRITE:
-		ret = io_write_prep(req, sqe, true);
+		ret = io_write_prep(req, sqe, force_nonblock);
 		break;
 	case IORING_OP_POLL_ADD:
 		ret = io_poll_add_prep(req, sqe);
@@ -4162,23 +4155,23 @@ static int io_req_defer_prep(struct io_kiocb *req,
 	case IORING_OP_RECV:
 		ret = io_recvmsg_prep(req, sqe);
 		break;
-	case IORING_OP_CONNECT:
-		ret = io_connect_prep(req, sqe);
-		break;
 	case IORING_OP_TIMEOUT:
 		ret = io_timeout_prep(req, sqe, false);
 		break;
 	case IORING_OP_TIMEOUT_REMOVE:
 		ret = io_timeout_remove_prep(req, sqe);
 		break;
+	case IORING_OP_ACCEPT:
+		ret = io_accept_prep(req, sqe);
+		break;
 	case IORING_OP_ASYNC_CANCEL:
 		ret = io_async_cancel_prep(req, sqe);
 		break;
 	case IORING_OP_LINK_TIMEOUT:
 		ret = io_timeout_prep(req, sqe, true);
 		break;
-	case IORING_OP_ACCEPT:
-		ret = io_accept_prep(req, sqe);
+	case IORING_OP_CONNECT:
+		ret = io_connect_prep(req, sqe);
 		break;
 	case IORING_OP_FALLOCATE:
 		ret = io_fallocate_prep(req, sqe);
@@ -4217,6 +4210,23 @@ static int io_req_defer_prep(struct io_kiocb *req,
 	return ret;
 }
 
+static int io_req_defer_prep(struct io_kiocb *req,
+			     const struct io_uring_sqe *sqe)
+{
+	ssize_t ret = 0;
+	u8 opcode = req->opcode;
+
+	if (io_op_defs[opcode].file_table) {
+		ret = io_grab_files(req);
+		if (unlikely(ret))
+			return ret;
+	}
+
+	io_req_work_grab_env(req, &io_op_defs[opcode]);
+
+	return io_req_prep(opcode, req, sqe, true);
+}
+
 static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_ring_ctx *ctx = req->ctx;
@@ -4278,198 +4288,94 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 			struct io_kiocb **nxt, bool force_nonblock)
 {
 	struct io_ring_ctx *ctx = req->ctx;
+	/* allow compiler to infer opcode doesn't change */
+	u8 opcode = req->opcode;
 	int ret;
 
-	switch (req->opcode) {
+	if (sqe) {
+		ret = io_req_prep(opcode, req, sqe, force_nonblock);
+		if (ret)
+			return ret;
+	}
+
+	switch (opcode) {
 	case IORING_OP_NOP:
 		ret = io_nop(req);
 		break;
 	case IORING_OP_READV:
 	case IORING_OP_READ_FIXED:
 	case IORING_OP_READ:
-		if (sqe) {
-			ret = io_read_prep(req, sqe, force_nonblock);
-			if (ret < 0)
-				break;
-		}
 		ret = io_read(req, nxt, force_nonblock);
 		break;
 	case IORING_OP_WRITEV:
 	case IORING_OP_WRITE_FIXED:
 	case IORING_OP_WRITE:
-		if (sqe) {
-			ret = io_write_prep(req, sqe, force_nonblock);
-			if (ret < 0)
-				break;
-		}
 		ret = io_write(req, nxt, force_nonblock);
 		break;
-	case IORING_OP_FSYNC:
-		if (sqe) {
-			ret = io_prep_fsync(req, sqe);
-			if (ret < 0)
-				break;
-		}
-		ret = io_fsync(req, nxt, force_nonblock);
-		break;
 	case IORING_OP_POLL_ADD:
-		if (sqe) {
-			ret = io_poll_add_prep(req, sqe);
-			if (ret)
-				break;
-		}
 		ret = io_poll_add(req, nxt);
 		break;
 	case IORING_OP_POLL_REMOVE:
-		if (sqe) {
-			ret = io_poll_remove_prep(req, sqe);
-			if (ret < 0)
-				break;
-		}
 		ret = io_poll_remove(req);
 		break;
+	case IORING_OP_FSYNC:
+		ret = io_fsync(req, nxt, force_nonblock);
+		break;
 	case IORING_OP_SYNC_FILE_RANGE:
-		if (sqe) {
-			ret = io_prep_sfr(req, sqe);
-			if (ret < 0)
-				break;
-		}
 		ret = io_sync_file_range(req, nxt, force_nonblock);
 		break;
 	case IORING_OP_SENDMSG:
+		ret = io_sendmsg(req, nxt, force_nonblock);
+		break;
 	case IORING_OP_SEND:
-		if (sqe) {
-			ret = io_sendmsg_prep(req, sqe);
-			if (ret < 0)
-				break;
-		}
-		if (req->opcode == IORING_OP_SENDMSG)
-			ret = io_sendmsg(req, nxt, force_nonblock);
-		else
-			ret = io_send(req, nxt, force_nonblock);
+		ret = io_send(req, nxt, force_nonblock);
 		break;
 	case IORING_OP_RECVMSG:
+		ret = io_recvmsg(req, nxt, force_nonblock);
+		break;
 	case IORING_OP_RECV:
-		if (sqe) {
-			ret = io_recvmsg_prep(req, sqe);
-			if (ret)
-				break;
-		}
-		if (req->opcode == IORING_OP_RECVMSG)
-			ret = io_recvmsg(req, nxt, force_nonblock);
-		else
-			ret = io_recv(req, nxt, force_nonblock);
+		ret = io_recv(req, nxt, force_nonblock);
 		break;
 	case IORING_OP_TIMEOUT:
-		if (sqe) {
-			ret = io_timeout_prep(req, sqe, false);
-			if (ret)
-				break;
-		}
 		ret = io_timeout(req);
 		break;
 	case IORING_OP_TIMEOUT_REMOVE:
-		if (sqe) {
-			ret = io_timeout_remove_prep(req, sqe);
-			if (ret)
-				break;
-		}
 		ret = io_timeout_remove(req);
 		break;
 	case IORING_OP_ACCEPT:
-		if (sqe) {
-			ret = io_accept_prep(req, sqe);
-			if (ret)
-				break;
-		}
 		ret = io_accept(req, nxt, force_nonblock);
 		break;
-	case IORING_OP_CONNECT:
-		if (sqe) {
-			ret = io_connect_prep(req, sqe);
-			if (ret)
-				break;
-		}
-		ret = io_connect(req, nxt, force_nonblock);
-		break;
 	case IORING_OP_ASYNC_CANCEL:
-		if (sqe) {
-			ret = io_async_cancel_prep(req, sqe);
-			if (ret)
-				break;
-		}
 		ret = io_async_cancel(req, nxt);
 		break;
+	case IORING_OP_CONNECT:
+		ret = io_connect(req, nxt, force_nonblock);
+		break;
 	case IORING_OP_FALLOCATE:
-		if (sqe) {
-			ret = io_fallocate_prep(req, sqe);
-			if (ret)
-				break;
-		}
 		ret = io_fallocate(req, nxt, force_nonblock);
 		break;
 	case IORING_OP_OPENAT:
-		if (sqe) {
-			ret = io_openat_prep(req, sqe);
-			if (ret)
-				break;
-		}
 		ret = io_openat(req, nxt, force_nonblock);
 		break;
 	case IORING_OP_CLOSE:
-		if (sqe) {
-			ret = io_close_prep(req, sqe);
-			if (ret)
-				break;
-		}
 		ret = io_close(req, nxt, force_nonblock);
 		break;
 	case IORING_OP_FILES_UPDATE:
-		if (sqe) {
-			ret = io_files_update_prep(req, sqe);
-			if (ret)
-				break;
-		}
 		ret = io_files_update(req, force_nonblock);
 		break;
 	case IORING_OP_STATX:
-		if (sqe) {
-			ret = io_statx_prep(req, sqe);
-			if (ret)
-				break;
-		}
 		ret = io_statx(req, nxt, force_nonblock);
 		break;
 	case IORING_OP_FADVISE:
-		if (sqe) {
-			ret = io_fadvise_prep(req, sqe);
-			if (ret)
-				break;
-		}
 		ret = io_fadvise(req, nxt, force_nonblock);
 		break;
 	case IORING_OP_MADVISE:
-		if (sqe) {
-			ret = io_madvise_prep(req, sqe);
-			if (ret)
-				break;
-		}
 		ret = io_madvise(req, nxt, force_nonblock);
 		break;
 	case IORING_OP_OPENAT2:
-		if (sqe) {
-			ret = io_openat2_prep(req, sqe);
-			if (ret)
-				break;
-		}
 		ret = io_openat2(req, nxt, force_nonblock);
 		break;
 	case IORING_OP_EPOLL_CTL:
-		if (sqe) {
-			ret = io_epoll_ctl_prep(req, sqe);
-			if (ret)
-				break;
-		}
 		ret = io_epoll_ctl(req, nxt, force_nonblock);
 		break;
 	default:
-- 
2.25.0.114.g5b0ca878e0