FYI, for-next

fs/io_uring.c: In function ‘io_epoll_ctl’:
fs/io_uring.c:2661:22: error: ‘IO_WQ_WORK_NEEDS_FILES’ undeclared (first use in this function)
 2661 |         req->work.flags |= IO_WQ_WORK_NEEDS_FILES;
      |                            ^~~~~~~~~~~~~~~~~~~~~~
fs/io_uring.c:2661:22: note: each undeclared identifier is reported only once for each function it appears in
make[1]: *** [scripts/Makefile.build:266: fs/io_uring.o] Error 1
make: *** [Makefile:1693: fs] Error 2

On 29/01/2020 23:49, Jens Axboe wrote:
> We're not consistent in how the file table is grabbed and assigned if we
> have a command linked that requires the use of it.
>
> Add ->file_table to the io_op_defs[] array, and use that to determine
> when to grab the table instead of having the handlers set it if they
> need to defer. This also means we can kill the IO_WQ_WORK_NEEDS_FILES
> flag. We always initialize work->files, so io-wq can just check for
> that.
>
> Signed-off-by: Jens Axboe
>
> ---
>
> diff --git a/fs/io-wq.c b/fs/io-wq.c
> index f7eb577ccd2d..cb60a42b9fdf 100644
> --- a/fs/io-wq.c
> +++ b/fs/io-wq.c
> @@ -476,8 +476,7 @@ static void io_worker_handle_work(struct io_worker *worker)
>  		if (work->flags & IO_WQ_WORK_CB)
>  			work->func(&work);
>
> -		if ((work->flags & IO_WQ_WORK_NEEDS_FILES) &&
> -		    current->files != work->files) {
> +		if (work->files && current->files != work->files) {
>  			task_lock(current);
>  			current->files = work->files;
>  			task_unlock(current);
> diff --git a/fs/io-wq.h b/fs/io-wq.h
> index c42602c58c56..50b3378febf2 100644
> --- a/fs/io-wq.h
> +++ b/fs/io-wq.h
> @@ -7,7 +7,6 @@ enum {
>  	IO_WQ_WORK_CANCEL	= 1,
>  	IO_WQ_WORK_HAS_MM	= 2,
>  	IO_WQ_WORK_HASHED	= 4,
> -	IO_WQ_WORK_NEEDS_FILES	= 16,
>  	IO_WQ_WORK_UNBOUND	= 32,
>  	IO_WQ_WORK_INTERNAL	= 64,
>  	IO_WQ_WORK_CB		= 128,
> diff --git a/fs/io_uring.c b/fs/io_uring.c
> index 8bcf0538e2e1..0d8d0e217847 100644
> --- a/fs/io_uring.c
> +++ b/fs/io_uring.c
> @@ -603,6 +603,8 @@ struct io_op_def {
>  	unsigned		unbound_nonreg_file : 1;
>  	/* opcode is not supported by this kernel */
>  	unsigned		not_supported : 1;
> +	/* needs file table */
> +	unsigned		file_table : 1;
>  };
>
>  static const struct io_op_def io_op_defs[] = {
> @@ -661,6 +663,7 @@ static const struct io_op_def io_op_defs[] = {
>  		.needs_mm		= 1,
>  		.needs_file		= 1,
>  		.unbound_nonreg_file	= 1,
> +		.file_table		= 1,
>  	},
>  	[IORING_OP_ASYNC_CANCEL] = {},
>  	[IORING_OP_LINK_TIMEOUT] = {
> @@ -679,12 +682,15 @@ static const struct io_op_def io_op_defs[] = {
>  	[IORING_OP_OPENAT] = {
>  		.needs_file		= 1,
>  		.fd_non_neg		= 1,
> +		.file_table		= 1,
>  	},
>  	[IORING_OP_CLOSE] = {
>  		.needs_file		= 1,
> +		.file_table		= 1,
>  	},
>  	[IORING_OP_FILES_UPDATE] = {
>  		.needs_mm		= 1,
> +		.file_table		= 1,
>  	},
>  	[IORING_OP_STATX] = {
>  		.needs_mm		= 1,
> @@ -720,6 +726,7 @@ static const struct io_op_def io_op_defs[] = {
>  	[IORING_OP_OPENAT2] = {
>  		.needs_file		= 1,
>  		.fd_non_neg		= 1,
> +		.file_table		= 1,
>  	},
>  };
>
> @@ -732,6 +739,7 @@ static void io_queue_linked_timeout(struct io_kiocb *req);
>  static int __io_sqe_files_update(struct io_ring_ctx *ctx,
>  				 struct io_uring_files_update *ip,
>  				 unsigned nr_args);
> +static int io_grab_files(struct io_kiocb *req);
>
>  static struct kmem_cache *req_cachep;
>
> @@ -2568,10 +2576,8 @@ static int io_openat2(struct io_kiocb *req, struct io_kiocb **nxt,
>  	struct file *file;
>  	int ret;
>
> -	if (force_nonblock) {
> -		req->work.flags |= IO_WQ_WORK_NEEDS_FILES;
> +	if (force_nonblock)
>  		return -EAGAIN;
> -	}
>
>  	ret = build_open_flags(&req->open.how, &op);
>  	if (ret)
> @@ -2797,10 +2803,8 @@ static int io_close(struct io_kiocb *req, struct io_kiocb **nxt,
>  		return ret;
>
>  	/* if the file has a flush method, be safe and punt to async */
> -	if (req->close.put_file->f_op->flush && !io_wq_current_is_worker()) {
> -		req->work.flags |= IO_WQ_WORK_NEEDS_FILES;
> +	if (req->close.put_file->f_op->flush && !io_wq_current_is_worker())
>  		goto eagain;
> -	}
>
>  	/*
>  	 * No ->flush(), safely close from here and just punt the
> @@ -3244,7 +3248,6 @@ static int io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
>  	ret = __io_accept(req, nxt, force_nonblock);
>  	if (ret == -EAGAIN && force_nonblock) {
>  		req->work.func = io_accept_finish;
> -		req->work.flags |= IO_WQ_WORK_NEEDS_FILES;
>  		io_put_req(req);
>  		return -EAGAIN;
>  	}
> @@ -3967,10 +3970,8 @@ static int io_files_update(struct io_kiocb *req, bool force_nonblock)
>  	struct io_uring_files_update up;
>  	int ret;
>
> -	if (force_nonblock) {
> -		req->work.flags |= IO_WQ_WORK_NEEDS_FILES;
> +	if (force_nonblock)
>  		return -EAGAIN;
> -	}
>
>  	up.offset = req->files_update.offset;
>  	up.fds = req->files_update.arg;
> @@ -3991,6 +3992,12 @@ static int io_req_defer_prep(struct io_kiocb *req,
>  {
>  	ssize_t ret = 0;
>
> +	if (io_op_defs[req->opcode].file_table) {
> +		ret = io_grab_files(req);
> +		if (unlikely(ret))
> +			return ret;
> +	}
> +
>  	io_req_work_grab_env(req, &io_op_defs[req->opcode]);
>
>  	switch (req->opcode) {
> @@ -4424,6 +4431,8 @@ static int io_grab_files(struct io_kiocb *req)
>  	int ret = -EBADF;
>  	struct io_ring_ctx *ctx = req->ctx;
>
> +	if (req->work.files)
> +		return 0;
>  	if (!ctx->ring_file)
>  		return -EBADF;
>
> @@ -4542,7 +4551,7 @@ static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
>  	if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
>  	    (req->flags & REQ_F_MUST_PUNT))) {
> punt:
> -		if (req->work.flags & IO_WQ_WORK_NEEDS_FILES) {
> +		if (io_op_defs[req->opcode].file_table) {
>  			ret = io_grab_files(req);
>  			if (ret)
>  				goto err;

--
Pavel Begunkov
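
The failing line is in io_epoll_ctl(), which the patch above does not convert.
Going by the pattern of the other hunks, the missing piece would presumably be
to drop the flag assignment the compiler is pointing at:

 	/* io_epoll_ctl(): no longer set the removed flag before punting */
-		req->work.flags |= IO_WQ_WORK_NEEDS_FILES;

and to mark the corresponding opcode in io_op_defs[] so the core grabs the
file table instead (a sketch only; the other fields of this entry are a guess,
not taken from the posted patch):

 	[IORING_OP_EPOLL_CTL] = {
 		.unbound_nonreg_file	= 1,
+		.file_table		= 1,
 	},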