* [PATCH 1/4] io_uring: flag the fact that linked file assignment is sane
From: Jens Axboe @ 2022-04-11 23:09 UTC
To: io-uring; +Cc: Jens Axboe
Give applications a way to tell if the kernel supports sane linked files,
meaning that files are assigned late enough that a linked sequence such as
<open file direct into slot X><read file from slot X> works reliably when
the requests are ordered with IOSQE_IO_LINK.
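As an illustration (not part of the patch), a minimal liburing sketch of
that pattern; the file name "testfile" is assumed and error handling is
trimmed:

#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	struct io_uring_params p = { };
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fds[1] = { -1 };
	char buf[4096];

	if (io_uring_queue_init_params(8, &ring, &p))
		return 1;
	if (!(p.features & IORING_FEAT_LINKED_FILE)) {
		fprintf(stderr, "no deferred file assignment here\n");
		return 1;
	}
	/* register a one-slot sparse file table for the direct open */
	io_uring_register_files(&ring, fds, 1);

	/* open "testfile" (assumed name) directly into fixed slot 0... */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_openat_direct(sqe, AT_FDCWD, "testfile", O_RDONLY, 0, 0);
	sqe->flags |= IOSQE_IO_LINK;

	/* ...and read from slot 0, ordered after the open by the link */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, 0, buf, sizeof(buf), 0);
	sqe->flags |= IOSQE_FIXED_FILE;

	io_uring_submit(&ring);
	for (int i = 0; i < 2; i++) {
		io_uring_wait_cqe(&ring, &cqe);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}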
Not really a bug fix, but flag it as such so that it gets pulled in with
backports of the deferred file assignment.
Fixes: 6bf9c47a3989 ("io_uring: defer file assignment")
Signed-off-by: Jens Axboe <[email protected]>
---
fs/io_uring.c | 3 ++-
include/uapi/linux/io_uring.h | 1 +
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 659f8ecba5b7..f060ad018ba4 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -11178,7 +11178,8 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
 			IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
 			IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
 			IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
-			IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP;
+			IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
+			IORING_FEAT_LINKED_FILE;
 
 	if (copy_to_user(params, p, sizeof(*p))) {
 		ret = -EFAULT;
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 784adc6f6ed2..1845cf7c80ba 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -296,6 +296,7 @@ struct io_uring_params {
 #define IORING_FEAT_NATIVE_WORKERS	(1U << 9)
 #define IORING_FEAT_RSRC_TAGS		(1U << 10)
 #define IORING_FEAT_CQE_SKIP		(1U << 11)
+#define IORING_FEAT_LINKED_FILE		(1U << 12)
 
 /*
  * io_uring_register(2) opcodes and arguments
--
2.35.1
* [PATCH 2/4] io_uring: io_kiocb_update_pos() should not touch file for non -1 offset
From: Jens Axboe @ 2022-04-11 23:09 UTC
To: io-uring; +Cc: Jens Axboe
An offset of -1 tells us to use the current file position, but we check
whether the file is a stream regardless of that. Fix up
io_kiocb_update_pos() to only dip into the file if we need to. This is
more efficient, and it also drops 12 bytes of text on aarch64 and 64 bytes
on x86-64.
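For context, the userspace-visible semantics being optimized here: an
offset of -1 makes the read behave like read(2), using and advancing the
file position. A rough liburing sketch, assuming a regular file named
"testfile" exists:

#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char buf[8];
	int fd = open("testfile", O_RDONLY);	/* assumed to exist */

	io_uring_queue_init(4, &ring, 0);
	for (int i = 0; i < 2; i++) {
		sqe = io_uring_get_sqe(&ring);
		/* offset -1: the second read continues where the first ended;
		 * for a stream (pipe, socket) the offset is ignored entirely */
		io_uring_prep_read(sqe, fd, buf, sizeof(buf), -1);
		io_uring_submit(&ring);
		io_uring_wait_cqe(&ring, &cqe);
		printf("read %d bytes at the current file position\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}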
Fixes: b4aec4001595 ("io_uring: do not recalculate ppos unnecessarily")
Signed-off-by: Jens Axboe <[email protected]>
---
fs/io_uring.c | 21 ++++++++++-----------
1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index f060ad018ba4..b4a5e2a6aa9c 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -3183,19 +3183,18 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
 static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
 {
 	struct kiocb *kiocb = &req->rw.kiocb;
-	bool is_stream = req->file->f_mode & FMODE_STREAM;
 
-	if (kiocb->ki_pos == -1) {
-		if (!is_stream) {
-			req->flags |= REQ_F_CUR_POS;
-			kiocb->ki_pos = req->file->f_pos;
-			return &kiocb->ki_pos;
-		} else {
-			kiocb->ki_pos = 0;
-			return NULL;
-		}
+	if (kiocb->ki_pos != -1)
+		return &kiocb->ki_pos;
+
+	if (!(req->file->f_mode & FMODE_STREAM)) {
+		req->flags |= REQ_F_CUR_POS;
+		kiocb->ki_pos = req->file->f_pos;
+		return &kiocb->ki_pos;
 	}
-	return is_stream ? NULL : &kiocb->ki_pos;
+
+	kiocb->ki_pos = 0;
+	return NULL;
 }
 
 static void kiocb_done(struct io_kiocb *req, ssize_t ret,
--
2.35.1
* [PATCH 3/4] io_uring: move apoll->events cache
From: Jens Axboe @ 2022-04-11 23:09 UTC
To: io-uring; +Cc: Jens Axboe
In preparation for fixing a regression where an extra cacheline is pulled
in for IO that doesn't normally touch the last cacheline of the io_kiocb,
move the cached copy of apoll->events into space shared with other
completion data. Like cflags, it isn't used until after the request has
completed, so we can piggyback on top of comp_list.
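As a standalone illustration of the overlap (a toy sketch, not the real
io_kiocb layout): a value that only becomes live at completion time can
share storage with a list node that is dead by then, so the struct doesn't
grow:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct list_node {
	struct list_node *next;
};

struct toy_req {
	union {
		struct list_node node;	/* live while queued for batching */
		int events;		/* live once completion handling runs */
	};
	unsigned int flags;
};

int main(void)
{
	/* the two members overlap, so caching 'events' costs no space */
	static_assert(offsetof(struct toy_req, node) ==
		      offsetof(struct toy_req, events), "must overlap");
	printf("sizeof(struct toy_req) = %zu\n", sizeof(struct toy_req));
	return 0;
}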
Fixes: 81459350d581 ("io_uring: cache req->apoll->events in req->cflags")
Signed-off-by: Jens Axboe <[email protected]>
---
fs/io_uring.c | 21 ++++++++++++---------
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index b4a5e2a6aa9c..3a97535d0550 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -916,8 +916,12 @@ struct io_kiocb {
 	/* store used ubuf, so we can prevent reloading */
 	struct io_mapped_ubuf		*imu;
 
-	/* used by request caches, completion batching and iopoll */
-	struct io_wq_work_node		comp_list;
+	union {
+		/* used by request caches, completion batching and iopoll */
+		struct io_wq_work_node	comp_list;
+		/* cache ->apoll->events */
+		int			apoll_events;
+	};
 	atomic_t			refs;
 	atomic_t			poll_refs;
 	struct io_task_work		io_task_work;
@@ -5833,7 +5837,6 @@ static void io_poll_remove_entries(struct io_kiocb *req)
 static int io_poll_check_events(struct io_kiocb *req, bool locked)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	struct io_poll_iocb *poll = io_poll_get_single(req);
 	int v;
 
 	/* req->task == current here, checking PF_EXITING is safe */
@@ -5850,17 +5853,17 @@ static int io_poll_check_events(struct io_kiocb *req, bool locked)
 		return -ECANCELED;
 
 	if (!req->result) {
-		struct poll_table_struct pt = { ._key = req->cflags };
+		struct poll_table_struct pt = { ._key = req->apoll_events };
 
 		if (unlikely(!io_assign_file(req, IO_URING_F_UNLOCKED)))
 			req->result = -EBADF;
 		else
-			req->result = vfs_poll(req->file, &pt) & req->cflags;
+			req->result = vfs_poll(req->file, &pt) & req->apoll_events;
 	}
 
 	/* multishot, just fill an CQE and proceed */
-	if (req->result && !(req->cflags & EPOLLONESHOT)) {
-		__poll_t mask = mangle_poll(req->result & poll->events);
+	if (req->result && !(req->apoll_events & EPOLLONESHOT)) {
+		__poll_t mask = mangle_poll(req->result & req->apoll_events);
 		bool filled;
 
 		spin_lock(&ctx->completion_lock);
@@ -5938,7 +5941,7 @@ static void __io_poll_execute(struct io_kiocb *req, int mask, int events)
 	 * CPU. We want to avoid pulling in req->apoll->events for that
	 * case.
	 */
-	req->cflags = events;
+	req->apoll_events = events;
 	if (req->opcode == IORING_OP_POLL_ADD)
 		req->io_task_work.func = io_poll_task_func;
 	else
@@ -6330,7 +6333,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
 		return -EINVAL;
 
 	io_req_set_refcount(req);
-	req->cflags = poll->events = io_poll_parse_events(sqe, flags);
+	req->apoll_events = poll->events = io_poll_parse_events(sqe, flags);
 	return 0;
 }
--
2.35.1
* [PATCH 4/4] io_uring: stop using io_wq_work as an fd placeholder
From: Jens Axboe @ 2022-04-11 23:09 UTC
To: io-uring; +Cc: Jens Axboe
There are two reasons why this isn't the best idea:
- io_wq_work is an odd place to borrow a bit of storage space from.
- It puts the 3rd io_kiocb cacheline into the hot path, where the normal
hot path just needs the first two.
Use 'cflags' for joint fd/cflags storage instead. We only need fd until
the request has been successfully issued, and we only need cflags once the
request has completed.
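To make the cacheline point concrete, a toy sketch (made-up layout, not
the real io_kiocb): with 64-byte cachelines, a member lands on cacheline
offsetof(member) / 64, so keeping the fd in an early field keeps request
issue off the colder, later lines:

#include <stddef.h>
#include <stdio.h>

struct toy_req {
	char hot[56];		/* frequently touched issue-time state */
	union {
		unsigned int cflags;	/* completion time */
		int fd;			/* issue time, until file assignment */
	} u;			/* lands on cacheline 0 */
	char cold[120];		/* rarely touched state */
	int late_field;		/* lands on cacheline 2 */
};

int main(void)
{
	printf("u (fd/cflags): cacheline %zu\n",
	       offsetof(struct toy_req, u) / 64);
	printf("late_field:    cacheline %zu\n",
	       offsetof(struct toy_req, late_field) / 64);
	return 0;
}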
Fixes: 6bf9c47a3989 ("io_uring: defer file assignment")
Signed-off-by: Jens Axboe <[email protected]>
---
fs/io-wq.h | 1 -
fs/io_uring.c | 12 ++++++++----
2 files changed, 8 insertions(+), 5 deletions(-)
diff --git a/fs/io-wq.h b/fs/io-wq.h
index 04d374e65e54..dbecd27656c7 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -155,7 +155,6 @@ struct io_wq_work_node *wq_stack_extract(struct io_wq_work_node *stack)
 struct io_wq_work {
 	struct io_wq_work_node list;
 	unsigned flags;
-	int fd;
 };
 
 static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3a97535d0550..38e62b1c6297 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -907,7 +907,11 @@ struct io_kiocb {
 
 	u64				user_data;
 	u32				result;
-	u32				cflags;
+	/* fd initially, then cflags for completion */
+	union {
+		u32			cflags;
+		int			fd;
+	};
 
 	struct io_ring_ctx		*ctx;
 	struct task_struct		*task;
@@ -7090,9 +7094,9 @@ static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags)
 		return true;
 
 	if (req->flags & REQ_F_FIXED_FILE)
-		req->file = io_file_get_fixed(req, req->work.fd, issue_flags);
+		req->file = io_file_get_fixed(req, req->fd, issue_flags);
 	else
-		req->file = io_file_get_normal(req, req->work.fd);
+		req->file = io_file_get_normal(req, req->fd);
 	if (req->file)
 		return true;
@@ -7630,7 +7634,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	if (io_op_defs[opcode].needs_file) {
 		struct io_submit_state *state = &ctx->submit_state;
 
-		req->work.fd = READ_ONCE(sqe->fd);
+		req->fd = READ_ONCE(sqe->fd);
 
 		/*
 		 * Plug now if we have more than 2 IO left after this, and the
--
2.35.1