* [PATCH 1/3] io_uring: add IORING_OP_FADVISE
2020-01-10 15:47 [PATCHSET 0/3] io_uring: add support for madvise/fadvise Jens Axboe
@ 2020-01-10 15:47 ` Jens Axboe
2020-01-10 15:47 ` [PATCH 2/3] mm: make do_madvise() available internally Jens Axboe
2020-01-10 15:47 ` [PATCH 3/3] io_uring: add IORING_OP_MADVISE Jens Axboe
2 siblings, 0 replies; 8+ messages in thread
From: Jens Axboe @ 2020-01-10 15:47 UTC (permalink / raw)
To: io-uring; +Cc: linux-fsdevel, linux-mm, Jens Axboe
This adds support for doing fadvise(2) through io_uring. We assume that
POSIX_FADV_WILLNEED doesn't block, but that POSIX_FADV_DONTNEED may block.
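For illustration, here is a minimal userspace sketch of driving the new opcode (not part of the patch): the ring_fadvise() helper name is made up, liburing is assumed only for ring setup and submission, and the SQE fields are filled by hand exactly as the uapi addition below defines them.

/*
 * Sketch only: submit IORING_OP_FADVISE and wait for its completion.
 * Assumes a kernel with this patch and an already-initialized ring.
 */
#include <errno.h>
#include <fcntl.h>      /* POSIX_FADV_* values passed in by callers */
#include <liburing.h>
#include <string.h>

static int ring_fadvise(struct io_uring *ring, int fd, __u64 off,
                        __u32 len, int advice)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct io_uring_cqe *cqe;
        int ret;

        if (!sqe)
                return -EBUSY;
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode = IORING_OP_FADVISE;
        sqe->fd = fd;
        sqe->off = off;                 /* file offset */
        sqe->len = len;                 /* number of bytes */
        sqe->fadvise_advice = advice;   /* POSIX_FADV_* value */

        io_uring_submit(ring);
        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret)
                return ret;
        ret = cqe->res;                 /* 0 on success, -errno on failure */
        io_uring_cqe_seen(ring, cqe);
        return ret;
}

With the handler below, a call such as ring_fadvise(&ring, fd, 0, 4096, POSIX_FADV_DONTNEED) is expected to be punted to the async workers, while POSIX_FADV_WILLNEED completes inline.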
Signed-off-by: Jens Axboe <[email protected]>
---
fs/io_uring.c | 53 +++++++++++++++++++++++++++++++++++
include/uapi/linux/io_uring.h | 2 ++
2 files changed, 55 insertions(+)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 62459a79a61f..0b200a7d4ae0 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -72,6 +72,7 @@
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
+#include <linux/fadvise.h>
#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
@@ -400,6 +401,13 @@ struct io_files_update {
u32 offset;
};
+struct io_fadvise {
+ struct file *file;
+ u64 offset;
+ u32 len;
+ u32 advice;
+};
+
struct io_async_connect {
struct sockaddr_storage address;
};
@@ -452,6 +460,7 @@ struct io_kiocb {
struct io_open open;
struct io_close close;
struct io_files_update files_update;
+ struct io_fadvise fadvise;
};
struct io_async_ctx *io;
@@ -669,6 +678,10 @@ static const struct io_op_def io_op_defs[] = {
.needs_file = 1,
.unbound_nonreg_file = 1,
},
+ {
+ /* IORING_OP_FADVISE */
+ .needs_file = 1,
+ },
};
static void io_wq_submit_work(struct io_wq_work **workptr);
@@ -2435,6 +2448,35 @@ static int io_openat(struct io_kiocb *req, struct io_kiocb **nxt,
return 0;
}
+static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ if (sqe->ioprio || sqe->buf_index || sqe->addr)
+ return -EINVAL;
+
+ req->fadvise.offset = READ_ONCE(sqe->off);
+ req->fadvise.len = READ_ONCE(sqe->len);
+ req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
+ return 0;
+}
+
+static int io_fadvise(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
+{
+ struct io_fadvise *fa = &req->fadvise;
+ int ret;
+
+ /* DONTNEED may block, others _should_ not */
+ if (fa->advice == POSIX_FADV_DONTNEED && force_nonblock)
+ return -EAGAIN;
+
+ ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
+ if (ret < 0)
+ req_set_fail_links(req);
+ io_cqring_add_event(req, ret);
+ io_put_req_find_next(req, nxt);
+ return 0;
+}
+
static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
unsigned lookup_flags;
@@ -3724,6 +3766,9 @@ static int io_req_defer_prep(struct io_kiocb *req,
case IORING_OP_STATX:
ret = io_statx_prep(req, sqe);
break;
+ case IORING_OP_FADVISE:
+ ret = io_fadvise_prep(req, sqe);
+ break;
default:
printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
req->opcode);
@@ -3920,6 +3965,14 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
}
ret = io_statx(req, nxt, force_nonblock);
break;
+ case IORING_OP_FADVISE:
+ if (sqe) {
+ ret = io_fadvise_prep(req, sqe);
+ if (ret)
+ break;
+ }
+ ret = io_fadvise(req, nxt, force_nonblock);
+ break;
default:
ret = -EINVAL;
break;
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 80f892628e66..f87d8fb42916 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -36,6 +36,7 @@ struct io_uring_sqe {
__u32 cancel_flags;
__u32 open_flags;
__u32 statx_flags;
+ __u32 fadvise_advice;
};
__u64 user_data; /* data to be passed back at completion time */
union {
@@ -86,6 +87,7 @@ enum {
IORING_OP_STATX,
IORING_OP_READ,
IORING_OP_WRITE,
+ IORING_OP_FADVISE,
/* this goes last, obviously */
IORING_OP_LAST,
--
2.24.1
* [PATCH 2/3] mm: make do_madvise() available internally
2020-01-10 15:47 [PATCHSET 0/3] io_uring: add support for madvise/fadvise Jens Axboe
2020-01-10 15:47 ` [PATCH 1/3] io_uring: add IORING_OP_FADVISE Jens Axboe
@ 2020-01-10 15:47 ` Jens Axboe
2020-01-10 15:47 ` [PATCH 3/3] io_uring: add IORING_OP_MADVISE Jens Axboe
2 siblings, 0 replies; 8+ messages in thread
From: Jens Axboe @ 2020-01-10 15:47 UTC (permalink / raw)
To: io-uring; +Cc: linux-fsdevel, linux-mm, Jens Axboe
This is in preparation for enabling this functionality through io_uring.
Add a helper that is just exporting what sys_madvise() does, and have the
system call use it.
No functional changes in this patch.
Signed-off-by: Jens Axboe <[email protected]>
---
include/linux/mm.h | 1 +
mm/madvise.c | 7 ++++++-
2 files changed, 7 insertions(+), 1 deletion(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 80a9162b406c..766cad8aaa60 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2328,6 +2328,7 @@ extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf, bool downgrade);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf);
+extern int do_madvise(unsigned long start, size_t len_in, int behavior);
static inline unsigned long
do_mmap_pgoff(struct file *file, unsigned long addr,
diff --git a/mm/madvise.c b/mm/madvise.c
index bcdb6a042787..43b47d3fae02 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -1044,7 +1044,7 @@ madvise_behavior_valid(int behavior)
* -EBADF - map exists, but area maps something that isn't a file.
* -EAGAIN - a kernel resource was temporarily unavailable.
*/
-SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
+int do_madvise(unsigned long start, size_t len_in, int behavior)
{
unsigned long end, tmp;
struct vm_area_struct *vma, *prev;
@@ -1141,3 +1141,8 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
return error;
}
+
+SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
+{
+ return do_madvise(start, len_in, behavior);
+}
--
2.24.1
* [PATCH 3/3] io_uring: add IORING_OP_MADVISE
2020-01-10 15:47 [PATCHSET 0/3] io_uring: add support for madvise/fadvise Jens Axboe
2020-01-10 15:47 ` [PATCH 1/3] io_uring: add IORING_OP_FADVISE Jens Axboe
2020-01-10 15:47 ` [PATCH 2/3] mm: make do_madvise() available internally Jens Axboe
@ 2020-01-10 15:47 ` Jens Axboe
2020-01-11 23:10 ` Kirill A. Shutemov
2020-01-12 21:39 ` Pavel Begunkov
2 siblings, 2 replies; 8+ messages in thread
From: Jens Axboe @ 2020-01-10 15:47 UTC (permalink / raw)
To: io-uring; +Cc: linux-fsdevel, linux-mm, Jens Axboe
This adds support for doing madvise(2) through io_uring. We assume that
any operation can block, and hence punt everything async. This could be
improved, but hard to make bullet proof. The async punt ensures it's
safe.
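As with the fadvise patch, an illustrative userspace sketch (not part of the patch): the ring_madvise() helper name is made up, liburing is assumed for ring setup and submission, and the SQE fields mirror what io_madvise_prep() below reads.

/*
 * Sketch only: submit IORING_OP_MADVISE. The op only sets ->needs_mm,
 * so no file descriptor is involved; the advice value travels in the
 * shared fadvise_advice SQE field.
 */
#include <errno.h>
#include <liburing.h>
#include <string.h>
#include <sys/mman.h>   /* MADV_* values passed in by callers */

static int ring_madvise(struct io_uring *ring, void *addr, __u32 len,
                        int advice)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct io_uring_cqe *cqe;
        int ret;

        if (!sqe)
                return -EBUSY;
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode = IORING_OP_MADVISE;
        sqe->fd = -1;                           /* no regular file for this op */
        sqe->addr = (unsigned long) addr;       /* start of the mapping */
        sqe->len = len;                         /* length in bytes */
        sqe->fadvise_advice = advice;           /* MADV_* value for do_madvise() */

        io_uring_submit(ring);
        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret)
                return ret;
        ret = cqe->res;                         /* 0 on success, -errno on failure */
        io_uring_cqe_seen(ring, cqe);
        return ret;
}

Because io_madvise() returns -EAGAIN for any nonblocking attempt, a call like ring_madvise(&ring, buf, 4096, MADV_DONTNEED) always completes via the async workers.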
Signed-off-by: Jens Axboe <[email protected]>
---
fs/io_uring.c | 56 ++++++++++++++++++++++++++++++++++-
include/uapi/linux/io_uring.h | 1 +
2 files changed, 56 insertions(+), 1 deletion(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 0b200a7d4ae0..378f97cc2bf2 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -403,7 +403,10 @@ struct io_files_update {
struct io_fadvise {
struct file *file;
- u64 offset;
+ union {
+ u64 offset;
+ u64 addr;
+ };
u32 len;
u32 advice;
};
@@ -682,6 +685,10 @@ static const struct io_op_def io_op_defs[] = {
/* IORING_OP_FADVISE */
.needs_file = 1,
},
+ {
+ /* IORING_OP_MADVISE */
+ .needs_mm = 1,
+ },
};
static void io_wq_submit_work(struct io_wq_work **workptr);
@@ -2448,6 +2455,42 @@ static int io_openat(struct io_kiocb *req, struct io_kiocb **nxt,
return 0;
}
+static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
+ if (sqe->ioprio || sqe->buf_index || sqe->off)
+ return -EINVAL;
+
+ req->fadvise.addr = READ_ONCE(sqe->addr);
+ req->fadvise.len = READ_ONCE(sqe->len);
+ req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
+static int io_madvise(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
+{
+#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
+ struct io_fadvise *fa = &req->fadvise;
+ int ret;
+
+ if (force_nonblock)
+ return -EAGAIN;
+
+ ret = do_madvise(fa->addr, fa->len, fa->advice);
+ if (ret < 0)
+ req_set_fail_links(req);
+ io_cqring_add_event(req, ret);
+ io_put_req_find_next(req, nxt);
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
if (sqe->ioprio || sqe->buf_index || sqe->addr)
@@ -3769,6 +3812,9 @@ static int io_req_defer_prep(struct io_kiocb *req,
case IORING_OP_FADVISE:
ret = io_fadvise_prep(req, sqe);
break;
+ case IORING_OP_MADVISE:
+ ret = io_madvise_prep(req, sqe);
+ break;
default:
printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
req->opcode);
@@ -3973,6 +4019,14 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
}
ret = io_fadvise(req, nxt, force_nonblock);
break;
+ case IORING_OP_MADVISE:
+ if (sqe) {
+ ret = io_madvise_prep(req, sqe);
+ if (ret)
+ break;
+ }
+ ret = io_madvise(req, nxt, force_nonblock);
+ break;
default:
ret = -EINVAL;
break;
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index f87d8fb42916..7cb6fe0fccd7 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -88,6 +88,7 @@ enum {
IORING_OP_READ,
IORING_OP_WRITE,
IORING_OP_FADVISE,
+ IORING_OP_MADVISE,
/* this goes last, obviously */
IORING_OP_LAST,
--
2.24.1
* Re: [PATCH 3/3] io_uring: add IORING_OP_MADVISE
2020-01-10 15:47 ` [PATCH 3/3] io_uring: add IORING_OP_MADVISE Jens Axboe
@ 2020-01-11 23:10 ` Kirill A. Shutemov
2020-01-12 2:16 ` Jens Axboe
2020-01-12 21:39 ` Pavel Begunkov
1 sibling, 1 reply; 8+ messages in thread
From: Kirill A. Shutemov @ 2020-01-11 23:10 UTC (permalink / raw)
To: Jens Axboe; +Cc: io-uring, linux-fsdevel, linux-mm
On Fri, Jan 10, 2020 at 08:47:39AM -0700, Jens Axboe wrote:
> This adds support for doing madvise(2) through io_uring. We assume that
> any operation can block, and hence punt everything async. This could be
> improved, but hard to make bullet proof. The async punt ensures it's
> safe.
>
> Signed-off-by: Jens Axboe <[email protected]>
How do capability checks work with io_uring?
MADV_HWPOISON requires CAP_SYS_ADMIN, and I just want to make sure this
will not open a way around it.
--
Kirill A. Shutemov
* Re: [PATCH 3/3] io_uring: add IORING_OP_MADVISE
2020-01-11 23:10 ` Kirill A. Shutemov
@ 2020-01-12 2:16 ` Jens Axboe
0 siblings, 0 replies; 8+ messages in thread
From: Jens Axboe @ 2020-01-12 2:16 UTC (permalink / raw)
To: Kirill A. Shutemov; +Cc: io-uring, linux-fsdevel, linux-mm
On 1/11/20 4:10 PM, Kirill A. Shutemov wrote:
> On Fri, Jan 10, 2020 at 08:47:39AM -0700, Jens Axboe wrote:
>> This adds support for doing madvise(2) through io_uring. We assume that
>> any operation can block, and hence punt everything async. This could be
>> improved, but hard to make bullet proof. The async punt ensures it's
>> safe.
>>
>> Signed-off-by: Jens Axboe <[email protected]>
>
> How do capability checks work with io_uring?
>
> MADV_HWPOISON requires CAP_SYS_ADMIN, and I just want to make sure this
> will not open a way around it.
There are two ways the request can get invoked from io_uring:
1) Inline from the system call; the personality is that of the application
(of course) in that case.
2) Async helper; the personality (creds, mm, etc.) is inherited from the ring.
So it should be totally safe, and madvise is no different from the other
system calls supported in that regard.
--
Jens Axboe
* Re: [PATCH 3/3] io_uring: add IORING_OP_MADVISE
2020-01-10 15:47 ` [PATCH 3/3] io_uring: add IORING_OP_MADVISE Jens Axboe
2020-01-11 23:10 ` Kirill A. Shutemov
@ 2020-01-12 21:39 ` Pavel Begunkov
2020-01-13 16:58 ` Jens Axboe
1 sibling, 1 reply; 8+ messages in thread
From: Pavel Begunkov @ 2020-01-12 21:39 UTC (permalink / raw)
To: Jens Axboe, io-uring; +Cc: linux-fsdevel, linux-mm
On 10/01/2020 18:47, Jens Axboe wrote:
> This adds support for doing madvise(2) through io_uring. We assume that
> any operation can block, and hence punt everything async. This could be
> improved, but hard to make bullet proof. The async punt ensures it's
> safe.
>
I don't like that it shares struct/field names with fadvise. E.g. madvise's
context is called struct io_fadvise. Could it at least have its own advice
field in struct io_uring_sqe? The io_uring parts of the patchset look good.
Reviewed-by: Pavel Begunkov <[email protected]>
> Signed-off-by: Jens Axboe <[email protected]>
> ---
> fs/io_uring.c | 56 ++++++++++++++++++++++++++++++++++-
> include/uapi/linux/io_uring.h | 1 +
> 2 files changed, 56 insertions(+), 1 deletion(-)
>
> diff --git a/fs/io_uring.c b/fs/io_uring.c
> index 0b200a7d4ae0..378f97cc2bf2 100644
> --- a/fs/io_uring.c
> +++ b/fs/io_uring.c
> @@ -403,7 +403,10 @@ struct io_files_update {
>
> struct io_fadvise {
> struct file *file;
> - u64 offset;
> + union {
> + u64 offset;
> + u64 addr;
> + };
> u32 len;
> u32 advice;
> };
> @@ -682,6 +685,10 @@ static const struct io_op_def io_op_defs[] = {
> /* IORING_OP_FADVISE */
> .needs_file = 1,
> },
> + {
> + /* IORING_OP_MADVISE */
> + .needs_mm = 1,
> + },
> };
>
> static void io_wq_submit_work(struct io_wq_work **workptr);
> @@ -2448,6 +2455,42 @@ static int io_openat(struct io_kiocb *req, struct io_kiocb **nxt,
> return 0;
> }
>
> +static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
> +{
> +#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
> + if (sqe->ioprio || sqe->buf_index || sqe->off)
> + return -EINVAL;
> +
> + req->fadvise.addr = READ_ONCE(sqe->addr);
> + req->fadvise.len = READ_ONCE(sqe->len);
> + req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
> + return 0;
> +#else
> + return -EOPNOTSUPP;
> +#endif
> +}
> +
> +static int io_madvise(struct io_kiocb *req, struct io_kiocb **nxt,
> + bool force_nonblock)
> +{
> +#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
> + struct io_fadvise *fa = &req->fadvise;
> + int ret;
> +
> + if (force_nonblock)
> + return -EAGAIN;
> +
> + ret = do_madvise(fa->addr, fa->len, fa->advice);
> + if (ret < 0)
> + req_set_fail_links(req);
> + io_cqring_add_event(req, ret);
> + io_put_req_find_next(req, nxt);
> + return 0;
> +#else
> + return -EOPNOTSUPP;
> +#endif
> +}
> +
> static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
> {
> if (sqe->ioprio || sqe->buf_index || sqe->addr)
> @@ -3769,6 +3812,9 @@ static int io_req_defer_prep(struct io_kiocb *req,
> case IORING_OP_FADVISE:
> ret = io_fadvise_prep(req, sqe);
> break;
> + case IORING_OP_MADVISE:
> + ret = io_madvise_prep(req, sqe);
> + break;
> default:
> printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
> req->opcode);
> @@ -3973,6 +4019,14 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
> }
> ret = io_fadvise(req, nxt, force_nonblock);
> break;
> + case IORING_OP_MADVISE:
> + if (sqe) {
> + ret = io_madvise_prep(req, sqe);
> + if (ret)
> + break;
> + }
> + ret = io_madvise(req, nxt, force_nonblock);
> + break;
> default:
> ret = -EINVAL;
> break;
> diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
> index f87d8fb42916..7cb6fe0fccd7 100644
> --- a/include/uapi/linux/io_uring.h
> +++ b/include/uapi/linux/io_uring.h
> @@ -88,6 +88,7 @@ enum {
> IORING_OP_READ,
> IORING_OP_WRITE,
> IORING_OP_FADVISE,
> + IORING_OP_MADVISE,
>
> /* this goes last, obviously */
> IORING_OP_LAST,
>
--
Pavel Begunkov
* Re: [PATCH 3/3] io_uring: add IORING_OP_MADVISE
2020-01-12 21:39 ` Pavel Begunkov
@ 2020-01-13 16:58 ` Jens Axboe
0 siblings, 0 replies; 8+ messages in thread
From: Jens Axboe @ 2020-01-13 16:58 UTC (permalink / raw)
To: Pavel Begunkov, io-uring; +Cc: linux-fsdevel, linux-mm
On 1/12/20 2:39 PM, Pavel Begunkov wrote:
> On 10/01/2020 18:47, Jens Axboe wrote:
>> This adds support for doing madvise(2) through io_uring. We assume that
>> any operation can block, and hence punt everything async. This could be
>> improved, but hard to make bullet proof. The async punt ensures it's
>> safe.
>>
> I don't like that it shares struct/field names with fadvise. E.g. madvise's
> context is called struct io_fadvise. Could it at least have its own advice
> field in struct io_uring_sqe? The io_uring parts of the patchset look good.
>
> Reviewed-by: Pavel Begunkov <[email protected]>
Thanks, I can add the separate union, not a big deal.
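Roughly something like this, purely as a sketch (names and layout are illustrative, not final), keeping the same fields the current patch already reads:

/* Illustrative only: give madvise its own request context instead of
 * reusing struct io_fadvise. */
struct io_fadvise {
        struct file     *file;
        u64             offset;
        u32             len;
        u32             advice;
};

struct io_madvise {
        struct file     *file;
        u64             addr;
        u32             len;
        u32             advice;
};

struct io_madvise would then sit next to struct io_fadvise in the per-request union, and io_madvise_prep()/io_madvise() would use it instead of borrowing the fadvise context.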
--
Jens Axboe