* [PATCH 1/5] io_uring: move io_recvmsg_copy_hdr()
2020-11-15 10:35 [RFC 0/5] support for {send,recv}[msg] with registered bufs Pavel Begunkov
@ 2020-11-15 10:35 ` Pavel Begunkov
2020-11-15 10:35 ` [PATCH 2/5] io_uring: copy hdr consistently for send and recv Pavel Begunkov
` (3 subsequent siblings)
4 siblings, 0 replies; 6+ messages in thread
From: Pavel Begunkov @ 2020-11-15 10:35 UTC
To: Jens Axboe, io-uring, v
Move io_recvmsg_copy_hdr() so it can be reused in later patches.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 238 +++++++++++++++++++++++++-------------------------
1 file changed, 119 insertions(+), 119 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index aafdcf94be9d..bcd6f63af711 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4494,125 +4494,6 @@ static int io_setup_async_msg(struct io_kiocb *req,
return -EAGAIN;
}
-static int io_sendmsg_copy_hdr(struct io_kiocb *req,
- struct io_async_msghdr *iomsg)
-{
- iomsg->iov = iomsg->fast_iov;
- iomsg->msg.msg_name = &iomsg->addr;
- return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
- req->sr_msg.msg_flags, &iomsg->iov);
-}
-
-static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
- struct io_async_msghdr *async_msg = req->async_data;
- struct io_sr_msg *sr = &req->sr_msg;
- int ret;
-
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
-
- sr->msg_flags = READ_ONCE(sqe->msg_flags);
- sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
- sr->len = READ_ONCE(sqe->len);
-
-#ifdef CONFIG_COMPAT
- if (req->ctx->compat)
- sr->msg_flags |= MSG_CMSG_COMPAT;
-#endif
-
- if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
- return 0;
- ret = io_sendmsg_copy_hdr(req, async_msg);
- if (!ret)
- req->flags |= REQ_F_NEED_CLEANUP;
- return ret;
-}
-
-static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
- struct io_comp_state *cs)
-{
- struct io_async_msghdr iomsg, *kmsg;
- struct socket *sock;
- unsigned flags;
- int ret;
-
- sock = sock_from_file(req->file, &ret);
- if (unlikely(!sock))
- return ret;
-
- kmsg = req->async_data;
- if (!kmsg) {
- ret = io_sendmsg_copy_hdr(req, &iomsg);
- if (ret)
- return ret;
- kmsg = &iomsg;
- }
-
- flags = req->sr_msg.msg_flags;
- if (flags & MSG_DONTWAIT)
- req->flags |= REQ_F_NOWAIT;
- else if (force_nonblock)
- flags |= MSG_DONTWAIT;
-
- ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
- if (force_nonblock && ret == -EAGAIN)
- return io_setup_async_msg(req, kmsg);
- if (ret == -ERESTARTSYS)
- ret = -EINTR;
-
- /* it's reportedly faster to check for null here */
- if (kmsg->iov)
- kfree(kmsg->iov);
- req->flags &= ~REQ_F_NEED_CLEANUP;
- if (ret < 0)
- req_set_fail_links(req);
- __io_req_complete(req, ret, 0, cs);
- return 0;
-}
-
-static int io_send(struct io_kiocb *req, bool force_nonblock,
- struct io_comp_state *cs)
-{
- struct io_sr_msg *sr = &req->sr_msg;
- struct msghdr msg;
- struct iovec iov;
- struct socket *sock;
- unsigned flags;
- int ret;
-
- sock = sock_from_file(req->file, &ret);
- if (unlikely(!sock))
- return ret;
-
- ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
- if (unlikely(ret))
- return ret;
-
- msg.msg_name = NULL;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_namelen = 0;
-
- flags = req->sr_msg.msg_flags;
- if (flags & MSG_DONTWAIT)
- req->flags |= REQ_F_NOWAIT;
- else if (force_nonblock)
- flags |= MSG_DONTWAIT;
-
- msg.msg_flags = flags;
- ret = sock_sendmsg(sock, &msg);
- if (force_nonblock && ret == -EAGAIN)
- return -EAGAIN;
- if (ret == -ERESTARTSYS)
- ret = -EINTR;
-
- if (ret < 0)
- req_set_fail_links(req);
- __io_req_complete(req, ret, 0, cs);
- return 0;
-}
-
static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
struct io_async_msghdr *iomsg)
{
@@ -4733,6 +4614,125 @@ static int io_recvmsg_copy_hdr(struct io_kiocb *req,
return 0;
}
+static int io_sendmsg_copy_hdr(struct io_kiocb *req,
+ struct io_async_msghdr *iomsg)
+{
+ iomsg->iov = iomsg->fast_iov;
+ iomsg->msg.msg_name = &iomsg->addr;
+ return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
+ req->sr_msg.msg_flags, &iomsg->iov);
+}
+
+static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct io_async_msghdr *async_msg = req->async_data;
+ struct io_sr_msg *sr = &req->sr_msg;
+ int ret;
+
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
+
+ sr->msg_flags = READ_ONCE(sqe->msg_flags);
+ sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ sr->len = READ_ONCE(sqe->len);
+
+#ifdef CONFIG_COMPAT
+ if (req->ctx->compat)
+ sr->msg_flags |= MSG_CMSG_COMPAT;
+#endif
+
+ if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
+ return 0;
+ ret = io_sendmsg_copy_hdr(req, async_msg);
+ if (!ret)
+ req->flags |= REQ_F_NEED_CLEANUP;
+ return ret;
+}
+
+static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
+ struct io_comp_state *cs)
+{
+ struct io_async_msghdr iomsg, *kmsg;
+ struct socket *sock;
+ unsigned flags;
+ int ret;
+
+ sock = sock_from_file(req->file, &ret);
+ if (unlikely(!sock))
+ return ret;
+
+ kmsg = req->async_data;
+ if (!kmsg) {
+ ret = io_sendmsg_copy_hdr(req, &iomsg);
+ if (ret)
+ return ret;
+ kmsg = &iomsg;
+ }
+
+ flags = req->sr_msg.msg_flags;
+ if (flags & MSG_DONTWAIT)
+ req->flags |= REQ_F_NOWAIT;
+ else if (force_nonblock)
+ flags |= MSG_DONTWAIT;
+
+ ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
+ if (force_nonblock && ret == -EAGAIN)
+ return io_setup_async_msg(req, kmsg);
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+
+ /* it's reportedly faster to check for null here */
+ if (kmsg->iov)
+ kfree(kmsg->iov);
+ req->flags &= ~REQ_F_NEED_CLEANUP;
+ if (ret < 0)
+ req_set_fail_links(req);
+ __io_req_complete(req, ret, 0, cs);
+ return 0;
+}
+
+static int io_send(struct io_kiocb *req, bool force_nonblock,
+ struct io_comp_state *cs)
+{
+ struct io_sr_msg *sr = &req->sr_msg;
+ struct msghdr msg;
+ struct iovec iov;
+ struct socket *sock;
+ unsigned flags;
+ int ret;
+
+ sock = sock_from_file(req->file, &ret);
+ if (unlikely(!sock))
+ return ret;
+
+ ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
+ if (unlikely(ret))
+ return ret;
+
+ msg.msg_name = NULL;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_namelen = 0;
+
+ flags = req->sr_msg.msg_flags;
+ if (flags & MSG_DONTWAIT)
+ req->flags |= REQ_F_NOWAIT;
+ else if (force_nonblock)
+ flags |= MSG_DONTWAIT;
+
+ msg.msg_flags = flags;
+ ret = sock_sendmsg(sock, &msg);
+ if (force_nonblock && ret == -EAGAIN)
+ return -EAGAIN;
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+
+ if (ret < 0)
+ req_set_fail_links(req);
+ __io_req_complete(req, ret, 0, cs);
+ return 0;
+}
+
static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
{
return io_put_kbuf(req, req->sr_msg.kbuf);
--
2.24.0
* [PATCH 2/5] io_uring: copy hdr consistently for send and recv
2020-11-15 10:35 [RFC 0/5] support for {send,recv}[msg] with registered bufs Pavel Begunkov
2020-11-15 10:35 ` [PATCH 1/5] io_uring: move io_recvmsg_copy_hdr() Pavel Begunkov
@ 2020-11-15 10:35 ` Pavel Begunkov
2020-11-15 10:35 ` [PATCH 3/5] io_uring: opcode independent import_fixed Pavel Begunkov
` (2 subsequent siblings)
4 siblings, 0 replies; 6+ messages in thread
From: Pavel Begunkov @ 2020-11-15 10:35 UTC
To: Jens Axboe, io-uring, v
recvmsg() goes through a custom-written msg header/iovec copying
helper; make sendmsg() use it as well. Apart from being more consistent
in general, this allows the helper to be extended (e.g. for registered
buffers) without duplication.
Signed-off-by: Pavel Begunkov <[email protected]>
---
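Not part of the patch, just a note for review: the new rw argument
follows the iov_iter data-direction convention already used in this
file, so after this change the call sites read:

	ret = io_import_msg(req, &iomsg, WRITE);  /* sendmsg: data flows out of user memory */
	ret = io_import_msg(req, &iomsg, READ);   /* recvmsg: data flows into user memory */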
fs/io_uring.c | 51 ++++++++++++++++++++++++---------------------------
1 file changed, 24 insertions(+), 27 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index bcd6f63af711..88daf5fc7e8e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4494,16 +4494,18 @@ static int io_setup_async_msg(struct io_kiocb *req,
return -EAGAIN;
}
-static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
- struct io_async_msghdr *iomsg)
+static int __io_msg_copy_hdr(struct io_kiocb *req,
+ struct io_async_msghdr *iomsg, int rw)
{
+ struct sockaddr __user **save_addr;
struct io_sr_msg *sr = &req->sr_msg;
struct iovec __user *uiov;
size_t iov_len;
int ret;
- ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
- &iomsg->uaddr, &uiov, &iov_len);
+ save_addr = (rw == READ) ? &iomsg->uaddr : NULL;
+ ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg, save_addr,
+ &uiov, &iov_len);
if (ret)
return ret;
@@ -4517,7 +4519,7 @@ static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
sr->len);
iomsg->iov = NULL;
} else {
- ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
+ ret = __import_iovec(rw, uiov, iov_len, UIO_FASTIOV,
&iomsg->iov, &iomsg->msg.msg_iter,
false);
if (ret > 0)
@@ -4528,9 +4530,10 @@ static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
}
#ifdef CONFIG_COMPAT
-static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
- struct io_async_msghdr *iomsg)
+static int __io_compat_msg_copy_hdr(struct io_kiocb *req,
+ struct io_async_msghdr *iomsg, int rw)
{
+ struct sockaddr __user **save_addr;
struct compat_msghdr __user *msg_compat;
struct io_sr_msg *sr = &req->sr_msg;
struct compat_iovec __user *uiov;
@@ -4539,8 +4542,9 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
int ret;
msg_compat = (struct compat_msghdr __user *) sr->umsg;
- ret = __get_compat_msghdr(&iomsg->msg, msg_compat, &iomsg->uaddr,
- &ptr, &len);
+ save_addr = (rw == READ) ? &iomsg->uaddr : NULL;
+ ret = __get_compat_msghdr(&iomsg->msg, msg_compat, save_addr,
+ &ptr, &len);
if (ret)
return ret;
@@ -4559,7 +4563,7 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
sr->len = iomsg->iov[0].iov_len;
iomsg->iov = NULL;
} else {
- ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
+ ret = __import_iovec(rw, (struct iovec __user *)uiov, len,
UIO_FASTIOV, &iomsg->iov,
&iomsg->msg.msg_iter, true);
if (ret < 0)
@@ -4585,8 +4589,8 @@ static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
return kbuf;
}
-static int io_recvmsg_copy_hdr(struct io_kiocb *req,
- struct io_async_msghdr *iomsg)
+static int io_import_msg(struct io_kiocb *req, struct io_async_msghdr *iomsg,
+ int rw)
{
struct io_buffer *kbuf;
int ret;
@@ -4595,14 +4599,16 @@ static int io_recvmsg_copy_hdr(struct io_kiocb *req,
iomsg->iov = iomsg->fast_iov;
#ifdef CONFIG_COMPAT
if (req->ctx->compat)
- ret = __io_compat_recvmsg_copy_hdr(req, iomsg);
+ ret = __io_compat_msg_copy_hdr(req, iomsg, rw);
else
#endif
- ret = __io_recvmsg_copy_hdr(req, iomsg);
+ ret = __io_msg_copy_hdr(req, iomsg, rw);
if (ret < 0)
return ret;
if (req->flags & REQ_F_BUFFER_SELECT) {
+ if (rw != READ)
+ return -EINVAL;
/* init is always done with uring_lock held */
kbuf = io_recv_buffer_select(req, false);
if (IS_ERR(kbuf))
@@ -4614,15 +4620,6 @@ static int io_recvmsg_copy_hdr(struct io_kiocb *req,
return 0;
}
-static int io_sendmsg_copy_hdr(struct io_kiocb *req,
- struct io_async_msghdr *iomsg)
-{
- iomsg->iov = iomsg->fast_iov;
- iomsg->msg.msg_name = &iomsg->addr;
- return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
- req->sr_msg.msg_flags, &iomsg->iov);
-}
-
static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_async_msghdr *async_msg = req->async_data;
@@ -4643,7 +4640,7 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
return 0;
- ret = io_sendmsg_copy_hdr(req, async_msg);
+ ret = io_import_msg(req, async_msg, WRITE);
if (!ret)
req->flags |= REQ_F_NEED_CLEANUP;
return ret;
@@ -4663,7 +4660,7 @@ static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
kmsg = req->async_data;
if (!kmsg) {
- ret = io_sendmsg_copy_hdr(req, &iomsg);
+ ret = io_import_msg(req, &iomsg, WRITE);
if (ret)
return ret;
kmsg = &iomsg;
@@ -4760,7 +4757,7 @@ static int io_recvmsg_prep(struct io_kiocb *req,
if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
return 0;
- ret = io_recvmsg_copy_hdr(req, async_msg);
+ ret = io_import_msg(req, async_msg, READ);
if (!ret)
req->flags |= REQ_F_NEED_CLEANUP;
return ret;
@@ -4780,7 +4777,7 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
kmsg = req->async_data;
if (!kmsg) {
- ret = io_recvmsg_copy_hdr(req, &iomsg);
+ ret = io_import_msg(req, &iomsg, READ);
if (ret)
return ret;
kmsg = &iomsg;
--
2.24.0
* [PATCH 3/5] io_uring: opcode independent import_fixed
2020-11-15 10:35 [RFC 0/5] support for {send,recv}[msg] with registered bufs Pavel Begunkov
2020-11-15 10:35 ` [PATCH 1/5] io_uring: move io_recvmsg_copy_hdr() Pavel Begunkov
2020-11-15 10:35 ` [PATCH 2/5] io_uring: copy hdr consistently for send and recv Pavel Begunkov
@ 2020-11-15 10:35 ` Pavel Begunkov
2020-11-15 10:35 ` [PATCH 4/5] io_uring: send/recv with registered buffer Pavel Begunkov
2020-11-15 10:35 ` [PATCH 5/5] io_uring: sendmsg/recvmsg with registered buffers Pavel Begunkov
4 siblings, 0 replies; 6+ messages in thread
From: Pavel Begunkov @ 2020-11-15 10:35 UTC
To: Jens Axboe, io-uring, v
Pass the buffer address and length explicitly into io_import_fixed(),
so it can be used not only for rw requests.
Signed-off-by: Pavel Begunkov <[email protected]>
---
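For illustration (not in the patch): the rw path keeps its old
behaviour by passing its own fields, while any other caller can now
import an arbitrary address/length pair against req->buf_index:

	/* rw path, behaviour unchanged: */
	ret = io_import_fixed(req, rw, req->rw.addr, sqe_len, iter);
	/* what this enables, e.g. the send path in the next patch: */
	ret = io_import_fixed(req, WRITE, (u64)sr->buf, sr->len, &msg.msg_iter);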
fs/io_uring.c | 9 +++------
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 88daf5fc7e8e..7703291617f3 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2933,21 +2933,18 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
io_rw_done(kiocb, ret);
}
-static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
- struct iov_iter *iter)
+static ssize_t io_import_fixed(struct io_kiocb *req, int rw, u64 buf_addr,
+ size_t len, struct iov_iter *iter)
{
struct io_ring_ctx *ctx = req->ctx;
- size_t len = req->rw.len;
struct io_mapped_ubuf *imu;
u16 index, buf_index = req->buf_index;
size_t offset;
- u64 buf_addr;
if (unlikely(buf_index >= ctx->nr_user_bufs))
return -EFAULT;
index = array_index_nospec(buf_index, ctx->nr_user_bufs);
imu = &ctx->user_bufs[index];
- buf_addr = req->rw.addr;
/* overflow */
if (buf_addr + len < buf_addr)
@@ -3153,7 +3150,7 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
opcode = req->opcode;
if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
*iovec = NULL;
- return io_import_fixed(req, rw, iter);
+ return io_import_fixed(req, rw, req->rw.addr, sqe_len, iter);
}
/* buffer index only valid with fixed read/write, or buffer select */
--
2.24.0
* [PATCH 4/5] io_uring: send/recv with registered buffer
2020-11-15 10:35 [RFC 0/5] support for {send,recv}[msg] with registered bufs Pavel Begunkov
` (2 preceding siblings ...)
2020-11-15 10:35 ` [PATCH 3/5] io_uring: opcode independent import_fixed Pavel Begunkov
@ 2020-11-15 10:35 ` Pavel Begunkov
2020-11-15 10:35 ` [PATCH 5/5] io_uring: sendmsg/recvmsg with registered buffers Pavel Begunkov
4 siblings, 0 replies; 6+ messages in thread
From: Pavel Begunkov @ 2020-11-15 10:35 UTC
To: Jens Axboe, io-uring, v
Add support for registered buffers to send() and recv(). This is done
by taking the last bit of the send/recv msg_flags for IO_MSG_FIXED,
which is cleared before the flags go into the net stack.
Signed-off-by: Pavel Begunkov <[email protected]>
---
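A rough userspace sketch of the intended use, with liburing; this is
not part of the patch, and IO_MSG_FIXED's value is copied from the hunk
below since the RFC doesn't add it to the uapi headers yet. The
registered buffer is selected via sqe->buf_index (wired up in the
shared prep helpers by patch 5):

	#include <liburing.h>

	#define IO_MSG_FIXED	(1U << 31)	/* from this patch, not in uapi yet */

	/*
	 * Send len bytes from buf, which must lie inside the buffer
	 * previously registered at index buf_idx via
	 * io_uring_register_buffers().
	 */
	static int send_fixed(struct io_uring *ring, int sockfd, void *buf,
			      unsigned int len, int buf_idx)
	{
		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

		if (!sqe)
			return -EAGAIN;
		io_uring_prep_send(sqe, sockfd, buf, len, IO_MSG_FIXED);
		sqe->buf_index = buf_idx;
		return io_uring_submit(ring);
	}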
fs/io_uring.c | 39 +++++++++++++++++++++++++++------------
1 file changed, 27 insertions(+), 12 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 7703291617f3..390495170fb0 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -104,6 +104,8 @@
#define IORING_MAX_RESTRICTIONS (IORING_RESTRICTION_LAST + \
IORING_REGISTER_LAST + IORING_OP_LAST)
+#define IO_MSG_FIXED (1U << 31)
+
struct io_uring {
u32 head ____cacheline_aligned_in_smp;
u32 tail ____cacheline_aligned_in_smp;
@@ -4689,18 +4691,25 @@ static int io_send(struct io_kiocb *req, bool force_nonblock,
struct io_comp_state *cs)
{
struct io_sr_msg *sr = &req->sr_msg;
+ unsigned int flags = sr->msg_flags;
struct msghdr msg;
struct iovec iov;
struct socket *sock;
- unsigned flags;
int ret;
sock = sock_from_file(req->file, &ret);
if (unlikely(!sock))
return ret;
- ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
- if (unlikely(ret))
+ if (flags & IO_MSG_FIXED) {
+ ret = io_import_fixed(req, WRITE, (u64)sr->buf, sr->len,
+ &msg.msg_iter);
+ flags &= ~IO_MSG_FIXED;
+ } else {
+ ret = import_single_range(WRITE, sr->buf, sr->len, &iov,
+ &msg.msg_iter);
+ }
+ if (unlikely(ret < 0))
return ret;
msg.msg_name = NULL;
@@ -4708,7 +4717,6 @@ static int io_send(struct io_kiocb *req, bool force_nonblock,
msg.msg_controllen = 0;
msg.msg_namelen = 0;
- flags = req->sr_msg.msg_flags;
if (flags & MSG_DONTWAIT)
req->flags |= REQ_F_NOWAIT;
else if (force_nonblock)
@@ -4821,15 +4829,22 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock,
if (unlikely(!sock))
return ret;
- if (req->flags & REQ_F_BUFFER_SELECT) {
- kbuf = io_recv_buffer_select(req, !force_nonblock);
- if (IS_ERR(kbuf))
- return PTR_ERR(kbuf);
- buf = u64_to_user_ptr(kbuf->addr);
- }
+ if (flags & IO_MSG_FIXED) {
+ ret = io_import_fixed(req, READ, (u64)buf, sr->len,
+ &msg.msg_iter);
+ flags &= ~IO_MSG_FIXED;
+ } else {
+ if (req->flags & REQ_F_BUFFER_SELECT) {
+ kbuf = io_recv_buffer_select(req, !force_nonblock);
+ if (IS_ERR(kbuf))
+ return PTR_ERR(kbuf);
+ buf = u64_to_user_ptr(kbuf->addr);
+ }
- ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
- if (unlikely(ret))
+ ret = import_single_range(READ, buf, sr->len, &iov,
+ &msg.msg_iter);
+ }
+ if (unlikely(ret < 0))
goto out_free;
msg.msg_name = NULL;
--
2.24.0
* [PATCH 5/5] io_uring: sendmsg/recvmsg with registered buffers
2020-11-15 10:35 [RFC 0/5] support for {send,recv}[msg] with registered bufs Pavel Begunkov
` (3 preceding siblings ...)
2020-11-15 10:35 ` [PATCH 4/5] io_uring: send/recv with registered buffer Pavel Begunkov
@ 2020-11-15 10:35 ` Pavel Begunkov
4 siblings, 0 replies; 6+ messages in thread
From: Pavel Begunkov @ 2020-11-15 10:35 UTC
To: Jens Axboe, io-uring, v
Add support for registered buffers to sendmsg() and recvmsg(). As with
the previous patch, it uses IO_MSG_FIXED, the last bit of the flags,
which is cleared before the flags go into the net stack.
Signed-off-by: Pavel Begunkov <[email protected]>
---
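Again a hedged userspace sketch, not part of the patch: with
IO_MSG_FIXED set, the header copy path accepts at most one iovec entry
and resolves its base/length against the registered buffer selected by
sqe->buf_index. The msghdr is copied by the kernel during submission,
so stack lifetime is enough here:

	#include <liburing.h>
	#include <sys/socket.h>

	#define IO_MSG_FIXED	(1U << 31)	/* from patch 4, not in uapi yet */

	static int sendmsg_fixed(struct io_uring *ring, int sockfd, void *buf,
				 size_t len, int buf_idx)
	{
		/* exactly one iovec: the copy_hdr helpers -EINVAL on more */
		struct iovec iov = { .iov_base = buf, .iov_len = len };
		struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

		if (!sqe)
			return -EAGAIN;
		io_uring_prep_sendmsg(sqe, sockfd, &msg, IO_MSG_FIXED);
		sqe->buf_index = buf_idx;
		return io_uring_submit(ring);
	}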
fs/io_uring.c | 26 +++++++++++++++++++-------
1 file changed, 19 insertions(+), 7 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 390495170fb0..7b13dafc84ab 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4488,7 +4488,7 @@ static int io_setup_async_msg(struct io_kiocb *req,
memcpy(async_msg, kmsg, sizeof(*kmsg));
async_msg->msg.msg_name = &async_msg->addr;
/* if iov is not set, it uses fast_iov */
- if (!async_msg->iov)
+ if (!async_msg->iov && !(req->sr_msg.msg_flags & IO_MSG_FIXED))
async_msg->msg.msg_iter.iov = async_msg->fast_iov;
return -EAGAIN;
}
@@ -4508,7 +4508,8 @@ static int __io_msg_copy_hdr(struct io_kiocb *req,
if (ret)
return ret;
- if (req->flags & REQ_F_BUFFER_SELECT) {
+ if ((req->flags & REQ_F_BUFFER_SELECT) ||
+ (sr->msg_flags & IO_MSG_FIXED)) {
if (iov_len > 1)
return -EINVAL;
if (copy_from_user(iomsg->iov, uiov, sizeof(*uiov)))
@@ -4548,7 +4549,8 @@ static int __io_compat_msg_copy_hdr(struct io_kiocb *req,
return ret;
uiov = compat_ptr(ptr);
- if (req->flags & REQ_F_BUFFER_SELECT) {
+ if ((req->flags & REQ_F_BUFFER_SELECT) ||
+ (sr->msg_flags & IO_MSG_FIXED)) {
compat_ssize_t clen;
if (len > 1)
@@ -4591,6 +4593,7 @@ static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
static int io_import_msg(struct io_kiocb *req, struct io_async_msghdr *iomsg,
int rw)
{
+ struct io_sr_msg *sr = &req->sr_msg;
struct io_buffer *kbuf;
int ret;
@@ -4606,7 +4609,7 @@ static int io_import_msg(struct io_kiocb *req, struct io_async_msghdr *iomsg,
return ret;
if (req->flags & REQ_F_BUFFER_SELECT) {
- if (rw != READ)
+ if (rw != READ || (sr->msg_flags & IO_MSG_FIXED))
return -EINVAL;
/* init is always done with uring_lock held */
kbuf = io_recv_buffer_select(req, false);
@@ -4614,7 +4617,14 @@ static int io_import_msg(struct io_kiocb *req, struct io_async_msghdr *iomsg,
return PTR_ERR(kbuf);
iomsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
iov_iter_init(&iomsg->msg.msg_iter, READ, iomsg->fast_iov, 1,
- req->sr_msg.len);
+ sr->len);
+ } else if (sr->msg_flags & IO_MSG_FIXED) {
+ struct iovec *iov = &iomsg->fast_iov[0];
+
+ ret = io_import_fixed(req, rw, (u64)iov->iov_base, iov->iov_len,
+ &iomsg->msg.msg_iter);
+ if (ret < 0)
+ return ret;
}
return 0;
}
@@ -4631,6 +4641,7 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
sr->msg_flags = READ_ONCE(sqe->msg_flags);
sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
sr->len = READ_ONCE(sqe->len);
+ req->buf_index = READ_ONCE(sqe->buf_index);
#ifdef CONFIG_COMPAT
if (req->ctx->compat)
@@ -4665,7 +4676,7 @@ static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
kmsg = &iomsg;
}
- flags = req->sr_msg.msg_flags;
+ flags = req->sr_msg.msg_flags & ~IO_MSG_FIXED;
if (flags & MSG_DONTWAIT)
req->flags |= REQ_F_NOWAIT;
else if (force_nonblock)
@@ -4754,6 +4765,7 @@ static int io_recvmsg_prep(struct io_kiocb *req,
sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
sr->len = READ_ONCE(sqe->len);
sr->bgid = READ_ONCE(sqe->buf_group);
+ req->buf_index = READ_ONCE(sqe->buf_index);
#ifdef CONFIG_COMPAT
if (req->ctx->compat)
@@ -4788,7 +4800,7 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
kmsg = &iomsg;
}
- flags = req->sr_msg.msg_flags;
+ flags = req->sr_msg.msg_flags & ~IO_MSG_FIXED;
if (flags & MSG_DONTWAIT)
req->flags |= REQ_F_NOWAIT;
else if (force_nonblock)
--
2.24.0