* [PATCHSET 0/3] Cleanup and improve sendmsg/recvmsg header handling
From: Jens Axboe @ 2024-02-27 18:46 UTC
To: io-uring
Hi,
It's been bugging me that I didn't like v2 of the "improve the usercopy
for sendmsg/recvmsg" patch, so I redid patch 1 here so that I could add
a prep patch (patch 2), and then redid the usercopy improvement on top
of that. It passes my testing, and the win is actually slightly larger
with this change (closer to 10%), so at least it didn't regress.
--
Jens Axboe
* [PATCH 1/3] io_uring/net: unify how recvmsg and sendmsg copy in the msghdr
From: Jens Axboe @ 2024-02-27 18:46 UTC
To: io-uring; +Cc: Jens Axboe
For recvmsg, we roll our own msghdr copy since we support buffer
selection. That isn't the case for sendmsg right now, but in preparation
for adding it there as well, make the recvmsg copy helpers generic so we
can call them from the sendmsg side too.
Signed-off-by: Jens Axboe <[email protected]>
---
io_uring/net.c | 271 ++++++++++++++++++++++++++-----------------------
1 file changed, 142 insertions(+), 129 deletions(-)
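As an aside, here is a rough userspace sketch of the buffer-selection
special case that the shared helper keeps (illustrative only -- the
helper name and the BUFFER_SELECT flag are made up for the sketch, it is
not the kernel code): with a provided buffer only the length of the
single iovec entry matters, while the normal path would import the whole
iovec.

/* Illustrative userspace sketch, not the kernel implementation. */
#include <stdio.h>
#include <sys/uio.h>

#define BUFFER_SELECT	0x1	/* stand-in for REQ_F_BUFFER_SELECT */

/* hypothetical helper mirroring the buffer-select branch in the patch */
static long hdr_iov_len(const struct iovec *iov, size_t iovlen,
			unsigned int flags)
{
	if (flags & BUFFER_SELECT) {
		/* provided buffer: only a length cap is needed */
		if (iovlen == 0)
			return 0;
		if (iovlen > 1)
			return -1;	/* -EINVAL in the kernel */
		return (long)iov[0].iov_len;
	}

	/* normal path: the full iovec would be imported here */
	long total = 0;

	for (size_t i = 0; i < iovlen; i++)
		total += (long)iov[i].iov_len;
	return total;
}

int main(void)
{
	struct iovec iov = { .iov_base = NULL, .iov_len = 4096 };

	printf("buffer-select len: %ld\n", hdr_iov_len(&iov, 1, BUFFER_SELECT));
	printf("full import len:   %ld\n", hdr_iov_len(&iov, 1, 0));
	return 0;
}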
diff --git a/io_uring/net.c b/io_uring/net.c
index 43bc9a5f96f9..8c64e552a00f 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -204,16 +204,150 @@ static int io_setup_async_msg(struct io_kiocb *req,
return -EAGAIN;
}
+static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
+{
+ int hdr;
+
+ if (iomsg->namelen < 0)
+ return true;
+ if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
+ iomsg->namelen, &hdr))
+ return true;
+ if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
+ return true;
+
+ return false;
+}
+
+#ifdef CONFIG_COMPAT
+static int __io_compat_msg_copy_hdr(struct io_kiocb *req,
+ struct io_async_msghdr *iomsg,
+ struct sockaddr __user **addr, int ddir)
+{
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+ struct compat_msghdr msg;
+ struct compat_iovec __user *uiov;
+ int ret;
+
+ if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
+ return -EFAULT;
+
+ ret = __get_compat_msghdr(&iomsg->msg, &msg, addr);
+ if (ret)
+ return ret;
+
+ uiov = compat_ptr(msg.msg_iov);
+ if (req->flags & REQ_F_BUFFER_SELECT) {
+ compat_ssize_t clen;
+
+ iomsg->free_iov = NULL;
+ if (msg.msg_iovlen == 0) {
+ sr->len = 0;
+ } else if (msg.msg_iovlen > 1) {
+ return -EINVAL;
+ } else {
+ if (!access_ok(uiov, sizeof(*uiov)))
+ return -EFAULT;
+ if (__get_user(clen, &uiov->iov_len))
+ return -EFAULT;
+ if (clen < 0)
+ return -EINVAL;
+ sr->len = clen;
+ }
+
+ if (ddir == ITER_DEST && req->flags & REQ_F_APOLL_MULTISHOT) {
+ iomsg->namelen = msg.msg_namelen;
+ iomsg->controllen = msg.msg_controllen;
+ if (io_recvmsg_multishot_overflow(iomsg))
+ return -EOVERFLOW;
+ }
+
+ return 0;
+ }
+
+ iomsg->free_iov = iomsg->fast_iov;
+ ret = __import_iovec(ddir, (struct iovec __user *)uiov, msg.msg_iovlen,
+ UIO_FASTIOV, &iomsg->free_iov,
+ &iomsg->msg.msg_iter, true);
+ if (unlikely(ret < 0))
+ return ret;
+
+ return 0;
+}
+#endif
+
+static int __io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
+ struct sockaddr __user **addr, int ddir)
+{
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+ struct user_msghdr msg;
+ int ret;
+
+ if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
+ return -EFAULT;
+
+ ret = __copy_msghdr(&iomsg->msg, &msg, addr);
+ if (ret)
+ return ret;
+
+ if (req->flags & REQ_F_BUFFER_SELECT) {
+ if (msg.msg_iovlen == 0) {
+ sr->len = iomsg->fast_iov[0].iov_len = 0;
+ iomsg->fast_iov[0].iov_base = NULL;
+ iomsg->free_iov = NULL;
+ } else if (msg.msg_iovlen > 1) {
+ return -EINVAL;
+ } else {
+ if (copy_from_user(iomsg->fast_iov, msg.msg_iov,
+ sizeof(*msg.msg_iov)))
+ return -EFAULT;
+ sr->len = iomsg->fast_iov[0].iov_len;
+ iomsg->free_iov = NULL;
+ }
+
+ if (ddir == ITER_DEST && req->flags & REQ_F_APOLL_MULTISHOT) {
+ iomsg->namelen = msg.msg_namelen;
+ iomsg->controllen = msg.msg_controllen;
+ if (io_recvmsg_multishot_overflow(iomsg))
+ return -EOVERFLOW;
+ }
+
+ return 0;
+ }
+
+ iomsg->free_iov = iomsg->fast_iov;
+ ret = __import_iovec(ddir, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
+ &iomsg->free_iov, &iomsg->msg.msg_iter, false);
+ if (unlikely(ret < 0))
+ return ret;
+
+ return 0;
+}
+
+static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
+ struct sockaddr __user **addr, int ddir)
+{
+ iomsg->msg.msg_name = &iomsg->addr;
+ iomsg->msg.msg_iter.nr_segs = 0;
+
+#ifdef CONFIG_COMPAT
+ if (req->ctx->compat)
+ return __io_compat_msg_copy_hdr(req, iomsg, addr, ddir);
+#endif
+
+ return __io_msg_copy_hdr(req, iomsg, addr, ddir);
+}
+
static int io_sendmsg_copy_hdr(struct io_kiocb *req,
struct io_async_msghdr *iomsg)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
int ret;
- iomsg->msg.msg_name = &iomsg->addr;
- iomsg->free_iov = iomsg->fast_iov;
- ret = sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
- &iomsg->free_iov);
+ ret = io_msg_copy_hdr(req, iomsg, NULL, ITER_SOURCE);
+ if (ret)
+ return ret;
+
/* save msg_control as sys_sendmsg() overwrites it */
sr->msg_control = iomsg->msg.msg_control_user;
return ret;
@@ -435,142 +569,21 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
return IOU_OK;
}
-static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
-{
- int hdr;
-
- if (iomsg->namelen < 0)
- return true;
- if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
- iomsg->namelen, &hdr))
- return true;
- if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
- return true;
-
- return false;
-}
-
-static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
- struct io_async_msghdr *iomsg)
-{
- struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- struct user_msghdr msg;
- int ret;
-
- if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
- return -EFAULT;
-
- ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
- if (ret)
- return ret;
-
- if (req->flags & REQ_F_BUFFER_SELECT) {
- if (msg.msg_iovlen == 0) {
- sr->len = iomsg->fast_iov[0].iov_len = 0;
- iomsg->fast_iov[0].iov_base = NULL;
- iomsg->free_iov = NULL;
- } else if (msg.msg_iovlen > 1) {
- return -EINVAL;
- } else {
- if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
- return -EFAULT;
- sr->len = iomsg->fast_iov[0].iov_len;
- iomsg->free_iov = NULL;
- }
-
- if (req->flags & REQ_F_APOLL_MULTISHOT) {
- iomsg->namelen = msg.msg_namelen;
- iomsg->controllen = msg.msg_controllen;
- if (io_recvmsg_multishot_overflow(iomsg))
- return -EOVERFLOW;
- }
- } else {
- iomsg->free_iov = iomsg->fast_iov;
- ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
- &iomsg->free_iov, &iomsg->msg.msg_iter,
- false);
- if (ret > 0)
- ret = 0;
- }
-
- return ret;
-}
-
-#ifdef CONFIG_COMPAT
-static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
- struct io_async_msghdr *iomsg)
-{
- struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- struct compat_msghdr msg;
- struct compat_iovec __user *uiov;
- int ret;
-
- if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
- return -EFAULT;
-
- ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
- if (ret)
- return ret;
-
- uiov = compat_ptr(msg.msg_iov);
- if (req->flags & REQ_F_BUFFER_SELECT) {
- compat_ssize_t clen;
-
- iomsg->free_iov = NULL;
- if (msg.msg_iovlen == 0) {
- sr->len = 0;
- } else if (msg.msg_iovlen > 1) {
- return -EINVAL;
- } else {
- if (!access_ok(uiov, sizeof(*uiov)))
- return -EFAULT;
- if (__get_user(clen, &uiov->iov_len))
- return -EFAULT;
- if (clen < 0)
- return -EINVAL;
- sr->len = clen;
- }
-
- if (req->flags & REQ_F_APOLL_MULTISHOT) {
- iomsg->namelen = msg.msg_namelen;
- iomsg->controllen = msg.msg_controllen;
- if (io_recvmsg_multishot_overflow(iomsg))
- return -EOVERFLOW;
- }
- } else {
- iomsg->free_iov = iomsg->fast_iov;
- ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
- UIO_FASTIOV, &iomsg->free_iov,
- &iomsg->msg.msg_iter, true);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-#endif
-
static int io_recvmsg_copy_hdr(struct io_kiocb *req,
struct io_async_msghdr *iomsg)
{
- iomsg->msg.msg_name = &iomsg->addr;
- iomsg->msg.msg_iter.nr_segs = 0;
-
-#ifdef CONFIG_COMPAT
- if (req->ctx->compat)
- return __io_compat_recvmsg_copy_hdr(req, iomsg);
-#endif
-
- return __io_recvmsg_copy_hdr(req, iomsg);
+ return io_msg_copy_hdr(req, iomsg, &iomsg->uaddr, ITER_DEST);
}
int io_recvmsg_prep_async(struct io_kiocb *req)
{
+ struct io_async_msghdr *iomsg;
int ret;
if (!io_msg_alloc_async_prep(req))
return -ENOMEM;
- ret = io_recvmsg_copy_hdr(req, req->async_data);
+ iomsg = req->async_data;
+ ret = io_recvmsg_copy_hdr(req, iomsg);
if (!ret)
req->flags |= REQ_F_NEED_CLEANUP;
return ret;
--
2.43.0
* [PATCH 2/3] io_uring/net: move receive multishot out of the generic msghdr path
From: Jens Axboe @ 2024-02-27 18:46 UTC
To: io-uring; +Cc: Jens Axboe
Move the actual user_msghdr / compat_msghdr copying into the send and
receive sides, respectively, so we can move the uaddr receive handling
into its own handler, and likewise the multishot-with-buffer-selection
logic.
Signed-off-by: Jens Axboe <[email protected]>
---
io_uring/net.c | 161 ++++++++++++++++++++++++++++---------------------
1 file changed, 91 insertions(+), 70 deletions(-)
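As a side note, here is a minimal userspace sketch of the header-size
overflow guard that ends up in io_recvmsg_mshot_prep() (illustrative
only: the struct below is just a stand-in with the same layout role as
struct io_uring_recvmsg_out, and __builtin_add_overflow() stands in for
the kernel's check_add_overflow()).

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* stand-in for struct io_uring_recvmsg_out (four u32 fields) */
struct recvmsg_out_stub {
	unsigned int namelen;
	unsigned int controllen;
	unsigned int payloadlen;
	unsigned int flags;
};

/* returns true if header + name + control cannot fit in an int */
static bool mshot_hdr_overflows(int namelen, int controllen)
{
	int hdr;

	if (namelen < 0)
		return true;
	if (__builtin_add_overflow((int)sizeof(struct recvmsg_out_stub),
				   namelen, &hdr))
		return true;
	if (__builtin_add_overflow(hdr, controllen, &hdr))
		return true;
	return false;
}

int main(void)
{
	printf("64 + 256 overflows: %d\n", mshot_hdr_overflows(64, 256));
	printf("INT_MAX + 1 overflows: %d\n", mshot_hdr_overflows(INT_MAX, 1));
	return 0;
}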
diff --git a/io_uring/net.c b/io_uring/net.c
index 8c64e552a00f..7a07c5563d66 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -204,46 +204,26 @@ static int io_setup_async_msg(struct io_kiocb *req,
return -EAGAIN;
}
-static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
-{
- int hdr;
-
- if (iomsg->namelen < 0)
- return true;
- if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
- iomsg->namelen, &hdr))
- return true;
- if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
- return true;
-
- return false;
-}
-
#ifdef CONFIG_COMPAT
-static int __io_compat_msg_copy_hdr(struct io_kiocb *req,
- struct io_async_msghdr *iomsg,
- struct sockaddr __user **addr, int ddir)
+static int io_compat_msg_copy_hdr(struct io_kiocb *req,
+ struct io_async_msghdr *iomsg,
+ struct compat_msghdr *msg, int ddir)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- struct compat_msghdr msg;
struct compat_iovec __user *uiov;
int ret;
- if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
+ if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
return -EFAULT;
- ret = __get_compat_msghdr(&iomsg->msg, &msg, addr);
- if (ret)
- return ret;
-
- uiov = compat_ptr(msg.msg_iov);
+ uiov = compat_ptr(msg->msg_iov);
if (req->flags & REQ_F_BUFFER_SELECT) {
compat_ssize_t clen;
iomsg->free_iov = NULL;
- if (msg.msg_iovlen == 0) {
+ if (msg->msg_iovlen == 0) {
sr->len = 0;
- } else if (msg.msg_iovlen > 1) {
+ } else if (msg->msg_iovlen > 1) {
return -EINVAL;
} else {
if (!access_ok(uiov, sizeof(*uiov)))
@@ -255,18 +235,11 @@ static int __io_compat_msg_copy_hdr(struct io_kiocb *req,
sr->len = clen;
}
- if (ddir == ITER_DEST && req->flags & REQ_F_APOLL_MULTISHOT) {
- iomsg->namelen = msg.msg_namelen;
- iomsg->controllen = msg.msg_controllen;
- if (io_recvmsg_multishot_overflow(iomsg))
- return -EOVERFLOW;
- }
-
return 0;
}
iomsg->free_iov = iomsg->fast_iov;
- ret = __import_iovec(ddir, (struct iovec __user *)uiov, msg.msg_iovlen,
+ ret = __import_iovec(ddir, (struct iovec __user *)uiov, msg->msg_iovlen,
UIO_FASTIOV, &iomsg->free_iov,
&iomsg->msg.msg_iter, true);
if (unlikely(ret < 0))
@@ -276,47 +249,35 @@ static int __io_compat_msg_copy_hdr(struct io_kiocb *req,
}
#endif
-static int __io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
- struct sockaddr __user **addr, int ddir)
+static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
+ struct user_msghdr *msg, int ddir)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- struct user_msghdr msg;
int ret;
- if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
+ if (copy_from_user(msg, sr->umsg, sizeof(*sr->umsg)))
return -EFAULT;
- ret = __copy_msghdr(&iomsg->msg, &msg, addr);
- if (ret)
- return ret;
-
if (req->flags & REQ_F_BUFFER_SELECT) {
- if (msg.msg_iovlen == 0) {
+ if (msg->msg_iovlen == 0) {
sr->len = iomsg->fast_iov[0].iov_len = 0;
iomsg->fast_iov[0].iov_base = NULL;
iomsg->free_iov = NULL;
- } else if (msg.msg_iovlen > 1) {
+ } else if (msg->msg_iovlen > 1) {
return -EINVAL;
} else {
- if (copy_from_user(iomsg->fast_iov, msg.msg_iov,
- sizeof(*msg.msg_iov)))
+ if (copy_from_user(iomsg->fast_iov, msg->msg_iov,
+ sizeof(*msg->msg_iov)))
return -EFAULT;
sr->len = iomsg->fast_iov[0].iov_len;
iomsg->free_iov = NULL;
}
- if (ddir == ITER_DEST && req->flags & REQ_F_APOLL_MULTISHOT) {
- iomsg->namelen = msg.msg_namelen;
- iomsg->controllen = msg.msg_controllen;
- if (io_recvmsg_multishot_overflow(iomsg))
- return -EOVERFLOW;
- }
-
return 0;
}
iomsg->free_iov = iomsg->fast_iov;
- ret = __import_iovec(ddir, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
+ ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, UIO_FASTIOV,
&iomsg->free_iov, &iomsg->msg.msg_iter, false);
if (unlikely(ret < 0))
return ret;
@@ -324,30 +285,34 @@ static int __io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg
return 0;
}
-static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
- struct sockaddr __user **addr, int ddir)
+static int io_sendmsg_copy_hdr(struct io_kiocb *req,
+ struct io_async_msghdr *iomsg)
{
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+ struct user_msghdr msg;
+ int ret;
+
iomsg->msg.msg_name = &iomsg->addr;
iomsg->msg.msg_iter.nr_segs = 0;
#ifdef CONFIG_COMPAT
- if (req->ctx->compat)
- return __io_compat_msg_copy_hdr(req, iomsg, addr, ddir);
-#endif
+ if (unlikely(req->ctx->compat)) {
+ struct compat_msghdr cmsg;
- return __io_msg_copy_hdr(req, iomsg, addr, ddir);
-}
+ ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_SOURCE);
+ if (unlikely(ret))
+ return ret;
-static int io_sendmsg_copy_hdr(struct io_kiocb *req,
- struct io_async_msghdr *iomsg)
-{
- struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- int ret;
+ return __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
+ }
+#endif
- ret = io_msg_copy_hdr(req, iomsg, NULL, ITER_SOURCE);
- if (ret)
+ ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE);
+ if (unlikely(ret))
return ret;
+ ret = __copy_msghdr(&iomsg->msg, &msg, NULL);
+
/* save msg_control as sys_sendmsg() overwrites it */
sr->msg_control = iomsg->msg.msg_control_user;
return ret;
@@ -569,10 +534,66 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
return IOU_OK;
}
+static int io_recvmsg_mshot_prep(struct io_kiocb *req,
+ struct io_async_msghdr *iomsg,
+ size_t namelen, size_t controllen)
+{
+ if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) ==
+ (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) {
+ int hdr;
+
+ if (unlikely(namelen < 0))
+ return -EOVERFLOW;
+ if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
+ namelen, &hdr))
+ return -EOVERFLOW;
+ if (check_add_overflow(hdr, (int)controllen, &hdr))
+ return -EOVERFLOW;
+
+ iomsg->namelen = namelen;
+ iomsg->controllen = controllen;
+ return 0;
+ }
+
+ return 0;
+}
+
static int io_recvmsg_copy_hdr(struct io_kiocb *req,
struct io_async_msghdr *iomsg)
{
- return io_msg_copy_hdr(req, iomsg, &iomsg->uaddr, ITER_DEST);
+ struct user_msghdr msg;
+ int ret;
+
+ iomsg->msg.msg_name = &iomsg->addr;
+ iomsg->msg.msg_iter.nr_segs = 0;
+
+#ifdef CONFIG_COMPAT
+ if (unlikely(req->ctx->compat)) {
+ struct compat_msghdr cmsg;
+
+ ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_DEST);
+ if (unlikely(ret))
+ return ret;
+
+ ret = __get_compat_msghdr(&iomsg->msg, &cmsg, &iomsg->uaddr);
+ if (unlikely(ret))
+ return ret;
+
+ return io_recvmsg_mshot_prep(req, iomsg, cmsg.msg_namelen,
+ cmsg.msg_controllen);
+ }
+#endif
+
+ ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST);
+ if (unlikely(ret))
+ return ret;
+
+ ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
+ if (unlikely(ret))
+ return ret;
+
+ return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen,
+ msg.msg_controllen);
}
int io_recvmsg_prep_async(struct io_kiocb *req)
--
2.43.0
* [PATCH 3/3] io_uring/net: improve the usercopy for sendmsg/recvmsg
From: Jens Axboe @ 2024-02-27 18:46 UTC
To: io-uring; +Cc: Jens Axboe
We're spending a considerable amount of the sendmsg/recvmsg time just
copying in the message header and, for provided buffers, the known
single-entry iovec.
Be a bit smarter about it and enable/disable user access around our
copying. In a test case that does both sendmsg and recvmsg, the runtimes
before and after this change (averaged over multiple runs; the times are
very stable) are:
Kernel          Time        Diff
====================================
-git            4720 usec
-git+commit     4311 usec   -8.7%
and looking at a profile diff, we see the following:
    0.25%   +9.33%  [kernel.kallsyms]  [k] _copy_from_user
    4.47%   -3.32%  [kernel.kallsyms]  [k] __io_msg_copy_hdr.constprop.0
where we drop more than 9% of _copy_from_user() time and consequently
add time to __io_msg_copy_hdr(), where the copies are now attributed,
for a net win of about 6%.
For comparison, the same test case using send/recv runs in 3745 usec,
which is, as expected, still quite a bit faster. But sendmsg/recvmsg is
now only ~13% slower, where it was ~21% slower before.
Signed-off-by: Jens Axboe <[email protected]>
---
io_uring/net.c | 29 ++++++++++++++++++++++-------
1 file changed, 22 insertions(+), 7 deletions(-)
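The test case itself isn't included in the posting; below is a minimal
sketch of what such a measurement could look like with liburing. The
socketpair transport, buffer size, and iteration count are assumptions
for illustration, not the actual benchmark behind the numbers above
(built with something like: cc -O2 bench.c -luring).

/* Rough sketch of a sendmsg/recvmsg timing loop over io_uring -- an
 * assumption about the benchmark shape, not the actual test case.
 */
#include <stdio.h>
#include <time.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <liburing.h>

#define ITERS	100000

int main(void)
{
	char tx[64] = "ping", rx[64];
	struct iovec tiov = { .iov_base = tx, .iov_len = sizeof(tx) };
	struct iovec riov = { .iov_base = rx, .iov_len = sizeof(rx) };
	struct msghdr tmsg = { .msg_iov = &tiov, .msg_iovlen = 1 };
	struct msghdr rmsg = { .msg_iov = &riov, .msg_iovlen = 1 };
	struct io_uring ring;
	struct timespec t0, t1;
	int sv[2];

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) ||
	    io_uring_queue_init(8, &ring, 0))
		return 1;

	clock_gettime(CLOCK_MONOTONIC, &t0);
	for (int i = 0; i < ITERS; i++) {
		struct io_uring_cqe *cqe;

		/* queue one sendmsg and one recvmsg, then reap both */
		io_uring_prep_sendmsg(io_uring_get_sqe(&ring), sv[0], &tmsg, 0);
		io_uring_prep_recvmsg(io_uring_get_sqe(&ring), sv[1], &rmsg, 0);
		io_uring_submit(&ring);

		for (int c = 0; c < 2; c++) {
			if (io_uring_wait_cqe(&ring, &cqe) || cqe->res < 0)
				return 1;
			io_uring_cqe_seen(&ring, cqe);
		}
	}
	clock_gettime(CLOCK_MONOTONIC, &t1);

	printf("%d sendmsg+recvmsg pairs in %ld usec\n", ITERS,
	       (t1.tv_sec - t0.tv_sec) * 1000000L +
	       (t1.tv_nsec - t0.tv_nsec) / 1000);
	return 0;
}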
diff --git a/io_uring/net.c b/io_uring/net.c
index 7a07c5563d66..83fba2882720 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -255,27 +255,42 @@ static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
int ret;
- if (copy_from_user(msg, sr->umsg, sizeof(*sr->umsg)))
+ if (!user_access_begin(sr->umsg, sizeof(*sr->umsg)))
return -EFAULT;
+ ret = -EFAULT;
+ unsafe_get_user(msg->msg_name, &sr->umsg->msg_name, ua_end);
+ unsafe_get_user(msg->msg_namelen, &sr->umsg->msg_namelen, ua_end);
+ unsafe_get_user(msg->msg_iov, &sr->umsg->msg_iov, ua_end);
+ unsafe_get_user(msg->msg_iovlen, &sr->umsg->msg_iovlen, ua_end);
+ unsafe_get_user(msg->msg_control, &sr->umsg->msg_control, ua_end);
+ unsafe_get_user(msg->msg_controllen, &sr->umsg->msg_controllen, ua_end);
+ msg->msg_flags = 0;
+
if (req->flags & REQ_F_BUFFER_SELECT) {
if (msg->msg_iovlen == 0) {
sr->len = iomsg->fast_iov[0].iov_len = 0;
iomsg->fast_iov[0].iov_base = NULL;
iomsg->free_iov = NULL;
} else if (msg->msg_iovlen > 1) {
- return -EINVAL;
+ ret = -EINVAL;
+ goto ua_end;
} else {
- if (copy_from_user(iomsg->fast_iov, msg->msg_iov,
- sizeof(*msg->msg_iov)))
- return -EFAULT;
+ /* we only need the length for provided buffers */
+ if (!access_ok(&msg->msg_iov[0].iov_len, sizeof(__kernel_size_t)))
+ goto ua_end;
+ unsafe_get_user(iomsg->fast_iov[0].iov_len,
+ &msg->msg_iov[0].iov_len, ua_end);
sr->len = iomsg->fast_iov[0].iov_len;
iomsg->free_iov = NULL;
}
-
- return 0;
+ ret = 0;
+ua_end:
+ user_access_end();
+ return ret;
}
+ user_access_end();
iomsg->free_iov = iomsg->fast_iov;
ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, UIO_FASTIOV,
&iomsg->free_iov, &iomsg->msg.msg_iter, false);
--
2.43.0