public inbox for [email protected]
* [PATCH 0/5] small for-next cleanups
From: Pavel Begunkov @ 2022-03-25 11:52 UTC (permalink / raw)
  To: io-uring; +Cc: Jens Axboe, asml.silence

Minor cleanups around the code w/o any particular theme.

Pavel Begunkov (5):
  io_uring: cleanup conditional submit locking
  io_uring: partially uninline io_put_task()
  io_uring: silence io_for_each_link() warning
  io_uring: refactor io_req_add_compl_list()
  io_uring: improve req fields comments

 fs/io_uring.c | 130 +++++++++++++++++++++++---------------------------
 1 file changed, 61 insertions(+), 69 deletions(-)

-- 
2.35.1



* [PATCH 1/5] io_uring: cleanup conditional submit locking
From: Pavel Begunkov @ 2022-03-25 11:52 UTC (permalink / raw)
  To: io-uring; +Cc: Jens Axboe, asml.silence

Refactor io_ring_submit_[un]lock() to accept issue_flags and drop the
manual IO_URING_F_UNLOCKED checks at the call sites. This also lets us
place the lockdep annotations inside the helpers instead of sprinkling
them in a bunch of places. There is only one user that no longer fits,
so hand-code the locking in __io_rsrc_put_work().
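
For reference, a minimal sketch of the call-site pattern this enables
(helper names as in the diff below; the surrounding snippet is
hypothetical):

	/* before: each caller derived needs_lock and asserted by hand */
	bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;

	io_ring_submit_lock(ctx, needs_lock);
	lockdep_assert_held(&ctx->uring_lock);
	/* ... touch ctx state ... */
	io_ring_submit_unlock(ctx, needs_lock);

	/* after: the helpers take issue_flags and assert internally */
	io_ring_submit_lock(ctx, issue_flags);
	/* ... touch ctx state ... */
	io_ring_submit_unlock(ctx, issue_flags);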

Signed-off-by: Pavel Begunkov <[email protected]>
---
 fs/io_uring.c | 95 ++++++++++++++++++++++-----------------------------
 1 file changed, 41 insertions(+), 54 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index bb40c80fd9ca..3e6f334ba520 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1213,6 +1213,26 @@ struct sock *io_uring_get_socket(struct file *file)
 }
 EXPORT_SYMBOL(io_uring_get_socket);
 
+static void io_ring_submit_unlock(struct io_ring_ctx *ctx, unsigned issue_flags)
+{
+	lockdep_assert_held(&ctx->uring_lock);
+	if (issue_flags & IO_URING_F_UNLOCKED)
+		mutex_unlock(&ctx->uring_lock);
+}
+
+static void io_ring_submit_lock(struct io_ring_ctx *ctx, unsigned issue_flags)
+{
+	/*
+	 * "Normal" inline submissions always hold the uring_lock, since we
+	 * grab it from the system call. Same is true for the SQPOLL offload.
+	 * The only exception is when we've detached the request and issue it
+	 * from an async worker thread, grab the lock for that case.
+	 */
+	if (issue_flags & IO_URING_F_UNLOCKED)
+		mutex_lock(&ctx->uring_lock);
+	lockdep_assert_held(&ctx->uring_lock);
+}
+
 static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
 {
 	if (!*locked) {
@@ -1399,10 +1419,7 @@ static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 	if (req->flags & REQ_F_PARTIAL_IO)
 		return;
 
-	if (issue_flags & IO_URING_F_UNLOCKED)
-		mutex_lock(&ctx->uring_lock);
-
-	lockdep_assert_held(&ctx->uring_lock);
+	io_ring_submit_lock(ctx, issue_flags);
 
 	buf = req->kbuf;
 	bl = io_buffer_get_list(ctx, buf->bgid);
@@ -1410,8 +1427,7 @@ static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 	req->flags &= ~REQ_F_BUFFER_SELECTED;
 	req->kbuf = NULL;
 
-	if (issue_flags & IO_URING_F_UNLOCKED)
-		mutex_unlock(&ctx->uring_lock);
+	io_ring_submit_unlock(ctx, issue_flags);
 }
 
 static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
@@ -3371,24 +3387,6 @@ static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
 	return __io_import_fixed(req, rw, iter, imu);
 }
 
-static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
-{
-	if (needs_lock)
-		mutex_unlock(&ctx->uring_lock);
-}
-
-static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
-{
-	/*
-	 * "Normal" inline submissions always hold the uring_lock, since we
-	 * grab it from the system call. Same is true for the SQPOLL offload.
-	 * The only exception is when we've detached the request and issue it
-	 * from an async worker thread, grab the lock for that case.
-	 */
-	if (needs_lock)
-		mutex_lock(&ctx->uring_lock);
-}
-
 static void io_buffer_add_list(struct io_ring_ctx *ctx,
 			       struct io_buffer_list *bl, unsigned int bgid)
 {
@@ -3404,16 +3402,13 @@ static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
 					  int bgid, unsigned int issue_flags)
 {
 	struct io_buffer *kbuf = req->kbuf;
-	bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_buffer_list *bl;
 
 	if (req->flags & REQ_F_BUFFER_SELECTED)
 		return kbuf;
 
-	io_ring_submit_lock(ctx, needs_lock);
-
-	lockdep_assert_held(&ctx->uring_lock);
+	io_ring_submit_lock(req->ctx, issue_flags);
 
 	bl = io_buffer_get_list(ctx, bgid);
 	if (bl && !list_empty(&bl->buf_list)) {
@@ -3427,7 +3422,7 @@ static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
 		kbuf = ERR_PTR(-ENOBUFS);
 	}
 
-	io_ring_submit_unlock(req->ctx, needs_lock);
+	io_ring_submit_unlock(req->ctx, issue_flags);
 	return kbuf;
 }
 
@@ -4746,11 +4741,8 @@ static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_buffer_list *bl;
 	int ret = 0;
-	bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
 
-	io_ring_submit_lock(ctx, needs_lock);
-
-	lockdep_assert_held(&ctx->uring_lock);
+	io_ring_submit_lock(ctx, issue_flags);
 
 	ret = -ENOENT;
 	bl = io_buffer_get_list(ctx, p->bgid);
@@ -4761,7 +4753,7 @@ static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
 
 	/* complete before unlock, IOPOLL may need the lock */
 	__io_req_complete(req, issue_flags, ret, 0);
-	io_ring_submit_unlock(ctx, needs_lock);
+	io_ring_submit_unlock(ctx, issue_flags);
 	return 0;
 }
 
@@ -4875,11 +4867,8 @@ static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_buffer_list *bl;
 	int ret = 0;
-	bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
-
-	io_ring_submit_lock(ctx, needs_lock);
 
-	lockdep_assert_held(&ctx->uring_lock);
+	io_ring_submit_lock(ctx, issue_flags);
 
 	bl = io_buffer_get_list(ctx, p->bgid);
 	if (unlikely(!bl)) {
@@ -4897,7 +4886,7 @@ static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
 		req_set_fail(req);
 	/* complete before unlock, IOPOLL may need the lock */
 	__io_req_complete(req, issue_flags, ret, 0);
-	io_ring_submit_unlock(ctx, needs_lock);
+	io_ring_submit_unlock(ctx, issue_flags);
 	return 0;
 }
 
@@ -6885,7 +6874,6 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	u64 sqe_addr = req->cancel.addr;
-	bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
 	struct io_tctx_node *node;
 	int ret;
 
@@ -6894,7 +6882,7 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 		goto done;
 
 	/* slow path, try all io-wq's */
-	io_ring_submit_lock(ctx, needs_lock);
+	io_ring_submit_lock(ctx, issue_flags);
 	ret = -ENOENT;
 	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
 		struct io_uring_task *tctx = node->task->io_uring;
@@ -6903,7 +6891,7 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 		if (ret != -ENOENT)
 			break;
 	}
-	io_ring_submit_unlock(ctx, needs_lock);
+	io_ring_submit_unlock(ctx, issue_flags);
 done:
 	if (ret < 0)
 		req_set_fail(req);
@@ -6930,7 +6918,6 @@ static int io_rsrc_update_prep(struct io_kiocb *req,
 static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
 	struct io_uring_rsrc_update2 up;
 	int ret;
 
@@ -6940,10 +6927,10 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
 	up.tags = 0;
 	up.resv = 0;
 
-	io_ring_submit_lock(ctx, needs_lock);
+	io_ring_submit_lock(ctx, issue_flags);
 	ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
 					&up, req->rsrc_update.nr_args);
-	io_ring_submit_unlock(ctx, needs_lock);
+	io_ring_submit_unlock(ctx, issue_flags);
 
 	if (ret < 0)
 		req_set_fail(req);
@@ -8949,15 +8936,17 @@ static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
 		list_del(&prsrc->list);
 
 		if (prsrc->tag) {
-			bool lock_ring = ctx->flags & IORING_SETUP_IOPOLL;
+			if (ctx->flags & IORING_SETUP_IOPOLL)
+				mutex_lock(&ctx->uring_lock);
 
-			io_ring_submit_lock(ctx, lock_ring);
 			spin_lock(&ctx->completion_lock);
 			io_fill_cqe_aux(ctx, prsrc->tag, 0, 0);
 			io_commit_cqring(ctx);
 			spin_unlock(&ctx->completion_lock);
 			io_cqring_ev_posted(ctx);
-			io_ring_submit_unlock(ctx, lock_ring);
+
+			if (ctx->flags & IORING_SETUP_IOPOLL)
+				mutex_unlock(&ctx->uring_lock);
 		}
 
 		rsrc_data->do_put(ctx, prsrc);
@@ -9131,12 +9120,11 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
 				 unsigned int issue_flags, u32 slot_index)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
 	bool needs_switch = false;
 	struct io_fixed_file *file_slot;
 	int ret = -EBADF;
 
-	io_ring_submit_lock(ctx, needs_lock);
+	io_ring_submit_lock(ctx, issue_flags);
 	if (file->f_op == &io_uring_fops)
 		goto err;
 	ret = -ENXIO;
@@ -9177,7 +9165,7 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
 err:
 	if (needs_switch)
 		io_rsrc_node_switch(ctx, ctx->file_data);
-	io_ring_submit_unlock(ctx, needs_lock);
+	io_ring_submit_unlock(ctx, issue_flags);
 	if (ret)
 		fput(file);
 	return ret;
@@ -9187,12 +9175,11 @@ static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
 {
 	unsigned int offset = req->close.file_slot - 1;
 	struct io_ring_ctx *ctx = req->ctx;
-	bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
 	struct io_fixed_file *file_slot;
 	struct file *file;
 	int ret, i;
 
-	io_ring_submit_lock(ctx, needs_lock);
+	io_ring_submit_lock(ctx, issue_flags);
 	ret = -ENXIO;
 	if (unlikely(!ctx->file_data))
 		goto out;
@@ -9218,7 +9205,7 @@ static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
 	io_rsrc_node_switch(ctx, ctx->file_data);
 	ret = 0;
 out:
-	io_ring_submit_unlock(ctx, needs_lock);
+	io_ring_submit_unlock(ctx, issue_flags);
 	return ret;
 }
 
-- 
2.35.1



* [PATCH 2/5] io_uring: partially uninline io_put_task()
From: Pavel Begunkov @ 2022-03-25 11:52 UTC (permalink / raw)
  To: io-uring; +Cc: Jens Axboe, asml.silence

In most cases io_put_task() is called from the submitter task and goes
through a highly optimised fast path, which has to be inlined. The
other branch, though, is bulkier, and we care less about it because it
implies atomics and other heavy calls. Extract it into a helper, which
is expected not to be inlined.

[before] size ./fs/io_uring.o
   text    data     bss     dec     hex filename
  89328   13646       8  102982   19246 ./fs/io_uring.o
[after] size ./fs/io_uring.o
   text    data     bss     dec     hex filename
  89096   13646       8  102750   1915e ./fs/io_uring.o
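
The hot/cold split follows the usual pattern (a generic sketch with
made-up names, not the io_uring code itself):

	struct obj {
		struct task_struct	*owner;
		int			cached_refs;
	};

	/* slow path: out of line, so the inlined fast path stays small */
	static void __do_put_slow(struct obj *o, int nr)
	{
		/* atomics, wakeups and other heavy work live here */
	}

	static inline void do_put(struct obj *o, int nr)
	{
		if (likely(o->owner == current))
			o->cached_refs += nr;	/* cheap, stays inlined */
		else
			__do_put_slow(o, nr);	/* rare, out of line */
	}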

Signed-off-by: Pavel Begunkov <[email protected]>
---
 fs/io_uring.c | 24 ++++++++++++++----------
 1 file changed, 14 insertions(+), 10 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3e6f334ba520..b868c7c85a94 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2000,19 +2000,23 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
 	return ret;
 }
 
-/* must to be called somewhat shortly after putting a request */
-static inline void io_put_task(struct task_struct *task, int nr)
+static void __io_put_task(struct task_struct *task, int nr)
 {
 	struct io_uring_task *tctx = task->io_uring;
 
-	if (likely(task == current)) {
-		tctx->cached_refs += nr;
-	} else {
-		percpu_counter_sub(&tctx->inflight, nr);
-		if (unlikely(atomic_read(&tctx->in_idle)))
-			wake_up(&tctx->wait);
-		put_task_struct_many(task, nr);
-	}
+	percpu_counter_sub(&tctx->inflight, nr);
+	if (unlikely(atomic_read(&tctx->in_idle)))
+		wake_up(&tctx->wait);
+	put_task_struct_many(task, nr);
+}
+
+/* must to be called somewhat shortly after putting a request */
+static inline void io_put_task(struct task_struct *task, int nr)
+{
+	if (likely(task == current))
+		task->io_uring->cached_refs += nr;
+	else
+		__io_put_task(task, nr);
 }
 
 static void io_task_refs_refill(struct io_uring_task *tctx)
-- 
2.35.1



* [PATCH 3/5] io_uring: silence io_for_each_link() warning
From: Pavel Begunkov @ 2022-03-25 11:52 UTC (permalink / raw)
  To: io-uring; +Cc: Jens Axboe, asml.silence

Some tooling keeps complaining about the self-assignment in
io_for_each_link(). The code is correct, but let's work around it
anyway.
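
For context, the macro expands roughly like this (a sketch of the
io_for_each_link() definition in fs/io_uring.c):

	#define io_for_each_link(pos, head) \
		for (pos = (head); pos; pos = pos->link)

so io_for_each_link(req, req) starts with "req = req", a
self-assignment that static analysers flag even though the loop is
correct; iterating with a separate cursor avoids the report.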

Signed-off-by: Pavel Begunkov <[email protected]>
---
 fs/io_uring.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index b868c7c85a94..e651a0bb00fe 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -7057,9 +7057,10 @@ static int io_req_prep_async(struct io_kiocb *req)
 static u32 io_get_sequence(struct io_kiocb *req)
 {
 	u32 seq = req->ctx->cached_sq_head;
+	struct io_kiocb *cur;
 
 	/* need original cached_sq_head, but it was increased for each req */
-	io_for_each_link(req, req)
+	io_for_each_link(cur, req)
 		seq--;
 	return seq;
 }
-- 
2.35.1



* [PATCH 4/5] io_uring: refactor io_req_add_compl_list()
From: Pavel Begunkov @ 2022-03-25 11:52 UTC (permalink / raw)
  To: io-uring; +Cc: Jens Axboe, asml.silence

A small refactoring of io_req_add_compl_list() that deduplicates some
code.

Signed-off-by: Pavel Begunkov <[email protected]>
---
 fs/io_uring.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index e651a0bb00fe..9cd33278089b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1726,11 +1726,10 @@ static void io_prep_async_link(struct io_kiocb *req)
 
 static inline void io_req_add_compl_list(struct io_kiocb *req)
 {
-	struct io_ring_ctx *ctx = req->ctx;
-	struct io_submit_state *state = &ctx->submit_state;
+	struct io_submit_state *state = &req->ctx->submit_state;
 
 	if (!(req->flags & REQ_F_CQE_SKIP))
-		ctx->submit_state.flush_cqes = true;
+		state->flush_cqes = true;
 	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
 }
 
-- 
2.35.1



* [PATCH 5/5] io_uring: improve req fields comments
From: Pavel Begunkov @ 2022-03-25 11:52 UTC (permalink / raw)
  To: io-uring; +Cc: Jens Axboe, asml.silence

Move a misplaced comment about req->creds and add a line documenting
the assumptions about req->link.

Signed-off-by: Pavel Begunkov <[email protected]>
---
 fs/io_uring.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 9cd33278089b..51a00ef88136 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -935,10 +935,11 @@ struct io_kiocb {
 	struct async_poll		*apoll;
 	/* opcode allocated if it needs to store data for async defer */
 	void				*async_data;
-	/* custom credentials, valid IFF REQ_F_CREDS is set */
 	/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
 	struct io_buffer		*kbuf;
+	/* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
 	struct io_kiocb			*link;
+	/* custom credentials, valid IFF REQ_F_CREDS is set */
 	const struct cred		*creds;
 	struct io_wq_work		work;
 };
-- 
2.35.1



* Re: [PATCH 3/5] io_uring: silence io_for_each_link() warning
From: Jens Axboe @ 2022-03-25 12:35 UTC (permalink / raw)
  To: Pavel Begunkov, io-uring

On 3/25/22 5:52 AM, Pavel Begunkov wrote:
> Some tooling keeps complaining about the self-assignment in
> io_for_each_link(). The code is correct, but let's work around it
> anyway.

Honestly, it's worth it to avoid having to reply about reports on
this. So thanks for doing that.

-- 
Jens Axboe



* Re: [PATCH 5/5] io_uring: improve req fields comments
From: Jens Axboe @ 2022-03-25 12:37 UTC (permalink / raw)
  To: Pavel Begunkov, io-uring

On 3/25/22 5:52 AM, Pavel Begunkov wrote:
> Move a misplaced comment about req->creds and add a line documenting
> the assumptions about req->link.

I'm going to pick this one for 5.18.

-- 
Jens Axboe



* Re: (subset) [PATCH 0/5] small for-next cleanups
From: Jens Axboe @ 2022-03-25 12:38 UTC (permalink / raw)
  To: io-uring, Pavel Begunkov

On Fri, 25 Mar 2022 11:52:13 +0000, Pavel Begunkov wrote:
> Minor cleanups around the code w/o any particular theme.
> 
> Pavel Begunkov (5):
>   io_uring: cleanup conditional submit locking
>   io_uring: partially uninline io_put_task()
>   io_uring: silence io_for_each_link() warning
>   io_uring: refactor io_req_add_compl_list()
>   io_uring: improve req fields comments
> 
> [...]

Applied, thanks!

[1/5] io_uring: cleanup conditional submit locking
      commit: 81c39c8099a617f7603a108862216abd23e2b7de
[2/5] io_uring: partially uninline io_put_task()
      commit: 72a1ccb0b1db12d8b4de100fbc668fc7addb6723
[3/5] io_uring: silence io_for_each_link() warning
      commit: 4ab65ca65797a7777721c21d3bd37cf21d2c2774
[4/5] io_uring: refactor io_req_add_compl_list()
      commit: 45c468c636f1ecb6dde1619aca5b716e6fbd5a9c

Best regards,
-- 
Jens Axboe




* Re: (subset) [PATCH 0/5] small for-next cleanups
From: Jens Axboe @ 2022-03-25 12:39 UTC (permalink / raw)
  To: Pavel Begunkov, io-uring

On Fri, 25 Mar 2022 11:52:13 +0000, Pavel Begunkov wrote:
> Minor cleanups around the code w/o any particular theme.
> 
> Pavel Begunkov (5):
>   io_uring: cleanup conditional submit locking
>   io_uring: partially uninline io_put_task()
>   io_uring: silence io_for_each_link() warning
>   io_uring: refactor io_req_add_compl_list()
>   io_uring: improve req fields comments
> 
> [...]

Applied, thanks!

[5/5] io_uring: improve req fields comments
      commit: 41cdcc2202d4c466534b8f38975d2e6b16317c0c

Best regards,
-- 
Jens Axboe




end of thread

Thread overview: 10+ messages
2022-03-25 11:52 [PATCH 0/5] small for-next cleanups Pavel Begunkov
2022-03-25 11:52 ` [PATCH 1/5] io_uring: cleanup conditional submit locking Pavel Begunkov
2022-03-25 11:52 ` [PATCH 2/5] io_uring: partially uninline io_put_task() Pavel Begunkov
2022-03-25 11:52 ` [PATCH 3/5] io_uring: silence io_for_each_link() warning Pavel Begunkov
2022-03-25 12:35   ` Jens Axboe
2022-03-25 11:52 ` [PATCH 4/5] io_uring: refactor io_req_add_compl_list() Pavel Begunkov
2022-03-25 11:52 ` [PATCH 5/5] io_uring: improve req fields comments Pavel Begunkov
2022-03-25 12:37   ` Jens Axboe
2022-03-25 12:38 ` (subset) [PATCH 0/5] small for-next cleanups Jens Axboe
2022-03-25 12:39 ` Jens Axboe
