* [PATCH v3 0/3] async punting improvements for io_uring
@ 2020-02-24 8:30 Pavel Begunkov
2020-02-24 8:30 ` [PATCH v3 1/3] io_uring: don't call work.func from sync ctx Pavel Begunkov
` (3 more replies)
0 siblings, 4 replies; 10+ messages in thread
From: Pavel Begunkov @ 2020-02-24 8:30 UTC (permalink / raw)
To: Jens Axboe, io-uring
*on top of for-5.6*
This cleans up io-wq punting paths, doing small fixes and removing
unnecessary logic from different submission paths.
v2:
- remove pid-related comment, as it's fixed separately
- make ("add missing io_req_cancelled()") first
in the series, so it may be picked for 5.6
v3:
- rebase + drop a patch definitely colliding with poll work
Pavel Begunkov (3):
io_uring: don't call work.func from sync ctx
io_uring: don't do full *prep_worker() from io-wq
io_uring: remove req->in_async
fs/io_uring.c | 101 +++++++++++++++++++++++++++-----------------------
1 file changed, 54 insertions(+), 47 deletions(-)
--
2.24.0
* [PATCH v3 1/3] io_uring: don't call work.func from sync ctx
2020-02-24 8:30 [PATCH v3 0/3] async punting improvements for io_uring Pavel Begunkov
@ 2020-02-24 8:30 ` Pavel Begunkov
2020-02-24 8:30 ` [PATCH v3 2/3] io_uring: don't do full *prep_worker() from io-wq Pavel Begunkov
` (2 subsequent siblings)
3 siblings, 0 replies; 10+ messages in thread
From: Pavel Begunkov @ 2020-02-24 8:30 UTC (permalink / raw)
To: Jens Axboe, io-uring
Many operations define a custom work.func before being punted to io-wq.
There are several points against this:
- it calls io_wq_assign_next() from outside io-wq, which may be confusing
- the sync context would go through io_req_cancelled() unnecessarily
- the prototypes are quite different, so work != old_work looks strange
- it makes async/sync responsibilities fuzzy
- it adds extra overhead
Don't call the generic path and io-wq handlers from each other; use
helpers instead.
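To show the shape of the change outside the diff, here is a condensed
userspace model of the pattern. The opcode "foo" is hypothetical and the
kernel pieces (io_kiocb, io_put_req_find_next(), io_wq_assign_next()) are
stubbed, so treat it as an illustrative sketch rather than kernel code:

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

/* Stand-in for struct io_kiocb; only what the sketch needs. */
struct io_kiocb {
	int result;
	struct io_kiocb *link;		/* next linked request, if any */
};

/* Shared core, analogous to the new __io_fsync()/__io_fallocate():
 * do the blocking work, record the result, report a linked request. */
static void __io_foo(struct io_kiocb *req, struct io_kiocb **nxt)
{
	req->result = 0;		/* the blocking operation would run here */
	*nxt = req->link;		/* stands in for io_put_req_find_next() */
}

/* io-wq callback: only the async side hands the next request back to
 * the worker (io_wq_assign_next() in the real code). */
static void io_foo_finish(struct io_kiocb **workptr)
{
	struct io_kiocb *req = *workptr;
	struct io_kiocb *nxt = NULL;

	__io_foo(req, &nxt);
	if (nxt)
		*workptr = nxt;
}

/* Sync submission path: punt when non-blocking is forced, otherwise
 * call the helper directly -- no work != old_work dance. */
static int io_foo(struct io_kiocb *req, struct io_kiocb **nxt,
		  bool force_nonblock)
{
	if (force_nonblock)
		return -EAGAIN;		/* the real code also sets work.func */
	__io_foo(req, nxt);
	return 0;
}

int main(void)
{
	struct io_kiocb req = { .link = NULL };
	struct io_kiocb *nxt = NULL;
	struct io_kiocb *work = &req;

	printf("sync path returned %d\n", io_foo(&req, &nxt, false));
	io_foo_finish(&work);		/* what io-wq would do after a punt */
	return 0;
}

The real __io_fsync()/__io_fallocate()/__io_sync_file_range() helpers in
the diff below follow the same split.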
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 76 +++++++++++++++++++++++++--------------------------
1 file changed, 38 insertions(+), 38 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 7d0be264527d..819661f49023 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2462,23 +2462,28 @@ static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
}
}
-static void io_fsync_finish(struct io_wq_work **workptr)
+static void __io_fsync(struct io_kiocb *req, struct io_kiocb **nxt)
{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
loff_t end = req->sync.off + req->sync.len;
- struct io_kiocb *nxt = NULL;
int ret;
- if (io_req_cancelled(req))
- return;
-
ret = vfs_fsync_range(req->file, req->sync.off,
end > 0 ? end : LLONG_MAX,
req->sync.flags & IORING_FSYNC_DATASYNC);
if (ret < 0)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
- io_put_req_find_next(req, &nxt);
+ io_put_req_find_next(req, nxt);
+}
+
+static void io_fsync_finish(struct io_wq_work **workptr)
+{
+ struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ struct io_kiocb *nxt = NULL;
+
+ if (io_req_cancelled(req))
+ return;
+ __io_fsync(req, &nxt);
if (nxt)
io_wq_assign_next(workptr, nxt);
}
@@ -2486,26 +2491,18 @@ static void io_fsync_finish(struct io_wq_work **workptr)
static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
bool force_nonblock)
{
- struct io_wq_work *work, *old_work;
-
/* fsync always requires a blocking context */
if (force_nonblock) {
io_put_req(req);
req->work.func = io_fsync_finish;
return -EAGAIN;
}
-
- work = old_work = &req->work;
- io_fsync_finish(&work);
- if (work && work != old_work)
- *nxt = container_of(work, struct io_kiocb, work);
+ __io_fsync(req, nxt);
return 0;
}
-static void io_fallocate_finish(struct io_wq_work **workptr)
+static void __io_fallocate(struct io_kiocb *req, struct io_kiocb **nxt)
{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
- struct io_kiocb *nxt = NULL;
int ret;
if (io_req_cancelled(req))
@@ -2516,7 +2513,15 @@ static void io_fallocate_finish(struct io_wq_work **workptr)
if (ret < 0)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
- io_put_req_find_next(req, &nxt);
+ io_put_req_find_next(req, nxt);
+}
+
+static void io_fallocate_finish(struct io_wq_work **workptr)
+{
+ struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ struct io_kiocb *nxt = NULL;
+
+ __io_fallocate(req, &nxt);
if (nxt)
io_wq_assign_next(workptr, nxt);
}
@@ -2536,8 +2541,6 @@ static int io_fallocate_prep(struct io_kiocb *req,
static int io_fallocate(struct io_kiocb *req, struct io_kiocb **nxt,
bool force_nonblock)
{
- struct io_wq_work *work, *old_work;
-
/* fallocate always requiring blocking context */
if (force_nonblock) {
io_put_req(req);
@@ -2545,11 +2548,7 @@ static int io_fallocate(struct io_kiocb *req, struct io_kiocb **nxt,
return -EAGAIN;
}
- work = old_work = &req->work;
- io_fallocate_finish(&work);
- if (work && work != old_work)
- *nxt = container_of(work, struct io_kiocb, work);
-
+ __io_fallocate(req, nxt);
return 0;
}
@@ -2953,21 +2952,27 @@ static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0;
}
-static void io_sync_file_range_finish(struct io_wq_work **workptr)
+static void __io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt)
{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
- struct io_kiocb *nxt = NULL;
int ret;
- if (io_req_cancelled(req))
- return;
-
ret = sync_file_range(req->file, req->sync.off, req->sync.len,
req->sync.flags);
if (ret < 0)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
- io_put_req_find_next(req, &nxt);
+ io_put_req_find_next(req, nxt);
+}
+
+
+static void io_sync_file_range_finish(struct io_wq_work **workptr)
+{
+ struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ struct io_kiocb *nxt = NULL;
+
+ if (io_req_cancelled(req))
+ return;
+ __io_sync_file_range(req, &nxt);
if (nxt)
io_wq_assign_next(workptr, nxt);
}
@@ -2975,8 +2980,6 @@ static void io_sync_file_range_finish(struct io_wq_work **workptr)
static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
bool force_nonblock)
{
- struct io_wq_work *work, *old_work;
-
/* sync_file_range always requires a blocking context */
if (force_nonblock) {
io_put_req(req);
@@ -2984,10 +2987,7 @@ static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
return -EAGAIN;
}
- work = old_work = &req->work;
- io_sync_file_range_finish(&work);
- if (work && work != old_work)
- *nxt = container_of(work, struct io_kiocb, work);
+ __io_sync_file_range(req, nxt);
return 0;
}
--
2.24.0
* [PATCH v3 2/3] io_uring: don't do full *prep_worker() from io-wq
2020-02-24 8:30 [PATCH v3 0/3] async punting improvements for io_uring Pavel Begunkov
2020-02-24 8:30 ` [PATCH v3 1/3] io_uring: don't call work.func from sync ctx Pavel Begunkov
@ 2020-02-24 8:30 ` Pavel Begunkov
2020-02-24 15:30 ` Jens Axboe
2020-02-24 8:30 ` [PATCH v3 3/3] io_uring: remove req->in_async Pavel Begunkov
2020-02-24 9:17 ` [PATCH v3 0/3] async punting improvements for io_uring Pavel Begunkov
3 siblings, 1 reply; 10+ messages in thread
From: Pavel Begunkov @ 2020-02-24 8:30 UTC (permalink / raw)
To: Jens Axboe, io-uring
io_prep_async_work(), called from io_wq_assign_next(), does many useless
checks: io_req_work_grab_env() was already called during prep, and
@do_hashed is never used. Add io_prep_next_work() -- a simplified version
that can be called from io-wq.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 819661f49023..3003e767ced3 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -955,6 +955,17 @@ static inline void io_req_work_drop_env(struct io_kiocb *req)
}
}
+static inline void io_prep_next_work(struct io_kiocb *req,
+ struct io_kiocb **link)
+{
+ const struct io_op_def *def = &io_op_defs[req->opcode];
+
+ if (!(req->flags & REQ_F_ISREG) && def->unbound_nonreg_file)
+ req->work.flags |= IO_WQ_WORK_UNBOUND;
+
+ *link = io_prep_linked_timeout(req);
+}
+
static inline bool io_prep_async_work(struct io_kiocb *req,
struct io_kiocb **link)
{
@@ -2453,7 +2464,7 @@ static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
{
struct io_kiocb *link;
- io_prep_async_work(nxt, &link);
+ io_prep_next_work(nxt, &link);
*workptr = &nxt->work;
if (link) {
nxt->work.flags |= IO_WQ_WORK_CB;
--
2.24.0
* [PATCH v3 3/3] io_uring: remove req->in_async
2020-02-24 8:30 [PATCH v3 0/3] async punting improvements for io_uring Pavel Begunkov
2020-02-24 8:30 ` [PATCH v3 1/3] io_uring: don't call work.func from sync ctx Pavel Begunkov
2020-02-24 8:30 ` [PATCH v3 2/3] io_uring: don't do full *prep_worker() from io-wq Pavel Begunkov
@ 2020-02-24 8:30 ` Pavel Begunkov
2020-02-24 9:17 ` [PATCH v3 0/3] async punting improvements for io_uring Pavel Begunkov
3 siblings, 0 replies; 10+ messages in thread
From: Pavel Begunkov @ 2020-02-24 8:30 UTC (permalink / raw)
To: Jens Axboe, io-uring
req->in_async is not really needed; it only prevents propagation of @nxt
for fast non-blocking submissions. Remove it.
Signed-off-by: Pavel Begunkov <[email protected]>
---
fs/io_uring.c | 12 ++++--------
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 3003e767ced3..b149b6e080c5 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -556,7 +556,6 @@ struct io_kiocb {
* llist_node is only used for poll deferred completions
*/
struct llist_node llist_node;
- bool in_async;
bool needs_fixed_file;
u8 opcode;
@@ -1974,14 +1973,13 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
}
}
-static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_kiocb **nxt,
- bool in_async)
+static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_kiocb **nxt)
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
if (req->flags & REQ_F_CUR_POS)
req->file->f_pos = kiocb->ki_pos;
- if (in_async && ret >= 0 && kiocb->ki_complete == io_complete_rw)
+ if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
*nxt = __io_complete_rw(kiocb, ret);
else
io_rw_done(kiocb, ret);
@@ -2274,7 +2272,7 @@ static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
/* Catch -EAGAIN return for forced non-blocking submission */
if (!force_nonblock || ret2 != -EAGAIN) {
- kiocb_done(kiocb, ret2, nxt, req->in_async);
+ kiocb_done(kiocb, ret2, nxt);
} else {
copy_iov:
ret = io_setup_async_rw(req, io_size, iovec,
@@ -2387,7 +2385,7 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
ret2 = -EAGAIN;
if (!force_nonblock || ret2 != -EAGAIN) {
- kiocb_done(kiocb, ret2, nxt, req->in_async);
+ kiocb_done(kiocb, ret2, nxt);
} else {
copy_iov:
ret = io_setup_async_rw(req, io_size, iovec,
@@ -4524,7 +4522,6 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
}
if (!ret) {
- req->in_async = true;
do {
ret = io_issue_sqe(req, NULL, &nxt, false);
/*
@@ -5066,7 +5063,6 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
*mm = ctx->sqo_mm;
}
- req->in_async = async;
req->needs_fixed_file = async;
trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
true, async);
--
2.24.0
* Re: [PATCH v3 0/3] async punting improvements for io_uring
2020-02-24 8:30 [PATCH v3 0/3] async punting improvements for io_uring Pavel Begunkov
` (2 preceding siblings ...)
2020-02-24 8:30 ` [PATCH v3 3/3] io_uring: remove req->in_async Pavel Begunkov
@ 2020-02-24 9:17 ` Pavel Begunkov
2020-02-24 15:30 ` Jens Axboe
3 siblings, 1 reply; 10+ messages in thread
From: Pavel Begunkov @ 2020-02-24 9:17 UTC (permalink / raw)
To: Jens Axboe, io-uring
On 24/02/2020 11:30, Pavel Begunkov wrote:
> *on top of for-5.6*
Jens, let me know if this and the splice patchset should be rebased onto
your poll branch.
>
> This cleans up io-wq punting paths, doing small fixes and removing
> unnecessary logic from different submission paths.
>
> v2:
> - remove pid-related comment, as it's fixed separately
> - make ("add missing io_req_cancelled()") first
> in the series, so it may be picked for 5.6
>
> v3:
> - rebase + drop a patch definitely colliding with poll work
>
> Pavel Begunkov (3):
> io_uring: don't call work.func from sync ctx
> io_uring: don't do full *prep_worker() from io-wq
> io_uring: remove req->in_async
>
> fs/io_uring.c | 101 +++++++++++++++++++++++++++-----------------------
> 1 file changed, 54 insertions(+), 47 deletions(-)
>
--
Pavel Begunkov
* Re: [PATCH v3 2/3] io_uring: don't do full *prep_worker() from io-wq
2020-02-24 8:30 ` [PATCH v3 2/3] io_uring: don't do full *prep_worker() from io-wq Pavel Begunkov
@ 2020-02-24 15:30 ` Jens Axboe
2020-02-24 15:48 ` Pavel Begunkov
0 siblings, 1 reply; 10+ messages in thread
From: Jens Axboe @ 2020-02-24 15:30 UTC (permalink / raw)
To: Pavel Begunkov, io-uring
On 2/24/20 1:30 AM, Pavel Begunkov wrote:
> io_prep_async_work(), called from io_wq_assign_next(), does many useless
> checks: io_req_work_grab_env() was already called during prep, and
> @do_hashed is never used. Add io_prep_next_work() -- a simplified version
> that can be called from io-wq.
>
> Signed-off-by: Pavel Begunkov <[email protected]>
> ---
> fs/io_uring.c | 13 ++++++++++++-
> 1 file changed, 12 insertions(+), 1 deletion(-)
>
> diff --git a/fs/io_uring.c b/fs/io_uring.c
> index 819661f49023..3003e767ced3 100644
> --- a/fs/io_uring.c
> +++ b/fs/io_uring.c
> @@ -955,6 +955,17 @@ static inline void io_req_work_drop_env(struct io_kiocb *req)
> }
> }
>
> +static inline void io_prep_next_work(struct io_kiocb *req,
> + struct io_kiocb **link)
> +{
> + const struct io_op_def *def = &io_op_defs[req->opcode];
> +
> + if (!(req->flags & REQ_F_ISREG) && def->unbound_nonreg_file)
> + req->work.flags |= IO_WQ_WORK_UNBOUND;
Extra tab?
Otherwise looks fine.
--
Jens Axboe
* Re: [PATCH v3 0/3] async punting improvements for io_uring
2020-02-24 9:17 ` [PATCH v3 0/3] async punting improvements for io_uring Pavel Begunkov
@ 2020-02-24 15:30 ` Jens Axboe
0 siblings, 0 replies; 10+ messages in thread
From: Jens Axboe @ 2020-02-24 15:30 UTC (permalink / raw)
To: Pavel Begunkov, io-uring
On 2/24/20 2:17 AM, Pavel Begunkov wrote:
> On 24/02/2020 11:30, Pavel Begunkov wrote:
>> *on top of for-5.6*
>
> Jens, let me know if this and the splice patchset should be rebased onto
> your poll branch.
Since those are both ready at this point, I think we should queue
those up first and I'll rebase on top of that.
--
Jens Axboe
* Re: [PATCH v3 2/3] io_uring: don't do full *prep_worker() from io-wq
2020-02-24 15:30 ` Jens Axboe
@ 2020-02-24 15:48 ` Pavel Begunkov
2020-02-24 15:57 ` Jens Axboe
0 siblings, 1 reply; 10+ messages in thread
From: Pavel Begunkov @ 2020-02-24 15:48 UTC (permalink / raw)
To: Jens Axboe, io-uring
On 24/02/2020 18:30, Jens Axboe wrote:
> On 2/24/20 1:30 AM, Pavel Begunkov wrote:
>> io_prep_async_work(), called from io_wq_assign_next(), does many useless
>> checks: io_req_work_grab_env() was already called during prep, and
>> @do_hashed is never used. Add io_prep_next_work() -- a simplified version
>> that can be called from io-wq.
>>
>> Signed-off-by: Pavel Begunkov <[email protected]>
>> ---
>> fs/io_uring.c | 13 ++++++++++++-
>> 1 file changed, 12 insertions(+), 1 deletion(-)
>>
>> diff --git a/fs/io_uring.c b/fs/io_uring.c
>> index 819661f49023..3003e767ced3 100644
>> --- a/fs/io_uring.c
>> +++ b/fs/io_uring.c
>> @@ -955,6 +955,17 @@ static inline void io_req_work_drop_env(struct io_kiocb *req)
>> }
>> }
>>
>> +static inline void io_prep_next_work(struct io_kiocb *req,
>> + struct io_kiocb **link)
>> +{
>> + const struct io_op_def *def = &io_op_defs[req->opcode];
>> +
>> + if (!(req->flags & REQ_F_ISREG) && def->unbound_nonreg_file)
>> + req->work.flags |= IO_WQ_WORK_UNBOUND;
>
> Extra tab?
Yep. Would resending [2/3] be enough?
> Otherwise looks fine.
>
--
Pavel Begunkov
* Re: [PATCH v3 2/3] io_uring: don't do full *prep_worker() from io-wq
2020-02-24 15:48 ` Pavel Begunkov
@ 2020-02-24 15:57 ` Jens Axboe
2020-02-24 15:57 ` Pavel Begunkov
0 siblings, 1 reply; 10+ messages in thread
From: Jens Axboe @ 2020-02-24 15:57 UTC (permalink / raw)
To: Pavel Begunkov, io-uring
On 2/24/20 8:48 AM, Pavel Begunkov wrote:
> On 24/02/2020 18:30, Jens Axboe wrote:
>> On 2/24/20 1:30 AM, Pavel Begunkov wrote:
>>> io_prep_async_work(), called from io_wq_assign_next(), does many useless
>>> checks: io_req_work_grab_env() was already called during prep, and
>>> @do_hashed is never used. Add io_prep_next_work() -- a simplified version
>>> that can be called from io-wq.
>>>
>>> Signed-off-by: Pavel Begunkov <[email protected]>
>>> ---
>>> fs/io_uring.c | 13 ++++++++++++-
>>> 1 file changed, 12 insertions(+), 1 deletion(-)
>>>
>>> diff --git a/fs/io_uring.c b/fs/io_uring.c
>>> index 819661f49023..3003e767ced3 100644
>>> --- a/fs/io_uring.c
>>> +++ b/fs/io_uring.c
>>> @@ -955,6 +955,17 @@ static inline void io_req_work_drop_env(struct io_kiocb *req)
>>> }
>>> }
>>>
>>> +static inline void io_prep_next_work(struct io_kiocb *req,
>>> + struct io_kiocb **link)
>>> +{
>>> + const struct io_op_def *def = &io_op_defs[req->opcode];
>>> +
>>> + if (!(req->flags & REQ_F_ISREG) && def->unbound_nonreg_file)
>>> + req->work.flags |= IO_WQ_WORK_UNBOUND;
>>
>> Extra tab?
>
> Yep. Would resending [2/3] be enough?
No need, I just did a hand edit of the patch before applying.
--
Jens Axboe
* Re: [PATCH v3 2/3] io_uring: don't do full *prep_worker() from io-wq
2020-02-24 15:57 ` Jens Axboe
@ 2020-02-24 15:57 ` Pavel Begunkov
0 siblings, 0 replies; 10+ messages in thread
From: Pavel Begunkov @ 2020-02-24 15:57 UTC (permalink / raw)
To: Jens Axboe, io-uring
On 24/02/2020 18:57, Jens Axboe wrote:
> On 2/24/20 8:48 AM, Pavel Begunkov wrote:
>> On 24/02/2020 18:30, Jens Axboe wrote:
>>> On 2/24/20 1:30 AM, Pavel Begunkov wrote:
>>>> io_prep_async_work(), called from io_wq_assign_next(), does many useless
>>>> checks: io_req_work_grab_env() was already called during prep, and
>>>> @do_hashed is never used. Add io_prep_next_work() -- a simplified version
>>>> that can be called from io-wq.
>>>>
>>>> Signed-off-by: Pavel Begunkov <[email protected]>
>>>> ---
>>>> fs/io_uring.c | 13 ++++++++++++-
>>>> 1 file changed, 12 insertions(+), 1 deletion(-)
>>>>
>>>> diff --git a/fs/io_uring.c b/fs/io_uring.c
>>>> index 819661f49023..3003e767ced3 100644
>>>> --- a/fs/io_uring.c
>>>> +++ b/fs/io_uring.c
>>>> @@ -955,6 +955,17 @@ static inline void io_req_work_drop_env(struct io_kiocb *req)
>>>> }
>>>> }
>>>>
>>>> +static inline void io_prep_next_work(struct io_kiocb *req,
>>>> + struct io_kiocb **link)
>>>> +{
>>>> + const struct io_op_def *def = &io_op_defs[req->opcode];
>>>> +
>>>> + if (!(req->flags & REQ_F_ISREG) && def->unbound_nonreg_file)
>>>> + req->work.flags |= IO_WQ_WORK_UNBOUND;
>>>
>>> Extra tab?
>>
>> Yep. Would resending [2/3] be enough?
>
> No need, I just did a hand edit of the patch before applying.
Great, appreciate that
--
Pavel Begunkov