From: Bijan Mottahedeh <[email protected]>
To: [email protected], [email protected], [email protected]
Subject: [PATCH v2 05/13] io_uring: separate ref_list from fixed_rsrc_data
Date: Mon, 7 Dec 2020 14:15:44 -0800
Message-ID: <[email protected]>
In-Reply-To: <[email protected]>
Uplevel ref_list from the per-resource data into the ring context and
make it common to all resources. This allows one shared ref_list to be
used for both files and buffers in upcoming patches.
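The net effect on the data structures, summarized from the diff below
(a sketch for review convenience only, not additional code in this patch):

    /* before: ref tracking state lives in each fixed_rsrc_data */
    struct fixed_rsrc_data {
            ...
            struct list_head        ref_list;
            spinlock_t              lock;
    };

    /* after: one shared list and lock in the ring context */
    struct io_ring_ctx {
            ...
            struct list_head        rsrc_ref_list;
            spinlock_t              rsrc_ref_lock;
    };

The file_put_work/file_put_llist pair is likewise renamed to
rsrc_put_work/rsrc_put_llist so the same deferred-put machinery can
serve any resource type.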
Signed-off-by: Bijan Mottahedeh <[email protected]>
---
fs/io_uring.c | 77 ++++++++++++++++++++++++++++-------------------------------
1 file changed, 36 insertions(+), 41 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 33b2ff6..1ed63bc 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -231,8 +231,6 @@ struct fixed_rsrc_data {
struct fixed_rsrc_ref_node *node;
struct percpu_ref refs;
struct completion done;
- struct list_head ref_list;
- spinlock_t lock;
};
struct io_buffer {
@@ -398,8 +396,10 @@ struct io_ring_ctx {
struct list_head inflight_list;
} ____cacheline_aligned_in_smp;
- struct delayed_work file_put_work;
- struct llist_head file_put_llist;
+ struct delayed_work rsrc_put_work;
+ struct llist_head rsrc_put_llist;
+ struct list_head rsrc_ref_list;
+ spinlock_t rsrc_ref_lock;
struct work_struct exit_work;
struct io_restriction restrictions;
@@ -1024,7 +1024,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
static struct file *io_file_get(struct io_submit_state *state,
struct io_kiocb *req, int fd, bool fixed);
static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs);
-static void io_file_put_work(struct work_struct *work);
+static void io_rsrc_put_work(struct work_struct *work);
static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
struct iovec **iovec, struct iov_iter *iter,
@@ -1325,8 +1325,10 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
INIT_LIST_HEAD(&ctx->timeout_list);
spin_lock_init(&ctx->inflight_lock);
INIT_LIST_HEAD(&ctx->inflight_list);
- INIT_DELAYED_WORK(&ctx->file_put_work, io_file_put_work);
- init_llist_head(&ctx->file_put_llist);
+ spin_lock_init(&ctx->rsrc_ref_lock);
+ INIT_LIST_HEAD(&ctx->rsrc_ref_list);
+ INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
+ init_llist_head(&ctx->rsrc_put_llist);
return ctx;
err:
if (ctx->fallback_req)
@@ -7267,16 +7269,16 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
if (!data)
return -ENXIO;
- spin_lock_bh(&data->lock);
+ spin_lock_bh(&ctx->rsrc_ref_lock);
ref_node = data->node;
- spin_unlock_bh(&data->lock);
+ spin_unlock_bh(&ctx->rsrc_ref_lock);
if (ref_node)
percpu_ref_kill(&ref_node->refs);
percpu_ref_kill(&data->refs);
/* wait for all refs nodes to complete */
- flush_delayed_work(&ctx->file_put_work);
+ flush_delayed_work(&ctx->rsrc_put_work);
wait_for_completion(&data->done);
__io_sqe_files_unregister(ctx);
@@ -7617,30 +7619,25 @@ static void __io_rsrc_put_work(struct fixed_rsrc_ref_node *ref_node)
percpu_ref_put(&rsrc_data->refs);
}
-static void io_rsrc_put_work(struct llist_node *node)
+static void io_rsrc_put_work(struct work_struct *work)
{
- struct fixed_rsrc_ref_node *ref_node;
- struct llist_node *next;
+ struct io_ring_ctx *ctx;
+ struct llist_node *node;
+
+ ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
+ node = llist_del_all(&ctx->rsrc_put_llist);
while (node) {
- next = node->next;
+ struct fixed_rsrc_ref_node *ref_node;
+ struct llist_node *next = node->next;
+
ref_node = llist_entry(node, struct fixed_rsrc_ref_node, llist);
__io_rsrc_put_work(ref_node);
node = next;
}
}
-static void io_file_put_work(struct work_struct *work)
-{
- struct io_ring_ctx *ctx;
- struct llist_node *node;
-
- ctx = container_of(work, struct io_ring_ctx, file_put_work.work);
- node = llist_del_all(&ctx->file_put_llist);
- io_rsrc_put_work(node);
-}
-
-static void io_file_data_ref_zero(struct percpu_ref *ref)
+static void io_rsrc_data_ref_zero(struct percpu_ref *ref)
{
struct fixed_rsrc_ref_node *ref_node;
struct fixed_rsrc_data *data;
@@ -7652,27 +7649,27 @@ static void io_file_data_ref_zero(struct percpu_ref *ref)
data = ref_node->rsrc_data;
ctx = data->ctx;
- spin_lock_bh(&data->lock);
+ spin_lock_bh(&ctx->rsrc_ref_lock);
ref_node->done = true;
- while (!list_empty(&data->ref_list)) {
- ref_node = list_first_entry(&data->ref_list,
+ while (!list_empty(&ctx->rsrc_ref_list)) {
+ ref_node = list_first_entry(&ctx->rsrc_ref_list,
struct fixed_rsrc_ref_node, node);
/* recycle ref nodes in order */
if (!ref_node->done)
break;
list_del(&ref_node->node);
- first_add |= llist_add(&ref_node->llist, &ctx->file_put_llist);
+ first_add |= llist_add(&ref_node->llist, &ctx->rsrc_put_llist);
}
- spin_unlock_bh(&data->lock);
+ spin_unlock_bh(&ctx->rsrc_ref_lock);
if (percpu_ref_is_dying(&data->refs))
delay = 0;
if (!delay)
- mod_delayed_work(system_wq, &ctx->file_put_work, 0);
+ mod_delayed_work(system_wq, &ctx->rsrc_put_work, 0);
else if (first_add)
- queue_delayed_work(system_wq, &ctx->file_put_work, delay);
+ queue_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
}
static struct fixed_rsrc_ref_node *alloc_fixed_file_ref_node(
@@ -7684,7 +7681,7 @@ static struct fixed_rsrc_ref_node *alloc_fixed_file_ref_node(
if (!ref_node)
return ERR_PTR(-ENOMEM);
- if (percpu_ref_init(&ref_node->refs, io_file_data_ref_zero,
+ if (percpu_ref_init(&ref_node->refs, io_rsrc_data_ref_zero,
0, GFP_KERNEL)) {
kfree(ref_node);
return ERR_PTR(-ENOMEM);
@@ -7725,8 +7722,6 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
return -ENOMEM;
file_data->ctx = ctx;
init_completion(&file_data->done);
- INIT_LIST_HEAD(&file_data->ref_list);
- spin_lock_init(&file_data->lock);
nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
file_data->table = kcalloc(nr_tables, sizeof(*file_data->table),
@@ -7788,9 +7783,9 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
}
file_data->node = ref_node;
- spin_lock_bh(&file_data->lock);
- list_add_tail(&ref_node->node, &file_data->ref_list);
- spin_unlock_bh(&file_data->lock);
+ spin_lock_bh(&ctx->rsrc_ref_lock);
+ list_add_tail(&ref_node->node, &ctx->rsrc_ref_list);
+ spin_unlock_bh(&ctx->rsrc_ref_lock);
percpu_ref_get(&file_data->refs);
return ret;
out_fput:
@@ -7952,10 +7947,10 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
if (needs_switch) {
percpu_ref_kill(&data->node->refs);
- spin_lock_bh(&data->lock);
- list_add_tail(&ref_node->node, &data->ref_list);
+ spin_lock_bh(&ctx->rsrc_ref_lock);
+ list_add_tail(&ref_node->node, &ctx->rsrc_ref_list);
data->node = ref_node;
- spin_unlock_bh(&data->lock);
+ spin_unlock_bh(&ctx->rsrc_ref_lock);
percpu_ref_get(&ctx->file_data->refs);
} else
destroy_fixed_file_ref_node(ref_node);
--
1.8.3.1