From: Jens Axboe <[email protected]>
To: [email protected]
Cc: [email protected], [email protected],
[email protected], Jens Axboe <[email protected]>
Subject: [PATCH 6/9] sched: add a sched_work list
Date: Thu, 20 Feb 2020 13:31:48 -0700
Message-ID: <[email protected]>
In-Reply-To: <[email protected]>

This is similar to task_work, and it uses the same infrastructure, but
the sched_work list is run when the task is scheduled in or out. The
intended use case is for core code to be able to queue work that the
task then runs automatically, without the task itself needing to do
anything. The work is added from outside the task; one example would be
waitqueue handlers, or anything else that is invoked out-of-band from
the task itself.
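
As a rough sketch (not part of this patch), out-of-band code such as a
waitqueue wakeup handler could queue work along these lines; the
my_retry_work()/my_queue_retry() names are made up purely for
illustration:

/* Hypothetical illustration only, not real kernel code. */
static void my_retry_work(struct callback_head *head)
{
	/* Runs in the target task's context the next time that task
	 * is scheduled in or out. */
}

static int my_queue_retry(struct task_struct *task,
			  struct callback_head *work)
{
	init_task_work(work, my_retry_work);
	/* Fails with -ESRCH if the task is already exiting. */
	return sched_work_add(task, work);
}

The callback_head is owned by the caller and must stay around until the
callback has run or has been removed with sched_work_cancel().
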
Signed-off-by: Jens Axboe <[email protected]>
---
include/linux/sched.h | 4 ++-
include/linux/task_work.h | 5 ++++
kernel/sched/core.c | 16 ++++++++--
kernel/task_work.c | 62 ++++++++++++++++++++++++++++++++++++---
4 files changed, 80 insertions(+), 7 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 04278493bf15..da15112c1140 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -648,6 +648,7 @@ struct task_struct {
/* Per task flags (PF_*), defined further below: */
unsigned int flags;
unsigned int ptrace;
+ int on_rq;
#ifdef CONFIG_SMP
struct llist_node wake_entry;
@@ -670,13 +671,14 @@ struct task_struct {
int recent_used_cpu;
int wake_cpu;
#endif
- int on_rq;
int prio;
int static_prio;
int normal_prio;
unsigned int rt_priority;
+ struct callback_head *sched_work;
+
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
diff --git a/include/linux/task_work.h b/include/linux/task_work.h
index bd9a6a91c097..e0c56f461df6 100644
--- a/include/linux/task_work.h
+++ b/include/linux/task_work.h
@@ -17,9 +17,14 @@ int task_work_add(struct task_struct *task, struct callback_head *twork, bool);
struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t);
void task_work_run(void);
+int sched_work_add(struct task_struct *task, struct callback_head *work);
+struct callback_head *sched_work_cancel(struct task_struct *, task_work_func_t);
+void sched_work_run(void);
+
static inline void exit_task_work(struct task_struct *task)
{
task_work_run();
+ sched_work_run();
}
#endif /* _LINUX_TASK_WORK_H */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c7bab13f9caa..9e0f754e0630 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2678,6 +2678,7 @@ int wake_up_state(struct task_struct *p, unsigned int state)
static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
{
p->on_rq = 0;
+ p->sched_work = NULL;
p->se.on_rq = 0;
p->se.exec_start = 0;
@@ -4102,8 +4103,13 @@ void __noreturn do_task_dead(void)
cpu_relax();
}
-static void sched_out_update(struct task_struct *tsk)
+static bool sched_out_update(struct task_struct *tsk)
{
+ if (unlikely(tsk->sched_work)) {
+ sched_work_run();
+ return true;
+ }
+
/*
* If a worker went to sleep, notify and ask workqueue whether
* it wants to wake up a task to maintain concurrency.
@@ -4119,6 +4125,8 @@ static void sched_out_update(struct task_struct *tsk)
io_wq_worker_sleeping(tsk);
preempt_enable_no_resched();
}
+
+ return false;
}
static void sched_in_update(struct task_struct *tsk)
@@ -4129,6 +4137,8 @@ static void sched_in_update(struct task_struct *tsk)
else
io_wq_worker_running(tsk);
}
+ if (unlikely(tsk->sched_work))
+ sched_work_run();
}
static inline void sched_submit_work(struct task_struct *tsk)
@@ -4136,7 +4146,9 @@ static inline void sched_submit_work(struct task_struct *tsk)
if (!tsk->state)
return;
- sched_out_update(tsk);
+ /* if we processed work, we could be runnable again. check. */
+ if (sched_out_update(tsk) && !tsk->state)
+ return;
if (tsk_is_pi_blocked(tsk))
return;
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 3445421266e7..ba62485d5b3d 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -3,7 +3,14 @@
#include <linux/task_work.h>
#include <linux/tracehook.h>
-static struct callback_head work_exited; /* all we need is ->next == NULL */
+static void task_exit_func(struct callback_head *head)
+{
+}
+
+static struct callback_head work_exited = {
+ .next = NULL,
+ .func = task_exit_func,
+};
static int __task_work_add(struct task_struct *task,
struct callback_head **headptr,
@@ -53,6 +60,28 @@ task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
return ret;
}
+/**
+ * sched_work_add - ask the @task to execute @work->func()
+ * @task: the task which should run the callback
+ * @work: the callback to run
+ *
+ * Queue @work for sched_work_run() below.
+ * Fails if the @task is exiting/exited and thus it can't process this @work.
+ * Otherwise @work->func() will be called when the @task is either scheduled
+ * in or out.
+ *
+ * Note: there is no ordering guarantee on works queued here.
+ *
+ * RETURNS:
+ * 0 if succeeds or -ESRCH.
+ */
+int
+sched_work_add(struct task_struct *task, struct callback_head *work)
+{
+ return __task_work_add(task, &task->sched_work, work);
+}
+
static struct callback_head *__task_work_cancel(struct task_struct *task,
struct callback_head **headptr,
task_work_func_t func)
@@ -98,10 +127,27 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
return __task_work_cancel(task, &task->task_works, func);
}
-static void __task_work_run(struct task_struct *task,
- struct callback_head **headptr)
+/**
+ * sched_work_cancel - cancel a pending work added by sched_work_add()
+ * @task: the task which should execute the work
+ * @func: identifies the work to remove
+ *
+ * Find the last queued pending work with ->func == @func and remove
+ * it from queue.
+ *
+ * RETURNS:
+ * The found work or NULL if not found.
+ */
+struct callback_head *
+sched_work_cancel(struct task_struct *task, task_work_func_t func)
+{
+ return __task_work_cancel(task, &task->sched_work, func);
+}
+
+static void __task_work_run(struct callback_head **headptr)
{
struct callback_head *work, *head, *next;
+ struct task_struct *task = current;
for (;;) {
/*
@@ -148,5 +194,13 @@ static void __task_work_run(struct task_struct *task,
*/
void task_work_run(void)
{
- __task_work_run(current, &current->task_works);
+ __task_work_run(&current->task_works);
+}
+
+/**
+ * sched_work_run - execute the works added by sched_work_add()
+ */
+void sched_work_run(void)
+{
+ __task_work_run(&current->sched_work);
}
--
2.25.1