From 100c0ec5ffe14f7b9c0afb47d8cb6c08f249466c Mon Sep 17 00:00:00 2001
Message-Id: <100c0ec5ffe14f7b9c0afb47d8cb6c08f249466c.1596234183.git.zanussi@kernel.org>
In-Reply-To: <378ee68279f6a7631221f2670a9298620148690d.1596234183.git.zanussi@kernel.org>
References: <378ee68279f6a7631221f2670a9298620148690d.1596234183.git.zanussi@kernel.org>
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 25 Jun 2011 09:21:04 +0200
Subject: [PATCH 109/329] sched: Add saved_state for tasks blocked on sleeping
 locks
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.135-rt60.tar.xz

Spinlocks are state preserving in !RT. RT changes the state when a
task gets blocked on a lock. So we need to remember the state before
the lock contention. If a regular wakeup (not a RTmutex related
wakeup) happens, the saved_state is updated to running. When the lock
sleep is done, the saved state is restored.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/sched.h |  3 +++
 kernel/sched/core.c   | 33 ++++++++++++++++++++++++++++++++-
 kernel/sched/sched.h  |  1 +
 3 files changed, 36 insertions(+), 1 deletion(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9a0e01e48602..42846a65df9a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -600,6 +600,8 @@ struct task_struct {
 #endif
 	/* -1 unrunnable, 0 runnable, >0 stopped: */
 	volatile long			state;
+	/* saved state for "spinlock sleepers" */
+	volatile long			saved_state;
 
 	/*
 	 * This begins the randomizable portion of task_struct. Only
@@ -1621,6 +1623,7 @@ extern struct task_struct *find_get_task_by_vpid(pid_t nr);
 
 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
 extern int wake_up_process(struct task_struct *tsk);
+extern int wake_up_lock_sleeper(struct task_struct *tsk);
 extern void wake_up_new_task(struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3600139cb690..53d9f8927619 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1999,8 +1999,27 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	 */
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	smp_mb__after_spinlock();
-	if (!(p->state & state))
+	if (!(p->state & state)) {
+		/*
+		 * The task might be running due to a spinlock sleeper
+		 * wakeup. Check the saved state and set it to running
+		 * if the wakeup condition is true.
+		 */
+		if (!(wake_flags & WF_LOCK_SLEEPER)) {
+			if (p->saved_state & state) {
+				p->saved_state = TASK_RUNNING;
+				success = 1;
+			}
+		}
 		goto out;
+	}
+
+	/*
+	 * If this is a regular wakeup, then we can unconditionally
+	 * clear the saved state of a "lock sleeper".
+	 */
+	if (!(wake_flags & WF_LOCK_SLEEPER))
+		p->saved_state = TASK_RUNNING;
 
 	trace_sched_waking(p);
 
@@ -2164,6 +2183,18 @@ int wake_up_process(struct task_struct *p)
 }
 EXPORT_SYMBOL(wake_up_process);
 
+/**
+ * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock"
+ * @p: The process to be woken up.
+ *
+ * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate
+ * the nature of the wakeup.
+ */
+int wake_up_lock_sleeper(struct task_struct *p)
+{
+	return try_to_wake_up(p, TASK_UNINTERRUPTIBLE, WF_LOCK_SLEEPER);
+}
+
 int wake_up_state(struct task_struct *p, unsigned int state)
 {
 	return try_to_wake_up(p, state, 0);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 5f0eb4565957..96823c632599 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1445,6 +1445,7 @@ static inline int task_on_rq_migrating(struct task_struct *p)
 #define WF_SYNC			0x01		/* Waker goes to sleep after wakeup */
 #define WF_FORK			0x02		/* Child wakeup after fork */
 #define WF_MIGRATED		0x4		/* Internal use, task got migrated */
+#define WF_LOCK_SLEEPER		0x08		/* wakeup spinlock "sleeper" */
 
 /*
  * To aid in avoiding the subversion of "niceness" due to uneven distribution
-- 
2.17.1