From 2a9fed89a7bea6fbe31e717ab5f277405e20826e Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Mon, 7 Jan 2019 13:52:31 +0100
Subject: [PATCH 011/328] sched/fair: Robustify CFS-bandwidth timer locking
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.106-rt46.tar.xz

Traditionally hrtimer callbacks were run with IRQs disabled, but with
the introduction of HRTIMER_MODE_SOFT it is possible they run from
SoftIRQ context, which does _NOT_ have IRQs disabled.
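
(For reference, an illustrative sketch, not part of this patch; the
timer and callback names are made up. A timer armed with a _SOFT mode
has its callback invoked from SoftIRQ context, where irqs_disabled()
is false:)

  #include <linux/hrtimer.h>
  #include <linux/ktime.h>

  static struct hrtimer example_timer;	/* hypothetical */

  static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
  {
  	/* Runs in SoftIRQ context: IRQs are NOT disabled here. */
  	return HRTIMER_NORESTART;
  }

  static void example_timer_setup(void)
  {
  	hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
  	example_timer.function = example_timer_fn;
  	hrtimer_start(&example_timer, ms_to_ktime(100), HRTIMER_MODE_REL_SOFT);
  }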

Allow for the CFS bandwidth timers (period_timer and slack_timer) to
be run from SoftIRQ context; this entails removing the assumption that
IRQs are already disabled from the locking.
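
(Again illustrative only, with a hypothetical lock: the change below
switches to the irqsave/irqrestore locking pattern, which saves the
current IRQ state instead of assuming IRQs are already off, and is
therefore correct in both hard-IRQ and SoftIRQ context:)

  #include <linux/spinlock.h>

  static DEFINE_RAW_SPINLOCK(example_lock);	/* hypothetical */

  static void example_critical_section(void)
  {
  	unsigned long flags;

  	/* Disable IRQs and remember their previous state in 'flags'. */
  	raw_spin_lock_irqsave(&example_lock, flags);
  	/* ... critical section ... */
  	/* Restore the saved IRQ state (on or off) on unlock. */
  	raw_spin_unlock_irqrestore(&example_lock, flags);
  }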

While mainline doesn't strictly need this, -RT forces all timers not
explicitly marked with MODE_HARD into MODE_SOFT and trips over this.
And marking these timers as MODE_HARD doesn't make sense as they're
not required for RT operation and can potentially be quite expensive.

Cc: Ingo Molnar <mingo@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Reported-by: Tom Putzeys <tom.putzeys@be.atlascopco.com>
Tested-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20190107125231.GE14122@hirez.programming.kicks-ass.net
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 kernel/sched/fair.c | 30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7f4f4ab5bfef..0f1ba3d72336 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4576,7 +4576,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining)
 		struct rq *rq = rq_of(cfs_rq);
 		struct rq_flags rf;
 
-		rq_lock(rq, &rf);
+		rq_lock_irqsave(rq, &rf);
 		if (!cfs_rq_throttled(cfs_rq))
 			goto next;
 
@@ -4595,7 +4595,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining)
 		unthrottle_cfs_rq(cfs_rq);
 
 next:
-		rq_unlock(rq, &rf);
+		rq_unlock_irqrestore(rq, &rf);
 
 		if (!remaining)
 			break;
@@ -4611,7 +4611,7 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining)
  * period the timer is deactivated until scheduling resumes; cfs_b->idle is
  * used to track this state.
  */
-static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
+static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags)
 {
 	u64 runtime;
 	int throttled;
@@ -4651,10 +4651,10 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
 	while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
 		runtime = cfs_b->runtime;
 		cfs_b->distribute_running = 1;
-		raw_spin_unlock(&cfs_b->lock);
+		raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 		/* we can't nest cfs_b->lock while distributing bandwidth */
 		runtime = distribute_cfs_runtime(cfs_b, runtime);
-		raw_spin_lock(&cfs_b->lock);
+		raw_spin_lock_irqsave(&cfs_b->lock, flags);
 
 		cfs_b->distribute_running = 0;
 		throttled = !list_empty(&cfs_b->throttled_cfs_rq);
@@ -4762,16 +4762,17 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 {
 	u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
+	unsigned long flags;
 
 	/* confirm we're still not at a refresh boundary */
-	raw_spin_lock(&cfs_b->lock);
+	raw_spin_lock_irqsave(&cfs_b->lock, flags);
 	if (cfs_b->distribute_running) {
-		raw_spin_unlock(&cfs_b->lock);
+		raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 		return;
 	}
 
 	if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
-		raw_spin_unlock(&cfs_b->lock);
+		raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 		return;
 	}
 
@@ -4781,17 +4782,17 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 	if (runtime)
 		cfs_b->distribute_running = 1;
 
-	raw_spin_unlock(&cfs_b->lock);
+	raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 
 	if (!runtime)
 		return;
 
 	runtime = distribute_cfs_runtime(cfs_b, runtime);
 
-	raw_spin_lock(&cfs_b->lock);
+	raw_spin_lock_irqsave(&cfs_b->lock, flags);
 	cfs_b->runtime -= min(runtime, cfs_b->runtime);
 	cfs_b->distribute_running = 0;
-	raw_spin_unlock(&cfs_b->lock);
+	raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 }
 
 /*
@@ -4871,11 +4872,12 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
 {
 	struct cfs_bandwidth *cfs_b =
 		container_of(timer, struct cfs_bandwidth, period_timer);
+	unsigned long flags;
 	int overrun;
 	int idle = 0;
 	int count = 0;
 
-	raw_spin_lock(&cfs_b->lock);
+	raw_spin_lock_irqsave(&cfs_b->lock, flags);
 	for (;;) {
 		overrun = hrtimer_forward_now(timer, cfs_b->period);
 		if (!overrun)
@@ -4911,11 +4913,11 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
 			count = 0;
 		}
 
-		idle = do_sched_cfs_period_timer(cfs_b, overrun);
+		idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
 	}
 	if (idle)
 		cfs_b->period_active = 0;
-	raw_spin_unlock(&cfs_b->lock);
+	raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 
 	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
 }
-- 
2.25.1