[rt] Update to 3.8.11-rt8

svn path=/dists/sid/linux/; revision=20060
Ben Hutchings 2013-05-09 00:22:02 +00:00
parent 8d67495dbc
commit 82a0dd1f08
22 changed files with 113 additions and 39 deletions

debian/changelog

@@ -46,6 +46,10 @@ linux (3.8.12-1) UNRELEASED; urgency=low
    - [powerpc] Add nic-shared-modules as common dependency of nic-modules
      and nic-extra-modules
    - Unify module list for nic-shared-modules across architectures [!m68k]
+  * [rt] Update to 3.8.11-rt8:
+    - time/timekeeping: shadow tk->cycle_last together with clock->cycle_last
+    - sched/workqueue: Only wake up idle workers if not blocked on sleeping
+      spin lock

 -- Ben Hutchings <ben@decadent.org.uk> Mon, 06 May 2013 03:59:09 +0100


@@ -59,7 +59,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct mm_struct;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3489,10 +3489,10 @@ void complete(struct completion *x)
+@@ -3491,10 +3491,10 @@ void complete(struct completion *x)
{
unsigned long flags;
@@ -73,7 +73,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(complete);
-@@ -3509,10 +3509,10 @@ void complete_all(struct completion *x)
+@@ -3511,10 +3511,10 @@ void complete_all(struct completion *x)
{
unsigned long flags;
@@ -87,7 +87,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(complete_all);
-@@ -3520,20 +3520,20 @@ static inline long __sched
+@@ -3522,20 +3522,20 @@ static inline long __sched
do_wait_for_common(struct completion *x, long timeout, int state)
{
if (!x->done) {
@@ -113,7 +113,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!x->done)
return timeout;
}
-@@ -3546,9 +3546,9 @@ wait_for_common(struct completion *x, lo
+@@ -3548,9 +3548,9 @@ wait_for_common(struct completion *x, lo
{
might_sleep();
@@ -125,7 +125,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return timeout;
}
-@@ -3679,12 +3679,12 @@ bool try_wait_for_completion(struct comp
+@@ -3681,12 +3681,12 @@ bool try_wait_for_completion(struct comp
unsigned long flags;
int ret = 1;
@@ -140,7 +140,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);
-@@ -3702,10 +3702,10 @@ bool completion_done(struct completion *
+@@ -3704,10 +3704,10 @@ bool completion_done(struct completion *
unsigned long flags;
int ret = 1;


@@ -461,7 +461,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SCHED_DEBUG
p->migrate_disable_atomic--;
#endif
-@@ -4874,6 +4874,84 @@ void do_set_cpus_allowed(struct task_str
+@@ -4876,6 +4876,84 @@ void do_set_cpus_allowed(struct task_str
cpumask_copy(&p->cpus_allowed, new_mask);
}


@@ -9,7 +9,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4943,6 +4943,7 @@ void __cpuinit init_idle(struct task_str
+@@ -4945,6 +4945,7 @@ void __cpuinit init_idle(struct task_str
rcu_read_unlock();
rq->curr = rq->idle = idle;


@@ -12,4 +12,4 @@ Link: http://lkml.kernel.org/n/tip-8vdw4bfcsds27cvox6rpb334@git.kernel.org
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt6
++-rt8


@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7393,7 +7393,8 @@ void __might_sleep(const char *file, int
+@@ -7395,7 +7395,8 @@ void __might_sleep(const char *file, int
static unsigned long prev_jiffy; /* ratelimiting */
rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */


@@ -116,7 +116,7 @@ Link: http://lkml.kernel.org/n/tip-275i87sl8e1jcamtchmehonm@git.kernel.org
* Callback to arch code if there's nosmp or maxcpus=0 on the
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4736,7 +4736,7 @@ void __cpuinit init_idle(struct task_str
+@@ -4738,7 +4738,7 @@ void __cpuinit init_idle(struct task_str
#ifdef CONFIG_SMP
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
@@ -125,7 +125,7 @@ Link: http://lkml.kernel.org/n/tip-275i87sl8e1jcamtchmehonm@git.kernel.org
if (p->sched_class && p->sched_class->set_cpus_allowed)
p->sched_class->set_cpus_allowed(p, new_mask);
p->nr_cpus_allowed = cpumask_weight(new_mask);
-@@ -4792,7 +4792,7 @@ int set_cpus_allowed_ptr(struct task_str
+@@ -4794,7 +4794,7 @@ int set_cpus_allowed_ptr(struct task_str
do_set_cpus_allowed(p, new_mask);
/* Can the task run on the task's current CPU? If so, we're done */
@@ -134,7 +134,7 @@ Link: http://lkml.kernel.org/n/tip-275i87sl8e1jcamtchmehonm@git.kernel.org
goto out;
dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-@@ -4811,6 +4811,7 @@ out:
+@@ -4813,6 +4813,7 @@ out:
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
@@ -142,7 +142,7 @@ Link: http://lkml.kernel.org/n/tip-275i87sl8e1jcamtchmehonm@git.kernel.org
void migrate_disable(void)
{
struct task_struct *p = current;
-@@ -4903,6 +4904,7 @@ void migrate_enable(void)
+@@ -4905,6 +4906,7 @@ void migrate_enable(void)
preempt_enable();
}
EXPORT_SYMBOL(migrate_enable);


@@ -13,7 +13,7 @@ Link: http://lkml.kernel.org/n/tip-cbz6hkl5r5mvwtx5s3tor2y6@git.kernel.org
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4831,7 +4831,19 @@ void migrate_disable(void)
+@@ -4833,7 +4833,19 @@ void migrate_disable(void)
preempt_enable();
return;
}
@@ -34,7 +34,7 @@ Link: http://lkml.kernel.org/n/tip-cbz6hkl5r5mvwtx5s3tor2y6@git.kernel.org
p->migrate_disable = 1;
mask = tsk_cpus_allowed(p);
-@@ -4842,7 +4854,7 @@ void migrate_disable(void)
+@@ -4844,7 +4856,7 @@ void migrate_disable(void)
p->sched_class->set_cpus_allowed(p, mask);
p->nr_cpus_allowed = cpumask_weight(mask);
}
@@ -43,7 +43,7 @@ Link: http://lkml.kernel.org/n/tip-cbz6hkl5r5mvwtx5s3tor2y6@git.kernel.org
preempt_enable();
}
EXPORT_SYMBOL(migrate_disable);
-@@ -4870,7 +4882,11 @@ void migrate_enable(void)
+@@ -4872,7 +4884,11 @@ void migrate_enable(void)
return;
}
@@ -56,7 +56,7 @@ Link: http://lkml.kernel.org/n/tip-cbz6hkl5r5mvwtx5s3tor2y6@git.kernel.org
p->migrate_disable = 0;
mask = tsk_cpus_allowed(p);
-@@ -4882,7 +4898,7 @@ void migrate_enable(void)
+@@ -4884,7 +4900,7 @@ void migrate_enable(void)
p->nr_cpus_allowed = cpumask_weight(mask);
}


@@ -310,7 +310,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(migrate_enable);
#else
-@@ -3117,6 +3164,7 @@ need_resched:
+@@ -3119,6 +3166,7 @@ need_resched:
put_prev_task(rq, prev);
next = pick_next_task(rq);
clear_tsk_need_resched(prev);
@@ -318,7 +318,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rq->skip_clock_update = 0;
if (likely(prev != next)) {
-@@ -3253,6 +3301,14 @@ asmlinkage void __sched notrace preempt_
+@@ -3255,6 +3303,14 @@ asmlinkage void __sched notrace preempt_
if (likely(ti->preempt_count || irqs_disabled()))
return;
@@ -333,7 +333,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
add_preempt_count_notrace(PREEMPT_ACTIVE);
/*
-@@ -4864,7 +4920,9 @@ void __cpuinit init_idle(struct task_str
+@@ -4866,7 +4922,9 @@ void __cpuinit init_idle(struct task_str
/* Set the preempt count _outside_ the spinlocks! */
task_thread_info(idle)->preempt_count = 0;


@@ -169,7 +169,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
switch_count = &prev->nivcsw;
if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
if (unlikely(signal_pending_state(prev->state, prev))) {
-@@ -4736,7 +4867,7 @@ void __cpuinit init_idle(struct task_str
+@@ -4738,7 +4869,7 @@ void __cpuinit init_idle(struct task_str
#ifdef CONFIG_SMP
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
@@ -178,7 +178,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (p->sched_class && p->sched_class->set_cpus_allowed)
p->sched_class->set_cpus_allowed(p, new_mask);
p->nr_cpus_allowed = cpumask_weight(new_mask);
-@@ -4811,124 +4942,6 @@ out:
+@@ -4813,124 +4944,6 @@ out:
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);


@@ -14,7 +14,7 @@ Cc: stable-rt@vger.kernel.org
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4119,10 +4119,13 @@ recheck:
+@@ -4121,10 +4121,13 @@ recheck:
}
/*


@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
dump_stack();
add_taint(TAINT_WARN);
}
-@@ -7310,6 +7322,13 @@ void __might_sleep(const char *file, int
+@@ -7312,6 +7324,13 @@ void __might_sleep(const char *file, int
debug_show_held_locks(current);
if (irqs_disabled())
print_irqtrace_events(current);


@@ -67,7 +67,7 @@ Cc: stable-rt@vger.kernel.org
* This can be both boosting and unboosting. task->pi_lock must be held.
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3766,7 +3766,8 @@ EXPORT_SYMBOL(sleep_on_timeout);
+@@ -3768,7 +3768,8 @@ EXPORT_SYMBOL(sleep_on_timeout);
* This function changes the 'effective' priority of a task. It does
* not touch ->normal_prio like __setscheduler().
*
@@ -77,7 +77,7 @@ Cc: stable-rt@vger.kernel.org
*/
void rt_mutex_setprio(struct task_struct *p, int prio)
{
-@@ -3989,20 +3990,25 @@ static struct task_struct *find_process_
+@@ -3991,20 +3992,25 @@ static struct task_struct *find_process_
return pid ? find_task_by_vpid(pid) : current;
}
@@ -107,7 +107,7 @@ Cc: stable-rt@vger.kernel.org
}
/*
-@@ -4024,6 +4030,7 @@ static bool check_same_owner(struct task
+@@ -4026,6 +4032,7 @@ static bool check_same_owner(struct task
static int __sched_setscheduler(struct task_struct *p, int policy,
const struct sched_param *param, bool user)
{
@@ -115,7 +115,7 @@ Cc: stable-rt@vger.kernel.org
int retval, oldprio, oldpolicy = -1, on_rq, running;
unsigned long flags;
const struct sched_class *prev_class;
-@@ -4151,6 +4158,25 @@ recheck:
+@@ -4153,6 +4160,25 @@ recheck:
task_rq_unlock(rq, p, &flags);
goto recheck;
}
@@ -141,7 +141,7 @@ Cc: stable-rt@vger.kernel.org
on_rq = p->on_rq;
running = task_current(rq, p);
if (on_rq)
-@@ -4158,9 +4184,6 @@ recheck:
+@@ -4160,9 +4186,6 @@ recheck:
if (running)
p->sched_class->put_prev_task(rq, p);
@@ -151,7 +151,7 @@ Cc: stable-rt@vger.kernel.org
prev_class = p->sched_class;
__setscheduler(rq, p, policy, param->sched_priority);
-@@ -4173,7 +4196,6 @@ recheck:
+@@ -4175,7 +4198,6 @@ recheck:
*/
enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0);
}


@@ -50,7 +50,7 @@ Cc: stable-rt@vger.kernel.org
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4166,8 +4166,13 @@ recheck:
+@@ -4168,8 +4168,13 @@ recheck:
if (running)
p->sched_class->set_curr_task(rq);


@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4736,11 +4736,12 @@ void __cpuinit init_idle(struct task_str
+@@ -4738,11 +4738,12 @@ void __cpuinit init_idle(struct task_str
#ifdef CONFIG_SMP
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
@@ -82,7 +82,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -4791,7 +4792,7 @@ int set_cpus_allowed_ptr(struct task_str
+@@ -4793,7 +4794,7 @@ int set_cpus_allowed_ptr(struct task_str
do_set_cpus_allowed(p, new_mask);
/* Can the task run on the task's current CPU? If so, we're done */
@@ -91,7 +91,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
goto out;
dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-@@ -4810,6 +4811,83 @@ out:
+@@ -4812,6 +4813,83 @@ out:
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);


@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4888,12 +4888,14 @@ void migrate_enable(void)
+@@ -4890,12 +4890,14 @@ void migrate_enable(void)
*/
rq = this_rq();
raw_spin_lock_irqsave(&rq->lock, flags);


@@ -51,7 +51,7 @@ Link: http://lkml.kernel.org/n/tip-wbot4vsmwhi8vmbf83hsclk6@git.kernel.org
cpumask_t cpus_allowed;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4819,6 +4819,17 @@ void migrate_disable(void)
+@@ -4821,6 +4821,17 @@ void migrate_disable(void)
unsigned long flags;
struct rq *rq;
@@ -69,7 +69,7 @@ Link: http://lkml.kernel.org/n/tip-wbot4vsmwhi8vmbf83hsclk6@git.kernel.org
preempt_disable();
if (p->migrate_disable) {
p->migrate_disable++;
-@@ -4867,6 +4878,16 @@ void migrate_enable(void)
+@@ -4869,6 +4880,16 @@ void migrate_enable(void)
unsigned long flags;
struct rq *rq;


@@ -0,0 +1,39 @@
From b24ee416f22bd2a2325b8f6afa5a4065dd3560e9 Mon Sep 17 00:00:00 2001
From: Steven Rostedt <rostedt@goodmis.org>
Date: Mon, 18 Mar 2013 15:12:49 -0400
Subject: [PATCH] sched/workqueue: Only wake up idle workers if not blocked on
sleeping spin lock

In -rt, most spin_locks() turn into mutexes. One of these spin_lock
conversions is performed on the workqueue gcwq->lock. When the idle
worker is woken, the first thing it will do is grab that same lock and
it too will block, possibly jumping into the same code, but because
nr_running would already be decremented it prevents an infinite loop.

But this is still a waste of CPU cycles, and it doesn't follow the method
of mainline, as new workers should only be woken when a worker thread is
truly going to sleep, and not just blocked on a spin_lock().

Check the saved_state too before waking up new workers.

Cc: stable-rt@vger.kernel.org
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/sched/core.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2955,8 +2955,10 @@ need_resched:
* If a worker went to sleep, notify and ask workqueue
* whether it wants to wake up a task to maintain
* concurrency.
+ * Only call wake up if prev isn't blocked on a sleeping
+ * spin lock.
*/
- if (prev->flags & PF_WQ_WORKER) {
+ if (prev->flags & PF_WQ_WORKER && !prev->saved_state) {
struct task_struct *to_wakeup;
to_wakeup = wq_worker_sleeping(prev, cpu);
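
For background on the check itself: on PREEMPT_RT, a task that blocks on a
converted ("sleeping") spin lock has its intended sleep state stashed in
task_struct::saved_state by the lock's slow path and restored once the lock
is acquired, so at schedule() time a non-zero saved_state marks a worker
that is merely waiting for such a lock rather than truly going to sleep.
Below is a minimal userspace model of just that predicate — the struct and
helper are illustrative stand-ins, not kernel code; only the PF_WQ_WORKER
flag value and the shape of the test are taken from the patch.

#include <stdio.h>

#define PF_WQ_WORKER 0x00000020 /* "I'm a workqueue worker", value as in 3.8 */

/* Stand-in for the two task_struct fields the check inspects. */
struct task {
	unsigned int flags;
	long saved_state; /* 0 unless parked by the rt spin lock slow path */
};

/* The patched condition: notify the workqueue only for a real sleep. */
static int should_notify_workqueue(const struct task *prev)
{
	return (prev->flags & PF_WQ_WORKER) && !prev->saved_state;
}

int main(void)
{
	/* Worker that set TASK_INTERRUPTIBLE (1) and called schedule(). */
	struct task truly_sleeping = { PF_WQ_WORKER, 0 };

	/* Same worker, but it hit a contended converted spin lock on the
	 * way to sleep; the slow path stashed TASK_INTERRUPTIBLE here. */
	struct task lock_blocked = { PF_WQ_WORKER, 1 };

	printf("truly sleeping  -> notify: %d\n",
	       should_notify_workqueue(&truly_sleeping)); /* prints 1 */
	printf("blocked on lock -> notify: %d\n",
	       should_notify_workqueue(&lock_blocked));   /* prints 0 */
	return 0;
}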


@@ -73,6 +73,7 @@ timekeeping-make-jiffies-lock-internal.patch
timekeeping-move-lock-out-of-timekeeper.patch
timekeeping-split-timekeeper-lock.patch
timekeeping-store-cycle-last-in-timekeeper.patch
+time-timekeeping-shadow-tk-cycle_last-together-with-.patch
timekeeping-delay-clock-cycle-last-update.patch
timekeeping-implement-shadow-timekeeper.patch
timekeeping-shorten-seq-count-region.patch
@@ -341,6 +342,7 @@ cond-resched-lock-rt-tweak.patch
sched-disable-ttwu-queue.patch
sched-disable-rt-group-sched-on-rt.patch
sched-ttwu-ensure-success-return-is-correct.patch
+sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
# STOP MACHINE
stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch


@@ -0,0 +1,27 @@
From c27eb2e0ab0b5acd96a4b62288976f1b72789b3e Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 30 Apr 2013 18:53:55 +0200
Subject: [PATCH] time/timekeeping: shadow tk->cycle_last together with
clock->cycle_last

Commit ("timekeeping: Store cycle_last value in timekeeper struct as
well") introduced a tk->cycle_last copy of the value, which needs to be
reset on the resume path as well, or else ktime_get() will think that
time increased.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/time/timekeeping.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -770,7 +770,7 @@ static void timekeeping_resume(void)
__timekeeping_inject_sleeptime(tk, &ts);
}
/* re-base the last cycle value */
- tk->clock->cycle_last = tk->clock->read(tk->clock);
+ tk->cycle_last = tk->clock->cycle_last = tk->clock->read(tk->clock);
tk->ntp_error = 0;
timekeeping_suspended = 0;
timekeeping_update(tk, false);
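
To see concretely why a stale tk->cycle_last misleads ktime_get(): the
timekeeping core converts counter reads into elapsed cycles as roughly
(now - cycle_last) & mask before scaling, so if cycle_last is not re-based
after resume, the whole suspend interval (or garbage, if the counter was
reset) shows up as a spurious delta. A toy userspace demonstration with
made-up counter values follows; clocksource_delta() mirrors the kernel's
masked subtraction, everything else here is a stand-in.

#include <stdio.h>
#include <stdint.h>

/* Same masked subtraction the clocksource code uses for elapsed cycles. */
static uint64_t clocksource_delta(uint64_t now, uint64_t last, uint64_t mask)
{
	return (now - last) & mask;
}

int main(void)
{
	const uint64_t mask = UINT64_MAX;  /* pretend 64-bit free-running counter */
	uint64_t cycle_last = 1000;        /* snapshot taken before suspend */
	uint64_t now = 5000000000ULL;      /* counter value after a long suspend */

	/* Stale cycle_last: the suspend interval leaks into ktime_get(). */
	printf("stale delta:   %llu cycles\n",
	       (unsigned long long)clocksource_delta(now, cycle_last, mask));

	/* What the fix does on resume: re-base both cycle_last copies. */
	cycle_last = now;
	printf("rebased delta: %llu cycles\n",
	       (unsigned long long)clocksource_delta(now, cycle_last, mask));
	return 0;
}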


@@ -91,7 +91,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
write_seqcount_end(&timekeeper_seq);
raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
@@ -773,7 +779,7 @@ static void timekeeping_resume(void)
-tk->clock->cycle_last = tk->clock->read(tk->clock);
+tk->cycle_last = tk->clock->cycle_last = tk->clock->read(tk->clock);
tk->ntp_error = 0;
timekeeping_suspended = 0;
- timekeeping_update(tk, false);


@@ -73,6 +73,7 @@ features/all/rt/timekeeping-make-jiffies-lock-internal.patch
features/all/rt/timekeeping-move-lock-out-of-timekeeper.patch
features/all/rt/timekeeping-split-timekeeper-lock.patch
features/all/rt/timekeeping-store-cycle-last-in-timekeeper.patch
+features/all/rt/time-timekeeping-shadow-tk-cycle_last-together-with-.patch
features/all/rt/timekeeping-delay-clock-cycle-last-update.patch
features/all/rt/timekeeping-implement-shadow-timekeeper.patch
features/all/rt/timekeeping-shorten-seq-count-region.patch
@@ -341,6 +342,7 @@ features/all/rt/cond-resched-lock-rt-tweak.patch
features/all/rt/sched-disable-ttwu-queue.patch
features/all/rt/sched-disable-rt-group-sched-on-rt.patch
features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch
+features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
# STOP MACHINE
features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch