[rt] reenable rt featureset

svn path=/dists/trunk/linux/; revision=19458
Uwe Kleine-König 2012-10-24 14:29:13 +00:00
parent b5aba14e0d
commit 6baf5be73e
295 changed files with 8534 additions and 8104 deletions

debian/changelog

@@ -9,6 +9,9 @@ linux (3.6.3-1~experimental.1) UNRELEASED; urgency=low
   [ Ben Hutchings ]
   * aufs: Update to aufs3.x-rcN-20120827
 
+  [ Uwe Kleine-König ]
+  * reenable the rt featureset using 3.6.3-rt6
+
  -- Bastian Blank <waldi@debian.org>  Thu, 04 Oct 2012 17:50:39 +0200
 
 linux (3.5.5-1~experimental.1) experimental; urgency=low


@@ -1,7 +1,7 @@
 [base]
 featuresets:
  none
-# rt
+ rt
 kernel-arch: x86
 
 [build]


@@ -24,7 +24,7 @@ arches:
 compiler: gcc-4.6
 featuresets:
  none
-# rt
+ rt
 
 [featureset-rt_base]
 enabled: true


@@ -1,7 +1,7 @@
 [base]
 featuresets:
  none
-# rt
+ rt
 kernel-arch: x86
 
 [description]


@@ -1,35 +0,0 @@
From c9cf7a34892f415c473689ecd7cb82815481e39c Mon Sep 17 00:00:00 2001
From: Steven Rostedt <rostedt@goodmis.org>
Date: Thu, 7 Jun 2012 09:49:40 -0400
Subject: [001/256] Revert "workqueue: skip nr_running sanity check in
worker_enter_idle() if trustee is active"
This reverts commit 24312d34c95702e51240f58c073db30630170fbf.
Reported-by: Ibrahim Umar <iambaim@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
kernel/workqueue.c | 9 ++-------
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7da267c..5abf42f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1210,13 +1210,8 @@ static void worker_enter_idle(struct worker *worker)
} else
wake_up_all(&gcwq->trustee_wait);
- /*
- * Sanity check nr_running. Because trustee releases gcwq->lock
- * between setting %WORKER_ROGUE and zapping nr_running, the
- * warning may trigger spuriously. Check iff trustee is idle.
- */
- WARN_ON_ONCE(gcwq->trustee_state == TRUSTEE_DONE &&
- gcwq->nr_workers == gcwq->nr_idle &&
+ /* sanity check nr_running */
+ WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
atomic_read(get_gcwq_nr_running(gcwq->cpu)));
}


@@ -1,134 +0,0 @@
From 82753a6da41f383c6649d5e7de36db554dea7b12 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <a.p.zijlstra@chello.nl>
Date: Mon, 28 Nov 2011 19:51:51 +0100
Subject: [002/256] slab, lockdep: Annotate all slab caches
Currently we only annotate the kmalloc caches, annotate all of them.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Hans Schillstrom <hans@schillstrom.com>
Cc: Christoph Lameter <cl@gentwo.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Sitsofe Wheeler <sitsofe@yahoo.com>
Cc: linux-mm@kvack.org
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/n/tip-10bey2cgpcvtbdkgigaoab8w@git.kernel.org
---
mm/slab.c | 52 ++++++++++++++++++++++++++++------------------------
1 file changed, 28 insertions(+), 24 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index e901a36..1fd4b4d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -611,6 +611,12 @@ int slab_is_available(void)
return g_cpucache_up >= EARLY;
}
+/*
+ * Guard access to the cache-chain.
+ */
+static DEFINE_MUTEX(cache_chain_mutex);
+static struct list_head cache_chain;
+
#ifdef CONFIG_LOCKDEP
/*
@@ -672,38 +678,41 @@ static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
slab_set_debugobj_lock_classes_node(cachep, node);
}
-static void init_node_lock_keys(int q)
+static void init_lock_keys(struct kmem_cache *cachep, int node)
{
- struct cache_sizes *s = malloc_sizes;
+ struct kmem_list3 *l3;
if (g_cpucache_up < LATE)
return;
- for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
- struct kmem_list3 *l3;
+ l3 = cachep->nodelists[node];
+ if (!l3 || OFF_SLAB(cachep))
+ return;
- l3 = s->cs_cachep->nodelists[q];
- if (!l3 || OFF_SLAB(s->cs_cachep))
- continue;
+ slab_set_lock_classes(cachep, &on_slab_l3_key, &on_slab_alc_key, node);
+}
- slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key,
- &on_slab_alc_key, q);
- }
+static void init_node_lock_keys(int node)
+{
+ struct kmem_cache *cachep;
+
+ list_for_each_entry(cachep, &cache_chain, next)
+ init_lock_keys(cachep, node);
}
-static inline void init_lock_keys(void)
+static inline void init_cachep_lock_keys(struct kmem_cache *cachep)
{
int node;
for_each_node(node)
- init_node_lock_keys(node);
+ init_lock_keys(cachep, node);
}
#else
-static void init_node_lock_keys(int q)
+static void init_node_lock_keys(int node)
{
}
-static inline void init_lock_keys(void)
+static void init_cachep_lock_keys(struct kmem_cache *cachep)
{
}
@@ -716,12 +725,6 @@ static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
}
#endif
-/*
- * Guard access to the cache-chain.
- */
-static DEFINE_MUTEX(cache_chain_mutex);
-static struct list_head cache_chain;
-
static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
@@ -1685,14 +1688,13 @@ void __init kmem_cache_init_late(void)
g_cpucache_up = LATE;
- /* Annotate slab for lockdep -- annotate the malloc caches */
- init_lock_keys();
-
/* 6) resize the head arrays to their final sizes */
mutex_lock(&cache_chain_mutex);
- list_for_each_entry(cachep, &cache_chain, next)
+ list_for_each_entry(cachep, &cache_chain, next) {
+ init_cachep_lock_keys(cachep);
if (enable_cpucache(cachep, GFP_NOWAIT))
BUG();
+ }
mutex_unlock(&cache_chain_mutex);
/* Done! */
@@ -2544,6 +2546,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
slab_set_debugobj_lock_classes(cachep);
}
+ init_cachep_lock_keys(cachep);
+
/* cache setup completed, link it into the list */
list_add(&cachep->next, &cache_chain);
oops:


@@ -1,103 +0,0 @@
From fd87afd117d048ba4d78f4b910901074408a799a Mon Sep 17 00:00:00 2001
From: Mike Galbraith <mgalbraith@suse.de>
Date: Tue, 8 May 2012 12:20:58 +0200
Subject: [004/256] tick: Add tick skew boot option
Let the user decide whether power consumption or jitter is the
more important consideration for their machines.
Quoting removal commit af5ab277ded04bd9bc6b048c5a2f0e7d70ef0867:
"Historically, Linux has tried to make the regular timer tick on the
various CPUs not happen at the same time, to avoid contention on
xtime_lock.
Nowadays, with the tickless kernel, this contention no longer happens
since time keeping and updating are done differently. In addition,
this skew is actually hurting power consumption in a measurable way on
many-core systems."
Problems:
- Contrary to the above, systems do encounter contention on both
xtime_lock and RCU structure locks when the tick is synchronized.
- Moderate sized RT systems suffer intolerable jitter due to the tick
being synchronized.
- SGI reports the same for their large systems.
- Fully utilized systems reap no power saving benefit from skew removal,
but do suffer from resulting induced lock contention.
- 0209f649 rcu: limit rcu_node leaf-level fanout
This patch was born to combat lock contention which testing showed
to have been _induced by_ skew removal. Skew the tick, contention
disappeared virtually completely.
[ tglx: build fix ]
Signed-off-by: Mike Galbraith <mgalbraith@suse.de>
Link: http://lkml.kernel.org/r/1336472458.21924.78.camel@marge.simpson.net
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
Documentation/kernel-parameters.txt | 9 +++++++++
kernel/time/tick-sched.c | 18 ++++++++++++++++++
2 files changed, 27 insertions(+)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index c1601e5..1e0150e 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2426,6 +2426,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
sched_debug [KNL] Enables verbose scheduler debug messages.
+ skew_tick= [KNL] Offset the periodic timer tick per cpu to mitigate
+ xtime_lock contention on larger systems, and/or RCU lock
+ contention on all systems with CONFIG_MAXSMP set.
+ Format: { "0" | "1" }
+ 0 -- disable. (may be 1 via CONFIG_CMDLINE="skew_tick=1"
+ 1 -- enable.
+ Note: increases power consumption, thus should only be
+ enabled if running jitter sensitive (HPC/RT) workloads.
+
security= [SECURITY] Choose a security module to enable at boot.
If this boot parameter is not specified, only the first
security module asking for security registration will be
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 6a3a5b9..efd3866 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -814,6 +814,16 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
return HRTIMER_RESTART;
}
+static int sched_skew_tick;
+
+static int __init skew_tick(char *str)
+{
+ get_option(&str, &sched_skew_tick);
+
+ return 0;
+}
+early_param("skew_tick", skew_tick);
+
/**
* tick_setup_sched_timer - setup the tick emulation timer
*/
@@ -831,6 +841,14 @@ void tick_setup_sched_timer(void)
/* Get the next period (per cpu) */
hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
+ /* Offset the tick to avert xtime_lock contention. */
+ if (sched_skew_tick) {
+ u64 offset = ktime_to_ns(tick_period) >> 1;
+ do_div(offset, num_possible_cpus());
+ offset *= smp_processor_id();
+ hrtimer_add_expires_ns(&ts->sched_timer, offset);
+ }
+
for (;;) {
hrtimer_forward(&ts->sched_timer, now, tick_period);
hrtimer_start_expires(&ts->sched_timer,
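
To see what the hunk above computes, here is a minimal standalone sketch of the per-cpu offset arithmetic from tick_setup_sched_timer(). The tick period and CPU count are assumed example values, not taken from the patch.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Assumed values for illustration: a 10 ms tick (HZ=100) on 4 CPUs. */
	uint64_t tick_period_ns = 10000000;
	unsigned int ncpus = 4;

	for (unsigned int cpu = 0; cpu < ncpus; cpu++) {
		/* Mirrors the patch: half a tick period, divided evenly
		 * across the possible CPUs, scaled by the CPU id. */
		uint64_t offset = (tick_period_ns >> 1) / ncpus * cpu;
		printf("cpu%u: first tick skewed by %llu ns\n",
		       cpu, (unsigned long long)offset);
	}
	return 0;
}

With these assumed numbers the ticks land at 0, 1.25, 2.5 and 3.75 ms into the period, so no two CPUs fire at the same instant.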


@@ -1,263 +0,0 @@
From 5d2ed4b009866a4c6fbb6f8cf371e56403ca75de Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 22 Jun 2011 19:47:03 +0200
Subject: [007/256] sched: Distangle worker accounting from rq->lock
The worker accounting for cpu bound workers is plugged into the core
scheduler code and the wakeup code. This is not a hard requirement and
can be avoided by keeping track of the state in the workqueue code
itself.
Keep track of the sleeping state in the worker itself and call the
notifier before entering the core scheduler. There might be false
positives when the task is woken between that call and actually
scheduling, but that's not really different from scheduling and being
woken immediately after switching away. There is also no harm from
updating nr_running when the task returns from scheduling instead of
accounting it in the wakeup code.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110622174919.135236139@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/sched/core.c | 66 +++++++++++----------------------------------
kernel/workqueue.c | 67 +++++++++++++++++++++-------------------------
kernel/workqueue_sched.h | 5 ++--
3 files changed, 47 insertions(+), 91 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2000e06..2db74b7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1399,10 +1399,6 @@ static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
{
activate_task(rq, p, en_flags);
p->on_rq = 1;
-
- /* if a worker is waking up, notify workqueue */
- if (p->flags & PF_WQ_WORKER)
- wq_worker_waking_up(p, cpu_of(rq));
}
/*
@@ -1642,40 +1638,6 @@ out:
}
/**
- * try_to_wake_up_local - try to wake up a local task with rq lock held
- * @p: the thread to be awakened
- *
- * Put @p on the run-queue if it's not already there. The caller must
- * ensure that this_rq() is locked, @p is bound to this_rq() and not
- * the current task.
- */
-static void try_to_wake_up_local(struct task_struct *p)
-{
- struct rq *rq = task_rq(p);
-
- BUG_ON(rq != this_rq());
- BUG_ON(p == current);
- lockdep_assert_held(&rq->lock);
-
- if (!raw_spin_trylock(&p->pi_lock)) {
- raw_spin_unlock(&rq->lock);
- raw_spin_lock(&p->pi_lock);
- raw_spin_lock(&rq->lock);
- }
-
- if (!(p->state & TASK_NORMAL))
- goto out;
-
- if (!p->on_rq)
- ttwu_activate(rq, p, ENQUEUE_WAKEUP);
-
- ttwu_do_wakeup(rq, p, 0);
- ttwu_stat(p, smp_processor_id(), 0);
-out:
- raw_spin_unlock(&p->pi_lock);
-}
-
-/**
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
*
@@ -3200,19 +3162,6 @@ need_resched:
} else {
deactivate_task(rq, prev, DEQUEUE_SLEEP);
prev->on_rq = 0;
-
- /*
- * If a worker went to sleep, notify and ask workqueue
- * whether it wants to wake up a task to maintain
- * concurrency.
- */
- if (prev->flags & PF_WQ_WORKER) {
- struct task_struct *to_wakeup;
-
- to_wakeup = wq_worker_sleeping(prev, cpu);
- if (to_wakeup)
- try_to_wake_up_local(to_wakeup);
- }
}
switch_count = &prev->nvcsw;
}
@@ -3255,6 +3204,14 @@ static inline void sched_submit_work(struct task_struct *tsk)
{
if (!tsk->state || tsk_is_pi_blocked(tsk))
return;
+
+ /*
+ * If a worker went to sleep, notify and ask workqueue whether
+ * it wants to wake up a task to maintain concurrency.
+ */
+ if (tsk->flags & PF_WQ_WORKER)
+ wq_worker_sleeping(tsk);
+
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
@@ -3263,12 +3220,19 @@ static inline void sched_submit_work(struct task_struct *tsk)
blk_schedule_flush_plug(tsk);
}
+static inline void sched_update_worker(struct task_struct *tsk)
+{
+ if (tsk->flags & PF_WQ_WORKER)
+ wq_worker_running(tsk);
+}
+
asmlinkage void __sched schedule(void)
{
struct task_struct *tsk = current;
sched_submit_work(tsk);
__schedule();
+ sched_update_worker(tsk);
}
EXPORT_SYMBOL(schedule);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 5abf42f..50e0d00 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -137,6 +137,7 @@ struct worker {
unsigned int flags; /* X: flags */
int id; /* I: worker id */
struct work_struct rebind_work; /* L: rebind worker to cpu */
+ int sleeping; /* None */
};
/*
@@ -655,66 +656,58 @@ static void wake_up_worker(struct global_cwq *gcwq)
}
/**
- * wq_worker_waking_up - a worker is waking up
- * @task: task waking up
- * @cpu: CPU @task is waking up to
+ * wq_worker_running - a worker is running again
+ * @task: task returning from sleep
*
- * This function is called during try_to_wake_up() when a worker is
- * being awoken.
- *
- * CONTEXT:
- * spin_lock_irq(rq->lock)
+ * This function is called when a worker returns from schedule()
*/
-void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
+void wq_worker_running(struct task_struct *task)
{
struct worker *worker = kthread_data(task);
+ if (!worker->sleeping)
+ return;
if (!(worker->flags & WORKER_NOT_RUNNING))
- atomic_inc(get_gcwq_nr_running(cpu));
+ atomic_inc(get_gcwq_nr_running(smp_processor_id()));
+ worker->sleeping = 0;
}
/**
* wq_worker_sleeping - a worker is going to sleep
* @task: task going to sleep
- * @cpu: CPU in question, must be the current CPU number
- *
- * This function is called during schedule() when a busy worker is
- * going to sleep. Worker on the same cpu can be woken up by
- * returning pointer to its task.
- *
- * CONTEXT:
- * spin_lock_irq(rq->lock)
*
- * RETURNS:
- * Worker task on @cpu to wake up, %NULL if none.
+ * This function is called from schedule() when a busy worker is
+ * going to sleep.
*/
-struct task_struct *wq_worker_sleeping(struct task_struct *task,
- unsigned int cpu)
+void wq_worker_sleeping(struct task_struct *task)
{
- struct worker *worker = kthread_data(task), *to_wakeup = NULL;
- struct global_cwq *gcwq = get_gcwq(cpu);
- atomic_t *nr_running = get_gcwq_nr_running(cpu);
+ struct worker *worker = kthread_data(task);
+ struct global_cwq *gcwq;
+ int cpu;
if (worker->flags & WORKER_NOT_RUNNING)
- return NULL;
+ return;
+
+ if (WARN_ON_ONCE(worker->sleeping))
+ return;
- /* this can only happen on the local cpu */
- BUG_ON(cpu != raw_smp_processor_id());
+ worker->sleeping = 1;
+ cpu = smp_processor_id();
+ gcwq = get_gcwq(cpu);
+ spin_lock_irq(&gcwq->lock);
/*
* The counterpart of the following dec_and_test, implied mb,
* worklist not empty test sequence is in insert_work().
* Please read comment there.
- *
- * NOT_RUNNING is clear. This means that trustee is not in
- * charge and we're running on the local cpu w/ rq lock held
- * and preemption disabled, which in turn means that none else
- * could be manipulating idle_list, so dereferencing idle_list
- * without gcwq lock is safe.
*/
- if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
- to_wakeup = first_worker(gcwq);
- return to_wakeup ? to_wakeup->task : NULL;
+ if (atomic_dec_and_test(get_gcwq_nr_running(cpu)) &&
+ !list_empty(&gcwq->worklist)) {
+ worker = first_worker(gcwq);
+ if (worker)
+ wake_up_process(worker->task);
+ }
+ spin_unlock_irq(&gcwq->lock);
}
/**
diff --git a/kernel/workqueue_sched.h b/kernel/workqueue_sched.h
index 2d10fc9..3bf73e2 100644
--- a/kernel/workqueue_sched.h
+++ b/kernel/workqueue_sched.h
@@ -4,6 +4,5 @@
* Scheduler hooks for concurrency managed workqueue. Only to be
* included from sched.c and workqueue.c.
*/
-void wq_worker_waking_up(struct task_struct *task, unsigned int cpu);
-struct task_struct *wq_worker_sleeping(struct task_struct *task,
- unsigned int cpu);
+void wq_worker_running(struct task_struct *task);
+void wq_worker_sleeping(struct task_struct *task);
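
A rough userspace analogue of the new hook placement; all names, stubs and the flag value below are stand-ins for this sketch only. The point is the shape: the workqueue is told a worker is about to sleep before the core scheduler is entered, and the accounting is fixed up after the task runs again.

#include <stdio.h>

#define PF_WQ_WORKER 0x00000020	/* assumed flag value, sketch only */

struct task { unsigned int flags; };

static void wq_worker_sleeping(struct task *t) { (void)t; puts("worker sleeping"); }
static void wq_worker_running(struct task *t)  { (void)t; puts("worker running"); }
static void __schedule_stub(void)              { puts("context switch"); }

static void schedule_sketch(struct task *tsk)
{
	if (tsk->flags & PF_WQ_WORKER)
		wq_worker_sleeping(tsk);	/* notify before scheduling */
	__schedule_stub();
	if (tsk->flags & PF_WQ_WORKER)
		wq_worker_running(tsk);		/* fix up nr_running on return */
}

int main(void)
{
	struct task worker = { .flags = PF_WQ_WORKER };
	schedule_sketch(&worker);
	return 0;
}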


@@ -1,24 +0,0 @@
From c660f897922204ee48e592122be0f02cbca2e045 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 18 Jul 2011 21:32:10 +0200
Subject: [008/256] mips-enable-interrupts-in-signal.patch
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/mips/kernel/signal.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index d5a338a..ab4e20a 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -588,6 +588,9 @@ static void do_signal(struct pt_regs *regs)
if (!user_mode(regs))
return;
+ local_irq_enable();
+ preempt_check_resched();
+
if (test_thread_flag(TIF_RESTORE_SIGMASK))
oldset = &current->saved_sigmask;
else


@@ -1,24 +0,0 @@
From 903dd2be6f21b3845bbf13d15454347d0a30f0dc Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 16 Jul 2011 16:27:13 +0200
Subject: [009/256] arm-enable-interrupts-in-signal-code.patch
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/arm/kernel/signal.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index d68d1b6..13db45b 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -617,6 +617,9 @@ static void do_signal(struct pt_regs *regs, int syscall)
if (!user_mode(regs))
return;
+ local_irq_enable();
+ preempt_check_resched();
+
/*
* If we were from a system call, check for system call restarting...
*/


@@ -1,522 +0,0 @@
From 2404f6318f3cbc6b9a85c47edb27919cae54c952 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 1 Mar 2012 15:14:06 +0100
Subject: [012/256] timekeeping: Split xtime_lock
xtime_lock is going to be split apart in mainline, so we can shorten
the seqcount protected regions and avoid updating seqcount in some
code paths. This is a straightforward split, so we can avoid the
whole mess with raw seqlocks for RT.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/time/jiffies.c | 4 +-
kernel/time/tick-common.c | 10 ++--
kernel/time/tick-internal.h | 3 +-
kernel/time/tick-sched.c | 16 +++---
kernel/time/timekeeping.c | 114 +++++++++++++++++++++++++------------------
5 files changed, 87 insertions(+), 60 deletions(-)
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index a470154..21940eb 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -74,9 +74,9 @@ u64 get_jiffies_64(void)
u64 ret;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqcount_begin(&xtime_seq);
ret = jiffies_64;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqcount_retry(&xtime_seq, seq));
return ret;
}
EXPORT_SYMBOL(get_jiffies_64);
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index da6c9ec..39de540 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -63,13 +63,15 @@ int tick_is_oneshot_available(void)
static void tick_periodic(int cpu)
{
if (tick_do_timer_cpu == cpu) {
- write_seqlock(&xtime_lock);
+ raw_spin_lock(&xtime_lock);
+ write_seqcount_begin(&xtime_seq);
/* Keep track of the next tick event */
tick_next_period = ktime_add(tick_next_period, tick_period);
do_timer(1);
- write_sequnlock(&xtime_lock);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock(&xtime_lock);
}
update_process_times(user_mode(get_irq_regs()));
@@ -130,9 +132,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
ktime_t next;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqcount_begin(&xtime_seq);
next = tick_next_period;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqcount_retry(&xtime_seq, seq));
clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 4e265b9..c91100d 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -141,4 +141,5 @@ static inline int tick_device_is_functional(struct clock_event_device *dev)
#endif
extern void do_timer(unsigned long ticks);
-extern seqlock_t xtime_lock;
+extern raw_spinlock_t xtime_lock;
+extern seqcount_t xtime_seq;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index efd3866..a607a7c 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -56,7 +56,8 @@ static void tick_do_update_jiffies64(ktime_t now)
return;
/* Reevalute with xtime_lock held */
- write_seqlock(&xtime_lock);
+ raw_spin_lock(&xtime_lock);
+ write_seqcount_begin(&xtime_seq);
delta = ktime_sub(now, last_jiffies_update);
if (delta.tv64 >= tick_period.tv64) {
@@ -79,7 +80,8 @@ static void tick_do_update_jiffies64(ktime_t now)
/* Keep the tick_next_period variable up to date */
tick_next_period = ktime_add(last_jiffies_update, tick_period);
}
- write_sequnlock(&xtime_lock);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock(&xtime_lock);
}
/*
@@ -89,12 +91,14 @@ static ktime_t tick_init_jiffy_update(void)
{
ktime_t period;
- write_seqlock(&xtime_lock);
+ raw_spin_lock(&xtime_lock);
+ write_seqcount_begin(&xtime_seq);
/* Did we start the jiffies update yet ? */
if (last_jiffies_update.tv64 == 0)
last_jiffies_update = tick_next_period;
period = last_jiffies_update;
- write_sequnlock(&xtime_lock);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock(&xtime_lock);
return period;
}
@@ -316,11 +320,11 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
ts->idle_calls++;
/* Read jiffies and the time when jiffies were updated last */
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqcount_begin(&xtime_seq);
last_update = last_jiffies_update;
last_jiffies = jiffies;
time_delta = timekeeping_max_deferment();
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqcount_retry(&xtime_seq, seq));
if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
arch_needs_cpu(cpu)) {
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index d42574df..e8e95ee 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -70,8 +70,9 @@ struct timekeeper {
/* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
struct timespec raw_time;
- /* Seqlock for all timekeeper values */
- seqlock_t lock;
+ /* Open coded seqlock for all timekeeper values */
+ seqcount_t seq;
+ raw_spinlock_t lock;
};
static struct timekeeper timekeeper;
@@ -80,7 +81,8 @@ static struct timekeeper timekeeper;
* This read-write spinlock protects us from races in SMP while
* playing with xtime.
*/
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
+__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(xtime_lock);
+seqcount_t xtime_seq;
/* flag for if timekeeping is suspended */
@@ -228,7 +230,7 @@ void getnstimeofday(struct timespec *ts)
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
*ts = timekeeper.xtime;
nsecs = timekeeping_get_ns();
@@ -236,7 +238,7 @@ void getnstimeofday(struct timespec *ts)
/* If arch requires, add in gettimeoffset() */
nsecs += arch_gettimeoffset();
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
timespec_add_ns(ts, nsecs);
}
@@ -251,7 +253,7 @@ ktime_t ktime_get(void)
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
secs = timekeeper.xtime.tv_sec +
timekeeper.wall_to_monotonic.tv_sec;
nsecs = timekeeper.xtime.tv_nsec +
@@ -260,7 +262,7 @@ ktime_t ktime_get(void)
/* If arch requires, add in gettimeoffset() */
nsecs += arch_gettimeoffset();
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
/*
* Use ktime_set/ktime_add_ns to create a proper ktime on
* 32-bit architectures without CONFIG_KTIME_SCALAR.
@@ -286,14 +288,14 @@ void ktime_get_ts(struct timespec *ts)
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
*ts = timekeeper.xtime;
tomono = timekeeper.wall_to_monotonic;
nsecs = timekeeping_get_ns();
/* If arch requires, add in gettimeoffset() */
nsecs += arch_gettimeoffset();
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
ts->tv_nsec + tomono.tv_nsec + nsecs);
@@ -321,7 +323,7 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
do {
u32 arch_offset;
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
*ts_raw = timekeeper.raw_time;
*ts_real = timekeeper.xtime;
@@ -334,7 +336,7 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
nsecs_raw += arch_offset;
nsecs_real += arch_offset;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
timespec_add_ns(ts_raw, nsecs_raw);
timespec_add_ns(ts_real, nsecs_real);
@@ -373,7 +375,8 @@ int do_settimeofday(const struct timespec *tv)
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);
timekeeping_forward_now();
@@ -385,7 +388,8 @@ int do_settimeofday(const struct timespec *tv)
timekeeper.xtime = *tv;
timekeeping_update(true);
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);
/* signal hrtimers about time change */
clock_was_set();
@@ -409,7 +413,8 @@ int timekeeping_inject_offset(struct timespec *ts)
if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);
timekeeping_forward_now();
@@ -419,7 +424,8 @@ int timekeeping_inject_offset(struct timespec *ts)
timekeeping_update(true);
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);
/* signal hrtimers about time change */
clock_was_set();
@@ -440,7 +446,8 @@ static int change_clocksource(void *data)
new = (struct clocksource *) data;
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);
timekeeping_forward_now();
if (!new->enable || new->enable(new) == 0) {
@@ -451,7 +458,8 @@ static int change_clocksource(void *data)
}
timekeeping_update(true);
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);
return 0;
}
@@ -498,11 +506,11 @@ void getrawmonotonic(struct timespec *ts)
s64 nsecs;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
nsecs = timekeeping_get_ns_raw();
*ts = timekeeper.raw_time;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
timespec_add_ns(ts, nsecs);
}
@@ -518,11 +526,11 @@ int timekeeping_valid_for_hres(void)
int ret;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
return ret;
}
@@ -535,11 +543,11 @@ u64 timekeeping_max_deferment(void)
unsigned long seq;
u64 ret;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
ret = timekeeper.clock->max_idle_ns;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
return ret;
}
@@ -586,11 +594,13 @@ void __init timekeeping_init(void)
read_persistent_clock(&now);
read_boot_clock(&boot);
- seqlock_init(&timekeeper.lock);
+ raw_spin_lock_init(&timekeeper.lock);
+ seqcount_init(&timekeeper.seq);
ntp_init();
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);
clock = clocksource_default_clock();
if (clock->enable)
clock->enable(clock);
@@ -608,7 +618,8 @@ void __init timekeeping_init(void)
-boot.tv_sec, -boot.tv_nsec);
timekeeper.total_sleep_time.tv_sec = 0;
timekeeper.total_sleep_time.tv_nsec = 0;
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);
}
/* time in seconds when suspend began */
@@ -657,7 +668,8 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
return;
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);
timekeeping_forward_now();
@@ -665,7 +677,8 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
timekeeping_update(true);
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);
/* signal hrtimers about time change */
clock_was_set();
@@ -688,7 +701,8 @@ static void timekeeping_resume(void)
clocksource_resume();
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);
if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
ts = timespec_sub(ts, timekeeping_suspend_time);
@@ -698,7 +712,8 @@ static void timekeeping_resume(void)
timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
timekeeper.ntp_error = 0;
timekeeping_suspended = 0;
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);
touch_softlockup_watchdog();
@@ -716,7 +731,8 @@ static int timekeeping_suspend(void)
read_persistent_clock(&timekeeping_suspend_time);
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);
timekeeping_forward_now();
timekeeping_suspended = 1;
@@ -739,7 +755,8 @@ static int timekeeping_suspend(void)
timekeeping_suspend_time =
timespec_add(timekeeping_suspend_time, delta_delta);
}
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);
clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
clocksource_suspend();
@@ -998,7 +1015,8 @@ static void update_wall_time(void)
int shift = 0, maxshift;
unsigned long flags;
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ raw_spin_lock_irqsave(&timekeeper.lock, flags);
+ write_seqcount_begin(&timekeeper.seq);
/* Make sure we're fully resumed: */
if (unlikely(timekeeping_suspended))
@@ -1086,8 +1104,8 @@ static void update_wall_time(void)
timekeeping_update(false);
out:
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
-
+ write_seqcount_end(&timekeeper.seq);
+ raw_spin_unlock_irqrestore(&timekeeper.lock, flags);
}
/**
@@ -1133,13 +1151,13 @@ void get_monotonic_boottime(struct timespec *ts)
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
*ts = timekeeper.xtime;
tomono = timekeeper.wall_to_monotonic;
sleep = timekeeper.total_sleep_time;
nsecs = timekeeping_get_ns();
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec + nsecs);
@@ -1190,10 +1208,10 @@ struct timespec current_kernel_time(void)
unsigned long seq;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
now = timekeeper.xtime;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
return now;
}
@@ -1205,11 +1223,11 @@ struct timespec get_monotonic_coarse(void)
unsigned long seq;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
now = timekeeper.xtime;
mono = timekeeper.wall_to_monotonic;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
now.tv_nsec + mono.tv_nsec);
@@ -1241,11 +1259,11 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
unsigned long seq;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
*xtim = timekeeper.xtime;
*wtom = timekeeper.wall_to_monotonic;
*sleep = timekeeper.total_sleep_time;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
}
/**
@@ -1257,9 +1275,9 @@ ktime_t ktime_get_monotonic_offset(void)
struct timespec wtom;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqcount_begin(&timekeeper.seq);
wtom = timekeeper.wall_to_monotonic;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqcount_retry(&timekeeper.seq, seq));
return timespec_to_ktime(wtom);
}
@@ -1274,7 +1292,9 @@ EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);
*/
void xtime_update(unsigned long ticks)
{
- write_seqlock(&xtime_lock);
+ raw_spin_lock(&xtime_lock);
+ write_seqcount_begin(&xtime_seq);
do_timer(ticks);
- write_sequnlock(&xtime_lock);
+ write_seqcount_end(&xtime_seq);
+ raw_spin_unlock(&xtime_lock);
}
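
A minimal userspace sketch of the open-coded seqlock the patch introduces: a plain mutex stands in for the raw spinlock that serializes writers, while an atomic sequence counter lets lockless readers detect a concurrent update and retry. All names and the update being protected are illustrative.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t xtime_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_uint xtime_seq;	/* odd while a write is in progress */
static _Atomic unsigned long long jiffies64;

static void writer_tick(void)
{
	pthread_mutex_lock(&xtime_lock);	/* raw_spin_lock(&xtime_lock) */
	atomic_fetch_add(&xtime_seq, 1);	/* write_seqcount_begin() */
	atomic_fetch_add(&jiffies64, 1);	/* the protected update */
	atomic_fetch_add(&xtime_seq, 1);	/* write_seqcount_end() */
	pthread_mutex_unlock(&xtime_lock);	/* raw_spin_unlock(&xtime_lock) */
}

static unsigned long long reader_get_jiffies(void)
{
	unsigned int seq;
	unsigned long long ret;

	do {	/* read_seqcount_begin() / read_seqcount_retry() */
		seq = atomic_load(&xtime_seq);
		ret = atomic_load(&jiffies64);
	} while ((seq & 1) || seq != atomic_load(&xtime_seq));
	return ret;
}

int main(void)
{
	writer_tick();
	printf("jiffies64 = %llu\n", reader_get_jiffies());
	return 0;
}

Splitting the lock from the counter is what lets RT keep the writer side on a raw spinlock while the read side stays a plain retry loop.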


@@ -1,27 +0,0 @@
From 83f61475fc133df6805eb883f2598c255527acfd Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:24 -0500
Subject: [029/256] drivers/net: Use disable_irq_nosync() in 8139too
Use disable_irq_nosync() instead of disable_irq() as this might be
called in atomic context with netpoll.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/net/ethernet/realtek/8139too.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index df7fd8d..7ce74f6 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2240,7 +2240,7 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance)
*/
static void rtl8139_poll_controller(struct net_device *dev)
{
- disable_irq(dev->irq);
+ disable_irq_nosync(dev->irq);
rtl8139_interrupt(dev->irq, dev);
enable_irq(dev->irq);
}


@@ -1,29 +0,0 @@
From 6f4dcb0881ab4b7ed02d1a1f4b6936243821c950 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 21 Jul 2009 16:07:37 +0200
Subject: [080/256] genirq: Disable random call on preempt-rt
The random call introduces high latencies and is almost
unused. Disable it for -rt.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/irq/handle.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index bdb1803..982793c 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -172,8 +172,11 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
action = action->next;
} while (action);
+#ifndef CONFIG_PREEMPT_RT_FULL
+ /* FIXME: Can we unbreak that ? */
if (random & IRQF_SAMPLE_RANDOM)
add_interrupt_randomness(irq);
+#endif
if (!noirqdebug)
note_interrupt(irq, desc, retval);


@@ -1,53 +0,0 @@
From 0cf4a0f832a996e67a51ea51c9ae73ddcf60b76d Mon Sep 17 00:00:00 2001
From: Steven Rostedt <rostedt@goodmis.org>
Date: Tue, 11 Oct 2011 23:56:23 -0400
Subject: [091/256] slab: Fix __do_drain to use the right array cache
The array cache in __do_drain() was using the cpu_cache_get() function
which uses smp_processor_id() to get the proper array. On mainline, this
is fine as __do_drain() is called by for_each_cpu() which runs
__do_drain() on the CPU it is processing. In RT locks are used instead
and __do_drain() is only called from a single CPU. This can cause the
accounting to be off and trigger the following bug:
slab error in kmem_cache_destroy(): cache `nfs_write_data': Can't free all objects
Pid: 2905, comm: rmmod Not tainted 3.0.6-test-rt17+ #78
Call Trace:
[<ffffffff810fb623>] kmem_cache_destroy+0xa0/0xdf
[<ffffffffa03aaffb>] nfs_destroy_writepagecache+0x49/0x4e [nfs]
[<ffffffffa03c0fe0>] exit_nfs_fs+0xe/0x46 [nfs]
[<ffffffff8107af09>] sys_delete_module+0x1ba/0x22c
[<ffffffff8109429d>] ? audit_syscall_entry+0x11c/0x148
[<ffffffff814b6442>] system_call_fastpath+0x16/0x1b
This can be easily triggered by a simple while loop:
# while :; do modprobe nfs; rmmod nfs; done
The proper function to use is cpu_cache_get_on_cpu(). It works for both
RT and non-RT as the non-RT passes in smp_processor_id() into
__do_drain().
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Luis Claudio R. Goncalves <lgoncalv@redhat.com>
Cc: Clark Williams <clark@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1318391783.13262.11.camel@gandalf.stny.rr.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
mm/slab.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mm/slab.c b/mm/slab.c
index dc84364..341748b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2638,7 +2638,7 @@ static void __do_drain(void *arg, unsigned int cpu)
struct array_cache *ac;
int node = cpu_to_mem(cpu);
- ac = cpu_cache_get(cachep);
+ ac = cpu_cache_get_on_cpu(cachep, cpu);
spin_lock(&cachep->nodelists[node]->list_lock);
free_block(cachep, ac->entry, ac->avail, node);
spin_unlock(&cachep->nodelists[node]->list_lock);


@@ -1,25 +0,0 @@
From e6884b5683aea73e316c560ce2c86b58905e25e0 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 7 Jun 2012 07:46:55 -0400
Subject: [102/256] panic-disable-random-on-rt
---
kernel/panic.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/kernel/panic.c b/kernel/panic.c
index 9ed023b..3c3ace0 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -363,9 +363,11 @@ static u64 oops_id;
static int init_oops_id(void)
{
+#ifndef CONFIG_PREEMPT_RT_FULL
if (!oops_id)
get_random_bytes(&oops_id, sizeof(oops_id));
else
+#endif
oops_id++;
return 0;


@@ -1,23 +0,0 @@
From 23d7a4fb4b0d55e0c90963b2880651d8bb57c6aa Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 15 Jul 2011 16:24:45 +0200
Subject: [106/256] net-ipv4-route-use-locks-on-up-rt.patch
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
net/ipv4/route.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 167ea10..eea5d9e 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -250,7 +250,7 @@ struct rt_hash_bucket {
};
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
- defined(CONFIG_PROVE_LOCKING)
+ defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_PREEMPT_RT_FULL)
/*
* Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks
* The size of this table is a power of two and depends on the number of CPUS.


@@ -1,65 +0,0 @@
From 2f92092b4be1b601d635ff03c522be83b8458c28 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 24 Jun 2011 20:39:24 +0200
Subject: [107/256] workqueue-avoid-the-lock-in-cpu-dying.patch
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/workqueue.c | 30 ++++++++++++++++++++----------
1 file changed, 20 insertions(+), 10 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 50e0d00..3fd5280 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3505,6 +3505,25 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
kthread_stop(new_trustee);
return NOTIFY_BAD;
}
+ break;
+ case CPU_POST_DEAD:
+ case CPU_UP_CANCELED:
+ case CPU_DOWN_FAILED:
+ case CPU_ONLINE:
+ break;
+ case CPU_DYING:
+ /*
+ * We access this lockless. We are on the dying CPU
+ * and called from stomp machine.
+ *
+ * Before this, the trustee and all workers except for
+ * the ones which are still executing works from
+ * before the last CPU down must be on the cpu. After
+ * this, they'll all be diasporas.
+ */
+ gcwq->flags |= GCWQ_DISASSOCIATED;
+ default:
+ goto out;
}
/* some are called w/ irq disabled, don't disturb irq status */
@@ -3524,16 +3543,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
gcwq->first_idle = new_worker;
break;
- case CPU_DYING:
- /*
- * Before this, the trustee and all workers except for
- * the ones which are still executing works from
- * before the last CPU down must be on the cpu. After
- * this, they'll all be diasporas.
- */
- gcwq->flags |= GCWQ_DISASSOCIATED;
- break;
-
case CPU_POST_DEAD:
gcwq->trustee_state = TRUSTEE_BUTCHER;
/* fall through */
@@ -3567,6 +3576,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
spin_unlock_irqrestore(&gcwq->lock, flags);
+out:
return notifier_from_errno(0);
}


@@ -1,47 +0,0 @@
From 8dfa8ea091ef006a7e1dd855cc37328cb4794c5d Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:30:20 -0500
Subject: [109/256] timers: preempt-rt support
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/timer.c | 18 +++++++++++++++++-
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/kernel/timer.c b/kernel/timer.c
index d7ad9d0..05cee31 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1344,6 +1344,22 @@ unsigned long get_next_timer_interrupt(unsigned long now)
*/
if (cpu_is_offline(smp_processor_id()))
return now + NEXT_TIMER_MAX_DELTA;
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+ /*
+ * On PREEMPT_RT we cannot sleep here. If the trylock does not
+ * succeed then we return the worst-case 'expires in 1 tick'
+ * value:
+ */
+ if (spin_trylock(&base->lock)) {
+ if (time_before_eq(base->next_timer, base->timer_jiffies))
+ base->next_timer = __next_timer_interrupt(base);
+ expires = base->next_timer;
+ spin_unlock(&base->lock);
+ } else {
+ expires = now + 1;
+ }
+#else
spin_lock(&base->lock);
if (time_before_eq(base->next_timer, base->timer_jiffies))
base->next_timer = __next_timer_interrupt(base);
@@ -1352,7 +1368,7 @@ unsigned long get_next_timer_interrupt(unsigned long now)
if (time_before_eq(expires, now))
return now;
-
+#endif
return cmp_next_hrtimer_event(now, expires);
}
#endif
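
The hunk above follows a simple pattern: on RT the base lock may sleep, so it is taken only via trylock and, on failure, the pessimistic "expires on the next tick" answer is returned. A hedged userspace sketch of that pattern, with made-up names and values:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t base_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long next_timer = 1042;	/* assumed pending expiry */

static unsigned long next_interrupt_sketch(unsigned long now)
{
	unsigned long expires;

	if (pthread_mutex_trylock(&base_lock) == 0) {
		expires = next_timer;
		pthread_mutex_unlock(&base_lock);
	} else {
		expires = now + 1;	/* worst case: recheck next tick */
	}
	return expires;
}

int main(void)
{
	printf("expires at %lu\n", next_interrupt_sketch(1000));
	return 0;
}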


@@ -1,51 +0,0 @@
From 8c251d0d40bf371f4f2c7421075c3d6007dc33e2 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:30:32 -0500
Subject: [110/256] timers: fix timer hotplug on -rt
Here we are in the CPU_DEAD notifier, and we must not sleep nor
enable interrupts.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/timer.c | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/kernel/timer.c b/kernel/timer.c
index 05cee31..7ba0602 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1764,6 +1764,7 @@ static void __cpuinit migrate_timers(int cpu)
{
struct tvec_base *old_base;
struct tvec_base *new_base;
+ unsigned long flags;
int i;
BUG_ON(cpu_online(cpu));
@@ -1773,8 +1774,11 @@ static void __cpuinit migrate_timers(int cpu)
* The caller is globally serialized and nobody else
* takes two locks at once, deadlock is not possible.
*/
- spin_lock_irq(&new_base->lock);
- spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+ local_irq_save(flags);
+ while (!spin_trylock(&new_base->lock))
+ cpu_relax();
+ while (!spin_trylock(&old_base->lock))
+ cpu_relax();
BUG_ON(old_base->running_timer);
@@ -1788,7 +1792,9 @@ static void __cpuinit migrate_timers(int cpu)
}
spin_unlock(&old_base->lock);
- spin_unlock_irq(&new_base->lock);
+ spin_unlock(&new_base->lock);
+ local_irq_restore(flags);
+
put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */
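
The hunk above replaces blocking lock acquisition with trylock loops because the CPU_DEAD notifier on RT must neither sleep nor enable interrupts. A userspace sketch of that shape, with illustrative names (sched_yield() stands in for cpu_relax()):

#include <pthread.h>
#include <sched.h>

static pthread_spinlock_t new_base_lock, old_base_lock;

static void migrate_timers_sketch(void)
{
	/* Spin instead of blocking: plain lock acquisition is replaced
	 * by trylock loops so the caller never sleeps. */
	while (pthread_spin_trylock(&new_base_lock) != 0)
		sched_yield();
	while (pthread_spin_trylock(&old_base_lock) != 0)
		sched_yield();

	/* ... migrate timer lists from old_base to new_base ... */

	pthread_spin_unlock(&old_base_lock);
	pthread_spin_unlock(&new_base_lock);
}

int main(void)
{
	pthread_spin_init(&new_base_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&old_base_lock, PTHREAD_PROCESS_PRIVATE);
	migrate_timers_sketch();
	return 0;
}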


@@ -1,38 +0,0 @@
From 440e42303f19c9686c15b9c8321a96d0c09e46b1 Mon Sep 17 00:00:00 2001
From: Yong Zhang <yong.zhang0@gmail.com>
Date: Thu, 13 Oct 2011 15:52:30 +0800
Subject: [118/256] hrtimer: Add missing debug_activate() aid [Was: Re:
[ANNOUNCE] 3.0.6-rt17]
On Fri, Oct 07, 2011 at 10:25:25AM -0700, Fernando Lopez-Lezcano wrote:
> On 10/06/2011 06:15 PM, Thomas Gleixner wrote:
> >Dear RT Folks,
> >
> >I'm pleased to announce the 3.0.6-rt17 release.
>
> Hi and thanks again. So far this one is not hanging which is very
> good news. But I still see the hrtimer_fixup_activate warnings I
> reported for rt16...
Hi Fernando,
I think below patch will smooth your concern?
Thanks,
Yong
---
kernel/hrtimer.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 358442b..d363df8 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1042,6 +1042,7 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
* remove it again and report a failure. This avoids
* stale base->first entries.
*/
+ debug_deactivate(timer);
__remove_hrtimer(timer, new_base,
timer->state & HRTIMER_STATE_CALLBACK, 0);
}


@@ -1,434 +0,0 @@
From 2d5eddded739b6a17401f4bdb0d3f09e18f7fa2f Mon Sep 17 00:00:00 2001
From: Steven Rostedt <rostedt@goodmis.org>
Date: Tue, 27 Sep 2011 13:56:50 -0400
Subject: [156/256] ring-buffer: Convert reader_lock from raw_spin_lock into
spin_lock
The reader_lock is mostly taken in normal context with interrupts enabled.
But because ftrace_dump() can happen anywhere, it is used as a spin lock
and in some cases a check to in_nmi() is performed to determine if the
ftrace_dump() was initiated from an NMI and if it is, the lock is not taken.
But having the lock as a raw_spin_lock() causes issues with the real-time
kernel as the lock is held during allocation and freeing of the buffer.
As memory locks convert into mutexes, keeping the reader_lock as a spin_lock
causes problems.
Converting the reader_lock is not straightforward as we must still deal
with the ftrace_dump() happening not only from an NMI but also from
true interrupt context in PREEMPT_RT.
Two wrapper functions are created to take and release the reader lock:
int read_buffer_lock(cpu_buffer, unsigned long *flags)
void read_buffer_unlock(cpu_buffer, unsigned long flags, int locked)
The read_buffer_lock() returns 1 if it actually took the lock, disables
interrupts and updates the flags. The only time it returns 0 is in the
case of a ftrace_dump() happening in an unsafe context.
The read_buffer_unlock() checks the return of locked and will simply
unlock the spin lock if it was successfully taken.
Instead of just having this in specific cases that the NMI might call
into, all instances of the reader_lock are converted to the wrapper
functions to make this a bit simpler to read and less error prone.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Clark Williams <clark@redhat.com>
Link: http://lkml.kernel.org/r/1317146210.26514.33.camel@gandalf.stny.rr.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/trace/ring_buffer.c | 151 ++++++++++++++++++++++++--------------------
1 file changed, 81 insertions(+), 70 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index cf8d11e..24efd16 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -446,7 +446,7 @@ struct ring_buffer_per_cpu {
int cpu;
atomic_t record_disabled;
struct ring_buffer *buffer;
- raw_spinlock_t reader_lock; /* serialize readers */
+ spinlock_t reader_lock; /* serialize readers */
arch_spinlock_t lock;
struct lock_class_key lock_key;
struct list_head *pages;
@@ -1017,6 +1017,44 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
return -ENOMEM;
}
+static inline int ok_to_lock(void)
+{
+ if (in_nmi())
+ return 0;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (in_atomic())
+ return 0;
+#endif
+ return 1;
+}
+
+static int
+read_buffer_lock(struct ring_buffer_per_cpu *cpu_buffer,
+ unsigned long *flags)
+{
+ /*
+ * If an NMI die dumps out the content of the ring buffer
+ * do not grab locks. We also permanently disable the ring
+ * buffer too. A one time deal is all you get from reading
+ * the ring buffer from an NMI.
+ */
+ if (!ok_to_lock()) {
+ if (spin_trylock_irqsave(&cpu_buffer->reader_lock, *flags))
+ return 1;
+ tracing_off_permanent();
+ return 0;
+ }
+ spin_lock_irqsave(&cpu_buffer->reader_lock, *flags);
+ return 1;
+}
+
+static void
+read_buffer_unlock(struct ring_buffer_per_cpu *cpu_buffer,
+ unsigned long flags, int locked)
+{
+ if (locked)
+ spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+}
static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
@@ -1032,7 +1070,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
cpu_buffer->cpu = cpu;
cpu_buffer->buffer = buffer;
- raw_spin_lock_init(&cpu_buffer->reader_lock);
+ spin_lock_init(&cpu_buffer->reader_lock);
lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
@@ -1227,9 +1265,11 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{
struct buffer_page *bpage;
struct list_head *p;
+ unsigned long flags;
unsigned i;
+ int locked;
- raw_spin_lock_irq(&cpu_buffer->reader_lock);
+ locked = read_buffer_lock(cpu_buffer, &flags);
rb_head_page_deactivate(cpu_buffer);
for (i = 0; i < nr_pages; i++) {
@@ -1247,7 +1287,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
rb_check_pages(cpu_buffer);
out:
- raw_spin_unlock_irq(&cpu_buffer->reader_lock);
+ read_buffer_unlock(cpu_buffer, flags, locked);
}
static void
@@ -1256,9 +1296,11 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
{
struct buffer_page *bpage;
struct list_head *p;
+ unsigned long flags;
unsigned i;
+ int locked;
- raw_spin_lock_irq(&cpu_buffer->reader_lock);
+ locked = read_buffer_lock(cpu_buffer, &flags);
rb_head_page_deactivate(cpu_buffer);
for (i = 0; i < nr_pages; i++) {
@@ -1273,7 +1315,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
rb_check_pages(cpu_buffer);
out:
- raw_spin_unlock_irq(&cpu_buffer->reader_lock);
+ read_buffer_unlock(cpu_buffer, flags, locked);
}
/**
@@ -2714,7 +2756,7 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
return 0;
cpu_buffer = buffer->buffers[cpu];
- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
/*
* if the tail is on reader_page, oldest time stamp is on the reader
* page
@@ -2724,7 +2766,7 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
else
bpage = rb_set_head_page(cpu_buffer);
ret = bpage->page->time_stamp;
- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
return ret;
}
@@ -2888,15 +2930,16 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
+ int locked;
if (!iter)
return;
cpu_buffer = iter->cpu_buffer;
- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ locked = read_buffer_lock(cpu_buffer, &flags);
rb_iter_reset(iter);
- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
@@ -3314,21 +3357,6 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
-static inline int rb_ok_to_lock(void)
-{
- /*
- * If an NMI die dumps out the content of the ring buffer
- * do not grab locks. We also permanently disable the ring
- * buffer too. A one time deal is all you get from reading
- * the ring buffer from an NMI.
- */
- if (likely(!in_nmi()))
- return 1;
-
- tracing_off_permanent();
- return 0;
-}
-
/**
* ring_buffer_peek - peek at the next event to be read
* @buffer: The ring buffer to read
@@ -3346,22 +3374,17 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
struct ring_buffer_event *event;
unsigned long flags;
- int dolock;
+ int locked;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return NULL;
- dolock = rb_ok_to_lock();
again:
- local_irq_save(flags);
- if (dolock)
- raw_spin_lock(&cpu_buffer->reader_lock);
+ locked = read_buffer_lock(cpu_buffer, &flags);
event = rb_buffer_peek(cpu_buffer, ts, lost_events);
if (event && event->type_len == RINGBUF_TYPE_PADDING)
rb_advance_reader(cpu_buffer);
- if (dolock)
- raw_spin_unlock(&cpu_buffer->reader_lock);
- local_irq_restore(flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);
if (event && event->type_len == RINGBUF_TYPE_PADDING)
goto again;
@@ -3383,11 +3406,12 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
struct ring_buffer_event *event;
unsigned long flags;
+ int locked;
again:
- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ locked = read_buffer_lock(cpu_buffer, &flags);
event = rb_iter_peek(iter, ts);
- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);
if (event && event->type_len == RINGBUF_TYPE_PADDING)
goto again;
@@ -3413,9 +3437,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event = NULL;
unsigned long flags;
- int dolock;
-
- dolock = rb_ok_to_lock();
+ int locked;
again:
/* might be called in atomic */
@@ -3425,9 +3447,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
goto out;
cpu_buffer = buffer->buffers[cpu];
- local_irq_save(flags);
- if (dolock)
- raw_spin_lock(&cpu_buffer->reader_lock);
+ locked = read_buffer_lock(cpu_buffer, &flags);
event = rb_buffer_peek(cpu_buffer, ts, lost_events);
if (event) {
@@ -3435,9 +3455,8 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
rb_advance_reader(cpu_buffer);
}
- if (dolock)
- raw_spin_unlock(&cpu_buffer->reader_lock);
- local_irq_restore(flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);
+
out:
preempt_enable();
@@ -3522,17 +3541,18 @@ ring_buffer_read_start(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
+ int locked;
if (!iter)
return;
cpu_buffer = iter->cpu_buffer;
- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ locked = read_buffer_lock(cpu_buffer, &flags);
arch_spin_lock(&cpu_buffer->lock);
rb_iter_reset(iter);
arch_spin_unlock(&cpu_buffer->lock);
- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);
@@ -3566,8 +3586,9 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
struct ring_buffer_event *event;
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
unsigned long flags;
+ int locked;
- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ locked = read_buffer_lock(cpu_buffer, &flags);
again:
event = rb_iter_peek(iter, ts);
if (!event)
@@ -3578,7 +3599,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
rb_advance_iter(iter);
out:
- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);
return event;
}
@@ -3643,13 +3664,14 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
unsigned long flags;
+ int locked;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return;
atomic_inc(&cpu_buffer->record_disabled);
- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ locked = read_buffer_lock(cpu_buffer, &flags);
if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
goto out;
@@ -3661,7 +3683,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
arch_spin_unlock(&cpu_buffer->lock);
out:
- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);
atomic_dec(&cpu_buffer->record_disabled);
}
@@ -3688,22 +3710,16 @@ int ring_buffer_empty(struct ring_buffer *buffer)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
- int dolock;
+ int locked;
int cpu;
int ret;
- dolock = rb_ok_to_lock();
-
/* yes this is racy, but if you don't like the race, lock the buffer */
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
- local_irq_save(flags);
- if (dolock)
- raw_spin_lock(&cpu_buffer->reader_lock);
+ locked = read_buffer_lock(cpu_buffer, &flags);
ret = rb_per_cpu_empty(cpu_buffer);
- if (dolock)
- raw_spin_unlock(&cpu_buffer->reader_lock);
- local_irq_restore(flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);
if (!ret)
return 0;
@@ -3722,22 +3738,16 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
- int dolock;
+ int locked;
int ret;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 1;
- dolock = rb_ok_to_lock();
-
cpu_buffer = buffer->buffers[cpu];
- local_irq_save(flags);
- if (dolock)
- raw_spin_lock(&cpu_buffer->reader_lock);
+ locked = read_buffer_lock(cpu_buffer, &flags);
ret = rb_per_cpu_empty(cpu_buffer);
- if (dolock)
- raw_spin_unlock(&cpu_buffer->reader_lock);
- local_irq_restore(flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);
return ret;
}
@@ -3912,6 +3922,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
unsigned int commit;
unsigned int read;
u64 save_timestamp;
+ int locked;
int ret = -1;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
@@ -3933,7 +3944,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
if (!bpage)
goto out;
- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ locked = read_buffer_lock(cpu_buffer, &flags);
reader = rb_get_reader_page(cpu_buffer);
if (!reader)
@@ -4057,7 +4068,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
out_unlock:
- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ read_buffer_unlock(cpu_buffer, flags, locked);
out:
return ret;
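
The hunks above all lean on read_buffer_lock()/read_buffer_unlock(),
which this patch introduces earlier in the file (not visible in this
excerpt). A rough reconstruction from the call sites above — the exact
lock type and body are assumptions; the helpers fold the old
rb_ok_to_lock()/dolock pattern into one place and remember whether the
reader_lock was actually taken:

    static inline int
    read_buffer_lock(struct ring_buffer_per_cpu *cpu_buffer,
                     unsigned long *flags)
    {
            /* assumed shape: take the lock when allowed, else only irqs */
            if (rb_ok_to_lock()) {
                    raw_spin_lock_irqsave(&cpu_buffer->reader_lock, *flags);
                    return 1;
            }
            local_irq_save(*flags);
            return 0;
    }

    static inline void
    read_buffer_unlock(struct ring_buffer_per_cpu *cpu_buffer,
                       unsigned long flags, int locked)
    {
            if (locked)
                    raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
            else
                    local_irq_restore(flags);
    }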


@ -1,39 +0,0 @@
From 64a36b17bf05d2ce7d7bca56896b295bcad0756f Mon Sep 17 00:00:00 2001
From: John Kacur <jkacur@redhat.com>
Date: Mon, 14 Nov 2011 02:44:42 +0100
Subject: [186/256] rcu: Fix macro substitution for synchronize_rcu_bh() on RT
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
kernel/rcutorture.c:492: error: synchronize_rcu_bh undeclared here (not in a function)
synchronize_rcu_bh() is not just called as a normal function, but can
also be referenced as a function pointer. When CONFIG_PREEMPT_RT_FULL
is enabled, synchronize_rcu_bh() is defined as synchronize_rcu(), but
needs to be defined without the parentheses because the compiler will
complain when synchronize_rcu_bh is referenced as a function pointer
and not a function.
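A minimal illustration of the preprocessor behaviour (hypothetical
caller, not part of the patch):

    /* rcutorture takes the address rather than calling the function: */
    void (*sync_fn)(void) = synchronize_rcu_bh;

    /* with "#define synchronize_rcu_bh() synchronize_rcu()" the line
     * above fails: a function-like macro only expands when followed by
     * "(", so the bare reference is an undeclared identifier;
     * with "#define synchronize_rcu_bh synchronize_rcu" it expands to
     * the address of synchronize_rcu and compiles. */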
Signed-off-by: John Kacur <jkacur@redhat.com>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: stable-rt@vger.kernel.org
Link: http://lkml.kernel.org/r/1321235083-21756-1-git-send-email-jkacur@redhat.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/rcutree.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 5b2d03e..ba517b5 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -60,7 +60,7 @@ static inline void exit_rcu(void)
#ifndef CONFIG_PREEMPT_RT_FULL
extern void synchronize_rcu_bh(void);
#else
-# define synchronize_rcu_bh() synchronize_rcu()
+# define synchronize_rcu_bh synchronize_rcu
#endif
extern void synchronize_sched_expedited(void);
extern void synchronize_rcu_expedited(void);


@ -1,125 +0,0 @@
From 1ae3c12ddb6ad4e84d6d507e724921d55373a649 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 15 Jun 2011 11:02:21 +0200
Subject: [191/256] lglocks-rt.patch
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/lglock.h | 99 ++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 99 insertions(+)
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index 87f402c..52b289f 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -71,6 +71,8 @@
extern void name##_global_lock_online(void); \
extern void name##_global_unlock_online(void); \
+#ifndef CONFIG_PREEMPT_RT_FULL
+
#define DEFINE_LGLOCK(name) \
\
DEFINE_SPINLOCK(name##_cpu_lock); \
@@ -197,4 +199,101 @@
preempt_enable(); \
} \
EXPORT_SYMBOL(name##_global_unlock);
+
+#else /* !PREEMPT_RT_FULL */
+#define DEFINE_LGLOCK(name) \
+ \
+ DEFINE_PER_CPU(struct rt_mutex, name##_lock); \
+ DEFINE_LGLOCK_LOCKDEP(name); \
+ \
+ void name##_lock_init(void) { \
+ int i; \
+ LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
+ for_each_possible_cpu(i) { \
+ struct rt_mutex *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ rt_mutex_init(lock); \
+ } \
+ } \
+ EXPORT_SYMBOL(name##_lock_init); \
+ \
+ void name##_local_lock(void) { \
+ struct rt_mutex *lock; \
+ migrate_disable(); \
+ rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \
+ lock = &__get_cpu_var(name##_lock); \
+ __rt_spin_lock(lock); \
+ } \
+ EXPORT_SYMBOL(name##_local_lock); \
+ \
+ void name##_local_unlock(void) { \
+ struct rt_mutex *lock; \
+ rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \
+ lock = &__get_cpu_var(name##_lock); \
+ __rt_spin_unlock(lock); \
+ migrate_enable(); \
+ } \
+ EXPORT_SYMBOL(name##_local_unlock); \
+ \
+ void name##_local_lock_cpu(int cpu) { \
+ struct rt_mutex *lock; \
+ rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \
+ lock = &per_cpu(name##_lock, cpu); \
+ __rt_spin_lock(lock); \
+ } \
+ EXPORT_SYMBOL(name##_local_lock_cpu); \
+ \
+ void name##_local_unlock_cpu(int cpu) { \
+ struct rt_mutex *lock; \
+ rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \
+ lock = &per_cpu(name##_lock, cpu); \
+ __rt_spin_unlock(lock); \
+ } \
+ EXPORT_SYMBOL(name##_local_unlock_cpu); \
+ \
+ void name##_global_lock_online(void) { \
+ int i; \
+ rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
+ for_each_online_cpu(i) { \
+ struct rt_mutex *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ __rt_spin_lock(lock); \
+ } \
+ } \
+ EXPORT_SYMBOL(name##_global_lock_online); \
+ \
+ void name##_global_unlock_online(void) { \
+ int i; \
+ rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
+ for_each_online_cpu(i) { \
+ struct rt_mutex *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ __rt_spin_unlock(lock); \
+ } \
+ } \
+ EXPORT_SYMBOL(name##_global_unlock_online); \
+ \
+ void name##_global_lock(void) { \
+ int i; \
+ rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
+ for_each_possible_cpu(i) { \
+ struct rt_mutex *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ __rt_spin_lock(lock); \
+ } \
+ } \
+ EXPORT_SYMBOL(name##_global_lock); \
+ \
+ void name##_global_unlock(void) { \
+ int i; \
+ rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
+ for_each_possible_cpu(i) { \
+ struct rt_mutex *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ __rt_spin_unlock(lock); \
+ } \
+ } \
+ EXPORT_SYMBOL(name##_global_unlock);
+#endif /* PREEMPT_RT_FULL */
+
#endif


@ -1,807 +0,0 @@
From 5e6a742920db44d54cbd1826318a2af372c36bf4 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <a.p.zijlstra@chello.nl>
Date: Fri, 30 Sep 2011 11:57:58 +0200
Subject: [208/256] workqueue: Fix cpuhotplug trainwreck
The current workqueue code does crazy stuff on cpu unplug: it relies on
forced affine breakage, thereby violating per-cpu expectations. Worse,
it tries to re-attach to a cpu if the thing comes up again before all
previously queued works are finished. This breaks (admittedly bonkers)
cpu-hotplug use that relies on a down-up cycle to push all usage away.
Introduce a new WQ_NON_AFFINE flag that indicates a per-cpu workqueue
will not respect cpu affinity and use this to migrate all its pending
works to whatever cpu is doing cpu-down.
This also adds a warning for queue_work_on() users: the API implies you
care about which cpu things are run on, and WQ_NON_AFFINE workqueues
cannot guarantee this.
For the rest, simply flush all per-cpu works and don't mess about.
This also means that currently all workqueues that are manually
flushing things on cpu-down in order to provide the per-cpu guarantee
no longer need to do so.
In short, we tell the WQ what we want it to do, provide validation for
this and lose ~250 lines of code.
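As a usage sketch (hypothetical workqueue and work item; WQ_NON_AFFINE
is the flag this patch introduces):

    /* per-cpu queueing without an affinity guarantee: on cpu-down the
     * pending works may be run by whatever cpu does the unplug */
    struct workqueue_struct *wq = alloc_workqueue("my_stats",
                                                  WQ_NON_AFFINE, 0);

    queue_work(wq, &my_work);        /* fine: no affinity promised */
    queue_work_on(3, wq, &my_work);  /* trips the new WARN_ON() below */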
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/cpu.h | 6 +-
include/linux/workqueue.h | 5 +-
kernel/workqueue.c | 556 ++++++++++++---------------------------------
3 files changed, 152 insertions(+), 415 deletions(-)
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 00d2f6f8..a6bda1b 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -75,8 +75,10 @@ enum {
/* migration should happen before other stuff but after perf */
CPU_PRI_PERF = 20,
CPU_PRI_MIGRATION = 10,
- /* prepare workqueues for other notifiers */
- CPU_PRI_WORKQUEUE = 5,
+
+ CPU_PRI_WORKQUEUE_ACTIVE = 5, /* prepare workqueues for others */
+ CPU_PRI_NORMAL = 0,
+ CPU_PRI_WORKQUEUE_INACTIVE = -5, /* flush workqueues after others */
};
#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index af15545..9849be1 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -254,9 +254,10 @@ enum {
WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */
WQ_HIGHPRI = 1 << 4, /* high priority */
WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */
+ WQ_NON_AFFINE = 1 << 6, /* free to move works around cpus */
- WQ_DRAINING = 1 << 6, /* internal: workqueue is draining */
- WQ_RESCUER = 1 << 7, /* internal: workqueue has rescuer */
+ WQ_DRAINING = 1 << 7, /* internal: workqueue is draining */
+ WQ_RESCUER = 1 << 8, /* internal: workqueue has rescuer */
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 33d1095..ba977c4 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -41,6 +41,7 @@
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
+#include <linux/delay.h>
#include "workqueue_sched.h"
@@ -57,20 +58,10 @@ enum {
WORKER_DIE = 1 << 1, /* die die die */
WORKER_IDLE = 1 << 2, /* is idle */
WORKER_PREP = 1 << 3, /* preparing to run works */
- WORKER_ROGUE = 1 << 4, /* not bound to any cpu */
- WORKER_REBIND = 1 << 5, /* mom is home, come back */
- WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
- WORKER_UNBOUND = 1 << 7, /* worker is unbound */
+ WORKER_CPU_INTENSIVE = 1 << 4, /* cpu intensive */
+ WORKER_UNBOUND = 1 << 5, /* worker is unbound */
- WORKER_NOT_RUNNING = WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
- WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
-
- /* gcwq->trustee_state */
- TRUSTEE_START = 0, /* start */
- TRUSTEE_IN_CHARGE = 1, /* trustee in charge of gcwq */
- TRUSTEE_BUTCHER = 2, /* butcher workers */
- TRUSTEE_RELEASE = 3, /* release workers */
- TRUSTEE_DONE = 4, /* trustee is done */
+ WORKER_NOT_RUNNING = WORKER_PREP | WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
BUSY_WORKER_HASH_SIZE = 1 << BUSY_WORKER_HASH_ORDER,
@@ -84,7 +75,6 @@ enum {
(min two ticks) */
MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */
CREATE_COOLDOWN = HZ, /* time to breath after fail */
- TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */
/*
* Rescue workers are used only on emergencies and shared by
@@ -136,7 +126,6 @@ struct worker {
unsigned long last_active; /* L: last active timestamp */
unsigned int flags; /* X: flags */
int id; /* I: worker id */
- struct work_struct rebind_work; /* L: rebind worker to cpu */
int sleeping; /* None */
};
@@ -164,10 +153,8 @@ struct global_cwq {
struct ida worker_ida; /* L: for worker IDs */
- struct task_struct *trustee; /* L: for gcwq shutdown */
- unsigned int trustee_state; /* L: trustee state */
- wait_queue_head_t trustee_wait; /* trustee wait */
struct worker *first_idle; /* L: first idle worker */
+ wait_queue_head_t idle_wait;
} ____cacheline_aligned_in_smp;
/*
@@ -969,13 +956,38 @@ static bool is_chained_work(struct workqueue_struct *wq)
return false;
}
-static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
- struct work_struct *work)
+static void ___queue_work(struct workqueue_struct *wq, struct global_cwq *gcwq,
+ struct work_struct *work)
{
- struct global_cwq *gcwq;
struct cpu_workqueue_struct *cwq;
struct list_head *worklist;
unsigned int work_flags;
+
+ /* gcwq determined, get cwq and queue */
+ cwq = get_cwq(gcwq->cpu, wq);
+ trace_workqueue_queue_work(gcwq->cpu, cwq, work);
+
+ BUG_ON(!list_empty(&work->entry));
+
+ cwq->nr_in_flight[cwq->work_color]++;
+ work_flags = work_color_to_flags(cwq->work_color);
+
+ if (likely(cwq->nr_active < cwq->max_active)) {
+ trace_workqueue_activate_work(work);
+ cwq->nr_active++;
+ worklist = gcwq_determine_ins_pos(gcwq, cwq);
+ } else {
+ work_flags |= WORK_STRUCT_DELAYED;
+ worklist = &cwq->delayed_works;
+ }
+
+ insert_work(cwq, work, worklist, work_flags);
+}
+
+static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
+ struct work_struct *work)
+{
+ struct global_cwq *gcwq;
unsigned long flags;
debug_work_activate(work);
@@ -1021,27 +1033,32 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
spin_lock_irqsave(&gcwq->lock, flags);
}
- /* gcwq determined, get cwq and queue */
- cwq = get_cwq(gcwq->cpu, wq);
- trace_workqueue_queue_work(cpu, cwq, work);
+ ___queue_work(wq, gcwq, work);
- BUG_ON(!list_empty(&work->entry));
+ spin_unlock_irqrestore(&gcwq->lock, flags);
+}
- cwq->nr_in_flight[cwq->work_color]++;
- work_flags = work_color_to_flags(cwq->work_color);
+/**
+ * queue_work_on - queue work on specific cpu
+ * @cpu: CPU number to execute work on
+ * @wq: workqueue to use
+ * @work: work to queue
+ *
+ * Returns 0 if @work was already on a queue, non-zero otherwise.
+ *
+ * We queue the work to a specific CPU, the caller must ensure it
+ * can't go away.
+ */
+static int
+__queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
+{
+ int ret = 0;
- if (likely(cwq->nr_active < cwq->max_active)) {
- trace_workqueue_activate_work(work);
- cwq->nr_active++;
- worklist = gcwq_determine_ins_pos(gcwq, cwq);
- } else {
- work_flags |= WORK_STRUCT_DELAYED;
- worklist = &cwq->delayed_works;
+ if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+ __queue_work(cpu, wq, work);
+ ret = 1;
}
-
- insert_work(cwq, work, worklist, work_flags);
-
- spin_unlock_irqrestore(&gcwq->lock, flags);
+ return ret;
}
/**
@@ -1058,34 +1075,19 @@ int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
int ret;
- ret = queue_work_on(get_cpu_light(), wq, work);
+ ret = __queue_work_on(get_cpu_light(), wq, work);
put_cpu_light();
return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
-/**
- * queue_work_on - queue work on specific cpu
- * @cpu: CPU number to execute work on
- * @wq: workqueue to use
- * @work: work to queue
- *
- * Returns 0 if @work was already on a queue, non-zero otherwise.
- *
- * We queue the work to a specific CPU, the caller must ensure it
- * can't go away.
- */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
- int ret = 0;
+ WARN_ON(wq->flags & WQ_NON_AFFINE);
- if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
- __queue_work(cpu, wq, work);
- ret = 1;
- }
- return ret;
+ return __queue_work_on(cpu, wq, work);
}
EXPORT_SYMBOL_GPL(queue_work_on);
@@ -1131,6 +1133,8 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct timer_list *timer = &dwork->timer;
struct work_struct *work = &dwork->work;
+ WARN_ON((wq->flags & WQ_NON_AFFINE) && cpu != -1);
+
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
unsigned int lcpu;
@@ -1196,12 +1200,13 @@ static void worker_enter_idle(struct worker *worker)
/* idle_list is LIFO */
list_add(&worker->entry, &gcwq->idle_list);
- if (likely(!(worker->flags & WORKER_ROGUE))) {
- if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer))
- mod_timer(&gcwq->idle_timer,
- jiffies + IDLE_WORKER_TIMEOUT);
- } else
- wake_up_all(&gcwq->trustee_wait);
+ if (gcwq->nr_idle == gcwq->nr_workers)
+ wake_up_all(&gcwq->idle_wait);
+
+ if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer)) {
+ mod_timer(&gcwq->idle_timer,
+ jiffies + IDLE_WORKER_TIMEOUT);
+ }
/* sanity check nr_running */
WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
@@ -1293,23 +1298,6 @@ __acquires(&gcwq->lock)
}
}
-/*
- * Function for worker->rebind_work used to rebind rogue busy workers
- * to the associated cpu which is coming back online. This is
- * scheduled by cpu up but can race with other cpu hotplug operations
- * and may be executed twice without intervening cpu down.
- */
-static void worker_rebind_fn(struct work_struct *work)
-{
- struct worker *worker = container_of(work, struct worker, rebind_work);
- struct global_cwq *gcwq = worker->gcwq;
-
- if (worker_maybe_bind_and_lock(worker))
- worker_clr_flags(worker, WORKER_REBIND);
-
- spin_unlock_irq(&gcwq->lock);
-}
-
static struct worker *alloc_worker(void)
{
struct worker *worker;
@@ -1318,7 +1306,6 @@ static struct worker *alloc_worker(void)
if (worker) {
INIT_LIST_HEAD(&worker->entry);
INIT_LIST_HEAD(&worker->scheduled);
- INIT_WORK(&worker->rebind_work, worker_rebind_fn);
/* on creation a worker is in !idle && prep state */
worker->flags = WORKER_PREP;
}
@@ -1658,13 +1645,6 @@ static bool manage_workers(struct worker *worker)
gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
- /*
- * The trustee might be waiting to take over the manager
- * position, tell it we're done.
- */
- if (unlikely(gcwq->trustee))
- wake_up_all(&gcwq->trustee_wait);
-
return ret;
}
@@ -3205,171 +3185,71 @@ EXPORT_SYMBOL_GPL(work_busy);
* gcwqs serve mix of short, long and very long running works making
* blocked draining impractical.
*
- * This is solved by allowing a gcwq to be detached from CPU, running
- * it with unbound (rogue) workers and allowing it to be reattached
- * later if the cpu comes back online. A separate thread is created
- * to govern a gcwq in such state and is called the trustee of the
- * gcwq.
- *
- * Trustee states and their descriptions.
- *
- * START Command state used on startup. On CPU_DOWN_PREPARE, a
- * new trustee is started with this state.
- *
- * IN_CHARGE Once started, trustee will enter this state after
- * assuming the manager role and making all existing
- * workers rogue. DOWN_PREPARE waits for trustee to
- * enter this state. After reaching IN_CHARGE, trustee
- * tries to execute the pending worklist until it's empty
- * and the state is set to BUTCHER, or the state is set
- * to RELEASE.
- *
- * BUTCHER Command state which is set by the cpu callback after
- * the cpu has went down. Once this state is set trustee
- * knows that there will be no new works on the worklist
- * and once the worklist is empty it can proceed to
- * killing idle workers.
- *
- * RELEASE Command state which is set by the cpu callback if the
- * cpu down has been canceled or it has come online
- * again. After recognizing this state, trustee stops
- * trying to drain or butcher and clears ROGUE, rebinds
- * all remaining workers back to the cpu and releases
- * manager role.
- *
- * DONE Trustee will enter this state after BUTCHER or RELEASE
- * is complete.
- *
- * trustee CPU draining
- * took over down complete
- * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
- * | | ^
- * | CPU is back online v return workers |
- * ----------------> RELEASE --------------
*/
-/**
- * trustee_wait_event_timeout - timed event wait for trustee
- * @cond: condition to wait for
- * @timeout: timeout in jiffies
- *
- * wait_event_timeout() for trustee to use. Handles locking and
- * checks for RELEASE request.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock) which may be released and regrabbed
- * multiple times. To be used by trustee.
- *
- * RETURNS:
- * Positive indicating left time if @cond is satisfied, 0 if timed
- * out, -1 if canceled.
- */
-#define trustee_wait_event_timeout(cond, timeout) ({ \
- long __ret = (timeout); \
- while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) && \
- __ret) { \
- spin_unlock_irq(&gcwq->lock); \
- __wait_event_timeout(gcwq->trustee_wait, (cond) || \
- (gcwq->trustee_state == TRUSTEE_RELEASE), \
- __ret); \
- spin_lock_irq(&gcwq->lock); \
- } \
- gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret); \
-})
+static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
+ unsigned long action,
+ void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+ struct global_cwq *gcwq = get_gcwq(cpu);
+ struct worker *uninitialized_var(new_worker);
+ unsigned long flags;
-/**
- * trustee_wait_event - event wait for trustee
- * @cond: condition to wait for
- *
- * wait_event() for trustee to use. Automatically handles locking and
- * checks for CANCEL request.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock) which may be released and regrabbed
- * multiple times. To be used by trustee.
- *
- * RETURNS:
- * 0 if @cond is satisfied, -1 if canceled.
- */
-#define trustee_wait_event(cond) ({ \
- long __ret1; \
- __ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
- __ret1 < 0 ? -1 : 0; \
-})
+ action &= ~CPU_TASKS_FROZEN;
-static int __cpuinit trustee_thread(void *__gcwq)
-{
- struct global_cwq *gcwq = __gcwq;
- struct worker *worker;
- struct work_struct *work;
- struct hlist_node *pos;
- long rc;
- int i;
+ switch (action) {
+ case CPU_UP_PREPARE:
+ BUG_ON(gcwq->first_idle);
+ new_worker = create_worker(gcwq, false);
+ if (!new_worker)
+ return NOTIFY_BAD;
+ }
- BUG_ON(gcwq->cpu != smp_processor_id());
+ /* some are called w/ irq disabled, don't disturb irq status */
+ spin_lock_irqsave(&gcwq->lock, flags);
- spin_lock_irq(&gcwq->lock);
- /*
- * Claim the manager position and make all workers rogue.
- * Trustee must be bound to the target cpu and can't be
- * cancelled.
- */
- BUG_ON(gcwq->cpu != smp_processor_id());
- rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS));
- BUG_ON(rc < 0);
+ switch (action) {
+ case CPU_UP_PREPARE:
+ BUG_ON(gcwq->first_idle);
+ gcwq->first_idle = new_worker;
+ break;
- gcwq->flags |= GCWQ_MANAGING_WORKERS;
+ case CPU_UP_CANCELED:
+ destroy_worker(gcwq->first_idle);
+ gcwq->first_idle = NULL;
+ break;
- list_for_each_entry(worker, &gcwq->idle_list, entry)
- worker->flags |= WORKER_ROGUE;
+ case CPU_ONLINE:
+ spin_unlock_irq(&gcwq->lock);
+ kthread_bind(gcwq->first_idle->task, cpu);
+ spin_lock_irq(&gcwq->lock);
+ gcwq->flags |= GCWQ_MANAGE_WORKERS;
+ start_worker(gcwq->first_idle);
+ gcwq->first_idle = NULL;
+ break;
+ }
- for_each_busy_worker(worker, i, pos, gcwq)
- worker->flags |= WORKER_ROGUE;
+ spin_unlock_irqrestore(&gcwq->lock, flags);
- /*
- * Call schedule() so that we cross rq->lock and thus can
- * guarantee sched callbacks see the rogue flag. This is
- * necessary as scheduler callbacks may be invoked from other
- * cpus.
- */
- spin_unlock_irq(&gcwq->lock);
- schedule();
- spin_lock_irq(&gcwq->lock);
+ return notifier_from_errno(0);
+}
- /*
- * Sched callbacks are disabled now. Zap nr_running. After
- * this, nr_running stays zero and need_more_worker() and
- * keep_working() are always true as long as the worklist is
- * not empty.
- */
- atomic_set(get_gcwq_nr_running(gcwq->cpu), 0);
+static void flush_gcwq(struct global_cwq *gcwq)
+{
+ struct work_struct *work, *nw;
+ struct worker *worker, *n;
+ LIST_HEAD(non_affine_works);
- spin_unlock_irq(&gcwq->lock);
- del_timer_sync(&gcwq->idle_timer);
spin_lock_irq(&gcwq->lock);
+ list_for_each_entry_safe(work, nw, &gcwq->worklist, entry) {
+ struct workqueue_struct *wq = get_work_cwq(work)->wq;
- /*
- * We're now in charge. Notify and proceed to drain. We need
- * to keep the gcwq running during the whole CPU down
- * procedure as other cpu hotunplug callbacks may need to
- * flush currently running tasks.
- */
- gcwq->trustee_state = TRUSTEE_IN_CHARGE;
- wake_up_all(&gcwq->trustee_wait);
+ if (wq->flags & WQ_NON_AFFINE)
+ list_move(&work->entry, &non_affine_works);
+ }
- /*
- * The original cpu is in the process of dying and may go away
- * anytime now. When that happens, we and all workers would
- * be migrated to other cpus. Try draining any left work. We
- * want to get it over with ASAP - spam rescuers, wake up as
- * many idlers as necessary and create new ones till the
- * worklist is empty. Note that if the gcwq is frozen, there
- * may be frozen works in freezable cwqs. Don't declare
- * completion while frozen.
- */
- while (gcwq->nr_workers != gcwq->nr_idle ||
- gcwq->flags & GCWQ_FREEZING ||
- gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
+ while (!list_empty(&gcwq->worklist)) {
int nr_works = 0;
list_for_each_entry(work, &gcwq->worklist, entry) {
@@ -3383,200 +3263,55 @@ static int __cpuinit trustee_thread(void *__gcwq)
wake_up_process(worker->task);
}
+ spin_unlock_irq(&gcwq->lock);
+
if (need_to_create_worker(gcwq)) {
- spin_unlock_irq(&gcwq->lock);
- worker = create_worker(gcwq, false);
- spin_lock_irq(&gcwq->lock);
- if (worker) {
- worker->flags |= WORKER_ROGUE;
+ worker = create_worker(gcwq, true);
+ if (worker)
start_worker(worker);
- }
}
- /* give a breather */
- if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
- break;
- }
-
- /*
- * Either all works have been scheduled and cpu is down, or
- * cpu down has already been canceled. Wait for and butcher
- * all workers till we're canceled.
- */
- do {
- rc = trustee_wait_event(!list_empty(&gcwq->idle_list));
- while (!list_empty(&gcwq->idle_list))
- destroy_worker(list_first_entry(&gcwq->idle_list,
- struct worker, entry));
- } while (gcwq->nr_workers && rc >= 0);
-
- /*
- * At this point, either draining has completed and no worker
- * is left, or cpu down has been canceled or the cpu is being
- * brought back up. There shouldn't be any idle one left.
- * Tell the remaining busy ones to rebind once it finishes the
- * currently scheduled works by scheduling the rebind_work.
- */
- WARN_ON(!list_empty(&gcwq->idle_list));
+ wait_event_timeout(gcwq->idle_wait,
+ gcwq->nr_idle == gcwq->nr_workers, HZ/10);
- for_each_busy_worker(worker, i, pos, gcwq) {
- struct work_struct *rebind_work = &worker->rebind_work;
+ spin_lock_irq(&gcwq->lock);
+ }
- /*
- * Rebind_work may race with future cpu hotplug
- * operations. Use a separate flag to mark that
- * rebinding is scheduled.
- */
- worker->flags |= WORKER_REBIND;
- worker->flags &= ~WORKER_ROGUE;
+ WARN_ON(gcwq->nr_workers != gcwq->nr_idle);
- /* queue rebind_work, wq doesn't matter, use the default one */
- if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
- work_data_bits(rebind_work)))
- continue;
+ list_for_each_entry_safe(worker, n, &gcwq->idle_list, entry)
+ destroy_worker(worker);
- debug_work_activate(rebind_work);
- insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
- worker->scheduled.next,
- work_color_to_flags(WORK_NO_COLOR));
- }
+ WARN_ON(gcwq->nr_workers || gcwq->nr_idle);
- /* relinquish manager role */
- gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
-
- /* notify completion */
- gcwq->trustee = NULL;
- gcwq->trustee_state = TRUSTEE_DONE;
- wake_up_all(&gcwq->trustee_wait);
spin_unlock_irq(&gcwq->lock);
- return 0;
-}
-/**
- * wait_trustee_state - wait for trustee to enter the specified state
- * @gcwq: gcwq the trustee of interest belongs to
- * @state: target state to wait for
- *
- * Wait for the trustee to reach @state. DONE is already matched.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock) which may be released and regrabbed
- * multiple times. To be used by cpu_callback.
- */
-static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
-__releases(&gcwq->lock)
-__acquires(&gcwq->lock)
-{
- if (!(gcwq->trustee_state == state ||
- gcwq->trustee_state == TRUSTEE_DONE)) {
- spin_unlock_irq(&gcwq->lock);
- __wait_event(gcwq->trustee_wait,
- gcwq->trustee_state == state ||
- gcwq->trustee_state == TRUSTEE_DONE);
- spin_lock_irq(&gcwq->lock);
+ gcwq = get_gcwq(get_cpu());
+ spin_lock_irq(&gcwq->lock);
+ list_for_each_entry_safe(work, nw, &non_affine_works, entry) {
+ list_del_init(&work->entry);
+ ___queue_work(get_work_cwq(work)->wq, gcwq, work);
}
+ spin_unlock_irq(&gcwq->lock);
+ put_cpu();
}
-static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
+static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
struct global_cwq *gcwq = get_gcwq(cpu);
- struct task_struct *new_trustee = NULL;
- struct worker *uninitialized_var(new_worker);
- unsigned long flags;
action &= ~CPU_TASKS_FROZEN;
- switch (action) {
- case CPU_DOWN_PREPARE:
- new_trustee = kthread_create(trustee_thread, gcwq,
- "workqueue_trustee/%d\n", cpu);
- if (IS_ERR(new_trustee))
- return notifier_from_errno(PTR_ERR(new_trustee));
- kthread_bind(new_trustee, cpu);
- /* fall through */
- case CPU_UP_PREPARE:
- BUG_ON(gcwq->first_idle);
- new_worker = create_worker(gcwq, false);
- if (!new_worker) {
- if (new_trustee)
- kthread_stop(new_trustee);
- return NOTIFY_BAD;
- }
- break;
- case CPU_POST_DEAD:
- case CPU_UP_CANCELED:
- case CPU_DOWN_FAILED:
- case CPU_ONLINE:
- break;
- case CPU_DYING:
- /*
- * We access this lockless. We are on the dying CPU
- * and called from stomp machine.
- *
- * Before this, the trustee and all workers except for
- * the ones which are still executing works from
- * before the last CPU down must be on the cpu. After
- * this, they'll all be diasporas.
- */
- gcwq->flags |= GCWQ_DISASSOCIATED;
- default:
- goto out;
- }
-
- /* some are called w/ irq disabled, don't disturb irq status */
- spin_lock_irqsave(&gcwq->lock, flags);
-
- switch (action) {
- case CPU_DOWN_PREPARE:
- /* initialize trustee and tell it to acquire the gcwq */
- BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
- gcwq->trustee = new_trustee;
- gcwq->trustee_state = TRUSTEE_START;
- wake_up_process(gcwq->trustee);
- wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
- /* fall through */
- case CPU_UP_PREPARE:
- BUG_ON(gcwq->first_idle);
- gcwq->first_idle = new_worker;
- break;
+ switch (action) {
+ case CPU_DOWN_PREPARE:
+ flush_gcwq(gcwq);
+ break;
+ }
- case CPU_POST_DEAD:
- gcwq->trustee_state = TRUSTEE_BUTCHER;
- /* fall through */
- case CPU_UP_CANCELED:
- destroy_worker(gcwq->first_idle);
- gcwq->first_idle = NULL;
- break;
- case CPU_DOWN_FAILED:
- case CPU_ONLINE:
- gcwq->flags &= ~GCWQ_DISASSOCIATED;
- if (gcwq->trustee_state != TRUSTEE_DONE) {
- gcwq->trustee_state = TRUSTEE_RELEASE;
- wake_up_process(gcwq->trustee);
- wait_trustee_state(gcwq, TRUSTEE_DONE);
- }
-
- /*
- * Trustee is done and there might be no worker left.
- * Put the first_idle in and request a real manager to
- * take a look.
- */
- spin_unlock_irq(&gcwq->lock);
- kthread_bind(gcwq->first_idle->task, cpu);
- spin_lock_irq(&gcwq->lock);
- gcwq->flags |= GCWQ_MANAGE_WORKERS;
- start_worker(gcwq->first_idle);
- gcwq->first_idle = NULL;
- break;
- }
-
- spin_unlock_irqrestore(&gcwq->lock, flags);
-
-out:
return notifier_from_errno(0);
}
@@ -3773,7 +3508,8 @@ static int __init init_workqueues(void)
unsigned int cpu;
int i;
- cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
+ cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_ACTIVE);
+ hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_INACTIVE);
/* initialize gcwqs */
for_each_gcwq_cpu(cpu) {
@@ -3796,9 +3532,7 @@ static int __init init_workqueues(void)
(unsigned long)gcwq);
ida_init(&gcwq->worker_ida);
-
- gcwq->trustee_state = TRUSTEE_DONE;
- init_waitqueue_head(&gcwq->trustee_wait);
+ init_waitqueue_head(&gcwq->idle_wait);
}
/* create the initial worker */


@ -1,103 +0,0 @@
From 6d2f6a81b2bd6fb3f5d53cf855661e6d718ddb88 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <a.p.zijlstra@chello.nl>
Date: Mon, 3 Oct 2011 12:43:25 +0200
Subject: [209/256] workqueue: Fix PF_THREAD_BOUND abuse
PF_THREAD_BOUND is set by kthread_bind() and means the thread is bound
to a particular cpu for correctness. The workqueue code abuses this
flag and blindly sets it for all created threads, including those that
are free to migrate.
Restore the original semantics now that the worst abuses in the
cpu-hotplug path are gone. The only icky bit is the rescue thread for
per-cpu workqueues; it cannot use kthread_bind() but will use
set_cpus_allowed_ptr() to migrate itself to the desired cpu.
Set and clear PF_THREAD_BOUND manually here.
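Condensed to a sketch (not an exact quote of the hunks below), the
rescuer path after this patch amounts to:

    /* bind by migration, then mark bound-for-correctness by hand */
    set_cpus_allowed_ptr(rescuer->task, get_cpu_mask(gcwq->cpu));
    rescuer->task->flags |= PF_THREAD_BOUND;
    /* ... run the per-cpu mayday works ... */
    rescuer->task->flags &= ~PF_THREAD_BOUND;  /* worker_unbind_and_unlock() */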
XXX: I think worker_maybe_bind_and_lock()/worker_unbind_and_unlock()
should also do a get_online_cpus(), this would likely allow us to
remove the while loop.
XXX: should probably repurpose GCWQ_DISASSOCIATED to warn on adding
works after CPU_DOWN_PREPARE -- its dual use to mark unbound gcwqs is
a tad annoying though.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/workqueue.c | 29 ++++++++++++++++++++---------
1 file changed, 20 insertions(+), 9 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ba977c4..dc50d5d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1283,8 +1283,14 @@ __acquires(&gcwq->lock)
return false;
if (task_cpu(task) == gcwq->cpu &&
cpumask_equal(&current->cpus_allowed,
- get_cpu_mask(gcwq->cpu)))
+ get_cpu_mask(gcwq->cpu))) {
+ /*
+ * Since we're binding to a particular cpu and need to
+ * stay there for correctness, mark us PF_THREAD_BOUND.
+ */
+ task->flags |= PF_THREAD_BOUND;
return true;
+ }
spin_unlock_irq(&gcwq->lock);
/*
@@ -1298,6 +1304,18 @@ __acquires(&gcwq->lock)
}
}
+static void worker_unbind_and_unlock(struct worker *worker)
+{
+ struct global_cwq *gcwq = worker->gcwq;
+ struct task_struct *task = worker->task;
+
+ /*
+ * It's no longer required we're PF_THREAD_BOUND, the work is done.
+ */
+ task->flags &= ~PF_THREAD_BOUND;
+ spin_unlock_irq(&gcwq->lock);
+}
+
static struct worker *alloc_worker(void)
{
struct worker *worker;
@@ -1360,15 +1378,9 @@ static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
if (IS_ERR(worker->task))
goto fail;
- /*
- * A rogue worker will become a regular one if CPU comes
- * online later on. Make sure every worker has
- * PF_THREAD_BOUND set.
- */
if (bind && !on_unbound_cpu)
kthread_bind(worker->task, gcwq->cpu);
else {
- worker->task->flags |= PF_THREAD_BOUND;
if (on_unbound_cpu)
worker->flags |= WORKER_UNBOUND;
}
@@ -2045,7 +2057,7 @@ repeat:
if (keep_working(gcwq))
wake_up_worker(gcwq);
- spin_unlock_irq(&gcwq->lock);
+ worker_unbind_and_unlock(rescuer);
}
schedule();
@@ -2995,7 +3007,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
if (IS_ERR(rescuer->task))
goto err;
- rescuer->task->flags |= PF_THREAD_BOUND;
wake_up_process(rescuer->task);
}


@ -1,82 +0,0 @@
From 0e6b40521d705113621a8ef227822305c6f24937 Mon Sep 17 00:00:00 2001
From: Yong Zhang <yong.zhang0@gmail.com>
Date: Sun, 16 Oct 2011 18:56:46 +0800
Subject: [210/256] workqueue: Use get_cpu_light() in flush_gcwq()
BUG: sleeping function called from invalid context at kernel/rtmutex.c:645
in_atomic(): 1, irqs_disabled(): 0, pid: 1739, name: bash
Pid: 1739, comm: bash Not tainted 3.0.6-rt17-00284-gb76d419 #3
Call Trace:
[<c06e3b5d>] ? printk+0x1d/0x20
[<c01390b6>] __might_sleep+0xe6/0x110
[<c06e633c>] rt_spin_lock+0x1c/0x30
[<c01655a6>] flush_gcwq+0x236/0x320
[<c021c651>] ? kfree+0xe1/0x1a0
[<c05b7178>] ? __cpufreq_remove_dev+0xf8/0x260
[<c0183fad>] ? rt_down_write+0xd/0x10
[<c06cd91e>] workqueue_cpu_down_callback+0x26/0x2d
[<c06e9d65>] notifier_call_chain+0x45/0x60
[<c0171cfe>] __raw_notifier_call_chain+0x1e/0x30
[<c014c9b4>] __cpu_notify+0x24/0x40
[<c06cbc6f>] _cpu_down+0xdf/0x330
[<c06cbef0>] cpu_down+0x30/0x50
[<c06cd6b0>] store_online+0x50/0xa7
[<c06cd660>] ? acpi_os_map_memory+0xec/0xec
[<c04f2faa>] sysdev_store+0x2a/0x40
[<c02887a4>] sysfs_write_file+0xa4/0x100
[<c0229ab2>] vfs_write+0xa2/0x170
[<c0288700>] ? sysfs_poll+0x90/0x90
[<c0229d92>] sys_write+0x42/0x70
[<c06ecedf>] sysenter_do_call+0x12/0x2d
CPU 1 is now offline
SMP alternatives: switching to UP code
SMP alternatives: switching to SMP code
Booting Node 0 Processor 1 APIC 0x1
smpboot cpu 1: start_ip = 9b000
Initializing CPU#1
BUG: sleeping function called from invalid context at kernel/rtmutex.c:645
in_atomic(): 1, irqs_disabled(): 1, pid: 0, name: kworker/0:0
Pid: 0, comm: kworker/0:0 Not tainted 3.0.6-rt17-00284-gb76d419 #3
Call Trace:
[<c06e3b5d>] ? printk+0x1d/0x20
[<c01390b6>] __might_sleep+0xe6/0x110
[<c06e633c>] rt_spin_lock+0x1c/0x30
[<c06cd85b>] workqueue_cpu_up_callback+0x56/0xf3
[<c06e9d65>] notifier_call_chain+0x45/0x60
[<c0171cfe>] __raw_notifier_call_chain+0x1e/0x30
[<c014c9b4>] __cpu_notify+0x24/0x40
[<c014c9ec>] cpu_notify+0x1c/0x20
[<c06e1d43>] notify_cpu_starting+0x1e/0x20
[<c06e0aad>] smp_callin+0xfb/0x10e
[<c06e0ad9>] start_secondary+0x19/0xd7
NMI watchdog enabled, takes one hw-pmu counter.
Switched to NOHz mode on CPU #1
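The splats above come from get_cpu(): it disables preemption, and under
CONFIG_PREEMPT_RT_FULL the gcwq lock is a sleeping rt_mutex, which must
not be taken in that state. get_cpu_light() sidesteps this by pinning
the task instead of disabling preemption; roughly (as defined elsewhere
in the -rt series):

    #define get_cpu_light()  ({ migrate_disable(); smp_processor_id(); })
    #define put_cpu_light()  migrate_enable()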
Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
Link: http://lkml.kernel.org/r/1318762607-2261-5-git-send-email-yong.zhang0@gmail.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/workqueue.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index dc50d5d..d3da5a8 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3297,14 +3297,14 @@ static void flush_gcwq(struct global_cwq *gcwq)
spin_unlock_irq(&gcwq->lock);
- gcwq = get_gcwq(get_cpu());
+ gcwq = get_gcwq(get_cpu_light());
spin_lock_irq(&gcwq->lock);
list_for_each_entry_safe(work, nw, &non_affine_works, entry) {
list_del_init(&work->entry);
___queue_work(get_work_cwq(work)->wq, gcwq, work);
}
spin_unlock_irq(&gcwq->lock);
- put_cpu();
+ put_cpu_light();
}
static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,


@ -1,28 +0,0 @@
From b6236cc57ae8c04321374b19c836dd6cd0311577 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 4 Nov 2011 18:58:24 +0100
Subject: [211/256] hotplug-stuff.patch
Do not take the lock for non-handled cases (might be atomic context)
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/workqueue.c | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index d3da5a8..bc867e8 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3215,6 +3215,11 @@ static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
new_worker = create_worker(gcwq, false);
if (!new_worker)
return NOTIFY_BAD;
+ case CPU_UP_CANCELED:
+ case CPU_ONLINE:
+ break;
+ default:
+ return notifier_from_errno(0);
}
/* some are called w/ irq disabled, don't disturb irq status */


@ -1,37 +0,0 @@
From 2c1ab83538ba04ad97c40e6901264116946bd2e6 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 21:41:35 +0200
Subject: [212/256] debugobjects-rt.patch
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
lib/debugobjects.c | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 0ab9ae8..84c2f50 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -310,7 +310,10 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
struct debug_obj *obj;
unsigned long flags;
- fill_pool();
+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (preempt_count() == 0 && !irqs_disabled())
+#endif
+ fill_pool();
db = get_bucket((unsigned long) addr);
@@ -1053,9 +1056,9 @@ static int __init debug_objects_replace_static_objects(void)
}
}
+ local_irq_enable();
printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
obj_pool_used);
- local_irq_enable();
return 0;
free:
hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {


@ -1,85 +0,0 @@
From b059c158cfd24162673d862d1d59700da17b74f9 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 22:43:07 +0200
Subject: [216/256] console-make-rt-friendly.patch
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/printk.c | 26 +++++++++++++++++++++++---
1 file changed, 23 insertions(+), 3 deletions(-)
diff --git a/kernel/printk.c b/kernel/printk.c
index 9a27a94..28baa36 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -507,6 +507,7 @@ static void __call_console_drivers(unsigned start, unsigned end)
{
struct console *con;
+ migrate_disable();
for_each_console(con) {
if (exclusive_console && con != exclusive_console)
continue;
@@ -515,6 +516,7 @@ static void __call_console_drivers(unsigned start, unsigned end)
(con->flags & CON_ANYTIME)))
con->write(con, &LOG_BUF(start), end - start);
}
+ migrate_enable();
}
#ifdef CONFIG_EARLY_PRINTK
@@ -839,12 +841,18 @@ static inline int can_use_console(unsigned int cpu)
* interrupts disabled. It should return with 'lockbuf_lock'
* released but interrupts still disabled.
*/
-static int console_trylock_for_printk(unsigned int cpu)
+static int console_trylock_for_printk(unsigned int cpu, unsigned long flags)
__releases(&logbuf_lock)
{
int retval = 0, wake = 0;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ int lock = !early_boot_irqs_disabled && !irqs_disabled_flags(flags) &&
+ !preempt_count();
+#else
+ int lock = 1;
+#endif
- if (console_trylock()) {
+ if (lock && console_trylock()) {
retval = 1;
/*
@@ -1021,8 +1029,15 @@ asmlinkage int vprintk(const char *fmt, va_list args)
* will release 'logbuf_lock' regardless of whether it
* actually gets the semaphore or not.
*/
- if (console_trylock_for_printk(this_cpu))
+ if (console_trylock_for_printk(this_cpu, flags)) {
+#ifndef CONFIG_PREEMPT_RT_FULL
console_unlock();
+#else
+ raw_local_irq_restore(flags);
+ console_unlock();
+ raw_local_irq_save(flags);
+#endif
+ }
lockdep_on();
out_restore_irqs:
@@ -1345,11 +1360,16 @@ again:
_con_start = con_start;
_log_end = log_end;
con_start = log_end; /* Flush */
+#ifndef CONFIG_PREEMPT_RT_FULL
raw_spin_unlock(&logbuf_lock);
stop_critical_timings(); /* don't trace print latency */
call_console_drivers(_con_start, _log_end);
start_critical_timings();
local_irq_restore(flags);
+#else
+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ call_console_drivers(_con_start, _log_end);
+#endif
}
console_locked = 0;


@ -1,55 +0,0 @@
From c701c74b786949d0f2b02212ae0bd18800818a1b Mon Sep 17 00:00:00 2001
From: Frank Rowand <frank.rowand@am.sony.com>
Date: Wed, 16 May 2012 18:09:36 -0700
Subject: [217/256] fix printk flush of messages
Updates console-make-rt-friendly.patch
#ifdef CONFIG_PREEMPT_RT_FULL, printk() output is never flushed by
printk() because:
# some liberties taken in this pseudo-code to make it easier to follow
printk()
  vprintk()
    raw_spin_lock(&logbuf_lock)
    # increment preempt_count():
    preempt_disable()
    result = console_trylock_for_printk()
      retval = 0
      # lock will always be false, because preempt_count() will be >= 1
      lock = ... && !preempt_count()
      if (lock)
        retval = 1
      return retval
    # result will always be false since lock will always be false
    if (result)
      console_unlock()
        # this is where the printk() output would be flushed
On system boot some printk() output is flushed because register_console()
and tty_open() call console_unlock().
This change also fixes the problem that was previously fixed by
preempt-rt-allow-immediate-magic-sysrq-output-for-preempt_rt_full.patch
Signed-off-by: Frank Rowand <frank.rowand@am.sony.com>
Cc: Frank <Frank_Rowand@sonyusa.com>
Link: http://lkml.kernel.org/r/4FB44FD0.4090800@am.sony.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/printk.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/printk.c b/kernel/printk.c
index 28baa36..7109711 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -847,7 +847,7 @@ static int console_trylock_for_printk(unsigned int cpu, unsigned long flags)
int retval = 0, wake = 0;
#ifdef CONFIG_PREEMPT_RT_FULL
int lock = !early_boot_irqs_disabled && !irqs_disabled_flags(flags) &&
- !preempt_count();
+ (preempt_count() <= 1);
#else
int lock = 1;
#endif


@ -1,26 +0,0 @@
From 74ab33822266a1afaf9c656a8d5a59355df989d7 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 7 Jun 2012 07:47:08 -0400
Subject: [218/256] power-use-generic-rwsem-on-rt
---
arch/powerpc/Kconfig | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index feab3ba..981d71f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -69,10 +69,11 @@ config LOCKDEP_SUPPORT
config RWSEM_GENERIC_SPINLOCK
bool
+ default y if PREEMPT_RT_FULL
config RWSEM_XCHGADD_ALGORITHM
bool
- default y
+ default y if !PREEMPT_RT_FULL
config GENERIC_LOCKBREAK
bool


@ -1,113 +0,0 @@
From 123b09bb43cd35bb64a91c8695a71df65d1e0f78 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 12 Nov 2011 14:00:48 +0100
Subject: [230/256] scsi-fcoe-rt-aware.patch
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/scsi/fcoe/fcoe.c | 16 ++++++++--------
drivers/scsi/fcoe/fcoe_ctlr.c | 4 ++--
drivers/scsi/libfc/fc_exch.c | 4 ++--
3 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 335e851..7f791b9 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1222,7 +1222,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
struct sk_buff *skb;
#ifdef CONFIG_SMP
struct fcoe_percpu_s *p0;
- unsigned targ_cpu = get_cpu();
+ unsigned targ_cpu = get_cpu_light();
#endif /* CONFIG_SMP */
FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
@@ -1278,7 +1278,7 @@ static void fcoe_percpu_thread_destroy(unsigned int cpu)
kfree_skb(skb);
spin_unlock_bh(&p->fcoe_rx_list.lock);
}
- put_cpu();
+ put_cpu_light();
#else
/*
* This a non-SMP scenario where the singular Rx thread is
@@ -1494,11 +1494,11 @@ err2:
static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
{
struct fcoe_percpu_s *fps;
- int rc;
+ int rc, cpu = get_cpu_light();
- fps = &get_cpu_var(fcoe_percpu);
+ fps = &per_cpu(fcoe_percpu, cpu);
rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
- put_cpu_var(fcoe_percpu);
+ put_cpu_light();
return rc;
}
@@ -1738,7 +1738,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
*/
hp = (struct fcoe_hdr *) skb_network_header(skb);
- stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu_light());
if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
if (stats->ErrorFrames < 5)
printk(KERN_WARNING "fcoe: FCoE version "
@@ -1770,13 +1770,13 @@ static void fcoe_recv_frame(struct sk_buff *skb)
goto drop;
if (!fcoe_filter_frames(lport, fp)) {
- put_cpu();
+ put_cpu_light();
fc_exch_recv(lport, fp);
return;
}
drop:
stats->ErrorFrames++;
- put_cpu();
+ put_cpu_light();
kfree_skb(skb);
}
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 249a106..753fcb9 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -719,7 +719,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
unsigned long sel_time = 0;
struct fcoe_dev_stats *stats;
- stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu());
+ stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu_light());
list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
@@ -752,7 +752,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
sel_time = fcf->time;
}
}
- put_cpu();
+ put_cpu_light();
if (sel_time && !fip->sel_fcf && !fip->sel_time) {
sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY);
fip->sel_time = sel_time;
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index aceffad..fb4e6ce 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -724,10 +724,10 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
}
memset(ep, 0, sizeof(*ep));
- cpu = get_cpu();
+ cpu = get_cpu_light();
pool = per_cpu_ptr(mp->pool, cpu);
spin_lock_bh(&pool->lock);
- put_cpu();
+ put_cpu_light();
/* peek cache of free slot */
if (pool->left != FC_XID_UNKNOWN) {


@ -1,73 +0,0 @@
From 818f63a2fd1445b9723502079135aa8101486de1 Mon Sep 17 00:00:00 2001
From: Steven Rostedt <rostedt@goodmis.org>
Date: Thu, 1 Mar 2012 13:55:28 -0500
Subject: [235/256] timer: Fix hotplug for -rt
Revert the RT patch:

    Author: Ingo Molnar <mingo@elte.hu>
    Date:   Fri Jul 3 08:30:32 2009 -0500
    timers: fix timer hotplug on -rt

    Here we are in the CPU_DEAD notifier, and we must not sleep nor
    enable interrupts.

There's no problem with sleeping in this notifier. But the get_cpu_var()
had to be converted to a get_local_var(). Replace the previous fix with
the get_local_var() convert.
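get_local_var() is the -rt counterpart of get_cpu_var() that pins the
task without disabling preemption, so sleeping locks may be taken in
between; a sketch, assuming the usual -rt percpu definition:

    #define get_local_var(var)  (*({        \
            migrate_disable();              \
            &__get_cpu_var(var); }))

    #define put_local_var(var)  do {        \
            (void)&(var);                   \
            migrate_enable();               \
    } while (0)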
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Carsten Emde <C.Emde@osadl.org>
Cc: John Kacur <jkacur@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Clark Williams <clark.williams@gmail.com>
Cc: stable-rt@vger.kernel.org
Link: http://lkml.kernel.org/r/20120301190344.948157137@goodmis.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/timer.c | 16 +++++-----------
1 file changed, 5 insertions(+), 11 deletions(-)
diff --git a/kernel/timer.c b/kernel/timer.c
index 935f46d..5413ff6 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1793,21 +1793,17 @@ static void __cpuinit migrate_timers(int cpu)
{
struct tvec_base *old_base;
struct tvec_base *new_base;
- unsigned long flags;
int i;
BUG_ON(cpu_online(cpu));
old_base = per_cpu(tvec_bases, cpu);
- new_base = get_cpu_var(tvec_bases);
+ new_base = get_local_var(tvec_bases);
/*
* The caller is globally serialized and nobody else
* takes two locks at once, deadlock is not possible.
*/
- local_irq_save(flags);
- while (!spin_trylock(&new_base->lock))
- cpu_relax();
- while (!spin_trylock(&old_base->lock))
- cpu_relax();
+ spin_lock_irq(&new_base->lock);
+ spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
BUG_ON(old_base->running_timer);
@@ -1821,10 +1817,8 @@ static void __cpuinit migrate_timers(int cpu)
}
spin_unlock(&old_base->lock);
- spin_unlock(&new_base->lock);
- local_irq_restore(flags);
-
- put_cpu_var(tvec_bases);
+ spin_unlock_irq(&new_base->lock);
+ put_local_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */


@ -1,45 +0,0 @@
From 5f34c2fecd000fa56735d08a735a1ae2e4d41f9c Mon Sep 17 00:00:00 2001
From: Steven Rostedt <rostedt@goodmis.org>
Date: Thu, 1 Mar 2012 13:55:29 -0500
Subject: [236/256] futex/rt: Fix possible lockup when taking pi_lock in proxy
handler
When taking the pi_lock, we must disable interrupts because the
pi_lock can also be taken in an interrupt handler.
Use raw_spin_lock_irq() instead of raw_spin_lock().
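The lockup being closed, as a sketch:

    raw_spin_lock(&task->pi_lock);     /* irqs still enabled */
    /* <irq fires on this cpu; its handler also wants task->pi_lock;
     *  the cpu now spins on a raw lock it already holds: lockup> */
    raw_spin_unlock(&task->pi_lock);   /* never reached */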
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Carsten Emde <C.Emde@osadl.org>
Cc: John Kacur <jkacur@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Clark Williams <clark.williams@gmail.com>
Cc: stable-rt@vger.kernel.org
Link: http://lkml.kernel.org/r/20120301190345.165160680@goodmis.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/rtmutex.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 921c90b..3bff726 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -1365,14 +1365,14 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
* PI_REQUEUE_INPROGRESS, so that if the task is waking up
* it will know that we are in the process of requeuing it.
*/
- raw_spin_lock(&task->pi_lock);
+ raw_spin_lock_irq(&task->pi_lock);
if (task->pi_blocked_on) {
- raw_spin_unlock(&task->pi_lock);
+ raw_spin_unlock_irq(&task->pi_lock);
raw_spin_unlock(&lock->wait_lock);
return -EAGAIN;
}
task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
- raw_spin_unlock(&task->pi_lock);
+ raw_spin_unlock_irq(&task->pi_lock);
#endif
ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);


@ -1,35 +0,0 @@
From d1cd4f9d663188da0abaca7e35186955434030c5 Mon Sep 17 00:00:00 2001
From: Steven Rostedt <rostedt@goodmis.org>
Date: Thu, 1 Mar 2012 13:55:32 -0500
Subject: [237/256] ring-buffer/rt: Check for irqs disabled before grabbing
reader lock
In RT the reader lock is a mutex and we cannot grab it when preemption is
disabled. The in_atomic() check that is there does not check if irqs are
disabled. Add that check as well.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Carsten Emde <C.Emde@osadl.org>
Cc: John Kacur <jkacur@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Clark Williams <clark.williams@gmail.com>
Cc: stable-rt@vger.kernel.org
Link: http://lkml.kernel.org/r/20120301190345.786365803@goodmis.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/trace/ring_buffer.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 24efd16..e59be41 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1022,7 +1022,7 @@ static inline int ok_to_lock(void)
if (in_nmi())
return 0;
#ifdef CONFIG_PREEMPT_RT_FULL
- if (in_atomic())
+ if (in_atomic() || irqs_disabled())
return 0;
#endif
return 1;


@ -1,109 +0,0 @@
From 3a56741dc30e11b2df958377c2bc9a6f603ef990 Mon Sep 17 00:00:00 2001
From: Steven Rostedt <rostedt@goodmis.org>
Date: Thu, 1 Mar 2012 13:55:30 -0500
Subject: [239/256] lglock/rt: Use non-rt for_each_cpu() in -rt code
Currently the RT version of the lglocks() does a for_each_online_cpu()
in the name##_global_lock_online() functions. Non-rt uses its own
mask for this, and for good reason.
A task may grab a *_global_lock_online(), and in the mean time, one
of the CPUs goes offline. Now when that task does a *_global_unlock_online()
it releases all the locks *except* the one that went offline.
Now if that CPU were to come back on line, its lock is now owned by a
task that never released it when it should have.
This causes all sorts of fun errors, like owners of a lock no longer
existing, or sleeping on IO, waiting to be woken up by a task that
happens to be blocked on the lock it never released.
Convert the RT versions to use the lglock specific cpumasks. Once a
CPU comes online the mask is set, and it is not cleared while the global
lock is held, so the locks for that CPU will still be taken and released.
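A timeline of the bug, with a hypothetical DEFINE_LGLOCK(mylock) and
cpus 0-3 online:

    mylock_global_lock_online();    /* takes per-cpu locks of 0,1,2,3 */
    /* ... cpu 2 is hot-unplugged here ... */
    mylock_global_unlock_online();  /* for_each_online_cpu() now skips
                                     * cpu 2: releases 0,1,3 only, so
                                     * cpu 2's lock stays owned when it
                                     * comes back online */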
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Carsten Emde <C.Emde@osadl.org>
Cc: John Kacur <jkacur@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Clark Williams <clark.williams@gmail.com>
Cc: stable-rt@vger.kernel.org
Link: http://lkml.kernel.org/r/20120301190345.374756214@goodmis.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/lglock.h | 35 ++++++++++++++++++++++++++++++++---
1 file changed, 32 insertions(+), 3 deletions(-)
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index 52b289f..cdfcef3 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -203,9 +203,31 @@
#else /* !PREEMPT_RT_FULL */
#define DEFINE_LGLOCK(name) \
\
- DEFINE_PER_CPU(struct rt_mutex, name##_lock); \
+ DEFINE_PER_CPU(struct rt_mutex, name##_lock); \
+ DEFINE_SPINLOCK(name##_cpu_lock); \
+ cpumask_t name##_cpus __read_mostly; \
DEFINE_LGLOCK_LOCKDEP(name); \
\
+ static int \
+ name##_lg_cpu_callback(struct notifier_block *nb, \
+ unsigned long action, void *hcpu) \
+ { \
+ switch (action & ~CPU_TASKS_FROZEN) { \
+ case CPU_UP_PREPARE: \
+ spin_lock(&name##_cpu_lock); \
+ cpu_set((unsigned long)hcpu, name##_cpus); \
+ spin_unlock(&name##_cpu_lock); \
+ break; \
+ case CPU_UP_CANCELED: case CPU_DEAD: \
+ spin_lock(&name##_cpu_lock); \
+ cpu_clear((unsigned long)hcpu, name##_cpus); \
+ spin_unlock(&name##_cpu_lock); \
+ } \
+ return NOTIFY_OK; \
+ } \
+ static struct notifier_block name##_lg_cpu_notifier = { \
+ .notifier_call = name##_lg_cpu_callback, \
+ }; \
void name##_lock_init(void) { \
int i; \
LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
@@ -214,6 +236,11 @@
lock = &per_cpu(name##_lock, i); \
rt_mutex_init(lock); \
} \
+ register_hotcpu_notifier(&name##_lg_cpu_notifier); \
+ get_online_cpus(); \
+ for_each_online_cpu(i) \
+ cpu_set(i, name##_cpus); \
+ put_online_cpus(); \
} \
EXPORT_SYMBOL(name##_lock_init); \
\
@@ -254,7 +281,8 @@
void name##_global_lock_online(void) { \
int i; \
rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
- for_each_online_cpu(i) { \
+ spin_lock(&name##_cpu_lock); \
+ for_each_cpu(i, &name##_cpus) { \
struct rt_mutex *lock; \
lock = &per_cpu(name##_lock, i); \
__rt_spin_lock(lock); \
@@ -265,11 +293,12 @@
void name##_global_unlock_online(void) { \
int i; \
rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
- for_each_online_cpu(i) { \
+ for_each_cpu(i, &name##_cpus) { \
struct rt_mutex *lock; \
lock = &per_cpu(name##_lock, i); \
__rt_spin_unlock(lock); \
} \
+ spin_unlock(&name##_cpu_lock); \
} \
EXPORT_SYMBOL(name##_global_unlock_online); \
\
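
A minimal pthreads sketch (not the kernel lglock) of the invariant this patch restores: the unlock side must walk exactly the mask the lock side walked, so the mask is held frozen under a guard lock for the whole lock/unlock pair. NCPU and the all-ones mask are illustrative assumptions:

#include <pthread.h>
#include <stdio.h>

#define NCPU 4  /* illustrative CPU count */

static pthread_mutex_t percpu_lock[NCPU];
static pthread_mutex_t cpu_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long cpus_mask;  /* bit i set: "CPU" i participates */

static void global_lock_online(void)
{
    pthread_mutex_lock(&cpu_lock);  /* freeze the mask across the pair */
    for (int i = 0; i < NCPU; i++)
        if (cpus_mask & (1UL << i))
            pthread_mutex_lock(&percpu_lock[i]);
}

static void global_unlock_online(void)
{
    /* walks the identical mask: no per-CPU lock is ever leaked */
    for (int i = 0; i < NCPU; i++)
        if (cpus_mask & (1UL << i))
            pthread_mutex_unlock(&percpu_lock[i]);
    pthread_mutex_unlock(&cpu_lock);
}

int main(void)
{
    for (int i = 0; i < NCPU; i++)
        pthread_mutex_init(&percpu_lock[i], NULL);
    cpus_mask = (1UL << NCPU) - 1;  /* all "CPUs" registered once */
    global_lock_online();
    global_unlock_online();
    puts("lock/unlock sets balanced");
    return 0;
}
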


@ -1,44 +0,0 @@
From efceff18a2779d9e65f0bc83629e9f5ac3988a15 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 22 May 2012 21:15:10 +0200
Subject: [248/256] mips-remove-smp-reserve-lock.patch
Instead of making the lock raw, remove it as it protects nothing.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
---
arch/mips/cavium-octeon/smp.c | 6 ------
1 file changed, 6 deletions(-)
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 97e7ce9..4b93048 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -257,8 +257,6 @@ DEFINE_PER_CPU(int, cpu_state);
extern void fixup_irqs(void);
-static DEFINE_SPINLOCK(smp_reserve_lock);
-
static int octeon_cpu_disable(void)
{
unsigned int cpu = smp_processor_id();
@@ -266,8 +264,6 @@ static int octeon_cpu_disable(void)
if (cpu == 0)
return -EBUSY;
- spin_lock(&smp_reserve_lock);
-
set_cpu_online(cpu, false);
cpu_clear(cpu, cpu_callin_map);
local_irq_disable();
@@ -277,8 +273,6 @@ static int octeon_cpu_disable(void)
flush_cache_all();
local_flush_tlb_all();
- spin_unlock(&smp_reserve_lock);
-
return 0;
}


@ -1,29 +0,0 @@
From c5cdefa21e0ebd1e86aace4e07ea1114b840ff00 Mon Sep 17 00:00:00 2001
From: Steven Rostedt <srostedt@redhat.com>
Date: Fri, 22 Jun 2012 07:25:14 -0400
Subject: [254/256] spinlock/rt: Include bug.h in spinlock_rt.h to satisfy
BUG() usage
Some archs (powerpc) failed to compile because of the reference to
BUG() in spinlock_rt.h. Although bug.h is included in many headers,
if spinlock.h is included in a C file before that, it will fail to
compile.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
include/linux/spinlock_rt.h | 2 ++
1 file changed, 2 insertions(+)
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
index 3b555b4..0618387 100644
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -5,6 +5,8 @@
#error Do not include directly. Use spinlock.h
#endif
+#include <linux/bug.h>
+
extern void
__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);


@ -1,40 +0,0 @@
From c5d1b28f48fea29be4499539a40a3259398bc4d1 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 18 Jun 2012 19:53:17 +0200
Subject: [255/256] powerpc: Mark low level irq handlers NO_THREAD
These low level handlers cannot be threaded. Mark them NO_THREAD
Reported-by: leroy christophe <christophe.leroy@c-s.fr>
Tested-by: leroy christophe <christophe.leroy@c-s.fr>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
arch/powerpc/platforms/8xx/m8xx_setup.c | 1 +
arch/powerpc/sysdev/cpm1.c | 1 +
2 files changed, 2 insertions(+)
diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c
index 1e12108..806cbbd 100644
--- a/arch/powerpc/platforms/8xx/m8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/m8xx_setup.c
@@ -43,6 +43,7 @@ static irqreturn_t timebase_interrupt(int irq, void *dev)
static struct irqaction tbint_irqaction = {
.handler = timebase_interrupt,
+ .flags = IRQF_NO_THREAD,
.name = "tbint",
};
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
index d4fa03f..5e6ff38 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -120,6 +120,7 @@ static irqreturn_t cpm_error_interrupt(int irq, void *dev)
static struct irqaction cpm_error_irqaction = {
.handler = cpm_error_interrupt,
+ .flags = IRQF_NO_THREAD,
.name = "error",
};
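
A compilable stand-in showing the shape of the change: the handler is registered with IRQF_NO_THREAD so forced threading leaves it alone. The struct layout, handler signature, and flag value here are illustrative assumptions, not the kernel's definitions:

#include <stdio.h>

#define IRQF_NO_THREAD 0x00010000UL  /* illustrative flag value */

struct irqaction {                   /* reduced stand-in, not the kernel's */
    void (*handler)(void);
    unsigned long flags;
    const char *name;
};

static void timebase_interrupt(void) { /* low level tick work */ }

static struct irqaction tbint_irqaction = {
    .handler = timebase_interrupt,
    .flags   = IRQF_NO_THREAD,       /* forced threading must skip this */
    .name    = "tbint",
};

int main(void)
{
    printf("%s may be threaded: %s\n", tbint_irqaction.name,
           (tbint_irqaction.flags & IRQF_NO_THREAD) ? "no" : "yes");
    return 0;
}
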


@ -1,16 +0,0 @@
From e74823c5171f3a6d4eb4d957105a85ae4848054b Mon Sep 17 00:00:00 2001
From: Steven Rostedt <srostedt@redhat.com>
Date: Fri, 22 Jun 2012 19:44:35 -0400
Subject: [256/256] Linux 3.4.4-rt13 REBASE
---
localversion-rt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/localversion-rt b/localversion-rt
index 700c857..9f7d0bd 100644
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt8
+-rt13


@ -1,19 +1,19 @@
From 8f93e949d590fddc2be4a3cb7c55ed382d6a55a3 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 21 Jul 2009 22:54:51 +0200
Subject: [068/256] acpi: Do not disable interrupts on PREEMPT_RT
Subject: acpi: Do not disable interrupts on PREEMPT_RT
Use the local_irq_*_nort() variants.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/x86/include/asm/acpi.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 610001d..c1c23d2 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
Index: linux-stable/arch/x86/include/asm/acpi.h
===================================================================
--- linux-stable.orig/arch/x86/include/asm/acpi.h
+++ linux-stable/arch/x86/include/asm/acpi.h
@@ -51,8 +51,8 @@
#define ACPI_ASM_MACROS


@ -1,8 +1,6 @@
From 778005b19875f30c628c68ebc54229ca06619fcd Mon Sep 17 00:00:00 2001
Subject: mm: Fixup all fault handlers to check current->pagefault_disable
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 17 Mar 2011 11:32:28 +0100
Subject: [033/256] mm: Fixup all fault handlers to check
current->pagefault_disable
Necessary for decoupling pagefault disable from preempt count.
@ -23,20 +21,20 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/powerpc/mm/fault.c | 2 +-
arch/s390/mm/fault.c | 6 ++++--
arch/score/mm/fault.c | 2 +-
arch/sh/mm/fault_32.c | 2 +-
arch/sparc/mm/fault_32.c | 4 ++--
arch/sh/mm/fault.c | 2 +-
arch/sparc/mm/fault_32.c | 2 +-
arch/sparc/mm/fault_64.c | 2 +-
arch/tile/mm/fault.c | 2 +-
arch/um/kernel/trap.c | 2 +-
arch/x86/mm/fault.c | 2 +-
arch/xtensa/mm/fault.c | 2 +-
22 files changed, 27 insertions(+), 24 deletions(-)
22 files changed, 26 insertions(+), 23 deletions(-)
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 5eecab1..0ddf6c0 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -106,7 +106,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
Index: linux-stable/arch/alpha/mm/fault.c
===================================================================
--- linux-stable.orig/arch/alpha/mm/fault.c
+++ linux-stable/arch/alpha/mm/fault.c
@@ -108,7 +108,7 @@ do_page_fault(unsigned long address, uns
/* If we're in an interrupt context, or have no user context,
we must not take the fault. */
@ -45,11 +43,11 @@ index 5eecab1..0ddf6c0 100644
goto no_context;
#ifdef CONFIG_ALPHA_LARGE_VMALLOC
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 5bb4835..40bd40f 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -279,7 +279,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
Index: linux-stable/arch/arm/mm/fault.c
===================================================================
--- linux-stable.orig/arch/arm/mm/fault.c
+++ linux-stable/arch/arm/mm/fault.c
@@ -279,7 +279,7 @@ do_page_fault(unsigned long addr, unsign
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
@ -58,11 +56,11 @@ index 5bb4835..40bd40f 100644
goto no_context;
/*
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index f7040a1..623a027 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -81,7 +81,8 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
Index: linux-stable/arch/avr32/mm/fault.c
===================================================================
--- linux-stable.orig/arch/avr32/mm/fault.c
+++ linux-stable/arch/avr32/mm/fault.c
@@ -81,7 +81,8 @@ asmlinkage void do_page_fault(unsigned l
* If we're in an interrupt or have no user context, we must
* not take the fault...
*/
@ -72,11 +70,11 @@ index f7040a1..623a027 100644
goto no_context;
local_irq_enable();
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index b4760d8..1c1630e 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -112,7 +112,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
Index: linux-stable/arch/cris/mm/fault.c
===================================================================
--- linux-stable.orig/arch/cris/mm/fault.c
+++ linux-stable/arch/cris/mm/fault.c
@@ -114,7 +114,7 @@ do_page_fault(unsigned long address, str
* user context, we must not take the fault.
*/
@ -84,12 +82,12 @@ index b4760d8..1c1630e 100644
+ if (in_atomic() || !mm || current->pagefault_disabled)
goto no_context;
down_read(&mm->mmap_sem);
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 331c1e2..6372088 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -78,7 +78,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
retry:
Index: linux-stable/arch/frv/mm/fault.c
===================================================================
--- linux-stable.orig/arch/frv/mm/fault.c
+++ linux-stable/arch/frv/mm/fault.c
@@ -78,7 +78,7 @@ asmlinkage void do_page_fault(int datamm
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
@ -98,11 +96,11 @@ index 331c1e2..6372088 100644
goto no_context;
down_read(&mm->mmap_sem);
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 02d29c2..721dbb9 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -88,7 +88,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
Index: linux-stable/arch/ia64/mm/fault.c
===================================================================
--- linux-stable.orig/arch/ia64/mm/fault.c
+++ linux-stable/arch/ia64/mm/fault.c
@@ -98,7 +98,7 @@ ia64_do_page_fault (unsigned long addres
/*
* If we're in an interrupt or have no user context, we must not take the fault..
*/
@ -111,11 +109,11 @@ index 02d29c2..721dbb9 100644
goto no_context;
#ifdef CONFIG_VIRTUAL_MEM_MAP
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index 3cdfa9c..1eec8af 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -114,7 +114,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
Index: linux-stable/arch/m32r/mm/fault.c
===================================================================
--- linux-stable.orig/arch/m32r/mm/fault.c
+++ linux-stable/arch/m32r/mm/fault.c
@@ -114,7 +114,7 @@ asmlinkage void do_page_fault(struct pt_
* If we're in an interrupt or have no user context or are running in an
* atomic region then we must not take the fault..
*/
@ -124,11 +122,11 @@ index 3cdfa9c..1eec8af 100644
goto bad_area_nosemaphore;
/* When running in the kernel we expect faults to occur only to
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 6b020a8..7a4cc68 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -84,7 +84,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
Index: linux-stable/arch/m68k/mm/fault.c
===================================================================
--- linux-stable.orig/arch/m68k/mm/fault.c
+++ linux-stable/arch/m68k/mm/fault.c
@@ -85,7 +85,7 @@ int do_page_fault(struct pt_regs *regs,
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
@ -136,12 +134,12 @@ index 6b020a8..7a4cc68 100644
+ if (in_atomic() || !mm || current->pagefault_disabled)
goto no_context;
down_read(&mm->mmap_sem);
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index c38a265..621ad49 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -106,7 +106,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
retry:
Index: linux-stable/arch/microblaze/mm/fault.c
===================================================================
--- linux-stable.orig/arch/microblaze/mm/fault.c
+++ linux-stable/arch/microblaze/mm/fault.c
@@ -108,7 +108,7 @@ void do_page_fault(struct pt_regs *regs,
if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
is_write = 0;
@ -150,11 +148,11 @@ index c38a265..621ad49 100644
if (kernel_mode(regs))
goto bad_area_nosemaphore;
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index c14f6df..3468e69 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -89,7 +89,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
Index: linux-stable/arch/mips/mm/fault.c
===================================================================
--- linux-stable.orig/arch/mips/mm/fault.c
+++ linux-stable/arch/mips/mm/fault.c
@@ -89,7 +89,7 @@ asmlinkage void __kprobes do_page_fault(
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
@ -163,11 +161,11 @@ index c14f6df..3468e69 100644
goto bad_area_nosemaphore;
retry:
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 90f346f..ff5acf5 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -167,7 +167,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
Index: linux-stable/arch/mn10300/mm/fault.c
===================================================================
--- linux-stable.orig/arch/mn10300/mm/fault.c
+++ linux-stable/arch/mn10300/mm/fault.c
@@ -167,7 +167,7 @@ asmlinkage void do_page_fault(struct pt_
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
@ -176,11 +174,11 @@ index 90f346f..ff5acf5 100644
goto no_context;
down_read(&mm->mmap_sem);
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 18162ce..09ecc8a 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -176,7 +176,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
Index: linux-stable/arch/parisc/mm/fault.c
===================================================================
--- linux-stable.orig/arch/parisc/mm/fault.c
+++ linux-stable/arch/parisc/mm/fault.c
@@ -176,7 +176,7 @@ void do_page_fault(struct pt_regs *regs,
unsigned long acc_type;
int fault;
@ -189,11 +187,11 @@ index 18162ce..09ecc8a 100644
goto no_context;
down_read(&mm->mmap_sem);
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 08ffcf5..6642dff 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -261,7 +261,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
Index: linux-stable/arch/powerpc/mm/fault.c
===================================================================
--- linux-stable.orig/arch/powerpc/mm/fault.c
+++ linux-stable/arch/powerpc/mm/fault.c
@@ -261,7 +261,7 @@ int __kprobes do_page_fault(struct pt_re
if (!arch_irq_disabled_regs(regs))
local_irq_enable();
@ -202,11 +200,11 @@ index 08ffcf5..6642dff 100644
if (!user_mode(regs))
return SIGSEGV;
/* in_atomic() in user mode is really bad,
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 4e66860..f7d796f 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -283,7 +283,8 @@ static inline int do_exception(struct pt_regs *regs, int access)
Index: linux-stable/arch/s390/mm/fault.c
===================================================================
--- linux-stable.orig/arch/s390/mm/fault.c
+++ linux-stable/arch/s390/mm/fault.c
@@ -286,7 +286,8 @@ static inline int do_exception(struct pt
* user context.
*/
fault = VM_FAULT_BADCONTEXT;
@ -216,7 +214,7 @@ index 4e66860..f7d796f 100644
goto out;
address = trans_exc_code & __FAIL_ADDR_MASK;
@@ -415,7 +416,8 @@ void __kprobes do_asce_exception(struct pt_regs *regs)
@@ -423,7 +424,8 @@ void __kprobes do_asce_exception(struct
unsigned long trans_exc_code;
trans_exc_code = regs->int_parm_long;
@ -226,11 +224,11 @@ index 4e66860..f7d796f 100644
goto no_context;
down_read(&mm->mmap_sem);
diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c
index 47b600e..4c12824 100644
--- a/arch/score/mm/fault.c
+++ b/arch/score/mm/fault.c
@@ -72,7 +72,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
Index: linux-stable/arch/score/mm/fault.c
===================================================================
--- linux-stable.orig/arch/score/mm/fault.c
+++ linux-stable/arch/score/mm/fault.c
@@ -72,7 +72,7 @@ asmlinkage void do_page_fault(struct pt_
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
@ -239,78 +237,11 @@ index 47b600e..4c12824 100644
goto bad_area_nosemaphore;
down_read(&mm->mmap_sem);
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index e99b104..8c3ae42 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -166,7 +166,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
* If we're in an interrupt, have no user context or are running
* in an atomic region then we must not take the fault:
*/
- if (in_atomic() || !mm)
+ if (in_atomic() || !mm || current->pagefault_disabled)
goto no_context;
down_read(&mm->mmap_sem);
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index df3155a..ffaa99e 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -248,8 +248,8 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (in_atomic() || !mm)
- goto no_context;
+ if (in_atomic() || !mm || current->pagefault_disabled)
+ goto no_context;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 1fe0429..fbd5e11 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -323,7 +323,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (in_atomic() || !mm)
+ if (in_atomic() || !mm || current->pagefault_enabled)
goto intr_or_no_mm;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 22e58f5..f683aad 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -355,7 +355,7 @@ static int handle_page_fault(struct pt_regs *regs,
* If we're in an interrupt, have no user context or are running in an
* atomic region then we must not take the fault.
*/
- if (in_atomic() || !mm) {
+ if (in_atomic() || !mm || current->pagefault_disabled) {
vma = NULL; /* happy compiler */
goto bad_area_nosemaphore;
}
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index dafc947..a283400 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -37,7 +37,7 @@ int handle_page_fault(unsigned long address, unsigned long ip,
* If the fault was during atomic operation, don't take the fault, just
* fail.
*/
- if (in_atomic())
+ if (in_atomic() || !mm || current->pagefault_disabled)
goto out_nosemaphore;
down_read(&mm->mmap_sem);
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 3ecfd1a..bb891f2 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1094,7 +1094,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
Index: linux-stable/arch/sh/mm/fault.c
===================================================================
--- linux-stable.orig/arch/sh/mm/fault.c
+++ linux-stable/arch/sh/mm/fault.c
@@ -445,7 +445,7 @@ asmlinkage void __kprobes do_page_fault(
* If we're in an interrupt, have no user context or are running
* in an atomic region then we must not take the fault:
*/
@ -319,11 +250,76 @@ index 3ecfd1a..bb891f2 100644
bad_area_nosemaphore(regs, error_code, address);
return;
}
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index b17885a..cc58b7c 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -56,7 +56,7 @@ void do_page_fault(struct pt_regs *regs)
Index: linux-stable/arch/sparc/mm/fault_32.c
===================================================================
--- linux-stable.orig/arch/sparc/mm/fault_32.c
+++ linux-stable/arch/sparc/mm/fault_32.c
@@ -200,7 +200,7 @@ asmlinkage void do_sparc_fault(struct pt
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (in_atomic() || !mm)
+ if (in_atomic() || !mm || current->pagefault_disabled)
goto no_context;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
Index: linux-stable/arch/sparc/mm/fault_64.c
===================================================================
--- linux-stable.orig/arch/sparc/mm/fault_64.c
+++ linux-stable/arch/sparc/mm/fault_64.c
@@ -323,7 +323,7 @@ asmlinkage void __kprobes do_sparc64_fau
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (in_atomic() || !mm)
+ if (in_atomic() || !mm || current->pagefault_enabled)
goto intr_or_no_mm;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
Index: linux-stable/arch/tile/mm/fault.c
===================================================================
--- linux-stable.orig/arch/tile/mm/fault.c
+++ linux-stable/arch/tile/mm/fault.c
@@ -359,7 +359,7 @@ static int handle_page_fault(struct pt_r
* If we're in an interrupt, have no user context or are running in an
* atomic region then we must not take the fault.
*/
- if (in_atomic() || !mm) {
+ if (in_atomic() || !mm || current->pagefault_disabled) {
vma = NULL; /* happy compiler */
goto bad_area_nosemaphore;
}
Index: linux-stable/arch/um/kernel/trap.c
===================================================================
--- linux-stable.orig/arch/um/kernel/trap.c
+++ linux-stable/arch/um/kernel/trap.c
@@ -39,7 +39,7 @@ int handle_page_fault(unsigned long addr
* If the fault was during atomic operation, don't take the fault, just
* fail.
*/
- if (in_atomic())
+ if (in_atomic() || current->pagefault_disabled)
goto out_nosemaphore;
retry:
Index: linux-stable/arch/x86/mm/fault.c
===================================================================
--- linux-stable.orig/arch/x86/mm/fault.c
+++ linux-stable/arch/x86/mm/fault.c
@@ -1094,7 +1094,7 @@ do_page_fault(struct pt_regs *regs, unsi
* If we're in an interrupt, have no user context or are running
* in an atomic region then we must not take the fault:
*/
- if (unlikely(in_atomic() || !mm)) {
+ if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) {
bad_area_nosemaphore(regs, error_code, address);
return;
}
Index: linux-stable/arch/xtensa/mm/fault.c
===================================================================
--- linux-stable.orig/arch/xtensa/mm/fault.c
+++ linux-stable/arch/xtensa/mm/fault.c
@@ -57,7 +57,7 @@ void do_page_fault(struct pt_regs *regs)
/* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
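
Every fault handler in this patch grows the same three-part guard. A small stand-alone sketch of that predicate, with reduced stand-in types for the task and mm structures and a plain flag in place of in_atomic():

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct task_struct { int pagefault_disabled; };  /* reduced stand-in */
struct mm_struct   { int unused; };

static bool atomic_ctx;  /* stands in for in_atomic() */

/* true: take the no_context / bad_area_nosemaphore exit */
static bool must_not_fault(const struct task_struct *current,
                           const struct mm_struct *mm)
{
    return atomic_ctx || !mm || current->pagefault_disabled;
}

int main(void)
{
    struct task_struct t = { .pagefault_disabled = 1 };
    struct mm_struct m = { 0 };
    printf("bail out: %d\n", must_not_fault(&t, &m));  /* prints 1 */
    return 0;
}
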


@ -1,7 +1,6 @@
From 4e48dbb83d55f79960fe418e48e37ba72ce33b9a Mon Sep 17 00:00:00 2001
Subject: arm: Allow forced irq threading
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 16 Jul 2011 13:15:20 +0200
Subject: [017/256] arm: Allow forced irq threading
All timer interrupts and the perf interrupt are marked NO_THREAD, so
it's safe to allow forced interrupt threading.
@ -11,12 +10,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/arm/Kconfig | 1 +
1 file changed, 1 insertion(+)
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -31,6 +31,7 @@
select HAVE_C_RECORDMCOUNT
select HAVE_GENERIC_HARDIRQS
Index: linux-stable/arch/arm/Kconfig
===================================================================
--- linux-stable.orig/arch/arm/Kconfig
+++ linux-stable/arch/arm/Kconfig
@@ -40,6 +40,7 @@ config ARM
select GENERIC_IRQ_SHOW
select ARCH_WANT_IPC_PARSE_VERSION
select HARDIRQS_SW_RESEND
+ select IRQ_FORCED_THREADING
select CPU_PM if (SUSPEND || CPU_IDLE)
select GENERIC_PCI_IOMAP


@ -1,8 +1,6 @@
From 58f015923d7d9267139ff8bce42f2f6215db1540 Mon Sep 17 00:00:00 2001
From: Benedikt Spranger <b.spranger@linutronix.de>
Date: Sat, 6 Mar 2010 17:47:10 +0100
Subject: [026/256] ARM: AT91: PIT: Remove irq handler when clock event is
unused
Subject: ARM: AT91: PIT: Remove irq handler when clock event is unused
Set up and remove the interrupt handler in clock event mode selection.
This avoids calling the (shared) interrupt handler when the device is
@ -10,16 +8,17 @@ not used.
Signed-off-by: Benedikt Spranger <b.spranger@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/arm/mach-at91/at91rm9200_time.c | 1 +
arch/arm/mach-at91/at91sam926x_time.c | 5 ++++-
2 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index 104ca40..49aea48 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -130,6 +130,7 @@ clkevt32k_mode(enum clock_event_mode mode, struct clock_event_device *dev)
Index: linux-stable/arch/arm/mach-at91/at91rm9200_time.c
===================================================================
--- linux-stable.orig/arch/arm/mach-at91/at91rm9200_time.c
+++ linux-stable/arch/arm/mach-at91/at91rm9200_time.c
@@ -130,6 +130,7 @@ clkevt32k_mode(enum clock_event_mode mod
break;
case CLOCK_EVT_MODE_SHUTDOWN:
case CLOCK_EVT_MODE_UNUSED:
@ -27,10 +26,10 @@ index 104ca40..49aea48 100644
case CLOCK_EVT_MODE_RESUME:
irqmask = 0;
break;
diff --git a/arch/arm/mach-at91/at91sam926x_time.c b/arch/arm/mach-at91/at91sam926x_time.c
index a94758b..dd300f3 100644
--- a/arch/arm/mach-at91/at91sam926x_time.c
+++ b/arch/arm/mach-at91/at91sam926x_time.c
Index: linux-stable/arch/arm/mach-at91/at91sam926x_time.c
===================================================================
--- linux-stable.orig/arch/arm/mach-at91/at91sam926x_time.c
+++ linux-stable/arch/arm/mach-at91/at91sam926x_time.c
@@ -67,7 +67,7 @@ static struct clocksource pit_clk = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
@ -40,7 +39,7 @@ index a94758b..dd300f3 100644
/*
* Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16)
*/
@@ -76,6 +76,8 @@ pit_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev)
@@ -76,6 +76,8 @@ pit_clkevt_mode(enum clock_event_mode mo
{
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
@ -49,7 +48,7 @@ index a94758b..dd300f3 100644
/* update clocksource counter */
pit_cnt += pit_cycle * PIT_PICNT(pit_read(AT91_PIT_PIVR));
pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN
@@ -88,6 +90,7 @@ pit_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev)
@@ -88,6 +90,7 @@ pit_clkevt_mode(enum clock_event_mode mo
case CLOCK_EVT_MODE_UNUSED:
/* disable irq, leaving the clocksource active */
pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN);


@ -1,21 +1,21 @@
From e1833c8a323d39284e16ea1d483a43ea3e210291 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 1 May 2010 18:29:35 +0200
Subject: [221/256] ARM: at91: tclib: Default to tclib timer for RT
Subject: ARM: at91: tclib: Default to tclib timer for RT
RT is not too happy about the shared timer interrupt in AT91
devices. Default to tclib timer for RT.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/misc/Kconfig | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index eead6ce..ecda1c4 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -72,6 +72,7 @@ config AB8500_PWM
Index: linux-stable/drivers/misc/Kconfig
===================================================================
--- linux-stable.orig/drivers/misc/Kconfig
+++ linux-stable/drivers/misc/Kconfig
@@ -73,6 +73,7 @@ config AB8500_PWM
config ATMEL_TCLIB
bool "Atmel AT32/AT91 Timer/Counter Library"
depends on (AVR32 || ARCH_AT91)
@ -23,7 +23,7 @@ index eead6ce..ecda1c4 100644
help
Select this if you want a library to allocate the Timer/Counter
blocks found on many Atmel processors. This facilitates using
@@ -104,7 +105,7 @@ config ATMEL_TCB_CLKSRC_BLOCK
@@ -105,7 +106,7 @@ config ATMEL_TCB_CLKSRC_BLOCK
config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
bool "TC Block use 32 KiHz clock"
depends on ATMEL_TCB_CLKSRC


@ -1,7 +1,7 @@
From 98d149e1f5d94d6314e5bbc07dd4bd0b1c2f8684 Mon Sep 17 00:00:00 2001
Subject: preempt-rt: Convert arm boot_lock to raw
From: Frank Rowand <frank.rowand@am.sony.com>
Date: Mon, 19 Sep 2011 14:51:14 -0700
Subject: [018/256] preempt-rt: Convert arm boot_lock to raw
The arm boot_lock is used by the secondary processor startup code. The locking
task is the idle thread, which has idle->sched_class == &idle_sched_class.
@ -27,10 +27,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/arm/plat-versatile/platsmp.c | 10 +++++-----
5 files changed, 26 insertions(+), 26 deletions(-)
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index 36c3984..77499ea 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
Index: linux-stable/arch/arm/mach-exynos/platsmp.c
===================================================================
--- linux-stable.orig/arch/arm/mach-exynos/platsmp.c
+++ linux-stable/arch/arm/mach-exynos/platsmp.c
@@ -62,7 +62,7 @@ static void __iomem *scu_base_addr(void)
return (void __iomem *)(S5P_VA_SCU);
}
@ -40,7 +40,7 @@ index 36c3984..77499ea 100644
void __cpuinit platform_secondary_init(unsigned int cpu)
{
@@ -82,8 +82,8 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
@@ -82,8 +82,8 @@ void __cpuinit platform_secondary_init(u
/*
* Synchronise with the boot thread.
*/
@ -51,7 +51,7 @@ index 36c3984..77499ea 100644
}
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -94,7 +94,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -94,7 +94,7 @@ int __cpuinit boot_secondary(unsigned in
* Set synchronisation state between this boot processor
* and the secondary one
*/
@ -60,7 +60,7 @@ index 36c3984..77499ea 100644
/*
* The secondary processor is waiting to be released from
@@ -123,7 +123,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -123,7 +123,7 @@ int __cpuinit boot_secondary(unsigned in
if (timeout == 0) {
printk(KERN_ERR "cpu1 power enable failed");
@ -69,7 +69,7 @@ index 36c3984..77499ea 100644
return -ETIMEDOUT;
}
}
@@ -151,7 +151,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -151,7 +151,7 @@ int __cpuinit boot_secondary(unsigned in
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
@ -78,10 +78,10 @@ index 36c3984..77499ea 100644
return pen_release != -1 ? -ENOSYS : 0;
}
diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c
index db0117e..87daf5f 100644
--- a/arch/arm/mach-msm/platsmp.c
+++ b/arch/arm/mach-msm/platsmp.c
Index: linux-stable/arch/arm/mach-msm/platsmp.c
===================================================================
--- linux-stable.orig/arch/arm/mach-msm/platsmp.c
+++ linux-stable/arch/arm/mach-msm/platsmp.c
@@ -40,7 +40,7 @@ extern void msm_secondary_startup(void);
*/
volatile int pen_release = -1;
@ -91,7 +91,7 @@ index db0117e..87daf5f 100644
static inline int get_core_count(void)
{
@@ -70,8 +70,8 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
@@ -70,8 +70,8 @@ void __cpuinit platform_secondary_init(u
/*
* Synchronise with the boot thread.
*/
@ -102,7 +102,7 @@ index db0117e..87daf5f 100644
}
static __cpuinit void prepare_cold_cpu(unsigned int cpu)
@@ -108,7 +108,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -108,7 +108,7 @@ int __cpuinit boot_secondary(unsigned in
* set synchronisation state between this boot processor
* and the secondary one
*/
@ -111,7 +111,7 @@ index db0117e..87daf5f 100644
/*
* The secondary processor is waiting to be released from
@@ -142,7 +142,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -142,7 +142,7 @@ int __cpuinit boot_secondary(unsigned in
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
@ -120,11 +120,11 @@ index db0117e..87daf5f 100644
return pen_release != -1 ? -ENOSYS : 0;
}
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index deffbf1..81ca676 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -34,7 +34,7 @@
Index: linux-stable/arch/arm/mach-omap2/omap-smp.c
===================================================================
--- linux-stable.orig/arch/arm/mach-omap2/omap-smp.c
+++ linux-stable/arch/arm/mach-omap2/omap-smp.c
@@ -42,7 +42,7 @@
/* SCU base address */
static void __iomem *scu_base;
@ -133,7 +133,7 @@ index deffbf1..81ca676 100644
void __iomem *omap4_get_scu_base(void)
{
@@ -65,8 +65,8 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
@@ -73,8 +73,8 @@ void __cpuinit platform_secondary_init(u
/*
* Synchronise with the boot thread.
*/
@ -144,7 +144,7 @@ index deffbf1..81ca676 100644
}
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -77,7 +77,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -87,7 +87,7 @@ int __cpuinit boot_secondary(unsigned in
* Set synchronisation state between this boot processor
* and the secondary one
*/
@ -153,7 +153,7 @@ index deffbf1..81ca676 100644
/*
* Update the AuxCoreBoot0 with boot state for secondary core.
@@ -117,7 +117,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -131,7 +131,7 @@ int __cpuinit boot_secondary(unsigned in
* Now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
@ -162,11 +162,11 @@ index deffbf1..81ca676 100644
return 0;
}
diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c
index eff5842..acc9da2 100644
--- a/arch/arm/mach-ux500/platsmp.c
+++ b/arch/arm/mach-ux500/platsmp.c
@@ -58,7 +58,7 @@ static void __iomem *scu_base_addr(void)
Index: linux-stable/arch/arm/mach-ux500/platsmp.c
===================================================================
--- linux-stable.orig/arch/arm/mach-ux500/platsmp.c
+++ linux-stable/arch/arm/mach-ux500/platsmp.c
@@ -56,7 +56,7 @@ static void __iomem *scu_base_addr(void)
return NULL;
}
@ -175,7 +175,7 @@ index eff5842..acc9da2 100644
void __cpuinit platform_secondary_init(unsigned int cpu)
{
@@ -78,8 +78,8 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
@@ -76,8 +76,8 @@ void __cpuinit platform_secondary_init(u
/*
* Synchronise with the boot thread.
*/
@ -186,7 +186,7 @@ index eff5842..acc9da2 100644
}
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -90,7 +90,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -88,7 +88,7 @@ int __cpuinit boot_secondary(unsigned in
* set synchronisation state between this boot processor
* and the secondary one
*/
@ -195,7 +195,7 @@ index eff5842..acc9da2 100644
/*
* The secondary processor is waiting to be released from
@@ -111,7 +111,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -109,7 +109,7 @@ int __cpuinit boot_secondary(unsigned in
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
@ -204,11 +204,11 @@ index eff5842..acc9da2 100644
return pen_release != -1 ? -ENOSYS : 0;
}
diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c
index 49c7db4..1f7a3d2 100644
--- a/arch/arm/plat-versatile/platsmp.c
+++ b/arch/arm/plat-versatile/platsmp.c
@@ -38,7 +38,7 @@ static void __cpuinit write_pen_release(int val)
Index: linux-stable/arch/arm/plat-versatile/platsmp.c
===================================================================
--- linux-stable.orig/arch/arm/plat-versatile/platsmp.c
+++ linux-stable/arch/arm/plat-versatile/platsmp.c
@@ -38,7 +38,7 @@ static void __cpuinit write_pen_release(
outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
}
@ -217,7 +217,7 @@ index 49c7db4..1f7a3d2 100644
void __cpuinit platform_secondary_init(unsigned int cpu)
{
@@ -58,8 +58,8 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
@@ -58,8 +58,8 @@ void __cpuinit platform_secondary_init(u
/*
* Synchronise with the boot thread.
*/
@ -228,7 +228,7 @@ index 49c7db4..1f7a3d2 100644
}
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -70,7 +70,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -70,7 +70,7 @@ int __cpuinit boot_secondary(unsigned in
* Set synchronisation state between this boot processor
* and the secondary one
*/
@ -237,7 +237,7 @@ index 49c7db4..1f7a3d2 100644
/*
* This is really belt and braces; we hold unintended secondary
@@ -100,7 +100,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -100,7 +100,7 @@ int __cpuinit boot_secondary(unsigned in
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
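
A userspace analog of the conversion, assuming pthreads: the boot handshake needs a lock the waiter truly spins on (pthread_spinlock_t here, raw_spinlock_t in the kernel), because on RT a plain spinlock_t would try to sleep in the invalid context described above:

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t boot_lock;

static void *secondary(void *arg)
{
    (void)arg;
    /* synchronise with the boot thread, as in platform_secondary_init() */
    pthread_spin_lock(&boot_lock);
    pthread_spin_unlock(&boot_lock);
    puts("secondary released");
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_spin_init(&boot_lock, PTHREAD_PROCESS_PRIVATE);
    pthread_spin_lock(&boot_lock);    /* held while "bringing up" the CPU */
    pthread_create(&t, NULL, secondary, NULL);
    pthread_spin_unlock(&boot_lock);  /* release the secondary */
    pthread_join(t, NULL);
    return 0;
}
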


@ -1,18 +1,17 @@
From 258667122b870cded6481bb169c1cc4fc28d6913 Mon Sep 17 00:00:00 2001
Subject: arm-disable-highmem-on-rt.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 18 Jul 2011 17:09:28 +0200
Subject: [220/256] arm-disable-highmem-on-rt.patch
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/arm/Kconfig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index f6fa4db..be61c6e 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1725,7 +1725,7 @@ config HAVE_ARCH_PFN_VALID
Index: linux-stable/arch/arm/Kconfig
===================================================================
--- linux-stable.orig/arch/arm/Kconfig
+++ linux-stable/arch/arm/Kconfig
@@ -1737,7 +1737,7 @@ config HAVE_ARCH_PFN_VALID
config HIGHMEM
bool "High Memory Support"


@ -1,7 +1,6 @@
From a3c959e3bc9ff28e2ed8468d32b36137a7cfcd05 Mon Sep 17 00:00:00 2001
Subject: arm: Mark pmu interrupt IRQF_NO_THREAD
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 16 Mar 2011 14:45:31 +0100
Subject: [016/256] arm: Mark pmu interrupt IRQF_NO_THREAD
PMU interrupt must not be threaded. Remove IRQF_DISABLED while at it
as we run all handlers with interrupts disabled anyway.
@ -11,11 +10,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/arm/kernel/perf_event.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 186c8cb..b2216b7 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -433,7 +433,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
Index: linux-stable/arch/arm/kernel/perf_event.c
===================================================================
--- linux-stable.orig/arch/arm/kernel/perf_event.c
+++ linux-stable/arch/arm/kernel/perf_event.c
@@ -430,7 +430,7 @@ armpmu_reserve_hardware(struct arm_pmu *
}
err = request_irq(irq, handle_irq,


@ -1,27 +1,26 @@
From b681b3c0f275d25de13484b9235c11b2bd2cac7f Mon Sep 17 00:00:00 2001
Subject: arm-omap-make-wakeupgen_lock-raw.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 11 Apr 2012 11:26:38 +0200
Subject: [019/256] arm-omap-make-wakeupgen_lock-raw.patch
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/arm/mach-omap2/omap-wakeupgen.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index 42cd7fb..dbc2914 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -43,7 +43,7 @@
Index: linux-stable/arch/arm/mach-omap2/omap-wakeupgen.c
===================================================================
--- linux-stable.orig/arch/arm/mach-omap2/omap-wakeupgen.c
+++ linux-stable/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -45,7 +45,7 @@
static void __iomem *wakeupgen_base;
static void __iomem *sar_base;
-static DEFINE_SPINLOCK(wakeupgen_lock);
+static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
static unsigned int irq_target_cpu[NR_IRQS];
/*
@@ -128,9 +128,9 @@ static void wakeupgen_mask(struct irq_data *d)
static unsigned int irq_target_cpu[MAX_IRQS];
static unsigned int irq_banks = MAX_NR_REG_BANKS;
static unsigned int max_irqs = MAX_IRQS;
@@ -133,9 +133,9 @@ static void wakeupgen_mask(struct irq_da
{
unsigned long flags;
@ -33,7 +32,7 @@ index 42cd7fb..dbc2914 100644
}
/*
@@ -140,9 +140,9 @@ static void wakeupgen_unmask(struct irq_data *d)
@@ -145,9 +145,9 @@ static void wakeupgen_unmask(struct irq_
{
unsigned long flags;
@ -45,7 +44,7 @@ index 42cd7fb..dbc2914 100644
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -183,7 +183,7 @@ static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
@@ -188,7 +188,7 @@ static void wakeupgen_irqmask_all(unsign
{
unsigned long flags;
@ -54,7 +53,7 @@ index 42cd7fb..dbc2914 100644
if (set) {
_wakeupgen_save_masks(cpu);
_wakeupgen_set_all(cpu, WKG_MASK_ALL);
@@ -191,7 +191,7 @@ static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
@@ -196,7 +196,7 @@ static void wakeupgen_irqmask_all(unsign
_wakeupgen_set_all(cpu, WKG_UNMASK_ALL);
_wakeupgen_restore_masks(cpu);
}


@ -1,22 +1,22 @@
From 8924ef79ece1396ff4aef7027ac8fdf279349515 Mon Sep 17 00:00:00 2001
From: Steven Rostedt <srostedt@redhat.com>
Date: Fri, 3 Jul 2009 08:44:29 -0500
Subject: [064/256] ata: Do not disable interrupts in ide code for preempt-rt
Subject: ata: Do not disable interrupts in ide code for preempt-rt
Use the local_irq_*_nort variants.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/ata/libata-sff.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index d8af325..ad3130d 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -678,9 +678,9 @@ unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
Index: linux-stable/drivers/ata/libata-sff.c
===================================================================
--- linux-stable.orig/drivers/ata/libata-sff.c
+++ linux-stable/drivers/ata/libata-sff.c
@@ -678,9 +678,9 @@ unsigned int ata_sff_data_xfer_noirq(str
unsigned long flags;
unsigned int consumed;
@ -28,7 +28,7 @@ index d8af325..ad3130d 100644
return consumed;
}
@@ -719,7 +719,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
@@ -719,7 +719,7 @@ static void ata_pio_sector(struct ata_qu
unsigned long flags;
/* FIXME: use a bounce buffer */
@ -37,7 +37,7 @@ index d8af325..ad3130d 100644
buf = kmap_atomic(page);
/* do the actual data transfer */
@@ -727,7 +727,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
@@ -727,7 +727,7 @@ static void ata_pio_sector(struct ata_qu
do_write);
kunmap_atomic(buf);
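
A sketch of the local_irq_*_nort() idea under an illustrative PREEMPT_RT_FULL toggle (a plain macro here, Kconfig in the kernel): on RT the save/restore pair degrades to a no-op so the transfer may sleep; on non-RT the original behaviour is kept, modelled with prints rather than real interrupt masking:

#include <stdio.h>

/* #define PREEMPT_RT_FULL */  /* illustrative toggle, not Kconfig */

#ifdef PREEMPT_RT_FULL
# define local_irq_save_nort(f)    do { (f) = 0; } while (0)  /* no-op on RT */
# define local_irq_restore_nort(f) do { (void)(f); } while (0)
#else
# define local_irq_save_nort(f)    do { (f) = 1; puts("irqs off"); } while (0)
# define local_irq_restore_nort(f) do { (void)(f); puts("irqs on"); } while (0)
#endif

int main(void)
{
    unsigned long flags;

    local_irq_save_nort(flags);   /* real masking only on non-RT */
    puts("PIO data transfer");
    local_irq_restore_nort(flags);
    return 0;
}
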


@ -1,7 +1,6 @@
From 907830916646eccd2a8170dbfbc8f1f9482c1bf3 Mon Sep 17 00:00:00 2001
Subject: block: Shorten interrupt disabled regions
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 22 Jun 2011 19:47:02 +0200
Subject: [006/256] block: Shorten interrupt disabled regions
Moving the blk_sched_flush_plug() call out of the interrupt/preempt
disabled region in the scheduler allows us to replace
@ -47,11 +46,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
block/blk-core.c | 20 ++++++++------------
1 file changed, 8 insertions(+), 12 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 1f61b74..0d947d0 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -302,7 +302,11 @@ void __blk_run_queue(struct request_queue *q)
Index: linux-stable/block/blk-core.c
===================================================================
--- linux-stable.orig/block/blk-core.c
+++ linux-stable/block/blk-core.c
@@ -304,7 +304,11 @@ void __blk_run_queue(struct request_queu
{
if (unlikely(blk_queue_stopped(q)))
return;
@ -64,7 +63,7 @@ index 1f61b74..0d947d0 100644
q->request_fn(q);
}
EXPORT_SYMBOL(__blk_run_queue);
@@ -2779,11 +2783,11 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
@@ -2902,11 +2906,11 @@ static void queue_unplugged(struct reque
* this lock).
*/
if (from_schedule) {
@ -78,7 +77,7 @@ index 1f61b74..0d947d0 100644
}
}
@@ -2809,7 +2813,6 @@ static void flush_plug_callbacks(struct blk_plug *plug)
@@ -2956,7 +2960,6 @@ EXPORT_SYMBOL(blk_check_plugged);
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
struct request_queue *q;
@ -86,7 +85,7 @@ index 1f61b74..0d947d0 100644
struct request *rq;
LIST_HEAD(list);
unsigned int depth;
@@ -2830,11 +2833,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
@@ -2977,11 +2980,6 @@ void blk_flush_plug_list(struct blk_plug
q = NULL;
depth = 0;
@ -98,7 +97,7 @@ index 1f61b74..0d947d0 100644
while (!list_empty(&list)) {
rq = list_entry_rq(list.next);
list_del_init(&rq->queuelist);
@@ -2847,7 +2845,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
@@ -2994,7 +2992,7 @@ void blk_flush_plug_list(struct blk_plug
queue_unplugged(q, depth, from_schedule);
q = rq->q;
depth = 0;
@ -107,7 +106,7 @@ index 1f61b74..0d947d0 100644
}
/*
@@ -2874,8 +2872,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
@@ -3021,8 +3019,6 @@ void blk_flush_plug_list(struct blk_plug
*/
if (q)
queue_unplugged(q, depth, from_schedule);
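
The control flow the patch preserves, reduced to a runnable toy: when unplugging from the scheduler path the queue is kicked asynchronously instead of running the request function under a disabled-interrupt region. run_queue() and kick_queue() are illustrative stand-ins:

#include <stdbool.h>
#include <stdio.h>

static void run_queue(void)  { puts("request_fn() with irqs enabled"); }
static void kick_queue(void) { puts("deferred to the kblockd worker"); }

static void queue_unplugged(bool from_schedule)
{
    if (from_schedule)
        kick_queue();  /* async: nothing runs under the irq-off region */
    else
        run_queue();   /* direct: caller context allows it */
}

int main(void)
{
    queue_unplugged(true);
    queue_unplugged(false);
    return 0;
}
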


@ -1,30 +1,19 @@
From e3f58ac8f56b9d85886a6feadf63b46e91200290 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:58 -0500
Subject: [061/256] bug: BUG_ON/WARN_ON variants dependent on RT/!RT
Subject: bug: BUG_ON/WARN_ON variants dependent on RT/!RT
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/asm-generic/bug.h | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 2520a6e..0e41ade 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -3,6 +3,10 @@
#include <linux/compiler.h>
+#ifndef __ASSEMBLY__
+extern void __WARN_ON(const char *func, const char *file, const int line);
+#endif /* __ASSEMBLY__ */
+
#ifdef CONFIG_BUG
#ifdef CONFIG_GENERIC_BUG
@@ -202,4 +206,18 @@ extern void warn_slowpath_null(const char *file, const int line);
---
include/asm-generic/bug.h | 14 ++++++++++++++
1 file changed, 14 insertions(+)
Index: linux-stable/include/asm-generic/bug.h
===================================================================
--- linux-stable.orig/include/asm-generic/bug.h
+++ linux-stable/include/asm-generic/bug.h
@@ -202,6 +202,20 @@ extern void warn_slowpath_null(const cha
# define WARN_ON_SMP(x) ({0;})
#endif
@ -42,4 +31,6 @@ index 2520a6e..0e41ade 100644
+# define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition)
+#endif
+
#endif /* __ASSEMBLY__ */
#endif

View File

@ -1,23 +1,22 @@
From 961f5156b0d24dcda2e19f2469f85cff6bd8f220 Mon Sep 17 00:00:00 2001
From: Benedikt Spranger <b.spranger@linutronix.de>
Date: Mon, 8 Mar 2010 18:57:04 +0100
Subject: [027/256] clocksource: TCLIB: Allow higher clock rates for clock
events
Subject: clocksource: TCLIB: Allow higher clock rates for clock events
By default the TCLIB uses the 32 KiHz base clock rate for clock events.
Add a compile time selection to allow higher clock resolution.
Signed-off-by: Benedikt Spranger <b.spranger@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/clocksource/tcb_clksrc.c | 44 ++++++++++++++++++++++----------------
drivers/misc/Kconfig | 11 ++++++++--
drivers/clocksource/tcb_clksrc.c | 44 +++++++++++++++++++++++----------------
drivers/misc/Kconfig | 11 +++++++--
2 files changed, 35 insertions(+), 20 deletions(-)
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 32cb929..ac0bb2e 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
Index: linux-stable/drivers/clocksource/tcb_clksrc.c
===================================================================
--- linux-stable.orig/drivers/clocksource/tcb_clksrc.c
+++ linux-stable/drivers/clocksource/tcb_clksrc.c
@@ -23,8 +23,7 @@
* this 32 bit free-running counter. the second channel is not used.
*
@ -36,7 +35,7 @@ index 32cb929..ac0bb2e 100644
void __iomem *regs;
};
@@ -82,13 +82,6 @@ static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
@@ -82,13 +82,6 @@ static struct tc_clkevt_device *to_tc_cl
return container_of(clkevt, struct tc_clkevt_device, clkevt);
}
@ -50,7 +49,7 @@ index 32cb929..ac0bb2e 100644
static u32 timer_clock;
static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
@@ -111,11 +104,12 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
@@ -111,11 +104,12 @@ static void tc_mode(enum clock_event_mod
case CLOCK_EVT_MODE_PERIODIC:
clk_enable(tcd->clk);
@ -65,7 +64,7 @@ index 32cb929..ac0bb2e 100644
/* Enable clock and interrupts on RC compare */
__raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
@@ -128,7 +122,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
@@ -128,7 +122,7 @@ static void tc_mode(enum clock_event_mod
case CLOCK_EVT_MODE_ONESHOT:
clk_enable(tcd->clk);
@ -74,7 +73,7 @@ index 32cb929..ac0bb2e 100644
__raw_writel(timer_clock | ATMEL_TC_CPCSTOP
| ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
regs + ATMEL_TC_REG(2, CMR));
@@ -158,8 +152,12 @@ static struct tc_clkevt_device clkevt = {
@@ -158,8 +152,12 @@ static struct tc_clkevt_device clkevt =
.features = CLOCK_EVT_FEAT_PERIODIC
| CLOCK_EVT_FEAT_ONESHOT,
.shift = 32,
@ -98,7 +97,7 @@ index 32cb929..ac0bb2e 100644
struct clk *t2_clk = tc->clk[2];
int irq = tc->irq[2];
@@ -194,11 +193,17 @@ static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
@@ -194,11 +193,17 @@ static void __init setup_clkevents(struc
clkevt.clk = t2_clk;
tc_irqaction.dev_id = &clkevt;
@ -133,11 +132,11 @@ index 32cb929..ac0bb2e 100644
return 0;
}
arch_initcall(tcb_clksrc_init);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index c779509..504daa3 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -87,8 +87,7 @@ config ATMEL_TCB_CLKSRC
Index: linux-stable/drivers/misc/Kconfig
===================================================================
--- linux-stable.orig/drivers/misc/Kconfig
+++ linux-stable/drivers/misc/Kconfig
@@ -88,8 +88,7 @@ config ATMEL_TCB_CLKSRC
are combined to make a single 32-bit timer.
When GENERIC_CLOCKEVENTS is defined, the third timer channel
@ -147,7 +146,7 @@ index c779509..504daa3 100644
config ATMEL_TCB_CLKSRC_BLOCK
int
@@ -102,6 +101,14 @@ config ATMEL_TCB_CLKSRC_BLOCK
@@ -103,6 +102,14 @@ config ATMEL_TCB_CLKSRC_BLOCK
TC can be used for other purposes, such as PWM generation and
interval timing.
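
Back-of-the-envelope arithmetic for why the divided master clock matters, assuming an illustrative 133 MHz MCK: the slow clock limits event granularity to roughly 30.5 us, while MCK/32 gets below a quarter of a microsecond:

#include <stdio.h>

int main(void)
{
    double slow_hz = 32768.0;            /* the 32 KiHz slow clock */
    double fast_hz = 133000000.0 / 32.0; /* assumed 133 MHz MCK, /32 divisor */

    printf("slow clock tick: %.2f us\n", 1e6 / slow_hz);  /* ~30.52 us */
    printf("MCK/32 tick:     %.3f us\n", 1e6 / fast_hz);  /* ~0.241 us */
    return 0;
}
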


@ -1,17 +1,16 @@
From 7fa364c6068b2175556853dc3b7e6194030758a3 Mon Sep 17 00:00:00 2001
Subject: cond-resched-lock-rt-tweak.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 22:51:33 +0200
Subject: [131/256] cond-resched-lock-rt-tweak.patch
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/sched.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b76e0c8..a6b9a6d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
Index: linux-stable/include/linux/sched.h
===================================================================
--- linux-stable.orig/include/linux/sched.h
+++ linux-stable/include/linux/sched.h
@@ -2689,7 +2689,7 @@ extern int _cond_resched(void);
extern int __cond_resched_lock(spinlock_t *lock);


@ -1,7 +1,6 @@
From b45ecdef4d57b4179dca4c3360543c1d65f16fbb Mon Sep 17 00:00:00 2001
Subject: cond-resched-softirq-fix.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 14 Jul 2011 09:56:44 +0200
Subject: [130/256] cond-resched-softirq-fix.patch
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
@ -9,11 +8,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kernel/sched/core.c | 2 ++
2 files changed, 6 insertions(+)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d177da5..b76e0c8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2700,12 +2700,16 @@ extern int __cond_resched_lock(spinlock_t *lock);
Index: linux-stable/include/linux/sched.h
===================================================================
--- linux-stable.orig/include/linux/sched.h
+++ linux-stable/include/linux/sched.h
@@ -2700,12 +2700,16 @@ extern int __cond_resched_lock(spinlock_
__cond_resched_lock(lock); \
})
@ -30,11 +29,11 @@ index d177da5..b76e0c8 100644
/*
* Does a critical section need to be broken due to another
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index dbe9a7d..e88cccc 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4605,6 +4605,7 @@ int __cond_resched_lock(spinlock_t *lock)
Index: linux-stable/kernel/sched/core.c
===================================================================
--- linux-stable.orig/kernel/sched/core.c
+++ linux-stable/kernel/sched/core.c
@@ -4832,6 +4832,7 @@ int __cond_resched_lock(spinlock_t *lock
}
EXPORT_SYMBOL(__cond_resched_lock);
@ -42,7 +41,7 @@ index dbe9a7d..e88cccc 100644
int __sched __cond_resched_softirq(void)
{
BUG_ON(!in_softirq());
@@ -4618,6 +4619,7 @@ int __sched __cond_resched_softirq(void)
@@ -4845,6 +4846,7 @@ int __sched __cond_resched_softirq(void)
return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);


@ -1,7 +1,6 @@
From c6bd5675cda088dacd4f09bf7a6ebbaafa581589 Mon Sep 17 00:00:00 2001
Subject: cpu: Make hotplug.lock a "sleeping" spinlock on RT
From: Steven Rostedt <rostedt@goodmis.org>
Date: Fri, 2 Mar 2012 10:36:57 -0500
Subject: [240/256] cpu: Make hotplug.lock a "sleeping" spinlock on RT
Date: Fri, 02 Mar 2012 10:36:57 -0500
Tasks can block on hotplug.lock in pin_current_cpu(), but their state
might be != RUNNING. So the mutex wakeup will set the state
@ -23,11 +22,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kernel/cpu.c | 35 ++++++++++++++++++++++++++---------
1 file changed, 26 insertions(+), 9 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 37b2212..d79d33a 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -46,7 +46,12 @@ static int cpu_hotplug_disabled;
Index: linux-stable/kernel/cpu.c
===================================================================
--- linux-stable.orig/kernel/cpu.c
+++ linux-stable/kernel/cpu.c
@@ -51,7 +51,12 @@ static int cpu_hotplug_disabled;
static struct {
struct task_struct *active_writer;
@ -40,7 +39,7 @@ index 37b2212..d79d33a 100644
/*
* Also blocks the new readers during
* an ongoing cpu hotplug operation.
@@ -54,10 +59,22 @@ static struct {
@@ -59,10 +64,22 @@ static struct {
int refcount;
} cpu_hotplug = {
.active_writer = NULL,
@ -63,7 +62,7 @@ index 37b2212..d79d33a 100644
struct hotplug_pcp {
struct task_struct *unplug;
int refcount;
@@ -87,8 +104,8 @@ retry:
@@ -92,8 +109,8 @@ retry:
return;
}
preempt_enable();
@ -74,7 +73,7 @@ index 37b2212..d79d33a 100644
preempt_disable();
goto retry;
}
@@ -161,9 +178,9 @@ void get_online_cpus(void)
@@ -166,9 +183,9 @@ void get_online_cpus(void)
might_sleep();
if (cpu_hotplug.active_writer == current)
return;
@ -86,7 +85,7 @@ index 37b2212..d79d33a 100644
}
EXPORT_SYMBOL_GPL(get_online_cpus);
@@ -172,10 +189,10 @@ void put_online_cpus(void)
@@ -177,10 +194,10 @@ void put_online_cpus(void)
{
if (cpu_hotplug.active_writer == current)
return;
@ -99,7 +98,7 @@ index 37b2212..d79d33a 100644
}
EXPORT_SYMBOL_GPL(put_online_cpus);
@@ -207,11 +224,11 @@ static void cpu_hotplug_begin(void)
@@ -212,11 +229,11 @@ static void cpu_hotplug_begin(void)
cpu_hotplug.active_writer = current;
for (;;) {
@ -113,7 +112,7 @@ index 37b2212..d79d33a 100644
schedule();
}
}
@@ -219,7 +236,7 @@ static void cpu_hotplug_begin(void)
@@ -224,7 +241,7 @@ static void cpu_hotplug_begin(void)
static void cpu_hotplug_done(void)
{
cpu_hotplug.active_writer = NULL;
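
The shape of the wrapper pair this patch introduces, as a runnable userspace sketch: a single hotplug_lock()/hotplug_unlock() pair in the code, with the lock type chosen per configuration. In the kernel the RT variant is an rt_spin_lock that preserves the caller's task state; here a pthread spinlock stands in under an illustrative toggle:

#include <pthread.h>
#include <stdio.h>

/* #define PREEMPT_RT_FULL */  /* illustrative toggle, not Kconfig */

#ifdef PREEMPT_RT_FULL
static pthread_spinlock_t hplock;  /* kernel: rt_spin_lock, keeps task state */
# define hotplug_lock()   pthread_spin_lock(&hplock)
# define hotplug_unlock() pthread_spin_unlock(&hplock)
#else
static pthread_mutex_t hplock = PTHREAD_MUTEX_INITIALIZER;
# define hotplug_lock()   pthread_mutex_lock(&hplock)
# define hotplug_unlock() pthread_mutex_unlock(&hplock)
#endif

int main(void)
{
#ifdef PREEMPT_RT_FULL
    pthread_spin_init(&hplock, PTHREAD_PROCESS_PRIVATE);
#endif
    hotplug_lock();
    puts("refcount update under the hotplug lock");
    hotplug_unlock();
    return 0;
}
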


@ -0,0 +1,550 @@
From: Steven Rostedt <srostedt@redhat.com>
Date: Mon, 16 Jul 2012 08:07:43 +0000
Subject: cpu/rt: Rework cpu down for PREEMPT_RT
Bringing a CPU down is a pain with the PREEMPT_RT kernel because
tasks can be preempted in many more places than in non-RT. In
order to handle per_cpu variables, tasks may be pinned to a CPU
for a while, and even sleep. But these tasks need to be off the CPU
if that CPU is going down.
Several synchronization methods have been tried, but when stressed
they failed. This is a new approach.
A sync_tsk thread is still created and tasks may still block on a
lock when the CPU is going down, but how that works is a bit different.
When cpu_down() starts, it will create the sync_tsk and wait on it
to report that the tasks currently pinned on the CPU are no longer
pinned. But new tasks that are about to be pinned will still be allowed
to do so at this time.
Then the notifiers are called. Several notifiers will bring down tasks
that will enter these locations. Some of these tasks will take locks
of other tasks that are on the CPU. If we don't let those other tasks
continue, but make them block until CPU down is done, the tasks that
the notifiers are waiting on will never complete as they are waiting
for the locks held by the tasks that are blocked.
Thus we still let the task pin the CPU until the notifiers are done.
After the notifiers run, we then make new tasks entering the pinned
CPU sections grab a mutex and wait. This mutex is now a per CPU mutex
in the hotplug_pcp descriptor.
To help things along, a new function in the scheduler code is created
called migrate_me(). This function will try to migrate the current task
off the CPU that is going down, if possible. When the sync_tsk is created,
all tasks will then try to migrate off the CPU going down. There are
several cases where this won't work, but it helps in most cases.
After the notifiers are called and if a task can't migrate off but enters
the pin CPU sections, it will be forced to wait on the hotplug_pcp mutex
until the CPU down is complete. Then the scheduler will force the migration
anyway.
Also, I found that THREAD_BOUND tasks also need to be accounted for in the
pinned CPU count, and migrate_disable no longer treats them specially.
This helps fix issues with ksoftirqd and workqueue that unbind on CPU down.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/sched.h | 7 +
kernel/cpu.c | 240 +++++++++++++++++++++++++++++++++++++++++---------
kernel/sched/core.c | 82 ++++++++++++++++-
3 files changed, 285 insertions(+), 44 deletions(-)
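
For orientation, the caller side of the pinning machinery this patch reworks looks roughly like this (a sketch: in the rt tree pin_current_cpu()/unpin_current_cpu() are invoked from migrate_disable()/migrate_enable(), and my_counter is a hypothetical per-cpu variable):

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(int, my_counter);	/* hypothetical per-cpu state */

static void percpu_user(void)
{
	migrate_disable();		/* calls pin_current_cpu() */
	this_cpu_inc(my_counter);	/* safe: we stay on this CPU */
	migrate_enable();		/* calls unpin_current_cpu(), which
					 * may wake the sync_unplug thread */
}
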
Index: linux-stable/include/linux/sched.h
===================================================================
--- linux-stable.orig/include/linux/sched.h
+++ linux-stable/include/linux/sched.h
@@ -1952,6 +1952,10 @@ extern void do_set_cpus_allowed(struct t
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
+int migrate_me(void);
+void tell_sched_cpu_down_begin(int cpu);
+void tell_sched_cpu_down_done(int cpu);
+
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask)
@@ -1964,6 +1968,9 @@ static inline int set_cpus_allowed_ptr(s
return -EINVAL;
return 0;
}
+static inline int migrate_me(void) { return 0; }
+static inline void tell_sched_cpu_down_begin(int cpu) { }
+static inline void tell_sched_cpu_down_done(int cpu) { }
#endif
#ifdef CONFIG_NO_HZ
Index: linux-stable/kernel/cpu.c
===================================================================
--- linux-stable.orig/kernel/cpu.c
+++ linux-stable/kernel/cpu.c
@@ -51,12 +51,7 @@ static int cpu_hotplug_disabled;
static struct {
struct task_struct *active_writer;
-#ifdef CONFIG_PREEMPT_RT_FULL
- /* Makes the lock keep the task's state */
- spinlock_t lock;
-#else
struct mutex lock; /* Synchronizes accesses to refcount, */
-#endif
/*
* Also blocks the new readers during
* an ongoing cpu hotplug operation.
@@ -64,28 +59,46 @@ static struct {
int refcount;
} cpu_hotplug = {
.active_writer = NULL,
-#ifdef CONFIG_PREEMPT_RT_FULL
- .lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock),
-#else
.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
-#endif
.refcount = 0,
};
-#ifdef CONFIG_PREEMPT_RT_FULL
-# define hotplug_lock() rt_spin_lock(&cpu_hotplug.lock)
-# define hotplug_unlock() rt_spin_unlock(&cpu_hotplug.lock)
-#else
-# define hotplug_lock() mutex_lock(&cpu_hotplug.lock)
-# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock)
-#endif
-
+/**
+ * hotplug_pcp - per cpu hotplug descriptor
+ * @unplug: set when pin_current_cpu() needs to sync tasks
+ * @sync_tsk: the task that waits for tasks to finish pinned sections
+ * @refcount: counter of tasks in pinned sections
+ * @grab_lock: set when the tasks entering pinned sections should wait
+ * @synced: notifier for @sync_tsk to tell cpu_down it's finished
+ * @mutex: the mutex to make tasks wait (used when @grab_lock is true)
+ * @mutex_init: zero if the mutex hasn't been initialized yet.
+ *
+ * Although @unplug and @sync_tsk may point to the same task, the @unplug
+ * is used as a flag and still exists after @sync_tsk has exited and
+ * @sync_tsk set to NULL.
+ */
struct hotplug_pcp {
struct task_struct *unplug;
+ struct task_struct *sync_tsk;
int refcount;
+ int grab_lock;
struct completion synced;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ spinlock_t lock;
+#else
+ struct mutex mutex;
+#endif
+ int mutex_init;
};
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define hotplug_lock(hp) rt_spin_lock(&(hp)->lock)
+# define hotplug_unlock(hp) rt_spin_unlock(&(hp)->lock)
+#else
+# define hotplug_lock(hp) mutex_lock(&(hp)->mutex)
+# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex)
+#endif
+
static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
/**
@@ -99,18 +112,40 @@ static DEFINE_PER_CPU(struct hotplug_pcp
void pin_current_cpu(void)
{
struct hotplug_pcp *hp;
+ int force = 0;
retry:
hp = &__get_cpu_var(hotplug_pcp);
- if (!hp->unplug || hp->refcount || preempt_count() > 1 ||
+ if (!hp->unplug || hp->refcount || force || preempt_count() > 1 ||
hp->unplug == current || (current->flags & PF_STOMPER)) {
hp->refcount++;
return;
}
- preempt_enable();
- hotplug_lock();
- hotplug_unlock();
+
+ if (hp->grab_lock) {
+ preempt_enable();
+ hotplug_lock(hp);
+ hotplug_unlock(hp);
+ } else {
+ preempt_enable();
+ /*
+ * Try to push this task off of this CPU.
+ */
+ if (!migrate_me()) {
+ preempt_disable();
+ hp = &__get_cpu_var(hotplug_pcp);
+ if (!hp->grab_lock) {
+ /*
+ * Just let it continue; it's already pinned
+ * or about to sleep.
+ */
+ force = 1;
+ goto retry;
+ }
+ preempt_enable();
+ }
+ }
preempt_disable();
goto retry;
}
@@ -132,26 +167,84 @@ void unpin_current_cpu(void)
wake_up_process(hp->unplug);
}
-/*
- * FIXME: Is this really correct under all circumstances ?
- */
+static void wait_for_pinned_cpus(struct hotplug_pcp *hp)
+{
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ while (hp->refcount) {
+ schedule_preempt_disabled();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ }
+}
+
static int sync_unplug_thread(void *data)
{
struct hotplug_pcp *hp = data;
preempt_disable();
hp->unplug = current;
+ wait_for_pinned_cpus(hp);
+
+ /*
+ * This thread will synchronize the cpu_down() with threads
+ * that have pinned the CPU. When the pinned CPU count reaches
+ * zero, we inform the cpu_down code to continue to the next step.
+ */
set_current_state(TASK_UNINTERRUPTIBLE);
- while (hp->refcount) {
- schedule_preempt_disabled();
+ preempt_enable();
+ complete(&hp->synced);
+
+ /*
+ * If all succeeds, the next step will need tasks to wait till
+ * the CPU is offline before continuing. To do this, the grab_lock
+ * is set and tasks going into pin_current_cpu() will block on the
+ * mutex. But we still need to wait for those that are already in
+ * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop()
+ * will kick this thread out.
+ */
+ while (!hp->grab_lock && !kthread_should_stop()) {
+ schedule();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ }
+
+ /* Make sure grab_lock is seen before we see a stale completion */
+ smp_mb();
+
+ /*
+ * Now just before cpu_down() enters stop machine, we need to make
+ * sure all tasks that are in pinned CPU sections are out, and new
+ * tasks will now grab the lock, keeping them from entering pinned
+ * CPU sections.
+ */
+ if (!kthread_should_stop()) {
+ preempt_disable();
+ wait_for_pinned_cpus(hp);
+ preempt_enable();
+ complete(&hp->synced);
+ }
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ schedule();
set_current_state(TASK_UNINTERRUPTIBLE);
}
set_current_state(TASK_RUNNING);
- preempt_enable();
- complete(&hp->synced);
+
+ /*
+ * Force this thread off this CPU as it's going down and
+ * we don't want any more work on this CPU.
+ */
+ current->flags &= ~PF_THREAD_BOUND;
+ do_set_cpus_allowed(current, cpu_present_mask);
+ migrate_me();
return 0;
}
+static void __cpu_unplug_sync(struct hotplug_pcp *hp)
+{
+ wake_up_process(hp->sync_tsk);
+ wait_for_completion(&hp->synced);
+}
+
/*
* Start the sync_unplug_thread on the target cpu and wait for it to
* complete.
@@ -159,23 +252,83 @@ static int sync_unplug_thread(void *data
static int cpu_unplug_begin(unsigned int cpu)
{
struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
- struct task_struct *tsk;
+ int err;
+
+ /* Protected by cpu_hotplug.lock */
+ if (!hp->mutex_init) {
+#ifdef CONFIG_PREEMPT_RT_FULL
+ spin_lock_init(&hp->lock);
+#else
+ mutex_init(&hp->mutex);
+#endif
+ hp->mutex_init = 1;
+ }
+
+ /* Inform the scheduler to migrate tasks off this CPU */
+ tell_sched_cpu_down_begin(cpu);
init_completion(&hp->synced);
- tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
- if (IS_ERR(tsk))
- return (PTR_ERR(tsk));
- kthread_bind(tsk, cpu);
- wake_up_process(tsk);
- wait_for_completion(&hp->synced);
+
+ hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
+ if (IS_ERR(hp->sync_tsk)) {
+ err = PTR_ERR(hp->sync_tsk);
+ hp->sync_tsk = NULL;
+ return err;
+ }
+ kthread_bind(hp->sync_tsk, cpu);
+
+ /*
+ * Wait for tasks to get out of the pinned sections,
+ * it's still OK if new tasks enter. Some CPU notifiers will
+ * wait for tasks that are going to enter these sections and
+ * we must not have them block.
+ */
+ __cpu_unplug_sync(hp);
+
return 0;
}
+static void cpu_unplug_sync(unsigned int cpu)
+{
+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
+
+ init_completion(&hp->synced);
+ /* The completion needs to be initialized before setting grab_lock */
+ smp_wmb();
+
+ /* Grab the mutex before setting grab_lock */
+ hotplug_lock(hp);
+ hp->grab_lock = 1;
+
+ /*
+ * The CPU notifiers have been completed.
+ * Wait for tasks to get out of pinned CPU sections and have new
+ * tasks block until the CPU is completely down.
+ */
+ __cpu_unplug_sync(hp);
+
+ /* All done with the sync thread */
+ kthread_stop(hp->sync_tsk);
+ hp->sync_tsk = NULL;
+}
+
static void cpu_unplug_done(unsigned int cpu)
{
struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
hp->unplug = NULL;
+ /* Let all tasks know cpu unplug is finished before cleaning up */
+ smp_wmb();
+
+ if (hp->sync_tsk)
+ kthread_stop(hp->sync_tsk);
+
+ if (hp->grab_lock) {
+ hotplug_unlock(hp);
+ /* protected by cpu_hotplug.lock */
+ hp->grab_lock = 0;
+ }
+ tell_sched_cpu_down_done(cpu);
}
void get_online_cpus(void)
@@ -183,9 +336,9 @@ void get_online_cpus(void)
might_sleep();
if (cpu_hotplug.active_writer == current)
return;
- hotplug_lock();
+ mutex_lock(&cpu_hotplug.lock);
cpu_hotplug.refcount++;
- hotplug_unlock();
+ mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);
@@ -194,10 +347,10 @@ void put_online_cpus(void)
{
if (cpu_hotplug.active_writer == current)
return;
- hotplug_lock();
+ mutex_lock(&cpu_hotplug.lock);
if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
wake_up_process(cpu_hotplug.active_writer);
- hotplug_unlock();
+ mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);
@@ -229,11 +382,11 @@ static void cpu_hotplug_begin(void)
cpu_hotplug.active_writer = current;
for (;;) {
- hotplug_lock();
+ mutex_lock(&cpu_hotplug.lock);
if (likely(!cpu_hotplug.refcount))
break;
__set_current_state(TASK_UNINTERRUPTIBLE);
- hotplug_unlock();
+ mutex_unlock(&cpu_hotplug.lock);
schedule();
}
}
@@ -241,7 +394,7 @@ static void cpu_hotplug_begin(void)
static void cpu_hotplug_done(void)
{
cpu_hotplug.active_writer = NULL;
- hotplug_unlock();
+ mutex_unlock(&cpu_hotplug.lock);
}
#else /* #if CONFIG_HOTPLUG_CPU */
@@ -416,6 +569,9 @@ static int __ref _cpu_down(unsigned int
goto out_release;
}
+ /* Notifiers are done. Don't let any more tasks pin this CPU. */
+ cpu_unplug_sync(cpu);
+
err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
if (err) {
/* CPU didn't die: tell everyone. Can't complain. */
Index: linux-stable/kernel/sched/core.c
===================================================================
--- linux-stable.orig/kernel/sched/core.c
+++ linux-stable/kernel/sched/core.c
@@ -3418,7 +3418,7 @@ void migrate_disable(void)
{
struct task_struct *p = current;
- if (in_atomic() || p->flags & PF_THREAD_BOUND) {
+ if (in_atomic()) {
#ifdef CONFIG_SCHED_DEBUG
p->migrate_disable_atomic++;
#endif
@@ -3449,7 +3449,7 @@ void migrate_enable(void)
unsigned long flags;
struct rq *rq;
- if (in_atomic() || p->flags & PF_THREAD_BOUND) {
+ if (in_atomic()) {
#ifdef CONFIG_SCHED_DEBUG
p->migrate_disable_atomic--;
#endif
@@ -5341,6 +5341,84 @@ void do_set_cpus_allowed(struct task_str
cpumask_copy(&p->cpus_allowed, new_mask);
}
+static DEFINE_PER_CPU(struct cpumask, sched_cpumasks);
+static DEFINE_MUTEX(sched_down_mutex);
+static cpumask_t sched_down_cpumask;
+
+void tell_sched_cpu_down_begin(int cpu)
+{
+ mutex_lock(&sched_down_mutex);
+ cpumask_set_cpu(cpu, &sched_down_cpumask);
+ mutex_unlock(&sched_down_mutex);
+}
+
+void tell_sched_cpu_down_done(int cpu)
+{
+ mutex_lock(&sched_down_mutex);
+ cpumask_clear_cpu(cpu, &sched_down_cpumask);
+ mutex_unlock(&sched_down_mutex);
+}
+
+/**
+ * migrate_me - try to move the current task off this cpu
+ *
+ * Used by the pin_current_cpu() code to try to get tasks
+ * to move off the current CPU as it is going down.
+ * It will only move the task if the task isn't pinned to
+ * the CPU (with migrate_disable, affinity or THREAD_BOUND)
+ * and the task has to be in a RUNNING state. Otherwise the
+ * movement of the task will wake it up (change its state
+ * to running) when the task did not expect it.
+ *
+ * Returns 1 if it succeeded in moving the current task
+ * 0 otherwise.
+ */
+int migrate_me(void)
+{
+ struct task_struct *p = current;
+ struct migration_arg arg;
+ struct cpumask *cpumask;
+ struct cpumask *mask;
+ unsigned long flags;
+ unsigned int dest_cpu;
+ struct rq *rq;
+
+ /*
+ * We cannot migrate tasks bound to a CPU or tasks not
+ * running. The movement of the task will wake it up.
+ */
+ if (p->flags & PF_THREAD_BOUND || p->state)
+ return 0;
+
+ mutex_lock(&sched_down_mutex);
+ rq = task_rq_lock(p, &flags);
+
+ cpumask = &__get_cpu_var(sched_cpumasks);
+ mask = &p->cpus_allowed;
+
+ cpumask_andnot(cpumask, mask, &sched_down_cpumask);
+
+ if (!cpumask_weight(cpumask)) {
+ /* It's only on this CPU? */
+ task_rq_unlock(rq, p, &flags);
+ mutex_unlock(&sched_down_mutex);
+ return 0;
+ }
+
+ dest_cpu = cpumask_any_and(cpu_active_mask, cpumask);
+
+ arg.task = p;
+ arg.dest_cpu = dest_cpu;
+
+ task_rq_unlock(rq, p, &flags);
+
+ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
+ tlb_migrate_finish(p->mm);
+ mutex_unlock(&sched_down_mutex);
+
+ return 1;
+}
+
/*
* This is how migration works:
*


@@ -1,18 +1,17 @@
From 8ef3bc01ced54510e190912494f202018fe31643 Mon Sep 17 00:00:00 2001
Subject: cpu-rt-variants.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 17 Jun 2011 15:42:38 +0200
Subject: [089/256] cpu-rt-variants.patch
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/smp.h | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 10530d9..04e7ed9 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -219,6 +219,14 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
Index: linux-stable/include/linux/smp.h
===================================================================
--- linux-stable.orig/include/linux/smp.h
+++ linux-stable/include/linux/smp.h
@@ -218,6 +218,14 @@ static inline void kick_all_cpus_sync(vo
#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
#define put_cpu() preempt_enable()
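
The eight added lines themselves fall outside this hunk's context; in the rt tree they usually take roughly the following shape (assumed here, not shown by the diff): a get_cpu() variant that only disables migration, not preemption, on RT.

#ifndef CONFIG_PREEMPT_RT_FULL
# define get_cpu_light()	get_cpu()
# define put_cpu_light()	put_cpu()
#else
# define get_cpu_light()	({ migrate_disable(); smp_processor_id(); })
# define put_cpu_light()	migrate_enable()
#endif
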


@@ -1,7 +1,6 @@
From ce185e640e2da8f73059ddd561a8b1ffac0fc91f Mon Sep 17 00:00:00 2001
Subject: cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 14 Dec 2011 01:03:49 +0100
Subject: [233/256] cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT
We can't deal with the cpumask allocations which happen in atomic
context (see arch/x86/kernel/apic/io_apic.c) on RT right now.
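
The connection is easy to see from the API (a sketch, not from the patch): with CONFIG_CPUMASK_OFFSTACK=y a cpumask_var_t is a pointer and alloc_cpumask_var() really allocates, which an RT kernel cannot safely do from atomic context.

#include <linux/cpumask.h>
#include <linux/slab.h>

static void cpumask_example(void)
{
	cpumask_var_t mask;

	/* real kmalloc() only if CPUMASK_OFFSTACK=y; otherwise on-stack */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return;
	cpumask_copy(mask, cpu_online_mask);
	free_cpumask_var(mask);		/* kfree() only if OFFSTACK=y */
}
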
@@ -12,11 +11,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
lib/Kconfig | 1 +
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index f556832..98c1a17 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -750,7 +750,7 @@ config IOMMU_HELPER
Index: linux-stable/arch/x86/Kconfig
===================================================================
--- linux-stable.orig/arch/x86/Kconfig
+++ linux-stable/arch/x86/Kconfig
@@ -757,7 +757,7 @@ config IOMMU_HELPER
config MAXSMP
bool "Enable Maximum number of SMP Processors and NUMA Nodes"
depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
@@ -25,11 +24,11 @@ index f556832..98c1a17 100644
---help---
Enable maximum number of CPUS and NUMA Nodes for this architecture.
If unsure, say N.
diff --git a/lib/Kconfig b/lib/Kconfig
index 4a8aba2..4c03fe3 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -303,6 +303,7 @@ config CHECK_SIGNATURE
Index: linux-stable/lib/Kconfig
===================================================================
--- linux-stable.orig/lib/Kconfig
+++ linux-stable/lib/Kconfig
@@ -312,6 +312,7 @@ config CHECK_SIGNATURE
config CPUMASK_OFFSTACK
bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS


@@ -0,0 +1,51 @@
Subject: crypto: Make core builtin and init srcu early
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 12 Oct 2012 11:09:19 +0100
When the srcu notifier is not initialized before the first user we
crash.
[ 0.281119] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 0.281124] IP: [<ffffffff8108ee6d>] __srcu_read_lock+0x2f/0x79
Make the core code built-in for now and enforce early init.
FIXME: Create a static initializer for this.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
crypto/Kconfig | 2 +-
crypto/api.c | 7 +++++++
2 files changed, 8 insertions(+), 1 deletion(-)
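
The ordering requirement behind the crash, sketched with hypothetical names (a srcu_notifier_head embeds SRCU state that must be initialized at runtime before the first read-side use):

#include <linux/init.h>
#include <linux/notifier.h>

static struct srcu_notifier_head example_chain;

static int __init example_chain_init(void)
{
	/* must run before the first *_notifier_register/call_chain */
	srcu_init_notifier_head(&example_chain);
	return 0;
}
core_initcall(example_chain_init);
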
Index: linux-stable/crypto/Kconfig
===================================================================
--- linux-stable.orig/crypto/Kconfig
+++ linux-stable/crypto/Kconfig
@@ -13,7 +13,7 @@ source "crypto/async_tx/Kconfig"
# Cryptographic API Configuration
#
menuconfig CRYPTO
- tristate "Cryptographic API"
+ bool "Cryptographic API"
help
This option provides the core Cryptographic API.
Index: linux-stable/crypto/api.c
===================================================================
--- linux-stable.orig/crypto/api.c
+++ linux-stable/crypto/api.c
@@ -34,6 +34,13 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem);
struct srcu_notifier_head crypto_chain;
EXPORT_SYMBOL_GPL(crypto_chain);
+static int __init crypto_api_init(void)
+{
+ srcu_init_notifier_head(&crypto_chain);
+ return 0;
+}
+core_initcall(crypto_api_init);
+
static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
{
atomic_inc(&alg->cra_refcnt);


@@ -0,0 +1,25 @@
Subject: debugobjects-rt.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 21:41:35 +0200
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
lib/debugobjects.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
Index: linux-stable/lib/debugobjects.c
===================================================================
--- linux-stable.orig/lib/debugobjects.c
+++ linux-stable/lib/debugobjects.c
@@ -309,7 +309,10 @@ __debug_object_init(void *addr, struct d
struct debug_obj *obj;
unsigned long flags;
- fill_pool();
+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (preempt_count() == 0 && !irqs_disabled())
+#endif
+ fill_pool();
db = get_bucket((unsigned long) addr);


@@ -1,7 +1,6 @@
From fade21f9f82ea6e0b42364a6e3d49f0450722afb Mon Sep 17 00:00:00 2001
Subject: dm: Make rt aware
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 14 Nov 2011 23:06:09 +0100
Subject: [232/256] dm: Make rt aware
Use the BUG_ON_NORT variant for the irq_disabled() checks. RT has
interrupts legitimately enabled here as we can't deadlock against the
@@ -14,11 +13,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
drivers/md/dm.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
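
The helper's definition is not shown in this commit; the NORT variants conventionally reduce to something like the following sketch, i.e. the assertion only fires on a non-RT build, where interrupts really must be off at this point:

/* Assumed shape of the variant named in the description. */
#ifdef CONFIG_PREEMPT_RT_FULL
# define BUG_ON_NORT(c)		do { } while (0)
#else
# define BUG_ON_NORT(c)		BUG_ON(c)
#endif
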
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index e24143c..ad7d7e3 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1648,14 +1648,14 @@ static void dm_request_fn(struct request_queue *q)
Index: linux-stable/drivers/md/dm.c
===================================================================
--- linux-stable.orig/drivers/md/dm.c
+++ linux-stable/drivers/md/dm.c
@@ -1692,14 +1692,14 @@ static void dm_request_fn(struct request
if (map_request(ti, clone, md))
goto requeued;


@@ -0,0 +1,27 @@
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:24 -0500
Subject: drivers/net: Use disable_irq_nosync() in 8139too
Use disable_irq_nosync() instead of disable_irq() as this might be
called in atomic context with netpoll: disable_irq() synchronizes with any
running handler and may sleep, while disable_irq_nosync() only masks the line.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/net/ethernet/realtek/8139too.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
Index: linux-stable/drivers/net/ethernet/realtek/8139too.c
===================================================================
--- linux-stable.orig/drivers/net/ethernet/realtek/8139too.c
+++ linux-stable/drivers/net/ethernet/realtek/8139too.c
@@ -2216,7 +2216,7 @@ static void rtl8139_poll_controller(stru
struct rtl8139_private *tp = netdev_priv(dev);
const int irq = tp->pci_dev->irq;
- disable_irq(irq);
+ disable_irq_nosync(irq);
rtl8139_interrupt(irq, dev);
enable_irq(irq);
}


@@ -1,7 +1,6 @@
From e8586bc580f0516100fe73e3e838fd746d491f75 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 17 Nov 2009 12:02:43 +0100
Subject: [031/256] drivers: net: at91_ether: Make mdio protection -rt safe
Subject: drivers: net: at91_ether: Make mdio protection -rt safe
Neither the phy interrupt nor the timer callback which updates the
link status in absence of a phy interrupt are taking lp->lock which
@@ -14,15 +13,16 @@ Fix this by adding proper locking to at91ether_phy_interrupt() and
at91_check_ether() which serializes the access on -rt.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/net/ethernet/cadence/at91_ether.c | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 9061170..6b9e006 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -201,7 +201,9 @@ static irqreturn_t at91ether_phy_interrupt(int irq, void *dev_id)
Index: linux-stable/drivers/net/ethernet/cadence/at91_ether.c
===================================================================
--- linux-stable.orig/drivers/net/ethernet/cadence/at91_ether.c
+++ linux-stable/drivers/net/ethernet/cadence/at91_ether.c
@@ -199,7 +199,9 @@ static irqreturn_t at91ether_phy_interru
struct net_device *dev = (struct net_device *) dev_id;
struct at91_private *lp = netdev_priv(dev);
unsigned int phy;
@@ -32,22 +32,22 @@ index 9061170..6b9e006 100644
/*
* This hander is triggered on both edges, but the PHY chips expect
* level-triggering. We therefore have to check if the PHY actually has
@@ -243,6 +245,7 @@ static irqreturn_t at91ether_phy_interrupt(int irq, void *dev_id)
@@ -241,6 +243,7 @@ static irqreturn_t at91ether_phy_interru
done:
disable_mdi();
disable_mdi(lp);
+ spin_unlock_irqrestore(&lp->lock, flags);
return IRQ_HANDLED;
}
@@ -399,9 +402,11 @@ static void at91ether_check_link(unsigned long dev_id)
@@ -397,9 +400,11 @@ static void at91ether_check_link(unsigne
struct net_device *dev = (struct net_device *) dev_id;
struct at91_private *lp = netdev_priv(dev);
+ spin_lock_irq(&lp->lock);
enable_mdi();
enable_mdi(lp);
update_linkspeed(dev, 1);
disable_mdi();
disable_mdi(lp);
+ spin_unlock_irq(&lp->lock);
mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);


@@ -1,8 +1,6 @@
From e6535d525046fb8f73575ec9641c213ebefe16c7 Mon Sep 17 00:00:00 2001
From: Darren Hart <dvhltc@us.ibm.com>
Date: Tue, 18 May 2010 14:33:07 -0700
Subject: [030/256] drivers: net: ehea: Make rx irq handler non-threaded
(IRQF_NO_THREAD)
Subject: drivers: net: ehea: Make rx irq handler non-threaded (IRQF_NO_THREAD)
The underlying hardware is edge triggered but presented by XICS as level
triggered. The edge triggered interrupts are not reissued after masking. This
@@ -35,15 +33,16 @@ Cc: Michael Ellerman <ellerman@au1.ibm.com>
Cc: Doug Maxey <doug.maxey@us.ibm.com>
LKML-Reference: <4BF30793.5070300@us.ibm.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/net/ethernet/ibm/ehea/ehea_main.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
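
What the one-line flag change amounts to, as a generic sketch (request_irq() stands in for the ibmebus variant the driver actually uses; the handler and names are placeholders): under forced interrupt threading on RT, IRQF_NO_THREAD keeps the handler in hard-irq context, so an edge that fires while the line is masked is not lost behind a sleeping irq thread.

#include <linux/interrupt.h>

static irqreturn_t rx_handler(int irq, void *dev_id)
{
	/* acknowledge the device, schedule NAPI, ... */
	return IRQ_HANDLED;
}

static int setup_rx_irq(int irq, void *dev)
{
	/* IRQF_NO_THREAD: stay in hard interrupt context even on RT */
	return request_irq(irq, rx_handler, IRQF_NO_THREAD, "rx", dev);
}
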
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index f4d2da0..a4cb742 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1308,7 +1308,7 @@ static int ehea_reg_interrupts(struct net_device *dev)
Index: linux-stable/drivers/net/ethernet/ibm/ehea/ehea_main.c
===================================================================
--- linux-stable.orig/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ linux-stable/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1308,7 +1308,7 @@ static int ehea_reg_interrupts(struct ne
"%s-queue%d", dev->name, i);
ret = ibmebus_request_irq(pr->eq->attr.ist1,
ehea_recv_irq_handler,


@@ -1,7 +1,6 @@
From 56d31d400f86dff026047ae500f2d63684b5fb34 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 20 Jun 2009 11:36:54 +0200
Subject: [083/256] drivers/net: fix livelock issues
Subject: drivers/net: fix livelock issues
Preempt-RT runs into a live lock issue with the NETDEV_TX_LOCKED micro
optimization. The reason is that the softirq thread is rescheduling
@@ -11,21 +10,22 @@ monoplize the CPU and livelock on UP systems.
Remove it.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 6 +-----
drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 3 +--
drivers/net/ethernet/chelsio/cxgb/sge.c | 3 +--
drivers/net/ethernet/neterion/s2io.c | 7 +------
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 7 +++----
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 6 ++----
drivers/net/ethernet/tehuti/tehuti.c | 9 ++-------
drivers/net/rionet.c | 6 +-----
7 files changed, 10 insertions(+), 31 deletions(-)
7 files changed, 9 insertions(+), 31 deletions(-)
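
All seven drivers receive the same mechanical conversion; reduced to its pattern (illustrative, with a placeholder lock), the change in each xmit path is:

	/* before: bail out and let the stack retry -- livelocks on RT */
	if (!spin_trylock_irqsave(&ring->tx_lock, flags))
		return NETDEV_TX_LOCKED;

	/* after: simply wait for the lock */
	spin_lock_irqsave(&ring->tx_lock, flags);
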
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 1ef0c92..7cb889c 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2240,11 +2240,7 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
Index: linux-stable/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
===================================================================
--- linux-stable.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ linux-stable/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2122,11 +2122,7 @@ static netdev_tx_t atl1c_xmit_frame(stru
}
tpd_req = atl1c_cal_tpd_req(skb);
@@ -38,11 +38,11 @@ index 1ef0c92..7cb889c 100644
if (atl1c_tpd_avail(adapter, type) < tpd_req) {
/* no enough descriptor, just stop queue */
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 93ff2b2..cecc414 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1822,8 +1822,7 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
Index: linux-stable/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
===================================================================
--- linux-stable.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ linux-stable/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1803,8 +1803,7 @@ static netdev_tx_t atl1e_xmit_frame(stru
return NETDEV_TX_OK;
}
tpd_req = atl1e_cal_tdp_req(skb);
@@ -52,11 +52,11 @@ index 93ff2b2..cecc414 100644
if (atl1e_tpd_avail(adapter) < tpd_req) {
/* no enough descriptor, just stop queue */
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 47a8435..279c04e 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1678,8 +1678,7 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
Index: linux-stable/drivers/net/ethernet/chelsio/cxgb/sge.c
===================================================================
--- linux-stable.orig/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ linux-stable/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1678,8 +1678,7 @@ static int t1_sge_tx(struct sk_buff *skb
struct cmdQ *q = &sge->cmdQ[qid];
unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
@@ -66,11 +66,11 @@ index 47a8435..279c04e 100644
reclaim_completed_tx(sge, q);
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 6338ef8..ad2f094 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -4089,12 +4089,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
Index: linux-stable/drivers/net/ethernet/neterion/s2io.c
===================================================================
--- linux-stable.orig/drivers/net/ethernet/neterion/s2io.c
+++ linux-stable/drivers/net/ethernet/neterion/s2io.c
@@ -4088,12 +4088,7 @@ static netdev_tx_t s2io_xmit(struct sk_b
[skb->priority & (MAX_TX_FIFOS - 1)];
fifo = &mac_control->fifos[queue];
@@ -84,29 +84,28 @@ index 6338ef8..ad2f094 100644
if (sp->config.multiq) {
if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 1e38d50..f017954 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2128,10 +2128,9 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
adapter->stats.tx_length_errors++;
return NETDEV_TX_OK;
}
Index: linux-stable/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
===================================================================
--- linux-stable.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ linux-stable/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2159,10 +2159,8 @@ static int pch_gbe_xmit_frame(struct sk_
struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
unsigned long flags;
- if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
- /* Collision - tell upper layer to requeue */
- return NETDEV_TX_LOCKED;
- }
+
+ spin_lock_irqsave(&tx_ring->tx_lock, flags);
+
if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
netif_stop_queue(netdev);
spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index ad973ff..1afa33c 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1606,13 +1606,8 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
Index: linux-stable/drivers/net/ethernet/tehuti/tehuti.c
===================================================================
--- linux-stable.orig/drivers/net/ethernet/tehuti/tehuti.c
+++ linux-stable/drivers/net/ethernet/tehuti/tehuti.c
@@ -1630,13 +1630,8 @@ static netdev_tx_t bdx_tx_transmit(struc
unsigned long flags;
ENTER;
@@ -122,13 +121,13 @@ index ad973ff..1afa33c 100644
/* build tx descriptor */
BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 91d2588..d4c418e 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -176,11 +176,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
u16 destid;
Index: linux-stable/drivers/net/rionet.c
===================================================================
--- linux-stable.orig/drivers/net/rionet.c
+++ linux-stable/drivers/net/rionet.c
@@ -178,11 +178,7 @@ static int rionet_start_xmit(struct sk_b
unsigned long flags;
int add_num = 1;
- local_irq_save(flags);
- if (!spin_trylock(&rnet->tx_lock)) {
@@ -137,5 +136,5 @@ index 91d2588..d4c418e 100644
- }
+ spin_lock_irqsave(&rnet->tx_lock, flags);
if ((rnet->tx_cnt + 1) > RIONET_TX_RING_SIZE) {
netif_stop_queue(ndev);
if (is_multicast_ether_addr(eth->h_dest))
add_num = nact;


@@ -1,7 +1,6 @@
From ac9c32dccd74d3c6344a4d857e3b0fa028df7249 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 1 Apr 2010 20:20:57 +0200
Subject: [085/256] drivers: net: gianfar: Make RT aware
Subject: drivers: net: gianfar: Make RT aware
The adjust_link() disables interrupts before taking the queue
locks. On RT those locks are converted to "sleeping" locks and
@@ -11,15 +10,16 @@ local_irq_save/restore_nort.
Reported-by: Xianghua Xiao <xiaoxianghua@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Xianghua Xiao <xiaoxianghua@gmail.com>
---
drivers/net/ethernet/freescale/gianfar.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
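
The *_nort variants are defined elsewhere in the series; their usual shape (assumed here) is to be the real thing on a non-RT kernel and to only save/restore flags on RT, where the queue locks may legitimately be taken with interrupts enabled:

#ifdef CONFIG_PREEMPT_RT_FULL
# define local_irq_save_nort(flags)	do { local_save_flags(flags); } while (0)
# define local_irq_restore_nort(flags)	do { (void)(flags); } while (0)
#else
# define local_irq_save_nort(flags)	local_irq_save(flags)
# define local_irq_restore_nort(flags)	local_irq_restore(flags)
#endif
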
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index e7bed53..3a42d36 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1643,7 +1643,7 @@ void stop_gfar(struct net_device *dev)
Index: linux-stable/drivers/net/ethernet/freescale/gianfar.c
===================================================================
--- linux-stable.orig/drivers/net/ethernet/freescale/gianfar.c
+++ linux-stable/drivers/net/ethernet/freescale/gianfar.c
@@ -1652,7 +1652,7 @@ void stop_gfar(struct net_device *dev)
/* Lock it down */
@@ -28,7 +28,7 @@ index e7bed53..3a42d36 100644
lock_tx_qs(priv);
lock_rx_qs(priv);
@@ -1651,7 +1651,7 @@ void stop_gfar(struct net_device *dev)
@@ -1660,7 +1660,7 @@ void stop_gfar(struct net_device *dev)
unlock_rx_qs(priv);
unlock_tx_qs(priv);
@@ -37,7 +37,7 @@ index e7bed53..3a42d36 100644
/* Free the IRQs */
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
@@ -2948,7 +2948,7 @@ static void adjust_link(struct net_device *dev)
@@ -2957,7 +2957,7 @@ static void adjust_link(struct net_devic
struct phy_device *phydev = priv->phydev;
int new_state = 0;
@@ -46,7 +46,7 @@ index e7bed53..3a42d36 100644
lock_tx_qs(priv);
if (phydev->link) {
@@ -3015,7 +3015,7 @@ static void adjust_link(struct net_device *dev)
@@ -3026,7 +3026,7 @@ static void adjust_link(struct net_devic
if (new_state && netif_msg_link(priv))
phy_print_status(phydev);
unlock_tx_qs(priv);


@@ -1,22 +1,21 @@
From dd9a1a7b9922f4f3c8fee60720aff0baf8fa150b Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:30:18 -0500
Subject: [028/256] drivers/net: tulip_remove_one needs to call
pci_disable_device()
Subject: drivers/net: tulip_remove_one needs to call pci_disable_device()
Otherwise the device is not completely shut down.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/net/ethernet/dec/tulip/tulip_core.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index fea3641..d9a5fe0 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1946,6 +1946,7 @@ static void __devexit tulip_remove_one (struct pci_dev *pdev)
Index: linux-stable/drivers/net/ethernet/dec/tulip/tulip_core.c
===================================================================
--- linux-stable.orig/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ linux-stable/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1948,6 +1948,7 @@ static void __devexit tulip_remove_one (
pci_iounmap(pdev, tp->base_addr);
free_netdev (dev);
pci_release_regions (pdev);


@@ -1,7 +1,6 @@
From 235883690f6e8ec9f7b95aa9d14e4131ad511090 Mon Sep 17 00:00:00 2001
From: Steven Rostedt <rostedt@goodmis.org>
Date: Fri, 3 Jul 2009 08:30:00 -0500
Subject: [084/256] drivers/net: vortex fix locking issues
Subject: drivers/net: vortex fix locking issues
Argh, cut and paste wasn't enough...
@@ -17,15 +16,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
1 file changed, 4 insertions(+), 4 deletions(-)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
drivers/net/ethernet/3com/3c59x.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index e463d10..848aeea 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -843,9 +843,9 @@ static void poll_vortex(struct net_device *dev)
Index: linux-stable/drivers/net/ethernet/3com/3c59x.c
===================================================================
--- linux-stable.orig/drivers/net/ethernet/3com/3c59x.c
+++ linux-stable/drivers/net/ethernet/3com/3c59x.c
@@ -843,9 +843,9 @@ static void poll_vortex(struct net_devic
{
struct vortex_private *vp = netdev_priv(dev);
unsigned long flags;
@@ -37,7 +33,7 @@ index e463d10..848aeea 100644
}
#endif
@@ -1920,12 +1920,12 @@ static void vortex_tx_timeout(struct net_device *dev)
@@ -1920,12 +1920,12 @@ static void vortex_tx_timeout(struct net
* Block interrupts because vortex_interrupt does a bare spin_lock()
*/
unsigned long flags;


@@ -1,21 +1,21 @@
From f8d2d716a11367c1961fb00e19123380a9d48ee4 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:30 -0500
Subject: [025/256] drivers: random: Reduce preempt disabled region
Subject: drivers: random: Reduce preempt disabled region
No need to keep preemption disabled across the whole function.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/char/random.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 4ec04a7..1e37cf4 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -633,8 +633,11 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
Index: linux-stable/drivers/char/random.c
===================================================================
--- linux-stable.orig/drivers/char/random.c
+++ linux-stable/drivers/char/random.c
@@ -679,9 +679,12 @@ static void add_timer_randomness(struct
preempt_disable();
/* if over the trickle threshold, use only 1 in 4096 samples */
if (input_pool.entropy_count > trickle_thresh &&
@@ -25,11 +25,12 @@ index 4ec04a7..1e37cf4 100644
+ preempt_enable();
+ return;
+ }
+ preempt_enable();
sample.jiffies = jiffies;
@@ -680,8 +683,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
sample.cycles = get_cycles();
sample.num = num;
@@ -722,8 +725,6 @@ static void add_timer_randomness(struct
credit_entropy_bits(&input_pool,
min_t(int, fls(delta>>1), 11));
}


@@ -1,19 +1,19 @@
From e447d6e43e11faf76069bfabd1d43c1a55bc57aa Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:30:01 -0500
Subject: [193/256] serial: 8250: Call flush_to_ldisc when the irq is threaded
Subject: serial: 8250: Call flush_to_ldisc when the irq is threaded
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
drivers/tty/serial/8250/8250.c | 2 ++
drivers/tty/tty_buffer.c | 4 ++++
2 files changed, 6 insertions(+)
diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250.c
index 6ede5a7..ff93c70 100644
--- a/drivers/tty/serial/8250/8250.c
+++ b/drivers/tty/serial/8250/8250.c
@@ -1584,12 +1584,14 @@ static irqreturn_t serial8250_interrupt(int irq, void *dev_id)
Index: linux-stable/drivers/tty/serial/8250/8250.c
===================================================================
--- linux-stable.orig/drivers/tty/serial/8250/8250.c
+++ linux-stable/drivers/tty/serial/8250/8250.c
@@ -1549,12 +1549,14 @@ static irqreturn_t serial8250_interrupt(
l = l->next;
@@ -28,11 +28,11 @@ index 6ede5a7..ff93c70 100644
} while (l != end);
spin_unlock(&i->lock);
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 6c9b7cd..a56c223 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -493,10 +493,14 @@ void tty_flip_buffer_push(struct tty_struct *tty)
Index: linux-stable/drivers/tty/tty_buffer.c
===================================================================
--- linux-stable.orig/drivers/tty/tty_buffer.c
+++ linux-stable/drivers/tty/tty_buffer.c
@@ -538,10 +538,14 @@ void tty_flip_buffer_push(struct tty_str
tty->buf.tail->commit = tty->buf.tail->used;
spin_unlock_irqrestore(&tty->buf.lock, flags);


@@ -1,19 +1,19 @@
From 6503c468cedb29732e4dd282219e3d311837e031 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:30:01 -0500
Subject: [192/256] serial: 8250: Clean up the locking for -rt
Subject: serial: 8250: Clean up the locking for -rt
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/tty/serial/8250/8250.c | 15 +++++----------
1 file changed, 5 insertions(+), 10 deletions(-)
diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250.c
index d537431..6ede5a7 100644
--- a/drivers/tty/serial/8250/8250.c
+++ b/drivers/tty/serial/8250/8250.c
@@ -2808,14 +2808,10 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
Index: linux-stable/drivers/tty/serial/8250/8250.c
===================================================================
--- linux-stable.orig/drivers/tty/serial/8250/8250.c
+++ linux-stable/drivers/tty/serial/8250/8250.c
@@ -2773,14 +2773,10 @@ serial8250_console_write(struct console
touch_nmi_watchdog();
@@ -32,7 +32,7 @@ index d537431..6ede5a7 100644
/*
* First save the IER then disable the interrupts
@@ -2847,8 +2843,7 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
@@ -2812,8 +2808,7 @@ serial8250_console_write(struct console
serial8250_modem_status(up);
if (locked)


@@ -1,18 +1,17 @@
From 3cbc89cb4588fb756ae01316e2652119eb6dd5fc Mon Sep 17 00:00:00 2001
Subject: drivers-tty-fix-omap-lock-crap.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 28 Jul 2011 13:32:57 +0200
Subject: [194/256] drivers-tty-fix-omap-lock-crap.patch
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/tty/serial/omap-serial.c | 12 ++++--------
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index d00b38e..f697492 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1064,13 +1064,10 @@ serial_omap_console_write(struct console *co, const char *s,
Index: linux-stable/drivers/tty/serial/omap-serial.c
===================================================================
--- linux-stable.orig/drivers/tty/serial/omap-serial.c
+++ linux-stable/drivers/tty/serial/omap-serial.c
@@ -1082,13 +1082,10 @@ serial_omap_console_write(struct console
pm_runtime_get_sync(&up->pdev->dev);
@@ -29,7 +28,7 @@ index d00b38e..f697492 100644
/*
* First save the IER then disable the interrupts
@@ -1099,8 +1096,7 @@ serial_omap_console_write(struct console *co, const char *s,
@@ -1117,8 +1114,7 @@ serial_omap_console_write(struct console
pm_runtime_mark_last_busy(&up->pdev->dev);
pm_runtime_put_autosuspend(&up->pdev->dev);
if (locked)


@@ -1,32 +1,31 @@
From 5f55e0bba8030011d082892e2c5c789795450715 Mon Sep 17 00:00:00 2001
Subject: early-printk-consolidate.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 23 Jul 2011 11:04:08 +0200
Subject: [057/256] early-printk-consolidate.patch
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/arm/kernel/early_printk.c | 17 +++--------------
arch/blackfin/kernel/early_printk.c | 2 --
arch/microblaze/kernel/early_printk.c | 26 ++++----------------------
arch/mips/kernel/early_printk.c | 10 ++++------
arch/mips/kernel/early_printk.c | 11 +++++------
arch/powerpc/kernel/udbg.c | 6 ++----
arch/sh/kernel/sh_bios.c | 2 --
arch/sparc/kernel/setup_32.c | 1 +
arch/sparc/kernel/setup_64.c | 8 +++++++-
arch/tile/kernel/early_printk.c | 26 ++++----------------------
arch/tile/kernel/early_printk.c | 27 +++++----------------------
arch/um/kernel/early_printk.c | 8 +++++---
arch/unicore32/kernel/early_printk.c | 12 ++++--------
arch/x86/kernel/early_printk.c | 21 ++-------------------
include/linux/console.h | 1 +
include/linux/printk.h | 5 +++++
include/linux/printk.h | 6 ++++++
kernel/printk.c | 30 +++++++++++++++++++++++-------
15 files changed, 65 insertions(+), 110 deletions(-)
15 files changed, 68 insertions(+), 110 deletions(-)
diff --git a/arch/arm/kernel/early_printk.c b/arch/arm/kernel/early_printk.c
index 85aa2b2..4307653 100644
--- a/arch/arm/kernel/early_printk.c
+++ b/arch/arm/kernel/early_printk.c
@@ -29,28 +29,17 @@ static void early_console_write(struct console *con, const char *s, unsigned n)
Index: linux-stable/arch/arm/kernel/early_printk.c
===================================================================
--- linux-stable.orig/arch/arm/kernel/early_printk.c
+++ linux-stable/arch/arm/kernel/early_printk.c
@@ -29,28 +29,17 @@ static void early_console_write(struct c
early_write(s, n);
}
@@ -58,11 +57,11 @@ index 85aa2b2..4307653 100644
return 0;
}
diff --git a/arch/blackfin/kernel/early_printk.c b/arch/blackfin/kernel/early_printk.c
index 84ed837..61fbd2d 100644
--- a/arch/blackfin/kernel/early_printk.c
+++ b/arch/blackfin/kernel/early_printk.c
@@ -25,8 +25,6 @@ extern struct console *bfin_earlyserial_init(unsigned int port,
Index: linux-stable/arch/blackfin/kernel/early_printk.c
===================================================================
--- linux-stable.orig/arch/blackfin/kernel/early_printk.c
+++ linux-stable/arch/blackfin/kernel/early_printk.c
@@ -25,8 +25,6 @@ extern struct console *bfin_earlyserial_
extern struct console *bfin_jc_early_init(void);
#endif
@@ -71,10 +70,10 @@ index 84ed837..61fbd2d 100644
/* Default console */
#define DEFAULT_PORT 0
#define DEFAULT_CFLAG CS8|B57600
diff --git a/arch/microblaze/kernel/early_printk.c b/arch/microblaze/kernel/early_printk.c
index aba1f9a..b099a86 100644
--- a/arch/microblaze/kernel/early_printk.c
+++ b/arch/microblaze/kernel/early_printk.c
Index: linux-stable/arch/microblaze/kernel/early_printk.c
===================================================================
--- linux-stable.orig/arch/microblaze/kernel/early_printk.c
+++ linux-stable/arch/microblaze/kernel/early_printk.c
@@ -21,7 +21,6 @@
#include <asm/setup.h>
#include <asm/prom.h>
@@ -83,7 +82,7 @@ index aba1f9a..b099a86 100644
static u32 base_addr;
#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
@@ -109,27 +108,11 @@ static struct console early_serial_uart16550_console = {
@@ -109,27 +108,11 @@ static struct console early_serial_uart1
};
#endif /* CONFIG_SERIAL_8250_CONSOLE */
@@ -141,11 +140,19 @@ index aba1f9a..b099a86 100644
- early_console_initialized = 0;
+ early_console = NULL;
}
diff --git a/arch/mips/kernel/early_printk.c b/arch/mips/kernel/early_printk.c
index 9ae813e..973c995 100644
--- a/arch/mips/kernel/early_printk.c
+++ b/arch/mips/kernel/early_printk.c
@@ -25,20 +25,18 @@ early_console_write(struct console *con, const char *s, unsigned n)
Index: linux-stable/arch/mips/kernel/early_printk.c
===================================================================
--- linux-stable.orig/arch/mips/kernel/early_printk.c
+++ linux-stable/arch/mips/kernel/early_printk.c
@@ -8,6 +8,7 @@
* written by Ralf Baechle (ralf@linux-mips.org)
*/
#include <linux/console.h>
+#include <linux/printk.h>
#include <linux/init.h>
#include <asm/setup.h>
@@ -25,20 +26,18 @@ early_console_write(struct console *con,
}
}
@@ -170,10 +177,10 @@ index 9ae813e..973c995 100644
- register_console(&early_console);
+ register_console(&early_console_prom);
}
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index c39c1ca..8b00aab 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
Index: linux-stable/arch/powerpc/kernel/udbg.c
===================================================================
--- linux-stable.orig/arch/powerpc/kernel/udbg.c
+++ linux-stable/arch/powerpc/kernel/udbg.c
@@ -179,15 +179,13 @@ static struct console udbg_console = {
.index = 0,
};
@@ -191,7 +198,7 @@ index c39c1ca..8b00aab 100644
return;
if (!udbg_putc)
@@ -197,7 +195,7 @@ void __init register_early_udbg_console(void)
@@ -197,7 +195,7 @@ void __init register_early_udbg_console(
printk(KERN_INFO "early console immortal !\n");
udbg_console.flags &= ~CON_BOOT;
}
@@ -200,10 +207,10 @@ index c39c1ca..8b00aab 100644
register_console(&udbg_console);
}
diff --git a/arch/sh/kernel/sh_bios.c b/arch/sh/kernel/sh_bios.c
index 47475cc..a5b51b9 100644
--- a/arch/sh/kernel/sh_bios.c
+++ b/arch/sh/kernel/sh_bios.c
Index: linux-stable/arch/sh/kernel/sh_bios.c
===================================================================
--- linux-stable.orig/arch/sh/kernel/sh_bios.c
+++ linux-stable/arch/sh/kernel/sh_bios.c
@@ -144,8 +144,6 @@ static struct console bios_console = {
.index = -1,
};
@@ -213,23 +220,23 @@ index 47475cc..a5b51b9 100644
static int __init setup_early_printk(char *buf)
{
int keep_early = 0;
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index d444468..a000aa5 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -221,6 +221,7 @@ void __init setup_arch(char **cmdline_p)
Index: linux-stable/arch/sparc/kernel/setup_32.c
===================================================================
--- linux-stable.orig/arch/sparc/kernel/setup_32.c
+++ linux-stable/arch/sparc/kernel/setup_32.c
@@ -309,6 +309,7 @@ void __init setup_arch(char **cmdline_p)
boot_flags_init(*cmdline_p);
+ early_console = &prom_early_console;
register_console(&prom_early_console);
/* Set sparc_cpu_model */
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 1414d16..8b37e5a 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -487,6 +487,12 @@ static void __init init_sparc64_elf_hwcap(void)
printk("ARCH: ");
Index: linux-stable/arch/sparc/kernel/setup_64.c
===================================================================
--- linux-stable.orig/arch/sparc/kernel/setup_64.c
+++ linux-stable/arch/sparc/kernel/setup_64.c
@@ -487,6 +487,12 @@ static void __init init_sparc64_elf_hwca
popc_patch();
}
@@ -251,11 +258,19 @@ index 1414d16..8b37e5a 100644
if (tlb_type == hypervisor)
printk("ARCH: SUN4V\n");
diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c
index afb9c9a..ff25220 100644
--- a/arch/tile/kernel/early_printk.c
+++ b/arch/tile/kernel/early_printk.c
@@ -33,25 +33,8 @@ static struct console early_hv_console = {
Index: linux-stable/arch/tile/kernel/early_printk.c
===================================================================
--- linux-stable.orig/arch/tile/kernel/early_printk.c
+++ linux-stable/arch/tile/kernel/early_printk.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/irqflags.h>
+#include <linux/printk.h>
#include <asm/setup.h>
#include <hv/hypervisor.h>
@@ -33,25 +34,8 @@ static struct console early_hv_console =
};
/* Direct interface for emergencies */
@@ -281,7 +296,7 @@ index afb9c9a..ff25220 100644
void early_panic(const char *fmt, ...)
{
va_list ap;
@@ -69,14 +52,13 @@ static int __initdata keep_early;
@@ -69,14 +53,13 @@ static int __initdata keep_early;
static int __init setup_early_printk(char *str)
{
@@ -297,7 +312,7 @@ index afb9c9a..ff25220 100644
register_console(early_console);
return 0;
@@ -85,12 +67,12 @@ static int __init setup_early_printk(char *str)
@@ -85,12 +68,12 @@ static int __init setup_early_printk(cha
void __init disable_early_printk(void)
{
early_console_complete = 1;
@@ -312,7 +327,7 @@ index afb9c9a..ff25220 100644
} else {
early_printk("keeping early console\n");
}
@@ -98,7 +80,7 @@ void __init disable_early_printk(void)
@@ -98,7 +81,7 @@ void __init disable_early_printk(void)
void warn_early_printk(void)
{
@@ -321,11 +336,11 @@ index afb9c9a..ff25220 100644
return;
early_printk("\
Machine shutting down before console output is fully initialized.\n\
diff --git a/arch/um/kernel/early_printk.c b/arch/um/kernel/early_printk.c
index ec649bf..183060f 100644
--- a/arch/um/kernel/early_printk.c
+++ b/arch/um/kernel/early_printk.c
@@ -16,7 +16,7 @@ static void early_console_write(struct console *con, const char *s, unsigned int
Index: linux-stable/arch/um/kernel/early_printk.c
===================================================================
--- linux-stable.orig/arch/um/kernel/early_printk.c
+++ linux-stable/arch/um/kernel/early_printk.c
@@ -16,7 +16,7 @@ static void early_console_write(struct c
um_early_printk(s, n);
}
@@ -347,11 +362,11 @@ index ec649bf..183060f 100644
return 0;
}
diff --git a/arch/unicore32/kernel/early_printk.c b/arch/unicore32/kernel/early_printk.c
index 3922255..9be0d5d 100644
--- a/arch/unicore32/kernel/early_printk.c
+++ b/arch/unicore32/kernel/early_printk.c
@@ -33,21 +33,17 @@ static struct console early_ocd_console = {
Index: linux-stable/arch/unicore32/kernel/early_printk.c
===================================================================
--- linux-stable.orig/arch/unicore32/kernel/early_printk.c
+++ linux-stable/arch/unicore32/kernel/early_printk.c
@@ -33,21 +33,17 @@ static struct console early_ocd_console
.index = -1,
};
@@ -377,11 +392,11 @@ index 3922255..9be0d5d 100644
if (keep_early)
early_console->flags &= ~CON_BOOT;
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 9b9f18b..d15f575 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -169,25 +169,9 @@ static struct console early_serial_console = {
Index: linux-stable/arch/x86/kernel/early_printk.c
===================================================================
--- linux-stable.orig/arch/x86/kernel/early_printk.c
+++ linux-stable/arch/x86/kernel/early_printk.c
@@ -169,25 +169,9 @@ static struct console early_serial_conso
.index = -1,
};
@@ -408,7 +423,7 @@ index 9b9f18b..d15f575 100644
printk(KERN_CRIT "ERROR: earlyprintk= %s already used\n",
con->name);
return;
@@ -207,9 +191,8 @@ static int __init setup_early_printk(char *buf)
@@ -207,9 +191,8 @@ static int __init setup_early_printk(cha
if (!buf)
return 0;
@@ -419,10 +434,10 @@ index 9b9f18b..d15f575 100644
keep = (strstr(buf, "keep") != NULL);
diff --git a/include/linux/console.h b/include/linux/console.h
index 7201ce4..dec7f97 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
Index: linux-stable/include/linux/console.h
===================================================================
--- linux-stable.orig/include/linux/console.h
+++ linux-stable/include/linux/console.h
@@ -133,6 +133,7 @@ struct console {
for (con = console_drivers; con != NULL; con = con->next)
@ -431,17 +446,18 @@ index 7201ce4..dec7f97 100644
extern int add_preferred_console(char *name, int idx, char *options);
extern int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options);
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 0525927..c8d7f7b 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -88,8 +88,13 @@ int no_printk(const char *fmt, ...)
Index: linux-stable/include/linux/printk.h
===================================================================
--- linux-stable.orig/include/linux/printk.h
+++ linux-stable/include/linux/printk.h
@@ -95,8 +95,14 @@ int no_printk(const char *fmt, ...)
return 0;
}
+#ifdef CONFIG_EARLY_PRINTK
extern asmlinkage __printf(1, 2)
void early_printk(const char *fmt, ...);
+void early_vprintk(const char *fmt, va_list ap);
+#else
+static inline __printf(1, 2) __cold
+void early_printk(const char *s, ...) { }
@@ -449,11 +465,11 @@ index 0525927..c8d7f7b 100644
extern int printk_needs_cpu(int cpu);
extern void printk_tick(void);
diff --git a/kernel/printk.c b/kernel/printk.c
index b663c2c..c442606 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -47,13 +47,6 @@
Index: linux-stable/kernel/printk.c
===================================================================
--- linux-stable.orig/kernel/printk.c
+++ linux-stable/kernel/printk.c
@@ -48,13 +48,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/printk.h>
@@ -464,17 +480,17 @@ index b663c2c..c442606 100644
-{
-}
-
#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
/* printk's without a loglevel use this.. */
@@ -524,6 +517,29 @@ static void __call_console_drivers(unsigned start, unsigned end)
}
#define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL
@@ -1232,6 +1225,29 @@ SYSCALL_DEFINE3(syslog, int, type, char
return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
}
+#ifdef CONFIG_EARLY_PRINTK
+struct console *early_console;
+
+static void early_vprintk(const char *fmt, va_list ap)
+void early_vprintk(const char *fmt, va_list ap)
+{
+ if (early_console) {
+ char buf[512];
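
The helper this series converges on boils down to the following sketch, reconstructed from the fragments above. Only early_console, the early_vprintk() signature and the 512-byte buffer appear in the hunks; the vscnprintf() and write() calls are assumptions about the rest of the body:

struct console *early_console;

void early_vprintk(const char *fmt, va_list ap)
{
	if (early_console) {
		char buf[512];
		int n = vscnprintf(buf, sizeof(buf), fmt, ap);

		/* hand the formatted text to the registered early console */
		early_console->write(early_console, buf, n);
	}
}

asmlinkage void early_printk(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	early_vprintk(fmt, ap);
	va_end(ap);
}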


@@ -1,18 +1,17 @@
From e00039cd78706b8e320bb3009c12991e4ec6e303 Mon Sep 17 00:00:00 2001
Subject: epoll.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 8 Jul 2011 16:35:35 +0200
Subject: [206/256] epoll.patch
Date: Fri, 08 Jul 2011 16:35:35 +0200
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
fs/eventpoll.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index c0b3c70..39a2364 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -488,12 +488,12 @@ static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
Index: linux-stable/fs/eventpoll.c
===================================================================
--- linux-stable.orig/fs/eventpoll.c
+++ linux-stable/fs/eventpoll.c
@@ -495,12 +495,12 @@ static int ep_poll_wakeup_proc(void *pri
*/
static void ep_poll_safewake(wait_queue_head_t *wq)
{


@@ -1,7 +1,6 @@
From aa2098c961cc12184d7df1e87ff12c6020d5f785 Mon Sep 17 00:00:00 2001
Subject: filemap-fix-up.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 17 Jun 2011 18:56:24 +0200
Subject: [036/256] filemap-fix-up.patch
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Wrecked-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
@@ -10,11 +9,11 @@ Link: http://lkml.kernel.org/n/tip-m6yuzd6ul717hlnl2gj6p3ou@git.kernel.org
mm/filemap.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 79c4b2b..51c34c0 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2036,7 +2036,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
Index: linux-stable/mm/filemap.c
===================================================================
--- linux-stable.orig/mm/filemap.c
+++ linux-stable/mm/filemap.c
@@ -1955,7 +1955,7 @@ size_t iov_iter_copy_from_user_atomic(st
char *kaddr;
size_t copied;


@@ -1,7 +1,5 @@
From 66eecab296c168e350a2f94e157a443b6243e63e Mon Sep 17 00:00:00 2001
From: Steven Rostedt <rostedt@goodmis.org>
Date: Thu, 7 Jun 2012 07:46:49 -0400
Subject: [051/256] x86: Do not disable preemption in int3 on 32bit
Subject: x86: Do not disable preemption in int3 on 32bit
Preemption must be disabled before enabling interrupts in do_trap
on x86_64 because the stack in use for int3 and debug is a per CPU
@@ -19,15 +17,16 @@ when the stack is on the IST.
Cc: stable-rt@vger.kernel.org
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/x86/kernel/traps.c | 32 +++++++++++++++++++++++---------
1 file changed, 23 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index ff9281f1..0b01977 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -87,9 +87,21 @@ static inline void conditional_sti(struct pt_regs *regs)
Index: linux-stable/arch/x86/kernel/traps.c
===================================================================
--- linux-stable.orig/arch/x86/kernel/traps.c
+++ linux-stable/arch/x86/kernel/traps.c
@@ -87,9 +87,21 @@ static inline void conditional_sti(struc
local_irq_enable();
}
@@ -50,7 +49,7 @@ index ff9281f1..0b01977 100644
if (regs->flags & X86_EFLAGS_IF)
local_irq_enable();
}
@@ -100,11 +112,13 @@ static inline void conditional_cli(struct pt_regs *regs)
@@ -100,11 +112,13 @@ static inline void conditional_cli(struc
local_irq_disable();
}
@@ -65,7 +64,7 @@ index ff9281f1..0b01977 100644
}
static void __kprobes
@@ -226,9 +240,9 @@ dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
@@ -225,9 +239,9 @@ dotraplinkage void do_stack_segment(stru
if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
X86_TRAP_SS, SIGBUS) == NOTIFY_STOP)
return;
@@ -77,7 +76,7 @@ index ff9281f1..0b01977 100644
}
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
@@ -320,9 +334,9 @@ dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
@@ -327,9 +341,9 @@ dotraplinkage void __kprobes notrace do_
* as we may switch to the interrupt stack.
*/
debug_stack_usage_inc();
@@ -89,7 +88,7 @@ index ff9281f1..0b01977 100644
debug_stack_usage_dec();
}
@@ -423,12 +437,12 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
@@ -430,12 +444,12 @@ dotraplinkage void __kprobes do_debug(st
debug_stack_usage_inc();
/* It's safe to allow irq's after DR6 has been saved */
@ -104,7 +103,7 @@ index ff9281f1..0b01977 100644
debug_stack_usage_dec();
return;
}
@@ -448,7 +462,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
@@ -455,7 +469,7 @@ dotraplinkage void __kprobes do_debug(st
si_code = get_si_code(tsk->thread.debugreg6);
if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
send_sigtrap(tsk, regs, error_code, si_code);
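
The shape of the fix is easiest to see as plain code. The sketch below is reconstructed from the hunks above, so treat the exact helper names as assumptions; the point is that the preempt bracketing exists only on 64-bit, where int3/debug run on a per-CPU IST stack:

static inline void conditional_sti_ist(struct pt_regs *regs)
{
#ifdef CONFIG_X86_64
	/*
	 * int3 and debug use a per-CPU IST stack on 64-bit, so the
	 * task must not be migrated while interrupts are enabled.
	 */
	preempt_disable();
#endif
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void conditional_cli_ist(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
#ifdef CONFIG_X86_64
	preempt_enable_no_resched();
#endif
}

On 32-bit both branches compile away to the plain conditional_sti()/conditional_cli() behaviour, which is exactly what the changelog argues is sufficient there.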


@@ -1,7 +1,6 @@
From 144c78a3b223b226689db343fedd8d47fbd7dc2d Mon Sep 17 00:00:00 2001
Subject: fs-block-rt-support.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 14 Jun 2011 17:05:09 +0200
Subject: [199/256] fs-block-rt-support.patch
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
@@ -9,11 +8,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
fs/file.c | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 0d947d0..f068328 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -237,7 +237,7 @@ EXPORT_SYMBOL(blk_delay_queue);
Index: linux-stable/block/blk-core.c
===================================================================
--- linux-stable.orig/block/blk-core.c
+++ linux-stable/block/blk-core.c
@@ -239,7 +239,7 @@ EXPORT_SYMBOL(blk_delay_queue);
**/
void blk_start_queue(struct request_queue *q)
{
@@ -22,11 +21,11 @@ index 0d947d0..f068328 100644
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
__blk_run_queue(q);
diff --git a/fs/file.c b/fs/file.c
index 6f176f5..9f5343d 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -105,14 +105,14 @@ void free_fdtable_rcu(struct rcu_head *rcu)
Index: linux-stable/fs/file.c
===================================================================
--- linux-stable.orig/fs/file.c
+++ linux-stable/fs/file.c
@@ -105,14 +105,14 @@ void free_fdtable_rcu(struct rcu_head *r
kfree(fdt->open_fds);
kfree(fdt);
} else {


@@ -1,7 +1,6 @@
From ba7ac3a9415b0c09e16f37e5c56fc5cf64aaf71f Mon Sep 17 00:00:00 2001
Subject: fs: dcache: Use cpu_chill() in trylock loops
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 7 Mar 2012 21:00:34 +0100
Subject: [245/256] fs: dcache: Use cpu_chill() in trylock loops
Date: Wed, 07 Mar 2012 21:00:34 +0100
Retry loops on RT might loop forever when the modifying side was
preempted. Use cpu_chill() instead of cpu_relax() to let the system
@@ -16,10 +15,10 @@ Cc: stable-rt@vger.kernel.org
fs/namespace.c | 3 ++-
4 files changed, 8 insertions(+), 5 deletions(-)
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 908e184..bdd1788 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
Index: linux-stable/fs/autofs4/autofs_i.h
===================================================================
--- linux-stable.orig/fs/autofs4/autofs_i.h
+++ linux-stable/fs/autofs4/autofs_i.h
@@ -34,6 +34,7 @@
#include <linux/sched.h>
#include <linux/mount.h>
@@ -28,11 +27,11 @@ index 908e184..bdd1788 100644
#include <asm/current.h>
#include <asm/uaccess.h>
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 1feb68e..859badd 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -171,7 +171,7 @@ again:
Index: linux-stable/fs/autofs4/expire.c
===================================================================
--- linux-stable.orig/fs/autofs4/expire.c
+++ linux-stable/fs/autofs4/expire.c
@@ -166,7 +166,7 @@ again:
parent = p->d_parent;
if (!spin_trylock(&parent->d_lock)) {
spin_unlock(&p->d_lock);
@@ -41,10 +40,10 @@ index 1feb68e..859badd 100644
goto relock;
}
spin_unlock(&p->d_lock);
diff --git a/fs/dcache.c b/fs/dcache.c
index b80531c..0801198 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
Index: linux-stable/fs/dcache.c
===================================================================
--- linux-stable.orig/fs/dcache.c
+++ linux-stable/fs/dcache.c
@@ -37,6 +37,7 @@
#include <linux/rculist_bl.h>
#include <linux/prefetch.h>
@@ -53,7 +52,7 @@ index b80531c..0801198 100644
#include "internal.h"
#include "mount.h"
@@ -472,7 +473,7 @@ static inline struct dentry *dentry_kill(struct dentry *dentry, int ref)
@@ -488,7 +489,7 @@ static inline struct dentry *dentry_kill
if (inode && !spin_trylock(&inode->i_lock)) {
relock:
spin_unlock(&dentry->d_lock);
@@ -62,7 +61,7 @@ index b80531c..0801198 100644
return dentry; /* try again with same dentry */
}
if (IS_ROOT(dentry))
@@ -858,7 +859,7 @@ relock:
@@ -876,7 +877,7 @@ relock:
if (!spin_trylock(&dentry->d_lock)) {
spin_unlock(&dcache_lru_lock);
@@ -71,7 +70,7 @@ index b80531c..0801198 100644
goto relock;
}
@@ -2040,7 +2041,7 @@ again:
@@ -2115,7 +2116,7 @@ again:
if (dentry->d_count == 1) {
if (inode && !spin_trylock(&inode->i_lock)) {
spin_unlock(&dentry->d_lock);
@@ -80,10 +79,10 @@ index b80531c..0801198 100644
goto again;
}
dentry->d_flags &= ~DCACHE_CANT_MOUNT;
diff --git a/fs/namespace.c b/fs/namespace.c
index 6dc617c..02f02ea 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
Index: linux-stable/fs/namespace.c
===================================================================
--- linux-stable.orig/fs/namespace.c
+++ linux-stable/fs/namespace.c
@@ -20,6 +20,7 @@
#include <linux/fs_struct.h> /* get_fs_root et.al. */
#include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */
@@ -92,7 +91,7 @@ index 6dc617c..02f02ea 100644
#include "pnode.h"
#include "internal.h"
@@ -315,7 +316,7 @@ int mnt_want_write(struct vfsmount *m)
@@ -313,7 +314,7 @@ int __mnt_want_write(struct vfsmount *m)
smp_mb();
while (mnt->mnt.mnt_flags & MNT_WRITE_HOLD) {
preempt_enable();
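
Every call site in this patch follows the same shape: drop the lock you already hold, chill, retry. As a standalone sketch (the dentry pair is illustrative; only cpu_chill() itself comes from the hunks above):

#include <linux/delay.h>	/* cpu_chill() on -rt kernels */

static void take_two_locks(struct dentry *dentry)
{
again:
	spin_lock(&dentry->d_lock);
	if (!spin_trylock(&dentry->d_parent->d_lock)) {
		spin_unlock(&dentry->d_lock);
		/*
		 * cpu_relax() would spin without ever letting a
		 * preempted lock holder run; cpu_chill() sleeps
		 * briefly instead, so the owner can make progress.
		 */
		cpu_chill();
		goto again;
	}
	/* ... both locks held ... */
	spin_unlock(&dentry->d_parent->d_lock);
	spin_unlock(&dentry->d_lock);
}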


@@ -0,0 +1,31 @@
From: Mike Galbraith <mgalbraith@suse.de>
Date: Wed, 11 Jul 2012 22:05:20 +0000
Subject: fs, jbd: pull your plug when waiting for space
With an -rt kernel, and a heavy sync IO load, tasks can jam
up on journal locks without unplugging, which can lead to
terminal IO starvation. Unplug and schedule when waiting for space.
Signed-off-by: Mike Galbraith <mgalbraith@suse.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Theodore Tso <tytso@mit.edu>
Link: http://lkml.kernel.org/r/1341812414.7370.73.camel@marge.simpson.net
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
fs/jbd/checkpoint.c | 2 ++
1 file changed, 2 insertions(+)
Index: linux-stable/fs/jbd/checkpoint.c
===================================================================
--- linux-stable.orig/fs/jbd/checkpoint.c
+++ linux-stable/fs/jbd/checkpoint.c
@@ -129,6 +129,8 @@ void __log_wait_for_space(journal_t *jou
if (journal->j_flags & JFS_ABORT)
return;
spin_unlock(&journal->j_state_lock);
+ if (current->plug)
+ io_schedule();
mutex_lock(&journal->j_checkpoint_mutex);
/*


@@ -1,8 +1,6 @@
From 2e64cea6269df0581e5f38a53232c0d1b495b6b8 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 18 Mar 2011 10:11:25 +0100
Subject: [078/256] fs: jbd/jbd2: Make state lock and journal head lock rt
safe
Subject: fs: jbd/jbd2: Make state lock and journal head lock rt safe
bit_spin_locks break under RT.
@@ -14,15 +12,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/buffer_head.h | 10 ++++++++++
include/linux/jbd_common.h | 24 ++++++++++++++++++++++++
2 files changed, 34 insertions(+)
---
include/linux/buffer_head.h | 10 ++++++++++
include/linux/jbd_common.h | 24 ++++++++++++++++++++++++
2 files changed, 34 insertions(+)
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 5c16cf1..3f8e27b 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
Index: linux-stable/include/linux/buffer_head.h
===================================================================
--- linux-stable.orig/include/linux/buffer_head.h
+++ linux-stable/include/linux/buffer_head.h
@@ -74,6 +74,11 @@ struct buffer_head {
atomic_t b_count; /* users using this buffer_head */
#ifdef CONFIG_PREEMPT_RT_BASE
@@ -35,7 +29,7 @@ index 5c16cf1..3f8e27b 100644
#endif
};
@@ -105,6 +110,11 @@ static inline void buffer_head_init_locks(struct buffer_head *bh)
@@ -105,6 +110,11 @@ static inline void buffer_head_init_lock
{
#ifdef CONFIG_PREEMPT_RT_BASE
spin_lock_init(&bh->b_uptodate_lock);
@@ -47,11 +41,11 @@ index 5c16cf1..3f8e27b 100644
#endif
}
diff --git a/include/linux/jbd_common.h b/include/linux/jbd_common.h
index 6230f85..11c313e 100644
--- a/include/linux/jbd_common.h
+++ b/include/linux/jbd_common.h
@@ -37,32 +37,56 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh)
Index: linux-stable/include/linux/jbd_common.h
===================================================================
--- linux-stable.orig/include/linux/jbd_common.h
+++ linux-stable/include/linux/jbd_common.h
@@ -39,32 +39,56 @@ static inline struct journal_head *bh2jh
static inline void jbd_lock_bh_state(struct buffer_head *bh)
{
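
The hunk is cut off right after the function head. Its complete shape, following the pattern this series uses for bit_spin_lock replacements (the #else branches are an assumption based on the b_state_lock/b_journal_head_lock fields added above):

static inline void jbd_lock_bh_state(struct buffer_head *bh)
{
#ifndef CONFIG_PREEMPT_RT_BASE
	bit_spin_lock(BH_State, &bh->b_state);
#else
	spin_lock(&bh->b_state_lock);	/* real, sleepable-on-rt spinlock */
#endif
}

static inline void jbd_unlock_bh_state(struct buffer_head *bh)
{
#ifndef CONFIG_PREEMPT_RT_BASE
	bit_spin_unlock(BH_State, &bh->b_state);
#else
	spin_unlock(&bh->b_state_lock);
#endif
}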


@@ -1,7 +1,6 @@
From 02c6cb04ae86544743ced9468bb81a8659473acc Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 19 Jul 2009 08:44:27 -0500
Subject: [196/256] fs: namespace preemption fix
Subject: fs: namespace preemption fix
On RT we cannot loop with preemption disabled here as
mnt_make_readonly() might have been preempted. We can safely enable
@@ -9,15 +8,16 @@ preemption while waiting for MNT_WRITE_HOLD to be cleared. Safe on !RT
as well.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
fs/namespace.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/fs/namespace.c b/fs/namespace.c
index 4e46539..6dc617c 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -313,8 +313,11 @@ int mnt_want_write(struct vfsmount *m)
Index: linux-stable/fs/namespace.c
===================================================================
--- linux-stable.orig/fs/namespace.c
+++ linux-stable/fs/namespace.c
@@ -311,8 +311,11 @@ int __mnt_want_write(struct vfsmount *m)
* incremented count after it has set MNT_WRITE_HOLD.
*/
smp_mb();
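
Put together, the resulting wait loop reads roughly as below (sketch assembled from the hunk context; the cpu_relax() between the toggles is an assumption):

	/*
	 * Wait for MNT_WRITE_HOLD with preemption enabled, so that a
	 * preempted mnt_make_readonly() caller can run and clear the
	 * flag instead of deadlocking against us.
	 */
	while (mnt->mnt.mnt_flags & MNT_WRITE_HOLD) {
		preempt_enable();
		cpu_relax();
		preempt_disable();
	}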


@@ -1,7 +1,6 @@
From 8cd9691b9af403d1373b92eaeaa06ba645cb3e26 Mon Sep 17 00:00:00 2001
From: Mike Galbraith <efault@gmx.de>
Date: Fri, 3 Jul 2009 08:44:12 -0500
Subject: [200/256] fs: ntfs: disable interrupt only on !RT
Subject: fs: ntfs: disable interrupt only on !RT
On Sat, 2007-10-27 at 11:44 +0200, Ingo Molnar wrote:
> * Nick Piggin <nickpiggin@yahoo.com.au> wrote:
@@ -35,15 +34,16 @@ instead?
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
fs/ntfs/aops.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 4e4ca73..f5d4565 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -144,13 +144,13 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
Index: linux-stable/fs/ntfs/aops.c
===================================================================
--- linux-stable.orig/fs/ntfs/aops.c
+++ linux-stable/fs/ntfs/aops.c
@@ -144,13 +144,13 @@ static void ntfs_end_buffer_async_read(s
recs = PAGE_CACHE_SIZE / rec_size;
/* Should have been verified before we got here... */
BUG_ON(!recs);
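
The _nort variants substituted here behave as follows; this is a sketch matching how the rt series defines them elsewhere, so treat it as illustrative rather than the exact header text:

#ifdef CONFIG_PREEMPT_RT_FULL
/* On -rt the lock taken under these is sleepable; leave irqs enabled. */
# define local_irq_save_nort(flags)	do { local_save_flags(flags); } while (0)
# define local_irq_restore_nort(flags)	do { (void)(flags); } while (0)
#else
# define local_irq_save_nort(flags)	local_irq_save(flags)
# define local_irq_restore_nort(flags)	local_irq_restore(flags)
#endif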


@@ -1,7 +1,6 @@
From 050653e6f4bca360d267b8e6cfbc85184bcc89bf Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 18 Mar 2011 09:18:52 +0100
Subject: [077/256] buffer_head: Replace bh_uptodate_lock for -rt
Subject: buffer_head: Replace bh_uptodate_lock for -rt
Wrap the bit_spin_lock calls into a separate inline and add the RT
replacements with a real spinlock.
@ -13,11 +12,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/buffer_head.h | 34 ++++++++++++++++++++++++++++++++++
3 files changed, 44 insertions(+), 21 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index ad5938c..38956b5 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -281,8 +281,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
Index: linux-stable/fs/buffer.c
===================================================================
--- linux-stable.orig/fs/buffer.c
+++ linux-stable/fs/buffer.c
@@ -281,8 +281,7 @@ static void end_buffer_async_read(struct
* decide that the page is now completely done.
*/
first = page_buffers(page);
@@ -27,7 +26,7 @@ index ad5938c..38956b5 100644
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
@@ -295,8 +294,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
@@ -295,8 +294,7 @@ static void end_buffer_async_read(struct
}
tmp = tmp->b_this_page;
} while (tmp != bh);
@@ -37,7 +36,7 @@ index ad5938c..38956b5 100644
/*
* If none of the buffers had errors and they are all
@@ -308,9 +306,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
@@ -308,9 +306,7 @@ static void end_buffer_async_read(struct
return;
still_busy:
@@ -48,7 +47,7 @@ index ad5938c..38956b5 100644
}
/*
@@ -344,8 +340,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
@@ -344,8 +340,7 @@ void end_buffer_async_write(struct buffe
}
first = page_buffers(page);
@ -58,7 +57,7 @@ index ad5938c..38956b5 100644
clear_buffer_async_write(bh);
unlock_buffer(bh);
@@ -357,15 +352,12 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
@@ -357,15 +352,12 @@ void end_buffer_async_write(struct buffe
}
tmp = tmp->b_this_page;
}
@ -76,7 +75,7 @@ index ad5938c..38956b5 100644
}
EXPORT_SYMBOL(end_buffer_async_write);
@@ -3187,6 +3179,7 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
@@ -3177,6 +3169,7 @@ struct buffer_head *alloc_buffer_head(gf
struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
if (ret) {
INIT_LIST_HEAD(&ret->b_assoc_buffers);
@@ -84,11 +83,11 @@ index ad5938c..38956b5 100644
preempt_disable();
__this_cpu_inc(bh_accounting.nr);
recalc_bh_state();
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index fa9c05f..4e4ca73 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -108,8 +108,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
Index: linux-stable/fs/ntfs/aops.c
===================================================================
--- linux-stable.orig/fs/ntfs/aops.c
+++ linux-stable/fs/ntfs/aops.c
@@ -108,8 +108,7 @@ static void ntfs_end_buffer_async_read(s
"0x%llx.", (unsigned long long)bh->b_blocknr);
}
first = page_buffers(page);
@@ -98,7 +97,7 @@ index fa9c05f..4e4ca73 100644
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
@@ -124,8 +123,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
@@ -124,8 +123,7 @@ static void ntfs_end_buffer_async_read(s
}
tmp = tmp->b_this_page;
} while (tmp != bh);
@@ -108,7 +107,7 @@ index fa9c05f..4e4ca73 100644
/*
* If none of the buffers had errors then we can set the page uptodate,
* but we first have to perform the post read mst fixups, if the
@@ -160,9 +158,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
@@ -160,9 +158,7 @@ static void ntfs_end_buffer_async_read(s
unlock_page(page);
return;
still_busy:
@@ -119,10 +118,10 @@ index fa9c05f..4e4ca73 100644
}
/**
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 458f497..5c16cf1 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
Index: linux-stable/include/linux/buffer_head.h
===================================================================
--- linux-stable.orig/include/linux/buffer_head.h
+++ linux-stable/include/linux/buffer_head.h
@@ -72,8 +72,42 @@ struct buffer_head {
struct address_space *b_assoc_map; /* mapping this buffer is
associated with */
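
The separate inlines the changelog describes follow this shape (reconstructed sketch; the bit name and the b_uptodate_lock field are taken from the hunks above, the rest is assumption):

static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
{
	unsigned long flags;

#ifndef CONFIG_PREEMPT_RT_BASE
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
#else
	spin_lock_irqsave(&bh->b_uptodate_lock, flags);
#endif
	return flags;
}

static inline void
bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
{
#ifndef CONFIG_PREEMPT_RT_BASE
	bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
	local_irq_restore(flags);
#else
	spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
#endif
}

The end_io paths in buffer.c and ntfs/aops.c shown above then shrink to a single lock/unlock pair each, which is why the diffstat removes more lines than it adds there.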


@@ -1,7 +1,6 @@
From e634e6cb2d7783c3387fae3c98f3f96011802330 Mon Sep 17 00:00:00 2001
Subject: ftrace-crap.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 9 Sep 2011 16:55:53 +0200
Subject: [155/256] ftrace-crap.patch
Date: Fri, 09 Sep 2011 16:55:53 +0200
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
@@ -9,11 +8,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kernel/trace/trace.h | 1 -
2 files changed, 24 insertions(+), 3 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ee744f3..6a00ff4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -414,11 +414,13 @@ EXPORT_SYMBOL_GPL(tracing_is_on);
Index: linux-stable/kernel/trace/trace.c
===================================================================
--- linux-stable.orig/kernel/trace/trace.c
+++ linux-stable/kernel/trace/trace.c
@@ -402,11 +402,13 @@ EXPORT_SYMBOL_GPL(tracing_is_on);
*/
void trace_wake_up(void)
{
@@ -27,7 +26,7 @@ index ee744f3..6a00ff4 100644
}
static int __init set_buf_size(char *str)
@@ -775,6 +777,12 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
@@ -756,6 +758,12 @@ update_max_tr_single(struct trace_array
}
#endif /* CONFIG_TRACER_MAX_TRACE */
@@ -40,7 +39,7 @@ index ee744f3..6a00ff4 100644
/**
* register_tracer - register a tracer with the ftrace system.
* @type - the plugin for the tracer
@@ -3303,6 +3311,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
@@ -3365,6 +3373,7 @@ static int tracing_release_pipe(struct i
return 0;
}
@@ -48,7 +47,7 @@ index ee744f3..6a00ff4 100644
static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
@@ -3324,8 +3333,7 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
@@ -3386,8 +3395,7 @@ tracing_poll_pipe(struct file *filp, pol
}
}
@@ -58,7 +57,7 @@ index ee744f3..6a00ff4 100644
{
DEFINE_WAIT(wait);
@@ -3336,6 +3344,20 @@ void default_wait_pipe(struct trace_iterator *iter)
@@ -3398,6 +3406,20 @@ void default_wait_pipe(struct trace_iter
finish_wait(&trace_wait, &wait);
}
@@ -79,11 +78,11 @@ index ee744f3..6a00ff4 100644
/*
* This is a make-shift waitqueue.
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index f95d65d..fe96b7c 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -354,7 +354,6 @@ void trace_init_global_iter(struct trace_iterator *iter);
Index: linux-stable/kernel/trace/trace.h
===================================================================
--- linux-stable.orig/kernel/trace/trace.h
+++ linux-stable/kernel/trace/trace.h
@@ -367,7 +367,6 @@ void trace_init_global_iter(struct trace
void tracing_iter_reset(struct trace_iterator *iter, int cpu);


@@ -1,7 +1,6 @@
From a270094d07da20d6324798c10bdd3e277fa65af4 Mon Sep 17 00:00:00 2001
Subject: ftrace-migrate-disable-tracing.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 21:56:42 +0200
Subject: [144/256] ftrace-migrate-disable-tracing.patch
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
@@ -11,10 +10,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kernel/trace/trace_output.c | 5 +++++
4 files changed, 14 insertions(+), 4 deletions(-)
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 176a939..14cac32 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
Index: linux-stable/include/linux/ftrace_event.h
===================================================================
--- linux-stable.orig/include/linux/ftrace_event.h
+++ linux-stable/include/linux/ftrace_event.h
@@ -49,7 +49,8 @@ struct trace_entry {
unsigned char flags;
unsigned char preempt_count;
@@ -25,11 +24,11 @@ index 176a939..14cac32 100644
};
#define FTRACE_MAX_EVENT \
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 464a96f..8e79f40 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1179,6 +1179,8 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
Index: linux-stable/kernel/trace/trace.c
===================================================================
--- linux-stable.orig/kernel/trace/trace.c
+++ linux-stable/kernel/trace/trace.c
@@ -1155,6 +1155,8 @@ tracing_generic_entry_update(struct trac
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
@@ -38,7 +37,7 @@ index 464a96f..8e79f40 100644
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
@@ -1937,9 +1939,10 @@ static void print_lat_help_header(struct seq_file *m)
@@ -1980,9 +1982,10 @@ static void print_lat_help_header(struct
seq_puts(m, "# | / _----=> need-resched \n");
seq_puts(m, "# || / _---=> hardirq/softirq \n");
seq_puts(m, "# ||| / _--=> preempt-depth \n");
@@ -52,11 +51,11 @@ index 464a96f..8e79f40 100644
}
static void print_event_info(struct trace_array *tr, struct seq_file *m)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 29111da..4c3a7b3 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -116,6 +116,7 @@ static int trace_define_common_fields(void)
Index: linux-stable/kernel/trace/trace_events.c
===================================================================
--- linux-stable.orig/kernel/trace/trace_events.c
+++ linux-stable/kernel/trace/trace_events.c
@@ -116,6 +116,7 @@ static int trace_define_common_fields(vo
__common_field(unsigned char, flags);
__common_field(unsigned char, preempt_count);
__common_field(int, pid);
@@ -64,11 +63,11 @@ index 29111da..4c3a7b3 100644
__common_field(int, padding);
return ret;
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index df611a0..1b79535 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -593,6 +593,11 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
Index: linux-stable/kernel/trace/trace_output.c
===================================================================
--- linux-stable.orig/kernel/trace/trace_output.c
+++ linux-stable/kernel/trace/trace_output.c
@@ -593,6 +593,11 @@ int trace_print_lat_fmt(struct trace_seq
else
ret = trace_seq_putc(s, '.');


@@ -1,12 +1,11 @@
From a385ed1641702529dd5906ce0fc1ad203538430c Mon Sep 17 00:00:00 2001
From: Steven Rostedt <rostedt@goodmis.org>
Date: Thu, 7 Jun 2012 07:47:03 -0400
Subject: [173/256] futex: Fix bug on when a requeued RT task times out
Subject: futex: Fix bug on when a requeued RT task times out
Requeue with timeout causes a bug with PREEMPT_RT_FULL.
The bug comes from a timed out condition.
TASK 1 TASK 2
------ ------
futex_wait_requeue_pi()
@@ -16,12 +15,13 @@ The bug comes from a timed out condition.
double_lock_hb();
raw_spin_lock(pi_lock);
if (current->pi_blocked_on) {
if (current->pi_blocked_on) {
} else {
current->pi_blocked_on = PI_WAKE_INPROGRESS;
run_spin_unlock(pi_lock);
spin_lock(hb->lock); <-- blocked!
plist_for_each_entry_safe(this) {
rt_mutex_start_proxy_lock();
task_blocks_on_rt_mutex();
@@ -47,16 +47,18 @@ appropriately.
Cc: stable-rt@vger.kernel.org
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/rtmutex.c | 32 +++++++++++++++++++++++++++++++-
kernel/rtmutex_common.h | 1 +
2 files changed, 32 insertions(+), 1 deletion(-)
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 1e98e24..7a96797 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -69,7 +69,8 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
Index: linux-stable/kernel/rtmutex.c
===================================================================
--- linux-stable.orig/kernel/rtmutex.c
+++ linux-stable/kernel/rtmutex.c
@@ -69,7 +69,8 @@ static void fixup_rt_mutex_waiters(struc
static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
{
@@ -66,7 +68,7 @@ index 1e98e24..7a96797 100644
}
/*
@@ -1002,6 +1003,35 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
@@ -1002,6 +1003,35 @@ int rt_mutex_start_proxy_lock(struct rt_
return 1;
}
@@ -89,24 +91,24 @@ index 1e98e24..7a96797 100644
+ * PI_REQUEUE_INPROGRESS, so that if the task is waking up
+ * it will know that we are in the process of requeuing it.
+ */
+ raw_spin_lock(&task->pi_lock);
+ raw_spin_lock_irq(&task->pi_lock);
+ if (task->pi_blocked_on) {
+ raw_spin_unlock(&task->pi_lock);
+ raw_spin_unlock_irq(&task->pi_lock);
+ raw_spin_unlock(&lock->wait_lock);
+ return -EAGAIN;
+ }
+ task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
+ raw_spin_unlock(&task->pi_lock);
+ raw_spin_unlock_irq(&task->pi_lock);
+#endif
+
ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
if (ret && !rt_mutex_owner(lock)) {
diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h
index b43d832..47290ec 100644
--- a/kernel/rtmutex_common.h
+++ b/kernel/rtmutex_common.h
@@ -104,6 +104,7 @@ static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
Index: linux-stable/kernel/rtmutex_common.h
===================================================================
--- linux-stable.orig/kernel/rtmutex_common.h
+++ linux-stable/kernel/rtmutex_common.h
@@ -104,6 +104,7 @@ static inline struct task_struct *rt_mut
* PI-futex support (proxy locking functions, etc.):
*/
#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)


@@ -1,21 +1,21 @@
From 42beaeae2dcd8371ff2e0c229f83a58bc1fc425a Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:30 -0500
Subject: [024/256] generic: Use raw local irq variant for generic cmpxchg
Subject: generic: Use raw local irq variant for generic cmpxchg
No point in tracing those.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/asm-generic/cmpxchg-local.h | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h
index 2533fdd..d8d4c89 100644
--- a/include/asm-generic/cmpxchg-local.h
+++ b/include/asm-generic/cmpxchg-local.h
@@ -21,7 +21,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
Index: linux-stable/include/asm-generic/cmpxchg-local.h
===================================================================
--- linux-stable.orig/include/asm-generic/cmpxchg-local.h
+++ linux-stable/include/asm-generic/cmpxchg-local.h
@@ -21,7 +21,7 @@ static inline unsigned long __cmpxchg_lo
if (size == 8 && sizeof(unsigned long) != 8)
wrong_size_cmpxchg(ptr);
@@ -24,7 +24,7 @@ index 2533fdd..d8d4c89 100644
switch (size) {
case 1: prev = *(u8 *)ptr;
if (prev == old)
@@ -42,7 +42,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
@@ -42,7 +42,7 @@ static inline unsigned long __cmpxchg_lo
default:
wrong_size_cmpxchg(ptr);
}
@@ -33,7 +33,7 @@ index 2533fdd..d8d4c89 100644
return prev;
}
@@ -55,11 +55,11 @@ static inline u64 __cmpxchg64_local_generic(volatile void *ptr,
@@ -55,11 +55,11 @@ static inline u64 __cmpxchg64_local_gene
u64 prev;
unsigned long flags;
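
Condensed to the one-byte case, the converted helper looks like this (sketch; the function name is hypothetical, only the raw_ calls change relative to the original):

static inline u8 cmpxchg_u8_local(volatile u8 *ptr, u8 old, u8 new)
{
	unsigned long flags;
	u8 prev;

	/* raw_ variants bypass the irqsoff tracer: no latency accounting */
	raw_local_irq_save(flags);
	prev = *ptr;
	if (prev == old)
		*ptr = new;
	raw_local_irq_restore(flags);
	return prev;
}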


@@ -1,7 +1,6 @@
From 909da5deefc2d3dcede823c42a91380ea0bec608 Mon Sep 17 00:00:00 2001
Subject: genirq: Add default affinity mask command line option
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 25 May 2012 16:59:47 +0200
Subject: [251/256] genirq: Add default affinity mask command line option
If we isolate CPUs, then we don't want random device interrupts on
them. Even w/o the user space irq balancer enabled we can end up with
@@ -15,11 +14,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kernel/irq/irqdesc.c | 21 +++++++++++++++++++--
2 files changed, 28 insertions(+), 2 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 1e0150e..67d1350 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1126,6 +1126,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Index: linux-stable/Documentation/kernel-parameters.txt
===================================================================
--- linux-stable.orig/Documentation/kernel-parameters.txt
+++ linux-stable/Documentation/kernel-parameters.txt
@@ -1157,6 +1157,15 @@ bytes respectively. Such letter suffixes
See comment before ip2_setup() in
drivers/char/ip2/ip2base.c.
@@ -35,10 +34,10 @@ index 1e0150e..67d1350 100644
irqfixup [HW]
When an interrupt is not handled search all handlers
for it. Intended to get systems with badly broken
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 192a302..473b2b6 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
Index: linux-stable/kernel/irq/irqdesc.c
===================================================================
--- linux-stable.orig/kernel/irq/irqdesc.c
+++ linux-stable/kernel/irq/irqdesc.c
@@ -23,10 +23,27 @@
static struct lock_class_key irq_desc_lock_class;
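
The irqdesc.c hunk is truncated above; the added boot parameter ends up as an early setup hook that fills the default affinity mask, roughly as below (sketch; treat the helper names as assumptions):

static cpumask_var_t irq_default_affinity;

static int __init irq_affinity_setup(char *str)
{
	alloc_bootmem_cpumask_var(&irq_default_affinity);
	cpulist_parse(str, irq_default_affinity);
	/*
	 * Always include the boot cpu, so a stray command line mask
	 * cannot leave us without any cpu to deliver interrupts to.
	 */
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	return 1;
}
__setup("irqaffinity=", irq_affinity_setup);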


@@ -1,21 +1,21 @@
From 21ae6fbad1ee6a562b0b8dd536f6915614e79c5b Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:57 -0500
Subject: [081/256] genirq: disable irqpoll on -rt
Subject: genirq: disable irqpoll on -rt
Creates long latencies for no value
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/irq/spurious.c | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 611cd60..d1c80fa 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -341,6 +341,11 @@ MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
Index: linux-stable/kernel/irq/spurious.c
===================================================================
--- linux-stable.orig/kernel/irq/spurious.c
+++ linux-stable/kernel/irq/spurious.c
@@ -341,6 +341,11 @@ MODULE_PARM_DESC(noirqdebug, "Disable ir
static int __init irqfixup_setup(char *str)
{
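
The hunk stops at the function head; the change simply refuses the option on -rt before the normal setup runs. A sketch of the pattern (the warning text is an assumption):

static int __init irqfixup_setup(char *str)
{
#ifdef CONFIG_PREEMPT_RT_BASE
	printk(KERN_WARNING
	       "irqfixup boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
	return 1;
#endif
	irqfixup = 1;
	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
	printk(KERN_WARNING "This may impact system performance.\n");
	return 1;
}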


@@ -1,7 +1,6 @@
From 314b6e5ffef017ed2493a828cf32f886cb61e850 Mon Sep 17 00:00:00 2001
Subject: genirq-force-threading.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 3 Apr 2011 11:57:29 +0200
Subject: [082/256] genirq-force-threading.patch
Date: Sun, 03 Apr 2011 11:57:29 +0200
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
@@ -9,11 +8,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kernel/irq/manage.c | 2 ++
2 files changed, 8 insertions(+), 2 deletions(-)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index d2cc272..9027bde 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -394,9 +394,13 @@ static inline int disable_irq_wake(unsigned int irq)
Index: linux-stable/include/linux/interrupt.h
===================================================================
--- linux-stable.orig/include/linux/interrupt.h
+++ linux-stable/include/linux/interrupt.h
@@ -388,9 +388,13 @@ static inline int disable_irq_wake(unsig
#ifdef CONFIG_IRQ_FORCED_THREADING
@@ -29,11 +28,11 @@ index d2cc272..9027bde 100644
#endif
#ifndef __ARCH_SET_SOFTIRQ_PENDING
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 89a3ea8..975b3a1 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -18,6 +18,7 @@
Index: linux-stable/kernel/irq/manage.c
===================================================================
--- linux-stable.orig/kernel/irq/manage.c
+++ linux-stable/kernel/irq/manage.c
@@ -21,6 +21,7 @@
#include "internals.h"
#ifdef CONFIG_IRQ_FORCED_THREADING
@@ -41,7 +40,7 @@ index 89a3ea8..975b3a1 100644
__read_mostly bool force_irqthreads;
static int __init setup_forced_irqthreads(char *arg)
@@ -26,6 +27,7 @@ static int __init setup_forced_irqthreads(char *arg)
@@ -29,6 +30,7 @@ static int __init setup_forced_irqthread
return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
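
On the header side the patch pins force_irqthreads to true for -rt, matching the interrupt.h fragments above (sketch):

#ifdef CONFIG_IRQ_FORCED_THREADING
# ifndef CONFIG_PREEMPT_RT_BASE
extern bool force_irqthreads;
# else
#  define force_irqthreads	(true)	/* -rt always threads handlers */
# endif
#else
# define force_irqthreads	(false)
#endif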


@@ -1,18 +1,17 @@
From caa14a79da63821e3ba4076f7c83c41c5873eefc Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 18 Mar 2011 10:22:04 +0100
Subject: [079/256] genirq: Disable DEBUG_SHIRQ for rt
Date: Sun, 18 Mar 2011 10:22:04 +0100
Subject: genirq: Disable DEBUG_SHIRQ for rt
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
lib/Kconfig.debug | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6777153..1d80795 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -151,7 +151,7 @@ config DEBUG_KERNEL
Index: linux-stable/lib/Kconfig.debug
===================================================================
--- linux-stable.orig/lib/Kconfig.debug
+++ linux-stable/lib/Kconfig.debug
@@ -164,7 +164,7 @@ config DEBUG_KERNEL
config DEBUG_SHIRQ
bool "Debug shared IRQ handlers"


@@ -1,11 +1,6 @@
From 1d5690ccc588e836538cfb08ae21f10ce857f488 Mon Sep 17 00:00:00 2001
Subject: hardirq.h: Define softirq_count() as OUL to kill build warning
From: Yong Zhang <yong.zhang0@gmail.com>
Date: Thu, 13 Oct 2011 17:19:09 +0800
Subject: [163/256] hardirq.h: Define softirq_count() as OUL to kill build
warning
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
kernel/lockdep.c: In function 'print_bad_irq_dependency':
kernel/lockdep.c:1476:3: warning: format '%lu' expects type 'long unsigned int', but argument 7 has type 'unsigned int'
@@ -27,10 +22,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/hardirq.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index cbd7c99..318d91e 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
Index: linux-stable/include/linux/hardirq.h
===================================================================
--- linux-stable.orig/include/linux/hardirq.h
+++ linux-stable/include/linux/hardirq.h
@@ -84,7 +84,7 @@
# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)


@@ -1,7 +1,6 @@
From 9831e60c0e5b45404bc3410179206b9fb3e8c3dc Mon Sep 17 00:00:00 2001
Subject: hotplug: Call cpu_unplug_begin() before DOWN_PREPARE
From: Yong Zhang <yong.zhang0@gmail.com>
Date: Sun, 16 Oct 2011 18:56:44 +0800
Subject: [143/256] hotplug: Call cpu_unplug_begin() before DOWN_PREPARE
cpu_unplug_begin() should be called before CPU_DOWN_PREPARE, because
at CPU_DOWN_PREPARE cpu_active is cleared and sched_domain is
@@ -26,11 +25,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kernel/cpu.c | 16 +++++++---------
1 file changed, 7 insertions(+), 9 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index c8aee90..37b2212 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -337,22 +337,20 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
Index: linux-stable/kernel/cpu.c
===================================================================
--- linux-stable.orig/kernel/cpu.c
+++ linux-stable/kernel/cpu.c
@@ -383,22 +383,20 @@ static int __ref _cpu_down(unsigned int
return -EBUSY;
}


@@ -1,7 +1,6 @@
From 9698a154a5ca400da809937bca83bdc7589df9be Mon Sep 17 00:00:00 2001
Subject: hotplug: Lightweight get online cpus
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 15 Jun 2011 12:36:06 +0200
Subject: [138/256] hotplug: Lightweight get online cpus
get_online_cpus() is a heavy weight function which involves a global
mutex. migrate_disable() wants a simpler construct which prevents only
@@ -13,15 +12,15 @@ tasks on the cpu which should be brought down.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/cpu.h | 4 ++
kernel/cpu.c | 127 +++++++++++++++++++++++++++++++++++++++++++++++++--
include/linux/cpu.h | 4 +
kernel/cpu.c | 127 ++++++++++++++++++++++++++++++++++++++++++++++++++--
2 files changed, 128 insertions(+), 3 deletions(-)
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index ee28844..00d2f6f8 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -176,6 +176,8 @@ extern struct bus_type cpu_subsys;
Index: linux-stable/include/linux/cpu.h
===================================================================
--- linux-stable.orig/include/linux/cpu.h
+++ linux-stable/include/linux/cpu.h
@@ -175,6 +175,8 @@ extern struct bus_type cpu_subsys;
extern void get_online_cpus(void);
extern void put_online_cpus(void);
@@ -30,7 +29,7 @@ index ee28844..00d2f6f8 100644
#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
@@ -198,6 +200,8 @@ static inline void cpu_hotplug_driver_unlock(void)
@@ -198,6 +200,8 @@ static inline void cpu_hotplug_driver_un
#define get_online_cpus() do { } while (0)
#define put_online_cpus() do { } while (0)
@@ -39,11 +38,11 @@ index ee28844..00d2f6f8 100644
#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
/* These aren't inline functions due to a GCC bug. */
#define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 2060c6e..8aaa210 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -58,6 +58,102 @@ static struct {
Index: linux-stable/kernel/cpu.c
===================================================================
--- linux-stable.orig/kernel/cpu.c
+++ linux-stable/kernel/cpu.c
@@ -63,6 +63,102 @@ static struct {
.refcount = 0,
};
@@ -146,7 +145,7 @@ index 2060c6e..8aaa210 100644
void get_online_cpus(void)
{
might_sleep();
@@ -210,13 +306,14 @@ static int __ref take_cpu_down(void *_param)
@@ -256,13 +352,14 @@ static int __ref take_cpu_down(void *_pa
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
@ -162,7 +161,7 @@ index 2060c6e..8aaa210 100644
if (num_online_cpus() == 1)
return -EBUSY;
@@ -224,7 +321,20 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
@@ -270,7 +367,20 @@ static int __ref _cpu_down(unsigned int
if (!cpu_online(cpu))
return -EINVAL;
@ -184,7 +183,7 @@ index 2060c6e..8aaa210 100644
err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
if (err) {
@@ -232,7 +342,16 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
@@ -278,7 +388,16 @@ static int __ref _cpu_down(unsigned int
__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
printk("%s: attempt to take down CPU %u failed\n",
__func__, cpu);
@ -202,7 +201,7 @@ index 2060c6e..8aaa210 100644
}
err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
@@ -263,6 +382,8 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
@@ -309,6 +428,8 @@ static int __ref _cpu_down(unsigned int
check_for_tasks(cpu);
out_release:
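
The construct the changelog describes is a per-cpu refcount that only the cpu being unplugged ever has to drain. A simplified sketch of the pair that migrate_disable() pins with (field names follow the hunks above; the slow-path synchronization is summarized in a comment because it does not fit a sketch):

struct hotplug_pcp {
	struct task_struct *unplug;	/* set while this cpu is going down */
	int refcount;
	struct completion synced;
};
static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);

void pin_current_cpu(void)
{
	struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp);

	/*
	 * Fast path: no unplug pending on this cpu, or we already hold
	 * a reference, or we *are* the unplug thread itself.
	 */
	if (!hp->unplug || hp->refcount || hp->unplug == current) {
		hp->refcount++;
		return;
	}
	/*
	 * Slow path (elided): re-enable preemption, block on a hotplug
	 * lock until the unplug has finished, then retry on whatever
	 * cpu the task wakes up on.
	 */
}

void unpin_current_cpu(void)
{
	struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp);

	WARN_ON(hp->refcount <= 0);
	/* The last pinner wakes the waiting unplug thread. */
	if (!--hp->refcount && hp->unplug && hp->unplug != current)
		wake_up_process(hp->unplug);
}

Compared with get_online_cpus(), nothing here touches a global mutex in the fast path, which is the whole point of the patch.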


@@ -1,7 +1,6 @@
From 4210eba03aafc8271a8f97fccfab1ad0893f4086 Mon Sep 17 00:00:00 2001
Subject: hotplug: sync_unplug: No "\n" in task name
From: Yong Zhang <yong.zhang0@gmail.com>
Date: Sun, 16 Oct 2011 18:56:43 +0800
Subject: [139/256] hotplug: sync_unplug: No " " in task name
Otherwise the output will look a little odd.
@@ -12,11 +11,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kernel/cpu.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 8aaa210..da4ed4f 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -138,7 +138,7 @@ static int cpu_unplug_begin(unsigned int cpu)
Index: linux-stable/kernel/cpu.c
===================================================================
--- linux-stable.orig/kernel/cpu.c
+++ linux-stable/kernel/cpu.c
@@ -143,7 +143,7 @@ static int cpu_unplug_begin(unsigned int
struct task_struct *tsk;
init_completion(&hp->synced);


@@ -1,18 +1,17 @@
From 5963228cdaa3c49a09035f1251f15113e0a3dd2c Mon Sep 17 00:00:00 2001
Subject: hotplug-use-migrate-disable.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 19:35:29 +0200
Subject: [142/256] hotplug-use-migrate-disable.patch
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/cpu.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 1e80dff..c8aee90 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -329,14 +329,13 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
Index: linux-stable/kernel/cpu.c
===================================================================
--- linux-stable.orig/kernel/cpu.c
+++ linux-stable/kernel/cpu.c
@@ -375,14 +375,13 @@ static int __ref _cpu_down(unsigned int
cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
set_cpus_allowed_ptr(current, cpumask);
free_cpumask_var(cpumask);
@@ -29,7 +28,7 @@ index 1e80dff..c8aee90 100644
err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
if (err) {
@@ -386,6 +385,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
@@ -432,6 +431,7 @@ static int __ref _cpu_down(unsigned int
out_release:
cpu_unplug_done(cpu);
out_cancel:


@@ -0,0 +1,49 @@
From: Yong Zhang <yong.zhang0@gmail.com>
Subject: hrtimer: Add missing debug_activate() aid
Date: Thu, 13 Oct 2011 15:52:30 +0800
It will fix below warning, which is also reported by Fernando:
[ 7.616090] ------------[ cut here ]------------
[ 7.616093] WARNING: at kernel/hrtimer.c:391 hrtimer_fixup_activate+0x27/0x50()
[ 7.616094] Hardware name: OptiPlex 755
[ 7.616096] Modules linked in:
[ 7.616099] Pid: 0, comm: kworker/0:0 Tainted: G W 3.0.6-rt17-00284-g9d73a61 #15
[ 7.616100] Call Trace:
[ 7.616103] [<c014d9a2>] warn_slowpath_common+0x72/0xa0
[ 7.616106] [<c0175417>] ? hrtimer_fixup_activate+0x27/0x50
[ 7.616109] [<c0175417>] ? hrtimer_fixup_activate+0x27/0x50
[ 7.616112] [<c014d9f2>] warn_slowpath_null+0x22/0x30
[ 7.616115] [<c0175417>] hrtimer_fixup_activate+0x27/0x50
[ 7.616118] [<c03b3ab0>] debug_object_activate+0x100/0x130
[ 7.616121] [<c0176b96>] ? hrtimer_start_range_ns+0x26/0x30
[ 7.616123] [<c0175a59>] enqueue_hrtimer+0x19/0x100
[ 7.616126] [<c0176b96>] ? hrtimer_start_range_ns+0x26/0x30
[ 7.616129] [<c0176744>] __hrtimer_start_range_ns+0x144/0x540
[ 7.616132] [<c072705a>] ? _raw_spin_unlock_irqrestore+0x3a/0x80
[ 7.616136] [<c0176b96>] hrtimer_start_range_ns+0x26/0x30
[ 7.616139] [<c01852b5>] tick_nohz_restart_sched_tick+0x185/0x1b0
[ 7.616142] [<c0101878>] cpu_idle+0x98/0xc0
[ 7.616146] [<c071fcd8>] start_secondary+0x1d3/0x1da
[ 7.616148] ---[ end trace 0000000000000003 ]---
Reported-by: Fernando Lopez-Lezcano <nando@ccrma.stanford.edu>
Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
Link: http://lkml.kernel.org/r/20111013075230.GA2740@zhy
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/hrtimer.c | 1 +
1 file changed, 1 insertion(+)
Index: linux-stable/kernel/hrtimer.c
===================================================================
--- linux-stable.orig/kernel/hrtimer.c
+++ linux-stable/kernel/hrtimer.c
@@ -1063,6 +1063,7 @@ int __hrtimer_start_range_ns(struct hrti
* remove it again and report a failure. This avoids
* stale base->first entries.
*/
+ debug_deactivate(timer);
__remove_hrtimer(timer, new_base,
timer->state & HRTIMER_STATE_CALLBACK, 0);
}


@@ -1,18 +1,17 @@
From 251da2a935bd80dc09d674a1c0466f03ada7dfac Mon Sep 17 00:00:00 2001
Subject: hrtimer-fix-reprogram-madness.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 14 Sep 2011 14:48:43 +0200
Subject: [119/256] hrtimer-fix-reprogram-madness.patch
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/hrtimer.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index d363df8..3991464 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1317,7 +1317,11 @@ static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer,
Index: linux-stable/kernel/hrtimer.c
===================================================================
--- linux-stable.orig/kernel/hrtimer.c
+++ linux-stable/kernel/hrtimer.c
@@ -1338,7 +1338,11 @@ static void hrtimer_rt_reprogram(int res
if (!enqueue_hrtimer(timer, base))
return;
@@ -25,7 +24,7 @@ index d363df8..3991464 100644
goto requeue;
} else if (hrtimer_active(timer)) {
@@ -1326,6 +1330,7 @@ static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer,
@@ -1347,6 +1351,7 @@ static void hrtimer_rt_reprogram(int res
* the event device.
*/
if (&timer->node == base->active.next &&
@@ -33,7 +32,7 @@ index d363df8..3991464 100644
hrtimer_reprogram(timer, base))
goto requeue;
}
@@ -1338,6 +1343,7 @@ requeue:
@@ -1359,6 +1364,7 @@ requeue:
*/
__remove_hrtimer(timer, base, timer->state, 0);
list_add_tail(&timer->cb_entry, &base->expired);


@@ -1,7 +1,6 @@
From b852214ab58a1fe26b51298ed91c0a7ed3dd465d Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 3 Jul 2009 08:44:31 -0500
Subject: [116/256] hrtimer: fixup hrtimer callback changes for preempt-rt
Subject: hrtimer: fixup hrtimer callback changes for preempt-rt
In preempt-rt we can not call the callbacks which take sleeping locks
from the timer interrupt context.
@@ -11,29 +10,30 @@ delivery problem for real.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
include/linux/hrtimer.h | 3 +
kernel/hrtimer.c | 190 +++++++++++++++++++++++++++++++++++++++++-----
kernel/sched/core.c | 1 +
kernel/sched/rt.c | 1 +
kernel/time/tick-sched.c | 1 +
kernel/watchdog.c | 1 +
6 files changed, 179 insertions(+), 18 deletions(-)
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index e8b395d..0e37086 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
---
include/linux/hrtimer.h | 3
kernel/hrtimer.c | 196 ++++++++++++++++++++++++++++++++++++++++++-----
kernel/sched/core.c | 1
kernel/sched/rt.c | 1
kernel/time/tick-sched.c | 1
kernel/watchdog.c | 1
6 files changed, 183 insertions(+), 20 deletions(-)
Index: linux-stable/include/linux/hrtimer.h
===================================================================
--- linux-stable.orig/include/linux/hrtimer.h
+++ linux-stable/include/linux/hrtimer.h
@@ -111,6 +111,8 @@ struct hrtimer {
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
unsigned long state;
+ struct list_head cb_entry;
+ int irqsafe;
#ifdef CONFIG_TIMER_STATS
int start_pid;
void *start_site;
@@ -147,6 +149,7 @@ struct hrtimer_clock_base {
#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
ktime_t praecox;
#endif
@@ -150,6 +152,7 @@ struct hrtimer_clock_base {
int index;
clockid_t clockid;
struct timerqueue_head active;
@@ -41,11 +41,11 @@ index e8b395d..0e37086 100644
ktime_t resolution;
ktime_t (*get_time)(void);
ktime_t softirq_time;
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 905e2cd2..1dd627b 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -589,8 +589,7 @@ static int hrtimer_reprogram(struct hrtimer *timer,
Index: linux-stable/kernel/hrtimer.c
===================================================================
--- linux-stable.orig/kernel/hrtimer.c
+++ linux-stable/kernel/hrtimer.c
@@ -589,8 +589,7 @@ static int hrtimer_reprogram(struct hrti
* When the callback is running, we do not reprogram the clock event
* device. The timer callback is either running on a different CPU or
* the callback is executed in the hrtimer_interrupt context. The
@@ -55,7 +55,7 @@ index 905e2cd2..1dd627b 100644
*/
if (hrtimer_callback_running(timer))
return 0;
@@ -625,6 +624,9 @@ static int hrtimer_reprogram(struct hrtimer *timer,
@@ -625,6 +624,9 @@ static int hrtimer_reprogram(struct hrti
return res;
}
@@ -65,13 +65,13 @@ index 905e2cd2..1dd627b 100644
/*
* Initialize the high resolution related parts of cpu_base
*/
@@ -644,7 +646,29 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
@@ -644,7 +646,29 @@ static inline int hrtimer_enqueue_reprog
struct hrtimer_clock_base *base,
int wakeup)
{
+#ifdef CONFIG_PREEMPT_RT_BASE
+again:
+ if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
+ /*
+ * Move softirq based timers away from the rbtree in
+ * case it expired already. Otherwise we would have a
@ -90,12 +90,12 @@ index 905e2cd2..1dd627b 100644
+ return 1;
+ }
+#else
if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
+ if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
+#endif
if (wakeup) {
raw_spin_unlock(&base->cpu_base->lock);
raise_softirq_irqoff(HRTIMER_SOFTIRQ);
@@ -733,6 +757,11 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
@@ -743,6 +767,11 @@ static inline int hrtimer_enqueue_reprog
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void retrigger_next_event(void *arg) { }
@ -107,7 +107,7 @@ index 905e2cd2..1dd627b 100644
#endif /* CONFIG_HIGH_RES_TIMERS */
@@ -864,9 +893,9 @@ void hrtimer_wait_for_timer(const struct hrtimer *timer)
@@ -874,9 +903,9 @@ void hrtimer_wait_for_timer(const struct
{
struct hrtimer_clock_base *base = timer->base;
@ -119,7 +119,7 @@ index 905e2cd2..1dd627b 100644
}
#else
@@ -916,6 +945,11 @@ static void __remove_hrtimer(struct hrtimer *timer,
@@ -926,6 +955,11 @@ static void __remove_hrtimer(struct hrti
if (!(timer->state & HRTIMER_STATE_ENQUEUED))
goto out;
@ -131,7 +131,7 @@ index 905e2cd2..1dd627b 100644
next_timer = timerqueue_getnext(&base->active);
timerqueue_del(&base->active, &timer->node);
if (&timer->node == next_timer) {
@@ -1178,6 +1212,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
@@ -1199,6 +1233,7 @@ static void __hrtimer_init(struct hrtime
base = hrtimer_clockid_to_base(clock_id);
timer->base = &cpu_base->clock_base[base];
@ -139,7 +139,7 @@ index 905e2cd2..1dd627b 100644
timerqueue_init(&timer->node);
#ifdef CONFIG_TIMER_STATS
@@ -1261,10 +1296,118 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
@@ -1282,10 +1317,118 @@ static void __run_hrtimer(struct hrtimer
timer->state &= ~HRTIMER_STATE_CALLBACK;
}
@@ -260,7 +260,7 @@ index 905e2cd2..1dd627b 100644
/*
* High resolution timer interrupt
* Called with interrupts disabled
@@ -1273,7 +1416,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
@@ -1294,7 +1437,7 @@ void hrtimer_interrupt(struct clock_even
{
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
ktime_t expires_next, now, entry_time, delta;
@ -269,7 +269,7 @@ index 905e2cd2..1dd627b 100644
BUG_ON(!cpu_base->hres_active);
cpu_base->nr_events++;
@@ -1340,7 +1483,10 @@ retry:
@@ -1361,7 +1504,10 @@ retry:
break;
}
@ -281,7 +281,7 @@ index 905e2cd2..1dd627b 100644
}
}
@@ -1355,6 +1501,10 @@ retry:
@@ -1376,6 +1522,10 @@ retry:
if (expires_next.tv64 == KTIME_MAX ||
!tick_program_event(expires_next, 0)) {
cpu_base->hang_detected = 0;
@ -292,30 +292,41 @@ index 905e2cd2..1dd627b 100644
return;
}
@@ -1430,17 +1580,17 @@ void hrtimer_peek_ahead_timers(void)
@@ -1456,24 +1606,26 @@ void hrtimer_peek_ahead_timers(void)
local_irq_restore(flags);
}
-static void run_hrtimer_softirq(struct softirq_action *h)
-{
- hrtimer_peek_ahead_timers();
-}
-
#else /* CONFIG_HIGH_RES_TIMERS */
static inline void __hrtimer_peek_ahead_timers(void) { }
#endif /* !CONFIG_HIGH_RES_TIMERS */
+static void run_hrtimer_softirq(struct softirq_action *h)
+{
+ hrtimer_rt_run_pending();
+}
+#else /* CONFIG_HIGH_RES_TIMERS */
+
+static inline void __hrtimer_peek_ahead_timers(void) { }
+
+#endif /* !CONFIG_HIGH_RES_TIMERS */
+
static void run_hrtimer_softirq(struct softirq_action *h)
{
+#ifdef CONFIG_HIGH_RES_TIMERS
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
if (cpu_base->clock_was_set) {
cpu_base->clock_was_set = 0;
clock_was_set();
}
+#endif
- hrtimer_peek_ahead_timers();
+ hrtimer_rt_run_pending();
}
-#else /* CONFIG_HIGH_RES_TIMERS */
-
-static inline void __hrtimer_peek_ahead_timers(void) { }
-
-#endif /* !CONFIG_HIGH_RES_TIMERS */
-
/*
* Called from timer softirq every jiffy, expire hrtimers:
*
@@ -1473,7 +1623,7 @@ void hrtimer_run_queues(void)
@@ -1506,7 +1658,7 @@ void hrtimer_run_queues(void)
struct timerqueue_node *node;
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
struct hrtimer_clock_base *base;
@ -324,7 +335,7 @@ index 905e2cd2..1dd627b 100644
if (hrtimer_hres_active())
return;
@@ -1498,12 +1648,16 @@ void hrtimer_run_queues(void)
@@ -1531,12 +1683,16 @@ void hrtimer_run_queues(void)
hrtimer_get_expires_tv64(timer))
break;
@ -343,7 +354,7 @@ index 905e2cd2..1dd627b 100644
}
/*
@@ -1525,6 +1679,7 @@ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
@@ -1558,6 +1714,7 @@ static enum hrtimer_restart hrtimer_wake
void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
sl->timer.function = hrtimer_wakeup;
@ -351,7 +362,7 @@ index 905e2cd2..1dd627b 100644
sl->task = task;
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
@@ -1663,6 +1818,7 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
@@ -1696,6 +1853,7 @@ static void __cpuinit init_hrtimers_cpu(
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
cpu_base->clock_base[i].cpu_base = cpu_base;
timerqueue_init_head(&cpu_base->clock_base[i].active);
@ -359,7 +370,7 @@ index 905e2cd2..1dd627b 100644
}
hrtimer_init_hres(cpu_base);
@@ -1781,9 +1937,7 @@ void __init hrtimers_init(void)
@@ -1814,9 +1972,7 @@ void __init hrtimers_init(void)
hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
(void *)(long)smp_processor_id());
register_cpu_notifier(&hrtimers_nb);
@ -369,11 +380,11 @@ index 905e2cd2..1dd627b 100644
}
/**
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 83a36dd..bf42b7d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -480,6 +480,7 @@ static void init_rq_hrtick(struct rq *rq)
Index: linux-stable/kernel/sched/core.c
===================================================================
--- linux-stable.orig/kernel/sched/core.c
+++ linux-stable/kernel/sched/core.c
@@ -480,6 +480,7 @@ static void init_rq_hrtick(struct rq *rq
hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rq->hrtick_timer.function = hrtick;
@@ -381,11 +392,11 @@ index 83a36dd..bf42b7d 100644
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 44af55e..8bb9f00 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -41,6 +41,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
Index: linux-stable/kernel/sched/rt.c
===================================================================
--- linux-stable.orig/kernel/sched/rt.c
+++ linux-stable/kernel/sched/rt.c
@@ -41,6 +41,7 @@ void init_rt_bandwidth(struct rt_bandwid
hrtimer_init(&rt_b->rt_period_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@ -393,11 +404,11 @@ index 44af55e..8bb9f00 100644
rt_b->rt_period_timer.function = sched_rt_period_timer;
}
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index a607a7c..5d409b0 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -840,6 +840,7 @@ void tick_setup_sched_timer(void)
Index: linux-stable/kernel/time/tick-sched.c
===================================================================
--- linux-stable.orig/kernel/time/tick-sched.c
+++ linux-stable/kernel/time/tick-sched.c
@@ -873,6 +873,7 @@ void tick_setup_sched_timer(void)
* Emulate tick processing via per-CPU hrtimers:
*/
hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
@ -405,11 +416,11 @@ index a607a7c..5d409b0 100644
ts->sched_timer.function = tick_sched_timer;
/* Get the next period (per cpu) */
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index de08263..87192eb 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -441,6 +441,7 @@ static void watchdog_prepare_cpu(int cpu)
Index: linux-stable/kernel/watchdog.c
===================================================================
--- linux-stable.orig/kernel/watchdog.c
+++ linux-stable/kernel/watchdog.c
@@ -470,6 +470,7 @@ static void watchdog_prepare_cpu(int cpu
WARN_ON(per_cpu(softlockup_watchdog, cpu));
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer->function = watchdog_timer_fn;
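
The core of the softirq split is visible in the hunks above: timers flagged irqsafe still expire from hard interrupt context, everything else is parked on base->expired and run from HRTIMER_SOFTIRQ. Condensed into one helper (sketch reconstructed from the fragments; the call to it appears in the enqueue path above):

static int hrtimer_rt_defer(struct hrtimer *timer)
{
	if (timer->irqsafe)
		return 0;	/* run the callback from the interrupt */

	/* Park the timer for the softirq; see hrtimer_rt_run_pending() */
	__remove_hrtimer(timer, timer->base, timer->state, 0);
	list_add_tail(&timer->cb_entry, &timer->base->expired);
	return 1;
}

The per-user hunks in sched/core.c, sched/rt.c, tick-sched.c and watchdog.c then do nothing more than set .irqsafe = 1 on the timers that are known to be safe in hard interrupt context.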
