[rt] bump to 3.6.3-rt8

svn path=/dists/trunk/linux/; revision=19460
Uwe Kleine-König 2012-10-28 21:24:48 +00:00
parent 6baf5be73e
commit 543d408ab2
6 changed files with 300 additions and 2 deletions

debian/changelog

@@ -10,7 +10,7 @@ linux (3.6.3-1~experimental.1) UNRELEASED; urgency=low
* aufs: Update to aufs3.x-rcN-20120827
[ Uwe Kleine-König ]
* reenable the rt featureset using 3.6.3-rt6
* reenable the rt featureset using 3.6.3-rt8
-- Bastian Blank <waldi@debian.org> Thu, 04 Oct 2012 17:50:39 +0200

debian/patches/features/all/rt/cpufreq-powernow-k8-fix-bogus-smp-processor-id-usage.patch

@@ -0,0 +1,79 @@
Subject: cpufreq: powernow-k8: Remove bogus smp_processor_id() usage
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 27 Oct 2012 12:26:44 +0200
commit 6889125b (cpufreq/powernow-k8: workqueue user shouldn't migrate
the kworker to another CPU) has a broken optimization of calling
powernowk8_target_fn() directly from powernowk8_target() which
results in the following splat:
[ 11.789468] BUG: using smp_processor_id() in preemptible [00000000] code:
modprobe/505
[ 11.809594] caller is powernowk8_target+0x20/0x48 [powernow_k8]
[ 12.001748] Pid: 505, comm: modprobe Not tainted 3.6.3 #3
[ 12.016836] Call Trace:
[ 12.025971] [<ffffffff81241554>] debug_smp_processor_id+0xcc/0xe8
[ 12.042518] [<ffffffffa05bb07f>] powernowk8_target+0x20/0x48 [powernow_k8]
[ 12.060733] [<ffffffff813b3c23>] __cpufreq_driver_target+0x82/0x8a
[ 12.077550] [<ffffffff813b64a9>] cpufreq_governor_userspace+0x265/0x2c0
[ 12.120378] [<ffffffff81063c5c>] ? __blocking_notifier_call_chain+0x56/0x60
[ 12.138862] [<ffffffff813b3d8b>] __cpufreq_governor+0x8c/0xc9
[ 12.155193] [<ffffffff813b4031>] __cpufreq_set_policy+0x212/0x21e
[ 12.172148] [<ffffffff813b501e>] cpufreq_add_dev_interface+0x2a2/0x2bc
[ 12.189855] [<ffffffff813b602b>] ? cpufreq_update_policy+0x124/0x124
[ 12.207096] [<ffffffff813b54dc>] cpufreq_add_dev+0x4a4/0x4b4
[ 12.223161] [<ffffffff812f8136>] subsys_interface_register+0x95/0xc5
[ 12.240386] [<ffffffff8149aaf9>] ? _raw_spin_lock_irqsave+0x24/0x46
[ 12.257477] [<ffffffff813b5928>] cpufreq_register_driver+0xd2/0x1bf
[ 12.274545] [<ffffffffa05bc087>] powernowk8_init+0x193/0x1dc [powernow_k8]
[ 12.292794] [<ffffffffa05bbef4>] ? powernowk8_cpu_init+0xc53/0xc53 [powernow_k8]
[ 12.312004] [<ffffffff81002195>] do_one_initcall+0x7f/0x136
[ 12.327594] [<ffffffff8108f48f>] sys_init_module+0x17b0/0x197e
[ 12.343718] [<ffffffff81249494>] ? ddebug_proc_write+0xde/0xde
[ 12.359767] [<ffffffff8149f639>] system_call_fastpath+0x16/0x1b
This is fully preemptible non cpu bound context though the comment in the
code says:
* Must run on @pol->cpu. cpufreq core is responsible for ensuring
* that we're bound to the current CPU and pol->cpu stays online.
The core only guarantees that pol->cpu stays online, but it has no way
to bind the thread and this needs to be fully preemptible context as
powernowk8_target_fn() calls functions which might sleep.
So the correct solution is to always go through work_on_cpu().
Reported-and-tested-by: Carsten Emde <C.Emde@osadl.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Rafael J. Wysocki <rjw@sisk.pl>
Cc: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: stable@vger.kernel.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/cpufreq/powernow-k8.c | 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)
Index: linux-stable/drivers/cpufreq/powernow-k8.c
===================================================================
--- linux-stable.orig/drivers/cpufreq/powernow-k8.c
+++ linux-stable/drivers/cpufreq/powernow-k8.c
@@ -1224,13 +1224,12 @@ static int powernowk8_target(struct cpuf
.relation = relation };
/*
- * Must run on @pol->cpu. cpufreq core is responsible for ensuring
- * that we're bound to the current CPU and pol->cpu stays online.
+ * Must run on @pol->cpu. We queue it on the target cpu even
+ * if we are currently on the target cpu. This is preemptible
+ * non cpu bound context, so we can't call the target function
+ * directly.
*/
- if (smp_processor_id() == pol->cpu)
- return powernowk8_target_fn(&pta);
- else
- return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
+ return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
}
/* Driver entry point to verify the policy and range of frequencies */
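
For reference, a minimal sketch of the work_on_cpu() pattern the fix relies on (this is not the driver code itself; target_fn, set_target and the struct fields are invented names for illustration):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct target_arg {
        unsigned int cpu;
        unsigned int index;
};

/* Runs on the worker bound to the requested CPU, so it may sleep freely. */
static long target_fn(void *data)
{
        struct target_arg *arg = data;

        /*
         * ... perform the transition for arg->cpu at arg->index:
         * MSR access, mutex_lock(), msleep(), ...
         */
        pr_debug("cpu%u -> pstate index %u\n", arg->cpu, arg->index);
        return 0;
}

static int set_target(unsigned int cpu, unsigned int index)
{
        struct target_arg arg = { .cpu = cpu, .index = index };

        /*
         * Do not call target_fn() directly after checking smp_processor_id():
         * this code runs in preemptible context, so the task can migrate
         * right after the check.  work_on_cpu() queues target_fn() on the
         * worker bound to @cpu and sleeps until it has completed there.
         */
        return work_on_cpu(cpu, target_fn, &arg);
}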


@@ -14,4 +14,4 @@ Index: linux-stable/localversion-rt
--- /dev/null
+++ linux-stable/localversion-rt
@@ -0,0 +1 @@
+-rt6
+-rt8

debian/patches/features/all/rt/mm-enable-slub.patch

@@ -0,0 +1,214 @@
Subject: mm: Enable SLUB for RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 25 Oct 2012 10:32:35 +0100
Make SLUB RT aware and remove the restriction in Kconfig.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
init/Kconfig | 1
mm/slub.c | 64 ++++++++++++++++++++++++++++++++++++-----------------------
2 files changed, 40 insertions(+), 25 deletions(-)
Index: linux-stable/init/Kconfig
===================================================================
--- linux-stable.orig/init/Kconfig
+++ linux-stable/init/Kconfig
@@ -1442,7 +1442,6 @@ config SLAB
config SLUB
bool "SLUB (Unqueued Allocator)"
- depends on !PREEMPT_RT_FULL
help
SLUB is a slab allocator that minimizes cache line usage
instead of managing queues of cached objects (SLAB approach).
Index: linux-stable/mm/slub.c
===================================================================
--- linux-stable.orig/mm/slub.c
+++ linux-stable/mm/slub.c
@@ -31,6 +31,7 @@
#include <linux/fault-inject.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
+#include <linux/locallock.h>
#include <trace/events/kmem.h>
@@ -225,6 +226,8 @@ static inline void stat(const struct kme
#endif
}
+static DEFINE_LOCAL_IRQ_LOCK(slub_lock);
+
/********************************************************************
* Core slab cache functions
*******************************************************************/
@@ -1278,7 +1281,7 @@ static struct page *allocate_slab(struct
flags &= gfp_allowed_mask;
if (flags & __GFP_WAIT)
- local_irq_enable();
+ local_unlock_irq(slub_lock);
flags |= s->allocflags;
@@ -1318,7 +1321,7 @@ static struct page *allocate_slab(struct
}
if (flags & __GFP_WAIT)
- local_irq_disable();
+ local_lock_irq(slub_lock);
if (!page)
return NULL;
@@ -1871,10 +1874,10 @@ redo:
*
* This function must be called with interrupt disabled.
*/
-static void unfreeze_partials(struct kmem_cache *s)
+static void unfreeze_partials(struct kmem_cache *s, unsigned int cpu)
{
struct kmem_cache_node *n = NULL, *n2 = NULL;
- struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
+ struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
struct page *page, *discard_page = NULL;
while ((page = c->partial)) {
@@ -1959,9 +1962,9 @@ int put_cpu_partial(struct kmem_cache *s
* partial array is full. Move the existing
* set to the per node partial list.
*/
- local_irq_save(flags);
- unfreeze_partials(s);
- local_irq_restore(flags);
+ local_lock_irqsave(slub_lock, flags);
+ unfreeze_partials(s, smp_processor_id());
+ local_unlock_irqrestore(slub_lock, flags);
pobjects = 0;
pages = 0;
stat(s, CPU_PARTIAL_DRAIN);
@@ -2002,17 +2005,10 @@ static inline void __flush_cpu_slab(stru
if (c->page)
flush_slab(s, c);
- unfreeze_partials(s);
+ unfreeze_partials(s, cpu);
}
}
-static void flush_cpu_slab(void *d)
-{
- struct kmem_cache *s = d;
-
- __flush_cpu_slab(s, smp_processor_id());
-}
-
static bool has_cpu_slab(int cpu, void *info)
{
struct kmem_cache *s = info;
@@ -2021,10 +2017,29 @@ static bool has_cpu_slab(int cpu, void *
return c->page || c->partial;
}
+#ifndef CONFIG_PREEMPT_RT_FULL
+static void flush_cpu_slab(void *d)
+{
+ struct kmem_cache *s = d;
+
+ __flush_cpu_slab(s, smp_processor_id());
+}
+
static void flush_all(struct kmem_cache *s)
{
on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
}
+#else
+static void flush_all(struct kmem_cache *s)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ if (has_cpu_slab(cpu, s))
+ __flush_cpu_slab(s, cpu);
+ }
+}
+#endif
/*
* Check if the objects in a per cpu structure fit numa
@@ -2201,7 +2216,7 @@ static void *__slab_alloc(struct kmem_ca
struct page *page;
unsigned long flags;
- local_irq_save(flags);
+ local_lock_irqsave(slub_lock, flags);
#ifdef CONFIG_PREEMPT
/*
* We may have been preempted and rescheduled on a different
@@ -2262,7 +2277,7 @@ load_freelist:
VM_BUG_ON(!c->page->frozen);
c->freelist = get_freepointer(s, freelist);
c->tid = next_tid(c->tid);
- local_irq_restore(flags);
+ local_unlock_irqrestore(slub_lock, flags);
return freelist;
new_slab:
@@ -2281,7 +2296,7 @@ new_slab:
if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
slab_out_of_memory(s, gfpflags, node);
- local_irq_restore(flags);
+ local_unlock_irqrestore(slub_lock, flags);
return NULL;
}
@@ -2296,7 +2311,7 @@ new_slab:
deactivate_slab(s, page, get_freepointer(s, freelist));
c->page = NULL;
c->freelist = NULL;
- local_irq_restore(flags);
+ local_unlock_irqrestore(slub_lock, flags);
return freelist;
}
@@ -2488,7 +2503,8 @@ static void __slab_free(struct kmem_cach
* Otherwise the list_lock will synchronize with
* other processors updating the list of slabs.
*/
- spin_lock_irqsave(&n->list_lock, flags);
+ local_spin_lock_irqsave(slub_lock,
+ &n->list_lock, flags);
}
}
@@ -2538,7 +2554,7 @@ static void __slab_free(struct kmem_cach
stat(s, FREE_ADD_PARTIAL);
}
}
- spin_unlock_irqrestore(&n->list_lock, flags);
+ local_spin_unlock_irqrestore(slub_lock, &n->list_lock, flags);
return;
slab_empty:
@@ -2552,7 +2568,7 @@ slab_empty:
/* Slab must be on the full list */
remove_full(s, page);
- spin_unlock_irqrestore(&n->list_lock, flags);
+ local_spin_unlock_irqrestore(slub_lock, &n->list_lock, flags);
stat(s, FREE_SLAB);
discard_slab(s, page);
}
@@ -4002,9 +4018,9 @@ static int __cpuinit slab_cpuup_callback
case CPU_DEAD_FROZEN:
mutex_lock(&slab_mutex);
list_for_each_entry(s, &slab_caches, list) {
- local_irq_save(flags);
+ local_lock_irqsave(slub_lock, flags);
__flush_cpu_slab(s, cpu);
- local_irq_restore(flags);
+ local_unlock_irqrestore(slub_lock, flags);
}
mutex_unlock(&slab_mutex);
break;
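
As a reading aid, here is a minimal sketch of the local-lock conversion applied throughout the hunks above (the demo_* names are invented; DEFINE_LOCAL_IRQ_LOCK and the local_lock_irqsave()/local_unlock_irqrestore() calls are the <linux/locallock.h> API added by the RT patch set, exactly as used in the diff). On !PREEMPT_RT_FULL the local lock compiles down to plain local_irq_save()/local_irq_restore(), so mainline behaviour is unchanged; on PREEMPT_RT_FULL it becomes a per-CPU sleeping lock, which keeps the critical sections preemptible and is what allows the "depends on !PREEMPT_RT_FULL" restriction on SLUB to be dropped.

#include <linux/locallock.h>

static DEFINE_LOCAL_IRQ_LOCK(demo_lock);

static void demo_touch_percpu_state(void)
{
        unsigned long flags;

        /* Was: local_irq_save(flags); */
        local_lock_irqsave(demo_lock, flags);

        /* ... manipulate this CPU's per-CPU slab state ... */

        /* Was: local_irq_restore(flags); */
        local_unlock_irqrestore(demo_lock, flags);
}

The same reasoning explains the flush_all() split in the patch: on RT the per-CPU flush takes sleeping locks and therefore cannot run from the IPI context used by on_each_cpu_cond(), so the RT variant walks the online CPUs and flushes each one directly, which is also why unfreeze_partials() now takes an explicit cpu argument.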


@@ -597,6 +597,8 @@ softirq-split-locks.patch
# Enable full RT
rcu-tiny-solve-rt-mistery.patch
mm-enable-slub.patch
cpufreq-powernow-k8-fix-bogus-smp-processor-id-usage.patch
kconfig-disable-a-few-options-rt.patch
kconfig-preempt-rt-full.patch
#rt-replace-rt-spin-lock-to-raw-one-in-res_counter.patch


@@ -597,5 +597,8 @@ features/all/rt/softirq-split-locks.patch
# Enable full RT
features/all/rt/rcu-tiny-solve-rt-mistery.patch
features/all/rt/mm-enable-slub.patch
features/all/rt/cpufreq-powernow-k8-fix-bogus-smp-processor-id-usage.patch
features/all/rt/kconfig-disable-a-few-options-rt.patch
features/all/rt/kconfig-preempt-rt-full.patch
#rt-replace-rt-spin-lock-to-raw-one-in-res_counter.patch