From 88be48dec93ee2aed8f175a744a007e957578122 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Thu, 17 Nov 2011 07:50:32 +0000 Subject: [PATCH] [amd64] reenable rt featureset with 3.2-rc1-52e4c2a05-rt2 svn path=/dists/trunk/linux-2.6/; revision=18294 --- debian/changelog | 4 + debian/config/amd64/defines | 4 +- debian/config/defines | 2 +- .../all/rt/patch-3.2-rc1-52e4c2a05-rt2.patch | 19675 ++++++++++++++++ .../all/rt/patch-3.2-rc1-52e4c2a05.patch | 10310 ++++++++ debian/patches/series/1~experimental.1-extra | 2 + 6 files changed, 29994 insertions(+), 3 deletions(-) create mode 100644 debian/patches/features/all/rt/patch-3.2-rc1-52e4c2a05-rt2.patch create mode 100644 debian/patches/features/all/rt/patch-3.2-rc1-52e4c2a05.patch create mode 100644 debian/patches/series/1~experimental.1-extra diff --git a/debian/changelog b/debian/changelog index 9dae4d814..e4bb086d9 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,7 +1,11 @@ linux-2.6 (3.2~rc1-1~experimental.1) UNRELEASED; urgency=low + [ Ben Hutchings ] * New upstream release candidate + [ Uwe Kleine-König ] + * [amd64] reenable rt featureset with 3.2-rc1-52e4c2a05-rt2 + -- Ben Hutchings Mon, 14 Nov 2011 15:21:10 +0000 linux-2.6 (3.1.1-1) unstable; urgency=high diff --git a/debian/config/amd64/defines b/debian/config/amd64/defines index 2992e5da1..e69547196 100644 --- a/debian/config/amd64/defines +++ b/debian/config/amd64/defines @@ -1,6 +1,6 @@ [base] -#featuresets: -# rt +featuresets: + rt flavours: amd64 kernel-arch: x86 diff --git a/debian/config/defines b/debian/config/defines index 378696650..ab8a3308d 100644 --- a/debian/config/defines +++ b/debian/config/defines @@ -51,4 +51,4 @@ lilo: lilo (>= 22.8-8.2~) s390-tools: s390-tools (>= 1.8.3-2~) [featureset-rt] -enabled: false +enabled: true diff --git a/debian/patches/features/all/rt/patch-3.2-rc1-52e4c2a05-rt2.patch b/debian/patches/features/all/rt/patch-3.2-rc1-52e4c2a05-rt2.patch new file mode 100644 index 000000000..29a9e645b --- /dev/null +++ b/debian/patches/features/all/rt/patch-3.2-rc1-52e4c2a05-rt2.patch @@ -0,0 +1,19675 @@ +Index: linux-3.2/arch/x86/kernel/apic/apic.c +=================================================================== +--- linux-3.2.orig/arch/x86/kernel/apic/apic.c ++++ linux-3.2/arch/x86/kernel/apic/apic.c +@@ -857,8 +857,8 @@ void __irq_entry smp_apic_timer_interrup + * Besides, if we don't timer interrupts ignore the global + * interrupt lock, which is the WrongThing (tm) to do. + */ +- exit_idle(); + irq_enter(); ++ exit_idle(); + local_apic_timer_interrupt(); + irq_exit(); + +@@ -1790,8 +1790,8 @@ void smp_spurious_interrupt(struct pt_re + { + u32 v; + +- exit_idle(); + irq_enter(); ++ exit_idle(); + /* + * Check if this really is a spurious interrupt and ACK it + * if it is a vectored one. Just in case... +@@ -1827,8 +1827,8 @@ void smp_error_interrupt(struct pt_regs + "Illegal register address", /* APIC Error Bit 7 */ + }; + +- exit_idle(); + irq_enter(); ++ exit_idle(); + /* First tickle the hardware, only then report what went on. 
-- REW */ + v0 = apic_read(APIC_ESR); + apic_write(APIC_ESR, 0); +Index: linux-3.2/arch/x86/kernel/apic/io_apic.c +=================================================================== +--- linux-3.2.orig/arch/x86/kernel/apic/io_apic.c ++++ linux-3.2/arch/x86/kernel/apic/io_apic.c +@@ -2418,8 +2418,8 @@ asmlinkage void smp_irq_move_cleanup_int + unsigned vector, me; + + ack_APIC_irq(); +- exit_idle(); + irq_enter(); ++ exit_idle(); + + me = smp_processor_id(); + for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { +@@ -2518,7 +2518,8 @@ static void ack_apic_level(struct irq_da + irq_complete_move(cfg); + #ifdef CONFIG_GENERIC_PENDING_IRQ + /* If we are moving the irq we need to mask it */ +- if (unlikely(irqd_is_setaffinity_pending(data))) { ++ if (unlikely(irqd_is_setaffinity_pending(data) && ++ !irqd_irq_inprogress(data))) { + do_unmask_irq = 1; + mask_ioapic(cfg); + } +Index: linux-3.2/arch/x86/kernel/cpu/mcheck/therm_throt.c +=================================================================== +--- linux-3.2.orig/arch/x86/kernel/cpu/mcheck/therm_throt.c ++++ linux-3.2/arch/x86/kernel/cpu/mcheck/therm_throt.c +@@ -397,8 +397,8 @@ static void (*smp_thermal_vector)(void) + + asmlinkage void smp_thermal_interrupt(struct pt_regs *regs) + { +- exit_idle(); + irq_enter(); ++ exit_idle(); + inc_irq_stat(irq_thermal_count); + smp_thermal_vector(); + irq_exit(); +Index: linux-3.2/arch/x86/kernel/cpu/mcheck/threshold.c +=================================================================== +--- linux-3.2.orig/arch/x86/kernel/cpu/mcheck/threshold.c ++++ linux-3.2/arch/x86/kernel/cpu/mcheck/threshold.c +@@ -19,8 +19,8 @@ void (*mce_threshold_vector)(void) = def + + asmlinkage void smp_threshold_interrupt(void) + { +- exit_idle(); + irq_enter(); ++ exit_idle(); + inc_irq_stat(irq_threshold_count); + mce_threshold_vector(); + irq_exit(); +Index: linux-3.2/arch/x86/kernel/irq.c +=================================================================== +--- linux-3.2.orig/arch/x86/kernel/irq.c ++++ linux-3.2/arch/x86/kernel/irq.c +@@ -181,8 +181,8 @@ unsigned int __irq_entry do_IRQ(struct p + unsigned vector = ~regs->orig_ax; + unsigned irq; + +- exit_idle(); + irq_enter(); ++ exit_idle(); + + irq = __this_cpu_read(vector_irq[vector]); + +@@ -209,10 +209,10 @@ void smp_x86_platform_ipi(struct pt_regs + + ack_APIC_irq(); + +- exit_idle(); +- + irq_enter(); + ++ exit_idle(); ++ + inc_irq_stat(x86_platform_ipis); + + if (x86_platform_ipi_callback) +Index: linux-3.2/drivers/gpu/drm/drm_irq.c +=================================================================== +--- linux-3.2.orig/drivers/gpu/drm/drm_irq.c ++++ linux-3.2/drivers/gpu/drm/drm_irq.c +@@ -110,10 +110,7 @@ static void vblank_disable_and_save(stru + /* Prevent vblank irq processing while disabling vblank irqs, + * so no updates of timestamps or count can happen after we've + * disabled. Needed to prevent races in case of delayed irq's. +- * Disable preemption, so vblank_time_lock is held as short as +- * possible, even under a kernel with PREEMPT_RT patches. 
+ */ +- preempt_disable(); + spin_lock_irqsave(&dev->vblank_time_lock, irqflags); + + dev->driver->disable_vblank(dev, crtc); +@@ -164,7 +161,6 @@ static void vblank_disable_and_save(stru + clear_vblank_timestamps(dev, crtc); + + spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); +- preempt_enable(); + } + + static void vblank_disable_fn(unsigned long arg) +@@ -889,10 +885,6 @@ int drm_vblank_get(struct drm_device *de + spin_lock_irqsave(&dev->vbl_lock, irqflags); + /* Going from 0->1 means we have to enable interrupts again */ + if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) { +- /* Disable preemption while holding vblank_time_lock. Do +- * it explicitely to guard against PREEMPT_RT kernel. +- */ +- preempt_disable(); + spin_lock_irqsave(&dev->vblank_time_lock, irqflags2); + if (!dev->vblank_enabled[crtc]) { + /* Enable vblank irqs under vblank_time_lock protection. +@@ -912,7 +904,6 @@ int drm_vblank_get(struct drm_device *de + } + } + spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2); +- preempt_enable(); + } else { + if (!dev->vblank_enabled[crtc]) { + atomic_dec(&dev->vblank_refcount[crtc]); +Index: linux-3.2/arch/x86/kernel/kprobes.c +=================================================================== +--- linux-3.2.orig/arch/x86/kernel/kprobes.c ++++ linux-3.2/arch/x86/kernel/kprobes.c +@@ -478,7 +478,6 @@ static void __kprobes setup_singlestep(s + * stepping. + */ + regs->ip = (unsigned long)p->ainsn.insn; +- preempt_enable_no_resched(); + return; + } + #endif +Index: linux-3.2/arch/x86/kernel/hpet.c +=================================================================== +--- linux-3.2.orig/arch/x86/kernel/hpet.c ++++ linux-3.2/arch/x86/kernel/hpet.c +@@ -9,6 +9,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -568,6 +569,30 @@ static void init_one_hpet_msi_clockevent + #define RESERVE_TIMERS 0 + #endif + ++static int __init dmi_disable_hpet_msi(const struct dmi_system_id *d) ++{ ++ hpet_msi_disable = 1; ++ return 0; ++} ++ ++static struct dmi_system_id __initdata dmi_hpet_table[] = { ++ /* ++ * MSI based per cpu timers lose interrupts when intel_idle() ++ * is enabled - independent of the c-state. With idle=poll the ++ * problem cannot be observed. We have no idea yet, whether ++ * this is a W510 specific issue or a general chipset oddity. ++ */ ++ { ++ .callback = dmi_disable_hpet_msi, ++ .ident = "Lenovo W510", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"), ++ }, ++ }, ++ {} ++}; ++ + static void hpet_msi_capability_lookup(unsigned int start_timer) + { + unsigned int id; +@@ -575,6 +600,8 @@ static void hpet_msi_capability_lookup(u + unsigned int num_timers_used = 0; + int i; + ++ dmi_check_system(dmi_hpet_table); ++ + if (hpet_msi_disable) + return; + +Index: linux-3.2/block/blk-core.c +=================================================================== +--- linux-3.2.orig/block/blk-core.c ++++ linux-3.2/block/blk-core.c +@@ -235,7 +235,7 @@ EXPORT_SYMBOL(blk_delay_queue); + **/ + void blk_start_queue(struct request_queue *q) + { +- WARN_ON(!irqs_disabled()); ++ WARN_ON_NONRT(!irqs_disabled()); + + queue_flag_clear(QUEUE_FLAG_STOPPED, q); + __blk_run_queue(q); +@@ -300,7 +300,11 @@ void __blk_run_queue(struct request_queu + { + if (unlikely(blk_queue_stopped(q))) + return; +- ++ /* ++ * q->request_fn() can drop q->queue_lock and reenable ++ * interrupts, but must return with q->queue_lock held and ++ * interrupts disabled. 
++ */ + q->request_fn(q); + } + EXPORT_SYMBOL(__blk_run_queue); +@@ -2742,11 +2746,11 @@ static void queue_unplugged(struct reque + * this lock). + */ + if (from_schedule) { +- spin_unlock(q->queue_lock); ++ spin_unlock_irq(q->queue_lock); + blk_run_queue_async(q); + } else { + __blk_run_queue(q); +- spin_unlock(q->queue_lock); ++ spin_unlock_irq(q->queue_lock); + } + + } +@@ -2772,7 +2776,6 @@ static void flush_plug_callbacks(struct + void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) + { + struct request_queue *q; +- unsigned long flags; + struct request *rq; + LIST_HEAD(list); + unsigned int depth; +@@ -2793,11 +2796,6 @@ void blk_flush_plug_list(struct blk_plug + q = NULL; + depth = 0; + +- /* +- * Save and disable interrupts here, to avoid doing it for every +- * queue lock we have to take. +- */ +- local_irq_save(flags); + while (!list_empty(&list)) { + rq = list_entry_rq(list.next); + list_del_init(&rq->queuelist); +@@ -2810,7 +2808,7 @@ void blk_flush_plug_list(struct blk_plug + queue_unplugged(q, depth, from_schedule); + q = rq->q; + depth = 0; +- spin_lock(q->queue_lock); ++ spin_lock_irq(q->queue_lock); + } + /* + * rq is already accounted, so use raw insert +@@ -2828,8 +2826,6 @@ void blk_flush_plug_list(struct blk_plug + */ + if (q) + queue_unplugged(q, depth, from_schedule); +- +- local_irq_restore(flags); + } + + void blk_finish_plug(struct blk_plug *plug) +Index: linux-3.2/kernel/sched.c +=================================================================== +--- linux-3.2.orig/kernel/sched.c ++++ linux-3.2/kernel/sched.c +@@ -71,6 +71,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -188,6 +189,7 @@ void init_rt_bandwidth(struct rt_bandwid + + hrtimer_init(&rt_b->rt_period_timer, + CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ rt_b->rt_period_timer.irqsafe = 1; + rt_b->rt_period_timer.function = sched_rt_period_timer; + } + +@@ -940,7 +942,11 @@ late_initcall(sched_init_debug); + * Number of tasks to iterate in a single balance run. + * Limited because this is done with IRQs disabled. + */ ++#ifndef CONFIG_PREEMPT_RT_FULL + const_debug unsigned int sysctl_sched_nr_migrate = 32; ++#else ++const_debug unsigned int sysctl_sched_nr_migrate = 8; ++#endif + + /* + * period over which we average the RT time consumption, measured +@@ -1276,6 +1282,7 @@ static void init_rq_hrtick(struct rq *rq + + hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + rq->hrtick_timer.function = hrtick; ++ rq->hrtick_timer.irqsafe = 1; + } + #else /* CONFIG_SCHED_HRTICK */ + static inline void hrtick_clear(struct rq *rq) +@@ -2563,7 +2570,12 @@ static int select_fallback_rq(int cpu, s + printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n", + task_pid_nr(p), p->comm, cpu); + } +- ++ /* ++ * Clear PF_THREAD_BOUND, otherwise we wreckage ++ * migrate_disable/enable. See optimization for ++ * PF_THREAD_BOUND tasks there. ++ */ ++ p->flags &= ~PF_THREAD_BOUND; + return dest_cpu; + } + +@@ -2643,10 +2655,6 @@ static void ttwu_activate(struct rq *rq, + { + activate_task(rq, p, en_flags); + p->on_rq = 1; +- +- /* if a worker is waking up, notify workqueue */ +- if (p->flags & PF_WQ_WORKER) +- wq_worker_waking_up(p, cpu_of(rq)); + } + + /* +@@ -2824,8 +2832,25 @@ try_to_wake_up(struct task_struct *p, un + + smp_wmb(); + raw_spin_lock_irqsave(&p->pi_lock, flags); +- if (!(p->state & state)) ++ if (!(p->state & state)) { ++ /* ++ * The task might be running due to a spinlock sleeper ++ * wakeup. 
Check the saved state and set it to running ++ * if the wakeup condition is true. ++ */ ++ if (!(wake_flags & WF_LOCK_SLEEPER)) { ++ if (p->saved_state & state) ++ p->saved_state = TASK_RUNNING; ++ } + goto out; ++ } ++ ++ /* ++ * If this is a regular wakeup, then we can unconditionally ++ * clear the saved state of a "lock sleeper". ++ */ ++ if (!(wake_flags & WF_LOCK_SLEEPER)) ++ p->saved_state = TASK_RUNNING; + + success = 1; /* we're going to change ->state */ + cpu = task_cpu(p); +@@ -2881,40 +2906,6 @@ out: + } + + /** +- * try_to_wake_up_local - try to wake up a local task with rq lock held +- * @p: the thread to be awakened +- * +- * Put @p on the run-queue if it's not already there. The caller must +- * ensure that this_rq() is locked, @p is bound to this_rq() and not +- * the current task. +- */ +-static void try_to_wake_up_local(struct task_struct *p) +-{ +- struct rq *rq = task_rq(p); +- +- BUG_ON(rq != this_rq()); +- BUG_ON(p == current); +- lockdep_assert_held(&rq->lock); +- +- if (!raw_spin_trylock(&p->pi_lock)) { +- raw_spin_unlock(&rq->lock); +- raw_spin_lock(&p->pi_lock); +- raw_spin_lock(&rq->lock); +- } +- +- if (!(p->state & TASK_NORMAL)) +- goto out; +- +- if (!p->on_rq) +- ttwu_activate(rq, p, ENQUEUE_WAKEUP); +- +- ttwu_do_wakeup(rq, p, 0); +- ttwu_stat(p, smp_processor_id(), 0); +-out: +- raw_spin_unlock(&p->pi_lock); +-} +- +-/** + * wake_up_process - Wake up a specific process + * @p: The process to be woken up. + * +@@ -2931,6 +2922,18 @@ int wake_up_process(struct task_struct * + } + EXPORT_SYMBOL(wake_up_process); + ++/** ++ * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock" ++ * @p: The process to be woken up. ++ * ++ * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate ++ * the nature of the wakeup. ++ */ ++int wake_up_lock_sleeper(struct task_struct *p) ++{ ++ return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER); ++} ++ + int wake_up_state(struct task_struct *p, unsigned int state) + { + return try_to_wake_up(p, state, 0); +@@ -3205,8 +3208,12 @@ static void finish_task_switch(struct rq + finish_lock_switch(rq, prev); + + fire_sched_in_preempt_notifiers(current); ++ /* ++ * We use mmdrop_delayed() here so we don't have to do the ++ * full __mmdrop() when we are the last user. ++ */ + if (mm) +- mmdrop(mm); ++ mmdrop_delayed(mm); + if (unlikely(prev_state == TASK_DEAD)) { + /* + * Remove function-return probe instances associated with this +@@ -4353,6 +4360,126 @@ static inline void schedule_debug(struct + schedstat_inc(this_rq(), sched_count); + } + ++#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP) ++#define MIGRATE_DISABLE_SET_AFFIN (1<<30) /* Can't make a negative */ ++#define migrate_disabled_updated(p) ((p)->migrate_disable & MIGRATE_DISABLE_SET_AFFIN) ++#define migrate_disable_count(p) ((p)->migrate_disable & ~MIGRATE_DISABLE_SET_AFFIN) ++ ++static inline void update_migrate_disable(struct task_struct *p) ++{ ++ const struct cpumask *mask; ++ ++ if (likely(!p->migrate_disable)) ++ return; ++ ++ /* Did we already update affinity? */ ++ if (unlikely(migrate_disabled_updated(p))) ++ return; ++ ++ /* ++ * Since this is always current we can get away with only locking ++ * rq->lock, the ->cpus_allowed value can normally only be changed ++ * while holding both p->pi_lock and rq->lock, but seeing that this ++ * is current, we cannot actually be waking up, so all code that ++ * relies on serialization against p->pi_lock is out of scope. 
++ * ++ * Having rq->lock serializes us against things like ++ * set_cpus_allowed_ptr() that can still happen concurrently. ++ */ ++ mask = tsk_cpus_allowed(p); ++ ++ if (p->sched_class->set_cpus_allowed) ++ p->sched_class->set_cpus_allowed(p, mask); ++ p->rt.nr_cpus_allowed = cpumask_weight(mask); ++ ++ /* Let migrate_enable know to fix things back up */ ++ p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN; ++} ++ ++void migrate_disable(void) ++{ ++ struct task_struct *p = current; ++ ++ if (in_atomic() || p->flags & PF_THREAD_BOUND) { ++#ifdef CONFIG_SCHED_DEBUG ++ p->migrate_disable_atomic++; ++#endif ++ return; ++ } ++ ++#ifdef CONFIG_SCHED_DEBUG ++ WARN_ON_ONCE(p->migrate_disable_atomic); ++#endif ++ ++ preempt_disable(); ++ if (p->migrate_disable) { ++ p->migrate_disable++; ++ preempt_enable(); ++ return; ++ } ++ ++ pin_current_cpu(); ++ p->migrate_disable = 1; ++ preempt_enable(); ++} ++EXPORT_SYMBOL_GPL(migrate_disable); ++ ++void migrate_enable(void) ++{ ++ struct task_struct *p = current; ++ const struct cpumask *mask; ++ unsigned long flags; ++ struct rq *rq; ++ ++ if (in_atomic() || p->flags & PF_THREAD_BOUND) { ++#ifdef CONFIG_SCHED_DEBUG ++ p->migrate_disable_atomic--; ++#endif ++ return; ++ } ++ ++#ifdef CONFIG_SCHED_DEBUG ++ WARN_ON_ONCE(p->migrate_disable_atomic); ++#endif ++ WARN_ON_ONCE(p->migrate_disable <= 0); ++ ++ preempt_disable(); ++ if (migrate_disable_count(p) > 1) { ++ p->migrate_disable--; ++ preempt_enable(); ++ return; ++ } ++ ++ if (unlikely(migrate_disabled_updated(p))) { ++ /* ++ * Undo whatever update_migrate_disable() did, also see there ++ * about locking. ++ */ ++ rq = this_rq(); ++ raw_spin_lock_irqsave(&rq->lock, flags); ++ ++ /* ++ * Clearing migrate_disable causes tsk_cpus_allowed to ++ * show the tasks original cpu affinity. ++ */ ++ p->migrate_disable = 0; ++ mask = tsk_cpus_allowed(p); ++ if (p->sched_class->set_cpus_allowed) ++ p->sched_class->set_cpus_allowed(p, mask); ++ p->rt.nr_cpus_allowed = cpumask_weight(mask); ++ raw_spin_unlock_irqrestore(&rq->lock, flags); ++ } else ++ p->migrate_disable = 0; ++ ++ unpin_current_cpu(); ++ preempt_enable(); ++} ++EXPORT_SYMBOL_GPL(migrate_enable); ++#else ++static inline void update_migrate_disable(struct task_struct *p) { } ++#define migrate_disabled_updated(p) 0 ++#endif ++ + static void put_prev_task(struct rq *rq, struct task_struct *prev) + { + if (prev->on_rq || rq->skip_clock_update < 0) +@@ -4412,6 +4539,8 @@ need_resched: + + raw_spin_lock_irq(&rq->lock); + ++ update_migrate_disable(prev); ++ + switch_count = &prev->nivcsw; + if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { + if (unlikely(signal_pending_state(prev->state, prev))) { +@@ -4419,19 +4548,6 @@ need_resched: + } else { + deactivate_task(rq, prev, DEQUEUE_SLEEP); + prev->on_rq = 0; +- +- /* +- * If a worker went to sleep, notify and ask workqueue +- * whether it wants to wake up a task to maintain +- * concurrency. 
+- */ +- if (prev->flags & PF_WQ_WORKER) { +- struct task_struct *to_wakeup; +- +- to_wakeup = wq_worker_sleeping(prev, cpu); +- if (to_wakeup) +- try_to_wake_up_local(to_wakeup); +- } + } + switch_count = &prev->nvcsw; + } +@@ -4465,15 +4581,23 @@ need_resched: + + post_schedule(rq); + +- preempt_enable_no_resched(); ++ __preempt_enable_no_resched(); + if (need_resched()) + goto need_resched; + } + + static inline void sched_submit_work(struct task_struct *tsk) + { +- if (!tsk->state) ++ if (!tsk->state || tsk_is_pi_blocked(tsk)) + return; ++ ++ /* ++ * If a worker went to sleep, notify and ask workqueue whether ++ * it wants to wake up a task to maintain concurrency. ++ */ ++ if (tsk->flags & PF_WQ_WORKER) ++ wq_worker_sleeping(tsk); ++ + /* + * If we are going to sleep and we have plugged IO queued, + * make sure to submit it to avoid deadlocks. +@@ -4482,15 +4606,37 @@ static inline void sched_submit_work(str + blk_schedule_flush_plug(tsk); + } + ++static inline void sched_update_worker(struct task_struct *tsk) ++{ ++ if (tsk_is_pi_blocked(tsk)) ++ return; ++ ++ if (tsk->flags & PF_WQ_WORKER) ++ wq_worker_running(tsk); ++} ++ + asmlinkage void __sched schedule(void) + { + struct task_struct *tsk = current; + + sched_submit_work(tsk); + __schedule(); ++ sched_update_worker(tsk); + } + EXPORT_SYMBOL(schedule); + ++/** ++ * schedule_preempt_disabled - called with preemption disabled ++ * ++ * Returns with preemption disabled. Note: preempt_count must be 1 ++ */ ++void __sched schedule_preempt_disabled(void) ++{ ++ __preempt_enable_no_resched(); ++ schedule(); ++ preempt_disable(); ++} ++ + #ifdef CONFIG_MUTEX_SPIN_ON_OWNER + + static inline bool owner_running(struct mutex *lock, struct task_struct *owner) +@@ -4555,7 +4701,16 @@ asmlinkage void __sched notrace preempt_ + + do { + add_preempt_count_notrace(PREEMPT_ACTIVE); ++ /* ++ * The add/subtract must not be traced by the function ++ * tracer. But we still want to account for the ++ * preempt off latency tracer. Since the _notrace versions ++ * of add/subtract skip the accounting for latency tracer ++ * we must force it manually. ++ */ ++ start_critical_timings(); + __schedule(); ++ stop_critical_timings(); + sub_preempt_count_notrace(PREEMPT_ACTIVE); + + /* +@@ -4999,6 +5154,24 @@ void rt_mutex_setprio(struct task_struct + + rq = __task_rq_lock(p); + ++ /* ++ * Idle task boosting is a nono in general. There is one ++ * exception, when PREEMPT_RT and NOHZ is active: ++ * ++ * The idle task calls get_next_timer_interrupt() and holds ++ * the timer wheel base->lock on the CPU and another CPU wants ++ * to access the timer (probably to cancel it). We can safely ++ * ignore the boosting request, as the idle CPU runs this code ++ * with interrupts disabled and will complete the lock ++ * protected section without being interrupted. So there is no ++ * real need to boost. ++ */ ++ if (unlikely(p == rq->idle)) { ++ WARN_ON(p != rq->curr); ++ WARN_ON(p->pi_blocked_on); ++ goto out_unlock; ++ } ++ + trace_sched_pi_setprio(p, prio); + oldprio = p->prio; + prev_class = p->sched_class; +@@ -5022,11 +5195,10 @@ void rt_mutex_setprio(struct task_struct + enqueue_task(rq, p, oldprio < prio ? 
ENQUEUE_HEAD : 0); + + check_class_changed(rq, p, prev_class, oldprio); ++out_unlock: + __task_rq_unlock(rq); + } +- + #endif +- + void set_user_nice(struct task_struct *p, long nice) + { + int old_prio, delta, on_rq; +@@ -5706,7 +5878,7 @@ SYSCALL_DEFINE0(sched_yield) + __release(rq->lock); + spin_release(&rq->lock.dep_map, 1, _THIS_IP_); + do_raw_spin_unlock(&rq->lock); +- preempt_enable_no_resched(); ++ __preempt_enable_no_resched(); + + schedule(); + +@@ -5720,9 +5892,17 @@ static inline int should_resched(void) + + static void __cond_resched(void) + { +- add_preempt_count(PREEMPT_ACTIVE); +- __schedule(); +- sub_preempt_count(PREEMPT_ACTIVE); ++ do { ++ add_preempt_count(PREEMPT_ACTIVE); ++ __schedule(); ++ sub_preempt_count(PREEMPT_ACTIVE); ++ /* ++ * Check again in case we missed a preemption ++ * opportunity between schedule and now. ++ */ ++ barrier(); ++ ++ } while (need_resched()); + } + + int __sched _cond_resched(void) +@@ -5763,6 +5943,7 @@ int __cond_resched_lock(spinlock_t *lock + } + EXPORT_SYMBOL(__cond_resched_lock); + ++#ifndef CONFIG_PREEMPT_RT_FULL + int __sched __cond_resched_softirq(void) + { + BUG_ON(!in_softirq()); +@@ -5776,6 +5957,7 @@ int __sched __cond_resched_softirq(void) + return 0; + } + EXPORT_SYMBOL(__cond_resched_softirq); ++#endif + + /** + * yield - yield the current processor to other threads. +@@ -6099,6 +6281,9 @@ void __cpuinit init_idle(struct task_str + */ + idle->sched_class = &idle_sched_class; + ftrace_graph_init_idle_task(idle, cpu); ++#if defined(CONFIG_SMP) ++ sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); ++#endif + } + + /* +@@ -6151,11 +6336,12 @@ static inline void sched_init_granularit + #ifdef CONFIG_SMP + void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) + { +- if (p->sched_class && p->sched_class->set_cpus_allowed) +- p->sched_class->set_cpus_allowed(p, new_mask); +- ++ if (!migrate_disabled_updated(p)) { ++ if (p->sched_class && p->sched_class->set_cpus_allowed) ++ p->sched_class->set_cpus_allowed(p, new_mask); ++ p->rt.nr_cpus_allowed = cpumask_weight(new_mask); ++ } + cpumask_copy(&p->cpus_allowed, new_mask); +- p->rt.nr_cpus_allowed = cpumask_weight(new_mask); + } + + /* +@@ -6206,7 +6392,7 @@ int set_cpus_allowed_ptr(struct task_str + do_set_cpus_allowed(p, new_mask); + + /* Can the task run on the task's current CPU? If so, we're done */ +- if (cpumask_test_cpu(task_cpu(p), new_mask)) ++ if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p)) + goto out; + + dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); +@@ -6295,6 +6481,8 @@ static int migration_cpu_stop(void *data + + #ifdef CONFIG_HOTPLUG_CPU + ++static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm); ++ + /* + * Ensures that the idle task is using init_mm right before its cpu goes + * offline. +@@ -6307,7 +6495,12 @@ void idle_task_exit(void) + + if (mm != &init_mm) + switch_mm(mm, &init_mm, current); +- mmdrop(mm); ++ ++ /* ++ * Defer the cleanup to an alive cpu. On RT we can neither ++ * call mmdrop() nor mmdrop_delayed() from here. 
++ */ ++ per_cpu(idle_last_mm, smp_processor_id()) = mm; + } + + /* +@@ -6652,6 +6845,12 @@ migration_call(struct notifier_block *nf + migrate_nr_uninterruptible(rq); + calc_global_load_remove(rq); + break; ++ case CPU_DEAD: ++ if (per_cpu(idle_last_mm, cpu)) { ++ mmdrop(per_cpu(idle_last_mm, cpu)); ++ per_cpu(idle_last_mm, cpu) = NULL; ++ } ++ break; + #endif + } + +@@ -8358,7 +8557,8 @@ void __init sched_init(void) + #ifdef CONFIG_DEBUG_ATOMIC_SLEEP + static inline int preempt_count_equals(int preempt_offset) + { +- int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth(); ++ int nested = (preempt_count() & ~PREEMPT_ACTIVE) + ++ sched_rcu_preempt_depth(); + + return (nested == preempt_offset); + } +Index: linux-3.2/kernel/workqueue.c +=================================================================== +--- linux-3.2.orig/kernel/workqueue.c ++++ linux-3.2/kernel/workqueue.c +@@ -41,6 +41,7 @@ + #include + #include + #include ++#include + + #include "workqueue_sched.h" + +@@ -57,20 +58,10 @@ enum { + WORKER_DIE = 1 << 1, /* die die die */ + WORKER_IDLE = 1 << 2, /* is idle */ + WORKER_PREP = 1 << 3, /* preparing to run works */ +- WORKER_ROGUE = 1 << 4, /* not bound to any cpu */ +- WORKER_REBIND = 1 << 5, /* mom is home, come back */ +- WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */ +- WORKER_UNBOUND = 1 << 7, /* worker is unbound */ +- +- WORKER_NOT_RUNNING = WORKER_PREP | WORKER_ROGUE | WORKER_REBIND | +- WORKER_CPU_INTENSIVE | WORKER_UNBOUND, +- +- /* gcwq->trustee_state */ +- TRUSTEE_START = 0, /* start */ +- TRUSTEE_IN_CHARGE = 1, /* trustee in charge of gcwq */ +- TRUSTEE_BUTCHER = 2, /* butcher workers */ +- TRUSTEE_RELEASE = 3, /* release workers */ +- TRUSTEE_DONE = 4, /* trustee is done */ ++ WORKER_CPU_INTENSIVE = 1 << 4, /* cpu intensive */ ++ WORKER_UNBOUND = 1 << 5, /* worker is unbound */ ++ ++ WORKER_NOT_RUNNING = WORKER_PREP | WORKER_CPU_INTENSIVE | WORKER_UNBOUND, + + BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */ + BUSY_WORKER_HASH_SIZE = 1 << BUSY_WORKER_HASH_ORDER, +@@ -84,7 +75,6 @@ enum { + (min two ticks) */ + MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */ + CREATE_COOLDOWN = HZ, /* time to breath after fail */ +- TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */ + + /* + * Rescue workers are used only on emergencies and shared by +@@ -136,7 +126,7 @@ struct worker { + unsigned long last_active; /* L: last active timestamp */ + unsigned int flags; /* X: flags */ + int id; /* I: worker id */ +- struct work_struct rebind_work; /* L: rebind worker to cpu */ ++ int sleeping; /* None */ + }; + + /* +@@ -163,10 +153,8 @@ struct global_cwq { + + struct ida worker_ida; /* L: for worker IDs */ + +- struct task_struct *trustee; /* L: for gcwq shutdown */ +- unsigned int trustee_state; /* L: trustee state */ +- wait_queue_head_t trustee_wait; /* trustee wait */ + struct worker *first_idle; /* L: first idle worker */ ++ wait_queue_head_t idle_wait; + } ____cacheline_aligned_in_smp; + + /* +@@ -658,66 +646,58 @@ static void wake_up_worker(struct global + } + + /** +- * wq_worker_waking_up - a worker is waking up +- * @task: task waking up +- * @cpu: CPU @task is waking up to +- * +- * This function is called during try_to_wake_up() when a worker is +- * being awoken. 
++ * wq_worker_running - a worker is running again ++ * @task: task returning from sleep + * +- * CONTEXT: +- * spin_lock_irq(rq->lock) ++ * This function is called when a worker returns from schedule() + */ +-void wq_worker_waking_up(struct task_struct *task, unsigned int cpu) ++void wq_worker_running(struct task_struct *task) + { + struct worker *worker = kthread_data(task); + ++ if (!worker->sleeping) ++ return; + if (!(worker->flags & WORKER_NOT_RUNNING)) +- atomic_inc(get_gcwq_nr_running(cpu)); ++ atomic_inc(get_gcwq_nr_running(smp_processor_id())); ++ worker->sleeping = 0; + } + + /** + * wq_worker_sleeping - a worker is going to sleep + * @task: task going to sleep +- * @cpu: CPU in question, must be the current CPU number +- * +- * This function is called during schedule() when a busy worker is +- * going to sleep. Worker on the same cpu can be woken up by +- * returning pointer to its task. + * +- * CONTEXT: +- * spin_lock_irq(rq->lock) +- * +- * RETURNS: +- * Worker task on @cpu to wake up, %NULL if none. ++ * This function is called from schedule() when a busy worker is ++ * going to sleep. + */ +-struct task_struct *wq_worker_sleeping(struct task_struct *task, +- unsigned int cpu) ++void wq_worker_sleeping(struct task_struct *task) + { +- struct worker *worker = kthread_data(task), *to_wakeup = NULL; +- struct global_cwq *gcwq = get_gcwq(cpu); +- atomic_t *nr_running = get_gcwq_nr_running(cpu); ++ struct worker *worker = kthread_data(task); ++ struct global_cwq *gcwq; ++ int cpu; + + if (worker->flags & WORKER_NOT_RUNNING) +- return NULL; ++ return; + +- /* this can only happen on the local cpu */ +- BUG_ON(cpu != raw_smp_processor_id()); ++ if (WARN_ON_ONCE(worker->sleeping)) ++ return; ++ ++ worker->sleeping = 1; + ++ cpu = smp_processor_id(); ++ gcwq = get_gcwq(cpu); ++ spin_lock_irq(&gcwq->lock); + /* + * The counterpart of the following dec_and_test, implied mb, + * worklist not empty test sequence is in insert_work(). + * Please read comment there. +- * +- * NOT_RUNNING is clear. This means that trustee is not in +- * charge and we're running on the local cpu w/ rq lock held +- * and preemption disabled, which in turn means that none else +- * could be manipulating idle_list, so dereferencing idle_list +- * without gcwq lock is safe. +- */ +- if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist)) +- to_wakeup = first_worker(gcwq); +- return to_wakeup ? 
to_wakeup->task : NULL; ++ */ ++ if (atomic_dec_and_test(get_gcwq_nr_running(cpu)) && ++ !list_empty(&gcwq->worklist)) { ++ worker = first_worker(gcwq); ++ if (worker) ++ wake_up_process(worker->task); ++ } ++ spin_unlock_irq(&gcwq->lock); + } + + /** +@@ -979,13 +959,38 @@ static bool is_chained_work(struct workq + return false; + } + +-static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, +- struct work_struct *work) ++static void ___queue_work(struct workqueue_struct *wq, struct global_cwq *gcwq, ++ struct work_struct *work) + { +- struct global_cwq *gcwq; + struct cpu_workqueue_struct *cwq; + struct list_head *worklist; + unsigned int work_flags; ++ ++ /* gcwq determined, get cwq and queue */ ++ cwq = get_cwq(gcwq->cpu, wq); ++ trace_workqueue_queue_work(gcwq->cpu, cwq, work); ++ ++ BUG_ON(!list_empty(&work->entry)); ++ ++ cwq->nr_in_flight[cwq->work_color]++; ++ work_flags = work_color_to_flags(cwq->work_color); ++ ++ if (likely(cwq->nr_active < cwq->max_active)) { ++ trace_workqueue_activate_work(work); ++ cwq->nr_active++; ++ worklist = gcwq_determine_ins_pos(gcwq, cwq); ++ } else { ++ work_flags |= WORK_STRUCT_DELAYED; ++ worklist = &cwq->delayed_works; ++ } ++ ++ insert_work(cwq, work, worklist, work_flags); ++} ++ ++static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, ++ struct work_struct *work) ++{ ++ struct global_cwq *gcwq; + unsigned long flags; + + debug_work_activate(work); +@@ -1031,27 +1036,32 @@ static void __queue_work(unsigned int cp + spin_lock_irqsave(&gcwq->lock, flags); + } + +- /* gcwq determined, get cwq and queue */ +- cwq = get_cwq(gcwq->cpu, wq); +- trace_workqueue_queue_work(cpu, cwq, work); ++ ___queue_work(wq, gcwq, work); + +- BUG_ON(!list_empty(&work->entry)); ++ spin_unlock_irqrestore(&gcwq->lock, flags); ++} + +- cwq->nr_in_flight[cwq->work_color]++; +- work_flags = work_color_to_flags(cwq->work_color); ++/** ++ * queue_work_on - queue work on specific cpu ++ * @cpu: CPU number to execute work on ++ * @wq: workqueue to use ++ * @work: work to queue ++ * ++ * Returns 0 if @work was already on a queue, non-zero otherwise. ++ * ++ * We queue the work to a specific CPU, the caller must ensure it ++ * can't go away. ++ */ ++static int ++__queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work) ++{ ++ int ret = 0; + +- if (likely(cwq->nr_active < cwq->max_active)) { +- trace_workqueue_activate_work(work); +- cwq->nr_active++; +- worklist = gcwq_determine_ins_pos(gcwq, cwq); +- } else { +- work_flags |= WORK_STRUCT_DELAYED; +- worklist = &cwq->delayed_works; ++ if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { ++ __queue_work(cpu, wq, work); ++ ret = 1; + } +- +- insert_work(cwq, work, worklist, work_flags); +- +- spin_unlock_irqrestore(&gcwq->lock, flags); ++ return ret; + } + + /** +@@ -1068,34 +1078,19 @@ int queue_work(struct workqueue_struct * + { + int ret; + +- ret = queue_work_on(get_cpu(), wq, work); +- put_cpu(); ++ ret = __queue_work_on(get_cpu_light(), wq, work); ++ put_cpu_light(); + + return ret; + } + EXPORT_SYMBOL_GPL(queue_work); + +-/** +- * queue_work_on - queue work on specific cpu +- * @cpu: CPU number to execute work on +- * @wq: workqueue to use +- * @work: work to queue +- * +- * Returns 0 if @work was already on a queue, non-zero otherwise. +- * +- * We queue the work to a specific CPU, the caller must ensure it +- * can't go away. 
+- */ + int + queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work) + { +- int ret = 0; ++ WARN_ON(wq->flags & WQ_NON_AFFINE); + +- if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { +- __queue_work(cpu, wq, work); +- ret = 1; +- } +- return ret; ++ return __queue_work_on(cpu, wq, work); + } + EXPORT_SYMBOL_GPL(queue_work_on); + +@@ -1141,6 +1136,8 @@ int queue_delayed_work_on(int cpu, struc + struct timer_list *timer = &dwork->timer; + struct work_struct *work = &dwork->work; + ++ WARN_ON((wq->flags & WQ_NON_AFFINE) && cpu != -1); ++ + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { + unsigned int lcpu; + +@@ -1206,12 +1203,13 @@ static void worker_enter_idle(struct wor + /* idle_list is LIFO */ + list_add(&worker->entry, &gcwq->idle_list); + +- if (likely(!(worker->flags & WORKER_ROGUE))) { +- if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer)) +- mod_timer(&gcwq->idle_timer, +- jiffies + IDLE_WORKER_TIMEOUT); +- } else +- wake_up_all(&gcwq->trustee_wait); ++ if (gcwq->nr_idle == gcwq->nr_workers) ++ wake_up_all(&gcwq->idle_wait); ++ ++ if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer)) { ++ mod_timer(&gcwq->idle_timer, ++ jiffies + IDLE_WORKER_TIMEOUT); ++ } + + /* sanity check nr_running */ + WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle && +@@ -1288,8 +1286,14 @@ __acquires(&gcwq->lock) + return false; + if (task_cpu(task) == gcwq->cpu && + cpumask_equal(¤t->cpus_allowed, +- get_cpu_mask(gcwq->cpu))) ++ get_cpu_mask(gcwq->cpu))) { ++ /* ++ * Since we're binding to a particular cpu and need to ++ * stay there for correctness, mark us PF_THREAD_BOUND. ++ */ ++ task->flags |= PF_THREAD_BOUND; + return true; ++ } + spin_unlock_irq(&gcwq->lock); + + /* +@@ -1303,20 +1307,15 @@ __acquires(&gcwq->lock) + } + } + +-/* +- * Function for worker->rebind_work used to rebind rogue busy workers +- * to the associated cpu which is coming back online. This is +- * scheduled by cpu up but can race with other cpu hotplug operations +- * and may be executed twice without intervening cpu down. +- */ +-static void worker_rebind_fn(struct work_struct *work) ++static void worker_unbind_and_unlock(struct worker *worker) + { +- struct worker *worker = container_of(work, struct worker, rebind_work); + struct global_cwq *gcwq = worker->gcwq; ++ struct task_struct *task = worker->task; + +- if (worker_maybe_bind_and_lock(worker)) +- worker_clr_flags(worker, WORKER_REBIND); +- ++ /* ++ * Its no longer required we're PF_THREAD_BOUND, the work is done. ++ */ ++ task->flags &= ~PF_THREAD_BOUND; + spin_unlock_irq(&gcwq->lock); + } + +@@ -1328,7 +1327,6 @@ static struct worker *alloc_worker(void) + if (worker) { + INIT_LIST_HEAD(&worker->entry); + INIT_LIST_HEAD(&worker->scheduled); +- INIT_WORK(&worker->rebind_work, worker_rebind_fn); + /* on creation a worker is in !idle && prep state */ + worker->flags = WORKER_PREP; + } +@@ -1383,15 +1381,9 @@ static struct worker *create_worker(stru + if (IS_ERR(worker->task)) + goto fail; + +- /* +- * A rogue worker will become a regular one if CPU comes +- * online later on. Make sure every worker has +- * PF_THREAD_BOUND set. 
+- */ + if (bind && !on_unbound_cpu) + kthread_bind(worker->task, gcwq->cpu); + else { +- worker->task->flags |= PF_THREAD_BOUND; + if (on_unbound_cpu) + worker->flags |= WORKER_UNBOUND; + } +@@ -1668,13 +1660,6 @@ static bool manage_workers(struct worker + + gcwq->flags &= ~GCWQ_MANAGING_WORKERS; + +- /* +- * The trustee might be waiting to take over the manager +- * position, tell it we're done. +- */ +- if (unlikely(gcwq->trustee)) +- wake_up_all(&gcwq->trustee_wait); +- + return ret; + } + +@@ -2075,7 +2060,7 @@ repeat: + if (keep_working(gcwq)) + wake_up_worker(gcwq); + +- spin_unlock_irq(&gcwq->lock); ++ worker_unbind_and_unlock(rescuer); + } + + schedule(); +@@ -3024,7 +3009,6 @@ struct workqueue_struct *__alloc_workque + if (IS_ERR(rescuer->task)) + goto err; + +- rescuer->task->flags |= PF_THREAD_BOUND; + wake_up_process(rescuer->task); + } + +@@ -3214,171 +3198,76 @@ EXPORT_SYMBOL_GPL(work_busy); + * gcwqs serve mix of short, long and very long running works making + * blocked draining impractical. + * +- * This is solved by allowing a gcwq to be detached from CPU, running +- * it with unbound (rogue) workers and allowing it to be reattached +- * later if the cpu comes back online. A separate thread is created +- * to govern a gcwq in such state and is called the trustee of the +- * gcwq. +- * +- * Trustee states and their descriptions. +- * +- * START Command state used on startup. On CPU_DOWN_PREPARE, a +- * new trustee is started with this state. +- * +- * IN_CHARGE Once started, trustee will enter this state after +- * assuming the manager role and making all existing +- * workers rogue. DOWN_PREPARE waits for trustee to +- * enter this state. After reaching IN_CHARGE, trustee +- * tries to execute the pending worklist until it's empty +- * and the state is set to BUTCHER, or the state is set +- * to RELEASE. +- * +- * BUTCHER Command state which is set by the cpu callback after +- * the cpu has went down. Once this state is set trustee +- * knows that there will be no new works on the worklist +- * and once the worklist is empty it can proceed to +- * killing idle workers. +- * +- * RELEASE Command state which is set by the cpu callback if the +- * cpu down has been canceled or it has come online +- * again. After recognizing this state, trustee stops +- * trying to drain or butcher and clears ROGUE, rebinds +- * all remaining workers back to the cpu and releases +- * manager role. +- * +- * DONE Trustee will enter this state after BUTCHER or RELEASE +- * is complete. +- * +- * trustee CPU draining +- * took over down complete +- * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE +- * | | ^ +- * | CPU is back online v return workers | +- * ----------------> RELEASE -------------- + */ + +-/** +- * trustee_wait_event_timeout - timed event wait for trustee +- * @cond: condition to wait for +- * @timeout: timeout in jiffies +- * +- * wait_event_timeout() for trustee to use. Handles locking and +- * checks for RELEASE request. +- * +- * CONTEXT: +- * spin_lock_irq(gcwq->lock) which may be released and regrabbed +- * multiple times. To be used by trustee. +- * +- * RETURNS: +- * Positive indicating left time if @cond is satisfied, 0 if timed +- * out, -1 if canceled. 
+- */ +-#define trustee_wait_event_timeout(cond, timeout) ({ \ +- long __ret = (timeout); \ +- while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) && \ +- __ret) { \ +- spin_unlock_irq(&gcwq->lock); \ +- __wait_event_timeout(gcwq->trustee_wait, (cond) || \ +- (gcwq->trustee_state == TRUSTEE_RELEASE), \ +- __ret); \ +- spin_lock_irq(&gcwq->lock); \ +- } \ +- gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret); \ +-}) ++static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb, ++ unsigned long action, ++ void *hcpu) ++{ ++ unsigned int cpu = (unsigned long)hcpu; ++ struct global_cwq *gcwq = get_gcwq(cpu); ++ struct worker *uninitialized_var(new_worker); ++ unsigned long flags; + +-/** +- * trustee_wait_event - event wait for trustee +- * @cond: condition to wait for +- * +- * wait_event() for trustee to use. Automatically handles locking and +- * checks for CANCEL request. +- * +- * CONTEXT: +- * spin_lock_irq(gcwq->lock) which may be released and regrabbed +- * multiple times. To be used by trustee. +- * +- * RETURNS: +- * 0 if @cond is satisfied, -1 if canceled. +- */ +-#define trustee_wait_event(cond) ({ \ +- long __ret1; \ +- __ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\ +- __ret1 < 0 ? -1 : 0; \ +-}) ++ action &= ~CPU_TASKS_FROZEN; + +-static int __cpuinit trustee_thread(void *__gcwq) +-{ +- struct global_cwq *gcwq = __gcwq; +- struct worker *worker; +- struct work_struct *work; +- struct hlist_node *pos; +- long rc; +- int i; ++ switch (action) { ++ case CPU_UP_PREPARE: ++ BUG_ON(gcwq->first_idle); ++ new_worker = create_worker(gcwq, false); ++ if (!new_worker) ++ return NOTIFY_BAD; ++ case CPU_UP_CANCELED: ++ case CPU_ONLINE: ++ break; ++ default: ++ return notifier_from_errno(0); ++ } + +- BUG_ON(gcwq->cpu != smp_processor_id()); ++ /* some are called w/ irq disabled, don't disturb irq status */ ++ spin_lock_irqsave(&gcwq->lock, flags); + +- spin_lock_irq(&gcwq->lock); +- /* +- * Claim the manager position and make all workers rogue. +- * Trustee must be bound to the target cpu and can't be +- * cancelled. +- */ +- BUG_ON(gcwq->cpu != smp_processor_id()); +- rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS)); +- BUG_ON(rc < 0); ++ switch (action) { ++ case CPU_UP_PREPARE: ++ BUG_ON(gcwq->first_idle); ++ gcwq->first_idle = new_worker; ++ break; + +- gcwq->flags |= GCWQ_MANAGING_WORKERS; ++ case CPU_UP_CANCELED: ++ destroy_worker(gcwq->first_idle); ++ gcwq->first_idle = NULL; ++ break; + +- list_for_each_entry(worker, &gcwq->idle_list, entry) +- worker->flags |= WORKER_ROGUE; ++ case CPU_ONLINE: ++ spin_unlock_irq(&gcwq->lock); ++ kthread_bind(gcwq->first_idle->task, cpu); ++ spin_lock_irq(&gcwq->lock); ++ gcwq->flags |= GCWQ_MANAGE_WORKERS; ++ start_worker(gcwq->first_idle); ++ gcwq->first_idle = NULL; ++ break; ++ } + +- for_each_busy_worker(worker, i, pos, gcwq) +- worker->flags |= WORKER_ROGUE; ++ spin_unlock_irqrestore(&gcwq->lock, flags); + +- /* +- * Call schedule() so that we cross rq->lock and thus can +- * guarantee sched callbacks see the rogue flag. This is +- * necessary as scheduler callbacks may be invoked from other +- * cpus. +- */ +- spin_unlock_irq(&gcwq->lock); +- schedule(); +- spin_lock_irq(&gcwq->lock); ++ return notifier_from_errno(0); ++} + +- /* +- * Sched callbacks are disabled now. Zap nr_running. After +- * this, nr_running stays zero and need_more_worker() and +- * keep_working() are always true as long as the worklist is +- * not empty. 
+- */ +- atomic_set(get_gcwq_nr_running(gcwq->cpu), 0); ++static void flush_gcwq(struct global_cwq *gcwq) ++{ ++ struct work_struct *work, *nw; ++ struct worker *worker, *n; ++ LIST_HEAD(non_affine_works); + +- spin_unlock_irq(&gcwq->lock); +- del_timer_sync(&gcwq->idle_timer); + spin_lock_irq(&gcwq->lock); ++ list_for_each_entry_safe(work, nw, &gcwq->worklist, entry) { ++ struct workqueue_struct *wq = get_work_cwq(work)->wq; + +- /* +- * We're now in charge. Notify and proceed to drain. We need +- * to keep the gcwq running during the whole CPU down +- * procedure as other cpu hotunplug callbacks may need to +- * flush currently running tasks. +- */ +- gcwq->trustee_state = TRUSTEE_IN_CHARGE; +- wake_up_all(&gcwq->trustee_wait); +- +- /* +- * The original cpu is in the process of dying and may go away +- * anytime now. When that happens, we and all workers would +- * be migrated to other cpus. Try draining any left work. We +- * want to get it over with ASAP - spam rescuers, wake up as +- * many idlers as necessary and create new ones till the +- * worklist is empty. Note that if the gcwq is frozen, there +- * may be frozen works in freezable cwqs. Don't declare +- * completion while frozen. +- */ +- while (gcwq->nr_workers != gcwq->nr_idle || +- gcwq->flags & GCWQ_FREEZING || +- gcwq->trustee_state == TRUSTEE_IN_CHARGE) { ++ if (wq->flags & WQ_NON_AFFINE) ++ list_move(&work->entry, &non_affine_works); ++ } ++ ++ while (!list_empty(&gcwq->worklist)) { + int nr_works = 0; + + list_for_each_entry(work, &gcwq->worklist, entry) { +@@ -3392,189 +3281,54 @@ static int __cpuinit trustee_thread(void + wake_up_process(worker->task); + } + ++ spin_unlock_irq(&gcwq->lock); ++ + if (need_to_create_worker(gcwq)) { +- spin_unlock_irq(&gcwq->lock); +- worker = create_worker(gcwq, false); +- spin_lock_irq(&gcwq->lock); +- if (worker) { +- worker->flags |= WORKER_ROGUE; ++ worker = create_worker(gcwq, true); ++ if (worker) + start_worker(worker); +- } + } + +- /* give a breather */ +- if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0) +- break; +- } +- +- /* +- * Either all works have been scheduled and cpu is down, or +- * cpu down has already been canceled. Wait for and butcher +- * all workers till we're canceled. +- */ +- do { +- rc = trustee_wait_event(!list_empty(&gcwq->idle_list)); +- while (!list_empty(&gcwq->idle_list)) +- destroy_worker(list_first_entry(&gcwq->idle_list, +- struct worker, entry)); +- } while (gcwq->nr_workers && rc >= 0); +- +- /* +- * At this point, either draining has completed and no worker +- * is left, or cpu down has been canceled or the cpu is being +- * brought back up. There shouldn't be any idle one left. +- * Tell the remaining busy ones to rebind once it finishes the +- * currently scheduled works by scheduling the rebind_work. +- */ +- WARN_ON(!list_empty(&gcwq->idle_list)); +- +- for_each_busy_worker(worker, i, pos, gcwq) { +- struct work_struct *rebind_work = &worker->rebind_work; ++ wait_event_timeout(gcwq->idle_wait, ++ gcwq->nr_idle == gcwq->nr_workers, HZ/10); + +- /* +- * Rebind_work may race with future cpu hotplug +- * operations. Use a separate flag to mark that +- * rebinding is scheduled. 
+- */ +- worker->flags |= WORKER_REBIND; +- worker->flags &= ~WORKER_ROGUE; ++ spin_lock_irq(&gcwq->lock); ++ } + +- /* queue rebind_work, wq doesn't matter, use the default one */ +- if (test_and_set_bit(WORK_STRUCT_PENDING_BIT, +- work_data_bits(rebind_work))) +- continue; ++ WARN_ON(gcwq->nr_workers != gcwq->nr_idle); + +- debug_work_activate(rebind_work); +- insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work, +- worker->scheduled.next, +- work_color_to_flags(WORK_NO_COLOR)); +- } ++ list_for_each_entry_safe(worker, n, &gcwq->idle_list, entry) ++ destroy_worker(worker); + +- /* relinquish manager role */ +- gcwq->flags &= ~GCWQ_MANAGING_WORKERS; ++ WARN_ON(gcwq->nr_workers || gcwq->nr_idle); + +- /* notify completion */ +- gcwq->trustee = NULL; +- gcwq->trustee_state = TRUSTEE_DONE; +- wake_up_all(&gcwq->trustee_wait); + spin_unlock_irq(&gcwq->lock); +- return 0; +-} + +-/** +- * wait_trustee_state - wait for trustee to enter the specified state +- * @gcwq: gcwq the trustee of interest belongs to +- * @state: target state to wait for +- * +- * Wait for the trustee to reach @state. DONE is already matched. +- * +- * CONTEXT: +- * spin_lock_irq(gcwq->lock) which may be released and regrabbed +- * multiple times. To be used by cpu_callback. +- */ +-static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state) +-__releases(&gcwq->lock) +-__acquires(&gcwq->lock) +-{ +- if (!(gcwq->trustee_state == state || +- gcwq->trustee_state == TRUSTEE_DONE)) { +- spin_unlock_irq(&gcwq->lock); +- __wait_event(gcwq->trustee_wait, +- gcwq->trustee_state == state || +- gcwq->trustee_state == TRUSTEE_DONE); +- spin_lock_irq(&gcwq->lock); ++ gcwq = get_gcwq(get_cpu_light()); ++ spin_lock_irq(&gcwq->lock); ++ list_for_each_entry_safe(work, nw, &non_affine_works, entry) { ++ list_del_init(&work->entry); ++ ___queue_work(get_work_cwq(work)->wq, gcwq, work); + } ++ spin_unlock_irq(&gcwq->lock); ++ put_cpu_light(); + } + +-static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, ++static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb, + unsigned long action, + void *hcpu) + { + unsigned int cpu = (unsigned long)hcpu; + struct global_cwq *gcwq = get_gcwq(cpu); +- struct task_struct *new_trustee = NULL; +- struct worker *uninitialized_var(new_worker); +- unsigned long flags; + + action &= ~CPU_TASKS_FROZEN; + +- switch (action) { +- case CPU_DOWN_PREPARE: +- new_trustee = kthread_create(trustee_thread, gcwq, +- "workqueue_trustee/%d\n", cpu); +- if (IS_ERR(new_trustee)) +- return notifier_from_errno(PTR_ERR(new_trustee)); +- kthread_bind(new_trustee, cpu); +- /* fall through */ +- case CPU_UP_PREPARE: +- BUG_ON(gcwq->first_idle); +- new_worker = create_worker(gcwq, false); +- if (!new_worker) { +- if (new_trustee) +- kthread_stop(new_trustee); +- return NOTIFY_BAD; +- } +- } ++ switch (action) { ++ case CPU_DOWN_PREPARE: ++ flush_gcwq(gcwq); ++ break; ++ } + +- /* some are called w/ irq disabled, don't disturb irq status */ +- spin_lock_irqsave(&gcwq->lock, flags); +- +- switch (action) { +- case CPU_DOWN_PREPARE: +- /* initialize trustee and tell it to acquire the gcwq */ +- BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE); +- gcwq->trustee = new_trustee; +- gcwq->trustee_state = TRUSTEE_START; +- wake_up_process(gcwq->trustee); +- wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE); +- /* fall through */ +- case CPU_UP_PREPARE: +- BUG_ON(gcwq->first_idle); +- gcwq->first_idle = new_worker; +- break; +- +- case CPU_DYING: +- /* +- * Before this, the 
trustee and all workers except for +- * the ones which are still executing works from +- * before the last CPU down must be on the cpu. After +- * this, they'll all be diasporas. +- */ +- gcwq->flags |= GCWQ_DISASSOCIATED; +- break; +- +- case CPU_POST_DEAD: +- gcwq->trustee_state = TRUSTEE_BUTCHER; +- /* fall through */ +- case CPU_UP_CANCELED: +- destroy_worker(gcwq->first_idle); +- gcwq->first_idle = NULL; +- break; +- +- case CPU_DOWN_FAILED: +- case CPU_ONLINE: +- gcwq->flags &= ~GCWQ_DISASSOCIATED; +- if (gcwq->trustee_state != TRUSTEE_DONE) { +- gcwq->trustee_state = TRUSTEE_RELEASE; +- wake_up_process(gcwq->trustee); +- wait_trustee_state(gcwq, TRUSTEE_DONE); +- } +- +- /* +- * Trustee is done and there might be no worker left. +- * Put the first_idle in and request a real manager to +- * take a look. +- */ +- spin_unlock_irq(&gcwq->lock); +- kthread_bind(gcwq->first_idle->task, cpu); +- spin_lock_irq(&gcwq->lock); +- gcwq->flags |= GCWQ_MANAGE_WORKERS; +- start_worker(gcwq->first_idle); +- gcwq->first_idle = NULL; +- break; +- } +- +- spin_unlock_irqrestore(&gcwq->lock, flags); + + return notifier_from_errno(0); + } +@@ -3772,7 +3526,8 @@ static int __init init_workqueues(void) + unsigned int cpu; + int i; + +- cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE); ++ cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_ACTIVE); ++ hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_INACTIVE); + + /* initialize gcwqs */ + for_each_gcwq_cpu(cpu) { +@@ -3795,9 +3550,7 @@ static int __init init_workqueues(void) + (unsigned long)gcwq); + + ida_init(&gcwq->worker_ida); +- +- gcwq->trustee_state = TRUSTEE_DONE; +- init_waitqueue_head(&gcwq->trustee_wait); ++ init_waitqueue_head(&gcwq->idle_wait); + } + + /* create the initial worker */ +Index: linux-3.2/kernel/workqueue_sched.h +=================================================================== +--- linux-3.2.orig/kernel/workqueue_sched.h ++++ linux-3.2/kernel/workqueue_sched.h +@@ -4,6 +4,5 @@ + * Scheduler hooks for concurrency managed workqueue. Only to be + * included from sched.c and workqueue.c. + */ +-void wq_worker_waking_up(struct task_struct *task, unsigned int cpu); +-struct task_struct *wq_worker_sleeping(struct task_struct *task, +- unsigned int cpu); ++void wq_worker_running(struct task_struct *task); ++void wq_worker_sleeping(struct task_struct *task); +Index: linux-3.2/arch/mips/kernel/signal.c +=================================================================== +--- linux-3.2.orig/arch/mips/kernel/signal.c ++++ linux-3.2/arch/mips/kernel/signal.c +@@ -604,6 +604,9 @@ static void do_signal(struct pt_regs *re + if (!user_mode(regs)) + return; + ++ local_irq_enable(); ++ preempt_check_resched(); ++ + if (test_thread_flag(TIF_RESTORE_SIGMASK)) + oldset = ¤t->saved_sigmask; + else +Index: linux-3.2/arch/mips/cavium-octeon/smp.c +=================================================================== +--- linux-3.2.orig/arch/mips/cavium-octeon/smp.c ++++ linux-3.2/arch/mips/cavium-octeon/smp.c +@@ -207,8 +207,9 @@ void octeon_prepare_cpus(unsigned int ma + * the other bits alone. 
+ */ + cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff); +- if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_DISABLED, +- "SMP-IPI", mailbox_interrupt)) { ++ if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, ++ IRQF_PERCPU | IRQF_NO_THREAD, "SMP-IPI", ++ mailbox_interrupt)) { + panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n"); + } + } +Index: linux-3.2/arch/arm/kernel/signal.c +=================================================================== +--- linux-3.2.orig/arch/arm/kernel/signal.c ++++ linux-3.2/arch/arm/kernel/signal.c +@@ -673,6 +673,9 @@ static void do_signal(struct pt_regs *re + if (!user_mode(regs)) + return; + ++ local_irq_enable(); ++ preempt_check_resched(); ++ + /* + * If we were from a system call, check for system call restarting... + */ +Index: linux-3.2/arch/powerpc/platforms/85xx/mpc85xx_cds.c +=================================================================== +--- linux-3.2.orig/arch/powerpc/platforms/85xx/mpc85xx_cds.c ++++ linux-3.2/arch/powerpc/platforms/85xx/mpc85xx_cds.c +@@ -177,7 +177,7 @@ static irqreturn_t mpc85xx_8259_cascade_ + + static struct irqaction mpc85xxcds_8259_irqaction = { + .handler = mpc85xx_8259_cascade_action, +- .flags = IRQF_SHARED, ++ .flags = IRQF_SHARED | IRQF_NO_THREAD, + .name = "8259 cascade", + }; + #endif /* PPC_I8259 */ +Index: linux-3.2/arch/powerpc/platforms/wsp/opb_pic.c +=================================================================== +--- linux-3.2.orig/arch/powerpc/platforms/wsp/opb_pic.c ++++ linux-3.2/arch/powerpc/platforms/wsp/opb_pic.c +@@ -320,7 +320,8 @@ void __init opb_pic_init(void) + } + + /* Attach opb interrupt handler to new virtual IRQ */ +- rc = request_irq(virq, opb_irq_handler, 0, "OPB LS Cascade", opb); ++ rc = request_irq(virq, opb_irq_handler, IRQF_NO_THREAD, ++ "OPB LS Cascade", opb); + if (rc) { + printk("opb: request_irq failed: %d\n", rc); + continue; +Index: linux-3.2/arch/powerpc/kernel/smp.c +=================================================================== +--- linux-3.2.orig/arch/powerpc/kernel/smp.c ++++ linux-3.2/arch/powerpc/kernel/smp.c +@@ -187,8 +187,8 @@ int smp_request_message_ipi(int virq, in + return 1; + } + #endif +- err = request_irq(virq, smp_ipi_action[msg], IRQF_PERCPU, +- smp_ipi_name[msg], 0); ++ err = request_irq(virq, smp_ipi_action[msg], ++ IRQF_PERCPU | IRQF_NO_THREAD, smp_ipi_name[msg], 0); + WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n", + virq, smp_ipi_name[msg], err); + +Index: linux-3.2/arch/powerpc/platforms/powermac/smp.c +=================================================================== +--- linux-3.2.orig/arch/powerpc/platforms/powermac/smp.c ++++ linux-3.2/arch/powerpc/platforms/powermac/smp.c +@@ -200,7 +200,7 @@ static int psurge_secondary_ipi_init(voi + + if (psurge_secondary_virq) + rc = request_irq(psurge_secondary_virq, psurge_ipi_intr, +- IRQF_PERCPU, "IPI", NULL); ++ IRQF_NO_THREAD | IRQF_PERCPU, "IPI", NULL); + + if (rc) + pr_err("Failed to setup secondary cpu IPI\n"); +@@ -408,7 +408,7 @@ static int __init smp_psurge_kick_cpu(in + + static struct irqaction psurge_irqaction = { + .handler = psurge_ipi_intr, +- .flags = IRQF_PERCPU, ++ .flags = IRQF_PERCPU | IRQF_NO_THREAD, + .name = "primary IPI", + }; + +Index: linux-3.2/arch/powerpc/sysdev/xics/xics-common.c +=================================================================== +--- linux-3.2.orig/arch/powerpc/sysdev/xics/xics-common.c ++++ linux-3.2/arch/powerpc/sysdev/xics/xics-common.c +@@ -134,10 +134,11 @@ static void xics_request_ipi(void) + BUG_ON(ipi == 
NO_IRQ); + + /* +- * IPIs are marked IRQF_PERCPU. The handler was set in map. ++ * IPIs are marked PERCPU and also IRQF_NO_THREAD as they must ++ * run in hard interrupt context. The handler was set in map. + */ + BUG_ON(request_irq(ipi, icp_ops->ipi_action, +- IRQF_PERCPU, "IPI", NULL)); ++ IRQF_NO_THREAD|IRQF_PERCPU, "IPI", NULL)); + } + + int __init xics_smp_probe(void) +Index: linux-3.2/arch/powerpc/Kconfig +=================================================================== +--- linux-3.2.orig/arch/powerpc/Kconfig ++++ linux-3.2/arch/powerpc/Kconfig +@@ -69,10 +69,11 @@ config LOCKDEP_SUPPORT + + config RWSEM_GENERIC_SPINLOCK + bool ++ default y if PREEMPT_RT_FULL + + config RWSEM_XCHGADD_ALGORITHM + bool +- default y ++ default y if !PREEMPT_RT_FULL + + config GENERIC_LOCKBREAK + bool +@@ -132,6 +133,7 @@ config PPC + select IRQ_PER_CPU + select GENERIC_IRQ_SHOW + select GENERIC_IRQ_SHOW_LEVEL ++ select IRQ_FORCED_THREADING + select HAVE_RCU_TABLE_FREE if SMP + select HAVE_SYSCALL_TRACEPOINTS + select HAVE_BPF_JIT if (PPC64 && NET) +@@ -274,7 +276,7 @@ menu "Kernel options" + + config HIGHMEM + bool "High memory support" +- depends on PPC32 ++ depends on PPC32 && !PREEMPT_RT_FULL + + source kernel/time/Kconfig + source kernel/Kconfig.hz +Index: linux-3.2/kernel/sched_rt.c +=================================================================== +--- linux-3.2.orig/kernel/sched_rt.c ++++ linux-3.2/kernel/sched_rt.c +@@ -560,6 +560,9 @@ static int balance_runtime(struct rt_rq + { + int more = 0; + ++ if (!sched_feat(RT_RUNTIME_SHARE)) ++ return more; ++ + if (rt_rq->rt_time > rt_rq->rt_runtime) { + raw_spin_unlock(&rt_rq->rt_runtime_lock); + more = do_balance_runtime(rt_rq); +@@ -577,12 +580,9 @@ static inline int balance_runtime(struct + + static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) + { +- int i, idle = 1; ++ int i, idle = 1, throttled = 0; + const struct cpumask *span; + +- if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF) +- return 1; +- + span = sched_rt_period_mask(); + for_each_cpu(i, span) { + int enqueue = 0; +@@ -617,12 +617,17 @@ static int do_sched_rt_period_timer(stru + if (!rt_rq_throttled(rt_rq)) + enqueue = 1; + } ++ if (rt_rq->rt_throttled) ++ throttled = 1; + + if (enqueue) + sched_rt_rq_enqueue(rt_rq); + raw_spin_unlock(&rq->lock); + } + ++ if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)) ++ return 1; ++ + return idle; + } + +@@ -654,8 +659,24 @@ static int sched_rt_runtime_exceeded(str + return 0; + + if (rt_rq->rt_time > runtime) { +- rt_rq->rt_throttled = 1; +- printk_once(KERN_WARNING "sched: RT throttling activated\n"); ++ struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); ++ ++ /* ++ * Don't actually throttle groups that have no runtime assigned ++ * but accrue some time due to boosting. ++ */ ++ if (likely(rt_b->rt_runtime)) { ++ rt_rq->rt_throttled = 1; ++ printk_once(KERN_WARNING "sched: RT throttling activated\n"); ++ } else { ++ /* ++ * In case we did anyway, make it go away, ++ * replenishment is a joke, since it will replenish us ++ * with exactly 0 ns. 
++ */ ++ rt_rq->rt_time = 0; ++ } ++ + if (rt_rq_throttled(rt_rq)) { + sched_rt_rq_dequeue(rt_rq); + return 1; +@@ -683,7 +704,8 @@ static void update_curr_rt(struct rq *rq + if (unlikely((s64)delta_exec < 0)) + delta_exec = 0; + +- schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec)); ++ schedstat_set(curr->se.statistics.exec_max, ++ max(curr->se.statistics.exec_max, delta_exec)); + + curr->se.sum_exec_runtime += delta_exec; + account_group_exec_runtime(curr, delta_exec); +Index: linux-3.2/include/linux/init_task.h +=================================================================== +--- linux-3.2.orig/include/linux/init_task.h ++++ linux-3.2/include/linux/init_task.h +@@ -126,6 +126,14 @@ extern struct cred init_cred; + # define INIT_PERF_EVENTS(tsk) + #endif + ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define INIT_TIMER_LIST .posix_timer_list = NULL, ++#else ++# define INIT_TIMER_LIST ++#endif ++ ++#define INIT_TASK_COMM "swapper" ++ + /* + * INIT_TASK is used to set up the first task table, touch at + * your own risk!. Base=0, limit=0x1fffff (=2MB) +@@ -162,7 +170,7 @@ extern struct cred init_cred; + .group_leader = &tsk, \ + RCU_INIT_POINTER(.real_cred, &init_cred), \ + RCU_INIT_POINTER(.cred, &init_cred), \ +- .comm = "swapper", \ ++ .comm = INIT_TASK_COMM, \ + .thread = INIT_THREAD, \ + .fs = &init_fs, \ + .files = &init_files, \ +@@ -178,6 +186,7 @@ extern struct cred init_cred; + .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ + .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ + .timer_slack_ns = 50000, /* 50 usec default slack */ \ ++ INIT_TIMER_LIST \ + .pids = { \ + [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ + [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ +Index: linux-3.2/kernel/signal.c +=================================================================== +--- linux-3.2.orig/kernel/signal.c ++++ linux-3.2/kernel/signal.c +@@ -344,13 +344,45 @@ static bool task_participate_group_stop( + return false; + } + ++#ifdef __HAVE_ARCH_CMPXCHG ++static inline struct sigqueue *get_task_cache(struct task_struct *t) ++{ ++ struct sigqueue *q = t->sigqueue_cache; ++ ++ if (cmpxchg(&t->sigqueue_cache, q, NULL) != q) ++ return NULL; ++ return q; ++} ++ ++static inline int put_task_cache(struct task_struct *t, struct sigqueue *q) ++{ ++ if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL) ++ return 0; ++ return 1; ++} ++ ++#else ++ ++static inline struct sigqueue *get_task_cache(struct task_struct *t) ++{ ++ return NULL; ++} ++ ++static inline int put_task_cache(struct task_struct *t, struct sigqueue *q) ++{ ++ return 1; ++} ++ ++#endif ++ + /* + * allocate a new signal queue record + * - this may be called without locks if and only if t == current, otherwise an + * appropriate lock must be held to stop the target task from exiting + */ + static struct sigqueue * +-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) ++__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags, ++ int override_rlimit, int fromslab) + { + struct sigqueue *q = NULL; + struct user_struct *user; +@@ -367,7 +399,10 @@ __sigqueue_alloc(int sig, struct task_st + if (override_rlimit || + atomic_read(&user->sigpending) <= + task_rlimit(t, RLIMIT_SIGPENDING)) { +- q = kmem_cache_alloc(sigqueue_cachep, flags); ++ if (!fromslab) ++ q = get_task_cache(t); ++ if (!q) ++ q = kmem_cache_alloc(sigqueue_cachep, flags); + } else { + print_dropped_signal(sig); + } +@@ -384,6 +419,13 @@ __sigqueue_alloc(int sig, struct task_st + return q; + } + ++static struct 
sigqueue * ++__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, ++ int override_rlimit) ++{ ++ return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0); ++} ++ + static void __sigqueue_free(struct sigqueue *q) + { + if (q->flags & SIGQUEUE_PREALLOC) +@@ -393,6 +435,21 @@ static void __sigqueue_free(struct sigqu + kmem_cache_free(sigqueue_cachep, q); + } + ++static void sigqueue_free_current(struct sigqueue *q) ++{ ++ struct user_struct *up; ++ ++ if (q->flags & SIGQUEUE_PREALLOC) ++ return; ++ ++ up = q->user; ++ if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) { ++ atomic_dec(&up->sigpending); ++ free_uid(up); ++ } else ++ __sigqueue_free(q); ++} ++ + void flush_sigqueue(struct sigpending *queue) + { + struct sigqueue *q; +@@ -406,6 +463,21 @@ void flush_sigqueue(struct sigpending *q + } + + /* ++ * Called from __exit_signal. Flush tsk->pending and ++ * tsk->sigqueue_cache ++ */ ++void flush_task_sigqueue(struct task_struct *tsk) ++{ ++ struct sigqueue *q; ++ ++ flush_sigqueue(&tsk->pending); ++ ++ q = get_task_cache(tsk); ++ if (q) ++ kmem_cache_free(sigqueue_cachep, q); ++} ++ ++/* + * Flush all pending signals for a task. + */ + void __flush_signals(struct task_struct *t) +@@ -554,7 +626,7 @@ static void collect_signal(int sig, stru + still_pending: + list_del_init(&first->list); + copy_siginfo(info, &first->info); +- __sigqueue_free(first); ++ sigqueue_free_current(first); + } else { + /* + * Ok, it wasn't in the queue. This must be +@@ -600,6 +672,8 @@ int dequeue_signal(struct task_struct *t + { + int signr; + ++ WARN_ON_ONCE(tsk != current); ++ + /* We only dequeue private signals from ourselves, we don't let + * signalfd steal them + */ +@@ -682,6 +756,9 @@ void signal_wake_up(struct task_struct * + + set_tsk_thread_flag(t, TIF_SIGPENDING); + ++ if (unlikely(t == current)) ++ return; ++ + /* + * For SIGKILL, we want to wake it up in the stopped/traced/killable + * case. We don't check t->state here because there is a race with it +@@ -1252,12 +1329,12 @@ struct sighand_struct *__lock_task_sigha + struct sighand_struct *sighand; + + for (;;) { +- local_irq_save(*flags); ++ local_irq_save_nort(*flags); + rcu_read_lock(); + sighand = rcu_dereference(tsk->sighand); + if (unlikely(sighand == NULL)) { + rcu_read_unlock(); +- local_irq_restore(*flags); ++ local_irq_restore_nort(*flags); + break; + } + +@@ -1268,7 +1345,7 @@ struct sighand_struct *__lock_task_sigha + } + spin_unlock(&sighand->siglock); + rcu_read_unlock(); +- local_irq_restore(*flags); ++ local_irq_restore_nort(*flags); + } + + return sighand; +@@ -1515,7 +1592,8 @@ EXPORT_SYMBOL(kill_pid); + */ + struct sigqueue *sigqueue_alloc(void) + { +- struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); ++ /* Preallocated sigqueue objects always from the slabcache ! */ ++ struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1); + + if (q) + q->flags |= SIGQUEUE_PREALLOC; +@@ -1857,15 +1935,7 @@ static void ptrace_stop(int exit_code, i + if (gstop_done && ptrace_reparented(current)) + do_notify_parent_cldstop(current, false, why); + +- /* +- * Don't want to allow preemption here, because +- * sys_ptrace() needs this task to be inactive. +- * +- * XXX: implement read_unlock_no_resched(). 
+- */ +- preempt_disable(); + read_unlock(&tasklist_lock); +- preempt_enable_no_resched(); + schedule(); + } else { + /* +Index: linux-3.2/arch/arm/kernel/perf_event.c +=================================================================== +--- linux-3.2.orig/arch/arm/kernel/perf_event.c ++++ linux-3.2/arch/arm/kernel/perf_event.c +@@ -432,7 +432,7 @@ armpmu_reserve_hardware(struct arm_pmu * + } + + err = request_irq(irq, handle_irq, +- IRQF_DISABLED | IRQF_NOBALANCING, ++ IRQF_NOBALANCING | IRQF_NO_THREAD, + "arm-pmu", armpmu); + if (err) { + pr_err("unable to request IRQ%d for ARM PMU counters\n", +Index: linux-3.2/arch/arm/Kconfig +=================================================================== +--- linux-3.2.orig/arch/arm/Kconfig ++++ linux-3.2/arch/arm/Kconfig +@@ -29,6 +29,7 @@ config ARM + select HAVE_GENERIC_HARDIRQS + select HAVE_SPARSE_IRQ + select GENERIC_IRQ_SHOW ++ select IRQ_FORCED_THREADING + select CPU_PM if (SUSPEND || CPU_IDLE) + help + The ARM series is a line of low-power-consumption RISC chip designs +@@ -1654,7 +1655,7 @@ config HAVE_ARCH_PFN_VALID + + config HIGHMEM + bool "High Memory Support" +- depends on MMU ++ depends on MMU && !PREEMPT_RT_FULL + help + The address space of ARM processors is only 4 Gigabytes large + and it has to accommodate user address space, kernel address +Index: linux-3.2/arch/arm/mach-exynos/platsmp.c +=================================================================== +--- linux-3.2.orig/arch/arm/mach-exynos/platsmp.c ++++ linux-3.2/arch/arm/mach-exynos/platsmp.c +@@ -63,7 +63,7 @@ static void __iomem *scu_base_addr(void) + return (void __iomem *)(S5P_VA_SCU); + } + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + static void __cpuinit exynos4_gic_secondary_init(void) + { +@@ -108,8 +108,8 @@ void __cpuinit platform_secondary_init(u + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -120,7 +120,7 @@ int __cpuinit boot_secondary(unsigned in + * Set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * The secondary processor is waiting to be released from +@@ -149,7 +149,7 @@ int __cpuinit boot_secondary(unsigned in + + if (timeout == 0) { + printk(KERN_ERR "cpu1 power enable failed"); +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + return -ETIMEDOUT; + } + } +@@ -177,7 +177,7 @@ int __cpuinit boot_secondary(unsigned in + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? -ENOSYS : 0; + } +Index: linux-3.2/arch/arm/mach-msm/platsmp.c +=================================================================== +--- linux-3.2.orig/arch/arm/mach-msm/platsmp.c ++++ linux-3.2/arch/arm/mach-msm/platsmp.c +@@ -39,7 +39,7 @@ extern void msm_secondary_startup(void); + */ + volatile int pen_release = -1; + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + static inline int get_core_count(void) + { +@@ -69,8 +69,8 @@ void __cpuinit platform_secondary_init(u + /* + * Synchronise with the boot thread. 
+ */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + static __cpuinit void prepare_cold_cpu(unsigned int cpu) +@@ -107,7 +107,7 @@ int __cpuinit boot_secondary(unsigned in + * set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * The secondary processor is waiting to be released from +@@ -141,7 +141,7 @@ int __cpuinit boot_secondary(unsigned in + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? -ENOSYS : 0; + } +Index: linux-3.2/arch/arm/mach-omap2/omap-smp.c +=================================================================== +--- linux-3.2.orig/arch/arm/mach-omap2/omap-smp.c ++++ linux-3.2/arch/arm/mach-omap2/omap-smp.c +@@ -29,7 +29,7 @@ + /* SCU base address */ + static void __iomem *scu_base; + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + void __cpuinit platform_secondary_init(unsigned int cpu) + { +@@ -43,8 +43,8 @@ void __cpuinit platform_secondary_init(u + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -53,7 +53,7 @@ int __cpuinit boot_secondary(unsigned in + * Set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * Update the AuxCoreBoot0 with boot state for secondary core. +@@ -70,7 +70,7 @@ int __cpuinit boot_secondary(unsigned in + * Now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return 0; + } +Index: linux-3.2/arch/arm/mach-tegra/platsmp.c +=================================================================== +--- linux-3.2.orig/arch/arm/mach-tegra/platsmp.c ++++ linux-3.2/arch/arm/mach-tegra/platsmp.c +@@ -28,7 +28,7 @@ + + extern void tegra_secondary_startup(void); + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + static void __iomem *scu_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE); + + #define EVP_CPU_RESET_VECTOR \ +@@ -50,8 +50,8 @@ void __cpuinit platform_secondary_init(u + /* + * Synchronise with the boot thread. 
+ */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -65,7 +65,7 @@ int __cpuinit boot_secondary(unsigned in + * set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + + /* set the reset vector to point to the secondary_startup routine */ +@@ -101,7 +101,7 @@ int __cpuinit boot_secondary(unsigned in + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return 0; + } +Index: linux-3.2/arch/arm/mach-ux500/platsmp.c +=================================================================== +--- linux-3.2.orig/arch/arm/mach-ux500/platsmp.c ++++ linux-3.2/arch/arm/mach-ux500/platsmp.c +@@ -57,7 +57,7 @@ static void __iomem *scu_base_addr(void) + return NULL; + } + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + void __cpuinit platform_secondary_init(unsigned int cpu) + { +@@ -77,8 +77,8 @@ void __cpuinit platform_secondary_init(u + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -89,7 +89,7 @@ int __cpuinit boot_secondary(unsigned in + * set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * The secondary processor is waiting to be released from +@@ -110,7 +110,7 @@ int __cpuinit boot_secondary(unsigned in + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? -ENOSYS : 0; + } +Index: linux-3.2/arch/arm/plat-versatile/platsmp.c +=================================================================== +--- linux-3.2.orig/arch/arm/plat-versatile/platsmp.c ++++ linux-3.2/arch/arm/plat-versatile/platsmp.c +@@ -37,7 +37,7 @@ static void __cpuinit write_pen_release( + outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1)); + } + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + void __cpuinit platform_secondary_init(unsigned int cpu) + { +@@ -57,8 +57,8 @@ void __cpuinit platform_secondary_init(u + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -69,7 +69,7 @@ int __cpuinit boot_secondary(unsigned in + * Set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * This is really belt and braces; we hold unintended secondary +@@ -99,7 +99,7 @@ int __cpuinit boot_secondary(unsigned in + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? 
-ENOSYS : 0; + } +Index: linux-3.2/include/linux/sched.h +=================================================================== +--- linux-3.2.orig/include/linux/sched.h ++++ linux-3.2/include/linux/sched.h +@@ -63,6 +63,7 @@ struct sched_param { + #include + #include + ++#include + #include + #include + #include +@@ -91,6 +92,7 @@ struct sched_param { + #include + #include + #include ++#include + + #include + +@@ -359,6 +361,7 @@ extern signed long schedule_timeout_inte + extern signed long schedule_timeout_killable(signed long timeout); + extern signed long schedule_timeout_uninterruptible(signed long timeout); + asmlinkage void schedule(void); ++extern void schedule_preempt_disabled(void); + extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner); + + struct nsproxy; +@@ -1070,6 +1073,7 @@ struct sched_domain; + #define WF_SYNC 0x01 /* waker goes to sleep after wakup */ + #define WF_FORK 0x02 /* child wakeup after fork */ + #define WF_MIGRATED 0x04 /* internal use, task got migrated */ ++#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */ + + #define ENQUEUE_WAKEUP 1 + #define ENQUEUE_HEAD 2 +@@ -1219,6 +1223,7 @@ enum perf_event_task_context { + + struct task_struct { + volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ ++ volatile long saved_state; /* saved state for "spinlock sleepers" */ + void *stack; + atomic_t usage; + unsigned int flags; /* per process flags, defined below */ +@@ -1255,6 +1260,12 @@ struct task_struct { + #endif + + unsigned int policy; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ int migrate_disable; ++#ifdef CONFIG_SCHED_DEBUG ++ int migrate_disable_atomic; ++#endif ++#endif + cpumask_t cpus_allowed; + + #ifdef CONFIG_PREEMPT_RCU +@@ -1353,6 +1364,9 @@ struct task_struct { + + struct task_cputime cputime_expires; + struct list_head cpu_timers[3]; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct task_struct *posix_timer_list; ++#endif + + /* process credentials */ + const struct cred __rcu *real_cred; /* objective and real subjective task +@@ -1386,6 +1400,7 @@ struct task_struct { + /* signal handlers */ + struct signal_struct *signal; + struct sighand_struct *sighand; ++ struct sigqueue *sigqueue_cache; + + sigset_t blocked, real_blocked; + sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ +@@ -1429,6 +1444,9 @@ struct task_struct { + /* mutex deadlock detection */ + struct mutex_waiter *blocked_on; + #endif ++#ifdef CONFIG_PREEMPT_RT_FULL ++ int pagefault_disabled; ++#endif + #ifdef CONFIG_TRACE_IRQFLAGS + unsigned int irq_events; + unsigned long hardirq_enable_ip; +@@ -1561,6 +1579,12 @@ struct task_struct { + unsigned long trace; + /* bitmask and counter of trace recursion */ + unsigned long trace_recursion; ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ u64 preempt_timestamp_hist; ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ unsigned long timer_offset; ++#endif ++#endif + #endif /* CONFIG_TRACING */ + #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */ + struct memcg_batch_info { +@@ -1573,10 +1597,26 @@ struct task_struct { + #ifdef CONFIG_HAVE_HW_BREAKPOINT + atomic_t ptrace_bp_refcnt; + #endif ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct rcu_head put_rcu; ++ int softirq_nestcnt; ++#endif ++#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM ++ int kmap_idx; ++ pte_t kmap_pte[KM_TYPE_NR]; ++#endif + }; + +-/* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ +-#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) ++#ifdef CONFIG_PREEMPT_RT_FULL ++static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; } ++#else ++static inline bool cur_pf_disabled(void) { return false; } ++#endif ++ ++static inline bool pagefault_disabled(void) ++{ ++ return in_atomic() || cur_pf_disabled(); ++} + + /* + * Priority of a process goes from 0..MAX_PRIO-1, valid RT +@@ -1746,6 +1786,15 @@ extern struct pid *cad_pid; + extern void free_task(struct task_struct *tsk); + #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) + ++#ifdef CONFIG_PREEMPT_RT_BASE ++extern void __put_task_struct_cb(struct rcu_head *rhp); ++ ++static inline void put_task_struct(struct task_struct *t) ++{ ++ if (atomic_dec_and_test(&t->usage)) ++ call_rcu(&t->put_rcu, __put_task_struct_cb); ++} ++#else + extern void __put_task_struct(struct task_struct *t); + + static inline void put_task_struct(struct task_struct *t) +@@ -1753,6 +1802,7 @@ static inline void put_task_struct(struc + if (atomic_dec_and_test(&t->usage)) + __put_task_struct(t); + } ++#endif + + extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st); + extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st); +@@ -1778,6 +1828,7 @@ extern void thread_group_times(struct ta + #define PF_FROZEN 0x00010000 /* frozen for system suspend */ + #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ + #define PF_KSWAPD 0x00040000 /* I am kswapd */ ++#define PF_STOMPER 0x00080000 /* I am a stomp machine thread */ + #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ + #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ + #define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ +@@ -2051,12 +2102,20 @@ extern unsigned int sysctl_sched_cfs_ban + extern int rt_mutex_getprio(struct task_struct *p); + extern void rt_mutex_setprio(struct task_struct *p, int prio); + extern void rt_mutex_adjust_pi(struct task_struct *p); ++static inline bool tsk_is_pi_blocked(struct task_struct *tsk) ++{ ++ return tsk->pi_blocked_on != NULL; ++} + #else + static inline int rt_mutex_getprio(struct task_struct *p) + { + return p->normal_prio; + } + # define rt_mutex_adjust_pi(p) do { } while (0) ++static inline bool tsk_is_pi_blocked(struct task_struct *tsk) ++{ ++ return false; ++} + #endif + + extern bool yield_to(struct task_struct *p, bool preempt); +@@ -2136,6 +2195,7 @@ extern void xtime_update(unsigned long t + + extern int wake_up_state(struct task_struct *tsk, unsigned int state); + extern int wake_up_process(struct task_struct *tsk); ++extern int wake_up_lock_sleeper(struct task_struct * tsk); + extern void wake_up_new_task(struct task_struct *tsk); + #ifdef CONFIG_SMP + extern void kick_process(struct task_struct *tsk); +@@ -2226,12 +2286,24 @@ extern struct mm_struct * mm_alloc(void) + + /* mmdrop drops the mm and the page tables */ + extern void __mmdrop(struct mm_struct *); ++ + static inline void mmdrop(struct mm_struct * mm) + { + if (unlikely(atomic_dec_and_test(&mm->mm_count))) + __mmdrop(mm); + } + ++#ifdef CONFIG_PREEMPT_RT_BASE ++extern void __mmdrop_delayed(struct rcu_head *rhp); ++static inline void mmdrop_delayed(struct mm_struct *mm) ++{ ++ if (atomic_dec_and_test(&mm->mm_count)) ++ call_rcu(&mm->delayed_drop, __mmdrop_delayed); ++} ++#else ++# define mmdrop_delayed(mm) mmdrop(mm) ++#endif ++ + /* mmput gets rid of the mappings and all user-space */ + extern void mmput(struct mm_struct *); + /* Grab 
a reference to a task's mm, if it is not already going away */ +@@ -2534,7 +2606,7 @@ extern int _cond_resched(void); + + extern int __cond_resched_lock(spinlock_t *lock); + +-#ifdef CONFIG_PREEMPT_COUNT ++#if defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT_FULL) + #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET + #else + #define PREEMPT_LOCK_OFFSET 0 +@@ -2545,12 +2617,16 @@ extern int __cond_resched_lock(spinlock_ + __cond_resched_lock(lock); \ + }) + ++#ifndef CONFIG_PREEMPT_RT_FULL + extern int __cond_resched_softirq(void); + + #define cond_resched_softirq() ({ \ + __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ + __cond_resched_softirq(); \ + }) ++#else ++# define cond_resched_softirq() cond_resched() ++#endif + + /* + * Does a critical section need to be broken due to another +@@ -2613,6 +2689,26 @@ static inline void set_task_cpu(struct t + + #endif /* CONFIG_SMP */ + ++static inline int __migrate_disabled(struct task_struct *p) ++{ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ return p->migrate_disable; ++#else ++ return 0; ++#endif ++} ++ ++/* Future-safe accessor for struct task_struct's cpus_allowed. */ ++static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p) ++{ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (p->migrate_disable) ++ return cpumask_of(task_cpu(p)); ++#endif ++ ++ return &p->cpus_allowed; ++} ++ + extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); + extern long sched_getaffinity(pid_t pid, struct cpumask *mask); + +Index: linux-3.2/arch/arm/kernel/process.c +=================================================================== +--- linux-3.2.orig/arch/arm/kernel/process.c ++++ linux-3.2/arch/arm/kernel/process.c +@@ -211,9 +211,7 @@ void cpu_idle(void) + } + leds_event(led_idle_end); + tick_nohz_restart_sched_tick(); +- preempt_enable_no_resched(); +- schedule(); +- preempt_disable(); ++ schedule_preempt_disabled(); + } + } + +@@ -488,6 +486,31 @@ unsigned long arch_randomize_brk(struct + } + + #ifdef CONFIG_MMU ++ ++/* ++ * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not ++ * initialized by pgtable_page_ctor() then a coredump of the vector page will ++ * fail. ++ */ ++static int __init vectors_user_mapping_init_page(void) ++{ ++ struct page *page; ++ unsigned long addr = 0xffff0000; ++ pgd_t *pgd; ++ pud_t *pud; ++ pmd_t *pmd; ++ ++ pgd = pgd_offset_k(addr); ++ pud = pud_offset(pgd, addr); ++ pmd = pmd_offset(pud, addr); ++ page = pmd_page(*(pmd)); ++ ++ pgtable_page_ctor(page); ++ ++ return 0; ++} ++late_initcall(vectors_user_mapping_init_page); ++ + /* + * The vectors page is always readable from user space for the + * atomic helpers and the signal restart code. 
Let's declare a mapping +Index: linux-3.2/arch/avr32/kernel/process.c +=================================================================== +--- linux-3.2.orig/arch/avr32/kernel/process.c ++++ linux-3.2/arch/avr32/kernel/process.c +@@ -38,9 +38,7 @@ void cpu_idle(void) + while (!need_resched()) + cpu_idle_sleep(); + tick_nohz_restart_sched_tick(); +- preempt_enable_no_resched(); +- schedule(); +- preempt_disable(); ++ schedule_preempt_disabled(); + } + } + +Index: linux-3.2/arch/blackfin/kernel/process.c +=================================================================== +--- linux-3.2.orig/arch/blackfin/kernel/process.c ++++ linux-3.2/arch/blackfin/kernel/process.c +@@ -92,9 +92,7 @@ void cpu_idle(void) + while (!need_resched()) + idle(); + tick_nohz_restart_sched_tick(); +- preempt_enable_no_resched(); +- schedule(); +- preempt_disable(); ++ schedule_preempt_disabled(); + } + } + +Index: linux-3.2/arch/cris/kernel/process.c +=================================================================== +--- linux-3.2.orig/arch/cris/kernel/process.c ++++ linux-3.2/arch/cris/kernel/process.c +@@ -115,9 +115,7 @@ void cpu_idle (void) + idle = default_idle; + idle(); + } +- preempt_enable_no_resched(); +- schedule(); +- preempt_disable(); ++ schedule_preempt_disabled(); + } + } + +Index: linux-3.2/arch/frv/kernel/process.c +=================================================================== +--- linux-3.2.orig/arch/frv/kernel/process.c ++++ linux-3.2/arch/frv/kernel/process.c +@@ -92,9 +92,7 @@ void cpu_idle(void) + idle(); + } + +- preempt_enable_no_resched(); +- schedule(); +- preempt_disable(); ++ schedule_preempt_disabled(); + } + } + +Index: linux-3.2/arch/h8300/kernel/process.c +=================================================================== +--- linux-3.2.orig/arch/h8300/kernel/process.c ++++ linux-3.2/arch/h8300/kernel/process.c +@@ -81,9 +81,7 @@ void cpu_idle(void) + while (1) { + while (!need_resched()) + idle(); +- preempt_enable_no_resched(); +- schedule(); +- preempt_disable(); ++ schedule_preempt_disabled(); + } + } + +Index: linux-3.2/arch/ia64/kernel/process.c +=================================================================== +--- linux-3.2.orig/arch/ia64/kernel/process.c ++++ linux-3.2/arch/ia64/kernel/process.c +@@ -330,9 +330,7 @@ cpu_idle (void) + normal_xtp(); + #endif + } +- preempt_enable_no_resched(); +- schedule(); +- preempt_disable(); ++ schedule_preempt_disabled(); + check_pgt_cache(); + if (cpu_is_offline(cpu)) + play_dead(); +Index: linux-3.2/arch/m32r/kernel/process.c +=================================================================== +--- linux-3.2.orig/arch/m32r/kernel/process.c ++++ linux-3.2/arch/m32r/kernel/process.c +@@ -90,9 +90,7 @@ void cpu_idle (void) + + idle(); + } +- preempt_enable_no_resched(); +- schedule(); +- preempt_disable(); ++ schedule_preempt_disabled(); + } + } + +Index: linux-3.2/arch/m68k/kernel/process_mm.c +=================================================================== +--- linux-3.2.orig/arch/m68k/kernel/process_mm.c ++++ linux-3.2/arch/m68k/kernel/process_mm.c +@@ -94,9 +94,7 @@ void cpu_idle(void) + while (1) { + while (!need_resched()) + idle(); +- preempt_enable_no_resched(); +- schedule(); +- preempt_disable(); ++ schedule_preempt_disabled(); + } + } + +Index: linux-3.2/arch/m68k/kernel/process_no.c +=================================================================== +--- linux-3.2.orig/arch/m68k/kernel/process_no.c ++++ linux-3.2/arch/m68k/kernel/process_no.c +@@ -73,9 +73,7 @@ void cpu_idle(void) + /* endless idle loop with 
no priority at all */ + while (1) { + idle(); +- preempt_enable_no_resched(); +- schedule(); +- preempt_disable(); ++ schedule_preempt_disabled(); + } + } + +Index: linux-3.2/arch/microblaze/kernel/process.c +=================================================================== +--- linux-3.2.orig/arch/microblaze/kernel/process.c ++++ linux-3.2/arch/microblaze/kernel/process.c +@@ -108,9 +108,7 @@ void cpu_idle(void) + idle(); + tick_nohz_restart_sched_tick(); + +- preempt_enable_no_resched(); +- schedule(); +- preempt_disable(); ++ schedule_preempt_disabled(); + check_pgt_cache(); + } + } +Index: linux-3.2/arch/mips/kernel/process.c +=================================================================== +--- linux-3.2.orig/arch/mips/kernel/process.c ++++ linux-3.2/arch/mips/kernel/process.c +@@ -78,9 +78,7 @@ void __noreturn cpu_idle(void) + play_dead(); + #endif + tick_nohz_restart_sched_tick(); +- preempt_enable_no_resched(); +- schedule(); +- preempt_disable(); ++ schedule_preempt_disabled(); + } + } + +Index: linux-3.2/arch/mn10300/kernel/process.c +=================================================================== +--- linux-3.2.orig/arch/mn10300/kernel/process.c ++++ linux-3.2/arch/mn10300/kernel/process.c +@@ -123,9 +123,7 @@ void cpu_idle(void) + idle(); + } + +- preempt_enable_no_resched(); +- schedule(); +- preempt_disable(); ++ schedule_preempt_disabled(); + } + } + +Index: linux-3.2/arch/parisc/kernel/process.c +=================================================================== +--- linux-3.2.orig/arch/parisc/kernel/process.c ++++ linux-3.2/arch/parisc/kernel/process.c +@@ -71,9 +71,7 @@ void cpu_idle(void) + while (1) { + while (!need_resched()) + barrier(); +- preempt_enable_no_resched(); +- schedule(); +- preempt_disable(); ++ schedule_preempt_disabled(); + check_pgt_cache(); + } + } +Index: linux-3.2/arch/powerpc/kernel/idle.c +=================================================================== +--- linux-3.2.orig/arch/powerpc/kernel/idle.c ++++ linux-3.2/arch/powerpc/kernel/idle.c +@@ -94,11 +94,11 @@ void cpu_idle(void) + HMT_medium(); + ppc64_runlatch_on(); + tick_nohz_restart_sched_tick(); +- preempt_enable_no_resched(); +- if (cpu_should_die()) ++ if (cpu_should_die()) { ++ __preempt_enable_no_resched(); + cpu_die(); +- schedule(); +- preempt_disable(); ++ } ++ schedule_preempt_disabled(); + } + } + +Index: linux-3.2/arch/powerpc/platforms/iseries/setup.c +=================================================================== +--- linux-3.2.orig/arch/powerpc/platforms/iseries/setup.c ++++ linux-3.2/arch/powerpc/platforms/iseries/setup.c +@@ -582,9 +582,7 @@ static void iseries_shared_idle(void) + if (hvlpevent_is_pending()) + process_iSeries_events(); + +- preempt_enable_no_resched(); +- schedule(); +- preempt_disable(); ++ schedule_preempt_disabled(); + } + } + +@@ -611,9 +609,7 @@ static void iseries_dedicated_idle(void) + + ppc64_runlatch_on(); + tick_nohz_restart_sched_tick(); +- preempt_enable_no_resched(); +- schedule(); +- preempt_disable(); ++ schedule_preempt_disabled(); + } + } + +Index: linux-3.2/arch/s390/kernel/process.c +=================================================================== +--- linux-3.2.orig/arch/s390/kernel/process.c ++++ linux-3.2/arch/s390/kernel/process.c +@@ -95,9 +95,7 @@ void cpu_idle(void) + while (!need_resched()) + default_idle(); + tick_nohz_restart_sched_tick(); +- preempt_enable_no_resched(); +- schedule(); +- preempt_disable(); ++ schedule_preempt_disabled(); + } + } + +Index: linux-3.2/arch/score/kernel/process.c 
+=================================================================== +--- linux-3.2.orig/arch/score/kernel/process.c ++++ linux-3.2/arch/score/kernel/process.c +@@ -53,9 +53,7 @@ void __noreturn cpu_idle(void) + while (!need_resched()) + barrier(); + +- preempt_enable_no_resched(); +- schedule(); +- preempt_disable(); ++ schedule_preempt_disabled(); + } + } + +Index: linux-3.2/arch/sh/kernel/idle.c +=================================================================== +--- linux-3.2.orig/arch/sh/kernel/idle.c ++++ linux-3.2/arch/sh/kernel/idle.c +@@ -112,9 +112,7 @@ void cpu_idle(void) + } + + tick_nohz_restart_sched_tick(); +- preempt_enable_no_resched(); +- schedule(); +- preempt_disable(); ++ schedule_preempt_disabled(); + } + } + +Index: linux-3.2/arch/sparc/kernel/process_32.c +=================================================================== +--- linux-3.2.orig/arch/sparc/kernel/process_32.c ++++ linux-3.2/arch/sparc/kernel/process_32.c +@@ -113,9 +113,7 @@ void cpu_idle(void) + while (!need_resched()) + cpu_relax(); + } +- preempt_enable_no_resched(); +- schedule(); +- preempt_disable(); ++ schedule_preempt_disabled(); + check_pgt_cache(); + } + } +@@ -138,9 +136,7 @@ void cpu_idle(void) + while (!need_resched()) + cpu_relax(); + } +- preempt_enable_no_resched(); +- schedule(); +- preempt_disable(); ++ schedule_preempt_disabled(); + check_pgt_cache(); + } + } +Index: linux-3.2/arch/sparc/kernel/process_64.c +=================================================================== +--- linux-3.2.orig/arch/sparc/kernel/process_64.c ++++ linux-3.2/arch/sparc/kernel/process_64.c +@@ -102,15 +102,13 @@ void cpu_idle(void) + + tick_nohz_restart_sched_tick(); + +- preempt_enable_no_resched(); +- + #ifdef CONFIG_HOTPLUG_CPU +- if (cpu_is_offline(cpu)) ++ if (cpu_is_offline(cpu)) { ++ __preempt_enable_no_resched(); + cpu_play_dead(); ++ } + #endif +- +- schedule(); +- preempt_disable(); ++ schedule_preempt_disabled(); + } + } + +Index: linux-3.2/arch/tile/kernel/process.c +=================================================================== +--- linux-3.2.orig/arch/tile/kernel/process.c ++++ linux-3.2/arch/tile/kernel/process.c +@@ -106,9 +106,7 @@ void cpu_idle(void) + current_thread_info()->status |= TS_POLLING; + } + tick_nohz_restart_sched_tick(); +- preempt_enable_no_resched(); +- schedule(); +- preempt_disable(); ++ schedule_preempt_disabled(); + } + } + +Index: linux-3.2/arch/x86/kernel/process_32.c +=================================================================== +--- linux-3.2.orig/arch/x86/kernel/process_32.c ++++ linux-3.2/arch/x86/kernel/process_32.c +@@ -39,6 +39,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -117,9 +118,7 @@ void cpu_idle(void) + start_critical_timings(); + } + tick_nohz_restart_sched_tick(); +- preempt_enable_no_resched(); +- schedule(); +- preempt_disable(); ++ schedule_preempt_disabled(); + } + } + +@@ -352,6 +351,41 @@ __switch_to(struct task_struct *prev_p, + task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) + __switch_to_xtra(prev_p, next_p, tss); + ++#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM ++ /* ++ * Save @prev's kmap_atomic stack ++ */ ++ prev_p->kmap_idx = __this_cpu_read(__kmap_atomic_idx); ++ if (unlikely(prev_p->kmap_idx)) { ++ int i; ++ ++ for (i = 0; i < prev_p->kmap_idx; i++) { ++ int idx = i + KM_TYPE_NR * smp_processor_id(); ++ ++ pte_t *ptep = kmap_pte - idx; ++ prev_p->kmap_pte[i] = *ptep; ++ kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx)); ++ } ++ ++ 
__this_cpu_write(__kmap_atomic_idx, 0); ++ } ++ ++ /* ++ * Restore @next_p's kmap_atomic stack ++ */ ++ if (unlikely(next_p->kmap_idx)) { ++ int i; ++ ++ __this_cpu_write(__kmap_atomic_idx, next_p->kmap_idx); ++ ++ for (i = 0; i < next_p->kmap_idx; i++) { ++ int idx = i + KM_TYPE_NR * smp_processor_id(); ++ ++ set_pte(kmap_pte - idx, next_p->kmap_pte[i]); ++ } ++ } ++#endif ++ + /* If we're going to preload the fpu context, make sure clts + is run while we're batching the cpu state updates. */ + if (preload_fpu) +Index: linux-3.2/arch/x86/kernel/process_64.c +=================================================================== +--- linux-3.2.orig/arch/x86/kernel/process_64.c ++++ linux-3.2/arch/x86/kernel/process_64.c +@@ -150,9 +150,7 @@ void cpu_idle(void) + } + + tick_nohz_restart_sched_tick(); +- preempt_enable_no_resched(); +- schedule(); +- preempt_disable(); ++ schedule_preempt_disabled(); + } + } + +Index: linux-3.2/arch/xtensa/kernel/process.c +=================================================================== +--- linux-3.2.orig/arch/xtensa/kernel/process.c ++++ linux-3.2/arch/xtensa/kernel/process.c +@@ -113,9 +113,7 @@ void cpu_idle(void) + while (1) { + while (!need_resched()) + platform_idle(); +- preempt_enable_no_resched(); +- schedule(); +- preempt_disable(); ++ schedule_preempt_disabled(); + } + } + +Index: linux-3.2/init/main.c +=================================================================== +--- linux-3.2.orig/init/main.c ++++ linux-3.2/init/main.c +@@ -68,6 +68,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -378,11 +379,8 @@ static noinline void __init_refok rest_i + * at least once to get things moving: + */ + init_idle_bootup_task(current); +- preempt_enable_no_resched(); +- schedule(); +- ++ schedule_preempt_disabled(); + /* Call into cpu_idle with preempt disabled */ +- preempt_disable(); + cpu_idle(); + } + +@@ -492,6 +490,7 @@ asmlinkage void __init start_kernel(void + * Interrupts are still disabled. Do necessary setups, then + * enable them + */ ++ softirq_early_init(); + tick_init(); + boot_cpu_init(); + page_address_init(); +Index: linux-3.2/kernel/mutex.c +=================================================================== +--- linux-3.2.orig/kernel/mutex.c ++++ linux-3.2/kernel/mutex.c +@@ -240,9 +240,7 @@ __mutex_lock_common(struct mutex *lock, + + /* didn't get the lock, go to sleep: */ + spin_unlock_mutex(&lock->wait_lock, flags); +- preempt_enable_no_resched(); +- schedule(); +- preempt_disable(); ++ schedule_preempt_disabled(); + spin_lock_mutex(&lock->wait_lock, flags); + } + +Index: linux-3.2/kernel/softirq.c +=================================================================== +--- linux-3.2.orig/kernel/softirq.c ++++ linux-3.2/kernel/softirq.c +@@ -24,6 +24,7 @@ + #include + #include + #include ++#include + + #define CREATE_TRACE_POINTS + #include +@@ -61,6 +62,67 @@ char *softirq_to_name[NR_SOFTIRQS] = { + "TASKLET", "SCHED", "HRTIMER", "RCU" + }; + ++#ifdef CONFIG_NO_HZ ++# ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * On preempt-rt a softirq might be blocked on a lock. There might be ++ * no other runnable task on this CPU because the lock owner runs on ++ * some other CPU. So we have to go into idle with the pending bit ++ * set. Therefor we need to check this otherwise we warn about false ++ * positives which confuses users and defeats the whole purpose of ++ * this test. ++ * ++ * This code is called with interrupts disabled. 
++ */ ++void softirq_check_pending_idle(void) ++{ ++ static int rate_limit; ++ u32 warnpending = 0, pending = local_softirq_pending(); ++ ++ if (rate_limit >= 10) ++ return; ++ ++ if (pending) { ++ struct task_struct *tsk; ++ ++ tsk = __get_cpu_var(ksoftirqd); ++ /* ++ * The wakeup code in rtmutex.c wakes up the task ++ * _before_ it sets pi_blocked_on to NULL under ++ * tsk->pi_lock. So we need to check for both: state ++ * and pi_blocked_on. ++ */ ++ raw_spin_lock(&tsk->pi_lock); ++ ++ if (!tsk->pi_blocked_on && !(tsk->state == TASK_RUNNING)) ++ warnpending = 1; ++ ++ raw_spin_unlock(&tsk->pi_lock); ++ } ++ ++ if (warnpending) { ++ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", ++ pending); ++ rate_limit++; ++ } ++} ++# else ++/* ++ * On !PREEMPT_RT we just printk rate limited: ++ */ ++void softirq_check_pending_idle(void) ++{ ++ static int rate_limit; ++ ++ if (rate_limit < 10) { ++ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", ++ local_softirq_pending()); ++ rate_limit++; ++ } ++} ++# endif ++#endif ++ + /* + * we cannot loop indefinitely here to avoid userspace starvation, + * but we also don't want to introduce a worst case 1/HZ latency +@@ -76,6 +138,36 @@ static void wakeup_softirqd(void) + wake_up_process(tsk); + } + ++static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs) ++{ ++ struct softirq_action *h = softirq_vec; ++ unsigned int prev_count = preempt_count(); ++ ++ local_irq_enable(); ++ for ( ; pending; h++, pending >>= 1) { ++ unsigned int vec_nr = h - softirq_vec; ++ ++ if (!(pending & 1)) ++ continue; ++ ++ kstat_incr_softirqs_this_cpu(vec_nr); ++ trace_softirq_entry(vec_nr); ++ h->action(h); ++ trace_softirq_exit(vec_nr); ++ if (unlikely(prev_count != preempt_count())) { ++ printk(KERN_ERR ++ "huh, entered softirq %u %s %p with preempt_count %08x exited with %08x?\n", ++ vec_nr, softirq_to_name[vec_nr], h->action, ++ prev_count, (unsigned int) preempt_count()); ++ preempt_count() = prev_count; ++ } ++ if (need_rcu_bh_qs) ++ rcu_bh_qs(cpu); ++ } ++ local_irq_disable(); ++} ++ ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * preempt_count and SOFTIRQ_OFFSET usage: + * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving +@@ -206,7 +298,6 @@ EXPORT_SYMBOL(local_bh_enable_ip); + + asmlinkage void __do_softirq(void) + { +- struct softirq_action *h; + __u32 pending; + int max_restart = MAX_SOFTIRQ_RESTART; + int cpu; +@@ -215,7 +306,7 @@ asmlinkage void __do_softirq(void) + account_system_vtime(current); + + __local_bh_disable((unsigned long)__builtin_return_address(0), +- SOFTIRQ_OFFSET); ++ SOFTIRQ_OFFSET); + lockdep_softirq_enter(); + + cpu = smp_processor_id(); +@@ -223,36 +314,7 @@ restart: + /* Reset the pending bitmask before enabling irqs */ + set_softirq_pending(0); + +- local_irq_enable(); +- +- h = softirq_vec; +- +- do { +- if (pending & 1) { +- unsigned int vec_nr = h - softirq_vec; +- int prev_count = preempt_count(); +- +- kstat_incr_softirqs_this_cpu(vec_nr); +- +- trace_softirq_entry(vec_nr); +- h->action(h); +- trace_softirq_exit(vec_nr); +- if (unlikely(prev_count != preempt_count())) { +- printk(KERN_ERR "huh, entered softirq %u %s %p" +- "with preempt_count %08x," +- " exited with %08x?\n", vec_nr, +- softirq_to_name[vec_nr], h->action, +- prev_count, preempt_count()); +- preempt_count() = prev_count; +- } +- +- rcu_bh_qs(cpu); +- } +- h++; +- pending >>= 1; +- } while (pending); +- +- local_irq_disable(); ++ handle_pending_softirqs(pending, cpu, 1); + + pending = local_softirq_pending(); + if (pending && 
--max_restart) +@@ -267,6 +329,26 @@ restart: + __local_bh_enable(SOFTIRQ_OFFSET); + } + ++/* ++ * Called with preemption disabled from run_ksoftirqd() ++ */ ++static int ksoftirqd_do_softirq(int cpu) ++{ ++ /* ++ * Preempt disable stops cpu going offline. ++ * If already offline, we'll be on wrong CPU: ++ * don't process. ++ */ ++ if (cpu_is_offline(cpu)) ++ return -1; ++ ++ local_irq_disable(); ++ if (local_softirq_pending()) ++ __do_softirq(); ++ local_irq_enable(); ++ return 0; ++} ++ + #ifndef __ARCH_HAS_DO_SOFTIRQ + + asmlinkage void do_softirq(void) +@@ -289,6 +371,184 @@ asmlinkage void do_softirq(void) + + #endif + ++static inline void local_bh_disable_nort(void) { local_bh_disable(); } ++static inline void _local_bh_enable_nort(void) { _local_bh_enable(); } ++static inline void ksoftirqd_set_sched_params(void) { } ++static inline void ksoftirqd_clr_sched_params(void) { } ++ ++#else /* !PREEMPT_RT_FULL */ ++ ++/* ++ * On RT we serialize softirq execution with a cpu local lock ++ */ ++static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock); ++static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner); ++ ++static void __do_softirq_common(int need_rcu_bh_qs); ++ ++void __do_softirq(void) ++{ ++ __do_softirq_common(0); ++} ++ ++void __init softirq_early_init(void) ++{ ++ local_irq_lock_init(local_softirq_lock); ++} ++ ++void local_bh_disable(void) ++{ ++ migrate_disable(); ++ current->softirq_nestcnt++; ++} ++EXPORT_SYMBOL(local_bh_disable); ++ ++void local_bh_enable(void) ++{ ++ if (WARN_ON(current->softirq_nestcnt == 0)) ++ return; ++ ++ if ((current->softirq_nestcnt == 1) && ++ local_softirq_pending() && ++ local_trylock(local_softirq_lock)) { ++ ++ local_irq_disable(); ++ if (local_softirq_pending()) ++ __do_softirq(); ++ local_irq_enable(); ++ local_unlock(local_softirq_lock); ++ WARN_ON(current->softirq_nestcnt != 1); ++ } ++ current->softirq_nestcnt--; ++ migrate_enable(); ++} ++EXPORT_SYMBOL(local_bh_enable); ++ ++void local_bh_enable_ip(unsigned long ip) ++{ ++ local_bh_enable(); ++} ++EXPORT_SYMBOL(local_bh_enable_ip); ++ ++/* For tracing */ ++int notrace __in_softirq(void) ++{ ++ if (__get_cpu_var(local_softirq_lock).owner == current) ++ return __get_cpu_var(local_softirq_lock).nestcnt; ++ return 0; ++} ++ ++int in_serving_softirq(void) ++{ ++ int res; ++ ++ preempt_disable(); ++ res = __get_cpu_var(local_softirq_runner) == current; ++ preempt_enable(); ++ return res; ++} ++EXPORT_SYMBOL(in_serving_softirq); ++ ++/* ++ * Called with bh and local interrupts disabled. For full RT cpu must ++ * be pinned. ++ */ ++static void __do_softirq_common(int need_rcu_bh_qs) ++{ ++ u32 pending = local_softirq_pending(); ++ int cpu = smp_processor_id(); ++ ++ current->softirq_nestcnt++; ++ ++ /* Reset the pending bitmask before enabling irqs */ ++ set_softirq_pending(0); ++ ++ __get_cpu_var(local_softirq_runner) = current; ++ ++ lockdep_softirq_enter(); ++ ++ handle_pending_softirqs(pending, cpu, need_rcu_bh_qs); ++ ++ pending = local_softirq_pending(); ++ if (pending) ++ wakeup_softirqd(); ++ ++ lockdep_softirq_exit(); ++ __get_cpu_var(local_softirq_runner) = NULL; ++ ++ current->softirq_nestcnt--; ++} ++ ++static int __thread_do_softirq(int cpu) ++{ ++ /* ++ * Prevent the current cpu from going offline. ++ * pin_current_cpu() can reenable preemption and block on the ++ * hotplug mutex. When it returns, the current cpu is ++ * pinned. It might be the wrong one, but the offline check ++ * below catches that. 
++ */ ++ pin_current_cpu(); ++ /* ++ * If called from ksoftirqd (cpu >= 0) we need to check ++ * whether we are on the wrong cpu due to cpu offlining. If ++ * called via thread_do_softirq() no action required. ++ */ ++ if (cpu >= 0 && cpu_is_offline(cpu)) { ++ unpin_current_cpu(); ++ return -1; ++ } ++ preempt_enable(); ++ local_lock(local_softirq_lock); ++ local_irq_disable(); ++ /* ++ * We cannot switch stacks on RT as we want to be able to ++ * schedule! ++ */ ++ if (local_softirq_pending()) ++ __do_softirq_common(cpu >= 0); ++ local_unlock(local_softirq_lock); ++ unpin_current_cpu(); ++ preempt_disable(); ++ local_irq_enable(); ++ return 0; ++} ++ ++/* ++ * Called from netif_rx_ni(). Preemption enabled. ++ */ ++void thread_do_softirq(void) ++{ ++ if (!in_serving_softirq()) { ++ preempt_disable(); ++ __thread_do_softirq(-1); ++ preempt_enable(); ++ } ++} ++ ++static int ksoftirqd_do_softirq(int cpu) ++{ ++ return __thread_do_softirq(cpu); ++} ++ ++static inline void local_bh_disable_nort(void) { } ++static inline void _local_bh_enable_nort(void) { } ++ ++static inline void ksoftirqd_set_sched_params(void) ++{ ++ struct sched_param param = { .sched_priority = 1 }; ++ ++ sched_setscheduler(current, SCHED_FIFO, ¶m); ++} ++ ++static inline void ksoftirqd_clr_sched_params(void) ++{ ++ struct sched_param param = { .sched_priority = 0 }; ++ ++ sched_setscheduler(current, SCHED_NORMAL, ¶m); ++} ++ ++#endif /* PREEMPT_RT_FULL */ + /* + * Enter an interrupt context. + */ +@@ -302,9 +562,9 @@ void irq_enter(void) + * Prevent raise_softirq from needlessly waking up ksoftirqd + * here, as softirq will be serviced on return from interrupt. + */ +- local_bh_disable(); ++ local_bh_disable_nort(); + tick_check_idle(cpu); +- _local_bh_enable(); ++ _local_bh_enable_nort(); + } + + __irq_enter(); +@@ -313,6 +573,7 @@ void irq_enter(void) + #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED + static inline void invoke_softirq(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + if (!force_irqthreads) + __do_softirq(); + else { +@@ -321,10 +582,14 @@ static inline void invoke_softirq(void) + wakeup_softirqd(); + __local_bh_enable(SOFTIRQ_OFFSET); + } ++#else ++ wakeup_softirqd(); ++#endif + } + #else + static inline void invoke_softirq(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + if (!force_irqthreads) + do_softirq(); + else { +@@ -333,6 +598,9 @@ static inline void invoke_softirq(void) + wakeup_softirqd(); + __local_bh_enable(SOFTIRQ_OFFSET); + } ++#else ++ wakeup_softirqd(); ++#endif + } + #endif + +@@ -353,7 +621,7 @@ void irq_exit(void) + if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched()) + tick_nohz_stop_sched_tick(0); + #endif +- preempt_enable_no_resched(); ++ __preempt_enable_no_resched(); + } + + /* +@@ -739,29 +1007,21 @@ void __init softirq_init(void) + + static int run_ksoftirqd(void * __bind_cpu) + { ++ ksoftirqd_set_sched_params(); ++ + set_current_state(TASK_INTERRUPTIBLE); + + while (!kthread_should_stop()) { + preempt_disable(); +- if (!local_softirq_pending()) { +- preempt_enable_no_resched(); +- schedule(); +- preempt_disable(); +- } ++ if (!local_softirq_pending()) ++ schedule_preempt_disabled(); + + __set_current_state(TASK_RUNNING); + + while (local_softirq_pending()) { +- /* Preempt disable stops cpu going offline. 
+- If already offline, we'll be on wrong CPU: +- don't process */ +- if (cpu_is_offline((long)__bind_cpu)) ++ if (ksoftirqd_do_softirq((long) __bind_cpu)) + goto wait_to_die; +- local_irq_disable(); +- if (local_softirq_pending()) +- __do_softirq(); +- local_irq_enable(); +- preempt_enable_no_resched(); ++ __preempt_enable_no_resched(); + cond_resched(); + preempt_disable(); + rcu_note_context_switch((long)__bind_cpu); +@@ -774,6 +1034,7 @@ static int run_ksoftirqd(void * __bind_c + + wait_to_die: + preempt_enable(); ++ ksoftirqd_clr_sched_params(); + /* Wait for kthread_stop */ + set_current_state(TASK_INTERRUPTIBLE); + while (!kthread_should_stop()) { +@@ -850,9 +1111,8 @@ static int __cpuinit cpu_callback(struct + int hotcpu = (unsigned long)hcpu; + struct task_struct *p; + +- switch (action) { ++ switch (action & ~CPU_TASKS_FROZEN) { + case CPU_UP_PREPARE: +- case CPU_UP_PREPARE_FROZEN: + p = kthread_create_on_node(run_ksoftirqd, + hcpu, + cpu_to_node(hotcpu), +@@ -865,19 +1125,16 @@ static int __cpuinit cpu_callback(struct + per_cpu(ksoftirqd, hotcpu) = p; + break; + case CPU_ONLINE: +- case CPU_ONLINE_FROZEN: + wake_up_process(per_cpu(ksoftirqd, hotcpu)); + break; + #ifdef CONFIG_HOTPLUG_CPU + case CPU_UP_CANCELED: +- case CPU_UP_CANCELED_FROZEN: + if (!per_cpu(ksoftirqd, hotcpu)) + break; + /* Unbind so it can run. Fall thru. */ + kthread_bind(per_cpu(ksoftirqd, hotcpu), + cpumask_any(cpu_online_mask)); +- case CPU_DEAD: +- case CPU_DEAD_FROZEN: { ++ case CPU_POST_DEAD: { + static const struct sched_param param = { + .sched_priority = MAX_RT_PRIO-1 + }; +Index: linux-3.2/kernel/posix-timers.c +=================================================================== +--- linux-3.2.orig/kernel/posix-timers.c ++++ linux-3.2/kernel/posix-timers.c +@@ -439,6 +439,7 @@ static enum hrtimer_restart posix_timer_ + static struct pid *good_sigevent(sigevent_t * event) + { + struct task_struct *rtn = current->group_leader; ++ int sig = event->sigev_signo; + + if ((event->sigev_notify & SIGEV_THREAD_ID ) && + (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) || +@@ -447,7 +448,8 @@ static struct pid *good_sigevent(sigeven + return NULL; + + if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) && +- ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX))) ++ (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) || ++ sig_kernel_coredump(sig))) + return NULL; + + return task_pid(rtn); +@@ -764,6 +766,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_ + return overrun; + } + ++/* ++ * Protected by RCU! ++ */ ++static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr) ++{ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (kc->timer_set == common_timer_set) ++ hrtimer_wait_for_timer(&timr->it.real.timer); ++ else ++ /* FIXME: Whacky hack for posix-cpu-timers */ ++ schedule_timeout(1); ++#endif ++} ++ + /* Set a POSIX.1b interval timer. */ + /* timr->it_lock is taken. */ + static int +@@ -841,6 +857,7 @@ retry: + if (!timr) + return -EINVAL; + ++ rcu_read_lock(); + kc = clockid_to_kclock(timr->it_clock); + if (WARN_ON_ONCE(!kc || !kc->timer_set)) + error = -EINVAL; +@@ -849,9 +866,12 @@ retry: + + unlock_timer(timr, flag); + if (error == TIMER_RETRY) { ++ timer_wait_for_callback(kc, timr); + rtn = NULL; // We already got the old time... 
++ rcu_read_unlock(); + goto retry; + } ++ rcu_read_unlock(); + + if (old_setting && !error && + copy_to_user(old_setting, &old_spec, sizeof (old_spec))) +@@ -889,10 +909,15 @@ retry_delete: + if (!timer) + return -EINVAL; + ++ rcu_read_lock(); + if (timer_delete_hook(timer) == TIMER_RETRY) { + unlock_timer(timer, flags); ++ timer_wait_for_callback(clockid_to_kclock(timer->it_clock), ++ timer); ++ rcu_read_unlock(); + goto retry_delete; + } ++ rcu_read_unlock(); + + spin_lock(¤t->sighand->siglock); + list_del(&timer->list); +@@ -918,8 +943,18 @@ static void itimer_delete(struct k_itime + retry_delete: + spin_lock_irqsave(&timer->it_lock, flags); + ++ /* On RT we can race with a deletion */ ++ if (!timer->it_signal) { ++ unlock_timer(timer, flags); ++ return; ++ } ++ + if (timer_delete_hook(timer) == TIMER_RETRY) { ++ rcu_read_lock(); + unlock_timer(timer, flags); ++ timer_wait_for_callback(clockid_to_kclock(timer->it_clock), ++ timer); ++ rcu_read_unlock(); + goto retry_delete; + } + list_del(&timer->list); +Index: linux-3.2/include/linux/signal.h +=================================================================== +--- linux-3.2.orig/include/linux/signal.h ++++ linux-3.2/include/linux/signal.h +@@ -229,6 +229,7 @@ static inline void init_sigpending(struc + } + + extern void flush_sigqueue(struct sigpending *queue); ++extern void flush_task_sigqueue(struct task_struct *tsk); + + /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */ + static inline int valid_signal(unsigned long sig) +Index: linux-3.2/kernel/exit.c +=================================================================== +--- linux-3.2.orig/kernel/exit.c ++++ linux-3.2/kernel/exit.c +@@ -141,7 +141,7 @@ static void __exit_signal(struct task_st + * Do this under ->siglock, we can race with another thread + * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. + */ +- flush_sigqueue(&tsk->pending); ++ flush_task_sigqueue(tsk); + tsk->sighand = NULL; + spin_unlock(&sighand->siglock); + +Index: linux-3.2/kernel/fork.c +=================================================================== +--- linux-3.2.orig/kernel/fork.c ++++ linux-3.2/kernel/fork.c +@@ -86,7 +86,7 @@ int max_threads; /* tunable limit on nr + + DEFINE_PER_CPU(unsigned long, process_counts) = 0; + +-__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */ ++DEFINE_RWLOCK(tasklist_lock); /* outer */ + + #ifdef CONFIG_PROVE_RCU + int lockdep_tasklist_lock_is_held(void) +@@ -197,7 +197,18 @@ void __put_task_struct(struct task_struc + if (!profile_handoff_task(tsk)) + free_task(tsk); + } ++#ifndef CONFIG_PREEMPT_RT_BASE + EXPORT_SYMBOL_GPL(__put_task_struct); ++#else ++void __put_task_struct_cb(struct rcu_head *rhp) ++{ ++ struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu); ++ ++ __put_task_struct(tsk); ++ ++} ++EXPORT_SYMBOL_GPL(__put_task_struct_cb); ++#endif + + /* + * macro override instead of weak attribute alias, to workaround +@@ -546,6 +557,19 @@ void __mmdrop(struct mm_struct *mm) + } + EXPORT_SYMBOL_GPL(__mmdrop); + ++#ifdef CONFIG_PREEMPT_RT_BASE ++/* ++ * RCU callback for delayed mm drop. Not strictly rcu, but we don't ++ * want another facility to make this work. ++ */ ++void __mmdrop_delayed(struct rcu_head *rhp) ++{ ++ struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop); ++ ++ __mmdrop(mm); ++} ++#endif ++ + /* + * Decrement the use count and release all resources for an mm. 
+ */ +@@ -1028,6 +1052,9 @@ void mm_init_owner(struct mm_struct *mm, + */ + static void posix_cpu_timers_init(struct task_struct *tsk) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ tsk->posix_timer_list = NULL; ++#endif + tsk->cputime_expires.prof_exp = cputime_zero; + tsk->cputime_expires.virt_exp = cputime_zero; + tsk->cputime_expires.sched_exp = 0; +@@ -1136,6 +1163,7 @@ static struct task_struct *copy_process( + spin_lock_init(&p->alloc_lock); + + init_sigpending(&p->pending); ++ p->sigqueue_cache = NULL; + + p->utime = cputime_zero; + p->stime = cputime_zero; +@@ -1197,6 +1225,9 @@ static struct task_struct *copy_process( + p->hardirq_context = 0; + p->softirq_context = 0; + #endif ++#ifdef CONFIG_PREEMPT_RT_FULL ++ p->pagefault_disabled = 0; ++#endif + #ifdef CONFIG_LOCKDEP + p->lockdep_depth = 0; /* no locks held yet */ + p->curr_chain_key = 0; +Index: linux-3.2/kernel/sched_features.h +=================================================================== +--- linux-3.2.orig/kernel/sched_features.h ++++ linux-3.2/kernel/sched_features.h +@@ -60,10 +60,15 @@ SCHED_FEAT(OWNER_SPIN, 1) + */ + SCHED_FEAT(NONTASK_POWER, 1) + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Queue remote wakeups on the target CPU and process them + * using the scheduler IPI. Reduces rq->lock contention/bounces. + */ + SCHED_FEAT(TTWU_QUEUE, 1) ++#else ++SCHED_FEAT(TTWU_QUEUE, 0) ++#endif + + SCHED_FEAT(FORCE_SD_OVERLAP, 0) ++SCHED_FEAT(RT_RUNTIME_SHARE, 1) +Index: linux-3.2/include/asm-generic/cmpxchg-local.h +=================================================================== +--- linux-3.2.orig/include/asm-generic/cmpxchg-local.h ++++ linux-3.2/include/asm-generic/cmpxchg-local.h +@@ -21,7 +21,7 @@ static inline unsigned long __cmpxchg_lo + if (size == 8 && sizeof(unsigned long) != 8) + wrong_size_cmpxchg(ptr); + +- local_irq_save(flags); ++ raw_local_irq_save(flags); + switch (size) { + case 1: prev = *(u8 *)ptr; + if (prev == old) +@@ -42,7 +42,7 @@ static inline unsigned long __cmpxchg_lo + default: + wrong_size_cmpxchg(ptr); + } +- local_irq_restore(flags); ++ raw_local_irq_restore(flags); + return prev; + } + +@@ -55,11 +55,11 @@ static inline u64 __cmpxchg64_local_gene + u64 prev; + unsigned long flags; + +- local_irq_save(flags); ++ raw_local_irq_save(flags); + prev = *(u64 *)ptr; + if (prev == old) + *(u64 *)ptr = new; +- local_irq_restore(flags); ++ raw_local_irq_restore(flags); + return prev; + } + +Index: linux-3.2/drivers/char/random.c +=================================================================== +--- linux-3.2.orig/drivers/char/random.c ++++ linux-3.2/drivers/char/random.c +@@ -433,7 +433,7 @@ static struct entropy_store input_pool = + .poolinfo = &poolinfo_table[0], + .name = "input", + .limit = 1, +- .lock = __SPIN_LOCK_UNLOCKED(&input_pool.lock), ++ .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock), + .pool = input_pool_data + }; + +@@ -442,7 +442,7 @@ static struct entropy_store blocking_poo + .name = "blocking", + .limit = 1, + .pull = &input_pool, +- .lock = __SPIN_LOCK_UNLOCKED(&blocking_pool.lock), ++ .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock), + .pool = blocking_pool_data + }; + +@@ -450,7 +450,7 @@ static struct entropy_store nonblocking_ + .poolinfo = &poolinfo_table[1], + .name = "nonblocking", + .pull = &input_pool, +- .lock = __SPIN_LOCK_UNLOCKED(&nonblocking_pool.lock), ++ .lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock), + .pool = nonblocking_pool_data + }; + +@@ -633,8 +633,11 @@ static void add_timer_randomness(struct + preempt_disable(); + /* if over the trickle threshold, use 
only 1 in 4096 samples */ + if (input_pool.entropy_count > trickle_thresh && +- ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff)) +- goto out; ++ ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff)) { ++ preempt_enable(); ++ return; ++ } ++ preempt_enable(); + + sample.jiffies = jiffies; + sample.cycles = get_cycles(); +@@ -676,8 +679,6 @@ static void add_timer_randomness(struct + credit_entropy_bits(&input_pool, + min_t(int, fls(delta>>1), 11)); + } +-out: +- preempt_enable(); + } + + void add_input_randomness(unsigned int type, unsigned int code, +Index: linux-3.2/arch/arm/mach-at91/at91rm9200_time.c +=================================================================== +--- linux-3.2.orig/arch/arm/mach-at91/at91rm9200_time.c ++++ linux-3.2/arch/arm/mach-at91/at91rm9200_time.c +@@ -114,6 +114,7 @@ clkevt32k_mode(enum clock_event_mode mod + last_crtr = read_CRTR(); + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: ++ setup_irq(AT91_ID_SYS, &at91rm9200_timer_irq); + /* PIT for periodic irqs; fixed rate of 1/HZ */ + irqmask = AT91_ST_PITS; + at91_sys_write(AT91_ST_PIMR, LATCH); +@@ -127,6 +128,7 @@ clkevt32k_mode(enum clock_event_mode mod + break; + case CLOCK_EVT_MODE_SHUTDOWN: + case CLOCK_EVT_MODE_UNUSED: ++ remove_irq(AT91_ID_SYS, &at91rm9200_timer_irq); + case CLOCK_EVT_MODE_RESUME: + irqmask = 0; + break; +Index: linux-3.2/arch/arm/mach-at91/at91sam926x_time.c +=================================================================== +--- linux-3.2.orig/arch/arm/mach-at91/at91sam926x_time.c ++++ linux-3.2/arch/arm/mach-at91/at91sam926x_time.c +@@ -54,7 +54,7 @@ static struct clocksource pit_clk = { + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + }; + +- ++static struct irqaction at91sam926x_pit_irq; + /* + * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16) + */ +@@ -63,6 +63,9 @@ pit_clkevt_mode(enum clock_event_mode mo + { + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: ++ /* Set up irq handler */ ++ setup_irq(AT91_ID_SYS, &at91sam926x_pit_irq); ++ + /* update clocksource counter */ + pit_cnt += pit_cycle * PIT_PICNT(at91_sys_read(AT91_PIT_PIVR)); + at91_sys_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN +@@ -75,6 +78,7 @@ pit_clkevt_mode(enum clock_event_mode mo + case CLOCK_EVT_MODE_UNUSED: + /* disable irq, leaving the clocksource active */ + at91_sys_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN); ++ remove_irq(AT91_ID_SYS, &at91sam926x_pit_irq); + break; + case CLOCK_EVT_MODE_RESUME: + break; +Index: linux-3.2/drivers/clocksource/tcb_clksrc.c +=================================================================== +--- linux-3.2.orig/drivers/clocksource/tcb_clksrc.c ++++ linux-3.2/drivers/clocksource/tcb_clksrc.c +@@ -21,8 +21,7 @@ + * resolution better than 200 nsec). + * + * - The third channel may be used to provide a 16-bit clockevent +- * source, used in either periodic or oneshot mode. This runs +- * at 32 KiHZ, and can handle delays of up to two seconds. ++ * source, used in either periodic or oneshot mode. + * + * A boot clocksource and clockevent source are also currently needed, + * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so +@@ -68,6 +67,7 @@ static struct clocksource clksrc = { + struct tc_clkevt_device { + struct clock_event_device clkevt; + struct clk *clk; ++ u32 freq; + void __iomem *regs; + }; + +@@ -76,13 +76,6 @@ static struct tc_clkevt_device *to_tc_cl + return container_of(clkevt, struct tc_clkevt_device, clkevt); + } + +-/* For now, we always use the 32K clock ... 
this optimizes for NO_HZ, +- * because using one of the divided clocks would usually mean the +- * tick rate can never be less than several dozen Hz (vs 0.5 Hz). +- * +- * A divided clock could be good for high resolution timers, since +- * 30.5 usec resolution can seem "low". +- */ + static u32 timer_clock; + + static void tc_mode(enum clock_event_mode m, struct clock_event_device *d) +@@ -105,11 +98,12 @@ static void tc_mode(enum clock_event_mod + case CLOCK_EVT_MODE_PERIODIC: + clk_enable(tcd->clk); + +- /* slow clock, count up to RC, then irq and restart */ ++ /* count up to RC, then irq and restart */ + __raw_writel(timer_clock + | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, + regs + ATMEL_TC_REG(2, CMR)); +- __raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC)); ++ __raw_writel((tcd->freq + HZ/2)/HZ, ++ tcaddr + ATMEL_TC_REG(2, RC)); + + /* Enable clock and interrupts on RC compare */ + __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER)); +@@ -122,7 +116,7 @@ static void tc_mode(enum clock_event_mod + case CLOCK_EVT_MODE_ONESHOT: + clk_enable(tcd->clk); + +- /* slow clock, count up to RC, then irq and stop */ ++ /* count up to RC, then irq and stop */ + __raw_writel(timer_clock | ATMEL_TC_CPCSTOP + | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, + regs + ATMEL_TC_REG(2, CMR)); +@@ -152,8 +146,12 @@ static struct tc_clkevt_device clkevt = + .features = CLOCK_EVT_FEAT_PERIODIC + | CLOCK_EVT_FEAT_ONESHOT, + .shift = 32, ++#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK + /* Should be lower than at91rm9200's system timer */ + .rating = 125, ++#else ++ .rating = 200, ++#endif + .set_next_event = tc_next_event, + .set_mode = tc_mode, + }, +@@ -179,8 +177,9 @@ static struct irqaction tc_irqaction = { + .handler = ch2_irq, + }; + +-static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) ++static void __init setup_clkevents(struct atmel_tc *tc, int divisor_idx) + { ++ unsigned divisor = atmel_tc_divisors[divisor_idx]; + struct clk *t2_clk = tc->clk[2]; + int irq = tc->irq[2]; + +@@ -188,11 +187,17 @@ static void __init setup_clkevents(struc + clkevt.clk = t2_clk; + tc_irqaction.dev_id = &clkevt; + +- timer_clock = clk32k_divisor_idx; ++ timer_clock = divisor_idx; + +- clkevt.clkevt.mult = div_sc(32768, NSEC_PER_SEC, clkevt.clkevt.shift); +- clkevt.clkevt.max_delta_ns +- = clockevent_delta2ns(0xffff, &clkevt.clkevt); ++ if (!divisor) ++ clkevt.freq = 32768; ++ else ++ clkevt.freq = clk_get_rate(t2_clk)/divisor; ++ ++ clkevt.clkevt.mult = div_sc(clkevt.freq, NSEC_PER_SEC, ++ clkevt.clkevt.shift); ++ clkevt.clkevt.max_delta_ns = ++ clockevent_delta2ns(0xffff, &clkevt.clkevt); + clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1; + clkevt.clkevt.cpumask = cpumask_of(0); + +@@ -295,8 +300,11 @@ static int __init tcb_clksrc_init(void) + clocksource_register(&clksrc); + + /* channel 2: periodic and oneshot timer support */ ++#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK + setup_clkevents(tc, clk32k_divisor_idx); +- ++#else ++ setup_clkevents(tc, best_divisor_idx); ++#endif + return 0; + } + arch_initcall(tcb_clksrc_init); +Index: linux-3.2/drivers/misc/Kconfig +=================================================================== +--- linux-3.2.orig/drivers/misc/Kconfig ++++ linux-3.2/drivers/misc/Kconfig +@@ -82,6 +82,7 @@ config AB8500_PWM + config ATMEL_TCLIB + bool "Atmel AT32/AT91 Timer/Counter Library" + depends on (AVR32 || ARCH_AT91) ++ default y if PREEMPT_RT_FULL + help + Select this if you want a library to allocate the Timer/Counter + 
blocks found on many Atmel processors. This facilitates using +@@ -97,8 +98,7 @@ config ATMEL_TCB_CLKSRC + are combined to make a single 32-bit timer. + + When GENERIC_CLOCKEVENTS is defined, the third timer channel +- may be used as a clock event device supporting oneshot mode +- (delays of up to two seconds) based on the 32 KiHz clock. ++ may be used as a clock event device supporting oneshot mode. + + config ATMEL_TCB_CLKSRC_BLOCK + int +@@ -112,6 +112,14 @@ config ATMEL_TCB_CLKSRC_BLOCK + TC can be used for other purposes, such as PWM generation and + interval timing. + ++config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK ++ bool "TC Block use 32 KiHz clock" ++ depends on ATMEL_TCB_CLKSRC ++ default y if !PREEMPT_RT_FULL ++ help ++ Select this to use 32 KiHz base clock rate as TC block clock ++ source for clock events. ++ + config IBM_ASM + tristate "Device driver for IBM RSA service processor" + depends on X86 && PCI && INPUT && EXPERIMENTAL +@@ -133,6 +141,35 @@ config IBM_ASM + for information on the specific driver level and support statement + for your IBM server. + ++config HWLAT_DETECTOR ++ tristate "Testing module to detect hardware-induced latencies" ++ depends on DEBUG_FS ++ depends on RING_BUFFER ++ default m ++ ---help--- ++ A simple hardware latency detector. Use this module to detect ++ large latencies introduced by the behavior of the underlying ++ system firmware external to Linux. We do this using periodic ++ use of stop_machine to grab all available CPUs and measure ++ for unexplainable gaps in the CPU timestamp counter(s). By ++ default, the module is not enabled until the "enable" file ++ within the "hwlat_detector" debugfs directory is toggled. ++ ++ This module is often used to detect SMI (System Management ++ Interrupts) on x86 systems, though is not x86 specific. To ++ this end, we default to using a sample window of 1 second, ++ during which we will sample for 0.5 seconds. If an SMI or ++ similar event occurs during that time, it is recorded ++ into an 8K samples global ring buffer until retreived. ++ ++ WARNING: This software should never be enabled (it can be built ++ but should not be turned on after it is loaded) in a production ++ environment where high latencies are a concern since the ++ sampling mechanism actually introduces latencies for ++ regular tasks while the CPU(s) are being held. 
++ ++ If unsure, say N ++ + config PHANTOM + tristate "Sensable PHANToM (PCI)" + depends on PCI +Index: linux-3.2/drivers/net/ethernet/dec/tulip/tulip_core.c +=================================================================== +--- linux-3.2.orig/drivers/net/ethernet/dec/tulip/tulip_core.c ++++ linux-3.2/drivers/net/ethernet/dec/tulip/tulip_core.c +@@ -1949,6 +1949,7 @@ static void __devexit tulip_remove_one ( + pci_iounmap(pdev, tp->base_addr); + free_netdev (dev); + pci_release_regions (pdev); ++ pci_disable_device (pdev); + pci_set_drvdata (pdev, NULL); + + /* pci_power_off (pdev, -1); */ +Index: linux-3.2/drivers/net/ethernet/realtek/8139too.c +=================================================================== +--- linux-3.2.orig/drivers/net/ethernet/realtek/8139too.c ++++ linux-3.2/drivers/net/ethernet/realtek/8139too.c +@@ -2174,7 +2174,7 @@ static irqreturn_t rtl8139_interrupt (in + */ + static void rtl8139_poll_controller(struct net_device *dev) + { +- disable_irq(dev->irq); ++ disable_irq_nosync(dev->irq); + rtl8139_interrupt(dev->irq, dev); + enable_irq(dev->irq); + } +Index: linux-3.2/drivers/net/ethernet/ibm/ehea/ehea_main.c +=================================================================== +--- linux-3.2.orig/drivers/net/ethernet/ibm/ehea/ehea_main.c ++++ linux-3.2/drivers/net/ethernet/ibm/ehea/ehea_main.c +@@ -1303,7 +1303,7 @@ static int ehea_reg_interrupts(struct ne + "%s-queue%d", dev->name, i); + ret = ibmebus_request_irq(pr->eq->attr.ist1, + ehea_recv_irq_handler, +- IRQF_DISABLED, pr->int_send_name, ++ IRQF_NO_THREAD, pr->int_send_name, + pr); + if (ret) { + netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n", +Index: linux-3.2/drivers/net/ethernet/cadence/at91_ether.c +=================================================================== +--- linux-3.2.orig/drivers/net/ethernet/cadence/at91_ether.c ++++ linux-3.2/drivers/net/ethernet/cadence/at91_ether.c +@@ -200,7 +200,9 @@ static irqreturn_t at91ether_phy_interru + struct net_device *dev = (struct net_device *) dev_id; + struct at91_private *lp = netdev_priv(dev); + unsigned int phy; ++ unsigned long flags; + ++ spin_lock_irqsave(&lp->lock, flags); + /* + * This hander is triggered on both edges, but the PHY chips expect + * level-triggering. 
We therefore have to check if the PHY actually has +@@ -242,6 +244,7 @@ static irqreturn_t at91ether_phy_interru + + done: + disable_mdi(); ++ spin_unlock_irqrestore(&lp->lock, flags); + + return IRQ_HANDLED; + } +@@ -398,9 +401,11 @@ static void at91ether_check_link(unsigne + struct net_device *dev = (struct net_device *) dev_id; + struct at91_private *lp = netdev_priv(dev); + ++ spin_lock_irq(&lp->lock); + enable_mdi(); + update_linkspeed(dev, 1); + disable_mdi(); ++ spin_unlock_irq(&lp->lock); + + mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL); + } +Index: linux-3.2/include/linux/preempt.h +=================================================================== +--- linux-3.2.orig/include/linux/preempt.h ++++ linux-3.2/include/linux/preempt.h +@@ -48,15 +48,21 @@ do { \ + barrier(); \ + } while (0) + +-#define preempt_enable_no_resched() \ ++#define __preempt_enable_no_resched() \ + do { \ + barrier(); \ + dec_preempt_count(); \ + } while (0) + ++#ifndef CONFIG_PREEMPT_RT_BASE ++# define preempt_enable_no_resched() __preempt_enable_no_resched() ++#else ++# define preempt_enable_no_resched() preempt_enable() ++#endif ++ + #define preempt_enable() \ + do { \ +- preempt_enable_no_resched(); \ ++ __preempt_enable_no_resched(); \ + barrier(); \ + preempt_check_resched(); \ + } while (0) +@@ -92,6 +98,7 @@ do { \ + #else /* !CONFIG_PREEMPT_COUNT */ + + #define preempt_disable() do { } while (0) ++#define __preempt_enable_no_resched() do { } while (0) + #define preempt_enable_no_resched() do { } while (0) + #define preempt_enable() do { } while (0) + +@@ -101,6 +108,27 @@ do { \ + + #endif /* CONFIG_PREEMPT_COUNT */ + ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define preempt_disable_rt() preempt_disable() ++# define preempt_enable_rt() preempt_enable() ++# define preempt_disable_nort() do { } while (0) ++# define preempt_enable_nort() do { } while (0) ++# ifdef CONFIG_SMP ++ extern void migrate_disable(void); ++ extern void migrate_enable(void); ++# else /* CONFIG_SMP */ ++# define migrate_disable() do { } while (0) ++# define migrate_enable() do { } while (0) ++# endif /* CONFIG_SMP */ ++#else ++# define preempt_disable_rt() do { } while (0) ++# define preempt_enable_rt() do { } while (0) ++# define preempt_disable_nort() preempt_disable() ++# define preempt_enable_nort() preempt_enable() ++# define migrate_disable() preempt_disable() ++# define migrate_enable() preempt_enable() ++#endif ++ + #ifdef CONFIG_PREEMPT_NOTIFIERS + + struct preempt_notifier; +Index: linux-3.2/include/linux/uaccess.h +=================================================================== +--- linux-3.2.orig/include/linux/uaccess.h ++++ linux-3.2/include/linux/uaccess.h +@@ -6,38 +6,37 @@ + + /* + * These routines enable/disable the pagefault handler in that +- * it will not take any locks and go straight to the fixup table. +- * +- * They have great resemblance to the preempt_disable/enable calls +- * and in fact they are identical; this is because currently there is +- * no other way to make the pagefault handlers do this. So we do +- * disable preemption but we don't necessarily care about that. ++ * it will not take any MM locks and go straight to the fixup table. + */ +-static inline void pagefault_disable(void) ++static inline void raw_pagefault_disable(void) + { + inc_preempt_count(); +- /* +- * make sure to have issued the store before a pagefault +- * can hit. 
+- */ + barrier(); + } + +-static inline void pagefault_enable(void) ++static inline void raw_pagefault_enable(void) + { +- /* +- * make sure to issue those last loads/stores before enabling +- * the pagefault handler again. +- */ + barrier(); + dec_preempt_count(); +- /* +- * make sure we do.. +- */ + barrier(); + preempt_check_resched(); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL ++static inline void pagefault_disable(void) ++{ ++ raw_pagefault_disable(); ++} ++ ++static inline void pagefault_enable(void) ++{ ++ raw_pagefault_enable(); ++} ++#else ++extern void pagefault_disable(void); ++extern void pagefault_enable(void); ++#endif ++ + #ifndef ARCH_HAS_NOCACHE_UACCESS + + static inline unsigned long __copy_from_user_inatomic_nocache(void *to, +@@ -77,9 +76,9 @@ static inline unsigned long __copy_from_ + mm_segment_t old_fs = get_fs(); \ + \ + set_fs(KERNEL_DS); \ +- pagefault_disable(); \ ++ raw_pagefault_disable(); \ + ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \ +- pagefault_enable(); \ ++ raw_pagefault_enable(); \ + set_fs(old_fs); \ + ret; \ + }) +Index: linux-3.2/mm/memory.c +=================================================================== +--- linux-3.2.orig/mm/memory.c ++++ linux-3.2/mm/memory.c +@@ -3436,6 +3436,32 @@ unlock: + return 0; + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++void pagefault_disable(void) ++{ ++ migrate_disable(); ++ current->pagefault_disabled++; ++ /* ++ * make sure to have issued the store before a pagefault ++ * can hit. ++ */ ++ barrier(); ++} ++EXPORT_SYMBOL_GPL(pagefault_disable); ++ ++void pagefault_enable(void) ++{ ++ /* ++ * make sure to issue those last loads/stores before enabling ++ * the pagefault handler again. ++ */ ++ barrier(); ++ current->pagefault_disabled--; ++ migrate_enable(); ++} ++EXPORT_SYMBOL_GPL(pagefault_enable); ++#endif ++ + /* + * By the time we get here, we already hold the mm semaphore + */ +@@ -3984,3 +4010,35 @@ void copy_user_huge_page(struct page *ds + } + } + #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ ++ ++#if defined(CONFIG_PREEMPT_RT_FULL) && (USE_SPLIT_PTLOCKS > 0) ++/* ++ * Heinous hack, relies on the caller doing something like: ++ * ++ * pte = alloc_pages(PGALLOC_GFP, 0); ++ * if (pte) ++ * pgtable_page_ctor(pte); ++ * return pte; ++ * ++ * This ensures we release the page and return NULL when the ++ * lock allocation fails. ++ */ ++struct page *pte_lock_init(struct page *page) ++{ ++ page->ptl = kmalloc(sizeof(spinlock_t), GFP_KERNEL); ++ if (page->ptl) { ++ spin_lock_init(__pte_lockptr(page)); ++ } else { ++ __free_page(page); ++ page = NULL; ++ } ++ return page; ++} ++ ++void pte_lock_deinit(struct page *page) ++{ ++ kfree(page->ptl); ++ page->mapping = NULL; ++} ++ ++#endif +Index: linux-3.2/arch/alpha/mm/fault.c +=================================================================== +--- linux-3.2.orig/arch/alpha/mm/fault.c ++++ linux-3.2/arch/alpha/mm/fault.c +@@ -107,7 +107,7 @@ do_page_fault(unsigned long address, uns + + /* If we're in an interrupt context, or have no user context, + we must not take the fault. 
*/ +- if (!mm || in_atomic()) ++ if (!mm || pagefault_disabled()) + goto no_context; + + #ifdef CONFIG_ALPHA_LARGE_VMALLOC +Index: linux-3.2/arch/arm/mm/fault.c +=================================================================== +--- linux-3.2.orig/arch/arm/mm/fault.c ++++ linux-3.2/arch/arm/mm/fault.c +@@ -294,7 +294,7 @@ do_page_fault(unsigned long addr, unsign + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (!mm || pagefault_disabled()) + goto no_context; + + /* +Index: linux-3.2/arch/avr32/mm/fault.c +=================================================================== +--- linux-3.2.orig/arch/avr32/mm/fault.c ++++ linux-3.2/arch/avr32/mm/fault.c +@@ -81,7 +81,7 @@ asmlinkage void do_page_fault(unsigned l + * If we're in an interrupt or have no user context, we must + * not take the fault... + */ +- if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM)) ++ if (!mm || regs->sr & SYSREG_BIT(GM) || pagefault_disabled()) + goto no_context; + + local_irq_enable(); +Index: linux-3.2/arch/cris/mm/fault.c +=================================================================== +--- linux-3.2.orig/arch/cris/mm/fault.c ++++ linux-3.2/arch/cris/mm/fault.c +@@ -111,7 +111,7 @@ do_page_fault(unsigned long address, str + * user context, we must not take the fault. + */ + +- if (in_atomic() || !mm) ++ if (!mm || pagefault_disabled()) + goto no_context; + + down_read(&mm->mmap_sem); +Index: linux-3.2/arch/frv/mm/fault.c +=================================================================== +--- linux-3.2.orig/arch/frv/mm/fault.c ++++ linux-3.2/arch/frv/mm/fault.c +@@ -79,7 +79,7 @@ asmlinkage void do_page_fault(int datamm + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (!mm || pagefault_disabled()) + goto no_context; + + down_read(&mm->mmap_sem); +Index: linux-3.2/arch/ia64/mm/fault.c +=================================================================== +--- linux-3.2.orig/arch/ia64/mm/fault.c ++++ linux-3.2/arch/ia64/mm/fault.c +@@ -89,7 +89,7 @@ ia64_do_page_fault (unsigned long addres + /* + * If we're in an interrupt or have no user context, we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (!mm || pagefault_disabled()) + goto no_context; + + #ifdef CONFIG_VIRTUAL_MEM_MAP +Index: linux-3.2/arch/m32r/mm/fault.c +=================================================================== +--- linux-3.2.orig/arch/m32r/mm/fault.c ++++ linux-3.2/arch/m32r/mm/fault.c +@@ -115,7 +115,7 @@ asmlinkage void do_page_fault(struct pt_ + * If we're in an interrupt or have no user context or are running in an + * atomic region then we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (!mm || pagefault_disabled()) + goto bad_area_nosemaphore; + + /* When running in the kernel we expect faults to occur only to +Index: linux-3.2/arch/m68k/mm/fault.c +=================================================================== +--- linux-3.2.orig/arch/m68k/mm/fault.c ++++ linux-3.2/arch/m68k/mm/fault.c +@@ -85,7 +85,7 @@ int do_page_fault(struct pt_regs *regs, + * If we're in an interrupt or have no user + * context, we must not take the fault.. 
+ */ +- if (in_atomic() || !mm) ++ if (!mm || pagefault_disabled()) + goto no_context; + + down_read(&mm->mmap_sem); +Index: linux-3.2/arch/microblaze/mm/fault.c +=================================================================== +--- linux-3.2.orig/arch/microblaze/mm/fault.c ++++ linux-3.2/arch/microblaze/mm/fault.c +@@ -107,7 +107,7 @@ void do_page_fault(struct pt_regs *regs, + if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11) + is_write = 0; + +- if (unlikely(in_atomic() || !mm)) { ++ if (unlikely(!mm || pagefault_disabled())) { + if (kernel_mode(regs)) + goto bad_area_nosemaphore; + +Index: linux-3.2/arch/mips/mm/fault.c +=================================================================== +--- linux-3.2.orig/arch/mips/mm/fault.c ++++ linux-3.2/arch/mips/mm/fault.c +@@ -88,7 +88,7 @@ asmlinkage void __kprobes do_page_fault( + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (!mm || pagefault_disabled()) + goto bad_area_nosemaphore; + + down_read(&mm->mmap_sem); +Index: linux-3.2/arch/mn10300/mm/fault.c +=================================================================== +--- linux-3.2.orig/arch/mn10300/mm/fault.c ++++ linux-3.2/arch/mn10300/mm/fault.c +@@ -168,7 +168,7 @@ asmlinkage void do_page_fault(struct pt_ + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (!mm || pagefault_disabled()) + goto no_context; + + down_read(&mm->mmap_sem); +Index: linux-3.2/arch/parisc/mm/fault.c +=================================================================== +--- linux-3.2.orig/arch/parisc/mm/fault.c ++++ linux-3.2/arch/parisc/mm/fault.c +@@ -176,7 +176,7 @@ void do_page_fault(struct pt_regs *regs, + unsigned long acc_type; + int fault; + +- if (in_atomic() || !mm) ++ if (!mm || pagefault_disabled()) + goto no_context; + + down_read(&mm->mmap_sem); +Index: linux-3.2/arch/powerpc/mm/fault.c +=================================================================== +--- linux-3.2.orig/arch/powerpc/mm/fault.c ++++ linux-3.2/arch/powerpc/mm/fault.c +@@ -162,7 +162,7 @@ int __kprobes do_page_fault(struct pt_re + } + #endif + +- if (in_atomic() || mm == NULL) { ++ if (!mm || pagefault_disabled()) { + if (!user_mode(regs)) + return SIGSEGV; + /* in_atomic() in user mode is really bad, +Index: linux-3.2/arch/s390/mm/fault.c +=================================================================== +--- linux-3.2.orig/arch/s390/mm/fault.c ++++ linux-3.2/arch/s390/mm/fault.c +@@ -295,7 +295,8 @@ static inline int do_exception(struct pt + * user context. 
+ */ + fault = VM_FAULT_BADCONTEXT; +- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) ++ if (unlikely(!user_space_fault(trans_exc_code) || ++ !mm || pagefault_disabled())) + goto out; + + address = trans_exc_code & __FAIL_ADDR_MASK; +@@ -426,7 +427,8 @@ void __kprobes do_asce_exception(struct + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + +- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) ++ if (unlikely(!user_space_fault(trans_exc_code) || ++ !mm || pagefault_disabled())) + goto no_context; + + down_read(&mm->mmap_sem); +Index: linux-3.2/arch/score/mm/fault.c +=================================================================== +--- linux-3.2.orig/arch/score/mm/fault.c ++++ linux-3.2/arch/score/mm/fault.c +@@ -72,7 +72,7 @@ asmlinkage void do_page_fault(struct pt_ + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (!mm || pagefault_disabled()) + goto bad_area_nosemaphore; + + down_read(&mm->mmap_sem); +Index: linux-3.2/arch/sh/mm/fault_32.c +=================================================================== +--- linux-3.2.orig/arch/sh/mm/fault_32.c ++++ linux-3.2/arch/sh/mm/fault_32.c +@@ -166,7 +166,7 @@ asmlinkage void __kprobes do_page_fault( + * If we're in an interrupt, have no user context or are running + * in an atomic region then we must not take the fault: + */ +- if (in_atomic() || !mm) ++ if (!mm || pagefault_disabled()) + goto no_context; + + down_read(&mm->mmap_sem); +Index: linux-3.2/arch/sparc/mm/fault_32.c +=================================================================== +--- linux-3.2.orig/arch/sparc/mm/fault_32.c ++++ linux-3.2/arch/sparc/mm/fault_32.c +@@ -247,8 +247,8 @@ asmlinkage void do_sparc_fault(struct pt + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) +- goto no_context; ++ if (!mm || pagefault_disabled()) ++ goto no_context; + + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); + +Index: linux-3.2/arch/sparc/mm/fault_64.c +=================================================================== +--- linux-3.2.orig/arch/sparc/mm/fault_64.c ++++ linux-3.2/arch/sparc/mm/fault_64.c +@@ -322,7 +322,7 @@ asmlinkage void __kprobes do_sparc64_fau + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (!mm || pagefault_disabled()) + goto intr_or_no_mm; + + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); +Index: linux-3.2/arch/tile/mm/fault.c +=================================================================== +--- linux-3.2.orig/arch/tile/mm/fault.c ++++ linux-3.2/arch/tile/mm/fault.c +@@ -346,7 +346,7 @@ static int handle_page_fault(struct pt_r + * If we're in an interrupt, have no user context or are running in an + * atomic region then we must not take the fault. + */ +- if (in_atomic() || !mm) { ++ if (!mm || pagefault_disabled()) { + vma = NULL; /* happy compiler */ + goto bad_area_nosemaphore; + } +Index: linux-3.2/arch/um/kernel/trap.c +=================================================================== +--- linux-3.2.orig/arch/um/kernel/trap.c ++++ linux-3.2/arch/um/kernel/trap.c +@@ -37,7 +37,7 @@ int handle_page_fault(unsigned long addr + * If the fault was during atomic operation, don't take the fault, just + * fail. 
+ */ +- if (in_atomic()) ++ if (!mm || pagefault_disabled()) + goto out_nosemaphore; + + down_read(&mm->mmap_sem); +Index: linux-3.2/arch/x86/mm/fault.c +=================================================================== +--- linux-3.2.orig/arch/x86/mm/fault.c ++++ linux-3.2/arch/x86/mm/fault.c +@@ -1084,7 +1084,7 @@ do_page_fault(struct pt_regs *regs, unsi + * If we're in an interrupt, have no user context or are running + * in an atomic region then we must not take the fault: + */ +- if (unlikely(in_atomic() || !mm)) { ++ if (unlikely(!mm || pagefault_disabled())) { + bad_area_nosemaphore(regs, error_code, address); + return; + } +Index: linux-3.2/arch/xtensa/mm/fault.c +=================================================================== +--- linux-3.2.orig/arch/xtensa/mm/fault.c ++++ linux-3.2/arch/xtensa/mm/fault.c +@@ -57,7 +57,7 @@ void do_page_fault(struct pt_regs *regs) + /* If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) { ++ if (!mm || pagefault_disabled()) { + bad_page_fault(regs, address, SIGSEGV); + return; + } +Index: linux-3.2/mm/filemap.c +=================================================================== +--- linux-3.2.orig/mm/filemap.c ++++ linux-3.2/mm/filemap.c +@@ -2061,7 +2061,7 @@ size_t iov_iter_copy_from_user_atomic(st + char *kaddr; + size_t copied; + +- BUG_ON(!in_atomic()); ++ BUG_ON(!pagefault_disabled()); + kaddr = kmap_atomic(page, KM_USER0); + if (likely(i->nr_segs == 1)) { + int left; +Index: linux-3.2/arch/x86/mm/highmem_32.c +=================================================================== +--- linux-3.2.orig/arch/x86/mm/highmem_32.c ++++ linux-3.2/arch/x86/mm/highmem_32.c +@@ -43,7 +43,7 @@ void *kmap_atomic_prot(struct page *page + type = kmap_atomic_idx_push(); + idx = type + KM_TYPE_NR*smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); +- BUG_ON(!pte_none(*(kmap_pte-idx))); ++ WARN_ON(!pte_none(*(kmap_pte-idx))); + set_pte(kmap_pte-idx, mk_pte(page, prot)); + + return (void *)vaddr; +Index: linux-3.2/include/linux/kernel.h +=================================================================== +--- linux-3.2.orig/include/linux/kernel.h ++++ linux-3.2/include/linux/kernel.h +@@ -356,7 +356,7 @@ extern enum system_states { + SYSTEM_HALT, + SYSTEM_POWER_OFF, + SYSTEM_RESTART, +- SYSTEM_SUSPEND_DISK, ++ SYSTEM_SUSPEND, + } system_state; + + #define TAINT_PROPRIETARY_MODULE 0 +Index: linux-3.2/kernel/power/hibernate.c +=================================================================== +--- linux-3.2.orig/kernel/power/hibernate.c ++++ linux-3.2/kernel/power/hibernate.c +@@ -282,6 +282,8 @@ static int create_image(int platform_mod + + local_irq_disable(); + ++ system_state = SYSTEM_SUSPEND; ++ + error = syscore_suspend(); + if (error) { + printk(KERN_ERR "PM: Some system devices failed to power down, " +@@ -309,6 +311,7 @@ static int create_image(int platform_mod + syscore_resume(); + + Enable_irqs: ++ system_state = SYSTEM_RUNNING; + local_irq_enable(); + + Enable_cpus: +@@ -420,6 +423,7 @@ static int resume_target_kernel(bool pla + goto Enable_cpus; + + local_irq_disable(); ++ system_state = SYSTEM_SUSPEND; + + error = syscore_suspend(); + if (error) +@@ -453,6 +457,7 @@ static int resume_target_kernel(bool pla + syscore_resume(); + + Enable_irqs: ++ system_state = SYSTEM_RUNNING; + local_irq_enable(); + + Enable_cpus: +@@ -532,6 +537,7 @@ int hibernation_platform_enter(void) + goto Platform_finish; + + local_irq_disable(); ++ system_state = SYSTEM_SUSPEND; + 
syscore_suspend(); + if (pm_wakeup_pending()) { + error = -EAGAIN; +@@ -544,6 +550,7 @@ int hibernation_platform_enter(void) + + Power_up: + syscore_resume(); ++ system_state = SYSTEM_RUNNING; + local_irq_enable(); + enable_nonboot_cpus(); + +Index: linux-3.2/kernel/power/suspend.c +=================================================================== +--- linux-3.2.orig/kernel/power/suspend.c ++++ linux-3.2/kernel/power/suspend.c +@@ -171,6 +171,8 @@ static int suspend_enter(suspend_state_t + arch_suspend_disable_irqs(); + BUG_ON(!irqs_disabled()); + ++ system_state = SYSTEM_SUSPEND; ++ + error = syscore_suspend(); + if (!error) { + *wakeup = pm_wakeup_pending(); +@@ -181,6 +183,8 @@ static int suspend_enter(suspend_state_t + syscore_resume(); + } + ++ system_state = SYSTEM_RUNNING; ++ + arch_suspend_enable_irqs(); + BUG_ON(irqs_disabled()); + +Index: linux-3.2/drivers/of/base.c +=================================================================== +--- linux-3.2.orig/drivers/of/base.c ++++ linux-3.2/drivers/of/base.c +@@ -54,7 +54,7 @@ static DEFINE_MUTEX(of_aliases_mutex); + /* use when traversing tree through the allnext, child, sibling, + * or parent members of struct device_node. + */ +-DEFINE_RWLOCK(devtree_lock); ++DEFINE_RAW_SPINLOCK(devtree_lock); + + int of_n_addr_cells(struct device_node *np) + { +@@ -163,16 +163,14 @@ void of_node_put(struct device_node *nod + EXPORT_SYMBOL(of_node_put); + #endif /* !CONFIG_SPARC */ + +-struct property *of_find_property(const struct device_node *np, +- const char *name, +- int *lenp) ++static struct property *__of_find_property(const struct device_node *np, ++ const char *name, int *lenp) + { + struct property *pp; + + if (!np) + return NULL; + +- read_lock(&devtree_lock); + for (pp = np->properties; pp != 0; pp = pp->next) { + if (of_prop_cmp(pp->name, name) == 0) { + if (lenp != 0) +@@ -180,7 +178,20 @@ struct property *of_find_property(const + break; + } + } +- read_unlock(&devtree_lock); ++ ++ return pp; ++} ++ ++struct property *of_find_property(const struct device_node *np, ++ const char *name, ++ int *lenp) ++{ ++ struct property *pp; ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&devtree_lock, flags); ++ pp = __of_find_property(np, name, lenp); ++ raw_spin_unlock_irqrestore(&devtree_lock, flags); + + return pp; + } +@@ -198,13 +209,13 @@ struct device_node *of_find_all_nodes(st + { + struct device_node *np; + +- read_lock(&devtree_lock); ++ raw_spin_lock(&devtree_lock); + np = prev ? prev->allnext : allnodes; + for (; np != NULL; np = np->allnext) + if (of_node_get(np)) + break; + of_node_put(prev); +- read_unlock(&devtree_lock); ++ raw_spin_unlock(&devtree_lock); + return np; + } + EXPORT_SYMBOL(of_find_all_nodes); +@@ -213,8 +224,20 @@ EXPORT_SYMBOL(of_find_all_nodes); + * Find a property with a given name for a given node + * and return the value. + */ ++static const void *__of_get_property(const struct device_node *np, ++ const char *name, int *lenp) ++{ ++ struct property *pp = __of_find_property(np, name, lenp); ++ ++ return pp ? pp->value : NULL; ++} ++ ++/* ++ * Find a property with a given name for a given node ++ * and return the value. 
++ */ + const void *of_get_property(const struct device_node *np, const char *name, +- int *lenp) ++ int *lenp) + { + struct property *pp = of_find_property(np, name, lenp); + +@@ -225,13 +248,13 @@ EXPORT_SYMBOL(of_get_property); + /** Checks if the given "compat" string matches one of the strings in + * the device's "compatible" property + */ +-int of_device_is_compatible(const struct device_node *device, +- const char *compat) ++static int __of_device_is_compatible(const struct device_node *device, ++ const char *compat) + { + const char* cp; +- int cplen, l; ++ int uninitialized_var(cplen), l; + +- cp = of_get_property(device, "compatible", &cplen); ++ cp = __of_get_property(device, "compatible", &cplen); + if (cp == NULL) + return 0; + while (cplen > 0) { +@@ -244,6 +267,21 @@ int of_device_is_compatible(const struct + + return 0; + } ++ ++/** Checks if the given "compat" string matches one of the strings in ++ * the device's "compatible" property ++ */ ++int of_device_is_compatible(const struct device_node *device, ++ const char *compat) ++{ ++ unsigned long flags; ++ int res; ++ ++ raw_spin_lock_irqsave(&devtree_lock, flags); ++ res = __of_device_is_compatible(device, compat); ++ raw_spin_unlock_irqrestore(&devtree_lock, flags); ++ return res; ++} + EXPORT_SYMBOL(of_device_is_compatible); + + /** +@@ -303,13 +341,14 @@ EXPORT_SYMBOL(of_device_is_available); + struct device_node *of_get_parent(const struct device_node *node) + { + struct device_node *np; ++ unsigned long flags; + + if (!node) + return NULL; + +- read_lock(&devtree_lock); ++ raw_spin_lock_irqsave(&devtree_lock, flags); + np = of_node_get(node->parent); +- read_unlock(&devtree_lock); ++ raw_spin_unlock_irqrestore(&devtree_lock, flags); + return np; + } + EXPORT_SYMBOL(of_get_parent); +@@ -328,14 +367,15 @@ EXPORT_SYMBOL(of_get_parent); + struct device_node *of_get_next_parent(struct device_node *node) + { + struct device_node *parent; ++ unsigned long flags; + + if (!node) + return NULL; + +- read_lock(&devtree_lock); ++ raw_spin_lock_irqsave(&devtree_lock, flags); + parent = of_node_get(node->parent); + of_node_put(node); +- read_unlock(&devtree_lock); ++ raw_spin_unlock_irqrestore(&devtree_lock, flags); + return parent; + } + +@@ -351,14 +391,15 @@ struct device_node *of_get_next_child(co + struct device_node *prev) + { + struct device_node *next; ++ unsigned long flags; + +- read_lock(&devtree_lock); ++ raw_spin_lock_irqsave(&devtree_lock, flags); + next = prev ? prev->sibling : node->child; + for (; next; next = next->sibling) + if (of_node_get(next)) + break; + of_node_put(prev); +- read_unlock(&devtree_lock); ++ raw_spin_unlock_irqrestore(&devtree_lock, flags); + return next; + } + EXPORT_SYMBOL(of_get_next_child); +@@ -373,14 +414,15 @@ EXPORT_SYMBOL(of_get_next_child); + struct device_node *of_find_node_by_path(const char *path) + { + struct device_node *np = allnodes; ++ unsigned long flags; + +- read_lock(&devtree_lock); ++ raw_spin_lock_irqsave(&devtree_lock, flags); + for (; np; np = np->allnext) { + if (np->full_name && (of_node_cmp(np->full_name, path) == 0) + && of_node_get(np)) + break; + } +- read_unlock(&devtree_lock); ++ raw_spin_unlock_irqrestore(&devtree_lock, flags); + return np; + } + EXPORT_SYMBOL(of_find_node_by_path); +@@ -400,15 +442,16 @@ struct device_node *of_find_node_by_name + const char *name) + { + struct device_node *np; ++ unsigned long flags; + +- read_lock(&devtree_lock); ++ raw_spin_lock_irqsave(&devtree_lock, flags); + np = from ? 
from->allnext : allnodes; + for (; np; np = np->allnext) + if (np->name && (of_node_cmp(np->name, name) == 0) + && of_node_get(np)) + break; + of_node_put(from); +- read_unlock(&devtree_lock); ++ raw_spin_unlock_irqrestore(&devtree_lock, flags); + return np; + } + EXPORT_SYMBOL(of_find_node_by_name); +@@ -429,15 +472,16 @@ struct device_node *of_find_node_by_type + const char *type) + { + struct device_node *np; ++ unsigned long flags; + +- read_lock(&devtree_lock); ++ raw_spin_lock_irqsave(&devtree_lock, flags); + np = from ? from->allnext : allnodes; + for (; np; np = np->allnext) + if (np->type && (of_node_cmp(np->type, type) == 0) + && of_node_get(np)) + break; + of_node_put(from); +- read_unlock(&devtree_lock); ++ raw_spin_unlock_irqrestore(&devtree_lock, flags); + return np; + } + EXPORT_SYMBOL(of_find_node_by_type); +@@ -460,18 +504,20 @@ struct device_node *of_find_compatible_n + const char *type, const char *compatible) + { + struct device_node *np; ++ unsigned long flags; + +- read_lock(&devtree_lock); ++ raw_spin_lock_irqsave(&devtree_lock, flags); + np = from ? from->allnext : allnodes; + for (; np; np = np->allnext) { + if (type + && !(np->type && (of_node_cmp(np->type, type) == 0))) + continue; +- if (of_device_is_compatible(np, compatible) && of_node_get(np)) ++ if (__of_device_is_compatible(np, compatible) && ++ of_node_get(np)) + break; + } + of_node_put(from); +- read_unlock(&devtree_lock); ++ raw_spin_unlock_irqrestore(&devtree_lock, flags); + return np; + } + EXPORT_SYMBOL(of_find_compatible_node); +@@ -493,8 +539,9 @@ struct device_node *of_find_node_with_pr + { + struct device_node *np; + struct property *pp; ++ unsigned long flags; + +- read_lock(&devtree_lock); ++ raw_spin_lock_irqsave(&devtree_lock, flags); + np = from ? from->allnext : allnodes; + for (; np; np = np->allnext) { + for (pp = np->properties; pp != 0; pp = pp->next) { +@@ -506,20 +553,14 @@ struct device_node *of_find_node_with_pr + } + out: + of_node_put(from); +- read_unlock(&devtree_lock); ++ raw_spin_unlock_irqrestore(&devtree_lock, flags); + return np; + } + EXPORT_SYMBOL(of_find_node_with_property); + +-/** +- * of_match_node - Tell if an device_node has a matching of_match structure +- * @matches: array of of device match structures to search in +- * @node: the of device structure to match against +- * +- * Low level utility function used by device matching. +- */ +-const struct of_device_id *of_match_node(const struct of_device_id *matches, +- const struct device_node *node) ++static ++const struct of_device_id *__of_match_node(const struct of_device_id *matches, ++ const struct device_node *node) + { + if (!matches) + return NULL; +@@ -533,14 +574,33 @@ const struct of_device_id *of_match_node + match &= node->type + && !strcmp(matches->type, node->type); + if (matches->compatible[0]) +- match &= of_device_is_compatible(node, +- matches->compatible); ++ match &= __of_device_is_compatible(node, ++ matches->compatible); + if (match) + return matches; + matches++; + } + return NULL; + } ++ ++/** ++ * of_match_node - Tell if an device_node has a matching of_match structure ++ * @matches: array of of device match structures to search in ++ * @node: the of device structure to match against ++ * ++ * Low level utility function used by device matching. 
++ */ ++const struct of_device_id *of_match_node(const struct of_device_id *matches, ++ const struct device_node *node) ++{ ++ const struct of_device_id *match; ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&devtree_lock, flags); ++ match = __of_match_node(matches, node); ++ raw_spin_unlock_irqrestore(&devtree_lock, flags); ++ return match; ++} + EXPORT_SYMBOL(of_match_node); + + /** +@@ -559,15 +619,16 @@ struct device_node *of_find_matching_nod + const struct of_device_id *matches) + { + struct device_node *np; ++ unsigned long flags; + +- read_lock(&devtree_lock); ++ raw_spin_lock_irqsave(&devtree_lock, flags); + np = from ? from->allnext : allnodes; + for (; np; np = np->allnext) { +- if (of_match_node(matches, np) && of_node_get(np)) ++ if (__of_match_node(matches, np) && of_node_get(np)) + break; + } + of_node_put(from); +- read_unlock(&devtree_lock); ++ raw_spin_unlock_irqrestore(&devtree_lock, flags); + return np; + } + EXPORT_SYMBOL(of_find_matching_node); +@@ -610,12 +671,12 @@ struct device_node *of_find_node_by_phan + { + struct device_node *np; + +- read_lock(&devtree_lock); ++ raw_spin_lock(&devtree_lock); + for (np = allnodes; np; np = np->allnext) + if (np->phandle == handle) + break; + of_node_get(np); +- read_unlock(&devtree_lock); ++ raw_spin_unlock(&devtree_lock); + return np; + } + EXPORT_SYMBOL(of_find_node_by_phandle); +@@ -949,18 +1010,18 @@ int prom_add_property(struct device_node + unsigned long flags; + + prop->next = NULL; +- write_lock_irqsave(&devtree_lock, flags); ++ raw_spin_lock_irqsave(&devtree_lock, flags); + next = &np->properties; + while (*next) { + if (strcmp(prop->name, (*next)->name) == 0) { + /* duplicate ! don't insert it */ +- write_unlock_irqrestore(&devtree_lock, flags); ++ raw_spin_unlock_irqrestore(&devtree_lock, flags); + return -1; + } + next = &(*next)->next; + } + *next = prop; +- write_unlock_irqrestore(&devtree_lock, flags); ++ raw_spin_unlock_irqrestore(&devtree_lock, flags); + + #ifdef CONFIG_PROC_DEVICETREE + /* try to add to proc as well if it was initialized */ +@@ -985,7 +1046,7 @@ int prom_remove_property(struct device_n + unsigned long flags; + int found = 0; + +- write_lock_irqsave(&devtree_lock, flags); ++ raw_spin_lock_irqsave(&devtree_lock, flags); + next = &np->properties; + while (*next) { + if (*next == prop) { +@@ -998,7 +1059,7 @@ int prom_remove_property(struct device_n + } + next = &(*next)->next; + } +- write_unlock_irqrestore(&devtree_lock, flags); ++ raw_spin_unlock_irqrestore(&devtree_lock, flags); + + if (!found) + return -ENODEV; +@@ -1028,7 +1089,7 @@ int prom_update_property(struct device_n + unsigned long flags; + int found = 0; + +- write_lock_irqsave(&devtree_lock, flags); ++ raw_spin_lock_irqsave(&devtree_lock, flags); + next = &np->properties; + while (*next) { + if (*next == oldprop) { +@@ -1042,7 +1103,7 @@ int prom_update_property(struct device_n + } + next = &(*next)->next; + } +- write_unlock_irqrestore(&devtree_lock, flags); ++ raw_spin_unlock_irqrestore(&devtree_lock, flags); + + if (!found) + return -ENODEV; +@@ -1072,12 +1133,12 @@ void of_attach_node(struct device_node * + { + unsigned long flags; + +- write_lock_irqsave(&devtree_lock, flags); ++ raw_spin_lock_irqsave(&devtree_lock, flags); + np->sibling = np->parent->child; + np->allnext = allnodes; + np->parent->child = np; + allnodes = np; +- write_unlock_irqrestore(&devtree_lock, flags); ++ raw_spin_unlock_irqrestore(&devtree_lock, flags); + } + + /** +@@ -1091,7 +1152,7 @@ void of_detach_node(struct device_node * + struct device_node 
*parent; + unsigned long flags; + +- write_lock_irqsave(&devtree_lock, flags); ++ raw_spin_lock_irqsave(&devtree_lock, flags); + + parent = np->parent; + if (!parent) +@@ -1122,7 +1183,7 @@ void of_detach_node(struct device_node * + of_node_set_flag(np, OF_DETACHED); + + out_unlock: +- write_unlock_irqrestore(&devtree_lock, flags); ++ raw_spin_unlock_irqrestore(&devtree_lock, flags); + } + #endif /* defined(CONFIG_OF_DYNAMIC) */ + +Index: linux-3.2/arch/sparc/kernel/prom_common.c +=================================================================== +--- linux-3.2.orig/arch/sparc/kernel/prom_common.c ++++ linux-3.2/arch/sparc/kernel/prom_common.c +@@ -67,7 +67,7 @@ int of_set_property(struct device_node * + err = -ENODEV; + + mutex_lock(&of_set_property_mutex); +- write_lock(&devtree_lock); ++ raw_spin_lock(&devtree_lock); + prevp = &dp->properties; + while (*prevp) { + struct property *prop = *prevp; +@@ -94,7 +94,7 @@ int of_set_property(struct device_node * + } + prevp = &(*prevp)->next; + } +- write_unlock(&devtree_lock); ++ raw_spin_unlock(&devtree_lock); + mutex_unlock(&of_set_property_mutex); + + /* XXX Upate procfs if necessary... */ +Index: linux-3.2/include/linux/of.h +=================================================================== +--- linux-3.2.orig/include/linux/of.h ++++ linux-3.2/include/linux/of.h +@@ -71,7 +71,7 @@ struct device_node { + extern struct device_node *allnodes; + extern struct device_node *of_chosen; + extern struct device_node *of_aliases; +-extern rwlock_t devtree_lock; ++extern raw_spinlock_t devtree_lock; + + static inline bool of_have_populated_dt(void) + { +Index: linux-3.2/include/linux/list.h +=================================================================== +--- linux-3.2.orig/include/linux/list.h ++++ linux-3.2/include/linux/list.h +@@ -362,6 +362,17 @@ static inline void list_splice_tail_init + list_entry((ptr)->next, type, member) + + /** ++ * list_last_entry - get the last element from a list ++ * @ptr: the list head to take the element from. ++ * @type: the type of the struct this is embedded in. ++ * @member: the name of the list_struct within the struct. ++ * ++ * Note, that list is expected to be not empty. ++ */ ++#define list_last_entry(ptr, type, member) \ ++ list_entry((ptr)->prev, type, member) ++ ++/** + * list_for_each - iterate over a list + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. +Index: linux-3.2/mm/page_alloc.c +=================================================================== +--- linux-3.2.orig/mm/page_alloc.c ++++ linux-3.2/mm/page_alloc.c +@@ -57,6 +57,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -222,6 +223,18 @@ EXPORT_SYMBOL(nr_node_ids); + EXPORT_SYMBOL(nr_online_nodes); + #endif + ++static DEFINE_LOCAL_IRQ_LOCK(pa_lock); ++ ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define cpu_lock_irqsave(cpu, flags) \ ++ spin_lock_irqsave(&per_cpu(pa_lock, cpu).lock, flags) ++# define cpu_unlock_irqrestore(cpu, flags) \ ++ spin_unlock_irqrestore(&per_cpu(pa_lock, cpu).lock, flags) ++#else ++# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags) ++# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags) ++#endif ++ + int page_group_by_mobility_disabled __read_mostly; + + static void set_pageblock_migratetype(struct page *page, int migratetype) +@@ -581,7 +594,7 @@ static inline int free_pages_check(struc + } + + /* +- * Frees a number of pages from the PCP lists ++ * Frees a number of pages which have been collected from the pcp lists. 
+ * Assumes all pages on list are in same zone, and of same order. + * count is the number of pages to free. + * +@@ -592,16 +605,42 @@ static inline int free_pages_check(struc + * pinned" detection logic. + */ + static void free_pcppages_bulk(struct zone *zone, int count, +- struct per_cpu_pages *pcp) ++ struct list_head *list) + { +- int migratetype = 0; +- int batch_free = 0; + int to_free = count; ++ unsigned long flags; + +- spin_lock(&zone->lock); ++ spin_lock_irqsave(&zone->lock, flags); + zone->all_unreclaimable = 0; + zone->pages_scanned = 0; + ++ while (!list_empty(list)) { ++ struct page *page = list_first_entry(list, struct page, lru); ++ ++ /* must delete as __free_one_page list manipulates */ ++ list_del(&page->lru); ++ /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ ++ __free_one_page(page, zone, 0, page_private(page)); ++ trace_mm_page_pcpu_drain(page, 0, page_private(page)); ++ to_free--; ++ } ++ WARN_ON(to_free != 0); ++ __mod_zone_page_state(zone, NR_FREE_PAGES, count); ++ spin_unlock_irqrestore(&zone->lock, flags); ++} ++ ++/* ++ * Moves a number of pages from the PCP lists to free list which ++ * is freed outside of the locked region. ++ * ++ * Assumes all pages on list are in same zone, and of same order. ++ * count is the number of pages to free. ++ */ ++static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src, ++ struct list_head *dst) ++{ ++ int migratetype = 0, batch_free = 0; ++ + while (to_free) { + struct page *page; + struct list_head *list; +@@ -617,7 +656,7 @@ static void free_pcppages_bulk(struct zo + batch_free++; + if (++migratetype == MIGRATE_PCPTYPES) + migratetype = 0; +- list = &pcp->lists[migratetype]; ++ list = &src->lists[migratetype]; + } while (list_empty(list)); + + /* This is the only non-empty list. Free them all. 
*/ +@@ -625,28 +664,25 @@ static void free_pcppages_bulk(struct zo + batch_free = to_free; + + do { +- page = list_entry(list->prev, struct page, lru); +- /* must delete as __free_one_page list manipulates */ ++ page = list_last_entry(list, struct page, lru); + list_del(&page->lru); +- /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ +- __free_one_page(page, zone, 0, page_private(page)); +- trace_mm_page_pcpu_drain(page, 0, page_private(page)); ++ list_add(&page->lru, dst); + } while (--to_free && --batch_free && !list_empty(list)); + } +- __mod_zone_page_state(zone, NR_FREE_PAGES, count); +- spin_unlock(&zone->lock); + } + + static void free_one_page(struct zone *zone, struct page *page, int order, + int migratetype) + { +- spin_lock(&zone->lock); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&zone->lock, flags); + zone->all_unreclaimable = 0; + zone->pages_scanned = 0; + + __free_one_page(page, zone, order, migratetype); + __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); +- spin_unlock(&zone->lock); ++ spin_unlock_irqrestore(&zone->lock, flags); + } + + static bool free_pages_prepare(struct page *page, unsigned int order) +@@ -683,13 +719,13 @@ static void __free_pages_ok(struct page + if (!free_pages_prepare(page, order)) + return; + +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + if (unlikely(wasMlocked)) + free_page_mlock(page); + __count_vm_events(PGFREE, 1 << order); + free_one_page(page_zone(page), page, order, + get_pageblock_migratetype(page)); +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + } + + /* +@@ -1065,16 +1101,18 @@ static int rmqueue_bulk(struct zone *zon + void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) + { + unsigned long flags; ++ LIST_HEAD(dst); + int to_drain; + +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + if (pcp->count >= pcp->batch) + to_drain = pcp->batch; + else + to_drain = pcp->count; +- free_pcppages_bulk(zone, to_drain, pcp); ++ isolate_pcp_pages(to_drain, pcp, &dst); + pcp->count -= to_drain; +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); ++ free_pcppages_bulk(zone, to_drain, &dst); + } + #endif + +@@ -1093,16 +1131,21 @@ static void drain_pages(unsigned int cpu + for_each_populated_zone(zone) { + struct per_cpu_pageset *pset; + struct per_cpu_pages *pcp; ++ LIST_HEAD(dst); ++ int count; + +- local_irq_save(flags); ++ cpu_lock_irqsave(cpu, flags); + pset = per_cpu_ptr(zone->pageset, cpu); + + pcp = &pset->pcp; +- if (pcp->count) { +- free_pcppages_bulk(zone, pcp->count, pcp); ++ count = pcp->count; ++ if (count) { ++ isolate_pcp_pages(count, pcp, &dst); + pcp->count = 0; + } +- local_irq_restore(flags); ++ cpu_unlock_irqrestore(cpu, flags); ++ if (count) ++ free_pcppages_bulk(zone, count, &dst); + } + } + +@@ -1119,7 +1162,14 @@ void drain_local_pages(void *arg) + */ + void drain_all_pages(void) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + on_each_cpu(drain_local_pages, NULL, 1); ++#else ++ int i; ++ ++ for_each_online_cpu(i) ++ drain_pages(i); ++#endif + } + + #ifdef CONFIG_HIBERNATION +@@ -1175,7 +1225,7 @@ void free_hot_cold_page(struct page *pag + + migratetype = get_pageblock_migratetype(page); + set_page_private(page, migratetype); +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + if (unlikely(wasMlocked)) + free_page_mlock(page); + __count_vm_event(PGFREE); +@@ -1202,12 +1252,19 @@ void free_hot_cold_page(struct page *pag + list_add(&page->lru, &pcp->lists[migratetype]); + pcp->count++; + if (pcp->count >= 
pcp->high) { +- free_pcppages_bulk(zone, pcp->batch, pcp); ++ LIST_HEAD(dst); ++ int count; ++ ++ isolate_pcp_pages(pcp->batch, pcp, &dst); + pcp->count -= pcp->batch; ++ count = pcp->batch; ++ local_unlock_irqrestore(pa_lock, flags); ++ free_pcppages_bulk(zone, count, &dst); ++ return; + } + + out: +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + } + + /* +@@ -1302,7 +1359,7 @@ again: + struct per_cpu_pages *pcp; + struct list_head *list; + +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + pcp = &this_cpu_ptr(zone->pageset)->pcp; + list = &pcp->lists[migratetype]; + if (list_empty(list)) { +@@ -1334,17 +1391,19 @@ again: + */ + WARN_ON_ONCE(order > 1); + } +- spin_lock_irqsave(&zone->lock, flags); ++ local_spin_lock_irqsave(pa_lock, &zone->lock, flags); + page = __rmqueue(zone, order, migratetype); +- spin_unlock(&zone->lock); +- if (!page) ++ if (!page) { ++ spin_unlock(&zone->lock); + goto failed; ++ } + __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order)); ++ spin_unlock(&zone->lock); + } + + __count_zone_vm_events(PGALLOC, zone, 1 << order); + zone_statistics(preferred_zone, zone, gfp_flags); +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + + VM_BUG_ON(bad_range(zone, page)); + if (prep_new_page(page, order, gfp_flags)) +@@ -1352,7 +1411,7 @@ again: + return page; + + failed: +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + return NULL; + } + +@@ -1901,8 +1960,8 @@ __alloc_pages_direct_compact(gfp_t gfp_m + if (*did_some_progress != COMPACT_SKIPPED) { + + /* Page migration frees to the PCP lists but we want merging */ +- drain_pages(get_cpu()); +- put_cpu(); ++ drain_pages(get_cpu_light()); ++ put_cpu_light(); + + page = get_page_from_freelist(gfp_mask, nodemask, + order, zonelist, high_zoneidx, +@@ -3674,14 +3733,16 @@ static int __zone_pcp_update(void *data) + for_each_possible_cpu(cpu) { + struct per_cpu_pageset *pset; + struct per_cpu_pages *pcp; ++ LIST_HEAD(dst); + + pset = per_cpu_ptr(zone->pageset, cpu); + pcp = &pset->pcp; + +- local_irq_save(flags); +- free_pcppages_bulk(zone, pcp->count, pcp); ++ cpu_lock_irqsave(cpu, flags); ++ isolate_pcp_pages(pcp->count, pcp, &dst); ++ free_pcppages_bulk(zone, pcp->count, &dst); + setup_pageset(pset, batch); +- local_irq_restore(flags); ++ cpu_unlock_irqrestore(cpu, flags); + } + return 0; + } +@@ -5047,6 +5108,7 @@ static int page_alloc_cpu_notify(struct + void __init page_alloc_init(void) + { + hotcpu_notifier(page_alloc_cpu_notify, 0); ++ local_irq_lock_init(pa_lock); + } + + /* +Index: linux-3.2/mm/slab.c +=================================================================== +--- linux-3.2.orig/mm/slab.c ++++ linux-3.2/mm/slab.c +@@ -116,6 +116,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -718,12 +719,66 @@ static DEFINE_MUTEX(cache_chain_mutex); + static struct list_head cache_chain; + + static DEFINE_PER_CPU(struct delayed_work, slab_reap_work); ++static DEFINE_PER_CPU(struct list_head, slab_free_list); ++static DEFINE_LOCAL_IRQ_LOCK(slab_lock); ++ ++#ifndef CONFIG_PREEMPT_RT_BASE ++# define slab_on_each_cpu(func, cp) on_each_cpu(func, cp, 1) ++#else ++/* ++ * execute func() for all CPUs. On PREEMPT_RT we dont actually have ++ * to run on the remote CPUs - we only have to take their CPU-locks. ++ * (This is a rare operation, so cacheline bouncing is not an issue.) 
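Several hunks in this area replace local_irq_save()/on_each_cpu() with per-CPU locks (pa_lock for the page allocator, slab_lock for the slab allocator), so the critical sections stay preemptible on RT and another CPU's caches can be drained by taking that CPU's lock rather than by sending an IPI, which is what cpu_lock_irqsave() and slab_on_each_cpu() amount to. A rough user-space model of that idea, with one pthread mutex per "CPU" and invented names throughout:

#include <pthread.h>

#define NR_CPUS 4

/* One lock per CPU, standing in for the per-CPU local irq lock. */
static pthread_mutex_t cpu_lock[NR_CPUS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};
static int percpu_count[NR_CPUS];	/* per-CPU state that needs draining */

/* CPU-local fast path: only this CPU's lock is taken. */
static void local_add(int cpu, int v)
{
	pthread_mutex_lock(&cpu_lock[cpu]);
	percpu_count[cpu] += v;
	pthread_mutex_unlock(&cpu_lock[cpu]);
}

/*
 * Remote drain: instead of interrupting every CPU, serialize against
 * each one in turn by taking its lock.
 */
static int drain_all(void)
{
	int cpu, total = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		pthread_mutex_lock(&cpu_lock[cpu]);
		total += percpu_count[cpu];
		percpu_count[cpu] = 0;
		pthread_mutex_unlock(&cpu_lock[cpu]);
	}
	return total;
}

On the non-RT configuration the original on_each_cpu() IPI path is kept, as the #ifndef CONFIG_PREEMPT_RT_BASE branches in these hunks show.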
++ */ ++static void ++slab_on_each_cpu(void (*func)(void *arg, int this_cpu), void *arg) ++{ ++ unsigned int i; ++ ++ for_each_online_cpu(i) ++ func(arg, i); ++} ++#endif ++ ++static void free_delayed(struct list_head *h) ++{ ++ while(!list_empty(h)) { ++ struct page *page = list_first_entry(h, struct page, lru); ++ ++ list_del(&page->lru); ++ __free_pages(page, page->index); ++ } ++} ++ ++static void unlock_l3_and_free_delayed(spinlock_t *list_lock) ++{ ++ LIST_HEAD(tmp); ++ ++ list_splice_init(&__get_cpu_var(slab_free_list), &tmp); ++ local_spin_unlock_irq(slab_lock, list_lock); ++ free_delayed(&tmp); ++} ++ ++static void unlock_slab_and_free_delayed(unsigned long flags) ++{ ++ LIST_HEAD(tmp); ++ ++ list_splice_init(&__get_cpu_var(slab_free_list), &tmp); ++ local_unlock_irqrestore(slab_lock, flags); ++ free_delayed(&tmp); ++} + + static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) + { + return cachep->array[smp_processor_id()]; + } + ++static inline struct array_cache *cpu_cache_get_on_cpu(struct kmem_cache *cachep, ++ int cpu) ++{ ++ return cachep->array[cpu]; ++} ++ + static inline struct kmem_cache *__find_general_cachep(size_t size, + gfp_t gfpflags) + { +@@ -1061,9 +1116,10 @@ static void reap_alien(struct kmem_cache + if (l3->alien) { + struct array_cache *ac = l3->alien[node]; + +- if (ac && ac->avail && spin_trylock_irq(&ac->lock)) { ++ if (ac && ac->avail && ++ local_spin_trylock_irq(slab_lock, &ac->lock)) { + __drain_alien_cache(cachep, ac, node); +- spin_unlock_irq(&ac->lock); ++ local_spin_unlock_irq(slab_lock, &ac->lock); + } + } + } +@@ -1078,9 +1134,9 @@ static void drain_alien_cache(struct kme + for_each_online_node(i) { + ac = alien[i]; + if (ac) { +- spin_lock_irqsave(&ac->lock, flags); ++ local_spin_lock_irqsave(slab_lock, &ac->lock, flags); + __drain_alien_cache(cachep, ac, i); +- spin_unlock_irqrestore(&ac->lock, flags); ++ local_spin_unlock_irqrestore(slab_lock, &ac->lock, flags); + } + } + } +@@ -1159,11 +1215,11 @@ static int init_cache_nodelists_node(int + cachep->nodelists[node] = l3; + } + +- spin_lock_irq(&cachep->nodelists[node]->list_lock); ++ local_spin_lock_irq(slab_lock, &cachep->nodelists[node]->list_lock); + cachep->nodelists[node]->free_limit = + (1 + nr_cpus_node(node)) * + cachep->batchcount + cachep->num; +- spin_unlock_irq(&cachep->nodelists[node]->list_lock); ++ local_spin_unlock_irq(slab_lock, &cachep->nodelists[node]->list_lock); + } + return 0; + } +@@ -1188,7 +1244,7 @@ static void __cpuinit cpuup_canceled(lon + if (!l3) + goto free_array_cache; + +- spin_lock_irq(&l3->list_lock); ++ local_spin_lock_irq(slab_lock, &l3->list_lock); + + /* Free limit for this kmem_list3 */ + l3->free_limit -= cachep->batchcount; +@@ -1196,7 +1252,7 @@ static void __cpuinit cpuup_canceled(lon + free_block(cachep, nc->entry, nc->avail, node); + + if (!cpumask_empty(mask)) { +- spin_unlock_irq(&l3->list_lock); ++ unlock_l3_and_free_delayed(&l3->list_lock); + goto free_array_cache; + } + +@@ -1210,7 +1266,7 @@ static void __cpuinit cpuup_canceled(lon + alien = l3->alien; + l3->alien = NULL; + +- spin_unlock_irq(&l3->list_lock); ++ unlock_l3_and_free_delayed(&l3->list_lock); + + kfree(shared); + if (alien) { +@@ -1284,7 +1340,7 @@ static int __cpuinit cpuup_prepare(long + l3 = cachep->nodelists[node]; + BUG_ON(!l3); + +- spin_lock_irq(&l3->list_lock); ++ local_spin_lock_irq(slab_lock, &l3->list_lock); + if (!l3->shared) { + /* + * We are serialised from CPU_DEAD or +@@ -1299,7 +1355,7 @@ static int __cpuinit cpuup_prepare(long + alien = NULL; + 
} + #endif +- spin_unlock_irq(&l3->list_lock); ++ local_spin_unlock_irq(slab_lock, &l3->list_lock); + kfree(shared); + free_alien_cache(alien); + if (cachep->flags & SLAB_DEBUG_OBJECTS) +@@ -1490,6 +1546,10 @@ void __init kmem_cache_init(void) + if (num_possible_nodes() == 1) + use_alien_caches = 0; + ++ local_irq_lock_init(slab_lock); ++ for_each_possible_cpu(i) ++ INIT_LIST_HEAD(&per_cpu(slab_free_list, i)); ++ + for (i = 0; i < NUM_INIT_LISTS; i++) { + kmem_list3_init(&initkmem_list3[i]); + if (i < MAX_NUMNODES) +@@ -1766,12 +1826,14 @@ static void *kmem_getpages(struct kmem_c + /* + * Interface to system's page release. + */ +-static void kmem_freepages(struct kmem_cache *cachep, void *addr) ++static void kmem_freepages(struct kmem_cache *cachep, void *addr, bool delayed) + { + unsigned long i = (1 << cachep->gfporder); +- struct page *page = virt_to_page(addr); ++ struct page *page, *basepage = virt_to_page(addr); + const unsigned long nr_freed = i; + ++ page = basepage; ++ + kmemcheck_free_shadow(page, cachep->gfporder); + + if (cachep->flags & SLAB_RECLAIM_ACCOUNT) +@@ -1787,7 +1849,13 @@ static void kmem_freepages(struct kmem_c + } + if (current->reclaim_state) + current->reclaim_state->reclaimed_slab += nr_freed; +- free_pages((unsigned long)addr, cachep->gfporder); ++ ++ if (!delayed) { ++ free_pages((unsigned long)addr, cachep->gfporder); ++ } else { ++ basepage->index = cachep->gfporder; ++ list_add(&basepage->lru, &__get_cpu_var(slab_free_list)); ++ } + } + + static void kmem_rcu_free(struct rcu_head *head) +@@ -1795,7 +1863,7 @@ static void kmem_rcu_free(struct rcu_hea + struct slab_rcu *slab_rcu = (struct slab_rcu *)head; + struct kmem_cache *cachep = slab_rcu->cachep; + +- kmem_freepages(cachep, slab_rcu->addr); ++ kmem_freepages(cachep, slab_rcu->addr, false); + if (OFF_SLAB(cachep)) + kmem_cache_free(cachep->slabp_cache, slab_rcu); + } +@@ -2014,7 +2082,8 @@ static void slab_destroy_debugcheck(stru + * Before calling the slab must have been unlinked from the cache. The + * cache-lock is not held/needed. 
+ */ +-static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp) ++static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp, ++ bool delayed) + { + void *addr = slabp->s_mem - slabp->colouroff; + +@@ -2027,7 +2096,7 @@ static void slab_destroy(struct kmem_cac + slab_rcu->addr = addr; + call_rcu(&slab_rcu->head, kmem_rcu_free); + } else { +- kmem_freepages(cachep, addr); ++ kmem_freepages(cachep, addr, delayed); + if (OFF_SLAB(cachep)) + kmem_cache_free(cachep->slabp_cache, slabp); + } +@@ -2493,7 +2562,7 @@ EXPORT_SYMBOL(kmem_cache_create); + #if DEBUG + static void check_irq_off(void) + { +- BUG_ON(!irqs_disabled()); ++ BUG_ON_NONRT(!irqs_disabled()); + } + + static void check_irq_on(void) +@@ -2528,26 +2597,43 @@ static void drain_array(struct kmem_cach + struct array_cache *ac, + int force, int node); + +-static void do_drain(void *arg) ++static void __do_drain(void *arg, unsigned int cpu) + { + struct kmem_cache *cachep = arg; + struct array_cache *ac; +- int node = numa_mem_id(); ++ int node = cpu_to_mem(cpu); + +- check_irq_off(); +- ac = cpu_cache_get(cachep); ++ ac = cpu_cache_get_on_cpu(cachep, cpu); + spin_lock(&cachep->nodelists[node]->list_lock); + free_block(cachep, ac->entry, ac->avail, node); + spin_unlock(&cachep->nodelists[node]->list_lock); + ac->avail = 0; + } + ++#ifndef CONFIG_PREEMPT_RT_BASE ++static void do_drain(void *arg) ++{ ++ __do_drain(arg, smp_processor_id()); ++} ++#else ++static void do_drain(void *arg, int cpu) ++{ ++ LIST_HEAD(tmp); ++ ++ spin_lock_irq(&per_cpu(slab_lock, cpu).lock); ++ __do_drain(arg, cpu); ++ list_splice_init(&per_cpu(slab_free_list, cpu), &tmp); ++ spin_unlock_irq(&per_cpu(slab_lock, cpu).lock); ++ free_delayed(&tmp); ++} ++#endif ++ + static void drain_cpu_caches(struct kmem_cache *cachep) + { + struct kmem_list3 *l3; + int node; + +- on_each_cpu(do_drain, cachep, 1); ++ slab_on_each_cpu(do_drain, cachep); + check_irq_on(); + for_each_online_node(node) { + l3 = cachep->nodelists[node]; +@@ -2578,10 +2664,10 @@ static int drain_freelist(struct kmem_ca + nr_freed = 0; + while (nr_freed < tofree && !list_empty(&l3->slabs_free)) { + +- spin_lock_irq(&l3->list_lock); ++ local_spin_lock_irq(slab_lock, &l3->list_lock); + p = l3->slabs_free.prev; + if (p == &l3->slabs_free) { +- spin_unlock_irq(&l3->list_lock); ++ local_spin_unlock_irq(slab_lock, &l3->list_lock); + goto out; + } + +@@ -2595,8 +2681,8 @@ static int drain_freelist(struct kmem_ca + * to the cache. 
+ */ + l3->free_objects -= cache->num; +- spin_unlock_irq(&l3->list_lock); +- slab_destroy(cache, slabp); ++ local_spin_unlock_irq(slab_lock, &l3->list_lock); ++ slab_destroy(cache, slabp, false); + nr_freed++; + } + out: +@@ -2890,7 +2976,7 @@ static int cache_grow(struct kmem_cache + offset *= cachep->colour_off; + + if (local_flags & __GFP_WAIT) +- local_irq_enable(); ++ local_unlock_irq(slab_lock); + + /* + * The test for missing atomic flag is performed here, rather than +@@ -2920,7 +3006,7 @@ static int cache_grow(struct kmem_cache + cache_init_objs(cachep, slabp); + + if (local_flags & __GFP_WAIT) +- local_irq_disable(); ++ local_lock_irq(slab_lock); + check_irq_off(); + spin_lock(&l3->list_lock); + +@@ -2931,10 +3017,10 @@ static int cache_grow(struct kmem_cache + spin_unlock(&l3->list_lock); + return 1; + opps1: +- kmem_freepages(cachep, objp); ++ kmem_freepages(cachep, objp, false); + failed: + if (local_flags & __GFP_WAIT) +- local_irq_disable(); ++ local_lock_irq(slab_lock); + return 0; + } + +@@ -3326,11 +3412,11 @@ retry: + * set and go into memory reserves if necessary. + */ + if (local_flags & __GFP_WAIT) +- local_irq_enable(); ++ local_unlock_irq(slab_lock); + kmem_flagcheck(cache, flags); + obj = kmem_getpages(cache, local_flags, numa_mem_id()); + if (local_flags & __GFP_WAIT) +- local_irq_disable(); ++ local_lock_irq(slab_lock); + if (obj) { + /* + * Insert into the appropriate per node queues +@@ -3446,7 +3532,7 @@ __cache_alloc_node(struct kmem_cache *ca + return NULL; + + cache_alloc_debugcheck_before(cachep, flags); +- local_irq_save(save_flags); ++ local_lock_irqsave(slab_lock, save_flags); + + if (nodeid == NUMA_NO_NODE) + nodeid = slab_node; +@@ -3471,7 +3557,7 @@ __cache_alloc_node(struct kmem_cache *ca + /* ___cache_alloc_node can fall back to other nodes */ + ptr = ____cache_alloc_node(cachep, flags, nodeid); + out: +- local_irq_restore(save_flags); ++ local_unlock_irqrestore(slab_lock, save_flags); + ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); + kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags, + flags); +@@ -3531,9 +3617,9 @@ __cache_alloc(struct kmem_cache *cachep, + return NULL; + + cache_alloc_debugcheck_before(cachep, flags); +- local_irq_save(save_flags); ++ local_lock_irqsave(slab_lock, save_flags); + objp = __do_cache_alloc(cachep, flags); +- local_irq_restore(save_flags); ++ local_unlock_irqrestore(slab_lock, save_flags); + objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); + kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags, + flags); +@@ -3581,7 +3667,7 @@ static void free_block(struct kmem_cache + * a different cache, refer to comments before + * alloc_slabmgmt. 
+ */ +- slab_destroy(cachep, slabp); ++ slab_destroy(cachep, slabp, true); + } else { + list_add(&slabp->list, &l3->slabs_free); + } +@@ -3844,12 +3930,12 @@ void kmem_cache_free(struct kmem_cache * + { + unsigned long flags; + +- local_irq_save(flags); + debug_check_no_locks_freed(objp, obj_size(cachep)); + if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) + debug_check_no_obj_freed(objp, obj_size(cachep)); ++ local_lock_irqsave(slab_lock, flags); + __cache_free(cachep, objp, __builtin_return_address(0)); +- local_irq_restore(flags); ++ unlock_slab_and_free_delayed(flags); + + trace_kmem_cache_free(_RET_IP_, objp); + } +@@ -3873,13 +3959,13 @@ void kfree(const void *objp) + + if (unlikely(ZERO_OR_NULL_PTR(objp))) + return; +- local_irq_save(flags); + kfree_debugcheck(objp); + c = virt_to_cache(objp); + debug_check_no_locks_freed(objp, obj_size(c)); + debug_check_no_obj_freed(objp, obj_size(c)); ++ local_lock_irqsave(slab_lock, flags); + __cache_free(c, (void *)objp, __builtin_return_address(0)); +- local_irq_restore(flags); ++ unlock_slab_and_free_delayed(flags); + } + EXPORT_SYMBOL(kfree); + +@@ -3922,7 +4008,7 @@ static int alloc_kmemlist(struct kmem_ca + if (l3) { + struct array_cache *shared = l3->shared; + +- spin_lock_irq(&l3->list_lock); ++ local_spin_lock_irq(slab_lock, &l3->list_lock); + + if (shared) + free_block(cachep, shared->entry, +@@ -3935,7 +4021,8 @@ static int alloc_kmemlist(struct kmem_ca + } + l3->free_limit = (1 + nr_cpus_node(node)) * + cachep->batchcount + cachep->num; +- spin_unlock_irq(&l3->list_lock); ++ unlock_l3_and_free_delayed(&l3->list_lock); ++ + kfree(shared); + free_alien_cache(new_alien); + continue; +@@ -3982,17 +4069,30 @@ struct ccupdate_struct { + struct array_cache *new[0]; + }; + +-static void do_ccupdate_local(void *info) ++static void __do_ccupdate_local(void *info, int cpu) + { + struct ccupdate_struct *new = info; + struct array_cache *old; + +- check_irq_off(); +- old = cpu_cache_get(new->cachep); ++ old = cpu_cache_get_on_cpu(new->cachep, cpu); ++ ++ new->cachep->array[cpu] = new->new[cpu]; ++ new->new[cpu] = old; ++} + +- new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()]; +- new->new[smp_processor_id()] = old; ++#ifndef CONFIG_PREEMPT_RT_BASE ++static void do_ccupdate_local(void *info) ++{ ++ __do_ccupdate_local(info, smp_processor_id()); + } ++#else ++static void do_ccupdate_local(void *info, int cpu) ++{ ++ spin_lock_irq(&per_cpu(slab_lock, cpu).lock); ++ __do_ccupdate_local(info, cpu); ++ spin_unlock_irq(&per_cpu(slab_lock, cpu).lock); ++} ++#endif + + /* Always called with the cache_chain_mutex held */ + static int do_tune_cpucache(struct kmem_cache *cachep, int limit, +@@ -4018,7 +4118,7 @@ static int do_tune_cpucache(struct kmem_ + } + new->cachep = cachep; + +- on_each_cpu(do_ccupdate_local, (void *)new, 1); ++ slab_on_each_cpu(do_ccupdate_local, (void *)new); + + check_irq_on(); + cachep->batchcount = batchcount; +@@ -4029,9 +4129,11 @@ static int do_tune_cpucache(struct kmem_ + struct array_cache *ccold = new->new[i]; + if (!ccold) + continue; +- spin_lock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock); ++ local_spin_lock_irq(slab_lock, ++ &cachep->nodelists[cpu_to_mem(i)]->list_lock); + free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i)); +- spin_unlock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock); ++ ++ unlock_l3_and_free_delayed(&cachep->nodelists[cpu_to_mem(i)]->list_lock); + kfree(ccold); + } + kfree(new); +@@ -4107,7 +4209,7 @@ static void drain_array(struct kmem_cach + if (ac->touched && 
!force) { + ac->touched = 0; + } else { +- spin_lock_irq(&l3->list_lock); ++ local_spin_lock_irq(slab_lock, &l3->list_lock); + if (ac->avail) { + tofree = force ? ac->avail : (ac->limit + 4) / 5; + if (tofree > ac->avail) +@@ -4117,7 +4219,7 @@ static void drain_array(struct kmem_cach + memmove(ac->entry, &(ac->entry[tofree]), + sizeof(void *) * ac->avail); + } +- spin_unlock_irq(&l3->list_lock); ++ local_spin_unlock_irq(slab_lock, &l3->list_lock); + } + } + +@@ -4256,7 +4358,7 @@ static int s_show(struct seq_file *m, vo + continue; + + check_irq_on(); +- spin_lock_irq(&l3->list_lock); ++ local_spin_lock_irq(slab_lock, &l3->list_lock); + + list_for_each_entry(slabp, &l3->slabs_full, list) { + if (slabp->inuse != cachep->num && !error) +@@ -4281,7 +4383,7 @@ static int s_show(struct seq_file *m, vo + if (l3->shared) + shared_avail += l3->shared->avail; + +- spin_unlock_irq(&l3->list_lock); ++ local_spin_unlock_irq(slab_lock, &l3->list_lock); + } + num_slabs += active_slabs; + num_objs = num_slabs * cachep->num; +@@ -4510,13 +4612,13 @@ static int leaks_show(struct seq_file *m + continue; + + check_irq_on(); +- spin_lock_irq(&l3->list_lock); ++ local_spin_lock_irq(slab_lock, &l3->list_lock); + + list_for_each_entry(slabp, &l3->slabs_full, list) + handle_slab(n, cachep, slabp); + list_for_each_entry(slabp, &l3->slabs_partial, list) + handle_slab(n, cachep, slabp); +- spin_unlock_irq(&l3->list_lock); ++ local_spin_unlock_irq(slab_lock, &l3->list_lock); + } + name = cachep->name; + if (n[0] == n[1]) { +Index: linux-3.2/include/linux/pid.h +=================================================================== +--- linux-3.2.orig/include/linux/pid.h ++++ linux-3.2/include/linux/pid.h +@@ -2,6 +2,7 @@ + #define _LINUX_PID_H + + #include ++#include + + enum pid_type + { +Index: linux-3.2/include/linux/sysctl.h +=================================================================== +--- linux-3.2.orig/include/linux/sysctl.h ++++ linux-3.2/include/linux/sysctl.h +@@ -932,6 +932,7 @@ enum + #include + #include + #include ++#include + + /* For the /proc/sys support */ + struct ctl_table; +Index: linux-3.2/net/core/sock.c +=================================================================== +--- linux-3.2.orig/net/core/sock.c ++++ linux-3.2/net/core/sock.c +@@ -2035,12 +2035,11 @@ void lock_sock_nested(struct sock *sk, i + if (sk->sk_lock.owned) + __lock_sock(sk); + sk->sk_lock.owned = 1; +- spin_unlock(&sk->sk_lock.slock); ++ spin_unlock_bh(&sk->sk_lock.slock); + /* + * The sk_lock has mutex_lock() semantics here: + */ + mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); +- local_bh_enable(); + } + EXPORT_SYMBOL(lock_sock_nested); + +Index: linux-3.2/include/linux/interrupt.h +=================================================================== +--- linux-3.2.orig/include/linux/interrupt.h ++++ linux-3.2/include/linux/interrupt.h +@@ -219,7 +219,7 @@ extern void devm_free_irq(struct device + #ifdef CONFIG_LOCKDEP + # define local_irq_enable_in_hardirq() do { } while (0) + #else +-# define local_irq_enable_in_hardirq() local_irq_enable() ++# define local_irq_enable_in_hardirq() local_irq_enable_nort() + #endif + + extern void disable_irq_nosync(unsigned int irq); +@@ -396,9 +396,13 @@ static inline int disable_irq_wake(unsig + + + #ifdef CONFIG_IRQ_FORCED_THREADING +-extern bool force_irqthreads; ++# ifndef CONFIG_PREEMPT_RT_BASE ++ extern bool force_irqthreads; ++# else ++# define force_irqthreads (true) ++# endif + #else +-#define force_irqthreads (0) ++#define force_irqthreads (false) + #endif + + 
#ifndef __ARCH_SET_SOFTIRQ_PENDING +@@ -452,8 +456,14 @@ struct softirq_action + void (*action)(struct softirq_action *); + }; + ++#ifndef CONFIG_PREEMPT_RT_FULL + asmlinkage void do_softirq(void); + asmlinkage void __do_softirq(void); ++static inline void thread_do_softirq(void) { do_softirq(); } ++#else ++extern void thread_do_softirq(void); ++#endif ++ + extern void open_softirq(int nr, void (*action)(struct softirq_action *)); + extern void softirq_init(void); + static inline void __raise_softirq_irqoff(unsigned int nr) +@@ -465,6 +475,8 @@ static inline void __raise_softirq_irqof + extern void raise_softirq_irqoff(unsigned int nr); + extern void raise_softirq(unsigned int nr); + ++extern void softirq_check_pending_idle(void); ++ + /* This is the worklist that queues up per-cpu softirq work. + * + * send_remote_sendirq() adds work to these lists, and +@@ -642,6 +654,12 @@ void tasklet_hrtimer_cancel(struct taskl + tasklet_kill(&ttimer->tasklet); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++extern void softirq_early_init(void); ++#else ++static inline void softirq_early_init(void) { } ++#endif ++ + /* + * Autoprobing for irqs: + * +Index: linux-3.2/net/core/dev.c +=================================================================== +--- linux-3.2.orig/net/core/dev.c ++++ linux-3.2/net/core/dev.c +@@ -222,14 +222,14 @@ static inline struct hlist_head *dev_ind + static inline void rps_lock(struct softnet_data *sd) + { + #ifdef CONFIG_RPS +- spin_lock(&sd->input_pkt_queue.lock); ++ raw_spin_lock(&sd->input_pkt_queue.raw_lock); + #endif + } + + static inline void rps_unlock(struct softnet_data *sd) + { + #ifdef CONFIG_RPS +- spin_unlock(&sd->input_pkt_queue.lock); ++ raw_spin_unlock(&sd->input_pkt_queue.raw_lock); + #endif + } + +@@ -3033,16 +3033,46 @@ int netif_rx_ni(struct sk_buff *skb) + { + int err; + +- preempt_disable(); ++ migrate_disable(); + err = netif_rx(skb); + if (local_softirq_pending()) +- do_softirq(); +- preempt_enable(); ++ thread_do_softirq(); ++ migrate_enable(); + + return err; + } + EXPORT_SYMBOL(netif_rx_ni); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * RT runs ksoftirqd as a real time thread and the root_lock is a ++ * "sleeping spinlock". If the trylock fails then we can go into an ++ * infinite loop when ksoftirqd preempted the task which actually ++ * holds the lock, because we requeue q and raise NET_TX softirq ++ * causing ksoftirqd to loop forever. ++ * ++ * It's safe to use spin_lock on RT here as softirqs run in thread ++ * context and cannot deadlock against the thread which is holding ++ * root_lock. ++ * ++ * On !RT the trylock might fail, but there we bail out from the ++ * softirq loop after 10 attempts which we can't do on RT. 
And the ++ * task holding root_lock cannot be preempted, so the only downside of ++ * that trylock is that we need 10 loops to decide that we should have ++ * given up in the first one :) ++ */ ++static inline int take_root_lock(spinlock_t *lock) ++{ ++ spin_lock(lock); ++ return 1; ++} ++#else ++static inline int take_root_lock(spinlock_t *lock) ++{ ++ return spin_trylock(lock); ++} ++#endif ++ + static void net_tx_action(struct softirq_action *h) + { + struct softnet_data *sd = &__get_cpu_var(softnet_data); +@@ -3081,7 +3111,7 @@ static void net_tx_action(struct softirq + head = head->next_sched; + + root_lock = qdisc_lock(q); +- if (spin_trylock(root_lock)) { ++ if (take_root_lock(root_lock)) { + smp_mb__before_clear_bit(); + clear_bit(__QDISC_STATE_SCHED, + &q->state); +@@ -3406,7 +3436,7 @@ static void flush_backlog(void *arg) + skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { + if (skb->dev == dev) { + __skb_unlink(skb, &sd->input_pkt_queue); +- kfree_skb(skb); ++ __skb_queue_tail(&sd->tofree_queue, skb); + input_queue_head_incr(sd); + } + } +@@ -3415,10 +3445,13 @@ static void flush_backlog(void *arg) + skb_queue_walk_safe(&sd->process_queue, skb, tmp) { + if (skb->dev == dev) { + __skb_unlink(skb, &sd->process_queue); +- kfree_skb(skb); ++ __skb_queue_tail(&sd->tofree_queue, skb); + input_queue_head_incr(sd); + } + } ++ ++ if (!skb_queue_empty(&sd->tofree_queue)) ++ raise_softirq_irqoff(NET_RX_SOFTIRQ); + } + + static int napi_gro_complete(struct sk_buff *skb) +@@ -3895,10 +3928,17 @@ static void net_rx_action(struct softirq + struct softnet_data *sd = &__get_cpu_var(softnet_data); + unsigned long time_limit = jiffies + 2; + int budget = netdev_budget; ++ struct sk_buff *skb; + void *have; + + local_irq_disable(); + ++ while ((skb = __skb_dequeue(&sd->tofree_queue))) { ++ local_irq_enable(); ++ kfree_skb(skb); ++ local_irq_disable(); ++ } ++ + while (!list_empty(&sd->poll_list)) { + struct napi_struct *n; + int work, weight; +@@ -6358,6 +6398,9 @@ static int dev_cpu_callback(struct notif + netif_rx(skb); + input_queue_head_incr(oldsd); + } ++ while ((skb = __skb_dequeue(&oldsd->tofree_queue))) { ++ kfree_skb(skb); ++ } + + return NOTIFY_OK; + } +@@ -6624,8 +6667,9 @@ static int __init net_dev_init(void) + struct softnet_data *sd = &per_cpu(softnet_data, i); + + memset(sd, 0, sizeof(*sd)); +- skb_queue_head_init(&sd->input_pkt_queue); +- skb_queue_head_init(&sd->process_queue); ++ skb_queue_head_init_raw(&sd->input_pkt_queue); ++ skb_queue_head_init_raw(&sd->process_queue); ++ skb_queue_head_init_raw(&sd->tofree_queue); + sd->completion_queue = NULL; + INIT_LIST_HEAD(&sd->poll_list); + sd->output_queue = NULL; +Index: linux-3.2/arch/x86/kernel/entry_32.S +=================================================================== +--- linux-3.2.orig/arch/x86/kernel/entry_32.S ++++ linux-3.2/arch/x86/kernel/entry_32.S +@@ -626,7 +626,11 @@ work_notifysig: # deal with pending s + jne work_notifysig_v86 # returning to kernel-space or + # vm86-space + xorl %edx, %edx ++ TRACE_IRQS_ON ++ ENABLE_INTERRUPTS(CLBR_NONE) + call do_notify_resume ++ DISABLE_INTERRUPTS(CLBR_ANY) ++ TRACE_IRQS_OFF + jmp resume_userspace_sig + + ALIGN +@@ -639,7 +643,11 @@ work_notifysig_v86: + movl %esp, %eax + #endif + xorl %edx, %edx ++ TRACE_IRQS_ON ++ ENABLE_INTERRUPTS(CLBR_NONE) + call do_notify_resume ++ DISABLE_INTERRUPTS(CLBR_ANY) ++ TRACE_IRQS_OFF + jmp resume_userspace_sig + END(work_pending) + +Index: linux-3.2/kernel/rcutree.c +=================================================================== +--- 
linux-3.2.orig/kernel/rcutree.c ++++ linux-3.2/kernel/rcutree.c +@@ -170,6 +170,12 @@ void rcu_sched_qs(int cpu) + rdp->passed_quiesce = 1; + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++void rcu_bh_qs(int cpu) ++{ ++ rcu_preempt_qs(cpu); ++} ++#else + void rcu_bh_qs(int cpu) + { + struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); +@@ -180,6 +186,7 @@ void rcu_bh_qs(int cpu) + trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs"); + rdp->passed_quiesce = 1; + } ++#endif + + /* + * Note a context switch. This is a quiescent state for RCU-sched, +@@ -225,6 +232,7 @@ long rcu_batches_completed_sched(void) + } + EXPORT_SYMBOL_GPL(rcu_batches_completed_sched); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Return the number of RCU BH batches processed thus far for debug & stats. + */ +@@ -242,6 +250,7 @@ void rcu_bh_force_quiescent_state(void) + force_quiescent_state(&rcu_bh_state, 0); + } + EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); ++#endif + + /* + * Record the number of times rcutorture tests have been initiated and +@@ -1221,7 +1230,7 @@ static void __rcu_offline_cpu(int cpu, s + else + raw_spin_unlock_irqrestore(&rnp->lock, flags); + if (need_report & RCU_OFL_TASKS_EXP_GP) +- rcu_report_exp_rnp(rsp, rnp); ++ rcu_report_exp_rnp(rsp, rnp, true); + rcu_node_kthread_setaffinity(rnp, -1); + } + +@@ -1667,6 +1676,7 @@ void call_rcu_sched(struct rcu_head *hea + } + EXPORT_SYMBOL_GPL(call_rcu_sched); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Queue an RCU for invocation after a quicker grace period. + */ +@@ -1675,6 +1685,7 @@ void call_rcu_bh(struct rcu_head *head, + __call_rcu(head, func, &rcu_bh_state); + } + EXPORT_SYMBOL_GPL(call_rcu_bh); ++#endif + + /** + * synchronize_sched - wait until an rcu-sched grace period has elapsed. +@@ -1707,6 +1718,7 @@ void synchronize_sched(void) + } + EXPORT_SYMBOL_GPL(synchronize_sched); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /** + * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. + * +@@ -1723,6 +1735,7 @@ void synchronize_rcu_bh(void) + wait_rcu_gp(call_rcu_bh); + } + EXPORT_SYMBOL_GPL(synchronize_rcu_bh); ++#endif + + /* + * Check to see if there is any immediate RCU-related work to be done +@@ -1877,6 +1890,7 @@ static void _rcu_barrier(struct rcu_stat + mutex_unlock(&rcu_barrier_mutex); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + /** + * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. + */ +@@ -1885,6 +1899,7 @@ void rcu_barrier_bh(void) + _rcu_barrier(&rcu_bh_state, call_rcu_bh); + } + EXPORT_SYMBOL_GPL(rcu_barrier_bh); ++#endif + + /** + * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. 
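Under CONFIG_PREEMPT_RT_FULL the rcutree.c changes above route rcu_bh_qs() through rcu_preempt_qs() and compile out the bh-specific entry points (call_rcu_bh(), synchronize_rcu_bh(), rcu_barrier_bh()): with softirqs running in thread context on RT, a separate RCU-bh flavour no longer buys anything. The header side of this change is not part of the excerpt; presumably the bh entry points are redirected to the preemptible flavour there. A minimal sketch of that kind of config-switched aliasing, using invented names rather than the real RCU API:

struct frob_head;

/* The ordinary, preemptible flavour always exists. */
extern void frob_call(struct frob_head *head,
		      void (*func)(struct frob_head *head));
extern void frob_synchronize(void);

#ifdef CONFIG_PREEMPT_RT_FULL
/* On RT the bh flavour collapses onto the ordinary one. */
# define frob_call_bh(head, func)	frob_call(head, func)
# define frob_synchronize_bh()		frob_synchronize()
#else
/* Separate bh implementation, as on a non-RT kernel. */
extern void frob_call_bh(struct frob_head *head,
			 void (*func)(struct frob_head *head));
extern void frob_synchronize_bh(void);
#endif

Callers keep using the _bh names unchanged; only the configuration decides whether those names denote distinct grace-period machinery or the common one.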
+Index: linux-3.2/kernel/rcutree.h +=================================================================== +--- linux-3.2.orig/kernel/rcutree.h ++++ linux-3.2/kernel/rcutree.h +@@ -430,6 +430,7 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work); + /* Forward declarations for rcutree_plugin.h */ + static void rcu_bootup_announce(void); + long rcu_batches_completed(void); ++static void rcu_preempt_qs(int cpu); + static void rcu_preempt_note_context_switch(int cpu); + static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp); + #ifdef CONFIG_HOTPLUG_CPU +@@ -451,7 +452,8 @@ static void rcu_preempt_check_callbacks( + static void rcu_preempt_process_callbacks(void); + void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); + #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) +-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp); ++static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, ++ bool wake); + #endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */ + static int rcu_preempt_pending(int cpu); + static int rcu_preempt_needs_cpu(int cpu); +Index: linux-3.2/kernel/rcutree_plugin.h +=================================================================== +--- linux-3.2.orig/kernel/rcutree_plugin.h ++++ linux-3.2/kernel/rcutree_plugin.h +@@ -336,7 +336,7 @@ static noinline void rcu_read_unlock_spe + } + + /* Hardware IRQ handlers cannot block. */ +- if (in_irq() || in_serving_softirq()) { ++ if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) { + local_irq_restore(flags); + return; + } +@@ -407,7 +407,7 @@ static noinline void rcu_read_unlock_spe + * then we need to report up the rcu_node hierarchy. + */ + if (!empty_exp && !rcu_preempted_readers_exp(rnp)) +- rcu_report_exp_rnp(&rcu_preempt_state, rnp); ++ rcu_report_exp_rnp(&rcu_preempt_state, rnp, true); + } else { + local_irq_restore(flags); + } +@@ -731,7 +731,8 @@ static int sync_rcu_preempt_exp_done(str + * + * Caller must hold sync_rcu_preempt_exp_mutex. + */ +-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp) ++static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, ++ bool wake) + { + unsigned long flags; + unsigned long mask; +@@ -744,7 +745,8 @@ static void rcu_report_exp_rnp(struct rc + } + if (rnp->parent == NULL) { + raw_spin_unlock_irqrestore(&rnp->lock, flags); +- wake_up(&sync_rcu_preempt_exp_wq); ++ if (wake) ++ wake_up(&sync_rcu_preempt_exp_wq); + break; + } + mask = rnp->grpmask; +@@ -777,7 +779,7 @@ sync_rcu_preempt_exp_init(struct rcu_sta + must_wait = 1; + } + if (!must_wait) +- rcu_report_exp_rnp(rsp, rnp); ++ rcu_report_exp_rnp(rsp, rnp, false); + } + + /* +@@ -1069,9 +1071,9 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedi + * report on tasks preempted in RCU read-side critical sections during + * expedited RCU grace periods. 
+ */ +-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp) ++static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, ++ bool wake) + { +- return; + } + + #endif /* #ifdef CONFIG_HOTPLUG_CPU */ +@@ -1931,7 +1933,7 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe + + #endif /* #else #ifndef CONFIG_SMP */ + +-#if !defined(CONFIG_RCU_FAST_NO_HZ) ++#if 1 /* !defined(CONFIG_RCU_FAST_NO_HZ) */ + + /* + * Check to see if any future RCU-related work will need to be done +Index: linux-3.2/drivers/usb/gadget/ci13xxx_udc.c +=================================================================== +--- linux-3.2.orig/drivers/usb/gadget/ci13xxx_udc.c ++++ linux-3.2/drivers/usb/gadget/ci13xxx_udc.c +@@ -816,7 +816,7 @@ static struct { + } dbg_data = { + .idx = 0, + .tty = 0, +- .lck = __RW_LOCK_UNLOCKED(lck) ++ .lck = __RW_LOCK_UNLOCKED(dbg_data.lck) + }; + + /** +Index: linux-3.2/fs/file.c +=================================================================== +--- linux-3.2.orig/fs/file.c ++++ linux-3.2/fs/file.c +@@ -105,14 +105,14 @@ void free_fdtable_rcu(struct rcu_head *r + kfree(fdt->open_fds); + kfree(fdt); + } else { +- fddef = &get_cpu_var(fdtable_defer_list); ++ fddef = &per_cpu(fdtable_defer_list, get_cpu_light()); + spin_lock(&fddef->lock); + fdt->next = fddef->next; + fddef->next = fdt; + /* vmallocs are handled from the workqueue context */ + schedule_work(&fddef->wq); + spin_unlock(&fddef->lock); +- put_cpu_var(fdtable_defer_list); ++ put_cpu_light(); + } + } + +@@ -422,7 +422,7 @@ struct files_struct init_files = { + .close_on_exec = (fd_set *)&init_files.close_on_exec_init, + .open_fds = (fd_set *)&init_files.open_fds_init, + }, +- .file_lock = __SPIN_LOCK_UNLOCKED(init_task.file_lock), ++ .file_lock = __SPIN_LOCK_UNLOCKED(init_files.file_lock), + }; + + /* +Index: linux-3.2/include/linux/idr.h +=================================================================== +--- linux-3.2.orig/include/linux/idr.h ++++ linux-3.2/include/linux/idr.h +@@ -136,7 +136,7 @@ struct ida { + struct ida_bitmap *free_bitmap; + }; + +-#define IDA_INIT(name) { .idr = IDR_INIT(name), .free_bitmap = NULL, } ++#define IDA_INIT(name) { .idr = IDR_INIT((name).idr), .free_bitmap = NULL, } + #define DEFINE_IDA(name) struct ida name = IDA_INIT(name) + + int ida_pre_get(struct ida *ida, gfp_t gfp_mask); +Index: linux-3.2/kernel/cred.c +=================================================================== +--- linux-3.2.orig/kernel/cred.c ++++ linux-3.2/kernel/cred.c +@@ -35,7 +35,7 @@ static struct kmem_cache *cred_jar; + static struct thread_group_cred init_tgcred = { + .usage = ATOMIC_INIT(2), + .tgid = 0, +- .lock = __SPIN_LOCK_UNLOCKED(init_cred.tgcred.lock), ++ .lock = __SPIN_LOCK_UNLOCKED(init_tgcred.lock), + }; + #endif + +Index: linux-3.2/include/linux/seqlock.h +=================================================================== +--- linux-3.2.orig/include/linux/seqlock.h ++++ linux-3.2/include/linux/seqlock.h +@@ -30,92 +30,12 @@ + #include + #include + +-typedef struct { +- unsigned sequence; +- spinlock_t lock; +-} seqlock_t; +- +-/* +- * These macros triggered gcc-3.x compile-time problems. We think these are +- * OK now. Be cautious. +- */ +-#define __SEQLOCK_UNLOCKED(lockname) \ +- { 0, __SPIN_LOCK_UNLOCKED(lockname) } +- +-#define seqlock_init(x) \ +- do { \ +- (x)->sequence = 0; \ +- spin_lock_init(&(x)->lock); \ +- } while (0) +- +-#define DEFINE_SEQLOCK(x) \ +- seqlock_t x = __SEQLOCK_UNLOCKED(x) +- +-/* Lock out other writers and update the count. 
+- * Acts like a normal spin_lock/unlock. +- * Don't need preempt_disable() because that is in the spin_lock already. +- */ +-static inline void write_seqlock(seqlock_t *sl) +-{ +- spin_lock(&sl->lock); +- ++sl->sequence; +- smp_wmb(); +-} +- +-static inline void write_sequnlock(seqlock_t *sl) +-{ +- smp_wmb(); +- sl->sequence++; +- spin_unlock(&sl->lock); +-} +- +-static inline int write_tryseqlock(seqlock_t *sl) +-{ +- int ret = spin_trylock(&sl->lock); +- +- if (ret) { +- ++sl->sequence; +- smp_wmb(); +- } +- return ret; +-} +- +-/* Start of read calculation -- fetch last complete writer token */ +-static __always_inline unsigned read_seqbegin(const seqlock_t *sl) +-{ +- unsigned ret; +- +-repeat: +- ret = ACCESS_ONCE(sl->sequence); +- if (unlikely(ret & 1)) { +- cpu_relax(); +- goto repeat; +- } +- smp_rmb(); +- +- return ret; +-} +- +-/* +- * Test if reader processed invalid data. +- * +- * If sequence value changed then writer changed data while in section. +- */ +-static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start) +-{ +- smp_rmb(); +- +- return unlikely(sl->sequence != start); +-} +- +- + /* + * Version using sequence counter only. + * This can be used when code has its own mutex protecting the + * updating starting before the write_seqcountbeqin() and ending + * after the write_seqcount_end(). + */ +- + typedef struct seqcount { + unsigned sequence; + } seqcount_t; +@@ -197,7 +117,6 @@ static inline int __read_seqcount_retry( + static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) + { + smp_rmb(); +- + return __read_seqcount_retry(s, start); + } + +@@ -231,31 +150,154 @@ static inline void write_seqcount_barrie + s->sequence+=2; + } + ++typedef struct { ++ struct seqcount seqcount; ++ raw_spinlock_t lock; ++} raw_seqlock_t; ++ ++typedef struct { ++ struct seqcount seqcount; ++ spinlock_t lock; ++} seqlock_t; ++ ++/* ++ * These macros triggered gcc-3.x compile-time problems. We think these are ++ * OK now. Be cautious. ++ */ ++#define __RAW_SEQLOCK_UNLOCKED(lockname) \ ++ { \ ++ .seqcount = SEQCNT_ZERO, \ ++ .lock = __RAW_SPIN_LOCK_UNLOCKED(lockname) \ ++ } ++ ++#define raw_seqlock_init(x) \ ++ do { \ ++ seqcount_init(&(x)->seqcount); \ ++ raw_spin_lock_init(&(x)->lock); \ ++ } while (0) ++ ++#define DEFINE_RAW_SEQLOCK(x) \ ++ raw_seqlock_t x = __RAW_SEQLOCK_UNLOCKED(x) ++ ++#define __SEQLOCK_UNLOCKED(lockname) \ ++ { \ ++ .seqcount = SEQCNT_ZERO, \ ++ .lock = __SPIN_LOCK_UNLOCKED(lockname) \ ++ } ++ ++#define seqlock_init(x) \ ++ do { \ ++ seqcount_init(&(x)->seqcount); \ ++ spin_lock_init(&(x)->lock); \ ++ } while (0) ++ ++#define DEFINE_SEQLOCK(x) \ ++ seqlock_t x = __SEQLOCK_UNLOCKED(x) ++ ++#define read_seqbegin(sl) read_seqcount_begin(&(sl)->seqcount) ++#define read_seqretry(sl, start) read_seqcount_retry(&(sl)->seqcount, start) ++ + /* +- * Possible sw/hw IRQ protected versions of the interfaces. ++ * Lock out other writers and update the count. ++ * Acts like a normal spin_lock/unlock. ++ * Don't need preempt_disable() because that is in the spin_lock already. 
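The seqlock_t introduced in this hunk is built from a seqcount plus a spinlock, so the write side can become a sleeping lock on RT while readers keep the lock-free retry loop. As a self-contained illustration of that reader/writer protocol, here is a small user-space model using C11 atomics, with a pthread mutex standing in for the writer-side spinlock; every name in it is invented:

#include <stdatomic.h>
#include <pthread.h>

static atomic_uint seq;			/* odd while a write is in progress */
static atomic_long val_sec, val_nsec;	/* the data protected by the sequence */
static pthread_mutex_t writer_lock = PTHREAD_MUTEX_INITIALIZER;

static void write_time(long sec, long nsec)
{
	unsigned int s;

	pthread_mutex_lock(&writer_lock);	/* writers serialize, like sl->lock */
	s = atomic_load_explicit(&seq, memory_order_relaxed);
	atomic_store_explicit(&seq, s + 1, memory_order_relaxed);	/* begin: count goes odd */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&val_sec, sec, memory_order_relaxed);
	atomic_store_explicit(&val_nsec, nsec, memory_order_relaxed);
	atomic_store_explicit(&seq, s + 2, memory_order_release);	/* end: count even again */
	pthread_mutex_unlock(&writer_lock);
}

static void read_time(long *sec, long *nsec)
{
	unsigned int start;

	for (;;) {
		start = atomic_load_explicit(&seq, memory_order_acquire);
		if (start & 1)
			continue;		/* writer active, spin */
		*sec = atomic_load_explicit(&val_sec, memory_order_relaxed);
		*nsec = atomic_load_explicit(&val_nsec, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);
		if (atomic_load_explicit(&seq, memory_order_relaxed) == start)
			return;			/* no writer raced with this read */
	}
}

Readers never take the lock at all; they simply redo the read if the sequence changed underneath them, which is why splitting the old seqlock into a seqcount plus a lock leaves the read-side semantics untouched.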
+ */ ++static inline void raw_write_seqlock(raw_seqlock_t *sl) ++{ ++ raw_spin_lock(&sl->lock); ++ write_seqcount_begin(&sl->seqcount); ++} ++ ++static inline void raw_write_sequnlock(raw_seqlock_t *sl) ++{ ++ write_seqcount_end(&sl->seqcount); ++ raw_spin_unlock(&sl->lock); ++} ++ ++static inline void raw_write_seqlock_irq(raw_seqlock_t *sl) ++{ ++ raw_spin_lock_irq(&sl->lock); ++ write_seqcount_begin(&sl->seqcount); ++} ++ ++static inline void raw_write_sequnlock_irq(raw_seqlock_t *sl) ++{ ++ write_seqcount_end(&sl->seqcount); ++ raw_spin_unlock_irq(&sl->lock); ++} ++ ++static inline unsigned long __raw_write_seqlock_irqsave(raw_seqlock_t *sl) ++{ ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&sl->lock, flags); ++ write_seqcount_begin(&sl->seqcount); ++ return flags; ++} ++ ++#define raw_write_seqlock_irqsave(lock, flags) \ ++ do { flags = __raw_write_seqlock_irqsave(lock); } while (0) ++ ++static inline void ++raw_write_sequnlock_irqrestore(raw_seqlock_t *sl, unsigned long flags) ++{ ++ write_seqcount_end(&sl->seqcount); ++ raw_spin_unlock_irqrestore(&sl->lock, flags); ++} ++ ++static inline void write_seqlock(seqlock_t *sl) ++{ ++ spin_lock(&sl->lock); ++ write_seqcount_begin(&sl->seqcount); ++} ++ ++static inline void write_sequnlock(seqlock_t *sl) ++{ ++ write_seqcount_end(&sl->seqcount); ++ spin_unlock(&sl->lock); ++} ++ ++static inline void write_seqlock_bh(seqlock_t *sl) ++{ ++ spin_lock_bh(&sl->lock); ++ write_seqcount_begin(&sl->seqcount); ++} ++ ++static inline void write_sequnlock_bh(seqlock_t *sl) ++{ ++ write_seqcount_end(&sl->seqcount); ++ spin_unlock_bh(&sl->lock); ++} ++ ++static inline void write_seqlock_irq(seqlock_t *sl) ++{ ++ spin_lock_irq(&sl->lock); ++ write_seqcount_begin(&sl->seqcount); ++} ++ ++static inline void write_sequnlock_irq(seqlock_t *sl) ++{ ++ write_seqcount_end(&sl->seqcount); ++ spin_unlock_irq(&sl->lock); ++} ++ ++static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&sl->lock, flags); ++ write_seqcount_begin(&sl->seqcount); ++ return flags; ++} ++ + #define write_seqlock_irqsave(lock, flags) \ +- do { local_irq_save(flags); write_seqlock(lock); } while (0) +-#define write_seqlock_irq(lock) \ +- do { local_irq_disable(); write_seqlock(lock); } while (0) +-#define write_seqlock_bh(lock) \ +- do { local_bh_disable(); write_seqlock(lock); } while (0) +- +-#define write_sequnlock_irqrestore(lock, flags) \ +- do { write_sequnlock(lock); local_irq_restore(flags); } while(0) +-#define write_sequnlock_irq(lock) \ +- do { write_sequnlock(lock); local_irq_enable(); } while(0) +-#define write_sequnlock_bh(lock) \ +- do { write_sequnlock(lock); local_bh_enable(); } while(0) +- +-#define read_seqbegin_irqsave(lock, flags) \ +- ({ local_irq_save(flags); read_seqbegin(lock); }) +- +-#define read_seqretry_irqrestore(lock, iv, flags) \ +- ({ \ +- int ret = read_seqretry(lock, iv); \ +- local_irq_restore(flags); \ +- ret; \ +- }) ++ do { flags = __write_seqlock_irqsave(lock); } while (0) ++ ++static inline void ++write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) ++{ ++ write_seqcount_end(&sl->seqcount); ++ spin_unlock_irqrestore(&sl->lock, flags); ++} + + #endif /* __LINUX_SEQLOCK_H */ +Index: linux-3.2/arch/ia64/kernel/time.c +=================================================================== +--- linux-3.2.orig/arch/ia64/kernel/time.c ++++ linux-3.2/arch/ia64/kernel/time.c +@@ -36,7 +36,7 @@ + static cycle_t itc_get_cycles(struct clocksource *cs); + + struct fsyscall_gtod_data_t 
fsyscall_gtod_data = { +- .lock = __SEQLOCK_UNLOCKED(fsyscall_gtod_data.lock), ++ .lock = __RAW_SEQLOCK_UNLOCKED(fsyscall_gtod_data.lock), + }; + + struct itc_jitter_data_t itc_jitter_data; +@@ -462,7 +462,7 @@ void update_vsyscall(struct timespec *wa + { + unsigned long flags; + +- write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags); ++ raw_write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags); + + /* copy fsyscall clock data */ + fsyscall_gtod_data.clk_mask = c->mask; +@@ -485,6 +485,6 @@ void update_vsyscall(struct timespec *wa + fsyscall_gtod_data.monotonic_time.tv_sec++; + } + +- write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags); ++ raw_write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags); + } + +Index: linux-3.2/arch/x86/include/asm/vgtod.h +=================================================================== +--- linux-3.2.orig/arch/x86/include/asm/vgtod.h ++++ linux-3.2/arch/x86/include/asm/vgtod.h +@@ -5,7 +5,7 @@ + #include + + struct vsyscall_gtod_data { +- seqlock_t lock; ++ raw_seqlock_t lock; + + /* open coded 'struct timespec' */ + time_t wall_time_sec; +Index: linux-3.2/arch/x86/kernel/vsyscall_64.c +=================================================================== +--- linux-3.2.orig/arch/x86/kernel/vsyscall_64.c ++++ linux-3.2/arch/x86/kernel/vsyscall_64.c +@@ -54,7 +54,7 @@ + DEFINE_VVAR(int, vgetcpu_mode); + DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) = + { +- .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock), ++ .lock = __RAW_SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock), + }; + + static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE; +@@ -82,10 +82,10 @@ void update_vsyscall_tz(void) + { + unsigned long flags; + +- write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags); ++ raw_write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags); + /* sys_tz has changed */ + vsyscall_gtod_data.sys_tz = sys_tz; +- write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); ++ raw_write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); + } + + void update_vsyscall(struct timespec *wall_time, struct timespec *wtm, +@@ -93,7 +93,7 @@ void update_vsyscall(struct timespec *wa + { + unsigned long flags; + +- write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags); ++ raw_write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags); + + /* copy vsyscall data */ + vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode; +@@ -106,7 +106,7 @@ void update_vsyscall(struct timespec *wa + vsyscall_gtod_data.wall_to_monotonic = *wtm; + vsyscall_gtod_data.wall_time_coarse = __current_kernel_time(); + +- write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); ++ raw_write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); + } + + static void warn_bad_vsyscall(const char *level, struct pt_regs *regs, +Index: linux-3.2/kernel/time/ntp.c +=================================================================== +--- linux-3.2.orig/kernel/time/ntp.c ++++ linux-3.2/kernel/time/ntp.c +@@ -358,7 +358,7 @@ static enum hrtimer_restart ntp_leap_sec + { + enum hrtimer_restart res = HRTIMER_NORESTART; + +- write_seqlock(&xtime_lock); ++ raw_write_seqlock(&xtime_lock); + + switch (time_state) { + case TIME_OK: +@@ -388,7 +388,7 @@ static enum hrtimer_restart ntp_leap_sec + break; + } + +- write_sequnlock(&xtime_lock); ++ raw_write_sequnlock(&xtime_lock); + + return res; + } +@@ -663,7 +663,7 @@ int do_adjtimex(struct timex *txc) + + getnstimeofday(&ts); + +- write_seqlock_irq(&xtime_lock); ++ raw_write_seqlock_irq(&xtime_lock); + + if (txc->modes & 
ADJ_ADJTIME) { + long save_adjust = time_adjust; +@@ -705,7 +705,7 @@ int do_adjtimex(struct timex *txc) + /* fill PPS status fields */ + pps_fill_timex(txc); + +- write_sequnlock_irq(&xtime_lock); ++ raw_write_sequnlock_irq(&xtime_lock); + + txc->time.tv_sec = ts.tv_sec; + txc->time.tv_usec = ts.tv_nsec; +@@ -903,7 +903,7 @@ void hardpps(const struct timespec *phas + + pts_norm = pps_normalize_ts(*phase_ts); + +- write_seqlock_irqsave(&xtime_lock, flags); ++ raw_write_seqlock_irqsave(&xtime_lock, flags); + + /* clear the error bits, they will be set again if needed */ + time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR); +@@ -916,7 +916,7 @@ void hardpps(const struct timespec *phas + * just start the frequency interval */ + if (unlikely(pps_fbase.tv_sec == 0)) { + pps_fbase = *raw_ts; +- write_sequnlock_irqrestore(&xtime_lock, flags); ++ raw_write_sequnlock_irqrestore(&xtime_lock, flags); + return; + } + +@@ -931,7 +931,7 @@ void hardpps(const struct timespec *phas + time_status |= STA_PPSJITTER; + /* restart the frequency calibration interval */ + pps_fbase = *raw_ts; +- write_sequnlock_irqrestore(&xtime_lock, flags); ++ raw_write_sequnlock_irqrestore(&xtime_lock, flags); + pr_err("hardpps: PPSJITTER: bad pulse\n"); + return; + } +@@ -948,7 +948,7 @@ void hardpps(const struct timespec *phas + + hardpps_update_phase(pts_norm.nsec); + +- write_sequnlock_irqrestore(&xtime_lock, flags); ++ raw_write_sequnlock_irqrestore(&xtime_lock, flags); + } + EXPORT_SYMBOL(hardpps); + +Index: linux-3.2/kernel/time/tick-common.c +=================================================================== +--- linux-3.2.orig/kernel/time/tick-common.c ++++ linux-3.2/kernel/time/tick-common.c +@@ -63,13 +63,13 @@ int tick_is_oneshot_available(void) + static void tick_periodic(int cpu) + { + if (tick_do_timer_cpu == cpu) { +- write_seqlock(&xtime_lock); ++ raw_write_seqlock(&xtime_lock); + + /* Keep track of the next tick event */ + tick_next_period = ktime_add(tick_next_period, tick_period); + + do_timer(1); +- write_sequnlock(&xtime_lock); ++ raw_write_sequnlock(&xtime_lock); + } + + update_process_times(user_mode(get_irq_regs())); +Index: linux-3.2/kernel/time/tick-internal.h +=================================================================== +--- linux-3.2.orig/kernel/time/tick-internal.h ++++ linux-3.2/kernel/time/tick-internal.h +@@ -141,4 +141,4 @@ static inline int tick_device_is_functio + #endif + + extern void do_timer(unsigned long ticks); +-extern seqlock_t xtime_lock; ++extern raw_seqlock_t xtime_lock; +Index: linux-3.2/kernel/time/tick-sched.c +=================================================================== +--- linux-3.2.orig/kernel/time/tick-sched.c ++++ linux-3.2/kernel/time/tick-sched.c +@@ -56,7 +56,7 @@ static void tick_do_update_jiffies64(kti + return; + + /* Reevalute with xtime_lock held */ +- write_seqlock(&xtime_lock); ++ raw_write_seqlock(&xtime_lock); + + delta = ktime_sub(now, last_jiffies_update); + if (delta.tv64 >= tick_period.tv64) { +@@ -79,7 +79,7 @@ static void tick_do_update_jiffies64(kti + /* Keep the tick_next_period variable up to date */ + tick_next_period = ktime_add(last_jiffies_update, tick_period); + } +- write_sequnlock(&xtime_lock); ++ raw_write_sequnlock(&xtime_lock); + } + + /* +@@ -89,12 +89,12 @@ static ktime_t tick_init_jiffy_update(vo + { + ktime_t period; + +- write_seqlock(&xtime_lock); ++ raw_write_seqlock(&xtime_lock); + /* Did we start the jiffies update yet ? 
*/ + if (last_jiffies_update.tv64 == 0) + last_jiffies_update = tick_next_period; + period = last_jiffies_update; +- write_sequnlock(&xtime_lock); ++ raw_write_sequnlock(&xtime_lock); + return period; + } + +@@ -332,13 +332,7 @@ void tick_nohz_stop_sched_tick(int inidl + goto end; + + if (unlikely(local_softirq_pending() && cpu_online(cpu))) { +- static int ratelimit; +- +- if (ratelimit < 10) { +- printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", +- (unsigned int) local_softirq_pending()); +- ratelimit++; +- } ++ softirq_check_pending_idle(); + goto end; + } + +@@ -798,6 +792,7 @@ void tick_setup_sched_timer(void) + * Emulate tick processing via per-CPU hrtimers: + */ + hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); ++ ts->sched_timer.irqsafe = 1; + ts->sched_timer.function = tick_sched_timer; + + /* Get the next period (per cpu) */ +Index: linux-3.2/kernel/time/timekeeping.c +=================================================================== +--- linux-3.2.orig/kernel/time/timekeeping.c ++++ linux-3.2/kernel/time/timekeeping.c +@@ -139,8 +139,7 @@ static inline s64 timekeeping_get_ns_raw + * This read-write spinlock protects us from races in SMP while + * playing with xtime. + */ +-__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock); +- ++__cacheline_aligned_in_smp DEFINE_RAW_SEQLOCK(xtime_lock); + + /* + * The current time +@@ -361,7 +360,7 @@ int do_settimeofday(const struct timespe + if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) + return -EINVAL; + +- write_seqlock_irqsave(&xtime_lock, flags); ++ raw_write_seqlock_irqsave(&xtime_lock, flags); + + timekeeping_forward_now(); + +@@ -377,7 +376,7 @@ int do_settimeofday(const struct timespe + update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock, + timekeeper.mult); + +- write_sequnlock_irqrestore(&xtime_lock, flags); ++ raw_write_sequnlock_irqrestore(&xtime_lock, flags); + + /* signal hrtimers about time change */ + clock_was_set(); +@@ -401,7 +400,7 @@ int timekeeping_inject_offset(struct tim + if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) + return -EINVAL; + +- write_seqlock_irqsave(&xtime_lock, flags); ++ raw_write_seqlock_irqsave(&xtime_lock, flags); + + timekeeping_forward_now(); + +@@ -414,7 +413,7 @@ int timekeeping_inject_offset(struct tim + update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock, + timekeeper.mult); + +- write_sequnlock_irqrestore(&xtime_lock, flags); ++ raw_write_sequnlock_irqrestore(&xtime_lock, flags); + + /* signal hrtimers about time change */ + clock_was_set(); +@@ -568,7 +567,7 @@ void __init timekeeping_init(void) + read_persistent_clock(&now); + read_boot_clock(&boot); + +- write_seqlock_irqsave(&xtime_lock, flags); ++ raw_write_seqlock_irqsave(&xtime_lock, flags); + + ntp_init(); + +@@ -589,7 +588,7 @@ void __init timekeeping_init(void) + -boot.tv_sec, -boot.tv_nsec); + total_sleep_time.tv_sec = 0; + total_sleep_time.tv_nsec = 0; +- write_sequnlock_irqrestore(&xtime_lock, flags); ++ raw_write_sequnlock_irqrestore(&xtime_lock, flags); + } + + /* time in seconds when suspend began */ +@@ -636,7 +635,7 @@ void timekeeping_inject_sleeptime(struct + if (!(ts.tv_sec == 0 && ts.tv_nsec == 0)) + return; + +- write_seqlock_irqsave(&xtime_lock, flags); ++ raw_write_seqlock_irqsave(&xtime_lock, flags); + timekeeping_forward_now(); + + __timekeeping_inject_sleeptime(delta); +@@ -646,7 +645,7 @@ void timekeeping_inject_sleeptime(struct + update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock, + timekeeper.mult); + +- write_sequnlock_irqrestore(&xtime_lock, flags); ++ 
raw_write_sequnlock_irqrestore(&xtime_lock, flags); + + /* signal hrtimers about time change */ + clock_was_set(); +@@ -669,7 +668,7 @@ static void timekeeping_resume(void) + + clocksource_resume(); + +- write_seqlock_irqsave(&xtime_lock, flags); ++ raw_write_seqlock_irqsave(&xtime_lock, flags); + + if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) { + ts = timespec_sub(ts, timekeeping_suspend_time); +@@ -679,7 +678,7 @@ static void timekeeping_resume(void) + timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock); + timekeeper.ntp_error = 0; + timekeeping_suspended = 0; +- write_sequnlock_irqrestore(&xtime_lock, flags); ++ raw_write_sequnlock_irqrestore(&xtime_lock, flags); + + touch_softlockup_watchdog(); + +@@ -697,7 +696,7 @@ static int timekeeping_suspend(void) + + read_persistent_clock(&timekeeping_suspend_time); + +- write_seqlock_irqsave(&xtime_lock, flags); ++ raw_write_seqlock_irqsave(&xtime_lock, flags); + timekeeping_forward_now(); + timekeeping_suspended = 1; + +@@ -720,7 +719,7 @@ static int timekeeping_suspend(void) + timekeeping_suspend_time = + timespec_add(timekeeping_suspend_time, delta_delta); + } +- write_sequnlock_irqrestore(&xtime_lock, flags); ++ raw_write_sequnlock_irqrestore(&xtime_lock, flags); + + clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL); + clocksource_suspend(); +@@ -1149,7 +1148,7 @@ ktime_t ktime_get_monotonic_offset(void) + */ + void xtime_update(unsigned long ticks) + { +- write_seqlock(&xtime_lock); ++ raw_write_seqlock(&xtime_lock); + do_timer(ticks); +- write_sequnlock(&xtime_lock); ++ raw_write_sequnlock(&xtime_lock); + } +Index: linux-3.2/Documentation/trace/histograms.txt +=================================================================== +--- /dev/null ++++ linux-3.2/Documentation/trace/histograms.txt +@@ -0,0 +1,186 @@ ++ Using the Linux Kernel Latency Histograms ++ ++ ++This document gives a short explanation how to enable, configure and use ++latency histograms. Latency histograms are primarily relevant in the ++context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT) ++and are used in the quality management of the Linux real-time ++capabilities. ++ ++ ++* Purpose of latency histograms ++ ++A latency histogram continuously accumulates the frequencies of latency ++data. There are two types of histograms ++- potential sources of latencies ++- effective latencies ++ ++ ++* Potential sources of latencies ++ ++Potential sources of latencies are code segments where interrupts, ++preemption or both are disabled (aka critical sections). To create ++histograms of potential sources of latency, the kernel stores the time ++stamp at the start of a critical section, determines the time elapsed ++when the end of the section is reached, and increments the frequency ++counter of that latency value - irrespective of whether any concurrently ++running process is affected by latency or not. ++- Configuration items (in the Kernel hacking/Tracers submenu) ++ CONFIG_INTERRUPT_OFF_LATENCY ++ CONFIG_PREEMPT_OFF_LATENCY ++ ++ ++* Effective latencies ++ ++Effective latencies are actually occuring during wakeup of a process. To ++determine effective latencies, the kernel stores the time stamp when a ++process is scheduled to be woken up, and determines the duration of the ++wakeup time shortly before control is passed over to this process. 
Note ++that the apparent latency in user space may be somewhat longer, since the ++process may be interrupted after control is passed over to it but before ++the execution in user space takes place. Simply measuring the interval ++between enqueuing and wakeup may also not appropriate in cases when a ++process is scheduled as a result of a timer expiration. The timer may have ++missed its deadline, e.g. due to disabled interrupts, but this latency ++would not be registered. Therefore, the offsets of missed timers are ++recorded in a separate histogram. If both wakeup latency and missed timer ++offsets are configured and enabled, a third histogram may be enabled that ++records the overall latency as a sum of the timer latency, if any, and the ++wakeup latency. This histogram is called "timerandwakeup". ++- Configuration items (in the Kernel hacking/Tracers submenu) ++ CONFIG_WAKEUP_LATENCY ++ CONFIG_MISSED_TIMER_OFSETS ++ ++ ++* Usage ++ ++The interface to the administration of the latency histograms is located ++in the debugfs file system. To mount it, either enter ++ ++mount -t sysfs nodev /sys ++mount -t debugfs nodev /sys/kernel/debug ++ ++from shell command line level, or add ++ ++nodev /sys sysfs defaults 0 0 ++nodev /sys/kernel/debug debugfs defaults 0 0 ++ ++to the file /etc/fstab. All latency histogram related files are then ++available in the directory /sys/kernel/debug/tracing/latency_hist. A ++particular histogram type is enabled by writing non-zero to the related ++variable in the /sys/kernel/debug/tracing/latency_hist/enable directory. ++Select "preemptirqsoff" for the histograms of potential sources of ++latencies and "wakeup" for histograms of effective latencies etc. The ++histogram data - one per CPU - are available in the files ++ ++/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx ++/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx ++/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx ++/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx ++/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx ++/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx ++/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx ++ ++The histograms are reset by writing non-zero to the file "reset" in a ++particular latency directory. To reset all latency data, use ++ ++#!/bin/sh ++ ++TRACINGDIR=/sys/kernel/debug/tracing ++HISTDIR=$TRACINGDIR/latency_hist ++ ++if test -d $HISTDIR ++then ++ cd $HISTDIR ++ for i in `find . | grep /reset$` ++ do ++ echo 1 >$i ++ done ++fi ++ ++ ++* Data format ++ ++Latency data are stored with a resolution of one microsecond. The ++maximum latency is 10,240 microseconds. The data are only valid, if the ++overflow register is empty. Every output line contains the latency in ++microseconds in the first row and the number of samples in the second ++row. To display only lines with a positive latency count, use, for ++example, ++ ++grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0 ++ ++#Minimum latency: 0 microseconds. ++#Average latency: 0 microseconds. ++#Maximum latency: 25 microseconds. 
++#Total samples: 3104770694 ++#There are 0 samples greater or equal than 10240 microseconds ++#usecs samples ++ 0 2984486876 ++ 1 49843506 ++ 2 58219047 ++ 3 5348126 ++ 4 2187960 ++ 5 3388262 ++ 6 959289 ++ 7 208294 ++ 8 40420 ++ 9 4485 ++ 10 14918 ++ 11 18340 ++ 12 25052 ++ 13 19455 ++ 14 5602 ++ 15 969 ++ 16 47 ++ 17 18 ++ 18 14 ++ 19 1 ++ 20 3 ++ 21 2 ++ 22 5 ++ 23 2 ++ 25 1 ++ ++ ++* Wakeup latency of a selected process ++ ++To only collect wakeup latency data of a particular process, write the ++PID of the requested process to ++ ++/sys/kernel/debug/tracing/latency_hist/wakeup/pid ++ ++PIDs are not considered if this variable is set to 0. ++ ++ ++* Details of the process with the highest wakeup latency so far ++ ++Selected data of the process that suffered from the highest wakeup ++latency that occurred in a particular CPU are available in the file ++ ++/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx. ++ ++In addition, other relevant system data at the time when the ++latency occurred are given. ++ ++The format of the data is (all in one line): ++<PID> <Priority> <Latency> (<Timeroffset>) <Command> \ ++<- <PID> <Priority> <Command> <Timestamp> ++ ++The value of <Timeroffset> is only relevant in the combined timer ++and wakeup latency recording. In the wakeup recording, it is ++always 0, in the missed_timer_offsets recording, it is the same ++as <Latency>. ++ ++When retrospectively searching for the origin of a latency and ++tracing was not enabled, it may be helpful to know the name and ++some basic data of the task that (finally) was switching to the ++late real-time task. In addition to the victim's data, also the ++data of the possible culprit are therefore displayed after the ++"<-" symbol. ++ ++Finally, the timestamp of the time when the latency occurred ++in <seconds>.<microseconds> after the most recent system boot ++is provided. ++ ++These data are also reset when the wakeup histogram is reset. +Index: linux-3.2/include/trace/events/hist.h +=================================================================== +--- /dev/null ++++ linux-3.2/include/trace/events/hist.h +@@ -0,0 +1,69 @@ ++#undef TRACE_SYSTEM ++#define TRACE_SYSTEM hist ++ ++#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ) ++#define _TRACE_HIST_H ++ ++#include "latency_hist.h" ++#include <linux/tracepoint.h> ++ ++#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST) ++#define trace_preemptirqsoff_hist(a,b) ++#else ++TRACE_EVENT(preemptirqsoff_hist, ++ ++ TP_PROTO(int reason, int starthist), ++ ++ TP_ARGS(reason, starthist), ++ ++ TP_STRUCT__entry( ++ __field(int, reason ) ++ __field(int, starthist ) ++ ), ++ ++ TP_fast_assign( ++ __entry->reason = reason; ++ __entry->starthist = starthist; ++ ), ++ ++ TP_printk("reason=%s starthist=%s", getaction(__entry->reason), ++ __entry->starthist ? "start" : "stop") ++); ++#endif ++ ++#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST ++#define trace_hrtimer_interrupt(a,b,c,d) ++#else ++TRACE_EVENT(hrtimer_interrupt, ++ ++ TP_PROTO(int cpu, long long offset, struct task_struct *curr, struct task_struct *task), ++ ++ TP_ARGS(cpu, offset, curr, task), ++ ++ TP_STRUCT__entry( ++ __field(int, cpu ) ++ __field(long long, offset ) ++ __array(char, ccomm, TASK_COMM_LEN) ++ __field(int, cprio ) ++ __array(char, tcomm, TASK_COMM_LEN) ++ __field(int, tprio ) ++ ), ++ ++ TP_fast_assign( ++ __entry->cpu = cpu; ++ __entry->offset = offset; ++ memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN); ++ __entry->cprio = curr->prio; ++ memcpy(__entry->tcomm, task != NULL ? task->comm : "", task != NULL ? TASK_COMM_LEN : 7); ++ __entry->tprio = task != NULL ?
task->prio : -1; ++ ), ++ ++ TP_printk("cpu=%d offset=%lld curr=%s[%d] thread=%s[%d]", ++ __entry->cpu, __entry->offset, __entry->ccomm, __entry->cprio, __entry->tcomm, __entry->tprio) ++); ++#endif ++ ++#endif /* _TRACE_HIST_H */ ++ ++/* This part must be outside protection */ ++#include +Index: linux-3.2/include/trace/events/latency_hist.h +=================================================================== +--- /dev/null ++++ linux-3.2/include/trace/events/latency_hist.h +@@ -0,0 +1,30 @@ ++#ifndef _LATENCY_HIST_H ++#define _LATENCY_HIST_H ++ ++enum hist_action { ++ IRQS_ON, ++ PREEMPT_ON, ++ TRACE_STOP, ++ IRQS_OFF, ++ PREEMPT_OFF, ++ TRACE_START, ++}; ++ ++static char *actions[] = { ++ "IRQS_ON", ++ "PREEMPT_ON", ++ "TRACE_STOP", ++ "IRQS_OFF", ++ "PREEMPT_OFF", ++ "TRACE_START", ++}; ++ ++static inline char *getaction(int action) ++{ ++ if (action >= 0 && action <= sizeof(actions)/sizeof(actions[0])) ++ return(actions[action]); ++ return("unknown"); ++} ++ ++#endif /* _LATENCY_HIST_H */ ++ +Index: linux-3.2/kernel/hrtimer.c +=================================================================== +--- linux-3.2.orig/kernel/hrtimer.c ++++ linux-3.2/kernel/hrtimer.c +@@ -49,6 +49,7 @@ + #include + + #include ++#include + + /* + * The timer bases: +@@ -588,8 +589,7 @@ static int hrtimer_reprogram(struct hrti + * When the callback is running, we do not reprogram the clock event + * device. The timer callback is either running on a different CPU or + * the callback is executed in the hrtimer_interrupt context. The +- * reprogramming is handled either by the softirq, which called the +- * callback or at the end of the hrtimer_interrupt. ++ * reprogramming is handled at the end of the hrtimer_interrupt. + */ + if (hrtimer_callback_running(timer)) + return 0; +@@ -624,6 +624,9 @@ static int hrtimer_reprogram(struct hrti + return res; + } + ++static void __run_hrtimer(struct hrtimer *timer, ktime_t *now); ++static int hrtimer_rt_defer(struct hrtimer *timer); ++ + /* + * Initialize the high resolution related parts of cpu_base + */ +@@ -644,14 +647,23 @@ static inline int hrtimer_enqueue_reprog + int wakeup) + { + if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) { +- if (wakeup) { +- raw_spin_unlock(&base->cpu_base->lock); +- raise_softirq_irqoff(HRTIMER_SOFTIRQ); +- raw_spin_lock(&base->cpu_base->lock); +- } else +- __raise_softirq_irqoff(HRTIMER_SOFTIRQ); ++ if (!wakeup) ++ return -ETIME; + +- return 1; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ /* ++ * Move softirq based timers away from the rbtree in ++ * case it expired already. Otherwise we would have a ++ * stale base->first entry until the softirq runs. 
++ */ ++ if (!hrtimer_rt_defer(timer)) ++ return -ETIME; ++#endif ++ raw_spin_unlock(&base->cpu_base->lock); ++ raise_softirq_irqoff(HRTIMER_SOFTIRQ); ++ raw_spin_lock(&base->cpu_base->lock); ++ ++ return 0; + } + + return 0; +@@ -732,6 +744,11 @@ static inline int hrtimer_enqueue_reprog + } + static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { } + static inline void retrigger_next_event(void *arg) { } ++static inline int hrtimer_reprogram(struct hrtimer *timer, ++ struct hrtimer_clock_base *base) ++{ ++ return 0; ++} + + #endif /* CONFIG_HIGH_RES_TIMERS */ + +@@ -846,6 +863,32 @@ u64 hrtimer_forward(struct hrtimer *time + } + EXPORT_SYMBOL_GPL(hrtimer_forward); + ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define wake_up_timer_waiters(b) wake_up(&(b)->wait) ++ ++/** ++ * hrtimer_wait_for_timer - Wait for a running timer ++ * ++ * @timer: timer to wait for ++ * ++ * The function waits in case the timers callback function is ++ * currently executed on the waitqueue of the timer base. The ++ * waitqueue is woken up after the timer callback function has ++ * finished execution. ++ */ ++void hrtimer_wait_for_timer(const struct hrtimer *timer) ++{ ++ struct hrtimer_clock_base *base = timer->base; ++ ++ if (base && base->cpu_base && !timer->irqsafe) ++ wait_event(base->cpu_base->wait, ++ !(timer->state & HRTIMER_STATE_CALLBACK)); ++} ++ ++#else ++# define wake_up_timer_waiters(b) do { } while (0) ++#endif ++ + /* + * enqueue_hrtimer - internal function to (re)start a timer + * +@@ -888,6 +931,11 @@ static void __remove_hrtimer(struct hrti + if (!(timer->state & HRTIMER_STATE_ENQUEUED)) + goto out; + ++ if (unlikely(!list_empty(&timer->cb_entry))) { ++ list_del_init(&timer->cb_entry); ++ goto out; ++ } ++ + if (&timer->node == timerqueue_getnext(&base->active)) { + #ifdef CONFIG_HIGH_RES_TIMERS + /* Reprogram the clock event device. if enabled */ +@@ -983,8 +1031,20 @@ int __hrtimer_start_range_ns(struct hrti + * + * XXX send_remote_softirq() ? + */ +- if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)) +- hrtimer_enqueue_reprogram(timer, new_base, wakeup); ++ if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)) { ++ ret = hrtimer_enqueue_reprogram(timer, new_base, wakeup); ++ if (ret) { ++ /* ++ * In case we failed to reprogram the timer (mostly ++ * because out current timer is already elapsed), ++ * remove it again and report a failure. This avoids ++ * stale base->first entries. 
++ */ ++ debug_deactivate(timer); ++ __remove_hrtimer(timer, new_base, ++ timer->state & HRTIMER_STATE_CALLBACK, 0); ++ } ++ } + + unlock_hrtimer_base(timer, &flags); + +@@ -1070,7 +1130,7 @@ int hrtimer_cancel(struct hrtimer *timer + + if (ret >= 0) + return ret; +- cpu_relax(); ++ hrtimer_wait_for_timer(timer); + } + } + EXPORT_SYMBOL_GPL(hrtimer_cancel); +@@ -1149,6 +1209,7 @@ static void __hrtimer_init(struct hrtime + + base = hrtimer_clockid_to_base(clock_id); + timer->base = &cpu_base->clock_base[base]; ++ INIT_LIST_HEAD(&timer->cb_entry); + timerqueue_init(&timer->node); + + #ifdef CONFIG_TIMER_STATS +@@ -1232,6 +1293,122 @@ static void __run_hrtimer(struct hrtimer + timer->state &= ~HRTIMER_STATE_CALLBACK; + } + ++static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer); ++ ++#ifdef CONFIG_PREEMPT_RT_BASE ++static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer, ++ struct hrtimer_clock_base *base) ++{ ++ /* ++ * Note, we clear the callback flag before we requeue the ++ * timer otherwise we trigger the callback_running() check ++ * in hrtimer_reprogram(). ++ */ ++ timer->state &= ~HRTIMER_STATE_CALLBACK; ++ ++ if (restart != HRTIMER_NORESTART) { ++ BUG_ON(hrtimer_active(timer)); ++ /* ++ * Enqueue the timer, if it's the leftmost timer then ++ * we need to reprogram it. ++ */ ++ if (!enqueue_hrtimer(timer, base)) ++ return; ++ ++#ifndef CONFIG_HIGH_RES_TIMERS ++ } ++#else ++ if (base->cpu_base->hres_active && ++ hrtimer_reprogram(timer, base)) ++ goto requeue; ++ ++ } else if (hrtimer_active(timer)) { ++ /* ++ * If the timer was rearmed on another CPU, reprogram ++ * the event device. ++ */ ++ if (&timer->node == base->active.next && ++ base->cpu_base->hres_active && ++ hrtimer_reprogram(timer, base)) ++ goto requeue; ++ } ++ return; ++ ++requeue: ++ /* ++ * Timer is expired. Thus move it from tree to pending list ++ * again. ++ */ ++ __remove_hrtimer(timer, base, timer->state, 0); ++ list_add_tail(&timer->cb_entry, &base->expired); ++#endif ++} ++ ++/* ++ * The changes in mainline which removed the callback modes from ++ * hrtimer are not yet working with -rt. The non wakeup_process() ++ * based callbacks which involve sleeping locks need to be treated ++ * seperately. ++ */ ++static void hrtimer_rt_run_pending(void) ++{ ++ enum hrtimer_restart (*fn)(struct hrtimer *); ++ struct hrtimer_cpu_base *cpu_base; ++ struct hrtimer_clock_base *base; ++ struct hrtimer *timer; ++ int index, restart; ++ ++ local_irq_disable(); ++ cpu_base = &per_cpu(hrtimer_bases, smp_processor_id()); ++ ++ raw_spin_lock(&cpu_base->lock); ++ ++ for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) { ++ base = &cpu_base->clock_base[index]; ++ ++ while (!list_empty(&base->expired)) { ++ timer = list_first_entry(&base->expired, ++ struct hrtimer, cb_entry); ++ ++ /* ++ * Same as the above __run_hrtimer function ++ * just we run with interrupts enabled. 
++ */ ++ debug_hrtimer_deactivate(timer); ++ __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0); ++ timer_stats_account_hrtimer(timer); ++ fn = timer->function; ++ ++ raw_spin_unlock_irq(&cpu_base->lock); ++ restart = fn(timer); ++ raw_spin_lock_irq(&cpu_base->lock); ++ ++ hrtimer_rt_reprogram(restart, timer, base); ++ } ++ } ++ ++ raw_spin_unlock_irq(&cpu_base->lock); ++ ++ wake_up_timer_waiters(cpu_base); ++} ++ ++static int hrtimer_rt_defer(struct hrtimer *timer) ++{ ++ if (timer->irqsafe) ++ return 0; ++ ++ __remove_hrtimer(timer, timer->base, timer->state, 0); ++ list_add_tail(&timer->cb_entry, &timer->base->expired); ++ return 1; ++} ++ ++#else ++ ++static inline void hrtimer_rt_run_pending(void) { } ++static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; } ++ ++#endif ++ + #ifdef CONFIG_HIGH_RES_TIMERS + + /* +@@ -1242,7 +1419,7 @@ void hrtimer_interrupt(struct clock_even + { + struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); + ktime_t expires_next, now, entry_time, delta; +- int i, retries = 0; ++ int i, retries = 0, raise = 0; + + BUG_ON(!cpu_base->hres_active); + cpu_base->nr_events++; +@@ -1278,6 +1455,14 @@ retry: + + timer = container_of(node, struct hrtimer, node); + ++ trace_hrtimer_interrupt(raw_smp_processor_id(), ++ ktime_to_ns(ktime_sub( ++ hrtimer_get_expires(timer), basenow)), ++ current, ++ timer->function == hrtimer_wakeup ? ++ container_of(timer, struct hrtimer_sleeper, ++ timer)->task : NULL); ++ + /* + * The immediate goal for using the softexpires is + * minimizing wakeups, not running timers at the +@@ -1301,7 +1486,10 @@ retry: + break; + } + +- __run_hrtimer(timer, &basenow); ++ if (!hrtimer_rt_defer(timer)) ++ __run_hrtimer(timer, &basenow); ++ else ++ raise = 1; + } + } + +@@ -1316,6 +1504,10 @@ retry: + if (expires_next.tv64 == KTIME_MAX || + !tick_program_event(expires_next, 0)) { + cpu_base->hang_detected = 0; ++ ++ if (raise) ++ raise_softirq_irqoff(HRTIMER_SOFTIRQ); ++ + return; + } + +@@ -1391,17 +1583,17 @@ void hrtimer_peek_ahead_timers(void) + local_irq_restore(flags); + } + +-static void run_hrtimer_softirq(struct softirq_action *h) +-{ +- hrtimer_peek_ahead_timers(); +-} +- + #else /* CONFIG_HIGH_RES_TIMERS */ + + static inline void __hrtimer_peek_ahead_timers(void) { } + + #endif /* !CONFIG_HIGH_RES_TIMERS */ + ++static void run_hrtimer_softirq(struct softirq_action *h) ++{ ++ hrtimer_rt_run_pending(); ++} ++ + /* + * Called from timer softirq every jiffy, expire hrtimers: + * +@@ -1434,7 +1626,7 @@ void hrtimer_run_queues(void) + struct timerqueue_node *node; + struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); + struct hrtimer_clock_base *base; +- int index, gettime = 1; ++ int index, gettime = 1, raise = 0; + + if (hrtimer_hres_active()) + return; +@@ -1459,10 +1651,16 @@ void hrtimer_run_queues(void) + hrtimer_get_expires_tv64(timer)) + break; + +- __run_hrtimer(timer, &base->softirq_time); ++ if (!hrtimer_rt_defer(timer)) ++ __run_hrtimer(timer, &base->softirq_time); ++ else ++ raise = 1; + } + raw_spin_unlock(&cpu_base->lock); + } ++ ++ if (raise) ++ raise_softirq_irqoff(HRTIMER_SOFTIRQ); + } + + /* +@@ -1484,6 +1682,7 @@ static enum hrtimer_restart hrtimer_wake + void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) + { + sl->timer.function = hrtimer_wakeup; ++ sl->timer.irqsafe = 1; + sl->task = task; + } + EXPORT_SYMBOL_GPL(hrtimer_init_sleeper); +@@ -1622,9 +1821,13 @@ static void __cpuinit init_hrtimers_cpu( + for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) 
{ + cpu_base->clock_base[i].cpu_base = cpu_base; + timerqueue_init_head(&cpu_base->clock_base[i].active); ++ INIT_LIST_HEAD(&cpu_base->clock_base[i].expired); + } + + hrtimer_init_hres(cpu_base); ++#ifdef CONFIG_PREEMPT_RT_BASE ++ init_waitqueue_head(&cpu_base->wait); ++#endif + } + + #ifdef CONFIG_HOTPLUG_CPU +@@ -1737,9 +1940,7 @@ void __init hrtimers_init(void) + hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE, + (void *)(long)smp_processor_id()); + register_cpu_notifier(&hrtimers_nb); +-#ifdef CONFIG_HIGH_RES_TIMERS + open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq); +-#endif + } + + /** +Index: linux-3.2/kernel/trace/Kconfig +=================================================================== +--- linux-3.2.orig/kernel/trace/Kconfig ++++ linux-3.2/kernel/trace/Kconfig +@@ -192,6 +192,24 @@ config IRQSOFF_TRACER + enabled. This option and the preempt-off timing option can be + used together or separately.) + ++config INTERRUPT_OFF_HIST ++ bool "Interrupts-off Latency Histogram" ++ depends on IRQSOFF_TRACER ++ help ++ This option generates continuously updated histograms (one per cpu) ++ of the duration of time periods with interrupts disabled. The ++ histograms are disabled by default. To enable them, write a non-zero ++ number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff ++ ++ If PREEMPT_OFF_HIST is also selected, additional histograms (one ++ per cpu) are generated that accumulate the duration of time periods ++ when both interrupts and preemption are disabled. The histogram data ++ will be located in the debug file system at ++ ++ /sys/kernel/debug/tracing/latency_hist/irqsoff ++ + config PREEMPT_TRACER + bool "Preemption-off Latency Tracer" + default n +@@ -214,6 +232,24 @@ config PREEMPT_TRACER + enabled. This option and the irqs-off timing option can be + used together or separately.) + ++config PREEMPT_OFF_HIST ++ bool "Preemption-off Latency Histogram" ++ depends on PREEMPT_TRACER ++ help ++ This option generates continuously updated histograms (one per cpu) ++ of the duration of time periods with preemption disabled. The ++ histograms are disabled by default. To enable them, write a non-zero ++ number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff ++ ++ If INTERRUPT_OFF_HIST is also selected, additional histograms (one ++ per cpu) are generated that accumulate the duration of time periods ++ when both interrupts and preemption are disabled. The histogram data ++ will be located in the debug file system at ++ ++ /sys/kernel/debug/tracing/latency_hist/preemptoff ++ + config SCHED_TRACER + bool "Scheduling Latency Tracer" + select GENERIC_TRACER +@@ -223,6 +259,74 @@ config SCHED_TRACER + This tracer tracks the latency of the highest priority task + to be scheduled in, starting from the point it has woken up. + ++config WAKEUP_LATENCY_HIST ++ bool "Scheduling Latency Histogram" ++ depends on SCHED_TRACER ++ help ++ This option generates continuously updated histograms (one per cpu) ++ of the scheduling latency of the highest priority task. ++ The histograms are disabled by default. To enable them, write a ++ non-zero number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/wakeup ++ ++ Two different algorithms are used, one to determine the latency of ++ processes that exclusively use the highest priority of the system and ++ another one to determine the latency of processes that share the ++ highest system priority with other processes. 
The former is used to ++ improve hardware and system software, the latter to optimize the ++ priority design of a given system. The histogram data will be ++ located in the debug file system at ++ ++ /sys/kernel/debug/tracing/latency_hist/wakeup ++ ++ and ++ ++ /sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio ++ ++ If both Scheduling Latency Histogram and Missed Timer Offsets ++ Histogram are selected, additional histogram data will be collected ++ that contain, in addition to the wakeup latency, the timer latency, in ++ case the wakeup was triggered by an expired timer. These histograms ++ are available in the ++ ++ /sys/kernel/debug/tracing/latency_hist/timerandwakeup ++ ++ directory. They reflect the apparent interrupt and scheduling latency ++ and are best suitable to determine the worst-case latency of a given ++ system. To enable these histograms, write a non-zero number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup ++ ++config MISSED_TIMER_OFFSETS_HIST ++ depends on HIGH_RES_TIMERS ++ select GENERIC_TRACER ++ bool "Missed Timer Offsets Histogram" ++ help ++ Generate a histogram of missed timer offsets in microseconds. The ++ histograms are disabled by default. To enable them, write a non-zero ++ number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/missed_timer_offsets ++ ++ The histogram data will be located in the debug file system at ++ ++ /sys/kernel/debug/tracing/latency_hist/missed_timer_offsets ++ ++ If both Scheduling Latency Histogram and Missed Timer Offsets ++ Histogram are selected, additional histogram data will be collected ++ that contain, in addition to the wakeup latency, the timer latency, in ++ case the wakeup was triggered by an expired timer. These histograms ++ are available in the ++ ++ /sys/kernel/debug/tracing/latency_hist/timerandwakeup ++ ++ directory. They reflect the apparent interrupt and scheduling latency ++ and are best suitable to determine the worst-case latency of a given ++ system. To enable these histograms, write a non-zero number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup ++ + config ENABLE_DEFAULT_TRACERS + bool "Trace process context switches and events" + depends on !GENERIC_TRACER +Index: linux-3.2/kernel/trace/Makefile +=================================================================== +--- linux-3.2.orig/kernel/trace/Makefile ++++ linux-3.2/kernel/trace/Makefile +@@ -36,6 +36,10 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace_f + obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o + obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o + obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o ++obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o ++obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o ++obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o ++obj-$(CONFIG_MISSED_TIMER_OFFSETS_HIST) += latency_hist.o + obj-$(CONFIG_NOP_TRACER) += trace_nop.o + obj-$(CONFIG_STACK_TRACER) += trace_stack.o + obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o +Index: linux-3.2/kernel/trace/latency_hist.c +=================================================================== +--- /dev/null ++++ linux-3.2/kernel/trace/latency_hist.c +@@ -0,0 +1,1170 @@ ++/* ++ * kernel/trace/latency_hist.c ++ * ++ * Add support for histograms of preemption-off latency and ++ * interrupt-off latency and wakeup latency, it depends on ++ * Real-Time Preemption Support. ++ * ++ * Copyright (C) 2005 MontaVista Software, Inc. ++ * Yi Yang ++ * ++ * Converted to work with the new latency tracer. ++ * Copyright (C) 2008 Red Hat, Inc. 
++ * Steven Rostedt ++ * ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "trace.h" ++#include ++ ++#define CREATE_TRACE_POINTS ++#include ++ ++enum { ++ IRQSOFF_LATENCY = 0, ++ PREEMPTOFF_LATENCY, ++ PREEMPTIRQSOFF_LATENCY, ++ WAKEUP_LATENCY, ++ WAKEUP_LATENCY_SHAREDPRIO, ++ MISSED_TIMER_OFFSETS, ++ TIMERANDWAKEUP_LATENCY, ++ MAX_LATENCY_TYPE, ++}; ++ ++#define MAX_ENTRY_NUM 10240 ++ ++struct hist_data { ++ atomic_t hist_mode; /* 0 log, 1 don't log */ ++ long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */ ++ unsigned long min_lat; ++ unsigned long max_lat; ++ unsigned long long below_hist_bound_samples; ++ unsigned long long above_hist_bound_samples; ++ unsigned long long accumulate_lat; ++ unsigned long long total_samples; ++ unsigned long long hist_array[MAX_ENTRY_NUM]; ++}; ++ ++struct enable_data { ++ int latency_type; ++ int enabled; ++}; ++ ++static char *latency_hist_dir_root = "latency_hist"; ++ ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++static DEFINE_PER_CPU(struct hist_data, irqsoff_hist); ++static char *irqsoff_hist_dir = "irqsoff"; ++static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start); ++static DEFINE_PER_CPU(int, hist_irqsoff_counting); ++#endif ++ ++#ifdef CONFIG_PREEMPT_OFF_HIST ++static DEFINE_PER_CPU(struct hist_data, preemptoff_hist); ++static char *preemptoff_hist_dir = "preemptoff"; ++static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start); ++static DEFINE_PER_CPU(int, hist_preemptoff_counting); ++#endif ++ ++#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST) ++static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist); ++static char *preemptirqsoff_hist_dir = "preemptirqsoff"; ++static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start); ++static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting); ++#endif ++ ++#if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST) ++static notrace void probe_preemptirqsoff_hist(void *v, int reason, int start); ++static struct enable_data preemptirqsoff_enabled_data = { ++ .latency_type = PREEMPTIRQSOFF_LATENCY, ++ .enabled = 0, ++}; ++#endif ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++struct maxlatproc_data { ++ char comm[FIELD_SIZEOF(struct task_struct, comm)]; ++ char current_comm[FIELD_SIZEOF(struct task_struct, comm)]; ++ int pid; ++ int current_pid; ++ int prio; ++ int current_prio; ++ long latency; ++ long timeroffset; ++ cycle_t timestamp; ++}; ++#endif ++ ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist); ++static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio); ++static char *wakeup_latency_hist_dir = "wakeup"; ++static char *wakeup_latency_hist_dir_sharedprio = "sharedprio"; ++static notrace void probe_wakeup_latency_hist_start(void *v, ++ struct task_struct *p, int success); ++static notrace void probe_wakeup_latency_hist_stop(void *v, ++ struct task_struct *prev, struct task_struct *next); ++static notrace void probe_sched_migrate_task(void *, ++ struct task_struct *task, int cpu); ++static struct enable_data wakeup_latency_enabled_data = { ++ .latency_type = WAKEUP_LATENCY, ++ .enabled = 0, ++}; ++static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc); ++static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio); ++static DEFINE_PER_CPU(struct task_struct *, wakeup_task); ++static DEFINE_PER_CPU(int, wakeup_sharedprio); ++static unsigned long 
wakeup_pid; ++#endif ++ ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets); ++static char *missed_timer_offsets_dir = "missed_timer_offsets"; ++static notrace void probe_hrtimer_interrupt(void *v, int cpu, ++ long long offset, struct task_struct *curr, struct task_struct *task); ++static struct enable_data missed_timer_offsets_enabled_data = { ++ .latency_type = MISSED_TIMER_OFFSETS, ++ .enabled = 0, ++}; ++static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc); ++static unsigned long missed_timer_offsets_pid; ++#endif ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++static DEFINE_PER_CPU(struct hist_data, timerandwakeup_latency_hist); ++static char *timerandwakeup_latency_hist_dir = "timerandwakeup"; ++static struct enable_data timerandwakeup_enabled_data = { ++ .latency_type = TIMERANDWAKEUP_LATENCY, ++ .enabled = 0, ++}; ++static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc); ++#endif ++ ++void notrace latency_hist(int latency_type, int cpu, unsigned long latency, ++ unsigned long timeroffset, cycle_t stop, ++ struct task_struct *p) ++{ ++ struct hist_data *my_hist; ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ struct maxlatproc_data *mp = NULL; ++#endif ++ ++ if (cpu < 0 || cpu >= NR_CPUS || latency_type < 0 || ++ latency_type >= MAX_LATENCY_TYPE) ++ return; ++ ++ switch (latency_type) { ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ case IRQSOFF_LATENCY: ++ my_hist = &per_cpu(irqsoff_hist, cpu); ++ break; ++#endif ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ case PREEMPTOFF_LATENCY: ++ my_hist = &per_cpu(preemptoff_hist, cpu); ++ break; ++#endif ++#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST) ++ case PREEMPTIRQSOFF_LATENCY: ++ my_hist = &per_cpu(preemptirqsoff_hist, cpu); ++ break; ++#endif ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ case WAKEUP_LATENCY: ++ my_hist = &per_cpu(wakeup_latency_hist, cpu); ++ mp = &per_cpu(wakeup_maxlatproc, cpu); ++ break; ++ case WAKEUP_LATENCY_SHAREDPRIO: ++ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu); ++ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu); ++ break; ++#endif ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ case MISSED_TIMER_OFFSETS: ++ my_hist = &per_cpu(missed_timer_offsets, cpu); ++ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu); ++ break; ++#endif ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ case TIMERANDWAKEUP_LATENCY: ++ my_hist = &per_cpu(timerandwakeup_latency_hist, cpu); ++ mp = &per_cpu(timerandwakeup_maxlatproc, cpu); ++ break; ++#endif ++ ++ default: ++ return; ++ } ++ ++ latency += my_hist->offset; ++ ++ if (atomic_read(&my_hist->hist_mode) == 0) ++ return; ++ ++ if (latency < 0 || latency >= MAX_ENTRY_NUM) { ++ if (latency < 0) ++ my_hist->below_hist_bound_samples++; ++ else ++ my_hist->above_hist_bound_samples++; ++ } else ++ my_hist->hist_array[latency]++; ++ ++ if (unlikely(latency > my_hist->max_lat || ++ my_hist->min_lat == ULONG_MAX)) { ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ if (latency_type == WAKEUP_LATENCY || ++ latency_type == WAKEUP_LATENCY_SHAREDPRIO || ++ latency_type == MISSED_TIMER_OFFSETS || ++ latency_type == TIMERANDWAKEUP_LATENCY) { ++ strncpy(mp->comm, p->comm, sizeof(mp->comm)); ++ strncpy(mp->current_comm, current->comm, ++ sizeof(mp->current_comm)); ++ mp->pid = task_pid_nr(p); ++ 
mp->current_pid = task_pid_nr(current); ++ mp->prio = p->prio; ++ mp->current_prio = current->prio; ++ mp->latency = latency; ++ mp->timeroffset = timeroffset; ++ mp->timestamp = stop; ++ } ++#endif ++ my_hist->max_lat = latency; ++ } ++ if (unlikely(latency < my_hist->min_lat)) ++ my_hist->min_lat = latency; ++ my_hist->total_samples++; ++ my_hist->accumulate_lat += latency; ++} ++ ++static void *l_start(struct seq_file *m, loff_t *pos) ++{ ++ loff_t *index_ptr = NULL; ++ loff_t index = *pos; ++ struct hist_data *my_hist = m->private; ++ ++ if (index == 0) { ++ char minstr[32], avgstr[32], maxstr[32]; ++ ++ atomic_dec(&my_hist->hist_mode); ++ ++ if (likely(my_hist->total_samples)) { ++ unsigned long avg = (unsigned long) ++ div64_u64(my_hist->accumulate_lat, ++ my_hist->total_samples); ++ snprintf(minstr, sizeof(minstr), "%ld", ++ (long) my_hist->min_lat - my_hist->offset); ++ snprintf(avgstr, sizeof(avgstr), "%ld", ++ (long) avg - my_hist->offset); ++ snprintf(maxstr, sizeof(maxstr), "%ld", ++ (long) my_hist->max_lat - my_hist->offset); ++ } else { ++ strcpy(minstr, ""); ++ strcpy(avgstr, minstr); ++ strcpy(maxstr, minstr); ++ } ++ ++ seq_printf(m, "#Minimum latency: %s microseconds\n" ++ "#Average latency: %s microseconds\n" ++ "#Maximum latency: %s microseconds\n" ++ "#Total samples: %llu\n" ++ "#There are %llu samples lower than %ld" ++ " microseconds.\n" ++ "#There are %llu samples greater or equal" ++ " than %ld microseconds.\n" ++ "#usecs\t%16s\n", ++ minstr, avgstr, maxstr, ++ my_hist->total_samples, ++ my_hist->below_hist_bound_samples, ++ -my_hist->offset, ++ my_hist->above_hist_bound_samples, ++ MAX_ENTRY_NUM - my_hist->offset, ++ "samples"); ++ } ++ if (index < MAX_ENTRY_NUM) { ++ index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL); ++ if (index_ptr) ++ *index_ptr = index; ++ } ++ ++ return index_ptr; ++} ++ ++static void *l_next(struct seq_file *m, void *p, loff_t *pos) ++{ ++ loff_t *index_ptr = p; ++ struct hist_data *my_hist = m->private; ++ ++ if (++*pos >= MAX_ENTRY_NUM) { ++ atomic_inc(&my_hist->hist_mode); ++ return NULL; ++ } ++ *index_ptr = *pos; ++ return index_ptr; ++} ++ ++static void l_stop(struct seq_file *m, void *p) ++{ ++ kfree(p); ++} ++ ++static int l_show(struct seq_file *m, void *p) ++{ ++ int index = *(loff_t *) p; ++ struct hist_data *my_hist = m->private; ++ ++ seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset, ++ my_hist->hist_array[index]); ++ return 0; ++} ++ ++static struct seq_operations latency_hist_seq_op = { ++ .start = l_start, ++ .next = l_next, ++ .stop = l_stop, ++ .show = l_show ++}; ++ ++static int latency_hist_open(struct inode *inode, struct file *file) ++{ ++ int ret; ++ ++ ret = seq_open(file, &latency_hist_seq_op); ++ if (!ret) { ++ struct seq_file *seq = file->private_data; ++ seq->private = inode->i_private; ++ } ++ return ret; ++} ++ ++static struct file_operations latency_hist_fops = { ++ .open = latency_hist_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = seq_release, ++}; ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++static void clear_maxlatprocdata(struct maxlatproc_data *mp) ++{ ++ mp->comm[0] = mp->current_comm[0] = '\0'; ++ mp->prio = mp->current_prio = mp->pid = mp->current_pid = ++ mp->latency = mp->timeroffset = -1; ++ mp->timestamp = 0; ++} ++#endif ++ ++static void hist_reset(struct hist_data *hist) ++{ ++ atomic_dec(&hist->hist_mode); ++ ++ memset(hist->hist_array, 0, sizeof(hist->hist_array)); ++ hist->below_hist_bound_samples = 0ULL; ++ 
hist->above_hist_bound_samples = 0ULL; ++ hist->min_lat = ULONG_MAX; ++ hist->max_lat = 0UL; ++ hist->total_samples = 0ULL; ++ hist->accumulate_lat = 0ULL; ++ ++ atomic_inc(&hist->hist_mode); ++} ++ ++static ssize_t ++latency_hist_reset(struct file *file, const char __user *a, ++ size_t size, loff_t *off) ++{ ++ int cpu; ++ struct hist_data *hist = NULL; ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ struct maxlatproc_data *mp = NULL; ++#endif ++ off_t latency_type = (off_t) file->private_data; ++ ++ for_each_online_cpu(cpu) { ++ ++ switch (latency_type) { ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ case PREEMPTOFF_LATENCY: ++ hist = &per_cpu(preemptoff_hist, cpu); ++ break; ++#endif ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ case IRQSOFF_LATENCY: ++ hist = &per_cpu(irqsoff_hist, cpu); ++ break; ++#endif ++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) ++ case PREEMPTIRQSOFF_LATENCY: ++ hist = &per_cpu(preemptirqsoff_hist, cpu); ++ break; ++#endif ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ case WAKEUP_LATENCY: ++ hist = &per_cpu(wakeup_latency_hist, cpu); ++ mp = &per_cpu(wakeup_maxlatproc, cpu); ++ break; ++ case WAKEUP_LATENCY_SHAREDPRIO: ++ hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu); ++ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu); ++ break; ++#endif ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ case MISSED_TIMER_OFFSETS: ++ hist = &per_cpu(missed_timer_offsets, cpu); ++ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu); ++ break; ++#endif ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ case TIMERANDWAKEUP_LATENCY: ++ hist = &per_cpu(timerandwakeup_latency_hist, cpu); ++ mp = &per_cpu(timerandwakeup_maxlatproc, cpu); ++ break; ++#endif ++ } ++ ++ hist_reset(hist); ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ if (latency_type == WAKEUP_LATENCY || ++ latency_type == WAKEUP_LATENCY_SHAREDPRIO || ++ latency_type == MISSED_TIMER_OFFSETS || ++ latency_type == TIMERANDWAKEUP_LATENCY) ++ clear_maxlatprocdata(mp); ++#endif ++ } ++ ++ return size; ++} ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++static ssize_t ++show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) ++{ ++ char buf[64]; ++ int r; ++ unsigned long *this_pid = file->private_data; ++ ++ r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid); ++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); ++} ++ ++static ssize_t do_pid(struct file *file, const char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ char buf[64]; ++ unsigned long pid; ++ unsigned long *this_pid = file->private_data; ++ ++ if (cnt >= sizeof(buf)) ++ return -EINVAL; ++ ++ if (copy_from_user(&buf, ubuf, cnt)) ++ return -EFAULT; ++ ++ buf[cnt] = '\0'; ++ ++ if (strict_strtoul(buf, 10, &pid)) ++ return(-EINVAL); ++ ++ *this_pid = pid; ++ ++ return cnt; ++} ++#endif ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++static ssize_t ++show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) ++{ ++ int r; ++ struct maxlatproc_data *mp = file->private_data; ++ int strmaxlen = (TASK_COMM_LEN * 2) + (8 * 8); ++ unsigned long long t; ++ unsigned long usecs, secs; ++ char *buf; ++ ++ if (mp->pid == -1 || mp->current_pid == -1) { ++ buf = "(none)\n"; ++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, ++ strlen(buf)); ++ } ++ ++ buf = kmalloc(strmaxlen, GFP_KERNEL); 
++ if (buf == NULL) ++ return -ENOMEM; ++ ++ t = ns2usecs(mp->timestamp); ++ usecs = do_div(t, USEC_PER_SEC); ++ secs = (unsigned long) t; ++ r = snprintf(buf, strmaxlen, ++ "%d %d %ld (%ld) %s <- %d %d %s %lu.%06lu\n", mp->pid, ++ MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->timeroffset, mp->comm, ++ mp->current_pid, MAX_RT_PRIO-1 - mp->current_prio, mp->current_comm, ++ secs, usecs); ++ r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); ++ kfree(buf); ++ return r; ++} ++#endif ++ ++static ssize_t ++show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) ++{ ++ char buf[64]; ++ struct enable_data *ed = file->private_data; ++ int r; ++ ++ r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled); ++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); ++} ++ ++static ssize_t ++do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) ++{ ++ char buf[64]; ++ long enable; ++ struct enable_data *ed = file->private_data; ++ ++ if (cnt >= sizeof(buf)) ++ return -EINVAL; ++ ++ if (copy_from_user(&buf, ubuf, cnt)) ++ return -EFAULT; ++ ++ buf[cnt] = 0; ++ ++ if (strict_strtol(buf, 10, &enable)) ++ return(-EINVAL); ++ ++ if ((enable && ed->enabled) || (!enable && !ed->enabled)) ++ return cnt; ++ ++ if (enable) { ++ int ret; ++ ++ switch (ed->latency_type) { ++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) ++ case PREEMPTIRQSOFF_LATENCY: ++ ret = register_trace_preemptirqsoff_hist( ++ probe_preemptirqsoff_hist, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_preemptirqsoff_hist " ++ "to trace_preemptirqsoff_hist\n"); ++ return ret; ++ } ++ break; ++#endif ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ case WAKEUP_LATENCY: ++ ret = register_trace_sched_wakeup( ++ probe_wakeup_latency_hist_start, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_wakeup_latency_hist_start " ++ "to trace_sched_wakeup\n"); ++ return ret; ++ } ++ ret = register_trace_sched_wakeup_new( ++ probe_wakeup_latency_hist_start, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_wakeup_latency_hist_start " ++ "to trace_sched_wakeup_new\n"); ++ unregister_trace_sched_wakeup( ++ probe_wakeup_latency_hist_start, NULL); ++ return ret; ++ } ++ ret = register_trace_sched_switch( ++ probe_wakeup_latency_hist_stop, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_wakeup_latency_hist_stop " ++ "to trace_sched_switch\n"); ++ unregister_trace_sched_wakeup( ++ probe_wakeup_latency_hist_start, NULL); ++ unregister_trace_sched_wakeup_new( ++ probe_wakeup_latency_hist_start, NULL); ++ return ret; ++ } ++ ret = register_trace_sched_migrate_task( ++ probe_sched_migrate_task, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_sched_migrate_task " ++ "to trace_sched_migrate_task\n"); ++ unregister_trace_sched_wakeup( ++ probe_wakeup_latency_hist_start, NULL); ++ unregister_trace_sched_wakeup_new( ++ probe_wakeup_latency_hist_start, NULL); ++ unregister_trace_sched_switch( ++ probe_wakeup_latency_hist_stop, NULL); ++ return ret; ++ } ++ break; ++#endif ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ case MISSED_TIMER_OFFSETS: ++ ret = register_trace_hrtimer_interrupt( ++ probe_hrtimer_interrupt, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_hrtimer_interrupt " ++ "to trace_hrtimer_interrupt\n"); ++ return ret; ++ } ++ break; ++#endif ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ case 
TIMERANDWAKEUP_LATENCY: ++ if (!wakeup_latency_enabled_data.enabled || ++ !missed_timer_offsets_enabled_data.enabled) ++ return -EINVAL; ++ break; ++#endif ++ default: ++ break; ++ } ++ } else { ++ switch (ed->latency_type) { ++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) ++ case PREEMPTIRQSOFF_LATENCY: ++ { ++ int cpu; ++ ++ unregister_trace_preemptirqsoff_hist( ++ probe_preemptirqsoff_hist, NULL); ++ for_each_online_cpu(cpu) { ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ per_cpu(hist_irqsoff_counting, ++ cpu) = 0; ++#endif ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ per_cpu(hist_preemptoff_counting, ++ cpu) = 0; ++#endif ++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) ++ per_cpu(hist_preemptirqsoff_counting, ++ cpu) = 0; ++#endif ++ } ++ } ++ break; ++#endif ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ case WAKEUP_LATENCY: ++ { ++ int cpu; ++ ++ unregister_trace_sched_wakeup( ++ probe_wakeup_latency_hist_start, NULL); ++ unregister_trace_sched_wakeup_new( ++ probe_wakeup_latency_hist_start, NULL); ++ unregister_trace_sched_switch( ++ probe_wakeup_latency_hist_stop, NULL); ++ unregister_trace_sched_migrate_task( ++ probe_sched_migrate_task, NULL); ++ ++ for_each_online_cpu(cpu) { ++ per_cpu(wakeup_task, cpu) = NULL; ++ per_cpu(wakeup_sharedprio, cpu) = 0; ++ } ++ } ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ timerandwakeup_enabled_data.enabled = 0; ++#endif ++ break; ++#endif ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ case MISSED_TIMER_OFFSETS: ++ unregister_trace_hrtimer_interrupt( ++ probe_hrtimer_interrupt, NULL); ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ timerandwakeup_enabled_data.enabled = 0; ++#endif ++ break; ++#endif ++ default: ++ break; ++ } ++ } ++ ed->enabled = enable; ++ return cnt; ++} ++ ++static const struct file_operations latency_hist_reset_fops = { ++ .open = tracing_open_generic, ++ .write = latency_hist_reset, ++}; ++ ++static const struct file_operations enable_fops = { ++ .open = tracing_open_generic, ++ .read = show_enable, ++ .write = do_enable, ++}; ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++static const struct file_operations pid_fops = { ++ .open = tracing_open_generic, ++ .read = show_pid, ++ .write = do_pid, ++}; ++ ++static const struct file_operations maxlatproc_fops = { ++ .open = tracing_open_generic, ++ .read = show_maxlatproc, ++}; ++#endif ++ ++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) ++static notrace void probe_preemptirqsoff_hist(void *v, int reason, ++ int starthist) ++{ ++ int cpu = raw_smp_processor_id(); ++ int time_set = 0; ++ ++ if (starthist) { ++ cycle_t uninitialized_var(start); ++ ++ if (!preempt_count() && !irqs_disabled()) ++ return; ++ ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ if ((reason == IRQS_OFF || reason == TRACE_START) && ++ !per_cpu(hist_irqsoff_counting, cpu)) { ++ per_cpu(hist_irqsoff_counting, cpu) = 1; ++ start = ftrace_now(cpu); ++ time_set++; ++ per_cpu(hist_irqsoff_start, cpu) = start; ++ } ++#endif ++ ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ if ((reason == PREEMPT_OFF || reason == TRACE_START) && ++ !per_cpu(hist_preemptoff_counting, cpu)) { ++ per_cpu(hist_preemptoff_counting, cpu) = 1; ++ if (!(time_set++)) ++ start = ftrace_now(cpu); ++ per_cpu(hist_preemptoff_start, cpu) = start; ++ } ++#endif ++ ++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) ++ if (per_cpu(hist_irqsoff_counting, cpu) && ++ per_cpu(hist_preemptoff_counting, cpu) && ++ !per_cpu(hist_preemptirqsoff_counting, 
cpu)) { ++ per_cpu(hist_preemptirqsoff_counting, cpu) = 1; ++ if (!time_set) ++ start = ftrace_now(cpu); ++ per_cpu(hist_preemptirqsoff_start, cpu) = start; ++ } ++#endif ++ } else { ++ cycle_t uninitialized_var(stop); ++ ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ if ((reason == IRQS_ON || reason == TRACE_STOP) && ++ per_cpu(hist_irqsoff_counting, cpu)) { ++ cycle_t start = per_cpu(hist_irqsoff_start, cpu); ++ ++ stop = ftrace_now(cpu); ++ time_set++; ++ if (start && stop >= start) { ++ unsigned long latency = ++ nsecs_to_usecs(stop - start); ++ ++ latency_hist(IRQSOFF_LATENCY, cpu, latency, 0, ++ stop, NULL); ++ } ++ per_cpu(hist_irqsoff_counting, cpu) = 0; ++ } ++#endif ++ ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ if ((reason == PREEMPT_ON || reason == TRACE_STOP) && ++ per_cpu(hist_preemptoff_counting, cpu)) { ++ cycle_t start = per_cpu(hist_preemptoff_start, cpu); ++ ++ if (!(time_set++)) ++ stop = ftrace_now(cpu); ++ if (start && stop >= start) { ++ unsigned long latency = ++ nsecs_to_usecs(stop - start); ++ ++ latency_hist(PREEMPTOFF_LATENCY, cpu, latency, ++ 0, stop, NULL); ++ } ++ per_cpu(hist_preemptoff_counting, cpu) = 0; ++ } ++#endif ++ ++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) ++ if ((!per_cpu(hist_irqsoff_counting, cpu) || ++ !per_cpu(hist_preemptoff_counting, cpu)) && ++ per_cpu(hist_preemptirqsoff_counting, cpu)) { ++ cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu); ++ ++ if (!time_set) ++ stop = ftrace_now(cpu); ++ if (start && stop >= start) { ++ unsigned long latency = ++ nsecs_to_usecs(stop - start); ++ latency_hist(PREEMPTIRQSOFF_LATENCY, cpu, ++ latency, 0, stop, NULL); ++ } ++ per_cpu(hist_preemptirqsoff_counting, cpu) = 0; ++ } ++#endif ++ } ++} ++#endif ++ ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++static DEFINE_RAW_SPINLOCK(wakeup_lock); ++static notrace void probe_sched_migrate_task(void *v, struct task_struct *task, ++ int cpu) ++{ ++ int old_cpu = task_cpu(task); ++ ++ if (cpu != old_cpu) { ++ unsigned long flags; ++ struct task_struct *cpu_wakeup_task; ++ ++ raw_spin_lock_irqsave(&wakeup_lock, flags); ++ ++ cpu_wakeup_task = per_cpu(wakeup_task, old_cpu); ++ if (task == cpu_wakeup_task) { ++ put_task_struct(cpu_wakeup_task); ++ per_cpu(wakeup_task, old_cpu) = NULL; ++ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task; ++ get_task_struct(cpu_wakeup_task); ++ } ++ ++ raw_spin_unlock_irqrestore(&wakeup_lock, flags); ++ } ++} ++ ++static notrace void probe_wakeup_latency_hist_start(void *v, ++ struct task_struct *p, int success) ++{ ++ unsigned long flags; ++ struct task_struct *curr = current; ++ int cpu = task_cpu(p); ++ struct task_struct *cpu_wakeup_task; ++ ++ raw_spin_lock_irqsave(&wakeup_lock, flags); ++ ++ cpu_wakeup_task = per_cpu(wakeup_task, cpu); ++ ++ if (wakeup_pid) { ++ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) || ++ p->prio == curr->prio) ++ per_cpu(wakeup_sharedprio, cpu) = 1; ++ if (likely(wakeup_pid != task_pid_nr(p))) ++ goto out; ++ } else { ++ if (likely(!rt_task(p)) || ++ (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) || ++ p->prio > curr->prio) ++ goto out; ++ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) || ++ p->prio == curr->prio) ++ per_cpu(wakeup_sharedprio, cpu) = 1; ++ } ++ ++ if (cpu_wakeup_task) ++ put_task_struct(cpu_wakeup_task); ++ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p; ++ get_task_struct(cpu_wakeup_task); ++ cpu_wakeup_task->preempt_timestamp_hist = ++ ftrace_now(raw_smp_processor_id()); ++out: ++ raw_spin_unlock_irqrestore(&wakeup_lock, flags); ++} ++ 
++static notrace void probe_wakeup_latency_hist_stop(void *v, ++ struct task_struct *prev, struct task_struct *next) ++{ ++ unsigned long flags; ++ int cpu = task_cpu(next); ++ unsigned long latency; ++ cycle_t stop; ++ struct task_struct *cpu_wakeup_task; ++ ++ raw_spin_lock_irqsave(&wakeup_lock, flags); ++ ++ cpu_wakeup_task = per_cpu(wakeup_task, cpu); ++ ++ if (cpu_wakeup_task == NULL) ++ goto out; ++ ++ /* Already running? */ ++ if (unlikely(current == cpu_wakeup_task)) ++ goto out_reset; ++ ++ if (next != cpu_wakeup_task) { ++ if (next->prio < cpu_wakeup_task->prio) ++ goto out_reset; ++ ++ if (next->prio == cpu_wakeup_task->prio) ++ per_cpu(wakeup_sharedprio, cpu) = 1; ++ ++ goto out; ++ } ++ ++ /* ++ * The task we are waiting for is about to be switched to. ++ * Calculate latency and store it in histogram. ++ */ ++ stop = ftrace_now(raw_smp_processor_id()); ++ ++ latency = nsecs_to_usecs(stop - next->preempt_timestamp_hist); ++ ++ if (per_cpu(wakeup_sharedprio, cpu)) { ++ latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop, ++ next); ++ per_cpu(wakeup_sharedprio, cpu) = 0; ++ } else { ++ latency_hist(WAKEUP_LATENCY, cpu, latency, 0, stop, next); ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ if (timerandwakeup_enabled_data.enabled) { ++ latency_hist(TIMERANDWAKEUP_LATENCY, cpu, ++ next->timer_offset + latency, next->timer_offset, ++ stop, next); ++ } ++#endif ++ } ++ ++out_reset: ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ next->timer_offset = 0; ++#endif ++ put_task_struct(cpu_wakeup_task); ++ per_cpu(wakeup_task, cpu) = NULL; ++out: ++ raw_spin_unlock_irqrestore(&wakeup_lock, flags); ++} ++#endif ++ ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++static notrace void probe_hrtimer_interrupt(void *v, int cpu, ++ long long latency_ns, struct task_struct *curr, struct task_struct *task) ++{ ++ if (latency_ns <= 0 && task != NULL && rt_task(task) && ++ (task->prio < curr->prio || ++ (task->prio == curr->prio && ++ !cpumask_test_cpu(cpu, &task->cpus_allowed)))) { ++ unsigned long latency; ++ cycle_t now; ++ ++ if (missed_timer_offsets_pid) { ++ if (likely(missed_timer_offsets_pid != ++ task_pid_nr(task))) ++ return; ++ } ++ ++ now = ftrace_now(cpu); ++ latency = (unsigned long) div_s64(-latency_ns, 1000); ++ latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now, ++ task); ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ task->timer_offset = latency; ++#endif ++ } ++} ++#endif ++ ++static __init int latency_hist_init(void) ++{ ++ struct dentry *latency_hist_root = NULL; ++ struct dentry *dentry; ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ struct dentry *dentry_sharedprio; ++#endif ++ struct dentry *entry; ++ struct dentry *enable_root; ++ int i = 0; ++ struct hist_data *my_hist; ++ char name[64]; ++ char *cpufmt = "CPU%d"; ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ char *cpufmt_maxlatproc = "max_latency-CPU%d"; ++ struct maxlatproc_data *mp = NULL; ++#endif ++ ++ dentry = tracing_init_dentry(); ++ latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry); ++ enable_root = debugfs_create_dir("enable", latency_hist_root); ++ ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root); ++ for_each_possible_cpu(i) { ++ sprintf(name, cpufmt, i); ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(irqsoff_hist, i), &latency_hist_fops); ++ my_hist = &per_cpu(irqsoff_hist, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = 0xFFFFFFFFUL; ++ } ++ entry = 
debugfs_create_file("reset", 0644, dentry, ++ (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops); ++#endif ++ ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ dentry = debugfs_create_dir(preemptoff_hist_dir, ++ latency_hist_root); ++ for_each_possible_cpu(i) { ++ sprintf(name, cpufmt, i); ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(preemptoff_hist, i), &latency_hist_fops); ++ my_hist = &per_cpu(preemptoff_hist, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = 0xFFFFFFFFUL; ++ } ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops); ++#endif ++ ++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) ++ dentry = debugfs_create_dir(preemptirqsoff_hist_dir, ++ latency_hist_root); ++ for_each_possible_cpu(i) { ++ sprintf(name, cpufmt, i); ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops); ++ my_hist = &per_cpu(preemptirqsoff_hist, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = 0xFFFFFFFFUL; ++ } ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops); ++#endif ++ ++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) ++ entry = debugfs_create_file("preemptirqsoff", 0644, ++ enable_root, (void *)&preemptirqsoff_enabled_data, ++ &enable_fops); ++#endif ++ ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ dentry = debugfs_create_dir(wakeup_latency_hist_dir, ++ latency_hist_root); ++ dentry_sharedprio = debugfs_create_dir( ++ wakeup_latency_hist_dir_sharedprio, dentry); ++ for_each_possible_cpu(i) { ++ sprintf(name, cpufmt, i); ++ ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(wakeup_latency_hist, i), ++ &latency_hist_fops); ++ my_hist = &per_cpu(wakeup_latency_hist, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = 0xFFFFFFFFUL; ++ ++ entry = debugfs_create_file(name, 0444, dentry_sharedprio, ++ &per_cpu(wakeup_latency_hist_sharedprio, i), ++ &latency_hist_fops); ++ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = 0xFFFFFFFFUL; ++ ++ sprintf(name, cpufmt_maxlatproc, i); ++ ++ mp = &per_cpu(wakeup_maxlatproc, i); ++ entry = debugfs_create_file(name, 0444, dentry, mp, ++ &maxlatproc_fops); ++ clear_maxlatprocdata(mp); ++ ++ mp = &per_cpu(wakeup_maxlatproc_sharedprio, i); ++ entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp, ++ &maxlatproc_fops); ++ clear_maxlatprocdata(mp); ++ } ++ entry = debugfs_create_file("pid", 0644, dentry, ++ (void *)&wakeup_pid, &pid_fops); ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)WAKEUP_LATENCY, &latency_hist_reset_fops); ++ entry = debugfs_create_file("reset", 0644, dentry_sharedprio, ++ (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops); ++ entry = debugfs_create_file("wakeup", 0644, ++ enable_root, (void *)&wakeup_latency_enabled_data, ++ &enable_fops); ++#endif ++ ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ dentry = debugfs_create_dir(missed_timer_offsets_dir, ++ latency_hist_root); ++ for_each_possible_cpu(i) { ++ sprintf(name, cpufmt, i); ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(missed_timer_offsets, i), &latency_hist_fops); ++ my_hist = &per_cpu(missed_timer_offsets, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = 0xFFFFFFFFUL; ++ ++ sprintf(name, cpufmt_maxlatproc, i); ++ mp = &per_cpu(missed_timer_offsets_maxlatproc, i); ++ entry = 
debugfs_create_file(name, 0444, dentry, mp, ++ &maxlatproc_fops); ++ clear_maxlatprocdata(mp); ++ } ++ entry = debugfs_create_file("pid", 0644, dentry, ++ (void *)&missed_timer_offsets_pid, &pid_fops); ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops); ++ entry = debugfs_create_file("missed_timer_offsets", 0644, ++ enable_root, (void *)&missed_timer_offsets_enabled_data, ++ &enable_fops); ++#endif ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ dentry = debugfs_create_dir(timerandwakeup_latency_hist_dir, ++ latency_hist_root); ++ for_each_possible_cpu(i) { ++ sprintf(name, cpufmt, i); ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(timerandwakeup_latency_hist, i), ++ &latency_hist_fops); ++ my_hist = &per_cpu(timerandwakeup_latency_hist, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = 0xFFFFFFFFUL; ++ ++ sprintf(name, cpufmt_maxlatproc, i); ++ mp = &per_cpu(timerandwakeup_maxlatproc, i); ++ entry = debugfs_create_file(name, 0444, dentry, mp, ++ &maxlatproc_fops); ++ clear_maxlatprocdata(mp); ++ } ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)TIMERANDWAKEUP_LATENCY, &latency_hist_reset_fops); ++ entry = debugfs_create_file("timerandwakeup", 0644, ++ enable_root, (void *)&timerandwakeup_enabled_data, ++ &enable_fops); ++#endif ++ return 0; ++} ++ ++__initcall(latency_hist_init); +Index: linux-3.2/kernel/trace/trace_irqsoff.c +=================================================================== +--- linux-3.2.orig/kernel/trace/trace_irqsoff.c ++++ linux-3.2/kernel/trace/trace_irqsoff.c +@@ -17,6 +17,7 @@ + #include + + #include "trace.h" ++#include + + static struct trace_array *irqsoff_trace __read_mostly; + static int tracer_enabled __read_mostly; +@@ -426,11 +427,13 @@ void start_critical_timings(void) + { + if (preempt_trace() || irq_trace()) + start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); ++ trace_preemptirqsoff_hist(TRACE_START, 1); + } + EXPORT_SYMBOL_GPL(start_critical_timings); + + void stop_critical_timings(void) + { ++ trace_preemptirqsoff_hist(TRACE_STOP, 0); + if (preempt_trace() || irq_trace()) + stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); + } +@@ -440,6 +443,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings) + #ifdef CONFIG_PROVE_LOCKING + void time_hardirqs_on(unsigned long a0, unsigned long a1) + { ++ trace_preemptirqsoff_hist(IRQS_ON, 0); + if (!preempt_trace() && irq_trace()) + stop_critical_timing(a0, a1); + } +@@ -448,6 +452,7 @@ void time_hardirqs_off(unsigned long a0, + { + if (!preempt_trace() && irq_trace()) + start_critical_timing(a0, a1); ++ trace_preemptirqsoff_hist(IRQS_OFF, 1); + } + + #else /* !CONFIG_PROVE_LOCKING */ +@@ -473,6 +478,7 @@ inline void print_irqtrace_events(struct + */ + void trace_hardirqs_on(void) + { ++ trace_preemptirqsoff_hist(IRQS_ON, 0); + if (!preempt_trace() && irq_trace()) + stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); + } +@@ -482,11 +488,13 @@ void trace_hardirqs_off(void) + { + if (!preempt_trace() && irq_trace()) + start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); ++ trace_preemptirqsoff_hist(IRQS_OFF, 1); + } + EXPORT_SYMBOL(trace_hardirqs_off); + + void trace_hardirqs_on_caller(unsigned long caller_addr) + { ++ trace_preemptirqsoff_hist(IRQS_ON, 0); + if (!preempt_trace() && irq_trace()) + stop_critical_timing(CALLER_ADDR0, caller_addr); + } +@@ -496,6 +504,7 @@ void trace_hardirqs_off_caller(unsigned + { + if (!preempt_trace() && irq_trace()) + 
start_critical_timing(CALLER_ADDR0, caller_addr); ++ trace_preemptirqsoff_hist(IRQS_OFF, 1); + } + EXPORT_SYMBOL(trace_hardirqs_off_caller); + +@@ -505,12 +514,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller) + #ifdef CONFIG_PREEMPT_TRACER + void trace_preempt_on(unsigned long a0, unsigned long a1) + { ++ trace_preemptirqsoff_hist(PREEMPT_ON, 0); + if (preempt_trace() && !irq_trace()) + stop_critical_timing(a0, a1); + } + + void trace_preempt_off(unsigned long a0, unsigned long a1) + { ++ trace_preemptirqsoff_hist(PREEMPT_ON, 1); + if (preempt_trace() && !irq_trace()) + start_critical_timing(a0, a1); + } +Index: linux-3.2/Documentation/hwlat_detector.txt +=================================================================== +--- /dev/null ++++ linux-3.2/Documentation/hwlat_detector.txt +@@ -0,0 +1,64 @@ ++Introduction: ++------------- ++ ++The module hwlat_detector is a special purpose kernel module that is used to ++detect large system latencies induced by the behavior of certain underlying ++hardware or firmware, independent of Linux itself. The code was developed ++originally to detect SMIs (System Management Interrupts) on x86 systems, ++however there is nothing x86 specific about this patchset. It was ++originally written for use by the "RT" patch since the Real Time ++kernel is highly latency sensitive. ++ ++SMIs are usually not serviced by the Linux kernel, which typically does not ++even know that they are occuring. SMIs are instead are set up by BIOS code ++and are serviced by BIOS code, usually for "critical" events such as ++management of thermal sensors and fans. Sometimes though, SMIs are used for ++other tasks and those tasks can spend an inordinate amount of time in the ++handler (sometimes measured in milliseconds). Obviously this is a problem if ++you are trying to keep event service latencies down in the microsecond range. ++ ++The hardware latency detector works by hogging all of the cpus for configurable ++amounts of time (by calling stop_machine()), polling the CPU Time Stamp Counter ++for some period, then looking for gaps in the TSC data. Any gap indicates a ++time when the polling was interrupted and since the machine is stopped and ++interrupts turned off the only thing that could do that would be an SMI. ++ ++Note that the SMI detector should *NEVER* be used in a production environment. ++It is intended to be run manually to determine if the hardware platform has a ++problem with long system firmware service routines. ++ ++Usage: ++------ ++ ++Loading the module hwlat_detector passing the parameter "enabled=1" (or by ++setting the "enable" entry in "hwlat_detector" debugfs toggled on) is the only ++step required to start the hwlat_detector. It is possible to redefine the ++threshold in microseconds (us) above which latency spikes will be taken ++into account (parameter "threshold="). ++ ++Example: ++ ++ # modprobe hwlat_detector enabled=1 threshold=100 ++ ++After the module is loaded, it creates a directory named "hwlat_detector" under ++the debugfs mountpoint, "/debug/hwlat_detector" for this text. It is necessary ++to have debugfs mounted, which might be on /sys/debug on your system. 
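[Editorial sketch, not part of the patch above: the usage text just quoted describes a "sample" debugfs pipe that blocks until the detector records a latency spike. As a rough illustration only, the following minimal userspace reader shows how such a pipe could be consumed. The debugfs path (/sys/kernel/debug/hwlat_detector/sample rather than the /debug prefix used in the text) and the record layout ("<sec>.<nsec>\t<usecs>\n") are assumptions taken from this document's description, not guarantees of the module's interface.]

/*
 * Minimal sketch of a consumer for the hwlat_detector "sample" pipe.
 * Assumes debugfs is mounted at /sys/kernel/debug and that each record
 * is "<seconds>.<nanoseconds>\t<latency in usecs>\n" as described above.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/hwlat_detector/sample";
	FILE *f = fopen(path, "r");	/* blocking read: waits for new samples */
	char line[128];

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}

	/* Each line is one detected latency spike; print until EOF or error. */
	while (fgets(line, sizeof(line), f)) {
		unsigned long sec, nsec;
		unsigned long long usecs;

		if (sscanf(line, "%lu.%lu\t%llu", &sec, &nsec, &usecs) == 3)
			printf("spike at %lu.%09lu: %llu us\n", sec, nsec, usecs);
	}

	fclose(f);
	return EXIT_SUCCESS;
}

[The sketch would typically be run after "modprobe hwlat_detector enabled=1 threshold=100" as shown in the example above; opening the file O_NONBLOCK instead would return a single sample rather than blocking, per the interface description that follows.]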
++ ++The /debug/hwlat_detector interface contains the following files: ++ ++count - number of latency spikes observed since last reset ++enable - a global enable/disable toggle (0/1), resets count ++max - maximum hardware latency actually observed (usecs) ++sample - a pipe from which to read current raw sample data ++ in the format ++ (can be opened O_NONBLOCK for a single sample) ++threshold - minimum latency value to be considered (usecs) ++width - time period to sample with CPUs held (usecs) ++ must be less than the total window size (enforced) ++window - total period of sampling, width being inside (usecs) ++ ++By default we will set width to 500,000 and window to 1,000,000, meaning that ++we will sample every 1,000,000 usecs (1s) for 500,000 usecs (0.5s). If we ++observe any latencies that exceed the threshold (initially 100 usecs), ++then we write to a global sample ring buffer of 8K samples, which is ++consumed by reading from the "sample" (pipe) debugfs file interface. +Index: linux-3.2/MAINTAINERS +=================================================================== +--- linux-3.2.orig/MAINTAINERS ++++ linux-3.2/MAINTAINERS +@@ -3008,6 +3008,15 @@ L: linuxppc-dev@lists.ozlabs.org + S: Odd Fixes + F: drivers/tty/hvc/ + ++HARDWARE LATENCY DETECTOR ++P: Jon Masters ++M: jcm@jonmasters.org ++W: http://www.kernel.org/pub/linux/kernel/people/jcm/hwlat_detector/ ++S: Supported ++L: linux-kernel@vger.kernel.org ++F: Documentation/hwlat_detector.txt ++F: drivers/misc/hwlat_detector.c ++ + HARDWARE MONITORING + M: Jean Delvare + M: Guenter Roeck +Index: linux-3.2/drivers/misc/Makefile +=================================================================== +--- linux-3.2.orig/drivers/misc/Makefile ++++ linux-3.2/drivers/misc/Makefile +@@ -48,3 +48,4 @@ obj-y += lis3lv02d/ + obj-y += carma/ + obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o + obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/ ++obj-$(CONFIG_HWLAT_DETECTOR) += hwlat_detector.o +Index: linux-3.2/drivers/misc/hwlat_detector.c +=================================================================== +--- /dev/null ++++ linux-3.2/drivers/misc/hwlat_detector.c +@@ -0,0 +1,1212 @@ ++/* ++ * hwlat_detector.c - A simple Hardware Latency detector. ++ * ++ * Use this module to detect large system latencies induced by the behavior of ++ * certain underlying system hardware or firmware, independent of Linux itself. ++ * The code was developed originally to detect the presence of SMIs on Intel ++ * and AMD systems, although there is no dependency upon x86 herein. ++ * ++ * The classical example usage of this module is in detecting the presence of ++ * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a ++ * somewhat special form of hardware interrupt spawned from earlier CPU debug ++ * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge ++ * LPC (or other device) to generate a special interrupt under certain ++ * circumstances, for example, upon expiration of a special SMI timer device, ++ * due to certain external thermal readings, on certain I/O address accesses, ++ * and other situations. An SMI hits a special CPU pin, triggers a special ++ * SMI mode (complete with special memory map), and the OS is unaware. 
++ * ++ * Although certain hardware-inducing latencies are necessary (for example, ++ * a modern system often requires an SMI handler for correct thermal control ++ * and remote management) they can wreak havoc upon any OS-level performance ++ * guarantees toward low-latency, especially when the OS is not even made ++ * aware of the presence of these interrupts. For this reason, we need a ++ * somewhat brute force mechanism to detect these interrupts. In this case, ++ * we do it by hogging all of the CPU(s) for configurable timer intervals, ++ * sampling the built-in CPU timer, looking for discontiguous readings. ++ * ++ * WARNING: This implementation necessarily introduces latencies. Therefore, ++ * you should NEVER use this module in a production environment ++ * requiring any kind of low-latency performance guarantee(s). ++ * ++ * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. ++ * ++ * Includes useful feedback from Clark Williams ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define BUF_SIZE_DEFAULT 262144UL /* 8K*(sizeof(entry)) */ ++#define BUF_FLAGS (RB_FL_OVERWRITE) /* no block on full */ ++#define U64STR_SIZE 22 /* 20 digits max */ ++ ++#define VERSION "1.0.0" ++#define BANNER "hwlat_detector: " ++#define DRVNAME "hwlat_detector" ++#define DEFAULT_SAMPLE_WINDOW 1000000 /* 1s */ ++#define DEFAULT_SAMPLE_WIDTH 500000 /* 0.5s */ ++#define DEFAULT_LAT_THRESHOLD 10 /* 10us */ ++ ++/* Module metadata */ ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Jon Masters "); ++MODULE_DESCRIPTION("A simple hardware latency detector"); ++MODULE_VERSION(VERSION); ++ ++/* Module parameters */ ++ ++static int debug; ++static int enabled; ++static int threshold; ++ ++module_param(debug, int, 0); /* enable debug */ ++module_param(enabled, int, 0); /* enable detector */ ++module_param(threshold, int, 0); /* latency threshold */ ++ ++/* Buffering and sampling */ ++ ++static struct ring_buffer *ring_buffer; /* sample buffer */ ++static DEFINE_MUTEX(ring_buffer_mutex); /* lock changes */ ++static unsigned long buf_size = BUF_SIZE_DEFAULT; ++static struct task_struct *kthread; /* sampling thread */ ++ ++/* DebugFS filesystem entries */ ++ ++static struct dentry *debug_dir; /* debugfs directory */ ++static struct dentry *debug_max; /* maximum TSC delta */ ++static struct dentry *debug_count; /* total detect count */ ++static struct dentry *debug_sample_width; /* sample width us */ ++static struct dentry *debug_sample_window; /* sample window us */ ++static struct dentry *debug_sample; /* raw samples us */ ++static struct dentry *debug_threshold; /* threshold us */ ++static struct dentry *debug_enable; /* enable/disable */ ++ ++/* Individual samples and global state */ ++ ++struct sample; /* latency sample */ ++struct data; /* Global state */ ++ ++/* Sampling functions */ ++static int __buffer_add_sample(struct sample *sample); ++static struct sample *buffer_get_sample(struct sample *sample); ++static int get_sample(void *unused); ++ ++/* Threading and state */ ++static int kthread_fn(void *unused); ++static int start_kthread(void); ++static int stop_kthread(void); ++static void __reset_stats(void); ++static int init_stats(void); ++ ++/* Debugfs interface */ ++static ssize_t simple_data_read(struct file 
*filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos, const u64 *entry); ++static ssize_t simple_data_write(struct file *filp, const char __user *ubuf, ++ size_t cnt, loff_t *ppos, u64 *entry); ++static int debug_sample_fopen(struct inode *inode, struct file *filp); ++static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos); ++static int debug_sample_release(struct inode *inode, struct file *filp); ++static int debug_enable_fopen(struct inode *inode, struct file *filp); ++static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos); ++static ssize_t debug_enable_fwrite(struct file *file, ++ const char __user *user_buffer, ++ size_t user_size, loff_t *offset); ++ ++/* Initialization functions */ ++static int init_debugfs(void); ++static void free_debugfs(void); ++static int detector_init(void); ++static void detector_exit(void); ++ ++/* Individual latency samples are stored here when detected and packed into ++ * the ring_buffer circular buffer, where they are overwritten when ++ * more than buf_size/sizeof(sample) samples are received. */ ++struct sample { ++ u64 seqnum; /* unique sequence */ ++ u64 duration; /* ktime delta */ ++ struct timespec timestamp; /* wall time */ ++ unsigned long lost; ++}; ++ ++/* keep the global state somewhere. Mostly used under stop_machine. */ ++static struct data { ++ ++ struct mutex lock; /* protect changes */ ++ ++ u64 count; /* total since reset */ ++ u64 max_sample; /* max hardware latency */ ++ u64 threshold; /* sample threshold level */ ++ ++ u64 sample_window; /* total sampling window (on+off) */ ++ u64 sample_width; /* active sampling portion of window */ ++ ++ atomic_t sample_open; /* whether the sample file is open */ ++ ++ wait_queue_head_t wq; /* waitqeue for new sample values */ ++ ++} data; ++ ++/** ++ * __buffer_add_sample - add a new latency sample recording to the ring buffer ++ * @sample: The new latency sample value ++ * ++ * This receives a new latency sample and records it in a global ring buffer. ++ * No additional locking is used in this case - suited for stop_machine use. ++ */ ++static int __buffer_add_sample(struct sample *sample) ++{ ++ return ring_buffer_write(ring_buffer, ++ sizeof(struct sample), sample); ++} ++ ++/** ++ * buffer_get_sample - remove a hardware latency sample from the ring buffer ++ * @sample: Pre-allocated storage for the sample ++ * ++ * This retrieves a hardware latency sample from the global circular buffer ++ */ ++static struct sample *buffer_get_sample(struct sample *sample) ++{ ++ struct ring_buffer_event *e = NULL; ++ struct sample *s = NULL; ++ unsigned int cpu = 0; ++ ++ if (!sample) ++ return NULL; ++ ++ mutex_lock(&ring_buffer_mutex); ++ for_each_online_cpu(cpu) { ++ e = ring_buffer_consume(ring_buffer, cpu, NULL, &sample->lost); ++ if (e) ++ break; ++ } ++ ++ if (e) { ++ s = ring_buffer_event_data(e); ++ memcpy(sample, s, sizeof(struct sample)); ++ } else ++ sample = NULL; ++ mutex_unlock(&ring_buffer_mutex); ++ ++ return sample; ++} ++ ++/** ++ * get_sample - sample the CPU TSC and look for likely hardware latencies ++ * @unused: This is not used but is a part of the stop_machine API ++ * ++ * Used to repeatedly capture the CPU TSC (or similar), looking for potential ++ * hardware-induced latency. Called under stop_machine, with data.lock held. 
++ */ ++static int get_sample(void *unused) ++{ ++ ktime_t start, t1, t2; ++ s64 diff, total = 0; ++ u64 sample = 0; ++ int ret = 1; ++ ++ start = ktime_get(); /* start timestamp */ ++ ++ do { ++ ++ t1 = ktime_get(); /* we'll look for a discontinuity */ ++ t2 = ktime_get(); ++ ++ total = ktime_to_us(ktime_sub(t2, start)); /* sample width */ ++ diff = ktime_to_us(ktime_sub(t2, t1)); /* current diff */ ++ ++ /* This shouldn't happen */ ++ if (diff < 0) { ++ printk(KERN_ERR BANNER "time running backwards\n"); ++ goto out; ++ } ++ ++ if (diff > sample) ++ sample = diff; /* only want highest value */ ++ ++ } while (total <= data.sample_width); ++ ++ /* If we exceed the threshold value, we have found a hardware latency */ ++ if (sample > data.threshold) { ++ struct sample s; ++ ++ data.count++; ++ s.seqnum = data.count; ++ s.duration = sample; ++ s.timestamp = CURRENT_TIME; ++ __buffer_add_sample(&s); ++ ++ /* Keep a running maximum ever recorded hardware latency */ ++ if (sample > data.max_sample) ++ data.max_sample = sample; ++ } ++ ++ ret = 0; ++out: ++ return ret; ++} ++ ++/* ++ * kthread_fn - The CPU time sampling/hardware latency detection kernel thread ++ * @unused: A required part of the kthread API. ++ * ++ * Used to periodically sample the CPU TSC via a call to get_sample. We ++ * use stop_machine, whith does (intentionally) introduce latency since we ++ * need to ensure nothing else might be running (and thus pre-empting). ++ * Obviously this should never be used in production environments. ++ * ++ * stop_machine will schedule us typically only on CPU0 which is fine for ++ * almost every real-world hardware latency situation - but we might later ++ * generalize this if we find there are any actualy systems with alternate ++ * SMI delivery or other non CPU0 hardware latencies. ++ */ ++static int kthread_fn(void *unused) ++{ ++ int err = 0; ++ u64 interval = 0; ++ ++ while (!kthread_should_stop()) { ++ ++ mutex_lock(&data.lock); ++ ++ err = stop_machine(get_sample, unused, 0); ++ if (err) { ++ /* Houston, we have a problem */ ++ mutex_unlock(&data.lock); ++ goto err_out; ++ } ++ ++ wake_up(&data.wq); /* wake up reader(s) */ ++ ++ interval = data.sample_window - data.sample_width; ++ do_div(interval, USEC_PER_MSEC); /* modifies interval value */ ++ ++ mutex_unlock(&data.lock); ++ ++ if (msleep_interruptible(interval)) ++ goto out; ++ } ++ goto out; ++err_out: ++ printk(KERN_ERR BANNER "could not call stop_machine, disabling\n"); ++ enabled = 0; ++out: ++ return err; ++ ++} ++ ++/** ++ * start_kthread - Kick off the hardware latency sampling/detector kthread ++ * ++ * This starts a kernel thread that will sit and sample the CPU timestamp ++ * counter (TSC or similar) and look for potential hardware latencies. ++ */ ++static int start_kthread(void) ++{ ++ kthread = kthread_run(kthread_fn, NULL, ++ DRVNAME); ++ if (IS_ERR(kthread)) { ++ printk(KERN_ERR BANNER "could not start sampling thread\n"); ++ enabled = 0; ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++/** ++ * stop_kthread - Inform the hardware latency samping/detector kthread to stop ++ * ++ * This kicks the running hardware latency sampling/detector kernel thread and ++ * tells it to stop sampling now. Use this on unload and at system shutdown. ++ */ ++static int stop_kthread(void) ++{ ++ int ret; ++ ++ ret = kthread_stop(kthread); ++ ++ return ret; ++} ++ ++/** ++ * __reset_stats - Reset statistics for the hardware latency detector ++ * ++ * We use data to store various statistics and global state. 
We call this ++ * function in order to reset those when "enable" is toggled on or off, and ++ * also at initialization. Should be called with data.lock held. ++ */ ++static void __reset_stats(void) ++{ ++ data.count = 0; ++ data.max_sample = 0; ++ ring_buffer_reset(ring_buffer); /* flush out old sample entries */ ++} ++ ++/** ++ * init_stats - Setup global state statistics for the hardware latency detector ++ * ++ * We use data to store various statistics and global state. We also use ++ * a global ring buffer (ring_buffer) to keep raw samples of detected hardware ++ * induced system latencies. This function initializes these structures and ++ * allocates the global ring buffer also. ++ */ ++static int init_stats(void) ++{ ++ int ret = -ENOMEM; ++ ++ mutex_init(&data.lock); ++ init_waitqueue_head(&data.wq); ++ atomic_set(&data.sample_open, 0); ++ ++ ring_buffer = ring_buffer_alloc(buf_size, BUF_FLAGS); ++ ++ if (WARN(!ring_buffer, KERN_ERR BANNER ++ "failed to allocate ring buffer!\n")) ++ goto out; ++ ++ __reset_stats(); ++ data.threshold = DEFAULT_LAT_THRESHOLD; /* threshold us */ ++ data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */ ++ data.sample_width = DEFAULT_SAMPLE_WIDTH; /* width us */ ++ ++ ret = 0; ++ ++out: ++ return ret; ++ ++} ++ ++/* ++ * simple_data_read - Wrapper read function for global state debugfs entries ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * @entry: The entry to read from ++ * ++ * This function provides a generic read implementation for the global state ++ * "data" structure debugfs filesystem entries. It would be nice to use ++ * simple_attr_read directly, but we need to make sure that the data.lock ++ * spinlock is held during the actual read (even though we likely won't ever ++ * actually race here as the updater runs under a stop_machine context). ++ */ ++static ssize_t simple_data_read(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos, const u64 *entry) ++{ ++ char buf[U64STR_SIZE]; ++ u64 val = 0; ++ int len = 0; ++ ++ memset(buf, 0, sizeof(buf)); ++ ++ if (!entry) ++ return -EFAULT; ++ ++ mutex_lock(&data.lock); ++ val = *entry; ++ mutex_unlock(&data.lock); ++ ++ len = snprintf(buf, sizeof(buf), "%llu\n", (unsigned long long)val); ++ ++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); ++ ++} ++ ++/* ++ * simple_data_write - Wrapper write function for global state debugfs entries ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to write value from ++ * @cnt: The maximum number of bytes to write ++ * @ppos: The current "file" position ++ * @entry: The entry to write to ++ * ++ * This function provides a generic write implementation for the global state ++ * "data" structure debugfs filesystem entries. It would be nice to use ++ * simple_attr_write directly, but we need to make sure that the data.lock ++ * spinlock is held during the actual write (even though we likely won't ever ++ * actually race here as the updater runs under a stop_machine context). 
++ */ ++static ssize_t simple_data_write(struct file *filp, const char __user *ubuf, ++ size_t cnt, loff_t *ppos, u64 *entry) ++{ ++ char buf[U64STR_SIZE]; ++ int csize = min(cnt, sizeof(buf)); ++ u64 val = 0; ++ int err = 0; ++ ++ memset(buf, '\0', sizeof(buf)); ++ if (copy_from_user(buf, ubuf, csize)) ++ return -EFAULT; ++ ++ buf[U64STR_SIZE-1] = '\0'; /* just in case */ ++ err = strict_strtoull(buf, 10, &val); ++ if (err) ++ return -EINVAL; ++ ++ mutex_lock(&data.lock); ++ *entry = val; ++ mutex_unlock(&data.lock); ++ ++ return csize; ++} ++ ++/** ++ * debug_count_fopen - Open function for "count" debugfs entry ++ * @inode: The in-kernel inode representation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function provides an open implementation for the "count" debugfs ++ * interface to the hardware latency detector. ++ */ ++static int debug_count_fopen(struct inode *inode, struct file *filp) ++{ ++ return 0; ++} ++ ++/** ++ * debug_count_fread - Read function for "count" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * ++ * This function provides a read implementation for the "count" debugfs ++ * interface to the hardware latency detector. Can be used to read the ++ * number of latency readings exceeding the configured threshold since ++ * the detector was last reset (e.g. by writing a zero into "count"). ++ */ ++static ssize_t debug_count_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ return simple_data_read(filp, ubuf, cnt, ppos, &data.count); ++} ++ ++/** ++ * debug_count_fwrite - Write function for "count" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that contains the value to write ++ * @cnt: The maximum number of bytes to write to "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function provides a write implementation for the "count" debugfs ++ * interface to the hardware latency detector. Can be used to write a ++ * desired value, especially to zero the total count. ++ */ ++static ssize_t debug_count_fwrite(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, ++ loff_t *ppos) ++{ ++ return simple_data_write(filp, ubuf, cnt, ppos, &data.count); ++} ++ ++/** ++ * debug_enable_fopen - Dummy open function for "enable" debugfs interface ++ * @inode: The in-kernel inode representation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function provides an open implementation for the "enable" debugfs ++ * interface to the hardware latency detector. ++ */ ++static int debug_enable_fopen(struct inode *inode, struct file *filp) ++{ ++ return 0; ++} ++ ++/** ++ * debug_enable_fread - Read function for "enable" debugfs interface ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * ++ * This function provides a read implementation for the "enable" debugfs ++ * interface to the hardware latency detector. Can be used to determine ++ * whether the detector is currently enabled ("0\n" or "1\n" returned). 
++ */ ++static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ char buf[4]; ++ ++ if ((cnt < sizeof(buf)) || (*ppos)) ++ return 0; ++ ++ buf[0] = enabled ? '1' : '0'; ++ buf[1] = '\n'; ++ buf[2] = '\0'; ++ if (copy_to_user(ubuf, buf, strlen(buf))) ++ return -EFAULT; ++ return *ppos = strlen(buf); ++} ++ ++/** ++ * debug_enable_fwrite - Write function for "enable" debugfs interface ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that contains the value to write ++ * @cnt: The maximum number of bytes to write to "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function provides a write implementation for the "enable" debugfs ++ * interface to the hardware latency detector. Can be used to enable or ++ * disable the detector, which will have the side-effect of possibly ++ * also resetting the global stats and kicking off the measuring ++ * kthread (on an enable) or the converse (upon a disable). ++ */ ++static ssize_t debug_enable_fwrite(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, ++ loff_t *ppos) ++{ ++ char buf[4]; ++ int csize = min(cnt, sizeof(buf)); ++ long val = 0; ++ int err = 0; ++ ++ memset(buf, '\0', sizeof(buf)); ++ if (copy_from_user(buf, ubuf, csize)) ++ return -EFAULT; ++ ++ buf[sizeof(buf)-1] = '\0'; /* just in case */ ++ err = strict_strtoul(buf, 10, &val); ++ if (0 != err) ++ return -EINVAL; ++ ++ if (val) { ++ if (enabled) ++ goto unlock; ++ enabled = 1; ++ __reset_stats(); ++ if (start_kthread()) ++ return -EFAULT; ++ } else { ++ if (!enabled) ++ goto unlock; ++ enabled = 0; ++ err = stop_kthread(); ++ if (err) { ++ printk(KERN_ERR BANNER "cannot stop kthread\n"); ++ return -EFAULT; ++ } ++ wake_up(&data.wq); /* reader(s) should return */ ++ } ++unlock: ++ return csize; ++} ++ ++/** ++ * debug_max_fopen - Open function for "max" debugfs entry ++ * @inode: The in-kernel inode representation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function provides an open implementation for the "max" debugfs ++ * interface to the hardware latency detector. ++ */ ++static int debug_max_fopen(struct inode *inode, struct file *filp) ++{ ++ return 0; ++} ++ ++/** ++ * debug_max_fread - Read function for "max" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * ++ * This function provides a read implementation for the "max" debugfs ++ * interface to the hardware latency detector. Can be used to determine ++ * the maximum latency value observed since it was last reset. ++ */ ++static ssize_t debug_max_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ return simple_data_read(filp, ubuf, cnt, ppos, &data.max_sample); ++} ++ ++/** ++ * debug_max_fwrite - Write function for "max" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that contains the value to write ++ * @cnt: The maximum number of bytes to write to "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function provides a write implementation for the "max" debugfs ++ * interface to the hardware latency detector. 
Can be used to reset the ++ * maximum or set it to some other desired value - if, then, subsequent ++ * measurements exceed this value, the maximum will be updated. ++ */ ++static ssize_t debug_max_fwrite(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, ++ loff_t *ppos) ++{ ++ return simple_data_write(filp, ubuf, cnt, ppos, &data.max_sample); ++} ++ ++ ++/** ++ * debug_sample_fopen - An open function for "sample" debugfs interface ++ * @inode: The in-kernel inode representation of this debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function handles opening the "sample" file within the hardware ++ * latency detector debugfs directory interface. This file is used to read ++ * raw samples from the global ring_buffer and allows the user to see a ++ * running latency history. Can be opened blocking or non-blocking, ++ * affecting whether it behaves as a buffer read pipe, or does not. ++ * Implements simple locking to prevent multiple simultaneous use. ++ */ ++static int debug_sample_fopen(struct inode *inode, struct file *filp) ++{ ++ if (!atomic_add_unless(&data.sample_open, 1, 1)) ++ return -EBUSY; ++ else ++ return 0; ++} ++ ++/** ++ * debug_sample_fread - A read function for "sample" debugfs interface ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that will contain the samples read ++ * @cnt: The maximum bytes to read from the debugfs "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function handles reading from the "sample" file within the hardware ++ * latency detector debugfs directory interface. This file is used to read ++ * raw samples from the global ring_buffer and allows the user to see a ++ * running latency history. By default this will block pending a new ++ * value written into the sample buffer, unless there are already a ++ * number of value(s) waiting in the buffer, or the sample file was ++ * previously opened in a non-blocking mode of operation. ++ */ ++static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ int len = 0; ++ char buf[64]; ++ struct sample *sample = NULL; ++ ++ if (!enabled) ++ return 0; ++ ++ sample = kzalloc(sizeof(struct sample), GFP_KERNEL); ++ if (!sample) ++ return -ENOMEM; ++ ++ while (!buffer_get_sample(sample)) { ++ ++ DEFINE_WAIT(wait); ++ ++ if (filp->f_flags & O_NONBLOCK) { ++ len = -EAGAIN; ++ goto out; ++ } ++ ++ prepare_to_wait(&data.wq, &wait, TASK_INTERRUPTIBLE); ++ schedule(); ++ finish_wait(&data.wq, &wait); ++ ++ if (signal_pending(current)) { ++ len = -EINTR; ++ goto out; ++ } ++ ++ if (!enabled) { /* enable was toggled */ ++ len = 0; ++ goto out; ++ } ++ } ++ ++ len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\n", ++ sample->timestamp.tv_sec, ++ sample->timestamp.tv_nsec, ++ sample->duration); ++ ++ ++ /* handling partial reads is more trouble than it's worth */ ++ if (len > cnt) ++ goto out; ++ ++ if (copy_to_user(ubuf, buf, len)) ++ len = -EFAULT; ++ ++out: ++ kfree(sample); ++ return len; ++} ++ ++/** ++ * debug_sample_release - Release function for "sample" debugfs interface ++ * @inode: The in-kernel inode represenation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function completes the close of the debugfs interface "sample" file. ++ * Frees the sample_open "lock" so that other users may open the interface. 
++ */ ++static int debug_sample_release(struct inode *inode, struct file *filp) ++{ ++ atomic_dec(&data.sample_open); ++ ++ return 0; ++} ++ ++/** ++ * debug_threshold_fopen - Open function for "threshold" debugfs entry ++ * @inode: The in-kernel inode representation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function provides an open implementation for the "threshold" debugfs ++ * interface to the hardware latency detector. ++ */ ++static int debug_threshold_fopen(struct inode *inode, struct file *filp) ++{ ++ return 0; ++} ++ ++/** ++ * debug_threshold_fread - Read function for "threshold" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * ++ * This function provides a read implementation for the "threshold" debugfs ++ * interface to the hardware latency detector. It can be used to determine ++ * the current threshold level at which a latency will be recorded in the ++ * global ring buffer, typically on the order of 10us. ++ */ ++static ssize_t debug_threshold_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ return simple_data_read(filp, ubuf, cnt, ppos, &data.threshold); ++} ++ ++/** ++ * debug_threshold_fwrite - Write function for "threshold" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that contains the value to write ++ * @cnt: The maximum number of bytes to write to "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function provides a write implementation for the "threshold" debugfs ++ * interface to the hardware latency detector. It can be used to configure ++ * the threshold level at which any subsequently detected latencies will ++ * be recorded into the global ring buffer. ++ */ ++static ssize_t debug_threshold_fwrite(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, ++ loff_t *ppos) ++{ ++ int ret; ++ ++ ret = simple_data_write(filp, ubuf, cnt, ppos, &data.threshold); ++ ++ if (enabled) ++ wake_up_process(kthread); ++ ++ return ret; ++} ++ ++/** ++ * debug_width_fopen - Open function for "width" debugfs entry ++ * @inode: The in-kernel inode representation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function provides an open implementation for the "width" debugfs ++ * interface to the hardware latency detector. ++ */ ++static int debug_width_fopen(struct inode *inode, struct file *filp) ++{ ++ return 0; ++} ++ ++/** ++ * debug_width_fread - Read function for "width" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * ++ * This function provides a read implementation for the "width" debugfs ++ * interface to the hardware latency detector. It can be used to determine ++ * for how many us of the total window us we will actively sample for any ++ * hardware-induced latecy periods. Obviously, it is not possible to ++ * sample constantly and have the system respond to a sample reader, or, ++ * worse, without having the system appear to have gone out to lunch. 
++ */ ++static ssize_t debug_width_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_width); ++} ++ ++/** ++ * debug_width_fwrite - Write function for "width" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that contains the value to write ++ * @cnt: The maximum number of bytes to write to "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function provides a write implementation for the "width" debugfs ++ * interface to the hardware latency detector. It can be used to configure ++ * for how many us of the total window us we will actively sample for any ++ * hardware-induced latency periods. Obviously, it is not possible to ++ * sample constantly and have the system respond to a sample reader, or, ++ * worse, without having the system appear to have gone out to lunch. It ++ * is enforced that width is less that the total window size. ++ */ ++static ssize_t debug_width_fwrite(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, ++ loff_t *ppos) ++{ ++ char buf[U64STR_SIZE]; ++ int csize = min(cnt, sizeof(buf)); ++ u64 val = 0; ++ int err = 0; ++ ++ memset(buf, '\0', sizeof(buf)); ++ if (copy_from_user(buf, ubuf, csize)) ++ return -EFAULT; ++ ++ buf[U64STR_SIZE-1] = '\0'; /* just in case */ ++ err = strict_strtoull(buf, 10, &val); ++ if (0 != err) ++ return -EINVAL; ++ ++ mutex_lock(&data.lock); ++ if (val < data.sample_window) ++ data.sample_width = val; ++ else { ++ mutex_unlock(&data.lock); ++ return -EINVAL; ++ } ++ mutex_unlock(&data.lock); ++ ++ if (enabled) ++ wake_up_process(kthread); ++ ++ return csize; ++} ++ ++/** ++ * debug_window_fopen - Open function for "window" debugfs entry ++ * @inode: The in-kernel inode representation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function provides an open implementation for the "window" debugfs ++ * interface to the hardware latency detector. The window is the total time ++ * in us that will be considered one sample period. Conceptually, windows ++ * occur back-to-back and contain a sample width period during which ++ * actual sampling occurs. ++ */ ++static int debug_window_fopen(struct inode *inode, struct file *filp) ++{ ++ return 0; ++} ++ ++/** ++ * debug_window_fread - Read function for "window" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * ++ * This function provides a read implementation for the "window" debugfs ++ * interface to the hardware latency detector. The window is the total time ++ * in us that will be considered one sample period. Conceptually, windows ++ * occur back-to-back and contain a sample width period during which ++ * actual sampling occurs. Can be used to read the total window size. 
++ */ ++static ssize_t debug_window_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_window); ++} ++ ++/** ++ * debug_window_fwrite - Write function for "window" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that contains the value to write ++ * @cnt: The maximum number of bytes to write to "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function provides a write implementation for the "window" debufds ++ * interface to the hardware latency detetector. The window is the total time ++ * in us that will be considered one sample period. Conceptually, windows ++ * occur back-to-back and contain a sample width period during which ++ * actual sampling occurs. Can be used to write a new total window size. It ++ * is enfoced that any value written must be greater than the sample width ++ * size, or an error results. ++ */ ++static ssize_t debug_window_fwrite(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, ++ loff_t *ppos) ++{ ++ char buf[U64STR_SIZE]; ++ int csize = min(cnt, sizeof(buf)); ++ u64 val = 0; ++ int err = 0; ++ ++ memset(buf, '\0', sizeof(buf)); ++ if (copy_from_user(buf, ubuf, csize)) ++ return -EFAULT; ++ ++ buf[U64STR_SIZE-1] = '\0'; /* just in case */ ++ err = strict_strtoull(buf, 10, &val); ++ if (0 != err) ++ return -EINVAL; ++ ++ mutex_lock(&data.lock); ++ if (data.sample_width < val) ++ data.sample_window = val; ++ else { ++ mutex_unlock(&data.lock); ++ return -EINVAL; ++ } ++ mutex_unlock(&data.lock); ++ ++ return csize; ++} ++ ++/* ++ * Function pointers for the "count" debugfs file operations ++ */ ++static const struct file_operations count_fops = { ++ .open = debug_count_fopen, ++ .read = debug_count_fread, ++ .write = debug_count_fwrite, ++ .owner = THIS_MODULE, ++}; ++ ++/* ++ * Function pointers for the "enable" debugfs file operations ++ */ ++static const struct file_operations enable_fops = { ++ .open = debug_enable_fopen, ++ .read = debug_enable_fread, ++ .write = debug_enable_fwrite, ++ .owner = THIS_MODULE, ++}; ++ ++/* ++ * Function pointers for the "max" debugfs file operations ++ */ ++static const struct file_operations max_fops = { ++ .open = debug_max_fopen, ++ .read = debug_max_fread, ++ .write = debug_max_fwrite, ++ .owner = THIS_MODULE, ++}; ++ ++/* ++ * Function pointers for the "sample" debugfs file operations ++ */ ++static const struct file_operations sample_fops = { ++ .open = debug_sample_fopen, ++ .read = debug_sample_fread, ++ .release = debug_sample_release, ++ .owner = THIS_MODULE, ++}; ++ ++/* ++ * Function pointers for the "threshold" debugfs file operations ++ */ ++static const struct file_operations threshold_fops = { ++ .open = debug_threshold_fopen, ++ .read = debug_threshold_fread, ++ .write = debug_threshold_fwrite, ++ .owner = THIS_MODULE, ++}; ++ ++/* ++ * Function pointers for the "width" debugfs file operations ++ */ ++static const struct file_operations width_fops = { ++ .open = debug_width_fopen, ++ .read = debug_width_fread, ++ .write = debug_width_fwrite, ++ .owner = THIS_MODULE, ++}; ++ ++/* ++ * Function pointers for the "window" debugfs file operations ++ */ ++static const struct file_operations window_fops = { ++ .open = debug_window_fopen, ++ .read = debug_window_fread, ++ .write = debug_window_fwrite, ++ .owner = THIS_MODULE, ++}; ++ ++/** ++ * init_debugfs - A function to initialize the debugfs interface files ++ * ++ * This 
function creates entries in debugfs for "hwlat_detector", including ++ * files to read values from the detector, current samples, and the ++ * maximum sample that has been captured since the hardware latency ++ * dectector was started. ++ */ ++static int init_debugfs(void) ++{ ++ int ret = -ENOMEM; ++ ++ debug_dir = debugfs_create_dir(DRVNAME, NULL); ++ if (!debug_dir) ++ goto err_debug_dir; ++ ++ debug_sample = debugfs_create_file("sample", 0444, ++ debug_dir, NULL, ++ &sample_fops); ++ if (!debug_sample) ++ goto err_sample; ++ ++ debug_count = debugfs_create_file("count", 0444, ++ debug_dir, NULL, ++ &count_fops); ++ if (!debug_count) ++ goto err_count; ++ ++ debug_max = debugfs_create_file("max", 0444, ++ debug_dir, NULL, ++ &max_fops); ++ if (!debug_max) ++ goto err_max; ++ ++ debug_sample_window = debugfs_create_file("window", 0644, ++ debug_dir, NULL, ++ &window_fops); ++ if (!debug_sample_window) ++ goto err_window; ++ ++ debug_sample_width = debugfs_create_file("width", 0644, ++ debug_dir, NULL, ++ &width_fops); ++ if (!debug_sample_width) ++ goto err_width; ++ ++ debug_threshold = debugfs_create_file("threshold", 0644, ++ debug_dir, NULL, ++ &threshold_fops); ++ if (!debug_threshold) ++ goto err_threshold; ++ ++ debug_enable = debugfs_create_file("enable", 0644, ++ debug_dir, &enabled, ++ &enable_fops); ++ if (!debug_enable) ++ goto err_enable; ++ ++ else { ++ ret = 0; ++ goto out; ++ } ++ ++err_enable: ++ debugfs_remove(debug_threshold); ++err_threshold: ++ debugfs_remove(debug_sample_width); ++err_width: ++ debugfs_remove(debug_sample_window); ++err_window: ++ debugfs_remove(debug_max); ++err_max: ++ debugfs_remove(debug_count); ++err_count: ++ debugfs_remove(debug_sample); ++err_sample: ++ debugfs_remove(debug_dir); ++err_debug_dir: ++out: ++ return ret; ++} ++ ++/** ++ * free_debugfs - A function to cleanup the debugfs file interface ++ */ ++static void free_debugfs(void) ++{ ++ /* could also use a debugfs_remove_recursive */ ++ debugfs_remove(debug_enable); ++ debugfs_remove(debug_threshold); ++ debugfs_remove(debug_sample_width); ++ debugfs_remove(debug_sample_window); ++ debugfs_remove(debug_max); ++ debugfs_remove(debug_count); ++ debugfs_remove(debug_sample); ++ debugfs_remove(debug_dir); ++} ++ ++/** ++ * detector_init - Standard module initialization code ++ */ ++static int detector_init(void) ++{ ++ int ret = -ENOMEM; ++ ++ printk(KERN_INFO BANNER "version %s\n", VERSION); ++ ++ ret = init_stats(); ++ if (0 != ret) ++ goto out; ++ ++ ret = init_debugfs(); ++ if (0 != ret) ++ goto err_stats; ++ ++ if (enabled) ++ ret = start_kthread(); ++ ++ goto out; ++ ++err_stats: ++ ring_buffer_free(ring_buffer); ++out: ++ return ret; ++ ++} ++ ++/** ++ * detector_exit - Standard module cleanup code ++ */ ++static void detector_exit(void) ++{ ++ int err; ++ ++ if (enabled) { ++ enabled = 0; ++ err = stop_kthread(); ++ if (err) ++ printk(KERN_ERR BANNER "cannot stop kthread\n"); ++ } ++ ++ free_debugfs(); ++ ring_buffer_free(ring_buffer); /* free up the ring buffer */ ++ ++} ++ ++module_init(detector_init); ++module_exit(detector_exit); +Index: linux-3.2/localversion-rt +=================================================================== +Index: linux-3.2/arch/arm/kernel/early_printk.c +=================================================================== +--- linux-3.2.orig/arch/arm/kernel/early_printk.c ++++ linux-3.2/arch/arm/kernel/early_printk.c +@@ -29,28 +29,17 @@ static void early_console_write(struct c + early_write(s, n); + } + +-static struct console early_console = { 
++static struct console early_console_dev = { + .name = "earlycon", + .write = early_console_write, + .flags = CON_PRINTBUFFER | CON_BOOT, + .index = -1, + }; + +-asmlinkage void early_printk(const char *fmt, ...) +-{ +- char buf[512]; +- int n; +- va_list ap; +- +- va_start(ap, fmt); +- n = vscnprintf(buf, sizeof(buf), fmt, ap); +- early_write(buf, n); +- va_end(ap); +-} +- + static int __init setup_early_printk(char *buf) + { +- register_console(&early_console); ++ early_console = &early_console_dev; ++ register_console(&early_console_dev); + return 0; + } + +Index: linux-3.2/arch/blackfin/kernel/early_printk.c +=================================================================== +--- linux-3.2.orig/arch/blackfin/kernel/early_printk.c ++++ linux-3.2/arch/blackfin/kernel/early_printk.c +@@ -25,8 +25,6 @@ extern struct console *bfin_earlyserial_ + extern struct console *bfin_jc_early_init(void); + #endif + +-static struct console *early_console; +- + /* Default console */ + #define DEFAULT_PORT 0 + #define DEFAULT_CFLAG CS8|B57600 +Index: linux-3.2/arch/microblaze/kernel/early_printk.c +=================================================================== +--- linux-3.2.orig/arch/microblaze/kernel/early_printk.c ++++ linux-3.2/arch/microblaze/kernel/early_printk.c +@@ -21,7 +21,6 @@ + #include + #include + +-static u32 early_console_initialized; + static u32 base_addr; + + #ifdef CONFIG_SERIAL_UARTLITE_CONSOLE +@@ -109,27 +108,11 @@ static struct console early_serial_uart1 + }; + #endif /* CONFIG_SERIAL_8250_CONSOLE */ + +-static struct console *early_console; +- +-void early_printk(const char *fmt, ...) +-{ +- char buf[512]; +- int n; +- va_list ap; +- +- if (early_console_initialized) { +- va_start(ap, fmt); +- n = vscnprintf(buf, 512, fmt, ap); +- early_console->write(early_console, buf, n); +- va_end(ap); +- } +-} +- + int __init setup_early_printk(char *opt) + { + int version = 0; + +- if (early_console_initialized) ++ if (early_console) + return 1; + + base_addr = of_early_console(&version); +@@ -159,7 +142,6 @@ int __init setup_early_printk(char *opt) + } + + register_console(early_console); +- early_console_initialized = 1; + return 0; + } + return 1; +@@ -169,7 +151,7 @@ int __init setup_early_printk(char *opt) + * only for early console because of performance degression */ + void __init remap_early_printk(void) + { +- if (!early_console_initialized || !early_console) ++ if (!early_console) + return; + printk(KERN_INFO "early_printk_console remaping from 0x%x to ", + base_addr); +@@ -179,9 +161,9 @@ void __init remap_early_printk(void) + + void __init disable_early_printk(void) + { +- if (!early_console_initialized || !early_console) ++ if (!early_console) + return; + printk(KERN_WARNING "disabling early console\n"); + unregister_console(early_console); +- early_console_initialized = 0; ++ early_console = NULL; + } +Index: linux-3.2/arch/mips/kernel/early_printk.c +=================================================================== +--- linux-3.2.orig/arch/mips/kernel/early_printk.c ++++ linux-3.2/arch/mips/kernel/early_printk.c +@@ -25,20 +25,18 @@ early_console_write(struct console *con, + } + } + +-static struct console early_console __initdata = { ++static struct console early_console_prom = { + .name = "early", + .write = early_console_write, + .flags = CON_PRINTBUFFER | CON_BOOT, + .index = -1 + }; + +-static int early_console_initialized __initdata; +- + void __init setup_early_printk(void) + { +- if (early_console_initialized) ++ if (early_console) + return; +- 
early_console_initialized = 1; ++ early_console = &early_console_prom; + +- register_console(&early_console); ++ register_console(&early_console_prom); + } +Index: linux-3.2/arch/powerpc/kernel/udbg.c +=================================================================== +--- linux-3.2.orig/arch/powerpc/kernel/udbg.c ++++ linux-3.2/arch/powerpc/kernel/udbg.c +@@ -182,15 +182,13 @@ static struct console udbg_console = { + .index = 0, + }; + +-static int early_console_initialized; +- + /* + * Called by setup_system after ppc_md->probe and ppc_md->early_init. + * Call it again after setting udbg_putc in ppc_md->setup_arch. + */ + void __init register_early_udbg_console(void) + { +- if (early_console_initialized) ++ if (early_console) + return; + + if (!udbg_putc) +@@ -200,7 +198,7 @@ void __init register_early_udbg_console( + printk(KERN_INFO "early console immortal !\n"); + udbg_console.flags &= ~CON_BOOT; + } +- early_console_initialized = 1; ++ early_console = &udbg_console; + register_console(&udbg_console); + } + +Index: linux-3.2/arch/sh/kernel/sh_bios.c +=================================================================== +--- linux-3.2.orig/arch/sh/kernel/sh_bios.c ++++ linux-3.2/arch/sh/kernel/sh_bios.c +@@ -144,8 +144,6 @@ static struct console bios_console = { + .index = -1, + }; + +-static struct console *early_console; +- + static int __init setup_early_printk(char *buf) + { + int keep_early = 0; +Index: linux-3.2/arch/sparc/kernel/setup_32.c +=================================================================== +--- linux-3.2.orig/arch/sparc/kernel/setup_32.c ++++ linux-3.2/arch/sparc/kernel/setup_32.c +@@ -221,6 +221,7 @@ void __init setup_arch(char **cmdline_p) + + boot_flags_init(*cmdline_p); + ++ early_console = &prom_early_console; + register_console(&prom_early_console); + + /* Set sparc_cpu_model */ +Index: linux-3.2/arch/sparc/kernel/setup_64.c +=================================================================== +--- linux-3.2.orig/arch/sparc/kernel/setup_64.c ++++ linux-3.2/arch/sparc/kernel/setup_64.c +@@ -477,6 +477,12 @@ static void __init init_sparc64_elf_hwca + popc_patch(); + } + ++static inline void register_prom_console(void) ++{ ++ early_console = &prom_early_console; ++ register_console(&prom_early_console); ++} ++ + void __init setup_arch(char **cmdline_p) + { + /* Initialize PROM console and command line. */ +@@ -488,7 +494,7 @@ void __init setup_arch(char **cmdline_p) + #ifdef CONFIG_EARLYFB + if (btext_find_display()) + #endif +- register_console(&prom_early_console); ++ register_prom_console(); + + if (tlb_type == hypervisor) + printk("ARCH: SUN4V\n"); +Index: linux-3.2/arch/tile/kernel/early_printk.c +=================================================================== +--- linux-3.2.orig/arch/tile/kernel/early_printk.c ++++ linux-3.2/arch/tile/kernel/early_printk.c +@@ -32,25 +32,8 @@ static struct console early_hv_console = + }; + + /* Direct interface for emergencies */ +-static struct console *early_console = &early_hv_console; +-static int early_console_initialized; + static int early_console_complete; + +-static void early_vprintk(const char *fmt, va_list ap) +-{ +- char buf[512]; +- int n = vscnprintf(buf, sizeof(buf), fmt, ap); +- early_console->write(early_console, buf, n); +-} +- +-void early_printk(const char *fmt, ...) +-{ +- va_list ap; +- va_start(ap, fmt); +- early_vprintk(fmt, ap); +- va_end(ap); +-} +- + void early_panic(const char *fmt, ...) 
+ { + va_list ap; +@@ -68,14 +51,13 @@ static int __initdata keep_early; + + static int __init setup_early_printk(char *str) + { +- if (early_console_initialized) ++ if (early_console) + return 1; + + if (str != NULL && strncmp(str, "keep", 4) == 0) + keep_early = 1; + + early_console = &early_hv_console; +- early_console_initialized = 1; + register_console(early_console); + + return 0; +@@ -84,12 +66,12 @@ static int __init setup_early_printk(cha + void __init disable_early_printk(void) + { + early_console_complete = 1; +- if (!early_console_initialized || !early_console) ++ if (!early_console) + return; + if (!keep_early) { + early_printk("disabling early console\n"); + unregister_console(early_console); +- early_console_initialized = 0; ++ early_console = NULL; + } else { + early_printk("keeping early console\n"); + } +@@ -97,7 +79,7 @@ void __init disable_early_printk(void) + + void warn_early_printk(void) + { +- if (early_console_complete || early_console_initialized) ++ if (early_console_complete || early_console) + return; + early_printk("\ + Machine shutting down before console output is fully initialized.\n\ +Index: linux-3.2/arch/um/kernel/early_printk.c +=================================================================== +--- linux-3.2.orig/arch/um/kernel/early_printk.c ++++ linux-3.2/arch/um/kernel/early_printk.c +@@ -16,7 +16,7 @@ static void early_console_write(struct c + um_early_printk(s, n); + } + +-static struct console early_console = { ++static struct console early_console_dev = { + .name = "earlycon", + .write = early_console_write, + .flags = CON_BOOT, +@@ -25,8 +25,10 @@ static struct console early_console = { + + static int __init setup_early_printk(char *buf) + { +- register_console(&early_console); +- ++ if (!early_console) { ++ early_console = &early_console_dev; ++ register_console(&early_console_dev); ++ } + return 0; + } + +Index: linux-3.2/arch/unicore32/kernel/early_printk.c +=================================================================== +--- linux-3.2.orig/arch/unicore32/kernel/early_printk.c ++++ linux-3.2/arch/unicore32/kernel/early_printk.c +@@ -33,21 +33,17 @@ static struct console early_ocd_console + .index = -1, + }; + +-/* Direct interface for emergencies */ +-static struct console *early_console = &early_ocd_console; +- +-static int __initdata keep_early; +- + static int __init setup_early_printk(char *buf) + { +- if (!buf) ++ int keep_early; ++ ++ if (!buf || early_console) + return 0; + + if (strstr(buf, "keep")) + keep_early = 1; + +- if (!strncmp(buf, "ocd", 3)) +- early_console = &early_ocd_console; ++ early_console = &early_ocd_console; + + if (keep_early) + early_console->flags &= ~CON_BOOT; +Index: linux-3.2/arch/x86/kernel/early_printk.c +=================================================================== +--- linux-3.2.orig/arch/x86/kernel/early_printk.c ++++ linux-3.2/arch/x86/kernel/early_printk.c +@@ -169,25 +169,9 @@ static struct console early_serial_conso + .index = -1, + }; + +-/* Direct interface for emergencies */ +-static struct console *early_console = &early_vga_console; +-static int __initdata early_console_initialized; +- +-asmlinkage void early_printk(const char *fmt, ...) 
+-{ +- char buf[512]; +- int n; +- va_list ap; +- +- va_start(ap, fmt); +- n = vscnprintf(buf, sizeof(buf), fmt, ap); +- early_console->write(early_console, buf, n); +- va_end(ap); +-} +- + static inline void early_console_register(struct console *con, int keep_early) + { +- if (early_console->index != -1) { ++ if (con->index != -1) { + printk(KERN_CRIT "ERROR: earlyprintk= %s already used\n", + con->name); + return; +@@ -207,9 +191,8 @@ static int __init setup_early_printk(cha + if (!buf) + return 0; + +- if (early_console_initialized) ++ if (early_console) + return 0; +- early_console_initialized = 1; + + keep = (strstr(buf, "keep") != NULL); + +Index: linux-3.2/include/linux/console.h +=================================================================== +--- linux-3.2.orig/include/linux/console.h ++++ linux-3.2/include/linux/console.h +@@ -133,6 +133,7 @@ struct console { + for (con = console_drivers; con != NULL; con = con->next) + + extern int console_set_on_cmdline; ++extern struct console *early_console; + + extern int add_preferred_console(char *name, int idx, char *options); + extern int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options); +Index: linux-3.2/include/linux/printk.h +=================================================================== +--- linux-3.2.orig/include/linux/printk.h ++++ linux-3.2/include/linux/printk.h +@@ -88,8 +88,15 @@ int no_printk(const char *fmt, ...) + return 0; + } + ++#ifdef CONFIG_EARLY_PRINTK + extern asmlinkage __printf(1, 2) + void early_printk(const char *fmt, ...); ++extern void printk_kill(void); ++#else ++static inline __printf(1, 2) __cold ++void early_printk(const char *s, ...) { } ++static inline void printk_kill(void) { } ++#endif + + extern int printk_needs_cpu(int cpu); + extern void printk_tick(void); +@@ -109,7 +116,6 @@ extern int __printk_ratelimit(const char + #define printk_ratelimit() __printk_ratelimit(__func__) + extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, + unsigned int interval_msec); +- + extern int printk_delay_msec; + extern int dmesg_restrict; + extern int kptr_restrict; +Index: linux-3.2/kernel/printk.c +=================================================================== +--- linux-3.2.orig/kernel/printk.c ++++ linux-3.2/kernel/printk.c +@@ -21,6 +21,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -44,13 +45,6 @@ + + #include + +-/* +- * Architectures can override it: +- */ +-void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...) +-{ +-} +- + #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT) + + /* printk's without a loglevel use this.. */ +@@ -511,6 +505,7 @@ static void __call_console_drivers(unsig + { + struct console *con; + ++ migrate_disable(); + for_each_console(con) { + if (exclusive_console && con != exclusive_console) + continue; +@@ -519,7 +514,61 @@ static void __call_console_drivers(unsig + (con->flags & CON_ANYTIME))) + con->write(con, &LOG_BUF(start), end - start); + } ++ migrate_enable(); ++} ++ ++#ifdef CONFIG_EARLY_PRINTK ++struct console *early_console; ++ ++static void early_vprintk(const char *fmt, va_list ap) ++{ ++ char buf[512]; ++ int n = vscnprintf(buf, sizeof(buf), fmt, ap); ++ if (early_console) ++ early_console->write(early_console, buf, n); ++} ++ ++asmlinkage void early_printk(const char *fmt, ...) 
++{ ++ va_list ap; ++ va_start(ap, fmt); ++ early_vprintk(fmt, ap); ++ va_end(ap); ++} ++ ++/* ++ * This is independent of any log levels - a global ++ * kill switch that turns off all of printk. ++ * ++ * Used by the NMI watchdog if early-printk is enabled. ++ */ ++static int __read_mostly printk_killswitch; ++ ++static int __init force_early_printk_setup(char *str) ++{ ++ printk_killswitch = 1; ++ return 0; ++} ++early_param("force_early_printk", force_early_printk_setup); ++ ++void printk_kill(void) ++{ ++ printk_killswitch = 1; ++} ++ ++static int forced_early_printk(const char *fmt, va_list ap) ++{ ++ if (!printk_killswitch) ++ return 0; ++ early_vprintk(fmt, ap); ++ return 1; ++} ++#else ++static inline int forced_early_printk(const char *fmt, va_list ap) ++{ ++ return 0; + } ++#endif + + static int __read_mostly ignore_loglevel; + +@@ -781,12 +830,18 @@ static inline int can_use_console(unsign + * interrupts disabled. It should return with 'lockbuf_lock' + * released but interrupts still disabled. + */ +-static int console_trylock_for_printk(unsigned int cpu) ++static int console_trylock_for_printk(unsigned int cpu, unsigned long flags) + __releases(&logbuf_lock) + { + int retval = 0, wake = 0; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ int lock = (!early_boot_irqs_disabled && !irqs_disabled_flags(flags) && ++ !preempt_count()) || sysrq_in_progress; ++#else ++ int lock = 1; ++#endif + +- if (console_trylock()) { ++ if (lock && console_trylock()) { + retval = 1; + + /* +@@ -837,6 +892,13 @@ asmlinkage int vprintk(const char *fmt, + size_t plen; + char special; + ++ /* ++ * Fall back to early_printk if a debugging subsystem has ++ * killed printk output ++ */ ++ if (unlikely(forced_early_printk(fmt, args))) ++ return 1; ++ + boot_delay_msec(); + printk_delay(); + +@@ -957,8 +1019,15 @@ asmlinkage int vprintk(const char *fmt, + * will release 'logbuf_lock' regardless of whether it + * actually gets the semaphore or not. 
+ */ +- if (console_trylock_for_printk(this_cpu)) ++ if (console_trylock_for_printk(this_cpu, flags)) { ++#ifndef CONFIG_PREEMPT_RT_FULL + console_unlock(); ++#else ++ raw_local_irq_restore(flags); ++ console_unlock(); ++ raw_local_irq_save(flags); ++#endif ++ } + + lockdep_on(); + out_restore_irqs: +@@ -1221,8 +1290,8 @@ void printk_tick(void) + + int printk_needs_cpu(int cpu) + { +- if (cpu_is_offline(cpu)) +- printk_tick(); ++ if (unlikely(cpu_is_offline(cpu))) ++ __this_cpu_write(printk_pending, 0); + return __this_cpu_read(printk_pending); + } + +@@ -1268,11 +1337,16 @@ again: + _con_start = con_start; + _log_end = log_end; + con_start = log_end; /* Flush */ ++#ifndef CONFIG_PREEMPT_RT_FULL + raw_spin_unlock(&logbuf_lock); + stop_critical_timings(); /* don't trace print latency */ + call_console_drivers(_con_start, _log_end); + start_critical_timings(); + local_irq_restore(flags); ++#else ++ raw_spin_unlock_irqrestore(&logbuf_lock, flags); ++ call_console_drivers(_con_start, _log_end); ++#endif + } + console_locked = 0; + +Index: linux-3.2/kernel/watchdog.c +=================================================================== +--- linux-3.2.orig/kernel/watchdog.c ++++ linux-3.2/kernel/watchdog.c +@@ -201,6 +201,8 @@ static int is_softlockup(unsigned long t + + #ifdef CONFIG_HARDLOCKUP_DETECTOR + ++static DEFINE_RAW_SPINLOCK(watchdog_output_lock); ++ + static struct perf_event_attr wd_hw_attr = { + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES, +@@ -235,10 +237,19 @@ static void watchdog_overflow_callback(s + if (__this_cpu_read(hard_watchdog_warn) == true) + return; + +- if (hardlockup_panic) ++ /* ++ * If early-printk is enabled then make sure we do not ++ * lock up in printk() and kill console logging: ++ */ ++ printk_kill(); ++ ++ if (hardlockup_panic) { + panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu); +- else ++ } else { ++ raw_spin_lock(&watchdog_output_lock); + WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu); ++ raw_spin_unlock(&watchdog_output_lock); ++ } + + __this_cpu_write(hard_watchdog_warn, true); + return; +@@ -425,6 +436,7 @@ static void watchdog_prepare_cpu(int cpu + WARN_ON(per_cpu(softlockup_watchdog, cpu)); + hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer->function = watchdog_timer_fn; ++ hrtimer->irqsafe = 1; + } + + static int watchdog_enable(int cpu) +Index: linux-3.2/kernel/Kconfig.preempt +=================================================================== +--- linux-3.2.orig/kernel/Kconfig.preempt ++++ linux-3.2/kernel/Kconfig.preempt +@@ -1,3 +1,10 @@ ++config PREEMPT ++ bool ++ select PREEMPT_COUNT ++ ++config PREEMPT_RT_BASE ++ bool ++ select PREEMPT + + choice + prompt "Preemption Model" +@@ -33,9 +40,9 @@ config PREEMPT_VOLUNTARY + + Select this if you are building a kernel for a desktop system. + +-config PREEMPT ++config PREEMPT__LL + bool "Preemptible Kernel (Low-Latency Desktop)" +- select PREEMPT_COUNT ++ select PREEMPT + help + This option reduces the latency of the kernel by making + all kernel code (that is not executing in a critical section) +@@ -51,6 +58,21 @@ config PREEMPT + embedded system with latency requirements in the milliseconds + range. + ++config PREEMPT_RTB ++ bool "Preemptible Kernel (Basic RT)" ++ select PREEMPT_RT_BASE ++ help ++ This option is basically the same as (Low-Latency Desktop) but ++ enables changes which are preliminary for the full preemptiple ++ RT kernel. 
++ ++config PREEMPT_RT_FULL ++ bool "Fully Preemptible Kernel (RT)" ++ depends on IRQ_FORCED_THREADING ++ select PREEMPT_RT_BASE ++ help ++ All and everything ++ + endchoice + + config PREEMPT_COUNT +Index: linux-3.2/include/asm-generic/bug.h +=================================================================== +--- linux-3.2.orig/include/asm-generic/bug.h ++++ linux-3.2/include/asm-generic/bug.h +@@ -3,6 +3,10 @@ + + #include + ++#ifndef __ASSEMBLY__ ++extern void __WARN_ON(const char *func, const char *file, const int line); ++#endif /* __ASSEMBLY__ */ ++ + #ifdef CONFIG_BUG + + #ifdef CONFIG_GENERIC_BUG +@@ -202,4 +206,18 @@ extern void warn_slowpath_null(const cha + # define WARN_ON_SMP(x) ({0;}) + #endif + ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define BUG_ON_RT(c) BUG_ON(c) ++# define BUG_ON_NONRT(c) do { } while (0) ++# define WARN_ON_RT(condition) WARN_ON(condition) ++# define WARN_ON_NONRT(condition) do { } while (0) ++# define WARN_ON_ONCE_NONRT(condition) do { } while (0) ++#else ++# define BUG_ON_RT(c) do { } while (0) ++# define BUG_ON_NONRT(c) BUG_ON(c) ++# define WARN_ON_RT(condition) do { } while (0) ++# define WARN_ON_NONRT(condition) WARN_ON(condition) ++# define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition) ++#endif ++ + #endif +Index: linux-3.2/include/linux/irqflags.h +=================================================================== +--- linux-3.2.orig/include/linux/irqflags.h ++++ linux-3.2/include/linux/irqflags.h +@@ -25,8 +25,6 @@ + # define trace_softirqs_enabled(p) ((p)->softirqs_enabled) + # define trace_hardirq_enter() do { current->hardirq_context++; } while (0) + # define trace_hardirq_exit() do { current->hardirq_context--; } while (0) +-# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) +-# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) + # define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, + #else + # define trace_hardirqs_on() do { } while (0) +@@ -39,9 +37,15 @@ + # define trace_softirqs_enabled(p) 0 + # define trace_hardirq_enter() do { } while (0) + # define trace_hardirq_exit() do { } while (0) ++# define INIT_TRACE_IRQFLAGS ++#endif ++ ++#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL) ++# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) ++# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) ++#else + # define lockdep_softirq_enter() do { } while (0) + # define lockdep_softirq_exit() do { } while (0) +-# define INIT_TRACE_IRQFLAGS + #endif + + #if defined(CONFIG_IRQSOFF_TRACER) || \ +@@ -147,4 +151,23 @@ + + #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ + ++/* ++ * local_irq* variants depending on RT/!RT ++ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define local_irq_disable_nort() do { } while (0) ++# define local_irq_enable_nort() do { } while (0) ++# define local_irq_save_nort(flags) do { local_save_flags(flags); } while (0) ++# define local_irq_restore_nort(flags) do { (void)(flags); } while (0) ++# define local_irq_disable_rt() local_irq_disable() ++# define local_irq_enable_rt() local_irq_enable() ++#else ++# define local_irq_disable_nort() local_irq_disable() ++# define local_irq_enable_nort() local_irq_enable() ++# define local_irq_save_nort(flags) local_irq_save(flags) ++# define local_irq_restore_nort(flags) local_irq_restore(flags) ++# define local_irq_disable_rt() do { } while (0) ++# define local_irq_enable_rt() do { } while (0) ++#endif ++ + #endif +Index: linux-3.2/drivers/ata/libata-sff.c 
+=================================================================== +--- linux-3.2.orig/drivers/ata/libata-sff.c ++++ linux-3.2/drivers/ata/libata-sff.c +@@ -678,9 +678,9 @@ unsigned int ata_sff_data_xfer_noirq(str + unsigned long flags; + unsigned int consumed; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + consumed = ata_sff_data_xfer32(dev, buf, buflen, rw); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + return consumed; + } +@@ -719,7 +719,7 @@ static void ata_pio_sector(struct ata_qu + unsigned long flags; + + /* FIXME: use a bounce buffer */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + buf = kmap_atomic(page, KM_IRQ0); + + /* do the actual data transfer */ +@@ -727,7 +727,7 @@ static void ata_pio_sector(struct ata_qu + do_write); + + kunmap_atomic(buf, KM_IRQ0); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } else { + buf = page_address(page); + ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, +@@ -864,7 +864,7 @@ next_sg: + unsigned long flags; + + /* FIXME: use bounce buffer */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + buf = kmap_atomic(page, KM_IRQ0); + + /* do the actual data transfer */ +@@ -872,7 +872,7 @@ next_sg: + count, rw); + + kunmap_atomic(buf, KM_IRQ0); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } else { + buf = page_address(page); + consumed = ap->ops->sff_data_xfer(dev, buf + offset, +Index: linux-3.2/drivers/ide/alim15x3.c +=================================================================== +--- linux-3.2.orig/drivers/ide/alim15x3.c ++++ linux-3.2/drivers/ide/alim15x3.c +@@ -234,7 +234,7 @@ static int init_chipset_ali15x3(struct p + + isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + + if (m5229_revision < 0xC2) { + /* +@@ -325,7 +325,7 @@ out: + } + pci_dev_put(north); + pci_dev_put(isa_dev); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + return 0; + } + +Index: linux-3.2/drivers/ide/hpt366.c +=================================================================== +--- linux-3.2.orig/drivers/ide/hpt366.c ++++ linux-3.2/drivers/ide/hpt366.c +@@ -1241,7 +1241,7 @@ static int __devinit init_dma_hpt366(ide + + dma_old = inb(base + 2); + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + + dma_new = dma_old; + pci_read_config_byte(dev, hwif->channel ? 
0x4b : 0x43, &masterdma); +@@ -1252,7 +1252,7 @@ static int __devinit init_dma_hpt366(ide + if (dma_new != dma_old) + outb(dma_new, base + 2); + +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n", + hwif->name, base, base + 7); +Index: linux-3.2/drivers/ide/ide-io-std.c +=================================================================== +--- linux-3.2.orig/drivers/ide/ide-io-std.c ++++ linux-3.2/drivers/ide/ide-io-std.c +@@ -175,7 +175,7 @@ void ide_input_data(ide_drive_t *drive, + unsigned long uninitialized_var(flags); + + if ((io_32bit & 2) && !mmio) { +- local_irq_save(flags); ++ local_irq_save_nort(flags); + ata_vlb_sync(io_ports->nsect_addr); + } + +@@ -186,7 +186,7 @@ void ide_input_data(ide_drive_t *drive, + insl(data_addr, buf, words); + + if ((io_32bit & 2) && !mmio) +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + if (((len + 1) & 3) < 2) + return; +@@ -219,7 +219,7 @@ void ide_output_data(ide_drive_t *drive, + unsigned long uninitialized_var(flags); + + if ((io_32bit & 2) && !mmio) { +- local_irq_save(flags); ++ local_irq_save_nort(flags); + ata_vlb_sync(io_ports->nsect_addr); + } + +@@ -230,7 +230,7 @@ void ide_output_data(ide_drive_t *drive, + outsl(data_addr, buf, words); + + if ((io_32bit & 2) && !mmio) +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + if (((len + 1) & 3) < 2) + return; +Index: linux-3.2/drivers/ide/ide-io.c +=================================================================== +--- linux-3.2.orig/drivers/ide/ide-io.c ++++ linux-3.2/drivers/ide/ide-io.c +@@ -659,7 +659,7 @@ void ide_timer_expiry (unsigned long dat + /* disable_irq_nosync ?? */ + disable_irq(hwif->irq); + /* local CPU only, as if we were handling an interrupt */ +- local_irq_disable(); ++ local_irq_disable_nort(); + if (hwif->polling) { + startstop = handler(drive); + } else if (drive_is_ready(drive)) { +Index: linux-3.2/drivers/ide/ide-iops.c +=================================================================== +--- linux-3.2.orig/drivers/ide/ide-iops.c ++++ linux-3.2/drivers/ide/ide-iops.c +@@ -129,12 +129,12 @@ int __ide_wait_stat(ide_drive_t *drive, + if ((stat & ATA_BUSY) == 0) + break; + +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + *rstat = stat; + return -EBUSY; + } + } +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + /* + * Allow status to settle, then read it again. 
+Index: linux-3.2/drivers/ide/ide-probe.c +=================================================================== +--- linux-3.2.orig/drivers/ide/ide-probe.c ++++ linux-3.2/drivers/ide/ide-probe.c +@@ -196,10 +196,10 @@ static void do_identify(ide_drive_t *dri + int bswap = 1; + + /* local CPU only; some systems need this */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + /* read 512 bytes of id info */ + hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + drive->dev_flags |= IDE_DFLAG_ID_READ; + #ifdef DEBUG +Index: linux-3.2/drivers/ide/ide-taskfile.c +=================================================================== +--- linux-3.2.orig/drivers/ide/ide-taskfile.c ++++ linux-3.2/drivers/ide/ide-taskfile.c +@@ -251,7 +251,7 @@ void ide_pio_bytes(ide_drive_t *drive, s + + page_is_high = PageHighMem(page); + if (page_is_high) +- local_irq_save(flags); ++ local_irq_save_nort(flags); + + buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset; + +@@ -272,7 +272,7 @@ void ide_pio_bytes(ide_drive_t *drive, s + kunmap_atomic(buf, KM_BIO_SRC_IRQ); + + if (page_is_high) +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + len -= nr_bytes; + } +@@ -415,7 +415,7 @@ static ide_startstop_t pre_task_out_intr + } + + if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0) +- local_irq_disable(); ++ local_irq_disable_nort(); + + ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE); + +Index: linux-3.2/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +=================================================================== +--- linux-3.2.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c ++++ linux-3.2/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +@@ -801,7 +801,7 @@ void ipoib_mcast_restart_task(struct wor + + ipoib_mcast_stop_thread(dev, 0); + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + netif_addr_lock(dev); + spin_lock(&priv->lock); + +@@ -883,7 +883,7 @@ void ipoib_mcast_restart_task(struct wor + + spin_unlock(&priv->lock); + netif_addr_unlock(dev); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + /* We have to cancel outside of the spinlock */ + list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { +Index: linux-3.2/drivers/input/gameport/gameport.c +=================================================================== +--- linux-3.2.orig/drivers/input/gameport/gameport.c ++++ linux-3.2/drivers/input/gameport/gameport.c +@@ -87,12 +87,12 @@ static int gameport_measure_speed(struct + tx = 1 << 30; + + for(i = 0; i < 50; i++) { +- local_irq_save(flags); ++ local_irq_save_nort(flags); + GET_TIME(t1); + for (t = 0; t < 50; t++) gameport_read(gameport); + GET_TIME(t2); + GET_TIME(t3); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + udelay(i * 10); + if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t; + } +@@ -111,11 +111,11 @@ static int gameport_measure_speed(struct + tx = 1 << 30; + + for(i = 0; i < 50; i++) { +- local_irq_save(flags); ++ local_irq_save_nort(flags); + rdtscl(t1); + for (t = 0; t < 50; t++) gameport_read(gameport); + rdtscl(t2); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + udelay(i * 10); + if (t2 - t1 < tx) tx = t2 - t1; + } +Index: linux-3.2/arch/x86/include/asm/acpi.h +=================================================================== +--- linux-3.2.orig/arch/x86/include/asm/acpi.h ++++ linux-3.2/arch/x86/include/asm/acpi.h +@@ -51,8 +51,8 @@ + + #define ACPI_ASM_MACROS + #define BREAKPOINT3 +-#define ACPI_DISABLE_IRQS() 
local_irq_disable() +-#define ACPI_ENABLE_IRQS() local_irq_enable() ++#define ACPI_DISABLE_IRQS() local_irq_disable_nort() ++#define ACPI_ENABLE_IRQS() local_irq_enable_nort() + #define ACPI_FLUSH_CPU_CACHE() wbinvd() + + int __acpi_acquire_global_lock(unsigned int *lock); +Index: linux-3.2/kernel/user.c +=================================================================== +--- linux-3.2.orig/kernel/user.c ++++ linux-3.2/kernel/user.c +@@ -129,11 +129,11 @@ void free_uid(struct user_struct *up) + if (!up) + return; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) + free_user(up, flags); + else +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + + struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid) +Index: linux-3.2/kernel/res_counter.c +=================================================================== +--- linux-3.2.orig/kernel/res_counter.c ++++ linux-3.2/kernel/res_counter.c +@@ -43,7 +43,7 @@ int res_counter_charge(struct res_counte + struct res_counter *c, *u; + + *limit_fail_at = NULL; +- local_irq_save(flags); ++ local_irq_save_nort(flags); + for (c = counter; c != NULL; c = c->parent) { + spin_lock(&c->lock); + ret = res_counter_charge_locked(c, val); +@@ -62,7 +62,7 @@ undo: + spin_unlock(&u->lock); + } + done: +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + return ret; + } + +@@ -79,13 +79,13 @@ void res_counter_uncharge(struct res_cou + unsigned long flags; + struct res_counter *c; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + for (c = counter; c != NULL; c = c->parent) { + spin_lock(&c->lock); + res_counter_uncharge_locked(c, val); + spin_unlock(&c->lock); + } +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + + +Index: linux-3.2/drivers/usb/core/hcd.c +=================================================================== +--- linux-3.2.orig/drivers/usb/core/hcd.c ++++ linux-3.2/drivers/usb/core/hcd.c +@@ -2146,7 +2146,7 @@ irqreturn_t usb_hcd_irq (int irq, void * + * when the first handler doesn't use it. So let's just + * assume it's never used. + */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + + if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd))) { + rc = IRQ_NONE; +@@ -2159,7 +2159,7 @@ irqreturn_t usb_hcd_irq (int irq, void * + rc = IRQ_HANDLED; + } + +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + return rc; + } + EXPORT_SYMBOL_GPL(usb_hcd_irq); +Index: linux-3.2/drivers/tty/tty_ldisc.c +=================================================================== +--- linux-3.2.orig/drivers/tty/tty_ldisc.c ++++ linux-3.2/drivers/tty/tty_ldisc.c +@@ -70,7 +70,7 @@ static void put_ldisc(struct tty_ldisc * + * We really want an "atomic_dec_and_lock_irqsave()", + * but we don't have it, so this does it by hand. 
+ */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) { + struct tty_ldisc_ops *ldo = ld->ops; + +@@ -81,7 +81,7 @@ static void put_ldisc(struct tty_ldisc * + kfree(ld); + return; + } +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + wake_up(&tty_ldisc_idle); + } + +Index: linux-3.2/lib/scatterlist.c +=================================================================== +--- linux-3.2.orig/lib/scatterlist.c ++++ linux-3.2/lib/scatterlist.c +@@ -423,7 +423,7 @@ void sg_miter_stop(struct sg_mapping_ite + flush_kernel_dcache_page(miter->page); + + if (miter->__flags & SG_MITER_ATOMIC) { +- WARN_ON(!irqs_disabled()); ++ WARN_ON_NONRT(!irqs_disabled()); + kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ); + } else + kunmap(miter->page); +@@ -463,7 +463,7 @@ static size_t sg_copy_buffer(struct scat + + sg_miter_start(&miter, sgl, nents, sg_flags); + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + + while (sg_miter_next(&miter) && offset < buflen) { + unsigned int len; +@@ -480,7 +480,7 @@ static size_t sg_copy_buffer(struct scat + + sg_miter_stop(&miter); + +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + return offset; + } + +Index: linux-3.2/net/mac80211/rx.c +=================================================================== +--- linux-3.2.orig/net/mac80211/rx.c ++++ linux-3.2/net/mac80211/rx.c +@@ -2949,7 +2949,7 @@ void ieee80211_rx(struct ieee80211_hw *h + struct ieee80211_supported_band *sband; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + +- WARN_ON_ONCE(softirq_count() == 0); ++ WARN_ON_ONCE_NONRT(softirq_count() == 0); + + if (WARN_ON(status->band < 0 || + status->band >= IEEE80211_NUM_BANDS)) +Index: linux-3.2/include/linux/page_cgroup.h +=================================================================== +--- linux-3.2.orig/include/linux/page_cgroup.h ++++ linux-3.2/include/linux/page_cgroup.h +@@ -30,6 +30,10 @@ enum { + */ + struct page_cgroup { + unsigned long flags; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ spinlock_t pcg_lock; ++ spinlock_t pcm_lock; ++#endif + struct mem_cgroup *mem_cgroup; + struct list_head lru; /* per cgroup LRU list */ + }; +@@ -96,30 +100,54 @@ static inline void lock_page_cgroup(stru + * Don't take this lock in IRQ context. + * This lock is for pc->mem_cgroup, USED, CACHE, MIGRATION + */ ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_lock(PCG_LOCK, &pc->flags); ++#else ++ spin_lock(&pc->pcg_lock); ++#endif + } + + static inline void unlock_page_cgroup(struct page_cgroup *pc) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_unlock(PCG_LOCK, &pc->flags); ++#else ++ spin_unlock(&pc->pcg_lock); ++#endif + } + + static inline void move_lock_page_cgroup(struct page_cgroup *pc, + unsigned long *flags) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + /* + * We know updates to pc->flags of page cache's stats are from both of + * usual context or IRQ context. Disable IRQ to avoid deadlock. 
+ */ + local_irq_save(*flags); + bit_spin_lock(PCG_MOVE_LOCK, &pc->flags); ++#else ++ spin_lock_irqsave(&pc->pcm_lock, *flags); ++#endif + } + + static inline void move_unlock_page_cgroup(struct page_cgroup *pc, + unsigned long *flags) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_unlock(PCG_MOVE_LOCK, &pc->flags); + local_irq_restore(*flags); ++#else ++ spin_unlock_irqrestore(&pc->pcm_lock, *flags); ++#endif ++} ++ ++static inline void page_cgroup_lock_init(struct page_cgroup *pc) ++{ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ spin_lock_init(&pc->pcg_lock); ++ spin_lock_init(&pc->pcm_lock); ++#endif + } + + #ifdef CONFIG_SPARSEMEM +Index: linux-3.2/mm/page_cgroup.c +=================================================================== +--- linux-3.2.orig/mm/page_cgroup.c ++++ linux-3.2/mm/page_cgroup.c +@@ -17,6 +17,7 @@ static void __meminit init_page_cgroup(s + set_page_cgroup_array_id(pc, id); + pc->mem_cgroup = NULL; + INIT_LIST_HEAD(&pc->lru); ++ page_cgroup_lock_init(pc); + } + static unsigned long total_usage; + +Index: linux-3.2/fs/buffer.c +=================================================================== +--- linux-3.2.orig/fs/buffer.c ++++ linux-3.2/fs/buffer.c +@@ -331,8 +331,7 @@ static void end_buffer_async_read(struct + * decide that the page is now completely done. + */ + first = page_buffers(page); +- local_irq_save(flags); +- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); ++ flags = bh_uptodate_lock_irqsave(first); + clear_buffer_async_read(bh); + unlock_buffer(bh); + tmp = bh; +@@ -345,8 +344,7 @@ static void end_buffer_async_read(struct + } + tmp = tmp->b_this_page; + } while (tmp != bh); +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); ++ bh_uptodate_unlock_irqrestore(first, flags); + + /* + * If none of the buffers had errors and they are all +@@ -358,9 +356,7 @@ static void end_buffer_async_read(struct + return; + + still_busy: +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); +- return; ++ bh_uptodate_unlock_irqrestore(first, flags); + } + + /* +@@ -394,8 +390,7 @@ void end_buffer_async_write(struct buffe + } + + first = page_buffers(page); +- local_irq_save(flags); +- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); ++ flags = bh_uptodate_lock_irqsave(first); + + clear_buffer_async_write(bh); + unlock_buffer(bh); +@@ -407,15 +402,12 @@ void end_buffer_async_write(struct buffe + } + tmp = tmp->b_this_page; + } +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); ++ bh_uptodate_unlock_irqrestore(first, flags); + end_page_writeback(page); + return; + + still_busy: +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); +- return; ++ bh_uptodate_unlock_irqrestore(first, flags); + } + EXPORT_SYMBOL(end_buffer_async_write); + +@@ -3223,6 +3215,7 @@ struct buffer_head *alloc_buffer_head(gf + struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); + if (ret) { + INIT_LIST_HEAD(&ret->b_assoc_buffers); ++ buffer_head_init_locks(ret); + preempt_disable(); + __this_cpu_inc(bh_accounting.nr); + recalc_bh_state(); +Index: linux-3.2/fs/ntfs/aops.c +=================================================================== +--- linux-3.2.orig/fs/ntfs/aops.c ++++ linux-3.2/fs/ntfs/aops.c +@@ -108,8 +108,7 @@ static void ntfs_end_buffer_async_read(s + "0x%llx.", (unsigned long long)bh->b_blocknr); + } + first = page_buffers(page); +- local_irq_save(flags); +- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); ++ flags = bh_uptodate_lock_irqsave(first); + 
clear_buffer_async_read(bh); + unlock_buffer(bh); + tmp = bh; +@@ -124,8 +123,7 @@ static void ntfs_end_buffer_async_read(s + } + tmp = tmp->b_this_page; + } while (tmp != bh); +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); ++ bh_uptodate_unlock_irqrestore(first, flags); + /* + * If none of the buffers had errors then we can set the page uptodate, + * but we first have to perform the post read mst fixups, if the +@@ -146,13 +144,13 @@ static void ntfs_end_buffer_async_read(s + recs = PAGE_CACHE_SIZE / rec_size; + /* Should have been verified before we got here... */ + BUG_ON(!recs); +- local_irq_save(flags); ++ local_irq_save_nort(flags); + kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ); + for (i = 0; i < recs; i++) + post_read_mst_fixup((NTFS_RECORD*)(kaddr + + i * rec_size), rec_size); + kunmap_atomic(kaddr, KM_BIO_SRC_IRQ); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + flush_dcache_page(page); + if (likely(page_uptodate && !PageError(page))) + SetPageUptodate(page); +@@ -160,9 +158,7 @@ static void ntfs_end_buffer_async_read(s + unlock_page(page); + return; + still_busy: +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); +- return; ++ bh_uptodate_unlock_irqrestore(first, flags); + } + + /** +Index: linux-3.2/include/linux/buffer_head.h +=================================================================== +--- linux-3.2.orig/include/linux/buffer_head.h ++++ linux-3.2/include/linux/buffer_head.h +@@ -72,8 +72,52 @@ struct buffer_head { + struct address_space *b_assoc_map; /* mapping this buffer is + associated with */ + atomic_t b_count; /* users using this buffer_head */ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ spinlock_t b_uptodate_lock; ++#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \ ++ defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE) ++ spinlock_t b_state_lock; ++ spinlock_t b_journal_head_lock; ++#endif ++#endif + }; + ++static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh) ++{ ++ unsigned long flags; ++ ++#ifndef CONFIG_PREEMPT_RT_BASE ++ local_irq_save(flags); ++ bit_spin_lock(BH_Uptodate_Lock, &bh->b_state); ++#else ++ spin_lock_irqsave(&bh->b_uptodate_lock, flags); ++#endif ++ return flags; ++} ++ ++static inline void ++bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags) ++{ ++#ifndef CONFIG_PREEMPT_RT_BASE ++ bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state); ++ local_irq_restore(flags); ++#else ++ spin_unlock_irqrestore(&bh->b_uptodate_lock, flags); ++#endif ++} ++ ++static inline void buffer_head_init_locks(struct buffer_head *bh) ++{ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ spin_lock_init(&bh->b_uptodate_lock); ++#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \ ++ defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE) ++ spin_lock_init(&bh->b_state_lock); ++ spin_lock_init(&bh->b_journal_head_lock); ++#endif ++#endif ++} ++ + /* + * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() + * and buffer_foo() functions. 
+Index: linux-3.2/include/linux/jbd_common.h +=================================================================== +--- linux-3.2.orig/include/linux/jbd_common.h ++++ linux-3.2/include/linux/jbd_common.h +@@ -37,32 +37,56 @@ static inline struct journal_head *bh2jh + + static inline void jbd_lock_bh_state(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_lock(BH_State, &bh->b_state); ++#else ++ spin_lock(&bh->b_state_lock); ++#endif + } + + static inline int jbd_trylock_bh_state(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + return bit_spin_trylock(BH_State, &bh->b_state); ++#else ++ return spin_trylock(&bh->b_state_lock); ++#endif + } + + static inline int jbd_is_locked_bh_state(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + return bit_spin_is_locked(BH_State, &bh->b_state); ++#else ++ return spin_is_locked(&bh->b_state_lock); ++#endif + } + + static inline void jbd_unlock_bh_state(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_unlock(BH_State, &bh->b_state); ++#else ++ spin_unlock(&bh->b_state_lock); ++#endif + } + + static inline void jbd_lock_bh_journal_head(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_lock(BH_JournalHead, &bh->b_state); ++#else ++ spin_lock(&bh->b_journal_head_lock); ++#endif + } + + static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_unlock(BH_JournalHead, &bh->b_state); ++#else ++ spin_unlock(&bh->b_journal_head_lock); ++#endif + } + + #endif +Index: linux-3.2/lib/Kconfig.debug +=================================================================== +--- linux-3.2.orig/lib/Kconfig.debug ++++ linux-3.2/lib/Kconfig.debug +@@ -62,6 +62,28 @@ config MAGIC_SYSRQ + keys are documented in . Don't say Y + unless you really know what this hack does. + ++config MAGIC_SYSRQ_FORCE_PRINTK ++ bool "Force printk from Magic SysRq" ++ depends on MAGIC_SYSRQ && PREEMPT_RT_FULL ++ default n ++ help ++ Allow the output from Magic SysRq to be output immediately, even if ++ this causes large latencies. This can cause performance problems ++ for real-time processes. ++ ++ If PREEMPT_RT_FULL, printk() will not try to acquire the console lock ++ when interrupts or preemption are disabled. If the console lock is ++ not acquired the printk() output will be buffered, but will not be ++ output immediately. Some drivers call into the Magic SysRq code ++ with interrupts or preemption disabled, so the output of Magic SysRq ++ will be buffered instead of printing immediately if this option is ++ not selected. ++ ++ Even with this option selected, Magic SysRq output will be delayed ++ if the attempt to acquire the console lock fails. ++ ++ Don't say Y unless you really know what this hack does. ++ + config MAGIC_SYSRQ_DEFAULT_MASK + hex "Default mask for Magic SysRq keys on the console" + depends on MAGIC_SYSRQ +@@ -151,7 +173,7 @@ config DEBUG_KERNEL + + config DEBUG_SHIRQ + bool "Debug shared IRQ handlers" +- depends on DEBUG_KERNEL && GENERIC_HARDIRQS ++ depends on DEBUG_KERNEL && GENERIC_HARDIRQS && !PREEMPT_RT_BASE + help + Enable this to generate a spurious interrupt as soon as a shared + interrupt handler is registered, and just before one is deregistered. 
+Index: linux-3.2/kernel/irq/handle.c +=================================================================== +--- linux-3.2.orig/kernel/irq/handle.c ++++ linux-3.2/kernel/irq/handle.c +@@ -156,8 +156,11 @@ handle_irq_event_percpu(struct irq_desc + action = action->next; + } while (action); + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ /* FIXME: Can we unbreak that ? */ + if (random & IRQF_SAMPLE_RANDOM) + add_interrupt_randomness(irq); ++#endif + + if (!noirqdebug) + note_interrupt(irq, desc, retval); +Index: linux-3.2/kernel/irq/spurious.c +=================================================================== +--- linux-3.2.orig/kernel/irq/spurious.c ++++ linux-3.2/kernel/irq/spurious.c +@@ -339,6 +339,11 @@ MODULE_PARM_DESC(noirqdebug, "Disable ir + + static int __init irqfixup_setup(char *str) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ printk(KERN_WARNING "irqfixup boot option not supported " ++ "w/ CONFIG_PREEMPT_RT\n"); ++ return 1; ++#endif + irqfixup = 1; + printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n"); + printk(KERN_WARNING "This may impact system performance.\n"); +@@ -351,6 +356,11 @@ module_param(irqfixup, int, 0644); + + static int __init irqpoll_setup(char *str) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ printk(KERN_WARNING "irqpoll boot option not supported " ++ "w/ CONFIG_PREEMPT_RT\n"); ++ return 1; ++#endif + irqfixup = 2; + printk(KERN_WARNING "Misrouted IRQ fixup and polling support " + "enabled\n"); +Index: linux-3.2/kernel/irq/manage.c +=================================================================== +--- linux-3.2.orig/kernel/irq/manage.c ++++ linux-3.2/kernel/irq/manage.c +@@ -18,6 +18,7 @@ + #include "internals.h" + + #ifdef CONFIG_IRQ_FORCED_THREADING ++# ifndef CONFIG_PREEMPT_RT_BASE + __read_mostly bool force_irqthreads; + + static int __init setup_forced_irqthreads(char *arg) +@@ -26,6 +27,7 @@ static int __init setup_forced_irqthread + return 0; + } + early_param("threadirqs", setup_forced_irqthreads); ++# endif + #endif + + /** +Index: linux-3.2/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +=================================================================== +--- linux-3.2.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c ++++ linux-3.2/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +@@ -2236,11 +2236,8 @@ static netdev_tx_t atl1c_xmit_frame(stru + } + + tpd_req = atl1c_cal_tpd_req(skb); +- if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) { +- if (netif_msg_pktdata(adapter)) +- dev_info(&adapter->pdev->dev, "tx locked\n"); +- return NETDEV_TX_LOCKED; +- } ++ spin_lock_irqsave(&adapter->tx_lock, flags); ++ + if (skb->mark == 0x01) + type = atl1c_trans_high; + else +Index: linux-3.2/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +=================================================================== +--- linux-3.2.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c ++++ linux-3.2/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +@@ -1819,8 +1819,7 @@ static netdev_tx_t atl1e_xmit_frame(stru + return NETDEV_TX_OK; + } + tpd_req = atl1e_cal_tdp_req(skb); +- if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) +- return NETDEV_TX_LOCKED; ++ spin_lock_irqsave(&adapter->tx_lock, flags); + + if (atl1e_tpd_avail(adapter) < tpd_req) { + /* no enough descriptor, just stop queue */ +Index: linux-3.2/drivers/net/ethernet/chelsio/cxgb/sge.c +=================================================================== +--- linux-3.2.orig/drivers/net/ethernet/chelsio/cxgb/sge.c ++++ linux-3.2/drivers/net/ethernet/chelsio/cxgb/sge.c +@@ -1678,8 +1678,7 @@ static int 
t1_sge_tx(struct sk_buff *skb + struct cmdQ *q = &sge->cmdQ[qid]; + unsigned int credits, pidx, genbit, count, use_sched_skb = 0; + +- if (!spin_trylock(&q->lock)) +- return NETDEV_TX_LOCKED; ++ spin_lock(&q->lock); + + reclaim_completed_tx(sge, q); + +Index: linux-3.2/drivers/net/ethernet/neterion/s2io.c +=================================================================== +--- linux-3.2.orig/drivers/net/ethernet/neterion/s2io.c ++++ linux-3.2/drivers/net/ethernet/neterion/s2io.c +@@ -4090,12 +4090,7 @@ static netdev_tx_t s2io_xmit(struct sk_b + [skb->priority & (MAX_TX_FIFOS - 1)]; + fifo = &mac_control->fifos[queue]; + +- if (do_spin_lock) +- spin_lock_irqsave(&fifo->tx_lock, flags); +- else { +- if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags))) +- return NETDEV_TX_LOCKED; +- } ++ spin_lock_irqsave(&fifo->tx_lock, flags); + + if (sp->config.multiq) { + if (__netif_subqueue_stopped(dev, fifo->fifo_no)) { +Index: linux-3.2/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +=================================================================== +--- linux-3.2.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c ++++ linux-3.2/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +@@ -1931,10 +1931,9 @@ static int pch_gbe_xmit_frame(struct sk_ + adapter->stats.tx_length_errors++; + return NETDEV_TX_OK; + } +- if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) { +- /* Collision - tell upper layer to requeue */ +- return NETDEV_TX_LOCKED; +- } ++ ++ spin_lock_irqsave(&tx_ring->tx_lock, flags); ++ + if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) { + netif_stop_queue(netdev); + spin_unlock_irqrestore(&tx_ring->tx_lock, flags); +Index: linux-3.2/drivers/net/ethernet/tehuti/tehuti.c +=================================================================== +--- linux-3.2.orig/drivers/net/ethernet/tehuti/tehuti.c ++++ linux-3.2/drivers/net/ethernet/tehuti/tehuti.c +@@ -1605,13 +1605,8 @@ static netdev_tx_t bdx_tx_transmit(struc + unsigned long flags; + + ENTER; +- local_irq_save(flags); +- if (!spin_trylock(&priv->tx_lock)) { +- local_irq_restore(flags); +- DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n", +- BDX_DRV_NAME, ndev->name); +- return NETDEV_TX_LOCKED; +- } ++ ++ spin_lock_irqsave(&priv->tx_lock, flags); + + /* build tx descriptor */ + BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */ +Index: linux-3.2/drivers/net/rionet.c +=================================================================== +--- linux-3.2.orig/drivers/net/rionet.c ++++ linux-3.2/drivers/net/rionet.c +@@ -176,11 +176,7 @@ static int rionet_start_xmit(struct sk_b + u16 destid; + unsigned long flags; + +- local_irq_save(flags); +- if (!spin_trylock(&rnet->tx_lock)) { +- local_irq_restore(flags); +- return NETDEV_TX_LOCKED; +- } ++ spin_lock_irqsave(&rnet->tx_lock, flags); + + if ((rnet->tx_cnt + 1) > RIONET_TX_RING_SIZE) { + netif_stop_queue(ndev); +Index: linux-3.2/drivers/net/ethernet/3com/3c59x.c +=================================================================== +--- linux-3.2.orig/drivers/net/ethernet/3com/3c59x.c ++++ linux-3.2/drivers/net/ethernet/3com/3c59x.c +@@ -843,9 +843,9 @@ static void poll_vortex(struct net_devic + { + struct vortex_private *vp = netdev_priv(dev); + unsigned long flags; +- local_irq_save(flags); ++ local_irq_save_nort(flags); + (vp->full_bus_master_rx ? 
boomerang_interrupt:vortex_interrupt)(dev->irq,dev); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + #endif + +@@ -1921,12 +1921,12 @@ static void vortex_tx_timeout(struct net + * Block interrupts because vortex_interrupt does a bare spin_lock() + */ + unsigned long flags; +- local_irq_save(flags); ++ local_irq_save_nort(flags); + if (vp->full_bus_master_tx) + boomerang_interrupt(dev->irq, dev); + else + vortex_interrupt(dev->irq, dev); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + } + +Index: linux-3.2/drivers/net/ethernet/freescale/gianfar.c +=================================================================== +--- linux-3.2.orig/drivers/net/ethernet/freescale/gianfar.c ++++ linux-3.2/drivers/net/ethernet/freescale/gianfar.c +@@ -1671,7 +1671,7 @@ void stop_gfar(struct net_device *dev) + + + /* Lock it down */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + lock_tx_qs(priv); + lock_rx_qs(priv); + +@@ -1679,7 +1679,7 @@ void stop_gfar(struct net_device *dev) + + unlock_rx_qs(priv); + unlock_tx_qs(priv); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + /* Free the IRQs */ + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { +@@ -2949,7 +2949,7 @@ static void adjust_link(struct net_devic + struct phy_device *phydev = priv->phydev; + int new_state = 0; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + lock_tx_qs(priv); + + if (phydev->link) { +@@ -3016,7 +3016,7 @@ static void adjust_link(struct net_devic + if (new_state && netif_msg_link(priv)) + phy_print_status(phydev); + unlock_tx_qs(priv); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + + /* Update the hash table based on the current list of multicast +Index: linux-3.2/drivers/usb/host/ohci-hcd.c +=================================================================== +--- linux-3.2.orig/drivers/usb/host/ohci-hcd.c ++++ linux-3.2/drivers/usb/host/ohci-hcd.c +@@ -833,9 +833,13 @@ static irqreturn_t ohci_irq (struct usb_ + } + + if (ints & OHCI_INTR_WDH) { +- spin_lock (&ohci->lock); +- dl_done_list (ohci); +- spin_unlock (&ohci->lock); ++ if (ohci->hcca->done_head == 0) { ++ ints &= ~OHCI_INTR_WDH; ++ } else { ++ spin_lock (&ohci->lock); ++ dl_done_list (ohci); ++ spin_unlock (&ohci->lock); ++ } + } + + if (quirk_zfmicro(ohci) && (ints & OHCI_INTR_SF)) { +Index: linux-3.2/include/linux/percpu.h +=================================================================== +--- linux-3.2.orig/include/linux/percpu.h ++++ linux-3.2/include/linux/percpu.h +@@ -48,6 +48,31 @@ + preempt_enable(); \ + } while (0) + ++#ifndef CONFIG_PREEMPT_RT_FULL ++# define get_local_var(var) get_cpu_var(var) ++# define put_local_var(var) put_cpu_var(var) ++# define get_local_ptr(var) get_cpu_ptr(var) ++# define put_local_ptr(var) put_cpu_ptr(var) ++#else ++# define get_local_var(var) (*({ \ ++ migrate_disable(); \ ++ &__get_cpu_var(var); })) ++ ++# define put_local_var(var) do { \ ++ (void)&(var); \ ++ migrate_enable(); \ ++} while (0) ++ ++# define get_local_ptr(var) ({ \ ++ migrate_disable(); \ ++ this_cpu_ptr(var); }) ++ ++# define put_local_ptr(var) do { \ ++ (void)(var); \ ++ migrate_enable(); \ ++} while (0) ++#endif ++ + /* minimum unit size, also is the maximum supported allocation size */ + #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) + +Index: linux-3.2/include/linux/locallock.h +=================================================================== +--- /dev/null ++++ linux-3.2/include/linux/locallock.h +@@ -0,0 +1,230 @@ ++#ifndef _LINUX_LOCALLOCK_H 
++#define _LINUX_LOCALLOCK_H ++ ++#include ++ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ ++#ifdef CONFIG_DEBUG_SPINLOCK ++# define LL_WARN(cond) WARN_ON(cond) ++#else ++# define LL_WARN(cond) do { } while (0) ++#endif ++ ++/* ++ * per cpu lock based substitute for local_irq_*() ++ */ ++struct local_irq_lock { ++ spinlock_t lock; ++ struct task_struct *owner; ++ int nestcnt; ++ unsigned long flags; ++}; ++ ++#define DEFINE_LOCAL_IRQ_LOCK(lvar) \ ++ DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \ ++ .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) } ++ ++#define local_irq_lock_init(lvar) \ ++ do { \ ++ int __cpu; \ ++ for_each_possible_cpu(__cpu) \ ++ spin_lock_init(&per_cpu(lvar, __cpu).lock); \ ++ } while (0) ++ ++static inline void __local_lock(struct local_irq_lock *lv) ++{ ++ if (lv->owner != current) { ++ spin_lock(&lv->lock); ++ LL_WARN(lv->owner); ++ LL_WARN(lv->nestcnt); ++ lv->owner = current; ++ } ++ lv->nestcnt++; ++} ++ ++#define local_lock(lvar) \ ++ do { __local_lock(&get_local_var(lvar)); } while (0) ++ ++static inline int __local_trylock(struct local_irq_lock *lv) ++{ ++ if (lv->owner != current && spin_trylock(&lv->lock)) { ++ LL_WARN(lv->owner); ++ LL_WARN(lv->nestcnt); ++ lv->owner = current; ++ lv->nestcnt = 1; ++ return 1; ++ } ++ return 0; ++} ++ ++#define local_trylock(lvar) \ ++ ({ \ ++ int __locked; \ ++ __locked = __local_trylock(&get_local_var(lvar)); \ ++ if (!__locked) \ ++ put_local_var(lvar); \ ++ __locked; \ ++ }) ++ ++static inline void __local_unlock(struct local_irq_lock *lv) ++{ ++ LL_WARN(lv->nestcnt == 0); ++ LL_WARN(lv->owner != current); ++ if (--lv->nestcnt) ++ return; ++ ++ lv->owner = NULL; ++ spin_unlock(&lv->lock); ++} ++ ++#define local_unlock(lvar) \ ++ do { \ ++ __local_unlock(&__get_cpu_var(lvar)); \ ++ put_local_var(lvar); \ ++ } while (0) ++ ++static inline void __local_lock_irq(struct local_irq_lock *lv) ++{ ++ spin_lock_irqsave(&lv->lock, lv->flags); ++ LL_WARN(lv->owner); ++ LL_WARN(lv->nestcnt); ++ lv->owner = current; ++ lv->nestcnt = 1; ++} ++ ++#define local_lock_irq(lvar) \ ++ do { __local_lock_irq(&get_local_var(lvar)); } while (0) ++ ++static inline void __local_unlock_irq(struct local_irq_lock *lv) ++{ ++ LL_WARN(!lv->nestcnt); ++ LL_WARN(lv->owner != current); ++ lv->owner = NULL; ++ lv->nestcnt = 0; ++ spin_unlock_irq(&lv->lock); ++} ++ ++#define local_unlock_irq(lvar) \ ++ do { \ ++ __local_unlock_irq(&__get_cpu_var(lvar)); \ ++ put_local_var(lvar); \ ++ } while (0) ++ ++static inline int __local_lock_irqsave(struct local_irq_lock *lv) ++{ ++ if (lv->owner != current) { ++ __local_lock_irq(lv); ++ return 0; ++ } else { ++ lv->nestcnt++; ++ return 1; ++ } ++} ++ ++#define local_lock_irqsave(lvar, _flags) \ ++ do { \ ++ if (__local_lock_irqsave(&get_local_var(lvar))) \ ++ put_local_var(lvar); \ ++ _flags = __get_cpu_var(lvar).flags; \ ++ } while (0) ++ ++static inline int __local_unlock_irqrestore(struct local_irq_lock *lv, ++ unsigned long flags) ++{ ++ LL_WARN(!lv->nestcnt); ++ LL_WARN(lv->owner != current); ++ if (--lv->nestcnt) ++ return 0; ++ ++ lv->owner = NULL; ++ spin_unlock_irqrestore(&lv->lock, lv->flags); ++ return 1; ++} ++ ++#define local_unlock_irqrestore(lvar, flags) \ ++ do { \ ++ if (__local_unlock_irqrestore(&__get_cpu_var(lvar), flags)) \ ++ put_local_var(lvar); \ ++ } while (0) ++ ++#define local_spin_trylock_irq(lvar, lock) \ ++ ({ \ ++ int __locked; \ ++ local_lock_irq(lvar); \ ++ __locked = spin_trylock(lock); \ ++ if (!__locked) \ ++ local_unlock_irq(lvar); \ ++ __locked; \ ++ }) ++ ++#define local_spin_lock_irq(lvar, 
lock) \ ++ do { \ ++ local_lock_irq(lvar); \ ++ spin_lock(lock); \ ++ } while (0) ++ ++#define local_spin_unlock_irq(lvar, lock) \ ++ do { \ ++ spin_unlock(lock); \ ++ local_unlock_irq(lvar); \ ++ } while (0) ++ ++#define local_spin_lock_irqsave(lvar, lock, flags) \ ++ do { \ ++ local_lock_irqsave(lvar, flags); \ ++ spin_lock(lock); \ ++ } while (0) ++ ++#define local_spin_unlock_irqrestore(lvar, lock, flags) \ ++ do { \ ++ spin_unlock(lock); \ ++ local_unlock_irqrestore(lvar, flags); \ ++ } while (0) ++ ++#define get_locked_var(lvar, var) \ ++ (*({ \ ++ local_lock(lvar); \ ++ &__get_cpu_var(var); \ ++ })) ++ ++#define put_locked_var(lvar, var) local_unlock(lvar) ++ ++#define local_lock_cpu(lvar) \ ++ ({ \ ++ local_lock(lvar); \ ++ smp_processor_id(); \ ++ }) ++ ++#define local_unlock_cpu(lvar) local_unlock(lvar) ++ ++#else /* PREEMPT_RT_BASE */ ++ ++#define DEFINE_LOCAL_IRQ_LOCK(lvar) __typeof__(const int) lvar ++ ++static inline void local_irq_lock_init(int lvar) { } ++ ++#define local_lock(lvar) preempt_disable() ++#define local_unlock(lvar) preempt_enable() ++#define local_lock_irq(lvar) local_irq_disable() ++#define local_unlock_irq(lvar) local_irq_enable() ++#define local_lock_irqsave(lvar, flags) local_irq_save(flags) ++#define local_unlock_irqrestore(lvar, flags) local_irq_restore(flags) ++ ++#define local_spin_trylock_irq(lvar, lock) spin_trylock_irq(lock) ++#define local_spin_lock_irq(lvar, lock) spin_lock_irq(lock) ++#define local_spin_unlock_irq(lvar, lock) spin_unlock_irq(lock) ++#define local_spin_lock_irqsave(lvar, lock, flags) \ ++ spin_lock_irqsave(lock, flags) ++#define local_spin_unlock_irqrestore(lvar, lock, flags) \ ++ spin_unlock_irqrestore(lock, flags) ++ ++#define get_locked_var(lvar, var) get_cpu_var(var) ++#define put_locked_var(lvar, var) put_cpu_var(var) ++ ++#define local_lock_cpu(lvar) get_cpu() ++#define local_unlock_cpu(lvar) put_cpu() ++ ++#endif ++ ++#endif +Index: linux-3.2/include/linux/smp.h +=================================================================== +--- linux-3.2.orig/include/linux/smp.h ++++ linux-3.2/include/linux/smp.h +@@ -80,7 +80,6 @@ void __smp_call_function_single(int cpui + + int smp_call_function_any(const struct cpumask *mask, + smp_call_func_t func, void *info, int wait); +- + /* + * Generic and arch helpers + */ +@@ -173,6 +172,9 @@ smp_call_function_any(const struct cpuma + #define get_cpu() ({ preempt_disable(); smp_processor_id(); }) + #define put_cpu() preempt_enable() + ++#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); }) ++#define put_cpu_light() migrate_enable() ++ + /* + * Callback to arch code if there's nosmp or maxcpus=0 on the + * boot command line: +Index: linux-3.2/mm/swap.c +=================================================================== +--- linux-3.2.orig/mm/swap.c ++++ linux-3.2/mm/swap.c +@@ -31,6 +31,7 @@ + #include + #include + #include ++#include + + #include "internal.h" + +@@ -41,6 +42,9 @@ static DEFINE_PER_CPU(struct pagevec[NR_ + static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs); + static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs); + ++static DEFINE_LOCAL_IRQ_LOCK(rotate_lock); ++static DEFINE_LOCAL_IRQ_LOCK(swap_lock); ++ + /* + * This path almost never happens for VM activity - pages are normally + * freed via pagevecs. But it gets used by networking. 
+@@ -267,11 +271,11 @@ void rotate_reclaimable_page(struct page + unsigned long flags; + + page_cache_get(page); +- local_irq_save(flags); ++ local_lock_irqsave(rotate_lock, flags); + pvec = &__get_cpu_var(lru_rotate_pvecs); + if (!pagevec_add(pvec, page)) + pagevec_move_tail(pvec); +- local_irq_restore(flags); ++ local_unlock_irqrestore(rotate_lock, flags); + } + } + +@@ -327,12 +331,13 @@ static void activate_page_drain(int cpu) + void activate_page(struct page *page) + { + if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { +- struct pagevec *pvec = &get_cpu_var(activate_page_pvecs); ++ struct pagevec *pvec = &get_locked_var(swap_lock, ++ activate_page_pvecs); + + page_cache_get(page); + if (!pagevec_add(pvec, page)) + pagevec_lru_move_fn(pvec, __activate_page, NULL); +- put_cpu_var(activate_page_pvecs); ++ put_locked_var(swap_lock, activate_page_pvecs); + } + } + +@@ -373,12 +378,12 @@ EXPORT_SYMBOL(mark_page_accessed); + + void __lru_cache_add(struct page *page, enum lru_list lru) + { +- struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru]; ++ struct pagevec *pvec = &get_locked_var(swap_lock, lru_add_pvecs)[lru]; + + page_cache_get(page); + if (!pagevec_add(pvec, page)) + ____pagevec_lru_add(pvec, lru); +- put_cpu_var(lru_add_pvecs); ++ put_locked_var(swap_lock, lru_add_pvecs); + } + EXPORT_SYMBOL(__lru_cache_add); + +@@ -512,9 +517,9 @@ static void drain_cpu_pagevecs(int cpu) + unsigned long flags; + + /* No harm done if a racing interrupt already did this */ +- local_irq_save(flags); ++ local_lock_irqsave(rotate_lock, flags); + pagevec_move_tail(pvec); +- local_irq_restore(flags); ++ local_unlock_irqrestore(rotate_lock, flags); + } + + pvec = &per_cpu(lru_deactivate_pvecs, cpu); +@@ -542,18 +547,19 @@ void deactivate_page(struct page *page) + return; + + if (likely(get_page_unless_zero(page))) { +- struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs); ++ struct pagevec *pvec = &get_locked_var(swap_lock, ++ lru_deactivate_pvecs); + + if (!pagevec_add(pvec, page)) + pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); +- put_cpu_var(lru_deactivate_pvecs); ++ put_locked_var(swap_lock, lru_deactivate_pvecs); + } + } + + void lru_add_drain(void) + { +- drain_cpu_pagevecs(get_cpu()); +- put_cpu(); ++ drain_cpu_pagevecs(local_lock_cpu(swap_lock)); ++ local_unlock_cpu(swap_lock); + } + + static void lru_add_drain_per_cpu(struct work_struct *dummy) +@@ -783,6 +789,9 @@ void __init swap_setup(void) + { + unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT); + ++ local_irq_lock_init(rotate_lock); ++ local_irq_lock_init(swap_lock); ++ + #ifdef CONFIG_SWAP + bdi_init(swapper_space.backing_dev_info); + #endif +Index: linux-3.2/mm/vmscan.c +=================================================================== +--- linux-3.2.orig/mm/vmscan.c ++++ linux-3.2/mm/vmscan.c +@@ -1344,8 +1344,8 @@ static int too_many_isolated(struct zone + */ + static noinline_for_stack void + putback_lru_pages(struct zone *zone, struct scan_control *sc, +- unsigned long nr_anon, unsigned long nr_file, +- struct list_head *page_list) ++ unsigned long nr_anon, unsigned long nr_file, ++ struct list_head *page_list, unsigned long nr_reclaimed) + { + struct page *page; + struct pagevec pvec; +@@ -1356,7 +1356,12 @@ putback_lru_pages(struct zone *zone, str + /* + * Put back any unfreeable pages. 
+ */ +- spin_lock(&zone->lru_lock); ++ spin_lock_irq(&zone->lru_lock); ++ ++ if (current_is_kswapd()) ++ __count_vm_events(KSWAPD_STEAL, nr_reclaimed); ++ __count_zone_vm_events(PGSTEAL, zone, nr_reclaimed); ++ + while (!list_empty(page_list)) { + int lru; + page = lru_to_page(page_list); +@@ -1539,12 +1544,7 @@ shrink_inactive_list(unsigned long nr_to + priority, &nr_dirty, &nr_writeback); + } + +- local_irq_disable(); +- if (current_is_kswapd()) +- __count_vm_events(KSWAPD_STEAL, nr_reclaimed); +- __count_zone_vm_events(PGSTEAL, zone, nr_reclaimed); +- +- putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list); ++ putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list, nr_reclaimed); + + /* + * If reclaim is isolating dirty pages under writeback, it implies +Index: linux-3.2/include/linux/vmstat.h +=================================================================== +--- linux-3.2.orig/include/linux/vmstat.h ++++ linux-3.2/include/linux/vmstat.h +@@ -29,7 +29,9 @@ DECLARE_PER_CPU(struct vm_event_state, v + + static inline void __count_vm_event(enum vm_event_item item) + { ++ preempt_disable_rt(); + __this_cpu_inc(vm_event_states.event[item]); ++ preempt_enable_rt(); + } + + static inline void count_vm_event(enum vm_event_item item) +@@ -39,7 +41,9 @@ static inline void count_vm_event(enum v + + static inline void __count_vm_events(enum vm_event_item item, long delta) + { ++ preempt_disable_rt(); + __this_cpu_add(vm_event_states.event[item], delta); ++ preempt_enable_rt(); + } + + static inline void count_vm_events(enum vm_event_item item, long delta) +Index: linux-3.2/mm/vmstat.c +=================================================================== +--- linux-3.2.orig/mm/vmstat.c ++++ linux-3.2/mm/vmstat.c +@@ -216,6 +216,7 @@ void __mod_zone_page_state(struct zone * + long x; + long t; + ++ preempt_disable_rt(); + x = delta + __this_cpu_read(*p); + + t = __this_cpu_read(pcp->stat_threshold); +@@ -225,6 +226,7 @@ void __mod_zone_page_state(struct zone * + x = 0; + } + __this_cpu_write(*p, x); ++ preempt_enable_rt(); + } + EXPORT_SYMBOL(__mod_zone_page_state); + +@@ -257,6 +259,7 @@ void __inc_zone_state(struct zone *zone, + s8 __percpu *p = pcp->vm_stat_diff + item; + s8 v, t; + ++ preempt_disable_rt(); + v = __this_cpu_inc_return(*p); + t = __this_cpu_read(pcp->stat_threshold); + if (unlikely(v > t)) { +@@ -265,6 +268,7 @@ void __inc_zone_state(struct zone *zone, + zone_page_state_add(v + overstep, zone, item); + __this_cpu_write(*p, -overstep); + } ++ preempt_enable_rt(); + } + + void __inc_zone_page_state(struct page *page, enum zone_stat_item item) +@@ -279,6 +283,7 @@ void __dec_zone_state(struct zone *zone, + s8 __percpu *p = pcp->vm_stat_diff + item; + s8 v, t; + ++ preempt_disable_rt(); + v = __this_cpu_dec_return(*p); + t = __this_cpu_read(pcp->stat_threshold); + if (unlikely(v < - t)) { +@@ -287,6 +292,7 @@ void __dec_zone_state(struct zone *zone, + zone_page_state_add(v - overstep, zone, item); + __this_cpu_write(*p, overstep); + } ++ preempt_enable_rt(); + } + + void __dec_zone_page_state(struct page *page, enum zone_stat_item item) +Index: linux-3.2/include/linux/mm.h +=================================================================== +--- linux-3.2.orig/include/linux/mm.h ++++ linux-3.2/include/linux/mm.h +@@ -1194,27 +1194,59 @@ static inline pmd_t *pmd_alloc(struct mm + * overflow into the next struct page (as it might with DEBUG_SPINLOCK). + * When freeing, reset page->mapping so free_pages_check won't complain. 
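
/*
 * Illustrative sketch, not part of the patch: what the
 * preempt_disable_rt()/preempt_enable_rt() pairs added to the vmstat code
 * above provide.  On !RT kernels they compile away, because the callers
 * already run with preemption or interrupts off; on RT they keep the
 * __this_cpu_*() read-modify-write sequence on one CPU even though the
 * surrounding "spinlock" may sleep.  The counter and function names are
 * made up.
 */
static DEFINE_PER_CPU(unsigned long, example_events);

static void example_count_event(long delta)
{
        /* On !RT the caller is assumed to already hold off preemption or
         * interrupts, as with __count_vm_events(). */
        preempt_disable_rt();
        __this_cpu_add(example_events, delta);
        preempt_enable_rt();
}
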
+ */ ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + #define __pte_lockptr(page) &((page)->ptl) +-#define pte_lock_init(_page) do { \ +- spin_lock_init(__pte_lockptr(_page)); \ +-} while (0) ++ ++static inline struct page *pte_lock_init(struct page *page) ++{ ++ spin_lock_init(__pte_lockptr(page)); ++ return page; ++} ++ + #define pte_lock_deinit(page) ((page)->mapping = NULL) ++ ++#else /* !PREEMPT_RT_FULL */ ++ ++/* ++ * On PREEMPT_RT_FULL the spinlock_t's are too large to embed in the ++ * page frame, hence it only has a pointer and we need to dynamically ++ * allocate the lock when we allocate PTE-pages. ++ * ++ * This is an overall win, since only a small fraction of the pages ++ * will be PTE pages under normal circumstances. ++ */ ++ ++#define __pte_lockptr(page) ((page)->ptl) ++ ++extern struct page *pte_lock_init(struct page *page); ++extern void pte_lock_deinit(struct page *page); ++ ++#endif /* PREEMPT_RT_FULL */ ++ + #define pte_lockptr(mm, pmd) ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));}) + #else /* !USE_SPLIT_PTLOCKS */ + /* + * We use mm->page_table_lock to guard all pagetable pages of the mm. + */ +-#define pte_lock_init(page) do {} while (0) ++static inline struct page *pte_lock_init(struct page *page) { return page; } + #define pte_lock_deinit(page) do {} while (0) + #define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;}) + #endif /* USE_SPLIT_PTLOCKS */ + +-static inline void pgtable_page_ctor(struct page *page) ++static inline struct page *__pgtable_page_ctor(struct page *page) + { +- pte_lock_init(page); +- inc_zone_page_state(page, NR_PAGETABLE); ++ page = pte_lock_init(page); ++ if (page) ++ inc_zone_page_state(page, NR_PAGETABLE); ++ return page; + } + ++#define pgtable_page_ctor(page) \ ++do { \ ++ page = __pgtable_page_ctor(page); \ ++} while (0) ++ + static inline void pgtable_page_dtor(struct page *page) + { + pte_lock_deinit(page); +Index: linux-3.2/include/linux/mm_types.h +=================================================================== +--- linux-3.2.orig/include/linux/mm_types.h ++++ linux-3.2/include/linux/mm_types.h +@@ -12,6 +12,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -118,7 +119,11 @@ struct page { + * system if PG_buddy is set. + */ + #if USE_SPLIT_PTLOCKS +- spinlock_t ptl; ++# ifndef CONFIG_PREEMPT_RT_FULL ++ spinlock_t ptl; ++# else ++ spinlock_t *ptl; ++# endif + #endif + struct kmem_cache *slab; /* SLUB: Pointer to slab */ + struct page *first_page; /* Compound tail pages */ +@@ -389,6 +394,9 @@ struct mm_struct { + #ifdef CONFIG_CPUMASK_OFFSTACK + struct cpumask cpumask_allocation; + #endif ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct rcu_head delayed_drop; ++#endif + }; + + static inline void mm_init_cpumask(struct mm_struct *mm) +Index: linux-3.2/init/Kconfig +=================================================================== +--- linux-3.2.orig/init/Kconfig ++++ linux-3.2/init/Kconfig +@@ -1239,6 +1239,7 @@ config SLAB + + config SLUB + bool "SLUB (Unqueued Allocator)" ++ depends on !PREEMPT_RT_FULL + help + SLUB is a slab allocator that minimizes cache line usage + instead of managing queues of cached objects (SLAB approach). +@@ -1250,6 +1251,7 @@ config SLUB + config SLOB + depends on EXPERT + bool "SLOB (Simple Allocator)" ++ depends on !PREEMPT_RT_FULL + help + SLOB replaces the stock allocator with a drastically simpler + allocator. 
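
/*
 * Illustrative sketch, not part of the patch: code that takes the split
 * PTE lock through the existing helpers is unaffected by the RT change
 * above, because pte_lockptr()/pte_offset_map_lock() remain the only way
 * to reach the lock; only its storage differs (a spinlock_t embedded in
 * struct page vs. a dynamically allocated lock behind page->ptl).  The
 * function name is made up.
 */
static void example_touch_pte(struct mm_struct *mm, pmd_t *pmd,
                              unsigned long addr)
{
        spinlock_t *ptl;
        pte_t *pte;

        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        /* ... read or update *pte while holding ptl ... */
        pte_unmap_unlock(pte, ptl);
}
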
SLOB is generally more space efficient but +Index: linux-3.2/include/linux/radix-tree.h +=================================================================== +--- linux-3.2.orig/include/linux/radix-tree.h ++++ linux-3.2/include/linux/radix-tree.h +@@ -231,7 +231,13 @@ unsigned long radix_tree_next_hole(struc + unsigned long index, unsigned long max_scan); + unsigned long radix_tree_prev_hole(struct radix_tree_root *root, + unsigned long index, unsigned long max_scan); ++ ++#ifndef CONFIG_PREEMPT_RT_FULL + int radix_tree_preload(gfp_t gfp_mask); ++#else ++static inline int radix_tree_preload(gfp_t gm) { return 0; } ++#endif ++ + void radix_tree_init(void); + void *radix_tree_tag_set(struct radix_tree_root *root, + unsigned long index, unsigned int tag); +@@ -256,7 +262,7 @@ unsigned long radix_tree_locate_item(str + + static inline void radix_tree_preload_end(void) + { +- preempt_enable(); ++ preempt_enable_nort(); + } + + #endif /* _LINUX_RADIX_TREE_H */ +Index: linux-3.2/lib/radix-tree.c +=================================================================== +--- linux-3.2.orig/lib/radix-tree.c ++++ linux-3.2/lib/radix-tree.c +@@ -166,12 +166,13 @@ radix_tree_node_alloc(struct radix_tree_ + * succeed in getting a node here (and never reach + * kmem_cache_alloc) + */ +- rtp = &__get_cpu_var(radix_tree_preloads); ++ rtp = &get_cpu_var(radix_tree_preloads); + if (rtp->nr) { + ret = rtp->nodes[rtp->nr - 1]; + rtp->nodes[rtp->nr - 1] = NULL; + rtp->nr--; + } ++ put_cpu_var(radix_tree_preloads); + } + if (ret == NULL) + ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); +@@ -206,6 +207,7 @@ radix_tree_node_free(struct radix_tree_n + call_rcu(&node->rcu_head, radix_tree_node_rcu_free); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Load up this CPU's radix_tree_node buffer with sufficient objects to + * ensure that the addition of a single element in the tree cannot fail. 
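
/*
 * Illustrative sketch, not part of the patch: the usual preload pattern
 * that the radix-tree changes above keep working on both kernel flavours.
 * On !RT, radix_tree_preload() fills the per-CPU node pool and leaves
 * preemption disabled until radix_tree_preload_end(); on RT it simply
 * returns 0 and node allocation falls back to kmem_cache_alloc(), with the
 * per-CPU pool accessed under get_cpu_var()/put_cpu_var() as in the
 * lib/radix-tree.c hunk.  The root, index, item and external lock are
 * assumed to be managed by the (made-up) caller.
 */
static int example_radix_insert(struct radix_tree_root *root,
                                unsigned long index, void *item,
                                spinlock_t *tree_lock)
{
        int err;

        err = radix_tree_preload(GFP_KERNEL);
        if (err)
                return err;

        spin_lock(tree_lock);
        err = radix_tree_insert(root, index, item);
        spin_unlock(tree_lock);

        radix_tree_preload_end();
        return err;
}
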
On +@@ -240,6 +242,7 @@ out: + return ret; + } + EXPORT_SYMBOL(radix_tree_preload); ++#endif + + /* + * Return the maximum key which can be store into a +Index: linux-3.2/kernel/panic.c +=================================================================== +--- linux-3.2.orig/kernel/panic.c ++++ linux-3.2/kernel/panic.c +@@ -334,9 +334,11 @@ static u64 oops_id; + + static int init_oops_id(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + if (!oops_id) + get_random_bytes(&oops_id, sizeof(oops_id)); + else ++#endif + oops_id++; + + return 0; +Index: linux-3.2/ipc/mqueue.c +=================================================================== +--- linux-3.2.orig/ipc/mqueue.c ++++ linux-3.2/ipc/mqueue.c +@@ -820,12 +820,17 @@ static inline void pipelined_send(struct + struct msg_msg *message, + struct ext_wait_queue *receiver) + { ++ /* ++ * Keep them in one critical section for PREEMPT_RT: ++ */ ++ preempt_disable_rt(); + receiver->msg = message; + list_del(&receiver->list); + receiver->state = STATE_PENDING; + wake_up_process(receiver->task); + smp_wmb(); + receiver->state = STATE_READY; ++ preempt_enable_rt(); + } + + /* pipelined_receive() - if there is task waiting in sys_mq_timedsend() +@@ -839,15 +844,19 @@ static inline void pipelined_receive(str + wake_up_interruptible(&info->wait_q); + return; + } ++ /* ++ * Keep them in one critical section for PREEMPT_RT: ++ */ ++ preempt_disable_rt(); + msg_insert(sender->msg, info); + list_del(&sender->list); + sender->state = STATE_PENDING; + wake_up_process(sender->task); + smp_wmb(); + sender->state = STATE_READY; ++ preempt_enable_rt(); + } +- +-SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, ++ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, + size_t, msg_len, unsigned int, msg_prio, + const struct timespec __user *, u_abs_timeout) + { +Index: linux-3.2/ipc/msg.c +=================================================================== +--- linux-3.2.orig/ipc/msg.c ++++ linux-3.2/ipc/msg.c +@@ -259,12 +259,20 @@ static void expunge_all(struct msg_queue + while (tmp != &msq->q_receivers) { + struct msg_receiver *msr; + ++ /* ++ * Make sure that the wakeup doesnt preempt ++ * this CPU prematurely. (on PREEMPT_RT) ++ */ ++ preempt_disable_rt(); ++ + msr = list_entry(tmp, struct msg_receiver, r_list); + tmp = tmp->next; + msr->r_msg = NULL; + wake_up_process(msr->r_tsk); + smp_mb(); + msr->r_msg = ERR_PTR(res); ++ ++ preempt_enable_rt(); + } + } + +@@ -611,6 +619,12 @@ static inline int pipelined_send(struct + !security_msg_queue_msgrcv(msq, msg, msr->r_tsk, + msr->r_msgtype, msr->r_mode)) { + ++ /* ++ * Make sure that the wakeup doesnt preempt ++ * this CPU prematurely. 
(on PREEMPT_RT) ++ */ ++ preempt_disable_rt(); ++ + list_del(&msr->r_list); + if (msr->r_maxsize < msg->m_ts) { + msr->r_msg = NULL; +@@ -624,9 +638,11 @@ static inline int pipelined_send(struct + wake_up_process(msr->r_tsk); + smp_mb(); + msr->r_msg = msg; ++ preempt_enable_rt(); + + return 1; + } ++ preempt_enable_rt(); + } + } + return 0; +Index: linux-3.2/kernel/relay.c +=================================================================== +--- linux-3.2.orig/kernel/relay.c ++++ linux-3.2/kernel/relay.c +@@ -336,6 +336,10 @@ static void wakeup_readers(unsigned long + { + struct rchan_buf *buf = (struct rchan_buf *)data; + wake_up_interruptible(&buf->read_wait); ++ /* ++ * Stupid polling for now: ++ */ ++ mod_timer(&buf->timer, jiffies + 1); + } + + /** +@@ -353,6 +357,7 @@ static void __relay_reset(struct rchan_b + init_waitqueue_head(&buf->read_wait); + kref_init(&buf->kref); + setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf); ++ mod_timer(&buf->timer, jiffies + 1); + } else + del_timer_sync(&buf->timer); + +@@ -733,15 +738,6 @@ size_t relay_switch_subbuf(struct rchan_ + else + buf->early_bytes += buf->chan->subbuf_size - + buf->padding[old_subbuf]; +- smp_mb(); +- if (waitqueue_active(&buf->read_wait)) +- /* +- * Calling wake_up_interruptible() from here +- * will deadlock if we happen to be logging +- * from the scheduler (trying to re-grab +- * rq->lock), so defer it. +- */ +- mod_timer(&buf->timer, jiffies + 1); + } + + old = buf->data; +Index: linux-3.2/net/ipv4/route.c +=================================================================== +--- linux-3.2.orig/net/ipv4/route.c ++++ linux-3.2/net/ipv4/route.c +@@ -245,7 +245,7 @@ struct rt_hash_bucket { + }; + + #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \ +- defined(CONFIG_PROVE_LOCKING) ++ defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_PREEMPT_RT_FULL) + /* + * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks + * The size of this table is a power of two and depends on the number of CPUS. 
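
/*
 * Illustrative sketch, not part of the patch: the wakeup pattern used by
 * the ipc/mqueue.c and ipc/msg.c hunks above.  The woken task polls
 * w->state, so the waker must hand over the message, wake the task and
 * mark the slot ready without being preempted by that very task in
 * between.  preempt_disable_rt()/preempt_enable_rt() are no-ops on !RT,
 * where the ipc code already runs under a spinlock with preemption off;
 * on RT they close that window explicitly.  struct example_waiter and its
 * state values are made up for the example.
 */
struct example_waiter {
        struct task_struct      *task;
        void                    *msg;
        int                     state;  /* 0 waiting, 1 pending, 2 ready */
};

static void example_pipelined_wake(struct example_waiter *w, void *msg)
{
        preempt_disable_rt();
        w->msg = msg;
        w->state = 1;                   /* pending: handover in progress */
        wake_up_process(w->task);
        smp_wmb();
        w->state = 2;                   /* ready: receiver may return */
        preempt_enable_rt();
}
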
+Index: linux-3.2/include/linux/timer.h +=================================================================== +--- linux-3.2.orig/include/linux/timer.h ++++ linux-3.2/include/linux/timer.h +@@ -276,7 +276,7 @@ extern void add_timer(struct timer_list + + extern int try_to_del_timer_sync(struct timer_list *timer); + +-#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) + extern int del_timer_sync(struct timer_list *timer); + #else + # define del_timer_sync(t) del_timer(t) +Index: linux-3.2/kernel/timer.c +=================================================================== +--- linux-3.2.orig/kernel/timer.c ++++ linux-3.2/kernel/timer.c +@@ -75,6 +75,7 @@ struct tvec_root { + struct tvec_base { + spinlock_t lock; + struct timer_list *running_timer; ++ wait_queue_head_t wait_for_running_timer; + unsigned long timer_jiffies; + unsigned long next_timer; + struct tvec_root tv1; +@@ -653,6 +654,36 @@ static struct tvec_base *lock_timer_base + } + } + ++#ifndef CONFIG_PREEMPT_RT_FULL ++static inline struct tvec_base *switch_timer_base(struct timer_list *timer, ++ struct tvec_base *old, ++ struct tvec_base *new) ++{ ++ /* See the comment in lock_timer_base() */ ++ timer_set_base(timer, NULL); ++ spin_unlock(&old->lock); ++ spin_lock(&new->lock); ++ timer_set_base(timer, new); ++ return new; ++} ++#else ++static inline struct tvec_base *switch_timer_base(struct timer_list *timer, ++ struct tvec_base *old, ++ struct tvec_base *new) ++{ ++ /* ++ * We cannot do the above because we might be preempted and ++ * then the preempter would see NULL and loop forever. ++ */ ++ if (spin_trylock(&new->lock)) { ++ timer_set_base(timer, new); ++ spin_unlock(&old->lock); ++ return new; ++ } ++ return old; ++} ++#endif ++ + static inline int + __mod_timer(struct timer_list *timer, unsigned long expires, + bool pending_only, int pinned) +@@ -679,12 +710,15 @@ __mod_timer(struct timer_list *timer, un + + debug_activate(timer, expires); + ++ preempt_disable_rt(); + cpu = smp_processor_id(); + + #if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP) + if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) + cpu = get_nohz_timer_target(); + #endif ++ preempt_enable_rt(); ++ + new_base = per_cpu(tvec_bases, cpu); + + if (base != new_base) { +@@ -695,14 +729,8 @@ __mod_timer(struct timer_list *timer, un + * handler yet has not finished. This also guarantees that + * the timer is serialized wrt itself. + */ +- if (likely(base->running_timer != timer)) { +- /* See the comment in lock_timer_base() */ +- timer_set_base(timer, NULL); +- spin_unlock(&base->lock); +- base = new_base; +- spin_lock(&base->lock); +- timer_set_base(timer, base); +- } ++ if (likely(base->running_timer != timer)) ++ base = switch_timer_base(timer, base, new_base); + } + + timer->expires = expires; +@@ -885,6 +913,29 @@ void add_timer_on(struct timer_list *tim + } + EXPORT_SYMBOL_GPL(add_timer_on); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * Wait for a running timer ++ */ ++static void wait_for_running_timer(struct timer_list *timer) ++{ ++ struct tvec_base *base = timer->base; ++ ++ if (base->running_timer == timer) ++ wait_event(base->wait_for_running_timer, ++ base->running_timer != timer); ++} ++ ++# define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_tunning_timer) ++#else ++static inline void wait_for_running_timer(struct timer_list *timer) ++{ ++ cpu_relax(); ++} ++ ++# define wakeup_timer_waiters(b) do { } while (0) ++#endif ++ + /** + * del_timer - deactive a timer. 
+ * @timer: the timer to be deactivated +@@ -953,7 +1004,7 @@ out: + } + EXPORT_SYMBOL(try_to_del_timer_sync); + +-#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) + /** + * del_timer_sync - deactivate a timer and wait for the handler to finish. + * @timer: the timer to be deactivated +@@ -1013,7 +1064,7 @@ int del_timer_sync(struct timer_list *ti + int ret = try_to_del_timer_sync(timer); + if (ret >= 0) + return ret; +- cpu_relax(); ++ wait_for_running_timer(timer); + } + } + EXPORT_SYMBOL(del_timer_sync); +@@ -1124,10 +1175,11 @@ static inline void __run_timers(struct t + + spin_unlock_irq(&base->lock); + call_timer_fn(timer, fn, data); ++ base->running_timer = NULL; + spin_lock_irq(&base->lock); + } + } +- base->running_timer = NULL; ++ wake_up(&base->wait_for_running_timer); + spin_unlock_irq(&base->lock); + } + +@@ -1266,6 +1318,23 @@ unsigned long get_next_timer_interrupt(u + */ + if (cpu_is_offline(smp_processor_id())) + return now + NEXT_TIMER_MAX_DELTA; ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* ++ * On PREEMPT_RT we cannot sleep here. If the trylock does not ++ * succeed then we return the worst-case 'expires in 1 tick' ++ * value. We use the rt functions here directly to avoid a ++ * migrate_disable() call. ++ */ ++ if (spin_do_trylock(&base->lock)) { ++ if (time_before_eq(base->next_timer, base->timer_jiffies)) ++ base->next_timer = __next_timer_interrupt(base); ++ expires = base->next_timer; ++ rt_spin_unlock(&base->lock); ++ } else { ++ expires = now + 1; ++ } ++#else + spin_lock(&base->lock); + if (time_before_eq(base->next_timer, base->timer_jiffies)) + base->next_timer = __next_timer_interrupt(base); +@@ -1274,7 +1343,7 @@ unsigned long get_next_timer_interrupt(u + + if (time_before_eq(expires, now)) + return now; +- ++#endif + return cmp_next_hrtimer_event(now, expires); + } + #endif +@@ -1290,14 +1359,13 @@ void update_process_times(int user_tick) + + /* Note: this timer irq context must be accounted for as well. */ + account_process_tick(p, user_tick); ++ scheduler_tick(); + run_local_timers(); + rcu_check_callbacks(cpu, user_tick); +- printk_tick(); +-#ifdef CONFIG_IRQ_WORK ++#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL) + if (in_irq()) + irq_work_run(); + #endif +- scheduler_tick(); + run_posix_cpu_timers(p); + } + +@@ -1308,6 +1376,11 @@ static void run_timer_softirq(struct sof + { + struct tvec_base *base = __this_cpu_read(tvec_bases); + ++#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL) ++ irq_work_run(); ++#endif ++ ++ printk_tick(); + hrtimer_run_pending(); + + if (time_after_eq(jiffies, base->timer_jiffies)) +@@ -1634,6 +1707,7 @@ static int __cpuinit init_timers_cpu(int + } + + spin_lock_init(&base->lock); ++ init_waitqueue_head(&base->wait_for_running_timer); + + for (j = 0; j < TVN_SIZE; j++) { + INIT_LIST_HEAD(base->tv5.vec + j); +@@ -1669,6 +1743,7 @@ static void __cpuinit migrate_timers(int + { + struct tvec_base *old_base; + struct tvec_base *new_base; ++ unsigned long flags; + int i; + + BUG_ON(cpu_online(cpu)); +@@ -1678,8 +1753,11 @@ static void __cpuinit migrate_timers(int + * The caller is globally serialized and nobody else + * takes two locks at once, deadlock is not possible. 
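
/*
 * Illustrative sketch, not part of the patch: callers of del_timer_sync()
 * are unchanged by the rework above.  What changes is how a running
 * callback is waited for: instead of spinning with cpu_relax(), an RT
 * kernel sleeps on base->wait_for_running_timer until __run_timers()
 * issues the wake_up(), so on RT the caller must be allowed to sleep.
 * The device structure and function are made up.
 */
struct example_dev {
        struct timer_list       poll_timer;
};

static void example_dev_shutdown(struct example_dev *dev)
{
        /* May sleep on RT while a running callback finishes. */
        del_timer_sync(&dev->poll_timer);
}
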
+ */ +- spin_lock_irq(&new_base->lock); +- spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); ++ local_irq_save(flags); ++ while (!spin_trylock(&new_base->lock)) ++ cpu_relax(); ++ while (!spin_trylock(&old_base->lock)) ++ cpu_relax(); + + BUG_ON(old_base->running_timer); + +@@ -1693,7 +1771,9 @@ static void __cpuinit migrate_timers(int + } + + spin_unlock(&old_base->lock); +- spin_unlock_irq(&new_base->lock); ++ spin_unlock(&new_base->lock); ++ local_irq_restore(flags); ++ + put_cpu_var(tvec_bases); + } + #endif /* CONFIG_HOTPLUG_CPU */ +Index: linux-3.2/include/linux/hrtimer.h +=================================================================== +--- linux-3.2.orig/include/linux/hrtimer.h ++++ linux-3.2/include/linux/hrtimer.h +@@ -111,6 +111,8 @@ struct hrtimer { + enum hrtimer_restart (*function)(struct hrtimer *); + struct hrtimer_clock_base *base; + unsigned long state; ++ struct list_head cb_entry; ++ int irqsafe; + #ifdef CONFIG_TIMER_STATS + int start_pid; + void *start_site; +@@ -147,6 +149,7 @@ struct hrtimer_clock_base { + int index; + clockid_t clockid; + struct timerqueue_head active; ++ struct list_head expired; + ktime_t resolution; + ktime_t (*get_time)(void); + ktime_t softirq_time; +@@ -187,6 +190,9 @@ struct hrtimer_cpu_base { + unsigned long nr_hangs; + ktime_t max_hang_time; + #endif ++#ifdef CONFIG_PREEMPT_RT_BASE ++ wait_queue_head_t wait; ++#endif + struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; + }; + +@@ -374,6 +380,13 @@ static inline int hrtimer_restart(struct + return hrtimer_start_expires(timer, HRTIMER_MODE_ABS); + } + ++/* Softirq preemption could deadlock timer removal */ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ extern void hrtimer_wait_for_timer(const struct hrtimer *timer); ++#else ++# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0) ++#endif ++ + /* Query timers: */ + extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer); + extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp); +Index: linux-3.2/kernel/itimer.c +=================================================================== +--- linux-3.2.orig/kernel/itimer.c ++++ linux-3.2/kernel/itimer.c +@@ -214,6 +214,7 @@ again: + /* We are sharing ->siglock with it_real_fn() */ + if (hrtimer_try_to_cancel(timer) < 0) { + spin_unlock_irq(&tsk->sighand->siglock); ++ hrtimer_wait_for_timer(&tsk->signal->real_timer); + goto again; + } + expires = timeval_to_ktime(value->it_value); +Index: linux-3.2/kernel/posix-cpu-timers.c +=================================================================== +--- linux-3.2.orig/kernel/posix-cpu-timers.c ++++ linux-3.2/kernel/posix-cpu-timers.c +@@ -701,7 +701,7 @@ static int posix_cpu_timer_set(struct k_ + /* + * Disarm any old timer after extracting its expiry time. + */ +- BUG_ON(!irqs_disabled()); ++ BUG_ON_NONRT(!irqs_disabled()); + + ret = 0; + old_incr = timer->it.cpu.incr; +@@ -1223,7 +1223,7 @@ void posix_cpu_timer_schedule(struct k_i + /* + * Now re-arm for the new expiry time. 
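
/*
 * Illustrative sketch, not part of the patch: the cancel-and-retry pattern
 * used by the kernel/itimer.c hunk above.  hrtimer_try_to_cancel() returns
 * a negative value while the callback is running; the caller drops its own
 * lock and calls hrtimer_wait_for_timer(), which is a cpu_relax() spin on
 * !RT and a sleep on the timer base on RT, then retries.  The structure
 * and function names are made up.
 */
struct example_timer {
        spinlock_t              lock;
        struct hrtimer          timer;
        bool                    armed;
};

static void example_timer_cancel(struct example_timer *et)
{
again:
        spin_lock_irq(&et->lock);
        if (hrtimer_try_to_cancel(&et->timer) < 0) {
                /* Callback is running: drop the lock, wait, retry. */
                spin_unlock_irq(&et->lock);
                hrtimer_wait_for_timer(&et->timer);
                goto again;
        }
        et->armed = false;
        spin_unlock_irq(&et->lock);
}
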
+ */ +- BUG_ON(!irqs_disabled()); ++ BUG_ON_NONRT(!irqs_disabled()); + arm_timer(timer); + spin_unlock(&p->sighand->siglock); + +@@ -1290,10 +1290,11 @@ static inline int fastpath_timer_check(s + sig = tsk->signal; + if (sig->cputimer.running) { + struct task_cputime group_sample; ++ unsigned long flags; + +- raw_spin_lock(&sig->cputimer.lock); ++ raw_spin_lock_irqsave(&sig->cputimer.lock, flags); + group_sample = sig->cputimer.cputime; +- raw_spin_unlock(&sig->cputimer.lock); ++ raw_spin_unlock_irqrestore(&sig->cputimer.lock, flags); + + if (task_cputime_expired(&group_sample, &sig->cputime_expires)) + return 1; +@@ -1307,13 +1308,13 @@ static inline int fastpath_timer_check(s + * already updated our counts. We need to check if any timers fire now. + * Interrupts are disabled. + */ +-void run_posix_cpu_timers(struct task_struct *tsk) ++static void __run_posix_cpu_timers(struct task_struct *tsk) + { + LIST_HEAD(firing); + struct k_itimer *timer, *next; + unsigned long flags; + +- BUG_ON(!irqs_disabled()); ++ BUG_ON_NONRT(!irqs_disabled()); + + /* + * The fast path checks that there are no expired thread or thread +@@ -1371,6 +1372,190 @@ void run_posix_cpu_timers(struct task_st + } + } + ++#ifdef CONFIG_PREEMPT_RT_BASE ++#include ++#include ++DEFINE_PER_CPU(struct task_struct *, posix_timer_task); ++DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist); ++ ++static int posix_cpu_timers_thread(void *data) ++{ ++ int cpu = (long)data; ++ ++ BUG_ON(per_cpu(posix_timer_task,cpu) != current); ++ ++ while (!kthread_should_stop()) { ++ struct task_struct *tsk = NULL; ++ struct task_struct *next = NULL; ++ ++ if (cpu_is_offline(cpu)) ++ goto wait_to_die; ++ ++ /* grab task list */ ++ raw_local_irq_disable(); ++ tsk = per_cpu(posix_timer_tasklist, cpu); ++ per_cpu(posix_timer_tasklist, cpu) = NULL; ++ raw_local_irq_enable(); ++ ++ /* its possible the list is empty, just return */ ++ if (!tsk) { ++ set_current_state(TASK_INTERRUPTIBLE); ++ schedule(); ++ __set_current_state(TASK_RUNNING); ++ continue; ++ } ++ ++ /* Process task list */ ++ while (1) { ++ /* save next */ ++ next = tsk->posix_timer_list; ++ ++ /* run the task timers, clear its ptr and ++ * unreference it ++ */ ++ __run_posix_cpu_timers(tsk); ++ tsk->posix_timer_list = NULL; ++ put_task_struct(tsk); ++ ++ /* check if this is the last on the list */ ++ if (next == tsk) ++ break; ++ tsk = next; ++ } ++ } ++ return 0; ++ ++wait_to_die: ++ /* Wait for kthread_stop */ ++ set_current_state(TASK_INTERRUPTIBLE); ++ while (!kthread_should_stop()) { ++ schedule(); ++ set_current_state(TASK_INTERRUPTIBLE); ++ } ++ __set_current_state(TASK_RUNNING); ++ return 0; ++} ++ ++static inline int __fastpath_timer_check(struct task_struct *tsk) ++{ ++ /* tsk == current, ensure it is safe to use ->signal/sighand */ ++ if (unlikely(tsk->exit_state)) ++ return 0; ++ ++ if (!task_cputime_zero(&tsk->cputime_expires)) ++ return 1; ++ ++ if (!task_cputime_zero(&tsk->signal->cputime_expires)) ++ return 1; ++ ++ return 0; ++} ++ ++void run_posix_cpu_timers(struct task_struct *tsk) ++{ ++ unsigned long cpu = smp_processor_id(); ++ struct task_struct *tasklist; ++ ++ BUG_ON(!irqs_disabled()); ++ if(!per_cpu(posix_timer_task, cpu)) ++ return; ++ /* get per-cpu references */ ++ tasklist = per_cpu(posix_timer_tasklist, cpu); ++ ++ /* check to see if we're already queued */ ++ if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) { ++ get_task_struct(tsk); ++ if (tasklist) { ++ tsk->posix_timer_list = tasklist; ++ } else { ++ /* ++ * The list is terminated by a 
self-pointing ++ * task_struct ++ */ ++ tsk->posix_timer_list = tsk; ++ } ++ per_cpu(posix_timer_tasklist, cpu) = tsk; ++ ++ wake_up_process(per_cpu(posix_timer_task, cpu)); ++ } ++} ++ ++/* ++ * posix_cpu_thread_call - callback that gets triggered when a CPU is added. ++ * Here we can start up the necessary migration thread for the new CPU. ++ */ ++static int posix_cpu_thread_call(struct notifier_block *nfb, ++ unsigned long action, void *hcpu) ++{ ++ int cpu = (long)hcpu; ++ struct task_struct *p; ++ struct sched_param param; ++ ++ switch (action) { ++ case CPU_UP_PREPARE: ++ p = kthread_create(posix_cpu_timers_thread, hcpu, ++ "posixcputmr/%d",cpu); ++ if (IS_ERR(p)) ++ return NOTIFY_BAD; ++ p->flags |= PF_NOFREEZE; ++ kthread_bind(p, cpu); ++ /* Must be high prio to avoid getting starved */ ++ param.sched_priority = MAX_RT_PRIO-1; ++ sched_setscheduler(p, SCHED_FIFO, ¶m); ++ per_cpu(posix_timer_task,cpu) = p; ++ break; ++ case CPU_ONLINE: ++ /* Strictly unneccessary, as first user will wake it. */ ++ wake_up_process(per_cpu(posix_timer_task,cpu)); ++ break; ++#ifdef CONFIG_HOTPLUG_CPU ++ case CPU_UP_CANCELED: ++ /* Unbind it from offline cpu so it can run. Fall thru. */ ++ kthread_bind(per_cpu(posix_timer_task,cpu), ++ any_online_cpu(cpu_online_map)); ++ kthread_stop(per_cpu(posix_timer_task,cpu)); ++ per_cpu(posix_timer_task,cpu) = NULL; ++ break; ++ case CPU_DEAD: ++ kthread_stop(per_cpu(posix_timer_task,cpu)); ++ per_cpu(posix_timer_task,cpu) = NULL; ++ break; ++#endif ++ } ++ return NOTIFY_OK; ++} ++ ++/* Register at highest priority so that task migration (migrate_all_tasks) ++ * happens before everything else. ++ */ ++static struct notifier_block __devinitdata posix_cpu_thread_notifier = { ++ .notifier_call = posix_cpu_thread_call, ++ .priority = 10 ++}; ++ ++static int __init posix_cpu_thread_init(void) ++{ ++ void *hcpu = (void *)(long)smp_processor_id(); ++ /* Start one for boot CPU. */ ++ unsigned long cpu; ++ ++ /* init the per-cpu posix_timer_tasklets */ ++ for_each_cpu_mask(cpu, cpu_possible_map) ++ per_cpu(posix_timer_tasklist, cpu) = NULL; ++ ++ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu); ++ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu); ++ register_cpu_notifier(&posix_cpu_thread_notifier); ++ return 0; ++} ++early_initcall(posix_cpu_thread_init); ++#else /* CONFIG_PREEMPT_RT_BASE */ ++void run_posix_cpu_timers(struct task_struct *tsk) ++{ ++ __run_posix_cpu_timers(tsk); ++} ++#endif /* CONFIG_PREEMPT_RT_BASE */ ++ + /* + * Set one of the process-wide special case CPU timers or RLIMIT_CPU. + * The tsk->sighand->siglock must be held by the caller. +Index: linux-3.2/include/linux/rcupdate.h +=================================================================== +--- linux-3.2.orig/include/linux/rcupdate.h ++++ linux-3.2/include/linux/rcupdate.h +@@ -92,6 +92,9 @@ extern void call_rcu(struct rcu_head *he + + #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ + ++#ifdef CONFIG_PREEMPT_RT_FULL ++#define call_rcu_bh call_rcu ++#else + /** + * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. + * @head: structure to be used for queueing the RCU updates. +@@ -112,6 +115,7 @@ extern void call_rcu(struct rcu_head *he + */ + extern void call_rcu_bh(struct rcu_head *head, + void (*func)(struct rcu_head *head)); ++#endif + + /** + * call_rcu_sched() - Queue an RCU for invocation after sched grace period. 
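
/*
 * Illustrative sketch, not part of the patch: an RCU-bh read side keeps
 * working unchanged with the mapping above.  On PREEMPT_RT_FULL,
 * rcu_read_lock_bh() becomes local_bh_disable() plus a plain
 * rcu_read_lock(), and call_rcu_bh() is aliased to call_rcu(), so update
 * sides need no changes either.  The structure, pointer and function
 * names are made up.
 */
struct example_ent {
        int                     value;
        struct rcu_head         rcu;
};

static struct example_ent __rcu *example_current;

static int example_read_value(void)
{
        struct example_ent *e;
        int val = -1;

        rcu_read_lock_bh();
        e = rcu_dereference_bh(example_current);
        if (e)
                val = e->value;
        rcu_read_unlock_bh();

        return val;
}
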
+@@ -147,6 +151,11 @@ void synchronize_rcu(void); + * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. + */ + #define rcu_preempt_depth() (current->rcu_read_lock_nesting) ++#ifndef CONFIG_PREEMPT_RT_FULL ++#define sched_rcu_preempt_depth() rcu_preempt_depth() ++#else ++static inline int sched_rcu_preempt_depth(void) { return 0; } ++#endif + + #else /* #ifdef CONFIG_PREEMPT_RCU */ + +@@ -170,6 +179,8 @@ static inline int rcu_preempt_depth(void + return 0; + } + ++#define sched_rcu_preempt_depth() rcu_preempt_depth() ++ + #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ + + /* Internal to kernel */ +@@ -274,7 +285,14 @@ static inline int rcu_read_lock_held(voi + * rcu_read_lock_bh_held() is defined out of line to avoid #include-file + * hell. + */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++static inline int rcu_read_lock_bh_held(void) ++{ ++ return rcu_read_lock_held(); ++} ++#else + extern int rcu_read_lock_bh_held(void); ++#endif + + /** + * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section? +@@ -677,8 +695,12 @@ static inline void rcu_read_unlock(void) + static inline void rcu_read_lock_bh(void) + { + local_bh_disable(); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ rcu_read_lock(); ++#else + __acquire(RCU_BH); + rcu_read_acquire_bh(); ++#endif + } + + /* +@@ -688,8 +710,12 @@ static inline void rcu_read_lock_bh(void + */ + static inline void rcu_read_unlock_bh(void) + { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ rcu_read_unlock(); ++#else + rcu_read_release_bh(); + __release(RCU_BH); ++#endif + local_bh_enable(); + } + +Index: linux-3.2/kernel/sched_fair.c +=================================================================== +--- linux-3.2.orig/kernel/sched_fair.c ++++ linux-3.2/kernel/sched_fair.c +@@ -2806,6 +2806,10 @@ balance_tasks(struct rq *this_rq, int th + */ + if (idle == CPU_NEWLY_IDLE) + break; ++ ++ if (raw_spin_is_contended(&this_rq->lock) || ++ raw_spin_is_contended(&busiest->lock)) ++ break; + #endif + + /* +@@ -2946,6 +2950,20 @@ load_balance_fair(struct rq *this_rq, in + rem_load_move -= moved_load; + if (rem_load_move < 0) + break; ++ ++#ifdef CONFIG_PREEMPT ++ /* ++ * NEWIDLE balancing is a source of latency, so preemptible ++ * kernels will stop after the first task is pulled to minimize ++ * the critical section. ++ */ ++ if (idle == CPU_NEWLY_IDLE && this_rq->nr_running) ++ break; ++ ++ if (raw_spin_is_contended(&this_rq->lock) || ++ raw_spin_is_contended(&busiest->lock)) ++ break; ++#endif + } + rcu_read_unlock(); + +Index: linux-3.2/kernel/stop_machine.c +=================================================================== +--- linux-3.2.orig/kernel/stop_machine.c ++++ linux-3.2/kernel/stop_machine.c +@@ -29,12 +29,12 @@ struct cpu_stop_done { + atomic_t nr_todo; /* nr left to execute */ + bool executed; /* actually executed? */ + int ret; /* collected return value */ +- struct completion completion; /* fired if nr_todo reaches 0 */ ++ struct task_struct *waiter; /* woken when nr_todo reaches 0 */ + }; + + /* the actual stopper, one per every possible cpu, enabled on online cpus */ + struct cpu_stopper { +- spinlock_t lock; ++ raw_spinlock_t lock; + bool enabled; /* is this stopper enabled? 
*/ + struct list_head works; /* list of pending works */ + struct task_struct *thread; /* stopper thread */ +@@ -47,7 +47,7 @@ static void cpu_stop_init_done(struct cp + { + memset(done, 0, sizeof(*done)); + atomic_set(&done->nr_todo, nr_todo); +- init_completion(&done->completion); ++ done->waiter = current; + } + + /* signal completion unless @done is NULL */ +@@ -56,8 +56,10 @@ static void cpu_stop_signal_done(struct + if (done) { + if (executed) + done->executed = true; +- if (atomic_dec_and_test(&done->nr_todo)) +- complete(&done->completion); ++ if (atomic_dec_and_test(&done->nr_todo)) { ++ wake_up_process(done->waiter); ++ done->waiter = NULL; ++ } + } + } + +@@ -67,7 +69,7 @@ static void cpu_stop_queue_work(struct c + { + unsigned long flags; + +- spin_lock_irqsave(&stopper->lock, flags); ++ raw_spin_lock_irqsave(&stopper->lock, flags); + + if (stopper->enabled) { + list_add_tail(&work->list, &stopper->works); +@@ -75,7 +77,23 @@ static void cpu_stop_queue_work(struct c + } else + cpu_stop_signal_done(work->done, false); + +- spin_unlock_irqrestore(&stopper->lock, flags); ++ raw_spin_unlock_irqrestore(&stopper->lock, flags); ++} ++ ++static void wait_for_stop_done(struct cpu_stop_done *done) ++{ ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ while (atomic_read(&done->nr_todo)) { ++ schedule(); ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ } ++ /* ++ * We need to wait until cpu_stop_signal_done() has cleared ++ * done->waiter. ++ */ ++ while (done->waiter) ++ cpu_relax(); ++ set_current_state(TASK_RUNNING); + } + + /** +@@ -109,7 +127,7 @@ int stop_one_cpu(unsigned int cpu, cpu_s + + cpu_stop_init_done(&done, 1); + cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), &work); +- wait_for_completion(&done.completion); ++ wait_for_stop_done(&done); + return done.executed ? done.ret : -ENOENT; + } + +@@ -135,6 +153,7 @@ void stop_one_cpu_nowait(unsigned int cp + + /* static data for stop_cpus */ + static DEFINE_MUTEX(stop_cpus_mutex); ++static DEFINE_MUTEX(stopper_lock); + static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work); + + static void queue_stop_cpus_work(const struct cpumask *cpumask, +@@ -153,15 +172,14 @@ static void queue_stop_cpus_work(const s + } + + /* +- * Disable preemption while queueing to avoid getting +- * preempted by a stopper which might wait for other stoppers +- * to enter @fn which can lead to deadlock. ++ * Make sure that all work is queued on all cpus before we ++ * any of the cpus can execute it. + */ +- preempt_disable(); ++ mutex_lock(&stopper_lock); + for_each_cpu(cpu, cpumask) + cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), + &per_cpu(stop_cpus_work, cpu)); +- preempt_enable(); ++ mutex_unlock(&stopper_lock); + } + + static int __stop_cpus(const struct cpumask *cpumask, +@@ -171,7 +189,7 @@ static int __stop_cpus(const struct cpum + + cpu_stop_init_done(&done, cpumask_weight(cpumask)); + queue_stop_cpus_work(cpumask, fn, arg, &done); +- wait_for_completion(&done.completion); ++ wait_for_stop_done(&done); + return done.executed ? 
done.ret : -ENOENT; + } + +@@ -259,13 +277,13 @@ repeat: + } + + work = NULL; +- spin_lock_irq(&stopper->lock); ++ raw_spin_lock_irq(&stopper->lock); + if (!list_empty(&stopper->works)) { + work = list_first_entry(&stopper->works, + struct cpu_stop_work, list); + list_del_init(&work->list); + } +- spin_unlock_irq(&stopper->lock); ++ raw_spin_unlock_irq(&stopper->lock); + + if (work) { + cpu_stop_fn_t fn = work->fn; +@@ -275,6 +293,16 @@ repeat: + + __set_current_state(TASK_RUNNING); + ++ /* ++ * Wait until the stopper finished scheduling on all ++ * cpus ++ */ ++ mutex_lock(&stopper_lock); ++ /* ++ * Let other cpu threads continue as well ++ */ ++ mutex_unlock(&stopper_lock); ++ + /* cpu stop callbacks are not allowed to sleep */ + preempt_disable(); + +@@ -289,7 +317,13 @@ repeat: + kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL, + ksym_buf), arg); + ++ /* ++ * Make sure that the wakeup and setting done->waiter ++ * to NULL is atomic. ++ */ ++ local_irq_disable(); + cpu_stop_signal_done(done, true); ++ local_irq_enable(); + } else + schedule(); + +@@ -317,6 +351,7 @@ static int __cpuinit cpu_stop_cpu_callba + if (IS_ERR(p)) + return notifier_from_errno(PTR_ERR(p)); + get_task_struct(p); ++ p->flags |= PF_STOMPER; + kthread_bind(p, cpu); + sched_set_stop_task(cpu, p); + stopper->thread = p; +@@ -326,9 +361,9 @@ static int __cpuinit cpu_stop_cpu_callba + /* strictly unnecessary, as first user will wake it */ + wake_up_process(stopper->thread); + /* mark enabled */ +- spin_lock_irq(&stopper->lock); ++ raw_spin_lock_irq(&stopper->lock); + stopper->enabled = true; +- spin_unlock_irq(&stopper->lock); ++ raw_spin_unlock_irq(&stopper->lock); + break; + + #ifdef CONFIG_HOTPLUG_CPU +@@ -341,11 +376,11 @@ static int __cpuinit cpu_stop_cpu_callba + /* kill the stopper */ + kthread_stop(stopper->thread); + /* drain remaining works */ +- spin_lock_irq(&stopper->lock); ++ raw_spin_lock_irq(&stopper->lock); + list_for_each_entry(work, &stopper->works, list) + cpu_stop_signal_done(work->done, false); + stopper->enabled = false; +- spin_unlock_irq(&stopper->lock); ++ raw_spin_unlock_irq(&stopper->lock); + /* release the stopper */ + put_task_struct(stopper->thread); + stopper->thread = NULL; +@@ -376,7 +411,7 @@ static int __init cpu_stop_init(void) + for_each_possible_cpu(cpu) { + struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); + +- spin_lock_init(&stopper->lock); ++ raw_spin_lock_init(&stopper->lock); + INIT_LIST_HEAD(&stopper->works); + } + +@@ -570,7 +605,7 @@ int stop_machine_from_inactive_cpu(int ( + ret = stop_machine_cpu_stop(&smdata); + + /* Busy wait for completion. 
*/ +- while (!completion_done(&done.completion)) ++ while (atomic_read(&done.nr_todo)) + cpu_relax(); + + mutex_unlock(&stop_cpus_mutex); +Index: linux-3.2/include/linux/cpu.h +=================================================================== +--- linux-3.2.orig/include/linux/cpu.h ++++ linux-3.2/include/linux/cpu.h +@@ -66,8 +66,10 @@ enum { + /* migration should happen before other stuff but after perf */ + CPU_PRI_PERF = 20, + CPU_PRI_MIGRATION = 10, +- /* prepare workqueues for other notifiers */ +- CPU_PRI_WORKQUEUE = 5, ++ ++ CPU_PRI_WORKQUEUE_ACTIVE = 5, /* prepare workqueues for others */ ++ CPU_PRI_NORMAL = 0, ++ CPU_PRI_WORKQUEUE_INACTIVE = -5, /* flush workqueues after others */ + }; + + #define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */ +@@ -167,6 +169,8 @@ extern struct sysdev_class cpu_sysdev_cl + + extern void get_online_cpus(void); + extern void put_online_cpus(void); ++extern void pin_current_cpu(void); ++extern void unpin_current_cpu(void); + #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri) + #define register_hotcpu_notifier(nb) register_cpu_notifier(nb) + #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) +@@ -189,6 +193,8 @@ static inline void cpu_hotplug_driver_un + + #define get_online_cpus() do { } while (0) + #define put_online_cpus() do { } while (0) ++static inline void pin_current_cpu(void) { } ++static inline void unpin_current_cpu(void) { } + #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) + /* These aren't inline functions due to a GCC bug. */ + #define register_hotcpu_notifier(nb) ({ (void)(nb); 0; }) +Index: linux-3.2/kernel/cpu.c +=================================================================== +--- linux-3.2.orig/kernel/cpu.c ++++ linux-3.2/kernel/cpu.c +@@ -58,6 +58,104 @@ static struct { + .refcount = 0, + }; + ++struct hotplug_pcp { ++ struct task_struct *unplug; ++ int refcount; ++ struct completion synced; ++}; ++ ++static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp); ++ ++/** ++ * pin_current_cpu - Prevent the current cpu from being unplugged ++ * ++ * Lightweight version of get_online_cpus() to prevent cpu from being ++ * unplugged when code runs in a migration disabled region. ++ * ++ * Must be called with preemption disabled (preempt_count = 1)! ++ */ ++void pin_current_cpu(void) ++{ ++ struct hotplug_pcp *hp; ++ ++retry: ++ hp = &__get_cpu_var(hotplug_pcp); ++ ++ if (!hp->unplug || hp->refcount || preempt_count() > 1 || ++ hp->unplug == current || (current->flags & PF_STOMPER)) { ++ hp->refcount++; ++ return; ++ } ++ preempt_enable(); ++ mutex_lock(&cpu_hotplug.lock); ++ mutex_unlock(&cpu_hotplug.lock); ++ preempt_disable(); ++ goto retry; ++} ++ ++/** ++ * unpin_current_cpu - Allow unplug of current cpu ++ * ++ * Must be called with preemption or interrupts disabled! ++ */ ++void unpin_current_cpu(void) ++{ ++ struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp); ++ ++ WARN_ON(hp->refcount <= 0); ++ ++ /* This is safe. sync_unplug_thread is pinned to this cpu */ ++ if (!--hp->refcount && hp->unplug && hp->unplug != current && ++ !(current->flags & PF_STOMPER)) ++ wake_up_process(hp->unplug); ++} ++ ++/* ++ * FIXME: Is this really correct under all circumstances ? 
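
/*
 * Illustrative sketch, not part of the patch: how the pin_current_cpu()/
 * unpin_current_cpu() helpers declared in linux/cpu.h above could be used
 * to keep per-CPU work and CPU unplug apart.  Judging by the kerneldoc,
 * the intended user is the migration-disable machinery rather than
 * individual drivers, so treat this purely as an API illustration; the
 * function name is made up.
 */
static int example_pinned_cpu_work(void)
{
        int cpu;

        preempt_disable();              /* pin_current_cpu() expects this */
        pin_current_cpu();              /* hold off unplug of this CPU */
        cpu = smp_processor_id();
        /* ... work that must finish before this CPU may go offline ... */
        unpin_current_cpu();
        preempt_enable();

        return cpu;
}
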
++ */ ++static int sync_unplug_thread(void *data) ++{ ++ struct hotplug_pcp *hp = data; ++ ++ preempt_disable(); ++ hp->unplug = current; ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ while (hp->refcount) { ++ schedule_preempt_disabled(); ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ } ++ set_current_state(TASK_RUNNING); ++ preempt_enable(); ++ complete(&hp->synced); ++ return 0; ++} ++ ++/* ++ * Start the sync_unplug_thread on the target cpu and wait for it to ++ * complete. ++ */ ++static int cpu_unplug_begin(unsigned int cpu) ++{ ++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); ++ struct task_struct *tsk; ++ ++ init_completion(&hp->synced); ++ tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu); ++ if (IS_ERR(tsk)) ++ return (PTR_ERR(tsk)); ++ kthread_bind(tsk, cpu); ++ wake_up_process(tsk); ++ wait_for_completion(&hp->synced); ++ return 0; ++} ++ ++static void cpu_unplug_done(unsigned int cpu) ++{ ++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); ++ ++ hp->unplug = NULL; ++} ++ + void get_online_cpus(void) + { + might_sleep(); +@@ -211,13 +309,14 @@ static int __ref take_cpu_down(void *_pa + /* Requires cpu_add_remove_lock to be held */ + static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) + { +- int err, nr_calls = 0; ++ int mycpu, err, nr_calls = 0; + void *hcpu = (void *)(long)cpu; + unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; + struct take_cpu_down_param tcd_param = { + .mod = mod, + .hcpu = hcpu, + }; ++ cpumask_var_t cpumask; + + if (num_online_cpus() == 1) + return -EBUSY; +@@ -225,7 +324,26 @@ static int __ref _cpu_down(unsigned int + if (!cpu_online(cpu)) + return -EINVAL; + ++ /* Move the downtaker off the unplug cpu */ ++ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL)) ++ return -ENOMEM; ++ cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu)); ++ set_cpus_allowed_ptr(current, cpumask); ++ free_cpumask_var(cpumask); ++ migrate_disable(); ++ mycpu = smp_processor_id(); ++ if (mycpu == cpu) { ++ printk(KERN_ERR "Yuck! 
Still on unplug CPU\n!"); ++ migrate_enable(); ++ return -EBUSY; ++ } ++ + cpu_hotplug_begin(); ++ err = cpu_unplug_begin(cpu); ++ if (err) { ++ printk("cpu_unplug_begin(%d) failed\n", cpu); ++ goto out_cancel; ++ } + + err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); + if (err) { +@@ -264,6 +382,9 @@ static int __ref _cpu_down(unsigned int + check_for_tasks(cpu); + + out_release: ++ cpu_unplug_done(cpu); ++out_cancel: ++ migrate_enable(); + cpu_hotplug_done(); + if (!err) + cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu); +Index: linux-3.2/lib/smp_processor_id.c +=================================================================== +--- linux-3.2.orig/lib/smp_processor_id.c ++++ linux-3.2/lib/smp_processor_id.c +@@ -39,9 +39,9 @@ notrace unsigned int debug_smp_processor + if (!printk_ratelimit()) + goto out_enable; + +- printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] " +- "code: %s/%d\n", +- preempt_count() - 1, current->comm, current->pid); ++ printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x %08x] " ++ "code: %s/%d\n", preempt_count() - 1, ++ __migrate_disabled(current), current->comm, current->pid); + print_symbol("caller is %s\n", (long)__builtin_return_address(0)); + dump_stack(); + +Index: linux-3.2/include/linux/ftrace_event.h +=================================================================== +--- linux-3.2.orig/include/linux/ftrace_event.h ++++ linux-3.2/include/linux/ftrace_event.h +@@ -49,7 +49,8 @@ struct trace_entry { + unsigned char flags; + unsigned char preempt_count; + int pid; +- int padding; ++ unsigned short migrate_disable; ++ unsigned short padding; + }; + + #define FTRACE_MAX_EVENT \ +Index: linux-3.2/kernel/trace/trace.c +=================================================================== +--- linux-3.2.orig/kernel/trace/trace.c ++++ linux-3.2/kernel/trace/trace.c +@@ -359,11 +359,13 @@ static DECLARE_DELAYED_WORK(wakeup_work, + */ + void trace_wake_up(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + const unsigned long delay = msecs_to_jiffies(2); + + if (trace_flags & TRACE_ITER_BLOCK) + return; + schedule_delayed_work(&wakeup_work, delay); ++#endif + } + + static int __init set_buf_size(char *str) +@@ -719,6 +721,12 @@ update_max_tr_single(struct trace_array + } + #endif /* CONFIG_TRACER_MAX_TRACE */ + ++#ifndef CONFIG_PREEMPT_RT_FULL ++static void default_wait_pipe(struct trace_iterator *iter); ++#else ++#define default_wait_pipe poll_wait_pipe ++#endif ++ + /** + * register_tracer - register a tracer with the ftrace system. + * @type - the plugin for the tracer +@@ -1123,6 +1131,8 @@ tracing_generic_entry_update(struct trac + ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | + ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | + (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); ++ ++ entry->migrate_disable = (tsk) ? 
__migrate_disabled(tsk) & 0xFF : 0; + } + EXPORT_SYMBOL_GPL(tracing_generic_entry_update); + +@@ -1850,9 +1860,10 @@ static void print_lat_help_header(struct + seq_puts(m, "# | / _----=> need-resched \n"); + seq_puts(m, "# || / _---=> hardirq/softirq \n"); + seq_puts(m, "# ||| / _--=> preempt-depth \n"); +- seq_puts(m, "# |||| / delay \n"); +- seq_puts(m, "# cmd pid ||||| time | caller \n"); +- seq_puts(m, "# \\ / ||||| \\ | / \n"); ++ seq_puts(m, "# |||| / _--=> migrate-disable\n"); ++ seq_puts(m, "# ||||| / delay \n"); ++ seq_puts(m, "# cmd pid |||||| time | caller \n"); ++ seq_puts(m, "# \\ / ||||| \\ | / \n"); + } + + static void print_func_help_header(struct seq_file *m) +@@ -3189,6 +3200,7 @@ static int tracing_release_pipe(struct i + return 0; + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + static unsigned int + tracing_poll_pipe(struct file *filp, poll_table *poll_table) + { +@@ -3210,8 +3222,7 @@ tracing_poll_pipe(struct file *filp, pol + } + } + +- +-void default_wait_pipe(struct trace_iterator *iter) ++static void default_wait_pipe(struct trace_iterator *iter) + { + DEFINE_WAIT(wait); + +@@ -3222,6 +3233,20 @@ void default_wait_pipe(struct trace_iter + + finish_wait(&trace_wait, &wait); + } ++#else ++static unsigned int ++tracing_poll_pipe(struct file *filp, poll_table *poll_table) ++{ ++ struct trace_iterator *iter = filp->private_data; ++ ++ if ((trace_flags & TRACE_ITER_BLOCK) || !trace_empty(iter)) ++ return POLLIN | POLLRDNORM; ++ poll_wait_pipe(iter); ++ if (!trace_empty(iter)) ++ return POLLIN | POLLRDNORM; ++ return 0; ++} ++#endif + + /* + * This is a make-shift waitqueue. +Index: linux-3.2/kernel/trace/trace_events.c +=================================================================== +--- linux-3.2.orig/kernel/trace/trace_events.c ++++ linux-3.2/kernel/trace/trace_events.c +@@ -116,6 +116,7 @@ static int trace_define_common_fields(vo + __common_field(unsigned char, flags); + __common_field(unsigned char, preempt_count); + __common_field(int, pid); ++ __common_field(unsigned short, migrate_disable); + __common_field(int, padding); + + return ret; +Index: linux-3.2/kernel/trace/trace_output.c +=================================================================== +--- linux-3.2.orig/kernel/trace/trace_output.c ++++ linux-3.2/kernel/trace/trace_output.c +@@ -591,6 +591,11 @@ int trace_print_lat_fmt(struct trace_seq + else + ret = trace_seq_putc(s, '.'); + ++ if (entry->migrate_disable) ++ ret = trace_seq_printf(s, "%x", entry->migrate_disable); ++ else ++ ret = trace_seq_putc(s, '.'); ++ + return ret; + } + +Index: linux-3.2/kernel/sched_debug.c +=================================================================== +--- linux-3.2.orig/kernel/sched_debug.c ++++ linux-3.2/kernel/sched_debug.c +@@ -235,6 +235,9 @@ void print_rt_rq(struct seq_file *m, int + P(rt_throttled); + PN(rt_time); + PN(rt_runtime); ++#ifdef CONFIG_SMP ++ P(rt_nr_migratory); ++#endif + + #undef PN + #undef P +@@ -484,6 +487,10 @@ void proc_sched_show_task(struct task_st + P(se.load.weight); + P(policy); + P(prio); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ P(migrate_disable); ++#endif ++ P(rt.nr_cpus_allowed); + #undef PN + #undef __PN + #undef P +Index: linux-3.2/kernel/trace/trace.h +=================================================================== +--- linux-3.2.orig/kernel/trace/trace.h ++++ linux-3.2/kernel/trace/trace.h +@@ -345,7 +345,6 @@ void trace_init_global_iter(struct trace + + void tracing_iter_reset(struct trace_iterator *iter, int cpu); + +-void default_wait_pipe(struct trace_iterator *iter); + void 
poll_wait_pipe(struct trace_iterator *iter); + + void ftrace(struct trace_array *tr, +Index: linux-3.2/kernel/trace/ring_buffer.c +=================================================================== +--- linux-3.2.orig/kernel/trace/ring_buffer.c ++++ linux-3.2/kernel/trace/ring_buffer.c +@@ -478,7 +478,7 @@ struct ring_buffer_per_cpu { + int cpu; + atomic_t record_disabled; + struct ring_buffer *buffer; +- raw_spinlock_t reader_lock; /* serialize readers */ ++ spinlock_t reader_lock; /* serialize readers */ + arch_spinlock_t lock; + struct lock_class_key lock_key; + struct list_head *pages; +@@ -1049,6 +1049,44 @@ static int rb_allocate_pages(struct ring + return -ENOMEM; + } + ++static inline int ok_to_lock(void) ++{ ++ if (in_nmi()) ++ return 0; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (in_atomic()) ++ return 0; ++#endif ++ return 1; ++} ++ ++static int ++read_buffer_lock(struct ring_buffer_per_cpu *cpu_buffer, ++ unsigned long *flags) ++{ ++ /* ++ * If an NMI die dumps out the content of the ring buffer ++ * do not grab locks. We also permanently disable the ring ++ * buffer too. A one time deal is all you get from reading ++ * the ring buffer from an NMI. ++ */ ++ if (!ok_to_lock()) { ++ if (spin_trylock_irqsave(&cpu_buffer->reader_lock, *flags)) ++ return 1; ++ tracing_off_permanent(); ++ return 0; ++ } ++ spin_lock_irqsave(&cpu_buffer->reader_lock, *flags); ++ return 1; ++} ++ ++static void ++read_buffer_unlock(struct ring_buffer_per_cpu *cpu_buffer, ++ unsigned long flags, int locked) ++{ ++ if (locked) ++ spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); ++} + static struct ring_buffer_per_cpu * + rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) + { +@@ -1064,7 +1102,7 @@ rb_allocate_cpu_buffer(struct ring_buffe + + cpu_buffer->cpu = cpu; + cpu_buffer->buffer = buffer; +- raw_spin_lock_init(&cpu_buffer->reader_lock); ++ spin_lock_init(&cpu_buffer->reader_lock); + lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); + cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; + +@@ -1259,9 +1297,11 @@ rb_remove_pages(struct ring_buffer_per_c + { + struct buffer_page *bpage; + struct list_head *p; ++ unsigned long flags; + unsigned i; ++ int locked; + +- raw_spin_lock_irq(&cpu_buffer->reader_lock); ++ locked = read_buffer_lock(cpu_buffer, &flags); + rb_head_page_deactivate(cpu_buffer); + + for (i = 0; i < nr_pages; i++) { +@@ -1279,7 +1319,7 @@ rb_remove_pages(struct ring_buffer_per_c + rb_check_pages(cpu_buffer); + + out: +- raw_spin_unlock_irq(&cpu_buffer->reader_lock); ++ read_buffer_unlock(cpu_buffer, flags, locked); + } + + static void +@@ -1288,9 +1328,11 @@ rb_insert_pages(struct ring_buffer_per_c + { + struct buffer_page *bpage; + struct list_head *p; ++ unsigned long flags; + unsigned i; ++ int locked; + +- raw_spin_lock_irq(&cpu_buffer->reader_lock); ++ locked = read_buffer_lock(cpu_buffer, &flags); + rb_head_page_deactivate(cpu_buffer); + + for (i = 0; i < nr_pages; i++) { +@@ -1305,7 +1347,7 @@ rb_insert_pages(struct ring_buffer_per_c + rb_check_pages(cpu_buffer); + + out: +- raw_spin_unlock_irq(&cpu_buffer->reader_lock); ++ read_buffer_unlock(cpu_buffer, flags, locked); + } + + /** +@@ -2689,7 +2731,7 @@ unsigned long ring_buffer_oldest_event_t + return 0; + + cpu_buffer = buffer->buffers[cpu]; +- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); ++ spin_lock_irqsave(&cpu_buffer->reader_lock, flags); + /* + * if the tail is on reader_page, oldest time stamp is on the reader + * page +@@ -2699,7 +2741,7 @@ unsigned long 
ring_buffer_oldest_event_t + else + bpage = rb_set_head_page(cpu_buffer); + ret = bpage->page->time_stamp; +- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); ++ spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); + + return ret; + } +@@ -2863,15 +2905,16 @@ void ring_buffer_iter_reset(struct ring_ + { + struct ring_buffer_per_cpu *cpu_buffer; + unsigned long flags; ++ int locked; + + if (!iter) + return; + + cpu_buffer = iter->cpu_buffer; + +- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); ++ locked = read_buffer_lock(cpu_buffer, &flags); + rb_iter_reset(iter); +- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); ++ read_buffer_unlock(cpu_buffer, flags, locked); + } + EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); + +@@ -3289,21 +3332,6 @@ rb_iter_peek(struct ring_buffer_iter *it + } + EXPORT_SYMBOL_GPL(ring_buffer_iter_peek); + +-static inline int rb_ok_to_lock(void) +-{ +- /* +- * If an NMI die dumps out the content of the ring buffer +- * do not grab locks. We also permanently disable the ring +- * buffer too. A one time deal is all you get from reading +- * the ring buffer from an NMI. +- */ +- if (likely(!in_nmi())) +- return 1; +- +- tracing_off_permanent(); +- return 0; +-} +- + /** + * ring_buffer_peek - peek at the next event to be read + * @buffer: The ring buffer to read +@@ -3321,22 +3349,17 @@ ring_buffer_peek(struct ring_buffer *buf + struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; + struct ring_buffer_event *event; + unsigned long flags; +- int dolock; ++ int locked; + + if (!cpumask_test_cpu(cpu, buffer->cpumask)) + return NULL; + +- dolock = rb_ok_to_lock(); + again: +- local_irq_save(flags); +- if (dolock) +- raw_spin_lock(&cpu_buffer->reader_lock); ++ locked = read_buffer_lock(cpu_buffer, &flags); + event = rb_buffer_peek(cpu_buffer, ts, lost_events); + if (event && event->type_len == RINGBUF_TYPE_PADDING) + rb_advance_reader(cpu_buffer); +- if (dolock) +- raw_spin_unlock(&cpu_buffer->reader_lock); +- local_irq_restore(flags); ++ read_buffer_unlock(cpu_buffer, flags, locked); + + if (event && event->type_len == RINGBUF_TYPE_PADDING) + goto again; +@@ -3358,11 +3381,12 @@ ring_buffer_iter_peek(struct ring_buffer + struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; + struct ring_buffer_event *event; + unsigned long flags; ++ int locked; + + again: +- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); ++ locked = read_buffer_lock(cpu_buffer, &flags); + event = rb_iter_peek(iter, ts); +- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); ++ read_buffer_unlock(cpu_buffer, flags, locked); + + if (event && event->type_len == RINGBUF_TYPE_PADDING) + goto again; +@@ -3388,9 +3412,7 @@ ring_buffer_consume(struct ring_buffer * + struct ring_buffer_per_cpu *cpu_buffer; + struct ring_buffer_event *event = NULL; + unsigned long flags; +- int dolock; +- +- dolock = rb_ok_to_lock(); ++ int locked; + + again: + /* might be called in atomic */ +@@ -3400,9 +3422,7 @@ ring_buffer_consume(struct ring_buffer * + goto out; + + cpu_buffer = buffer->buffers[cpu]; +- local_irq_save(flags); +- if (dolock) +- raw_spin_lock(&cpu_buffer->reader_lock); ++ locked = read_buffer_lock(cpu_buffer, &flags); + + event = rb_buffer_peek(cpu_buffer, ts, lost_events); + if (event) { +@@ -3410,9 +3430,8 @@ ring_buffer_consume(struct ring_buffer * + rb_advance_reader(cpu_buffer); + } + +- if (dolock) +- raw_spin_unlock(&cpu_buffer->reader_lock); +- local_irq_restore(flags); ++ read_buffer_unlock(cpu_buffer, flags, locked); ++ + + out: + 
preempt_enable(); +@@ -3497,17 +3516,18 @@ ring_buffer_read_start(struct ring_buffe + { + struct ring_buffer_per_cpu *cpu_buffer; + unsigned long flags; ++ int locked; + + if (!iter) + return; + + cpu_buffer = iter->cpu_buffer; + +- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); ++ locked = read_buffer_lock(cpu_buffer, &flags); + arch_spin_lock(&cpu_buffer->lock); + rb_iter_reset(iter); + arch_spin_unlock(&cpu_buffer->lock); +- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); ++ read_buffer_unlock(cpu_buffer, flags, locked); + } + EXPORT_SYMBOL_GPL(ring_buffer_read_start); + +@@ -3541,8 +3561,9 @@ ring_buffer_read(struct ring_buffer_iter + struct ring_buffer_event *event; + struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; + unsigned long flags; ++ int locked; + +- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); ++ locked = read_buffer_lock(cpu_buffer, &flags); + again: + event = rb_iter_peek(iter, ts); + if (!event) +@@ -3553,7 +3574,7 @@ ring_buffer_read(struct ring_buffer_iter + + rb_advance_iter(iter); + out: +- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); ++ read_buffer_unlock(cpu_buffer, flags, locked); + + return event; + } +@@ -3618,13 +3639,14 @@ void ring_buffer_reset_cpu(struct ring_b + { + struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; + unsigned long flags; ++ int locked; + + if (!cpumask_test_cpu(cpu, buffer->cpumask)) + return; + + atomic_inc(&cpu_buffer->record_disabled); + +- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); ++ locked = read_buffer_lock(cpu_buffer, &flags); + + if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) + goto out; +@@ -3636,7 +3658,7 @@ void ring_buffer_reset_cpu(struct ring_b + arch_spin_unlock(&cpu_buffer->lock); + + out: +- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); ++ read_buffer_unlock(cpu_buffer, flags, locked); + + atomic_dec(&cpu_buffer->record_disabled); + } +@@ -3663,22 +3685,16 @@ int ring_buffer_empty(struct ring_buffer + { + struct ring_buffer_per_cpu *cpu_buffer; + unsigned long flags; +- int dolock; ++ int locked; + int cpu; + int ret; + +- dolock = rb_ok_to_lock(); +- + /* yes this is racy, but if you don't like the race, lock the buffer */ + for_each_buffer_cpu(buffer, cpu) { + cpu_buffer = buffer->buffers[cpu]; +- local_irq_save(flags); +- if (dolock) +- raw_spin_lock(&cpu_buffer->reader_lock); ++ locked = read_buffer_lock(cpu_buffer, &flags); + ret = rb_per_cpu_empty(cpu_buffer); +- if (dolock) +- raw_spin_unlock(&cpu_buffer->reader_lock); +- local_irq_restore(flags); ++ read_buffer_unlock(cpu_buffer, flags, locked); + + if (!ret) + return 0; +@@ -3697,22 +3713,16 @@ int ring_buffer_empty_cpu(struct ring_bu + { + struct ring_buffer_per_cpu *cpu_buffer; + unsigned long flags; +- int dolock; ++ int locked; + int ret; + + if (!cpumask_test_cpu(cpu, buffer->cpumask)) + return 1; + +- dolock = rb_ok_to_lock(); +- + cpu_buffer = buffer->buffers[cpu]; +- local_irq_save(flags); +- if (dolock) +- raw_spin_lock(&cpu_buffer->reader_lock); ++ locked = read_buffer_lock(cpu_buffer, &flags); + ret = rb_per_cpu_empty(cpu_buffer); +- if (dolock) +- raw_spin_unlock(&cpu_buffer->reader_lock); +- local_irq_restore(flags); ++ read_buffer_unlock(cpu_buffer, flags, locked); + + return ret; + } +@@ -3887,6 +3897,7 @@ int ring_buffer_read_page(struct ring_bu + unsigned int commit; + unsigned int read; + u64 save_timestamp; ++ int locked; + int ret = -1; + + if (!cpumask_test_cpu(cpu, buffer->cpumask)) +@@ -3908,7 +3919,7 @@ int 
ring_buffer_read_page(struct ring_bu + if (!bpage) + goto out; + +- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); ++ locked = read_buffer_lock(cpu_buffer, &flags); + + reader = rb_get_reader_page(cpu_buffer); + if (!reader) +@@ -4032,7 +4043,7 @@ int ring_buffer_read_page(struct ring_bu + memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit); + + out_unlock: +- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); ++ read_buffer_unlock(cpu_buffer, flags, locked); + + out: + return ret; +Index: linux-3.2/kernel/lockdep.c +=================================================================== +--- linux-3.2.orig/kernel/lockdep.c ++++ linux-3.2/kernel/lockdep.c +@@ -3488,6 +3488,7 @@ static void check_flags(unsigned long fl + } + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * We dont accurately track softirq state in e.g. + * hardirq contexts (such as on 4KSTACKS), so only +@@ -3502,6 +3503,7 @@ static void check_flags(unsigned long fl + DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); + } + } ++#endif + + if (!debug_locks) + print_irqtrace_events(current); +Index: linux-3.2/kernel/Kconfig.locks +=================================================================== +--- linux-3.2.orig/kernel/Kconfig.locks ++++ linux-3.2/kernel/Kconfig.locks +@@ -199,4 +199,4 @@ config INLINE_WRITE_UNLOCK_IRQRESTORE + def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE + + config MUTEX_SPIN_ON_OWNER +- def_bool SMP && !DEBUG_MUTEXES ++ def_bool SMP && !DEBUG_MUTEXES && !PREEMPT_RT_FULL +Index: linux-3.2/include/linux/hardirq.h +=================================================================== +--- linux-3.2.orig/include/linux/hardirq.h ++++ linux-3.2/include/linux/hardirq.h +@@ -60,7 +60,11 @@ + #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) + #define NMI_OFFSET (1UL << NMI_SHIFT) + +-#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) ++#ifndef CONFIG_PREEMPT_RT_FULL ++# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) ++#else ++# define SOFTIRQ_DISABLE_OFFSET (0) ++#endif + + #ifndef PREEMPT_ACTIVE + #define PREEMPT_ACTIVE_BITS 1 +@@ -73,10 +77,17 @@ + #endif + + #define hardirq_count() (preempt_count() & HARDIRQ_MASK) +-#define softirq_count() (preempt_count() & SOFTIRQ_MASK) + #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ + | NMI_MASK)) + ++#ifndef CONFIG_PREEMPT_RT_FULL ++# define softirq_count() (preempt_count() & SOFTIRQ_MASK) ++# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) ++#else ++# define softirq_count() (0UL) ++extern int in_serving_softirq(void); ++#endif ++ + /* + * Are we doing bottom half or hardware interrupt processing? + * Are we in a softirq context? Interrupt context? +@@ -86,7 +97,6 @@ + #define in_irq() (hardirq_count()) + #define in_softirq() (softirq_count()) + #define in_interrupt() (irq_count()) +-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) + + /* + * Are we in NMI context? 
+Index: linux-3.2/arch/powerpc/kernel/irq.c +=================================================================== +--- linux-3.2.orig/arch/powerpc/kernel/irq.c ++++ linux-3.2/arch/powerpc/kernel/irq.c +@@ -443,6 +443,7 @@ void irq_ctx_init(void) + } + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + static inline void do_softirq_onstack(void) + { + struct thread_info *curtp, *irqtp; +@@ -479,7 +480,7 @@ void do_softirq(void) + + local_irq_restore(flags); + } +- ++#endif + + /* + * IRQ controller and virtual interrupts +Index: linux-3.2/arch/powerpc/kernel/misc_32.S +=================================================================== +--- linux-3.2.orig/arch/powerpc/kernel/misc_32.S ++++ linux-3.2/arch/powerpc/kernel/misc_32.S +@@ -36,6 +36,7 @@ + + .text + ++#ifndef CONFIG_PREEMPT_RT_FULL + _GLOBAL(call_do_softirq) + mflr r0 + stw r0,4(r1) +@@ -46,6 +47,7 @@ _GLOBAL(call_do_softirq) + lwz r0,4(r1) + mtlr r0 + blr ++#endif + + _GLOBAL(call_handle_irq) + mflr r0 +Index: linux-3.2/arch/powerpc/kernel/misc_64.S +=================================================================== +--- linux-3.2.orig/arch/powerpc/kernel/misc_64.S ++++ linux-3.2/arch/powerpc/kernel/misc_64.S +@@ -29,6 +29,7 @@ + + .text + ++#ifndef CONFIG_PREEMPT_RT_FULL + _GLOBAL(call_do_softirq) + mflr r0 + std r0,16(r1) +@@ -39,6 +40,7 @@ _GLOBAL(call_do_softirq) + ld r0,16(r1) + mtlr r0 + blr ++#endif + + _GLOBAL(call_handle_irq) + ld r8,0(r6) +Index: linux-3.2/arch/sh/kernel/irq.c +=================================================================== +--- linux-3.2.orig/arch/sh/kernel/irq.c ++++ linux-3.2/arch/sh/kernel/irq.c +@@ -149,6 +149,7 @@ void irq_ctx_exit(int cpu) + hardirq_ctx[cpu] = NULL; + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + asmlinkage void do_softirq(void) + { + unsigned long flags; +@@ -191,6 +192,7 @@ asmlinkage void do_softirq(void) + + local_irq_restore(flags); + } ++#endif + #else + static inline void handle_one_irq(unsigned int irq) + { +Index: linux-3.2/arch/sparc/kernel/irq_64.c +=================================================================== +--- linux-3.2.orig/arch/sparc/kernel/irq_64.c ++++ linux-3.2/arch/sparc/kernel/irq_64.c +@@ -699,6 +699,7 @@ void __irq_entry handler_irq(int pil, st + set_irq_regs(old_regs); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + void do_softirq(void) + { + unsigned long flags; +@@ -724,6 +725,7 @@ void do_softirq(void) + + local_irq_restore(flags); + } ++#endif + + #ifdef CONFIG_HOTPLUG_CPU + void fixup_irqs(void) +Index: linux-3.2/arch/x86/kernel/entry_64.S +=================================================================== +--- linux-3.2.orig/arch/x86/kernel/entry_64.S ++++ linux-3.2/arch/x86/kernel/entry_64.S +@@ -1192,6 +1192,7 @@ ENTRY(kernel_execve) + CFI_ENDPROC + END(kernel_execve) + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* Call softirq on interrupt stack. Interrupts are off. 
*/ + ENTRY(call_softirq) + CFI_STARTPROC +@@ -1211,6 +1212,7 @@ ENTRY(call_softirq) + ret + CFI_ENDPROC + END(call_softirq) ++#endif + + #ifdef CONFIG_XEN + zeroentry xen_hypervisor_callback xen_do_hypervisor_callback +Index: linux-3.2/arch/x86/kernel/irq_32.c +=================================================================== +--- linux-3.2.orig/arch/x86/kernel/irq_32.c ++++ linux-3.2/arch/x86/kernel/irq_32.c +@@ -149,6 +149,7 @@ void __cpuinit irq_ctx_init(int cpu) + cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu)); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + asmlinkage void do_softirq(void) + { + unsigned long flags; +@@ -179,6 +180,7 @@ asmlinkage void do_softirq(void) + + local_irq_restore(flags); + } ++#endif + + bool handle_irq(unsigned irq, struct pt_regs *regs) + { +Index: linux-3.2/arch/x86/kernel/irq_64.c +=================================================================== +--- linux-3.2.orig/arch/x86/kernel/irq_64.c ++++ linux-3.2/arch/x86/kernel/irq_64.c +@@ -62,7 +62,7 @@ bool handle_irq(unsigned irq, struct pt_ + return true; + } + +- ++#ifndef CONFIG_PREEMPT_RT_FULL + extern void call_softirq(void); + + asmlinkage void do_softirq(void) +@@ -82,3 +82,4 @@ asmlinkage void do_softirq(void) + } + local_irq_restore(flags); + } ++#endif +Index: linux-3.2/drivers/md/raid5.c +=================================================================== +--- linux-3.2.orig/drivers/md/raid5.c ++++ linux-3.2/drivers/md/raid5.c +@@ -1245,8 +1245,9 @@ static void __raid_run_ops(struct stripe + struct raid5_percpu *percpu; + unsigned long cpu; + +- cpu = get_cpu(); ++ cpu = get_cpu_light(); + percpu = per_cpu_ptr(conf->percpu, cpu); ++ spin_lock(&percpu->lock); + if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { + ops_run_biofill(sh); + overlap_clear++; +@@ -1298,7 +1299,8 @@ static void __raid_run_ops(struct stripe + if (test_and_clear_bit(R5_Overlap, &dev->flags)) + wake_up(&sh->raid_conf->wait_for_overlap); + } +- put_cpu(); ++ spin_unlock(&percpu->lock); ++ put_cpu_light(); + } + + #ifdef CONFIG_MULTICORE_RAID456 +@@ -4531,6 +4533,7 @@ static int raid5_alloc_percpu(struct r5c + break; + } + per_cpu_ptr(conf->percpu, cpu)->scribble = scribble; ++ spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock); + } + #ifdef CONFIG_HOTPLUG_CPU + conf->cpu_notify.notifier_call = raid456_cpu_notify; +Index: linux-3.2/drivers/md/raid5.h +=================================================================== +--- linux-3.2.orig/drivers/md/raid5.h ++++ linux-3.2/drivers/md/raid5.h +@@ -405,6 +405,7 @@ struct r5conf { + int recovery_disabled; + /* per cpu variables */ + struct raid5_percpu { ++ spinlock_t lock; /* Protection for -RT */ + struct page *spare_page; /* Used when checking P/Q in raid6 */ + void *scribble; /* space for constructing buffer + * lists and performing address +Index: linux-3.2/include/linux/rtmutex.h +=================================================================== +--- linux-3.2.orig/include/linux/rtmutex.h ++++ linux-3.2/include/linux/rtmutex.h +@@ -14,7 +14,7 @@ + + #include + #include +-#include ++#include + + extern int max_lock_depth; /* for sysctl */ + +@@ -29,9 +29,10 @@ struct rt_mutex { + raw_spinlock_t wait_lock; + struct plist_head wait_list; + struct task_struct *owner; +-#ifdef CONFIG_DEBUG_RT_MUTEXES + int save_state; +- const char *name, *file; ++#ifdef CONFIG_DEBUG_RT_MUTEXES ++ const char *file; ++ const char *name; + int line; + void *magic; + #endif +@@ -56,19 +57,39 @@ struct hrtimer_sleeper; + #ifdef CONFIG_DEBUG_RT_MUTEXES + # define 
__DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ + , .name = #mutexname, .file = __FILE__, .line = __LINE__ +-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__) ++ ++# define rt_mutex_init(mutex) \ ++ do { \ ++ raw_spin_lock_init(&(mutex)->wait_lock); \ ++ __rt_mutex_init(mutex, #mutex); \ ++ } while (0) ++ + extern void rt_mutex_debug_task_free(struct task_struct *tsk); + #else + # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) +-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL) ++ ++# define rt_mutex_init(mutex) \ ++ do { \ ++ raw_spin_lock_init(&(mutex)->wait_lock); \ ++ __rt_mutex_init(mutex, #mutex); \ ++ } while (0) ++ + # define rt_mutex_debug_task_free(t) do { } while (0) + #endif + +-#define __RT_MUTEX_INITIALIZER(mutexname) \ +- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ ++#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ ++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ + , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list) \ + , .owner = NULL \ +- __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} ++ __DEBUG_RT_MUTEX_INITIALIZER(mutexname) ++ ++ ++#define __RT_MUTEX_INITIALIZER(mutexname) \ ++ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) } ++ ++#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \ ++ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ ++ , .save_state = 1 } + + #define DEFINE_RT_MUTEX(mutexname) \ + struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname) +@@ -90,6 +111,7 @@ extern void rt_mutex_destroy(struct rt_m + extern void rt_mutex_lock(struct rt_mutex *lock); + extern int rt_mutex_lock_interruptible(struct rt_mutex *lock, + int detect_deadlock); ++extern int rt_mutex_lock_killable(struct rt_mutex *lock, int detect_deadlock); + extern int rt_mutex_timed_lock(struct rt_mutex *lock, + struct hrtimer_sleeper *timeout, + int detect_deadlock); +Index: linux-3.2/kernel/rtmutex.c +=================================================================== +--- linux-3.2.orig/kernel/rtmutex.c ++++ linux-3.2/kernel/rtmutex.c +@@ -8,6 +8,12 @@ + * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt + * Copyright (C) 2006 Esben Nielsen + * ++ * Adaptive Spinlocks: ++ * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich, ++ * and Peter Morreale, ++ * Adaptive Spinlocks simplification: ++ * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt ++ * + * See Documentation/rt-mutex-design.txt for details. 
+ */ + #include +@@ -67,6 +73,11 @@ static void fixup_rt_mutex_waiters(struc + clear_rt_mutex_waiters(lock); + } + ++static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter) ++{ ++ return waiter && waiter != PI_WAKEUP_INPROGRESS; ++} ++ + /* + * We can speed up the acquire/release, if the architecture + * supports cmpxchg and if there's no debugging state to be set up +@@ -90,6 +101,12 @@ static inline void mark_rt_mutex_waiters + } + #endif + ++static inline void init_lists(struct rt_mutex *lock) ++{ ++ if (unlikely(!lock->wait_list.node_list.prev)) ++ plist_head_init(&lock->wait_list); ++} ++ + /* + * Calculate task priority from the waiter list priority + * +@@ -136,6 +153,14 @@ static void rt_mutex_adjust_prio(struct + raw_spin_unlock_irqrestore(&task->pi_lock, flags); + } + ++static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter) ++{ ++ if (waiter->savestate) ++ wake_up_lock_sleeper(waiter->task); ++ else ++ wake_up_process(waiter->task); ++} ++ + /* + * Max number of times we'll walk the boosting chain: + */ +@@ -196,7 +221,7 @@ static int rt_mutex_adjust_prio_chain(st + * reached or the state of the chain has changed while we + * dropped the locks. + */ +- if (!waiter) ++ if (!rt_mutex_real_waiter(waiter)) + goto out_unlock_pi; + + /* +@@ -247,13 +272,15 @@ static int rt_mutex_adjust_prio_chain(st + /* Release the task */ + raw_spin_unlock_irqrestore(&task->pi_lock, flags); + if (!rt_mutex_owner(lock)) { ++ struct rt_mutex_waiter *lock_top_waiter; ++ + /* + * If the requeue above changed the top waiter, then we need + * to wake the new top waiter up to try to get the lock. + */ +- +- if (top_waiter != rt_mutex_top_waiter(lock)) +- wake_up_process(rt_mutex_top_waiter(lock)->task); ++ lock_top_waiter = rt_mutex_top_waiter(lock); ++ if (top_waiter != lock_top_waiter) ++ rt_mutex_wake_waiter(lock_top_waiter); + raw_spin_unlock(&lock->wait_lock); + goto out_put_task; + } +@@ -298,6 +325,25 @@ static int rt_mutex_adjust_prio_chain(st + return ret; + } + ++ ++#define STEAL_NORMAL 0 ++#define STEAL_LATERAL 1 ++ ++/* ++ * Note that RT tasks are excluded from lateral-steals to prevent the ++ * introduction of an unbounded latency ++ */ ++static inline int lock_is_stealable(struct task_struct *task, ++ struct task_struct *pendowner, int mode) ++{ ++ if (mode == STEAL_NORMAL || rt_task(task)) { ++ if (task->prio >= pendowner->prio) ++ return 0; ++ } else if (task->prio > pendowner->prio) ++ return 0; ++ return 1; ++} ++ + /* + * Try to take an rt-mutex + * +@@ -307,8 +353,9 @@ static int rt_mutex_adjust_prio_chain(st + * @task: the task which wants to acquire the lock + * @waiter: the waiter that is queued to the lock's wait list. (could be NULL) + */ +-static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, +- struct rt_mutex_waiter *waiter) ++static int ++__try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, ++ struct rt_mutex_waiter *waiter, int mode) + { + /* + * We have to be careful here if the atomic speedups are +@@ -341,12 +388,14 @@ static int try_to_take_rt_mutex(struct r + * 3) it is top waiter + */ + if (rt_mutex_has_waiters(lock)) { +- if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) { +- if (!waiter || waiter != rt_mutex_top_waiter(lock)) +- return 0; +- } ++ struct task_struct *pown = rt_mutex_top_waiter(lock)->task; ++ ++ if (task != pown && !lock_is_stealable(task, pown, mode)) ++ return 0; + } + ++ /* We got the lock. 
*/ ++ + if (waiter || rt_mutex_has_waiters(lock)) { + unsigned long flags; + struct rt_mutex_waiter *top; +@@ -371,7 +420,6 @@ static int try_to_take_rt_mutex(struct r + raw_spin_unlock_irqrestore(&task->pi_lock, flags); + } + +- /* We got the lock. */ + debug_rt_mutex_lock(lock); + + rt_mutex_set_owner(lock, task); +@@ -381,6 +429,13 @@ static int try_to_take_rt_mutex(struct r + return 1; + } + ++static inline int ++try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, ++ struct rt_mutex_waiter *waiter) ++{ ++ return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL); ++} ++ + /* + * Task blocks on lock. + * +@@ -399,6 +454,23 @@ static int task_blocks_on_rt_mutex(struc + int chain_walk = 0, res; + + raw_spin_lock_irqsave(&task->pi_lock, flags); ++ ++ /* ++ * In the case of futex requeue PI, this will be a proxy ++ * lock. The task will wake unaware that it is enqueueed on ++ * this lock. Avoid blocking on two locks and corrupting ++ * pi_blocked_on via the PI_WAKEUP_INPROGRESS ++ * flag. futex_wait_requeue_pi() sets this when it wakes up ++ * before requeue (due to a signal or timeout). Do not enqueue ++ * the task if PI_WAKEUP_INPROGRESS is set. ++ */ ++ if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) { ++ raw_spin_unlock_irqrestore(&task->pi_lock, flags); ++ return -EAGAIN; ++ } ++ ++ BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)); ++ + __rt_mutex_adjust_prio(task); + waiter->task = task; + waiter->lock = lock; +@@ -423,7 +495,7 @@ static int task_blocks_on_rt_mutex(struc + plist_add(&waiter->pi_list_entry, &owner->pi_waiters); + + __rt_mutex_adjust_prio(owner); +- if (owner->pi_blocked_on) ++ if (rt_mutex_real_waiter(owner->pi_blocked_on)) + chain_walk = 1; + raw_spin_unlock_irqrestore(&owner->pi_lock, flags); + } +@@ -478,7 +550,7 @@ static void wakeup_next_waiter(struct rt + + raw_spin_unlock_irqrestore(¤t->pi_lock, flags); + +- wake_up_process(waiter->task); ++ rt_mutex_wake_waiter(waiter); + } + + /* +@@ -517,7 +589,7 @@ static void remove_waiter(struct rt_mute + } + __rt_mutex_adjust_prio(owner); + +- if (owner->pi_blocked_on) ++ if (rt_mutex_real_waiter(owner->pi_blocked_on)) + chain_walk = 1; + + raw_spin_unlock_irqrestore(&owner->pi_lock, flags); +@@ -551,23 +623,316 @@ void rt_mutex_adjust_pi(struct task_stru + raw_spin_lock_irqsave(&task->pi_lock, flags); + + waiter = task->pi_blocked_on; +- if (!waiter || waiter->list_entry.prio == task->prio) { ++ if (!rt_mutex_real_waiter(waiter) || ++ waiter->list_entry.prio == task->prio) { + raw_spin_unlock_irqrestore(&task->pi_lock, flags); + return; + } + +- raw_spin_unlock_irqrestore(&task->pi_lock, flags); +- + /* gets dropped in rt_mutex_adjust_prio_chain()! 
*/ + get_task_struct(task); ++ raw_spin_unlock_irqrestore(&task->pi_lock, flags); + rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * preemptible spin_lock functions: ++ */ ++static inline void rt_spin_lock_fastlock(struct rt_mutex *lock, ++ void (*slowfn)(struct rt_mutex *lock)) ++{ ++ might_sleep(); ++ ++ if (likely(rt_mutex_cmpxchg(lock, NULL, current))) ++ rt_mutex_deadlock_account_lock(lock, current); ++ else ++ slowfn(lock); ++} ++ ++static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock, ++ void (*slowfn)(struct rt_mutex *lock)) ++{ ++ if (likely(rt_mutex_cmpxchg(lock, current, NULL))) ++ rt_mutex_deadlock_account_unlock(current); ++ else ++ slowfn(lock); ++} ++ ++#ifdef CONFIG_SMP ++/* ++ * Note that owner is a speculative pointer and dereferencing relies ++ * on rcu_read_lock() and the check against the lock owner. ++ */ ++static int adaptive_wait(struct rt_mutex *lock, ++ struct task_struct *owner) ++{ ++ int res = 0; ++ ++ rcu_read_lock(); ++ for (;;) { ++ if (owner != rt_mutex_owner(lock)) ++ break; ++ /* ++ * Ensure that owner->on_cpu is dereferenced _after_ ++ * checking the above to be valid. ++ */ ++ barrier(); ++ if (!owner->on_cpu) { ++ res = 1; ++ break; ++ } ++ cpu_relax(); ++ } ++ rcu_read_unlock(); ++ return res; ++} ++#else ++static int adaptive_wait(struct rt_mutex *lock, ++ struct task_struct *orig_owner) ++{ ++ return 1; ++} ++#endif ++ ++# define pi_lock(lock) raw_spin_lock_irq(lock) ++# define pi_unlock(lock) raw_spin_unlock_irq(lock) ++ ++/* ++ * Slow path lock function spin_lock style: this variant is very ++ * careful not to miss any non-lock wakeups. ++ * ++ * We store the current state under p->pi_lock in p->saved_state and ++ * the try_to_wake_up() code handles this accordingly. ++ */ ++static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock) ++{ ++ struct task_struct *lock_owner, *self = current; ++ struct rt_mutex_waiter waiter, *top_waiter; ++ int ret; ++ ++ rt_mutex_init_waiter(&waiter, true); ++ ++ raw_spin_lock(&lock->wait_lock); ++ init_lists(lock); ++ ++ if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) { ++ raw_spin_unlock(&lock->wait_lock); ++ return; ++ } ++ ++ BUG_ON(rt_mutex_owner(lock) == self); ++ ++ /* ++ * We save whatever state the task is in and we'll restore it ++ * after acquiring the lock taking real wakeups into account ++ * as well. We are serialized via pi_lock against wakeups. See ++ * try_to_wake_up(). ++ */ ++ pi_lock(&self->pi_lock); ++ self->saved_state = self->state; ++ __set_current_state(TASK_UNINTERRUPTIBLE); ++ pi_unlock(&self->pi_lock); ++ ++ ret = task_blocks_on_rt_mutex(lock, &waiter, self, 0); ++ BUG_ON(ret); ++ ++ for (;;) { ++ /* Try to acquire the lock again. */ ++ if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL)) ++ break; ++ ++ top_waiter = rt_mutex_top_waiter(lock); ++ lock_owner = rt_mutex_owner(lock); ++ ++ raw_spin_unlock(&lock->wait_lock); ++ ++ debug_rt_mutex_print_deadlock(&waiter); ++ ++ if (top_waiter != &waiter || adaptive_wait(lock, lock_owner)) ++ schedule_rt_mutex(lock); ++ ++ raw_spin_lock(&lock->wait_lock); ++ ++ pi_lock(&self->pi_lock); ++ __set_current_state(TASK_UNINTERRUPTIBLE); ++ pi_unlock(&self->pi_lock); ++ } ++ ++ /* ++ * Restore the task state to current->saved_state. We set it ++ * to the original state above and the try_to_wake_up() code ++ * has possibly updated it when a real (non-rtmutex) wakeup ++ * happened while we were blocked. 
Clear saved_state so ++ * try_to_wakeup() does not get confused. ++ */ ++ pi_lock(&self->pi_lock); ++ __set_current_state(self->saved_state); ++ self->saved_state = TASK_RUNNING; ++ pi_unlock(&self->pi_lock); ++ ++ /* ++ * try_to_take_rt_mutex() sets the waiter bit ++ * unconditionally. We might have to fix that up: ++ */ ++ fixup_rt_mutex_waiters(lock); ++ ++ BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock)); ++ BUG_ON(!plist_node_empty(&waiter.list_entry)); ++ ++ raw_spin_unlock(&lock->wait_lock); ++ ++ debug_rt_mutex_free_waiter(&waiter); ++} ++ ++/* ++ * Slow path to release a rt_mutex spin_lock style ++ */ ++static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock) ++{ ++ raw_spin_lock(&lock->wait_lock); ++ ++ debug_rt_mutex_unlock(lock); ++ ++ rt_mutex_deadlock_account_unlock(current); ++ ++ if (!rt_mutex_has_waiters(lock)) { ++ lock->owner = NULL; ++ raw_spin_unlock(&lock->wait_lock); ++ return; ++ } ++ ++ wakeup_next_waiter(lock); ++ ++ raw_spin_unlock(&lock->wait_lock); ++ ++ /* Undo pi boosting.when necessary */ ++ rt_mutex_adjust_prio(current); ++} ++ ++void __lockfunc rt_spin_lock(spinlock_t *lock) ++{ ++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); ++ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); ++} ++EXPORT_SYMBOL(rt_spin_lock); ++ ++void __lockfunc __rt_spin_lock(struct rt_mutex *lock) ++{ ++ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock); ++} ++EXPORT_SYMBOL(__rt_spin_lock); ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass) ++{ ++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); ++ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); ++} ++EXPORT_SYMBOL(rt_spin_lock_nested); ++#endif ++ ++void __lockfunc rt_spin_unlock(spinlock_t *lock) ++{ ++ /* NOTE: we always pass in '1' for nested, for simplicity */ ++ spin_release(&lock->dep_map, 1, _RET_IP_); ++ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock); ++} ++EXPORT_SYMBOL(rt_spin_unlock); ++ ++void __lockfunc __rt_spin_unlock(struct rt_mutex *lock) ++{ ++ rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock); ++} ++EXPORT_SYMBOL(__rt_spin_unlock); ++ ++/* ++ * Wait for the lock to get unlocked: instead of polling for an unlock ++ * (like raw spinlocks do), we lock and unlock, to force the kernel to ++ * schedule if there's contention: ++ */ ++void __lockfunc rt_spin_unlock_wait(spinlock_t *lock) ++{ ++ spin_lock(lock); ++ spin_unlock(lock); ++} ++EXPORT_SYMBOL(rt_spin_unlock_wait); ++ ++int __lockfunc rt_spin_trylock(spinlock_t *lock) ++{ ++ int ret = rt_mutex_trylock(&lock->lock); ++ ++ if (ret) ++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(rt_spin_trylock); ++ ++int __lockfunc rt_spin_trylock_bh(spinlock_t *lock) ++{ ++ int ret; ++ ++ local_bh_disable(); ++ ret = rt_mutex_trylock(&lock->lock); ++ if (ret) { ++ migrate_disable(); ++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); ++ } else ++ local_bh_enable(); ++ return ret; ++} ++EXPORT_SYMBOL(rt_spin_trylock_bh); ++ ++int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags) ++{ ++ int ret; ++ ++ *flags = 0; ++ migrate_disable(); ++ ret = rt_mutex_trylock(&lock->lock); ++ if (ret) ++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); ++ else ++ migrate_enable(); ++ return ret; ++} ++EXPORT_SYMBOL(rt_spin_trylock_irqsave); ++ ++int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock) ++{ ++ /* Subtract 1 from counter unless that drops it to 0 (ie. 
it was 1) */ ++ if (atomic_add_unless(atomic, -1, 1)) ++ return 0; ++ migrate_disable(); ++ rt_spin_lock(lock); ++ if (atomic_dec_and_test(atomic)) ++ return 1; ++ rt_spin_unlock(lock); ++ migrate_enable(); ++ return 0; ++} ++EXPORT_SYMBOL(atomic_dec_and_spin_lock); ++ ++void ++__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key) ++{ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ /* ++ * Make sure we are not reinitializing a held lock: ++ */ ++ debug_check_no_locks_freed((void *)lock, sizeof(*lock)); ++ lockdep_init_map(&lock->dep_map, name, key, 0); ++#endif ++} ++EXPORT_SYMBOL(__rt_spin_lock_init); ++ ++#endif /* PREEMPT_RT_FULL */ ++ + /** + * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop + * @lock: the rt_mutex to take + * @state: the state the task should block in (TASK_INTERRUPTIBLE +- * or TASK_UNINTERRUPTIBLE) ++ * or TASK_UNINTERRUPTIBLE) + * @timeout: the pre-initialized and started timer, or NULL for none + * @waiter: the pre-initialized rt_mutex_waiter + * +@@ -631,9 +996,10 @@ rt_mutex_slowlock(struct rt_mutex *lock, + struct rt_mutex_waiter waiter; + int ret = 0; + +- debug_rt_mutex_init_waiter(&waiter); ++ rt_mutex_init_waiter(&waiter, false); + + raw_spin_lock(&lock->wait_lock); ++ init_lists(lock); + + /* Try to acquire the lock again: */ + if (try_to_take_rt_mutex(lock, current, NULL)) { +@@ -686,6 +1052,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lo + int ret = 0; + + raw_spin_lock(&lock->wait_lock); ++ init_lists(lock); + + if (likely(rt_mutex_owner(lock) != current)) { + +@@ -799,12 +1166,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock); + /** + * rt_mutex_lock_interruptible - lock a rt_mutex interruptible + * +- * @lock: the rt_mutex to be locked ++ * @lock: the rt_mutex to be locked + * @detect_deadlock: deadlock detection on/off + * + * Returns: +- * 0 on success +- * -EINTR when interrupted by a signal ++ * 0 on success ++ * -EINTR when interrupted by a signal + * -EDEADLK when the lock would deadlock (when deadlock detection is on) + */ + int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock, +@@ -818,17 +1185,38 @@ int __sched rt_mutex_lock_interruptible( + EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); + + /** ++ * rt_mutex_lock_killable - lock a rt_mutex killable ++ * ++ * @lock: the rt_mutex to be locked ++ * @detect_deadlock: deadlock detection on/off ++ * ++ * Returns: ++ * 0 on success ++ * -EINTR when interrupted by a signal ++ * -EDEADLK when the lock would deadlock (when deadlock detection is on) ++ */ ++int __sched rt_mutex_lock_killable(struct rt_mutex *lock, ++ int detect_deadlock) ++{ ++ might_sleep(); ++ ++ return rt_mutex_fastlock(lock, TASK_KILLABLE, ++ detect_deadlock, rt_mutex_slowlock); ++} ++EXPORT_SYMBOL_GPL(rt_mutex_lock_killable); ++ ++/** + * rt_mutex_timed_lock - lock a rt_mutex interruptible + * the timeout structure is provided + * by the caller + * +- * @lock: the rt_mutex to be locked ++ * @lock: the rt_mutex to be locked + * @timeout: timeout structure or NULL (no timeout) + * @detect_deadlock: deadlock detection on/off + * + * Returns: +- * 0 on success +- * -EINTR when interrupted by a signal ++ * 0 on success ++ * -EINTR when interrupted by a signal + * -ETIMEDOUT when the timeout expired + * -EDEADLK when the lock would deadlock (when deadlock detection is on) + */ +@@ -897,7 +1285,6 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy); + void __rt_mutex_init(struct rt_mutex *lock, const char *name) + { + lock->owner = NULL; +- raw_spin_lock_init(&lock->wait_lock); + plist_head_init(&lock->wait_list); + + 
debug_rt_mutex_init(lock, name); +@@ -917,7 +1304,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init); + void rt_mutex_init_proxy_locked(struct rt_mutex *lock, + struct task_struct *proxy_owner) + { +- __rt_mutex_init(lock, NULL); ++ rt_mutex_init(lock); + debug_rt_mutex_proxy_lock(lock, proxy_owner); + rt_mutex_set_owner(lock, proxy_owner); + rt_mutex_deadlock_account_lock(lock, proxy_owner); +Index: linux-3.2/kernel/futex.c +=================================================================== +--- linux-3.2.orig/kernel/futex.c ++++ linux-3.2/kernel/futex.c +@@ -1410,6 +1410,16 @@ retry_private: + requeue_pi_wake_futex(this, &key2, hb2); + drop_count++; + continue; ++ } else if (ret == -EAGAIN) { ++ /* ++ * Waiter was woken by timeout or ++ * signal and has set pi_blocked_on to ++ * PI_WAKEUP_INPROGRESS before we ++ * tried to enqueue it on the rtmutex. ++ */ ++ this->pi_state = NULL; ++ free_pi_state(pi_state); ++ continue; + } else if (ret) { + /* -EDEADLK */ + this->pi_state = NULL; +@@ -2254,7 +2264,7 @@ static int futex_wait_requeue_pi(u32 __u + struct hrtimer_sleeper timeout, *to = NULL; + struct rt_mutex_waiter rt_waiter; + struct rt_mutex *pi_mutex = NULL; +- struct futex_hash_bucket *hb; ++ struct futex_hash_bucket *hb, *hb2; + union futex_key key2 = FUTEX_KEY_INIT; + struct futex_q q = futex_q_init; + int res, ret; +@@ -2276,8 +2286,7 @@ static int futex_wait_requeue_pi(u32 __u + * The waiter is allocated on our stack, manipulated by the requeue + * code while we sleep on uaddr. + */ +- debug_rt_mutex_init_waiter(&rt_waiter); +- rt_waiter.task = NULL; ++ rt_mutex_init_waiter(&rt_waiter, false); + + ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE); + if (unlikely(ret != 0)) +@@ -2298,20 +2307,55 @@ static int futex_wait_requeue_pi(u32 __u + /* Queue the futex_q, drop the hb lock, wait for wakeup. */ + futex_wait_queue_me(hb, &q, to); + +- spin_lock(&hb->lock); +- ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); +- spin_unlock(&hb->lock); +- if (ret) +- goto out_put_keys; ++ /* ++ * On RT we must avoid races with requeue and trying to block ++ * on two mutexes (hb->lock and uaddr2's rtmutex) by ++ * serializing access to pi_blocked_on with pi_lock. ++ */ ++ raw_spin_lock_irq(¤t->pi_lock); ++ if (current->pi_blocked_on) { ++ /* ++ * We have been requeued or are in the process of ++ * being requeued. ++ */ ++ raw_spin_unlock_irq(¤t->pi_lock); ++ } else { ++ /* ++ * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS ++ * prevents a concurrent requeue from moving us to the ++ * uaddr2 rtmutex. After that we can safely acquire ++ * (and possibly block on) hb->lock. ++ */ ++ current->pi_blocked_on = PI_WAKEUP_INPROGRESS; ++ raw_spin_unlock_irq(¤t->pi_lock); ++ ++ spin_lock(&hb->lock); ++ ++ /* ++ * Clean up pi_blocked_on. We might leak it otherwise ++ * when we succeeded with the hb->lock in the fast ++ * path. ++ */ ++ raw_spin_lock_irq(¤t->pi_lock); ++ current->pi_blocked_on = NULL; ++ raw_spin_unlock_irq(¤t->pi_lock); ++ ++ ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); ++ spin_unlock(&hb->lock); ++ if (ret) ++ goto out_put_keys; ++ } + + /* +- * In order for us to be here, we know our q.key == key2, and since +- * we took the hb->lock above, we also know that futex_requeue() has +- * completed and we no longer have to concern ourselves with a wakeup +- * race with the atomic proxy lock acquisition by the requeue code. The +- * futex_requeue dropped our key1 reference and incremented our key2 +- * reference count. 
++ * In order to be here, we have either been requeued, are in ++ * the process of being requeued, or requeue successfully ++ * acquired uaddr2 on our behalf. If pi_blocked_on was ++ * non-null above, we may be racing with a requeue. Do not ++ * rely on q->lock_ptr to be hb2->lock until after blocking on ++ * hb->lock or hb2->lock. The futex_requeue dropped our key1 ++ * reference and incremented our key2 reference count. + */ ++ hb2 = hash_futex(&key2); + + /* Check if the requeue code acquired the second futex for us. */ + if (!q.rt_waiter) { +@@ -2320,9 +2364,10 @@ static int futex_wait_requeue_pi(u32 __u + * did a lock-steal - fix up the PI-state in that case. + */ + if (q.pi_state && (q.pi_state->owner != current)) { +- spin_lock(q.lock_ptr); ++ spin_lock(&hb2->lock); ++ BUG_ON(&hb2->lock != q.lock_ptr); + ret = fixup_pi_state_owner(uaddr2, &q, current); +- spin_unlock(q.lock_ptr); ++ spin_unlock(&hb2->lock); + } + } else { + /* +@@ -2335,7 +2380,8 @@ static int futex_wait_requeue_pi(u32 __u + ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1); + debug_rt_mutex_free_waiter(&rt_waiter); + +- spin_lock(q.lock_ptr); ++ spin_lock(&hb2->lock); ++ BUG_ON(&hb2->lock != q.lock_ptr); + /* + * Fixup the pi_state owner and possibly acquire the lock if we + * haven't already. +Index: linux-3.2/kernel/rtmutex_common.h +=================================================================== +--- linux-3.2.orig/kernel/rtmutex_common.h ++++ linux-3.2/kernel/rtmutex_common.h +@@ -49,6 +49,7 @@ struct rt_mutex_waiter { + struct plist_node pi_list_entry; + struct task_struct *task; + struct rt_mutex *lock; ++ bool savestate; + #ifdef CONFIG_DEBUG_RT_MUTEXES + unsigned long ip; + struct pid *deadlock_task_pid; +@@ -103,6 +104,8 @@ static inline struct task_struct *rt_mut + /* + * PI-futex support (proxy locking functions, etc.): + */ ++#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1) ++ + extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); + extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, + struct task_struct *proxy_owner); +@@ -123,4 +126,12 @@ extern int rt_mutex_finish_proxy_lock(st + # include "rtmutex.h" + #endif + ++static inline void ++rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate) ++{ ++ debug_rt_mutex_init_waiter(waiter); ++ waiter->task = NULL; ++ waiter->savestate = savestate; ++} ++ + #endif +Index: linux-3.2/include/linux/rwlock_types.h +=================================================================== +--- linux-3.2.orig/include/linux/rwlock_types.h ++++ linux-3.2/include/linux/rwlock_types.h +@@ -1,6 +1,10 @@ + #ifndef __LINUX_RWLOCK_TYPES_H + #define __LINUX_RWLOCK_TYPES_H + ++#if !defined(__LINUX_SPINLOCK_TYPES_H) ++# error "Do not include directly, include spinlock_types.h" ++#endif ++ + /* + * include/linux/rwlock_types.h - generic rwlock type definitions + * and initializers +@@ -43,6 +47,7 @@ typedef struct { + RW_DEP_MAP_INIT(lockname) } + #endif + +-#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) ++#define DEFINE_RWLOCK(name) \ ++ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name) + + #endif /* __LINUX_RWLOCK_TYPES_H */ +Index: linux-3.2/include/linux/spinlock_types.h +=================================================================== +--- linux-3.2.orig/include/linux/spinlock_types.h ++++ linux-3.2/include/linux/spinlock_types.h +@@ -9,80 +9,15 @@ + * Released under the General Public License (GPL). 
+ */ + +-#if defined(CONFIG_SMP) +-# include +-#else +-# include +-#endif +- +-#include +- +-typedef struct raw_spinlock { +- arch_spinlock_t raw_lock; +-#ifdef CONFIG_GENERIC_LOCKBREAK +- unsigned int break_lock; +-#endif +-#ifdef CONFIG_DEBUG_SPINLOCK +- unsigned int magic, owner_cpu; +- void *owner; +-#endif +-#ifdef CONFIG_DEBUG_LOCK_ALLOC +- struct lockdep_map dep_map; +-#endif +-} raw_spinlock_t; +- +-#define SPINLOCK_MAGIC 0xdead4ead +- +-#define SPINLOCK_OWNER_INIT ((void *)-1L) +- +-#ifdef CONFIG_DEBUG_LOCK_ALLOC +-# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +-#else +-# define SPIN_DEP_MAP_INIT(lockname) +-#endif ++#include + +-#ifdef CONFIG_DEBUG_SPINLOCK +-# define SPIN_DEBUG_INIT(lockname) \ +- .magic = SPINLOCK_MAGIC, \ +- .owner_cpu = -1, \ +- .owner = SPINLOCK_OWNER_INIT, ++#ifndef CONFIG_PREEMPT_RT_FULL ++# include ++# include + #else +-# define SPIN_DEBUG_INIT(lockname) ++# include ++# include ++# include + #endif + +-#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ +- { \ +- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ +- SPIN_DEBUG_INIT(lockname) \ +- SPIN_DEP_MAP_INIT(lockname) } +- +-#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ +- (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) +- +-#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) +- +-typedef struct spinlock { +- union { +- struct raw_spinlock rlock; +- +-#ifdef CONFIG_DEBUG_LOCK_ALLOC +-# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) +- struct { +- u8 __padding[LOCK_PADSIZE]; +- struct lockdep_map dep_map; +- }; +-#endif +- }; +-} spinlock_t; +- +-#define __SPIN_LOCK_INITIALIZER(lockname) \ +- { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } +- +-#define __SPIN_LOCK_UNLOCKED(lockname) \ +- (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) +- +-#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) +- +-#include +- + #endif /* __LINUX_SPINLOCK_TYPES_H */ +Index: linux-3.2/include/linux/spinlock_types_nort.h +=================================================================== +--- /dev/null ++++ linux-3.2/include/linux/spinlock_types_nort.h +@@ -0,0 +1,33 @@ ++#ifndef __LINUX_SPINLOCK_TYPES_NORT_H ++#define __LINUX_SPINLOCK_TYPES_NORT_H ++ ++#ifndef __LINUX_SPINLOCK_TYPES_H ++#error "Do not include directly. 
Include spinlock_types.h instead" ++#endif ++ ++/* ++ * The non RT version maps spinlocks to raw_spinlocks ++ */ ++typedef struct spinlock { ++ union { ++ struct raw_spinlock rlock; ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) ++ struct { ++ u8 __padding[LOCK_PADSIZE]; ++ struct lockdep_map dep_map; ++ }; ++#endif ++ }; ++} spinlock_t; ++ ++#define __SPIN_LOCK_INITIALIZER(lockname) \ ++ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } ++ ++#define __SPIN_LOCK_UNLOCKED(lockname) \ ++ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) ++ ++#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) ++ ++#endif +Index: linux-3.2/include/linux/spinlock_types_raw.h +=================================================================== +--- /dev/null ++++ linux-3.2/include/linux/spinlock_types_raw.h +@@ -0,0 +1,56 @@ ++#ifndef __LINUX_SPINLOCK_TYPES_RAW_H ++#define __LINUX_SPINLOCK_TYPES_RAW_H ++ ++#if defined(CONFIG_SMP) ++# include ++#else ++# include ++#endif ++ ++#include ++ ++typedef struct raw_spinlock { ++ arch_spinlock_t raw_lock; ++#ifdef CONFIG_GENERIC_LOCKBREAK ++ unsigned int break_lock; ++#endif ++#ifdef CONFIG_DEBUG_SPINLOCK ++ unsigned int magic, owner_cpu; ++ void *owner; ++#endif ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++} raw_spinlock_t; ++ ++#define SPINLOCK_MAGIC 0xdead4ead ++ ++#define SPINLOCK_OWNER_INIT ((void *)-1L) ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } ++#else ++# define SPIN_DEP_MAP_INIT(lockname) ++#endif ++ ++#ifdef CONFIG_DEBUG_SPINLOCK ++# define SPIN_DEBUG_INIT(lockname) \ ++ .magic = SPINLOCK_MAGIC, \ ++ .owner_cpu = -1, \ ++ .owner = SPINLOCK_OWNER_INIT, ++#else ++# define SPIN_DEBUG_INIT(lockname) ++#endif ++ ++#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ ++ { \ ++ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ ++ SPIN_DEBUG_INIT(lockname) \ ++ SPIN_DEP_MAP_INIT(lockname) } ++ ++#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ ++ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) ++ ++#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) ++ ++#endif +Index: linux-3.2/include/linux/rwlock_types_rt.h +=================================================================== +--- /dev/null ++++ linux-3.2/include/linux/rwlock_types_rt.h +@@ -0,0 +1,33 @@ ++#ifndef __LINUX_RWLOCK_TYPES_RT_H ++#define __LINUX_RWLOCK_TYPES_RT_H ++ ++#ifndef __LINUX_SPINLOCK_TYPES_H ++#error "Do not include directly. 
Include spinlock_types.h instead" ++#endif ++ ++/* ++ * rwlocks - rtmutex which allows single reader recursion ++ */ ++typedef struct { ++ struct rt_mutex lock; ++ int read_depth; ++ unsigned int break_lock; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++} rwlock_t; ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } ++#else ++# define RW_DEP_MAP_INIT(lockname) ++#endif ++ ++#define __RW_LOCK_UNLOCKED(name) \ ++ { .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock), \ ++ RW_DEP_MAP_INIT(name) } ++ ++#define DEFINE_RWLOCK(name) \ ++ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name) ++ ++#endif +Index: linux-3.2/include/linux/spinlock_types_rt.h +=================================================================== +--- /dev/null ++++ linux-3.2/include/linux/spinlock_types_rt.h +@@ -0,0 +1,49 @@ ++#ifndef __LINUX_SPINLOCK_TYPES_RT_H ++#define __LINUX_SPINLOCK_TYPES_RT_H ++ ++#ifndef __LINUX_SPINLOCK_TYPES_H ++#error "Do not include directly. Include spinlock_types.h instead" ++#endif ++ ++/* ++ * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field: ++ */ ++typedef struct spinlock { ++ struct rt_mutex lock; ++ unsigned int break_lock; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++} spinlock_t; ++ ++#ifdef CONFIG_DEBUG_RT_MUTEXES ++# define __RT_SPIN_INITIALIZER(name) \ ++ { \ ++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ ++ .save_state = 1, \ ++ .file = __FILE__, \ ++ .line = __LINE__ , \ ++ } ++#else ++# define __RT_SPIN_INITIALIZER(name) \ ++ { \ ++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ ++ .save_state = 1, \ ++ } ++#endif ++ ++/* ++.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock) ++*/ ++ ++#define __SPIN_LOCK_UNLOCKED(name) \ ++ { .lock = __RT_SPIN_INITIALIZER(name.lock), \ ++ SPIN_DEP_MAP_INIT(name) } ++ ++#define __DEFINE_SPINLOCK(name) \ ++ spinlock_t name = __SPIN_LOCK_UNLOCKED(name) ++ ++#define DEFINE_SPINLOCK(name) \ ++ spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name) ++ ++#endif +Index: linux-3.2/include/linux/mutex.h +=================================================================== +--- linux-3.2.orig/include/linux/mutex.h ++++ linux-3.2/include/linux/mutex.h +@@ -17,6 +17,17 @@ + + #include + ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ ++ , .dep_map = { .name = #lockname } ++#else ++# define __DEP_MAP_MUTEX_INITIALIZER(lockname) ++#endif ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++# include ++#else ++ + /* + * Simple, straightforward mutexes with strict semantics: + * +@@ -95,13 +106,6 @@ do { \ + static inline void mutex_destroy(struct mutex *lock) {} + #endif + +-#ifdef CONFIG_DEBUG_LOCK_ALLOC +-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ +- , .dep_map = { .name = #lockname } +-#else +-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) +-#endif +- + #define __MUTEX_INITIALIZER(lockname) \ + { .count = ATOMIC_INIT(1) \ + , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ +@@ -167,6 +171,9 @@ extern int __must_check mutex_lock_killa + */ + extern int mutex_trylock(struct mutex *lock); + extern void mutex_unlock(struct mutex *lock); ++ ++#endif /* !PREEMPT_RT_FULL */ ++ + extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); + + #ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX +Index: linux-3.2/include/linux/mutex_rt.h +=================================================================== +--- /dev/null ++++ 
linux-3.2/include/linux/mutex_rt.h +@@ -0,0 +1,84 @@ ++#ifndef __LINUX_MUTEX_RT_H ++#define __LINUX_MUTEX_RT_H ++ ++#ifndef __LINUX_MUTEX_H ++#error "Please include mutex.h" ++#endif ++ ++#include ++ ++/* FIXME: Just for __lockfunc */ ++#include ++ ++struct mutex { ++ struct rt_mutex lock; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++}; ++ ++#define __MUTEX_INITIALIZER(mutexname) \ ++ { \ ++ .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \ ++ __DEP_MAP_MUTEX_INITIALIZER(mutexname) \ ++ } ++ ++#define DEFINE_MUTEX(mutexname) \ ++ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) ++ ++extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key); ++extern void __lockfunc _mutex_lock(struct mutex *lock); ++extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock); ++extern int __lockfunc _mutex_lock_killable(struct mutex *lock); ++extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass); ++extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock); ++extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass); ++extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass); ++extern int __lockfunc _mutex_trylock(struct mutex *lock); ++extern void __lockfunc _mutex_unlock(struct mutex *lock); ++ ++#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock) ++#define mutex_lock(l) _mutex_lock(l) ++#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l) ++#define mutex_lock_killable(l) _mutex_lock_killable(l) ++#define mutex_trylock(l) _mutex_trylock(l) ++#define mutex_unlock(l) _mutex_unlock(l) ++#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock) ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s) ++# define mutex_lock_interruptible_nested(l, s) \ ++ _mutex_lock_interruptible_nested(l, s) ++# define mutex_lock_killable_nested(l, s) \ ++ _mutex_lock_killable_nested(l, s) ++ ++# define mutex_lock_nest_lock(lock, nest_lock) \ ++do { \ ++ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ ++ _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \ ++} while (0) ++ ++#else ++# define mutex_lock_nested(l, s) _mutex_lock(l) ++# define mutex_lock_interruptible_nested(l, s) \ ++ _mutex_lock_interruptible(l) ++# define mutex_lock_killable_nested(l, s) \ ++ _mutex_lock_killable(l) ++# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock) ++#endif ++ ++# define mutex_init(mutex) \ ++do { \ ++ static struct lock_class_key __key; \ ++ \ ++ rt_mutex_init(&(mutex)->lock); \ ++ __mutex_do_init((mutex), #mutex, &__key); \ ++} while (0) ++ ++# define __mutex_init(mutex, name, key) \ ++do { \ ++ rt_mutex_init(&(mutex)->lock); \ ++ __mutex_do_init((mutex), name, key); \ ++} while (0) ++ ++#endif +Index: linux-3.2/include/linux/rwsem.h +=================================================================== +--- linux-3.2.orig/include/linux/rwsem.h ++++ linux-3.2/include/linux/rwsem.h +@@ -17,6 +17,10 @@ + #include + #include + ++#ifdef CONFIG_PREEMPT_RT_FULL ++#include ++#else /* PREEMPT_RT_FULL */ ++ + struct rw_semaphore; + + #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK +@@ -131,4 +135,6 @@ extern void down_write_nested(struct rw_ + # define down_write_nested(sem, subclass) down_write(sem) + #endif + ++#endif /* !PREEMPT_RT_FULL */ ++ + #endif /* _LINUX_RWSEM_H */ +Index: linux-3.2/include/linux/rwsem_rt.h +=================================================================== +--- 
/dev/null ++++ linux-3.2/include/linux/rwsem_rt.h +@@ -0,0 +1,105 @@ ++#ifndef _LINUX_RWSEM_RT_H ++#define _LINUX_RWSEM_RT_H ++ ++#ifndef _LINUX_RWSEM_H ++#error "Include rwsem.h" ++#endif ++ ++/* ++ * RW-semaphores are a spinlock plus a reader-depth count. ++ * ++ * Note that the semantics are different from the usual ++ * Linux rw-sems, in PREEMPT_RT mode we do not allow ++ * multiple readers to hold the lock at once, we only allow ++ * a read-lock owner to read-lock recursively. This is ++ * better for latency, makes the implementation inherently ++ * fair and makes it simpler as well. ++ */ ++ ++#include ++ ++struct rw_semaphore { ++ struct rt_mutex lock; ++ int read_depth; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++}; ++ ++#define __RWSEM_INITIALIZER(name) \ ++ { .lock = __RT_MUTEX_INITIALIZER(name.lock), \ ++ RW_DEP_MAP_INIT(name) } ++ ++#define DECLARE_RWSEM(lockname) \ ++ struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname) ++ ++extern void __rt_rwsem_init(struct rw_semaphore *rwsem, char *name, ++ struct lock_class_key *key); ++ ++# define rt_init_rwsem(sem) \ ++do { \ ++ static struct lock_class_key __key; \ ++ \ ++ rt_mutex_init(&(sem)->lock); \ ++ __rt_rwsem_init((sem), #sem, &__key); \ ++} while (0) ++ ++extern void rt_down_write(struct rw_semaphore *rwsem); ++extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass); ++extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass); ++extern void rt_down_read(struct rw_semaphore *rwsem); ++extern int rt_down_write_trylock(struct rw_semaphore *rwsem); ++extern int rt_down_read_trylock(struct rw_semaphore *rwsem); ++extern void rt_up_read(struct rw_semaphore *rwsem); ++extern void rt_up_write(struct rw_semaphore *rwsem); ++extern void rt_downgrade_write(struct rw_semaphore *rwsem); ++ ++#define init_rwsem(sem) rt_init_rwsem(sem) ++#define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock) ++ ++static inline void down_read(struct rw_semaphore *sem) ++{ ++ rt_down_read(sem); ++} ++ ++static inline int down_read_trylock(struct rw_semaphore *sem) ++{ ++ return rt_down_read_trylock(sem); ++} ++ ++static inline void down_write(struct rw_semaphore *sem) ++{ ++ rt_down_write(sem); ++} ++ ++static inline int down_write_trylock(struct rw_semaphore *sem) ++{ ++ return rt_down_write_trylock(sem); ++} ++ ++static inline void up_read(struct rw_semaphore *sem) ++{ ++ rt_up_read(sem); ++} ++ ++static inline void up_write(struct rw_semaphore *sem) ++{ ++ rt_up_write(sem); ++} ++ ++static inline void downgrade_write(struct rw_semaphore *sem) ++{ ++ rt_downgrade_write(sem); ++} ++ ++static inline void down_read_nested(struct rw_semaphore *sem, int subclass) ++{ ++ return rt_down_read_nested(sem, subclass); ++} ++ ++static inline void down_write_nested(struct rw_semaphore *sem, int subclass) ++{ ++ rt_down_write_nested(sem, subclass); ++} ++ ++#endif +Index: linux-3.2/lib/Makefile +=================================================================== +--- linux-3.2.orig/lib/Makefile ++++ linux-3.2/lib/Makefile +@@ -37,8 +37,11 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o + obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o + obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o + obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o ++ ++ifneq ($(CONFIG_PREEMPT_RT_FULL),y) + lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o + lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o ++endif + + CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS)) + 
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o +Index: linux-3.2/include/linux/rwlock_rt.h +=================================================================== +--- /dev/null ++++ linux-3.2/include/linux/rwlock_rt.h +@@ -0,0 +1,123 @@ ++#ifndef __LINUX_RWLOCK_RT_H ++#define __LINUX_RWLOCK_RT_H ++ ++#ifndef __LINUX_SPINLOCK_H ++#error Do not include directly. Use spinlock.h ++#endif ++ ++#define rwlock_init(rwl) \ ++do { \ ++ static struct lock_class_key __key; \ ++ \ ++ rt_mutex_init(&(rwl)->lock); \ ++ __rt_rwlock_init(rwl, #rwl, &__key); \ ++} while (0) ++ ++extern void __lockfunc rt_write_lock(rwlock_t *rwlock); ++extern void __lockfunc rt_read_lock(rwlock_t *rwlock); ++extern int __lockfunc rt_write_trylock(rwlock_t *rwlock); ++extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags); ++extern int __lockfunc rt_read_trylock(rwlock_t *rwlock); ++extern void __lockfunc rt_write_unlock(rwlock_t *rwlock); ++extern void __lockfunc rt_read_unlock(rwlock_t *rwlock); ++extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock); ++extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock); ++extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key); ++ ++#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock)) ++#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock)) ++ ++#define write_trylock_irqsave(lock, flags) \ ++ __cond_lock(lock, rt_write_trylock_irqsave(lock, &flags)) ++ ++#define read_lock_irqsave(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ migrate_disable(); \ ++ flags = rt_read_lock_irqsave(lock); \ ++ } while (0) ++ ++#define write_lock_irqsave(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ migrate_disable(); \ ++ flags = rt_write_lock_irqsave(lock); \ ++ } while (0) ++ ++#define read_lock(lock) \ ++ do { \ ++ migrate_disable(); \ ++ rt_read_lock(lock); \ ++ } while (0) ++ ++#define read_lock_bh(lock) \ ++ do { \ ++ local_bh_disable(); \ ++ migrate_disable(); \ ++ rt_read_lock(lock); \ ++ } while (0) ++ ++#define read_lock_irq(lock) read_lock(lock) ++ ++#define write_lock(lock) \ ++ do { \ ++ migrate_disable(); \ ++ rt_write_lock(lock); \ ++ } while (0) ++ ++#define write_lock_bh(lock) \ ++ do { \ ++ local_bh_disable(); \ ++ migrate_disable(); \ ++ rt_write_lock(lock); \ ++ } while (0) ++ ++#define write_lock_irq(lock) write_lock(lock) ++ ++#define read_unlock(lock) \ ++ do { \ ++ rt_read_unlock(lock); \ ++ migrate_enable(); \ ++ } while (0) ++ ++#define read_unlock_bh(lock) \ ++ do { \ ++ rt_read_unlock(lock); \ ++ migrate_enable(); \ ++ local_bh_enable(); \ ++ } while (0) ++ ++#define read_unlock_irq(lock) read_unlock(lock) ++ ++#define write_unlock(lock) \ ++ do { \ ++ rt_write_unlock(lock); \ ++ migrate_enable(); \ ++ } while (0) ++ ++#define write_unlock_bh(lock) \ ++ do { \ ++ rt_write_unlock(lock); \ ++ migrate_enable(); \ ++ local_bh_enable(); \ ++ } while (0) ++ ++#define write_unlock_irq(lock) write_unlock(lock) ++ ++#define read_unlock_irqrestore(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ (void) flags; \ ++ rt_read_unlock(lock); \ ++ migrate_enable(); \ ++ } while (0) ++ ++#define write_unlock_irqrestore(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ (void) flags; \ ++ rt_write_unlock(lock); \ ++ migrate_enable(); \ ++ } while (0) ++ ++#endif +Index: linux-3.2/include/linux/spinlock.h +=================================================================== +--- 
linux-3.2.orig/include/linux/spinlock.h ++++ linux-3.2/include/linux/spinlock.h +@@ -254,7 +254,11 @@ static inline void do_raw_spin_unlock(ra + #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock)) + + /* Include rwlock functions */ +-#include ++#ifdef CONFIG_PREEMPT_RT_FULL ++# include ++#else ++# include ++#endif + + /* + * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: +@@ -265,6 +269,10 @@ static inline void do_raw_spin_unlock(ra + # include + #endif + ++#ifdef CONFIG_PREEMPT_RT_FULL ++# include ++#else /* PREEMPT_RT_FULL */ ++ + /* + * Map the spin_lock functions to the raw variants for PREEMPT_RT=n + */ +@@ -397,4 +405,6 @@ extern int _atomic_dec_and_lock(atomic_t + #define atomic_dec_and_lock(atomic, lock) \ + __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) + ++#endif /* !PREEMPT_RT_FULL */ ++ + #endif /* __LINUX_SPINLOCK_H */ +Index: linux-3.2/include/linux/spinlock_api_smp.h +=================================================================== +--- linux-3.2.orig/include/linux/spinlock_api_smp.h ++++ linux-3.2/include/linux/spinlock_api_smp.h +@@ -191,6 +191,8 @@ static inline int __raw_spin_trylock_bh( + return 0; + } + +-#include ++#ifndef CONFIG_PREEMPT_RT_FULL ++# include ++#endif + + #endif /* __LINUX_SPINLOCK_API_SMP_H */ +Index: linux-3.2/include/linux/spinlock_rt.h +=================================================================== +--- /dev/null ++++ linux-3.2/include/linux/spinlock_rt.h +@@ -0,0 +1,166 @@ ++#ifndef __LINUX_SPINLOCK_RT_H ++#define __LINUX_SPINLOCK_RT_H ++ ++#ifndef __LINUX_SPINLOCK_H ++#error Do not include directly. Use spinlock.h ++#endif ++ ++extern void ++__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key); ++ ++#define spin_lock_init(slock) \ ++do { \ ++ static struct lock_class_key __key; \ ++ \ ++ rt_mutex_init(&(slock)->lock); \ ++ __rt_spin_lock_init(slock, #slock, &__key); \ ++} while (0) ++ ++extern void __lockfunc rt_spin_lock(spinlock_t *lock); ++extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock); ++extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass); ++extern void __lockfunc rt_spin_unlock(spinlock_t *lock); ++extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock); ++extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags); ++extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock); ++extern int __lockfunc rt_spin_trylock(spinlock_t *lock); ++extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock); ++ ++/* ++ * lockdep-less calls, for derived types like rwlock: ++ * (for trylock they can use rt_mutex_trylock() directly. 
++ */ ++extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock); ++extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock); ++ ++#define spin_lock_local(lock) rt_spin_lock(lock) ++#define spin_unlock_local(lock) rt_spin_unlock(lock) ++ ++#define spin_lock(lock) \ ++ do { \ ++ migrate_disable(); \ ++ rt_spin_lock(lock); \ ++ } while (0) ++ ++#define spin_lock_bh(lock) \ ++ do { \ ++ local_bh_disable(); \ ++ migrate_disable(); \ ++ rt_spin_lock(lock); \ ++ } while (0) ++ ++#define spin_lock_irq(lock) spin_lock(lock) ++ ++#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock)) ++ ++#define spin_trylock(lock) \ ++({ \ ++ int __locked; \ ++ migrate_disable(); \ ++ __locked = spin_do_trylock(lock); \ ++ if (!__locked) \ ++ migrate_enable(); \ ++ __locked; \ ++}) ++ ++#ifdef CONFIG_LOCKDEP ++# define spin_lock_nested(lock, subclass) \ ++ do { \ ++ migrate_disable(); \ ++ rt_spin_lock_nested(lock, subclass); \ ++ } while (0) ++ ++# define spin_lock_irqsave_nested(lock, flags, subclass) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ flags = 0; \ ++ migrate_disable(); \ ++ rt_spin_lock_nested(lock, subclass); \ ++ } while (0) ++#else ++# define spin_lock_nested(lock, subclass) spin_lock(lock) ++ ++# define spin_lock_irqsave_nested(lock, flags, subclass) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ flags = 0; \ ++ spin_lock(lock); \ ++ } while (0) ++#endif ++ ++#define spin_lock_irqsave(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ flags = 0; \ ++ spin_lock(lock); \ ++ } while (0) ++ ++static inline unsigned long spin_lock_trace_flags(spinlock_t *lock) ++{ ++ unsigned long flags = 0; ++#ifdef CONFIG_TRACE_IRQFLAGS ++ flags = rt_spin_lock_trace_flags(lock); ++#else ++ spin_lock(lock); /* lock_local */ ++#endif ++ return flags; ++} ++ ++/* FIXME: we need rt_spin_lock_nest_lock */ ++#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0) ++ ++#define spin_unlock(lock) \ ++ do { \ ++ rt_spin_unlock(lock); \ ++ migrate_enable(); \ ++ } while (0) ++ ++#define spin_unlock_bh(lock) \ ++ do { \ ++ rt_spin_unlock(lock); \ ++ migrate_enable(); \ ++ local_bh_enable(); \ ++ } while (0) ++ ++#define spin_unlock_irq(lock) spin_unlock(lock) ++ ++#define spin_unlock_irqrestore(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ (void) flags; \ ++ spin_unlock(lock); \ ++ } while (0) ++ ++#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock)) ++#define spin_trylock_irq(lock) spin_trylock(lock) ++ ++#define spin_trylock_irqsave(lock, flags) \ ++ rt_spin_trylock_irqsave(lock, &(flags)) ++ ++#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock) ++ ++#ifdef CONFIG_GENERIC_LOCKBREAK ++# define spin_is_contended(lock) ((lock)->break_lock) ++#else ++# define spin_is_contended(lock) (((void)(lock), 0)) ++#endif ++ ++static inline int spin_can_lock(spinlock_t *lock) ++{ ++ return !rt_mutex_is_locked(&lock->lock); ++} ++ ++static inline int spin_is_locked(spinlock_t *lock) ++{ ++ return rt_mutex_is_locked(&lock->lock); ++} ++ ++static inline void assert_spin_locked(spinlock_t *lock) ++{ ++ BUG_ON(!spin_is_locked(lock)); ++} ++ ++#define atomic_dec_and_lock(atomic, lock) \ ++ atomic_dec_and_spin_lock(atomic, lock) ++ ++#endif +Index: linux-3.2/kernel/Makefile +=================================================================== +--- linux-3.2.orig/kernel/Makefile ++++ linux-3.2/kernel/Makefile +@@ -7,8 +7,8 @@ obj-y = sched.o fork.o exec_domain.o + sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \ + 
signal.o sys.o kmod.o workqueue.o pid.o \ + rcupdate.o extable.o params.o posix-timers.o \ +- kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ +- hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ ++ kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o \ ++ hrtimer.o nsproxy.o srcu.o semaphore.o \ + notifier.o ksysfs.o sched_clock.o cred.o \ + async.o range.o + obj-y += groups.o +@@ -29,7 +29,11 @@ obj-$(CONFIG_PROFILING) += profile.o + obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o + obj-$(CONFIG_STACKTRACE) += stacktrace.o + obj-y += time/ ++ifneq ($(CONFIG_PREEMPT_RT_FULL),y) ++obj-y += mutex.o + obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o ++obj-y += rwsem.o ++endif + obj-$(CONFIG_LOCKDEP) += lockdep.o + ifeq ($(CONFIG_PROC_FS),y) + obj-$(CONFIG_LOCKDEP) += lockdep_proc.o +@@ -41,6 +45,7 @@ endif + obj-$(CONFIG_RT_MUTEXES) += rtmutex.o + obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o + obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o ++obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o + obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o + obj-$(CONFIG_SMP) += smp.o + ifneq ($(CONFIG_SMP),y) +Index: linux-3.2/kernel/rt.c +=================================================================== +--- /dev/null ++++ linux-3.2/kernel/rt.c +@@ -0,0 +1,442 @@ ++/* ++ * kernel/rt.c ++ * ++ * Real-Time Preemption Support ++ * ++ * started by Ingo Molnar: ++ * ++ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar ++ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner ++ * ++ * historic credit for proving that Linux spinlocks can be implemented via ++ * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow ++ * and others) who prototyped it on 2.4 and did lots of comparative ++ * research and analysis; TimeSys, for proving that you can implement a ++ * fully preemptible kernel via the use of IRQ threading and mutexes; ++ * Bill Huey for persuasively arguing on lkml that the mutex model is the ++ * right one; and to MontaVista, who ported pmutexes to 2.6. ++ * ++ * This code is a from-scratch implementation and is not based on pmutexes, ++ * but the idea of converting spinlocks to mutexes is used here too. ++ * ++ * lock debugging, locking tree, deadlock detection: ++ * ++ * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey ++ * Released under the General Public License (GPL). ++ * ++ * Includes portions of the generic R/W semaphore implementation from: ++ * ++ * Copyright (c) 2001 David Howells (dhowells@redhat.com). ++ * - Derived partially from idea by Andrea Arcangeli ++ * - Derived also from comments by Linus ++ * ++ * Pending ownership of locks and ownership stealing: ++ * ++ * Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt ++ * ++ * (also by Steven Rostedt) ++ * - Converted single pi_lock to individual task locks. ++ * ++ * By Esben Nielsen: ++ * Doing priority inheritance with help of the scheduler. ++ * ++ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner ++ * - major rework based on Esben Nielsens initial patch ++ * - replaced thread_info references by task_struct refs ++ * - removed task->pending_owner dependency ++ * - BKL drop/reacquire for semaphore style locks to avoid deadlocks ++ * in the scheduler return path as discussed with Steven Rostedt ++ * ++ * Copyright (C) 2006, Kihon Technologies Inc. ++ * Steven Rostedt ++ * - debugged and patched Thomas Gleixner's rework. ++ * - added back the cmpxchg to the rework. ++ * - turned atomic require back on for SMP. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "rtmutex_common.h" ++ ++/* ++ * struct mutex functions ++ */ ++void __mutex_do_init(struct mutex *mutex, const char *name, ++ struct lock_class_key *key) ++{ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ /* ++ * Make sure we are not reinitializing a held lock: ++ */ ++ debug_check_no_locks_freed((void *)mutex, sizeof(*mutex)); ++ lockdep_init_map(&mutex->dep_map, name, key, 0); ++#endif ++ mutex->lock.save_state = 0; ++} ++EXPORT_SYMBOL(__mutex_do_init); ++ ++void __lockfunc _mutex_lock(struct mutex *lock) ++{ ++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); ++ rt_mutex_lock(&lock->lock); ++} ++EXPORT_SYMBOL(_mutex_lock); ++ ++int __lockfunc _mutex_lock_interruptible(struct mutex *lock) ++{ ++ int ret; ++ ++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); ++ ret = rt_mutex_lock_interruptible(&lock->lock, 0); ++ if (ret) ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_lock_interruptible); ++ ++int __lockfunc _mutex_lock_killable(struct mutex *lock) ++{ ++ int ret; ++ ++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); ++ ret = rt_mutex_lock_killable(&lock->lock, 0); ++ if (ret) ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_lock_killable); ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass) ++{ ++ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); ++ rt_mutex_lock(&lock->lock); ++} ++EXPORT_SYMBOL(_mutex_lock_nested); ++ ++void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) ++{ ++ mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_); ++ rt_mutex_lock(&lock->lock); ++} ++EXPORT_SYMBOL(_mutex_lock_nest_lock); ++ ++int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass) ++{ ++ int ret; ++ ++ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); ++ ret = rt_mutex_lock_interruptible(&lock->lock, 0); ++ if (ret) ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_lock_interruptible_nested); ++ ++int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass) ++{ ++ int ret; ++ ++ mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); ++ ret = rt_mutex_lock_killable(&lock->lock, 0); ++ if (ret) ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_lock_killable_nested); ++#endif ++ ++int __lockfunc _mutex_trylock(struct mutex *lock) ++{ ++ int ret = rt_mutex_trylock(&lock->lock); ++ ++ if (ret) ++ mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); ++ ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_trylock); ++ ++void __lockfunc _mutex_unlock(struct mutex *lock) ++{ ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ rt_mutex_unlock(&lock->lock); ++} ++EXPORT_SYMBOL(_mutex_unlock); ++ ++/* ++ * rwlock_t functions ++ */ ++int __lockfunc rt_write_trylock(rwlock_t *rwlock) ++{ ++ int ret = rt_mutex_trylock(&rwlock->lock); ++ ++ migrate_disable(); ++ if (ret) ++ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); ++ else ++ migrate_enable(); ++ ++ return ret; ++} ++EXPORT_SYMBOL(rt_write_trylock); ++ ++int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags) ++{ ++ int ret; ++ ++ *flags = 0; ++ migrate_disable(); ++ ret = rt_write_trylock(rwlock); ++ if (!ret) ++ migrate_enable(); ++ return ret; ++} ++EXPORT_SYMBOL(rt_write_trylock_irqsave); ++ ++int 
__lockfunc rt_read_trylock(rwlock_t *rwlock) ++{ ++ struct rt_mutex *lock = &rwlock->lock; ++ int ret = 1; ++ ++ /* ++ * recursive read locks succeed when current owns the lock, ++ * but not when read_depth == 0 which means that the lock is ++ * write locked. ++ */ ++ migrate_disable(); ++ if (rt_mutex_owner(lock) != current) ++ ret = rt_mutex_trylock(lock); ++ else if (!rwlock->read_depth) ++ ret = 0; ++ ++ if (ret) { ++ rwlock->read_depth++; ++ rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_); ++ } else ++ migrate_enable(); ++ ++ return ret; ++} ++EXPORT_SYMBOL(rt_read_trylock); ++ ++void __lockfunc rt_write_lock(rwlock_t *rwlock) ++{ ++ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); ++ __rt_spin_lock(&rwlock->lock); ++} ++EXPORT_SYMBOL(rt_write_lock); ++ ++void __lockfunc rt_read_lock(rwlock_t *rwlock) ++{ ++ struct rt_mutex *lock = &rwlock->lock; ++ ++ rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_); ++ ++ /* ++ * recursive read locks succeed when current owns the lock ++ */ ++ if (rt_mutex_owner(lock) != current) ++ __rt_spin_lock(lock); ++ rwlock->read_depth++; ++} ++ ++EXPORT_SYMBOL(rt_read_lock); ++ ++void __lockfunc rt_write_unlock(rwlock_t *rwlock) ++{ ++ /* NOTE: we always pass in '1' for nested, for simplicity */ ++ rwlock_release(&rwlock->dep_map, 1, _RET_IP_); ++ __rt_spin_unlock(&rwlock->lock); ++} ++EXPORT_SYMBOL(rt_write_unlock); ++ ++void __lockfunc rt_read_unlock(rwlock_t *rwlock) ++{ ++ rwlock_release(&rwlock->dep_map, 1, _RET_IP_); ++ ++ /* Release the lock only when read_depth is down to 0 */ ++ if (--rwlock->read_depth == 0) ++ __rt_spin_unlock(&rwlock->lock); ++} ++EXPORT_SYMBOL(rt_read_unlock); ++ ++unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock) ++{ ++ rt_write_lock(rwlock); ++ ++ return 0; ++} ++EXPORT_SYMBOL(rt_write_lock_irqsave); ++ ++unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock) ++{ ++ rt_read_lock(rwlock); ++ ++ return 0; ++} ++EXPORT_SYMBOL(rt_read_lock_irqsave); ++ ++void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key) ++{ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ /* ++ * Make sure we are not reinitializing a held lock: ++ */ ++ debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock)); ++ lockdep_init_map(&rwlock->dep_map, name, key, 0); ++#endif ++ rwlock->lock.save_state = 1; ++ rwlock->read_depth = 0; ++} ++EXPORT_SYMBOL(__rt_rwlock_init); ++ ++/* ++ * rw_semaphores ++ */ ++ ++void rt_up_write(struct rw_semaphore *rwsem) ++{ ++ rwsem_release(&rwsem->dep_map, 1, _RET_IP_); ++ rt_mutex_unlock(&rwsem->lock); ++} ++EXPORT_SYMBOL(rt_up_write); ++ ++void rt_up_read(struct rw_semaphore *rwsem) ++{ ++ rwsem_release(&rwsem->dep_map, 1, _RET_IP_); ++ if (--rwsem->read_depth == 0) ++ rt_mutex_unlock(&rwsem->lock); ++} ++EXPORT_SYMBOL(rt_up_read); ++ ++/* ++ * downgrade a write lock into a read lock ++ * - just wake up any readers at the front of the queue ++ */ ++void rt_downgrade_write(struct rw_semaphore *rwsem) ++{ ++ BUG_ON(rt_mutex_owner(&rwsem->lock) != current); ++ rwsem->read_depth = 1; ++} ++EXPORT_SYMBOL(rt_downgrade_write); ++ ++int rt_down_write_trylock(struct rw_semaphore *rwsem) ++{ ++ int ret = rt_mutex_trylock(&rwsem->lock); ++ ++ if (ret) ++ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(rt_down_write_trylock); ++ ++void rt_down_write(struct rw_semaphore *rwsem) ++{ ++ rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_); ++ rt_mutex_lock(&rwsem->lock); ++} ++EXPORT_SYMBOL(rt_down_write); ++ ++void rt_down_write_nested(struct rw_semaphore *rwsem, 
int subclass) ++{ ++ rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_); ++ rt_mutex_lock(&rwsem->lock); ++} ++EXPORT_SYMBOL(rt_down_write_nested); ++ ++int rt_down_read_trylock(struct rw_semaphore *rwsem) ++{ ++ struct rt_mutex *lock = &rwsem->lock; ++ int ret = 1; ++ ++ /* ++ * recursive read locks succeed when current owns the rwsem, ++ * but not when read_depth == 0 which means that the rwsem is ++ * write locked. ++ */ ++ if (rt_mutex_owner(lock) != current) ++ ret = rt_mutex_trylock(&rwsem->lock); ++ else if (!rwsem->read_depth) ++ ret = 0; ++ ++ if (ret) { ++ rwsem->read_depth++; ++ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_); ++ } ++ return ret; ++} ++EXPORT_SYMBOL(rt_down_read_trylock); ++ ++static void __rt_down_read(struct rw_semaphore *rwsem, int subclass) ++{ ++ struct rt_mutex *lock = &rwsem->lock; ++ ++ rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_); ++ ++ if (rt_mutex_owner(lock) != current) ++ rt_mutex_lock(&rwsem->lock); ++ rwsem->read_depth++; ++} ++ ++void rt_down_read(struct rw_semaphore *rwsem) ++{ ++ __rt_down_read(rwsem, 0); ++} ++EXPORT_SYMBOL(rt_down_read); ++ ++void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass) ++{ ++ __rt_down_read(rwsem, subclass); ++} ++EXPORT_SYMBOL(rt_down_read_nested); ++ ++void __rt_rwsem_init(struct rw_semaphore *rwsem, char *name, ++ struct lock_class_key *key) ++{ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ /* ++ * Make sure we are not reinitializing a held lock: ++ */ ++ debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem)); ++ lockdep_init_map(&rwsem->dep_map, name, key, 0); ++#endif ++ rwsem->read_depth = 0; ++ rwsem->lock.save_state = 0; ++} ++EXPORT_SYMBOL(__rt_rwsem_init); ++ ++/** ++ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0 ++ * @cnt: the atomic which we are to dec ++ * @lock: the mutex to return holding if we dec to 0 ++ * ++ * return true and hold lock if we dec to 0, return false otherwise ++ */ ++int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) ++{ ++ /* dec if we can't possibly hit 0 */ ++ if (atomic_add_unless(cnt, -1, 1)) ++ return 0; ++ /* we might hit 0, so take the lock */ ++ mutex_lock(lock); ++ if (!atomic_dec_and_test(cnt)) { ++ /* when we actually did the dec, we didn't hit 0 */ ++ mutex_unlock(lock); ++ return 0; ++ } ++ /* we hit 0, and we hold the lock */ ++ return 1; ++} ++EXPORT_SYMBOL(atomic_dec_and_mutex_lock); +Index: linux-3.2/kernel/spinlock.c +=================================================================== +--- linux-3.2.orig/kernel/spinlock.c ++++ linux-3.2/kernel/spinlock.c +@@ -110,8 +110,11 @@ void __lockfunc __raw_##op##_lock_bh(loc + * __[spin|read|write]_lock_bh() + */ + BUILD_LOCK_OPS(spin, raw_spinlock); ++ ++#ifndef CONFIG_PREEMPT_RT_FULL + BUILD_LOCK_OPS(read, rwlock); + BUILD_LOCK_OPS(write, rwlock); ++#endif + + #endif + +@@ -195,6 +198,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_ + EXPORT_SYMBOL(_raw_spin_unlock_bh); + #endif + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + #ifndef CONFIG_INLINE_READ_TRYLOCK + int __lockfunc _raw_read_trylock(rwlock_t *lock) + { +@@ -339,6 +344,8 @@ void __lockfunc _raw_write_unlock_bh(rwl + EXPORT_SYMBOL(_raw_write_unlock_bh); + #endif + ++#endif /* !PREEMPT_RT_FULL */ ++ + #ifdef CONFIG_DEBUG_LOCK_ALLOC + + void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) +Index: linux-3.2/lib/spinlock_debug.c +=================================================================== +--- linux-3.2.orig/lib/spinlock_debug.c ++++ linux-3.2/lib/spinlock_debug.c +@@ -31,6 +31,7 @@ void 
__raw_spin_lock_init(raw_spinlock_t + + EXPORT_SYMBOL(__raw_spin_lock_init); + ++#ifndef CONFIG_PREEMPT_RT_FULL + void __rwlock_init(rwlock_t *lock, const char *name, + struct lock_class_key *key) + { +@@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const + } + + EXPORT_SYMBOL(__rwlock_init); ++#endif + + static void spin_dump(raw_spinlock_t *lock, const char *msg) + { +@@ -155,6 +157,7 @@ void do_raw_spin_unlock(raw_spinlock_t * + arch_spin_unlock(&lock->raw_lock); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + static void rwlock_bug(rwlock_t *lock, const char *msg) + { + if (!debug_locks_off()) +@@ -296,3 +299,5 @@ void do_raw_write_unlock(rwlock_t *lock) + debug_write_unlock(lock); + arch_write_unlock(&lock->raw_lock); + } ++ ++#endif +Index: linux-3.2/include/linux/rcutree.h +=================================================================== +--- linux-3.2.orig/include/linux/rcutree.h ++++ linux-3.2/include/linux/rcutree.h +@@ -57,7 +57,11 @@ static inline void exit_rcu(void) + + #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ + ++#ifndef CONFIG_PREEMPT_RT_FULL + extern void synchronize_rcu_bh(void); ++#else ++# define synchronize_rcu_bh synchronize_rcu ++#endif + extern void synchronize_sched_expedited(void); + extern void synchronize_rcu_expedited(void); + +@@ -67,19 +71,29 @@ static inline void synchronize_rcu_bh_ex + } + + extern void rcu_barrier(void); ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define rcu_barrier_bh rcu_barrier ++#else + extern void rcu_barrier_bh(void); ++#endif + extern void rcu_barrier_sched(void); + + extern unsigned long rcutorture_testseq; + extern unsigned long rcutorture_vernum; + extern long rcu_batches_completed(void); +-extern long rcu_batches_completed_bh(void); + extern long rcu_batches_completed_sched(void); + + extern void rcu_force_quiescent_state(void); +-extern void rcu_bh_force_quiescent_state(void); + extern void rcu_sched_force_quiescent_state(void); + ++#ifndef CONFIG_PREEMPT_RT_FULL ++extern void rcu_bh_force_quiescent_state(void); ++extern long rcu_batches_completed_bh(void); ++#else ++# define rcu_bh_force_quiescent_state rcu_force_quiescent_state ++# define rcu_batches_completed_bh rcu_batches_completed ++#endif ++ + /* A context switch is a grace period for RCU-sched and RCU-bh. */ + static inline int rcu_blocking_is_gp(void) + { +Index: linux-3.2/kernel/rcupdate.c +=================================================================== +--- linux-3.2.orig/kernel/rcupdate.c ++++ linux-3.2/kernel/rcupdate.c +@@ -77,6 +77,7 @@ int debug_lockdep_rcu_enabled(void) + } + EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /** + * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section? + * +@@ -96,6 +97,7 @@ int rcu_read_lock_bh_held(void) + return in_softirq() || irqs_disabled(); + } + EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); ++#endif + + #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +Index: linux-3.2/kernel/rcutiny.c +=================================================================== +--- linux-3.2.orig/kernel/rcutiny.c ++++ linux-3.2/kernel/rcutiny.c +@@ -243,6 +243,7 @@ void call_rcu_sched(struct rcu_head *hea + } + EXPORT_SYMBOL_GPL(call_rcu_sched); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Post an RCU bottom-half callback to be invoked after any subsequent + * quiescent state. 
+@@ -252,3 +253,4 @@ void call_rcu_bh(struct rcu_head *head, + __call_rcu(head, func, &rcu_bh_ctrlblk); + } + EXPORT_SYMBOL_GPL(call_rcu_bh); ++#endif +Index: linux-3.2/include/linux/lglock.h +=================================================================== +--- linux-3.2.orig/include/linux/lglock.h ++++ linux-3.2/include/linux/lglock.h +@@ -70,6 +70,9 @@ + extern void name##_global_lock_online(void); \ + extern void name##_global_unlock_online(void); \ + ++ ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + #define DEFINE_LGLOCK(name) \ + \ + DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \ +@@ -169,4 +172,101 @@ + preempt_enable(); \ + } \ + EXPORT_SYMBOL(name##_global_unlock); ++ ++#else /* !PREEMPT_RT_FULL */ ++#define DEFINE_LGLOCK(name) \ ++ \ ++ DEFINE_PER_CPU(struct rt_mutex, name##_lock); \ ++ DEFINE_LGLOCK_LOCKDEP(name); \ ++ \ ++ void name##_lock_init(void) { \ ++ int i; \ ++ LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \ ++ for_each_possible_cpu(i) { \ ++ struct rt_mutex *lock; \ ++ lock = &per_cpu(name##_lock, i); \ ++ rt_mutex_init(lock); \ ++ } \ ++ } \ ++ EXPORT_SYMBOL(name##_lock_init); \ ++ \ ++ void name##_local_lock(void) { \ ++ struct rt_mutex *lock; \ ++ migrate_disable(); \ ++ rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \ ++ lock = &__get_cpu_var(name##_lock); \ ++ __rt_spin_lock(lock); \ ++ } \ ++ EXPORT_SYMBOL(name##_local_lock); \ ++ \ ++ void name##_local_unlock(void) { \ ++ struct rt_mutex *lock; \ ++ rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \ ++ lock = &__get_cpu_var(name##_lock); \ ++ __rt_spin_unlock(lock); \ ++ migrate_enable(); \ ++ } \ ++ EXPORT_SYMBOL(name##_local_unlock); \ ++ \ ++ void name##_local_lock_cpu(int cpu) { \ ++ struct rt_mutex *lock; \ ++ rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \ ++ lock = &per_cpu(name##_lock, cpu); \ ++ __rt_spin_lock(lock); \ ++ } \ ++ EXPORT_SYMBOL(name##_local_lock_cpu); \ ++ \ ++ void name##_local_unlock_cpu(int cpu) { \ ++ struct rt_mutex *lock; \ ++ rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \ ++ lock = &per_cpu(name##_lock, cpu); \ ++ __rt_spin_unlock(lock); \ ++ } \ ++ EXPORT_SYMBOL(name##_local_unlock_cpu); \ ++ \ ++ void name##_global_lock_online(void) { \ ++ int i; \ ++ rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \ ++ for_each_online_cpu(i) { \ ++ struct rt_mutex *lock; \ ++ lock = &per_cpu(name##_lock, i); \ ++ __rt_spin_lock(lock); \ ++ } \ ++ } \ ++ EXPORT_SYMBOL(name##_global_lock_online); \ ++ \ ++ void name##_global_unlock_online(void) { \ ++ int i; \ ++ rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \ ++ for_each_online_cpu(i) { \ ++ struct rt_mutex *lock; \ ++ lock = &per_cpu(name##_lock, i); \ ++ __rt_spin_unlock(lock); \ ++ } \ ++ } \ ++ EXPORT_SYMBOL(name##_global_unlock_online); \ ++ \ ++ void name##_global_lock(void) { \ ++ int i; \ ++ rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \ ++ for_each_possible_cpu(i) { \ ++ struct rt_mutex *lock; \ ++ lock = &per_cpu(name##_lock, i); \ ++ __rt_spin_lock(lock); \ ++ } \ ++ } \ ++ EXPORT_SYMBOL(name##_global_lock); \ ++ \ ++ void name##_global_unlock(void) { \ ++ int i; \ ++ rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \ ++ for_each_possible_cpu(i) { \ ++ struct rt_mutex *lock; \ ++ lock = &per_cpu(name##_lock, i); \ ++ __rt_spin_unlock(lock); \ ++ } \ ++ } \ ++ EXPORT_SYMBOL(name##_global_unlock); ++#endif /* PRREMPT_RT_FULL */ ++ + #endif +Index: linux-3.2/drivers/tty/serial/8250.c +=================================================================== +--- 
linux-3.2.orig/drivers/tty/serial/8250.c ++++ linux-3.2/drivers/tty/serial/8250.c +@@ -38,6 +38,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -1631,12 +1632,14 @@ static irqreturn_t serial8250_interrupt( + + l = l->next; + ++#ifndef CONFIG_PREEMPT_RT_FULL + if (l == i->head && pass_counter++ > PASS_LIMIT) { + /* If we hit this, we're dead. */ + printk_ratelimited(KERN_ERR + "serial8250: too much work for irq%d\n", irq); + break; + } ++#endif + } while (l != end); + + spin_unlock(&i->lock); +@@ -2846,14 +2849,14 @@ serial8250_console_write(struct console + + touch_nmi_watchdog(); + +- local_irq_save(flags); +- if (up->port.sysrq) { +- /* serial8250_handle_port() already took the lock */ ++ if (unlikely(in_kdb_printk())) { + locked = 0; +- } else if (oops_in_progress) { +- locked = spin_trylock(&up->port.lock); +- } else +- spin_lock(&up->port.lock); ++ } else { ++ if (up->port.sysrq || oops_in_progress) ++ locked = spin_trylock_irqsave(&up->port.lock, flags); ++ else ++ spin_lock_irqsave(&up->port.lock, flags); ++ } + + /* + * First save the IER then disable the interrupts +@@ -2885,8 +2888,7 @@ serial8250_console_write(struct console + check_modem_status(up); + + if (locked) +- spin_unlock(&up->port.lock); +- local_irq_restore(flags); ++ spin_unlock_irqrestore(&up->port.lock, flags); + } + + static int __init serial8250_console_setup(struct console *co, char *options) +Index: linux-3.2/drivers/tty/tty_buffer.c +=================================================================== +--- linux-3.2.orig/drivers/tty/tty_buffer.c ++++ linux-3.2/drivers/tty/tty_buffer.c +@@ -493,10 +493,14 @@ void tty_flip_buffer_push(struct tty_str + tty->buf.tail->commit = tty->buf.tail->used; + spin_unlock_irqrestore(&tty->buf.lock, flags); + ++#ifndef CONFIG_PREEMPT_RT_FULL + if (tty->low_latency) + flush_to_ldisc(&tty->buf.work); + else + schedule_work(&tty->buf.work); ++#else ++ flush_to_ldisc(&tty->buf.work); ++#endif + } + EXPORT_SYMBOL(tty_flip_buffer_push); + +Index: linux-3.2/drivers/tty/serial/omap-serial.c +=================================================================== +--- linux-3.2.orig/drivers/tty/serial/omap-serial.c ++++ linux-3.2/drivers/tty/serial/omap-serial.c +@@ -946,13 +946,12 @@ serial_omap_console_write(struct console + unsigned int ier; + int locked = 1; + +- local_irq_save(flags); + if (up->port.sysrq) + locked = 0; + else if (oops_in_progress) +- locked = spin_trylock(&up->port.lock); ++ locked = spin_trylock_irqsave(&up->port.lock, flags); + else +- spin_lock(&up->port.lock); ++ spin_lock_irqsave(&up->port.lock, flags); + + /* + * First save the IER then disable the interrupts +@@ -979,8 +978,7 @@ serial_omap_console_write(struct console + check_modem_status(up); + + if (locked) +- spin_unlock(&up->port.lock); +- local_irq_restore(flags); ++ spin_unlock_irqrestore(&up->port.lock, flags); + } + + static int __init +Index: linux-3.2/fs/namespace.c +=================================================================== +--- linux-3.2.orig/fs/namespace.c ++++ linux-3.2/fs/namespace.c +@@ -341,8 +341,14 @@ int mnt_want_write(struct vfsmount *mnt) + * incremented count after it has set MNT_WRITE_HOLD. + */ + smp_mb(); +- while (mnt->mnt_flags & MNT_WRITE_HOLD) ++ /* ++ * No need to keep preemption disabled accross the spin loop. ++ */ ++ while (mnt->mnt_flags & MNT_WRITE_HOLD) { ++ preempt_enable(); + cpu_relax(); ++ preempt_disable(); ++ } + /* + * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will + * be set to match its requirements. 
So we must not load that until +@@ -352,9 +358,7 @@ int mnt_want_write(struct vfsmount *mnt) + if (__mnt_is_readonly(mnt)) { + mnt_dec_writers(mnt); + ret = -EROFS; +- goto out; + } +-out: + preempt_enable(); + return ret; + } +Index: linux-3.2/fs/exec.c +=================================================================== +--- linux-3.2.orig/fs/exec.c ++++ linux-3.2/fs/exec.c +@@ -837,10 +837,12 @@ static int exec_mmap(struct mm_struct *m + } + } + task_lock(tsk); ++ local_irq_disable_rt(); + active_mm = tsk->active_mm; + tsk->mm = mm; + tsk->active_mm = mm; + activate_mm(active_mm, mm); ++ local_irq_enable_rt(); + task_unlock(tsk); + arch_pick_mmap_layout(mm); + if (old_mm) { +Index: linux-3.2/mm/mmu_context.c +=================================================================== +--- linux-3.2.orig/mm/mmu_context.c ++++ linux-3.2/mm/mmu_context.c +@@ -26,6 +26,7 @@ void use_mm(struct mm_struct *mm) + struct task_struct *tsk = current; + + task_lock(tsk); ++ local_irq_disable_rt(); + active_mm = tsk->active_mm; + if (active_mm != mm) { + atomic_inc(&mm->mm_count); +@@ -33,6 +34,7 @@ void use_mm(struct mm_struct *mm) + } + tsk->mm = mm; + switch_mm(active_mm, mm, tsk); ++ local_irq_enable_rt(); + task_unlock(tsk); + + if (active_mm != mm) +Index: linux-3.2/arch/x86/kernel/cpu/mcheck/mce.c +=================================================================== +--- linux-3.2.orig/arch/x86/kernel/cpu/mcheck/mce.c ++++ linux-3.2/arch/x86/kernel/cpu/mcheck/mce.c +@@ -38,6 +38,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -1106,17 +1107,14 @@ void mce_log_therm_throt_event(__u64 sta + * poller finds an MCE, poll 2x faster. When the poller finds no more + * errors, poll 2x slower (up to check_interval seconds). + */ +-static int check_interval = 5 * 60; /* 5 minutes */ ++static unsigned long check_interval = 5 * 60; /* 5 minutes */ + +-static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */ +-static DEFINE_PER_CPU(struct timer_list, mce_timer); ++static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */ ++static DEFINE_PER_CPU(struct hrtimer, mce_timer); + +-static void mce_start_timer(unsigned long data) ++static enum hrtimer_restart mce_start_timer(struct hrtimer *timer) + { +- struct timer_list *t = &per_cpu(mce_timer, data); +- int *n; +- +- WARN_ON(smp_processor_id() != data); ++ unsigned long *n; + + if (mce_available(__this_cpu_ptr(&cpu_info))) { + machine_check_poll(MCP_TIMESTAMP, +@@ -1129,21 +1127,22 @@ static void mce_start_timer(unsigned lon + */ + n = &__get_cpu_var(mce_next_interval); + if (mce_notify_irq()) +- *n = max(*n/2, HZ/100); ++ *n = max(*n/2, HZ/100UL); + else +- *n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ)); ++ *n = min(*n*2, round_jiffies_relative(check_interval*HZ)); + +- t->expires = jiffies + *n; +- add_timer_on(t, smp_processor_id()); ++ hrtimer_forward(timer, timer->base->get_time(), ++ ns_to_ktime(jiffies_to_usecs(*n) * 1000)); ++ return HRTIMER_RESTART; + } + +-/* Must not be called in IRQ context where del_timer_sync() can deadlock */ ++/* Must not be called in IRQ context where hrtimer_cancel() can deadlock */ + static void mce_timer_delete_all(void) + { + int cpu; + + for_each_online_cpu(cpu) +- del_timer_sync(&per_cpu(mce_timer, cpu)); ++ hrtimer_cancel(&per_cpu(mce_timer, cpu)); + } + + static void mce_do_trigger(struct work_struct *work) +@@ -1375,10 +1374,11 @@ static void __mcheck_cpu_init_vendor(str + + static void __mcheck_cpu_init_timer(void) + { +- struct timer_list *t = 
&__get_cpu_var(mce_timer); +- int *n = &__get_cpu_var(mce_next_interval); ++ struct hrtimer *t = &__get_cpu_var(mce_timer); ++ unsigned long *n = &__get_cpu_var(mce_next_interval); + +- setup_timer(t, mce_start_timer, smp_processor_id()); ++ hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ t->function = mce_start_timer; + + if (mce_ignore_ce) + return; +@@ -1386,8 +1386,9 @@ static void __mcheck_cpu_init_timer(void + *n = check_interval * HZ; + if (!*n) + return; +- t->expires = round_jiffies(jiffies + *n); +- add_timer_on(t, smp_processor_id()); ++ ++ hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(*n) * 1000), ++ 0 , HRTIMER_MODE_REL_PINNED); + } + + /* Handle unconfigured int18 (should never happen) */ +@@ -2004,6 +2005,8 @@ static void __cpuinit mce_disable_cpu(vo + if (!mce_available(__this_cpu_ptr(&cpu_info))) + return; + ++ hrtimer_cancel(&__get_cpu_var(mce_timer)); ++ + if (!(action & CPU_TASKS_FROZEN)) + cmci_clear(); + for (i = 0; i < banks; i++) { +@@ -2030,6 +2033,7 @@ static void __cpuinit mce_reenable_cpu(v + if (b->init) + wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl); + } ++ __mcheck_cpu_init_timer(); + } + + /* Get notified when a cpu comes on/off. Be hotplug friendly. */ +@@ -2037,7 +2041,6 @@ static int __cpuinit + mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) + { + unsigned int cpu = (unsigned long)hcpu; +- struct timer_list *t = &per_cpu(mce_timer, cpu); + + switch (action) { + case CPU_ONLINE: +@@ -2054,16 +2057,10 @@ mce_cpu_callback(struct notifier_block * + break; + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: +- del_timer_sync(t); + smp_call_function_single(cpu, mce_disable_cpu, &action, 1); + break; + case CPU_DOWN_FAILED: + case CPU_DOWN_FAILED_FROZEN: +- if (!mce_ignore_ce && check_interval) { +- t->expires = round_jiffies(jiffies + +- __get_cpu_var(mce_next_interval)); +- add_timer_on(t, cpu); +- } + smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); + break; + case CPU_POST_DEAD: +Index: linux-3.2/arch/x86/include/asm/stackprotector.h +=================================================================== +--- linux-3.2.orig/arch/x86/include/asm/stackprotector.h ++++ linux-3.2/arch/x86/include/asm/stackprotector.h +@@ -58,7 +58,7 @@ + */ + static __always_inline void boot_init_stack_canary(void) + { +- u64 canary; ++ u64 uninitialized_var(canary); + u64 tsc; + + #ifdef CONFIG_X86_64 +@@ -69,8 +69,16 @@ static __always_inline void boot_init_st + * of randomness. The TSC only matters for very early init, + * there it already has some randomness on most systems. Later + * on during the bootup the random pool has true entropy too. ++ * ++ * For preempt-rt we need to weaken the randomness a bit, as ++ * we can't call into the random generator from atomic context ++ * due to locking constraints. We just leave canary ++ * uninitialized and use the TSC based randomness on top of ++ * it. 
+ */ ++#ifndef CONFIG_PREEMPT_RT_FULL + get_random_bytes(&canary, sizeof(canary)); ++#endif + tsc = __native_read_tsc(); + canary += tsc + (tsc << 32UL); + +Index: linux-3.2/arch/x86/Kconfig +=================================================================== +--- linux-3.2.orig/arch/x86/Kconfig ++++ linux-3.2/arch/x86/Kconfig +@@ -161,10 +161,10 @@ config ARCH_MAY_HAVE_PC_FDC + def_bool ISA_DMA_API + + config RWSEM_GENERIC_SPINLOCK +- def_bool !X86_XADD ++ def_bool !X86_XADD || PREEMPT_RT_FULL + + config RWSEM_XCHGADD_ALGORITHM +- def_bool X86_XADD ++ def_bool X86_XADD && !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL + + config ARCH_HAS_CPU_IDLE_WAIT + def_bool y +Index: linux-3.2/arch/x86/include/asm/page_64_types.h +=================================================================== +--- linux-3.2.orig/arch/x86/include/asm/page_64_types.h ++++ linux-3.2/arch/x86/include/asm/page_64_types.h +@@ -14,12 +14,21 @@ + #define IRQ_STACK_ORDER 2 + #define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER) + +-#define STACKFAULT_STACK 1 +-#define DOUBLEFAULT_STACK 2 +-#define NMI_STACK 3 +-#define DEBUG_STACK 4 +-#define MCE_STACK 5 +-#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define STACKFAULT_STACK 0 ++# define DOUBLEFAULT_STACK 1 ++# define NMI_STACK 2 ++# define DEBUG_STACK 0 ++# define MCE_STACK 3 ++# define N_EXCEPTION_STACKS 3 /* hw limit: 7 */ ++#else ++# define STACKFAULT_STACK 1 ++# define DOUBLEFAULT_STACK 2 ++# define NMI_STACK 3 ++# define DEBUG_STACK 4 ++# define MCE_STACK 5 ++# define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ ++#endif + + #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) + #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) +Index: linux-3.2/arch/x86/kernel/cpu/common.c +=================================================================== +--- linux-3.2.orig/arch/x86/kernel/cpu/common.c ++++ linux-3.2/arch/x86/kernel/cpu/common.c +@@ -1055,7 +1055,9 @@ DEFINE_PER_CPU(unsigned int, irq_count) + */ + static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = { + [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ, ++#if DEBUG_STACK > 0 + [DEBUG_STACK - 1] = DEBUG_STKSZ ++#endif + }; + + static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks +Index: linux-3.2/arch/x86/kernel/dumpstack_64.c +=================================================================== +--- linux-3.2.orig/arch/x86/kernel/dumpstack_64.c ++++ linux-3.2/arch/x86/kernel/dumpstack_64.c +@@ -21,10 +21,14 @@ + (N_EXCEPTION_STACKS + DEBUG_STKSZ/EXCEPTION_STKSZ - 2) + + static char x86_stack_ids[][8] = { ++#if DEBUG_STACK > 0 + [ DEBUG_STACK-1 ] = "#DB", ++#endif + [ NMI_STACK-1 ] = "NMI", + [ DOUBLEFAULT_STACK-1 ] = "#DF", ++#if STACKFAULT_STACK > 0 + [ STACKFAULT_STACK-1 ] = "#SS", ++#endif + [ MCE_STACK-1 ] = "#MC", + #if DEBUG_STKSZ > EXCEPTION_STKSZ + [ N_EXCEPTION_STACKS ... 
+Index: linux-3.2/fs/eventpoll.c +=================================================================== +--- linux-3.2.orig/fs/eventpoll.c ++++ linux-3.2/fs/eventpoll.c +@@ -438,12 +438,12 @@ static int ep_poll_wakeup_proc(void *pri + */ + static void ep_poll_safewake(wait_queue_head_t *wq) + { +- int this_cpu = get_cpu(); ++ int this_cpu = get_cpu_light(); + + ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS, + ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu); + +- put_cpu(); ++ put_cpu_light(); + } + + /* +Index: linux-3.2/mm/vmalloc.c +=================================================================== +--- linux-3.2.orig/mm/vmalloc.c ++++ linux-3.2/mm/vmalloc.c +@@ -782,7 +782,7 @@ static struct vmap_block *new_vmap_block + struct vmap_block *vb; + struct vmap_area *va; + unsigned long vb_idx; +- int node, err; ++ int node, err, cpu; + + node = numa_node_id(); + +@@ -821,12 +821,13 @@ static struct vmap_block *new_vmap_block + BUG_ON(err); + radix_tree_preload_end(); + +- vbq = &get_cpu_var(vmap_block_queue); ++ cpu = get_cpu_light(); ++ vbq = &__get_cpu_var(vmap_block_queue); + vb->vbq = vbq; + spin_lock(&vbq->lock); + list_add_rcu(&vb->free_list, &vbq->free); + spin_unlock(&vbq->lock); +- put_cpu_var(vmap_block_queue); ++ put_cpu_light(); + + return vb; + } +@@ -900,7 +901,7 @@ static void *vb_alloc(unsigned long size + struct vmap_block *vb; + unsigned long addr = 0; + unsigned int order; +- int purge = 0; ++ int purge = 0, cpu; + + BUG_ON(size & ~PAGE_MASK); + BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); +@@ -908,7 +909,8 @@ static void *vb_alloc(unsigned long size + + again: + rcu_read_lock(); +- vbq = &get_cpu_var(vmap_block_queue); ++ cpu = get_cpu_light(); ++ vbq = &__get_cpu_var(vmap_block_queue); + list_for_each_entry_rcu(vb, &vbq->free, free_list) { + int i; + +@@ -945,7 +947,7 @@ next: + if (purge) + purge_fragmented_blocks_thiscpu(); + +- put_cpu_var(vmap_block_queue); ++ put_cpu_light(); + rcu_read_unlock(); + + if (!addr) { +Index: linux-3.2/include/linux/workqueue.h +=================================================================== +--- linux-3.2.orig/include/linux/workqueue.h ++++ linux-3.2/include/linux/workqueue.h +@@ -254,9 +254,10 @@ enum { + WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */ + WQ_HIGHPRI = 1 << 4, /* high priority */ + WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */ ++ WQ_NON_AFFINE = 1 << 6, /* free to move works around cpus */ + +- WQ_DRAINING = 1 << 6, /* internal: workqueue is draining */ +- WQ_RESCUER = 1 << 7, /* internal: workqueue has rescuer */ ++ WQ_DRAINING = 1 << 7, /* internal: workqueue is draining */ ++ WQ_RESCUER = 1 << 8, /* internal: workqueue has rescuer */ + + WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? 
*/ + WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ +Index: linux-3.2/lib/debugobjects.c +=================================================================== +--- linux-3.2.orig/lib/debugobjects.c ++++ linux-3.2/lib/debugobjects.c +@@ -306,7 +306,10 @@ __debug_object_init(void *addr, struct d + struct debug_obj *obj; + unsigned long flags; + +- fill_pool(); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (preempt_count() == 0 && !irqs_disabled()) ++#endif ++ fill_pool(); + + db = get_bucket((unsigned long) addr); + +@@ -1015,9 +1018,9 @@ static int __init debug_objects_replace_ + } + } + ++ local_irq_enable(); + printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt, + obj_pool_used); +- local_irq_enable(); + return 0; + free: + hlist_for_each_entry_safe(obj, node, tmp, &objects, node) { +Index: linux-3.2/include/linux/jump_label.h +=================================================================== +--- linux-3.2.orig/include/linux/jump_label.h ++++ linux-3.2/include/linux/jump_label.h +@@ -4,7 +4,7 @@ + #include + #include + +-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) ++#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) && !defined(CONFIG_PREEMPT_BASE) + + struct jump_label_key { + atomic_t enabled; +Index: linux-3.2/include/linux/netdevice.h +=================================================================== +--- linux-3.2.orig/include/linux/netdevice.h ++++ linux-3.2/include/linux/netdevice.h +@@ -1760,6 +1760,7 @@ struct softnet_data { + unsigned dropped; + struct sk_buff_head input_pkt_queue; + struct napi_struct backlog; ++ struct sk_buff_head tofree_queue; + }; + + static inline void input_queue_head_incr(struct softnet_data *sd) +Index: linux-3.2/include/linux/skbuff.h +=================================================================== +--- linux-3.2.orig/include/linux/skbuff.h ++++ linux-3.2/include/linux/skbuff.h +@@ -124,6 +124,7 @@ struct sk_buff_head { + + __u32 qlen; + spinlock_t lock; ++ raw_spinlock_t raw_lock; + }; + + struct sk_buff; +@@ -925,6 +926,12 @@ static inline void skb_queue_head_init(s + __skb_queue_head_init(list); + } + ++static inline void skb_queue_head_init_raw(struct sk_buff_head *list) ++{ ++ raw_spin_lock_init(&list->raw_lock); ++ __skb_queue_head_init(list); ++} ++ + static inline void skb_queue_head_init_class(struct sk_buff_head *list, + struct lock_class_key *class) + { +Index: linux-3.2/arch/x86/kernel/irq_work.c +=================================================================== +--- linux-3.2.orig/arch/x86/kernel/irq_work.c ++++ linux-3.2/arch/x86/kernel/irq_work.c +@@ -18,6 +18,7 @@ void smp_irq_work_interrupt(struct pt_re + irq_exit(); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + void arch_irq_work_raise(void) + { + #ifdef CONFIG_X86_LOCAL_APIC +@@ -28,3 +29,4 @@ void arch_irq_work_raise(void) + apic_wait_icr_idle(); + #endif + } ++#endif +Index: linux-3.2/kernel/irq_work.c +=================================================================== +--- linux-3.2.orig/kernel/irq_work.c ++++ linux-3.2/kernel/irq_work.c +@@ -105,8 +105,10 @@ void irq_work_run(void) + if (llist_empty(this_list)) + return; + ++#ifndef CONFIG_PREEMPT_RT_FULL + BUG_ON(!in_irq()); + BUG_ON(!irqs_disabled()); ++#endif + + llnode = llist_del_all(this_list); + while (llnode != NULL) { +Index: linux-3.2/arch/mips/Kconfig +=================================================================== +--- linux-3.2.orig/arch/mips/Kconfig ++++ linux-3.2/arch/mips/Kconfig +@@ -2040,7 +2040,7 @@ config CPU_R4400_WORKAROUNDS + # + config HIGHMEM + bool 
"High Memory Support" +- depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM ++ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !PREEMPT_RT_FULL + + config CPU_SUPPORTS_HIGHMEM + bool +Index: linux-3.2/Documentation/sysrq.txt +=================================================================== +--- linux-3.2.orig/Documentation/sysrq.txt ++++ linux-3.2/Documentation/sysrq.txt +@@ -57,10 +57,17 @@ On PowerPC - Press 'ALT - Print Screen ( + On other - If you know of the key combos for other architectures, please + let me know so I can add them to this section. + +-On all - write a character to /proc/sysrq-trigger. e.g.: +- ++On all - write a character to /proc/sysrq-trigger, e.g.: + echo t > /proc/sysrq-trigger + ++On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g. ++ echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq ++ Send an ICMP echo request with this pattern plus the particular ++ SysRq command key. Example: ++ # ping -c1 -s57 -p0102030468 ++ will trigger the SysRq-H (help) command. ++ ++ + * What are the 'command' keys? + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + 'b' - Will immediately reboot the system without syncing or unmounting +Index: linux-3.2/include/net/netns/ipv4.h +=================================================================== +--- linux-3.2.orig/include/net/netns/ipv4.h ++++ linux-3.2/include/net/netns/ipv4.h +@@ -47,6 +47,7 @@ struct netns_ipv4 { + + int sysctl_icmp_echo_ignore_all; + int sysctl_icmp_echo_ignore_broadcasts; ++ int sysctl_icmp_echo_sysrq; + int sysctl_icmp_ignore_bogus_error_responses; + int sysctl_icmp_ratelimit; + int sysctl_icmp_ratemask; +Index: linux-3.2/net/ipv4/icmp.c +=================================================================== +--- linux-3.2.orig/net/ipv4/icmp.c ++++ linux-3.2/net/ipv4/icmp.c +@@ -67,6 +67,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -801,6 +802,30 @@ out_err: + } + + /* ++ * 32bit and 64bit have different timestamp length, so we check for ++ * the cookie at offset 20 and verify it is repeated at offset 50 ++ */ ++#define CO_POS0 20 ++#define CO_POS1 50 ++#define CO_SIZE sizeof(int) ++#define ICMP_SYSRQ_SIZE 57 ++ ++/* ++ * We got a ICMP_SYSRQ_SIZE sized ping request. Check for the cookie ++ * pattern and if it matches send the next byte as a trigger to sysrq. ++ */ ++static void icmp_check_sysrq(struct net *net, struct sk_buff *skb) ++{ ++ int cookie = htonl(net->ipv4.sysctl_icmp_echo_sysrq); ++ char *p = skb->data; ++ ++ if (!memcmp(&cookie, p + CO_POS0, CO_SIZE) && ++ !memcmp(&cookie, p + CO_POS1, CO_SIZE) && ++ p[CO_POS0 + CO_SIZE] == p[CO_POS1 + CO_SIZE]) ++ handle_sysrq(p[CO_POS0 + CO_SIZE]); ++} ++ ++/* + * Handle ICMP_ECHO ("ping") requests. 
+ * + * RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo +@@ -827,6 +852,11 @@ static void icmp_echo(struct sk_buff *sk + icmp_param.data_len = skb->len; + icmp_param.head_len = sizeof(struct icmphdr); + icmp_reply(&icmp_param, skb); ++ ++ if (skb->len == ICMP_SYSRQ_SIZE && ++ net->ipv4.sysctl_icmp_echo_sysrq) { ++ icmp_check_sysrq(net, skb); ++ } + } + } + +Index: linux-3.2/net/ipv4/sysctl_net_ipv4.c +=================================================================== +--- linux-3.2.orig/net/ipv4/sysctl_net_ipv4.c ++++ linux-3.2/net/ipv4/sysctl_net_ipv4.c +@@ -680,6 +680,13 @@ static struct ctl_table ipv4_net_table[] + .proc_handler = proc_dointvec + }, + { ++ .procname = "icmp_echo_sysrq", ++ .data = &init_net.ipv4.sysctl_icmp_echo_sysrq, ++ .maxlen = sizeof(int), ++ .mode = 0644, ++ .proc_handler = proc_dointvec ++ }, ++ { + .procname = "icmp_ignore_bogus_error_responses", + .data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses, + .maxlen = sizeof(int), +Index: linux-3.2/include/linux/kdb.h +=================================================================== +--- linux-3.2.orig/include/linux/kdb.h ++++ linux-3.2/include/linux/kdb.h +@@ -150,12 +150,14 @@ extern int kdb_register(char *, kdb_func + extern int kdb_register_repeat(char *, kdb_func_t, char *, char *, + short, kdb_repeat_t); + extern int kdb_unregister(char *); ++#define in_kdb_printk() (kdb_trap_printk) + #else /* ! CONFIG_KGDB_KDB */ + #define kdb_printf(...) + #define kdb_init(x) + #define kdb_register(...) + #define kdb_register_repeat(...) + #define kdb_uregister(x) ++#define in_kdb_printk() (0) + #endif /* CONFIG_KGDB_KDB */ + enum { + KDB_NOT_INITIALIZED, +Index: linux-3.2/kernel/debug/kdb/kdb_io.c +=================================================================== +--- linux-3.2.orig/kernel/debug/kdb/kdb_io.c ++++ linux-3.2/kernel/debug/kdb/kdb_io.c +@@ -553,7 +553,6 @@ int vkdb_printf(const char *fmt, va_list + int diag; + int linecount; + int logging, saved_loglevel = 0; +- int saved_trap_printk; + int got_printf_lock = 0; + int retlen = 0; + int fnd, len; +@@ -564,8 +563,6 @@ int vkdb_printf(const char *fmt, va_list + unsigned long uninitialized_var(flags); + + preempt_disable(); +- saved_trap_printk = kdb_trap_printk; +- kdb_trap_printk = 0; + + /* Serialize kdb_printf if multiple cpus try to write at once. + * But if any cpu goes recursive in kdb, just print the output, +@@ -821,7 +818,6 @@ kdb_print_out: + } else { + __release(kdb_printf_lock); + } +- kdb_trap_printk = saved_trap_printk; + preempt_enable(); + return retlen; + } +@@ -831,9 +827,11 @@ int kdb_printf(const char *fmt, ...) 
+ va_list ap; + int r; + ++ kdb_trap_printk++; + va_start(ap, fmt); + r = vkdb_printf(fmt, ap); + va_end(ap); ++ kdb_trap_printk--; + + return r; + } +Index: linux-3.2/kernel/ksysfs.c +=================================================================== +--- linux-3.2.orig/kernel/ksysfs.c ++++ linux-3.2/kernel/ksysfs.c +@@ -133,6 +133,15 @@ KERNEL_ATTR_RO(vmcoreinfo); + + #endif /* CONFIG_KEXEC */ + ++#if defined(CONFIG_PREEMPT_RT_FULL) ++static ssize_t realtime_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "%d\n", 1); ++} ++KERNEL_ATTR_RO(realtime); ++#endif ++ + /* whether file capabilities are enabled */ + static ssize_t fscaps_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +@@ -182,6 +191,9 @@ static struct attribute * kernel_attrs[] + &kexec_crash_size_attr.attr, + &vmcoreinfo_attr.attr, + #endif ++#ifdef CONFIG_PREEMPT_RT_FULL ++ &realtime_attr.attr, ++#endif + NULL + }; + +Index: linux-3.2/ipc/sem.c +=================================================================== +--- linux-3.2.orig/ipc/sem.c ++++ linux-3.2/ipc/sem.c +@@ -461,6 +461,13 @@ undo: + static void wake_up_sem_queue_prepare(struct list_head *pt, + struct sem_queue *q, int error) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct task_struct *p = q->sleeper; ++ get_task_struct(p); ++ q->status = error; ++ wake_up_process(p); ++ put_task_struct(p); ++#else + if (list_empty(pt)) { + /* + * Hold preempt off so that we don't get preempted and have the +@@ -472,6 +479,7 @@ static void wake_up_sem_queue_prepare(st + q->pid = error; + + list_add_tail(&q->simple_list, pt); ++#endif + } + + /** +@@ -485,6 +493,7 @@ static void wake_up_sem_queue_prepare(st + */ + static void wake_up_sem_queue_do(struct list_head *pt) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + struct sem_queue *q, *t; + int did_something; + +@@ -497,6 +506,7 @@ static void wake_up_sem_queue_do(struct + } + if (did_something) + preempt_enable(); ++#endif + } + + static void unlink_queue(struct sem_array *sma, struct sem_queue *q) +Index: linux-3.2/drivers/tty/serial/cpm_uart/cpm_uart_core.c +=================================================================== +--- linux-3.2.orig/drivers/tty/serial/cpm_uart/cpm_uart_core.c ++++ linux-3.2/drivers/tty/serial/cpm_uart/cpm_uart_core.c +@@ -1226,7 +1226,7 @@ static void cpm_uart_console_write(struc + { + struct uart_cpm_port *pinfo = &cpm_uart_ports[co->index]; + unsigned long flags; +- int nolock = oops_in_progress; ++ int nolock = oops_in_progress || sysrq_in_progress; + + if (unlikely(nolock)) { + local_irq_save(flags); +Index: linux-3.2/drivers/tty/sysrq.c +=================================================================== +--- linux-3.2.orig/drivers/tty/sysrq.c ++++ linux-3.2/drivers/tty/sysrq.c +@@ -492,6 +492,23 @@ static void __sysrq_put_key_op(int key, + sysrq_key_table[i] = op_p; + } + ++#ifdef CONFIG_MAGIC_SYSRQ_FORCE_PRINTK ++ ++int sysrq_in_progress; ++ ++static void set_sysrq_in_progress(int value) ++{ ++ sysrq_in_progress = value; ++} ++ ++#else ++ ++static void set_sysrq_in_progress(int value) ++{ ++} ++ ++#endif ++ + void __handle_sysrq(int key, bool check_mask) + { + struct sysrq_key_op *op_p; +@@ -500,6 +517,9 @@ void __handle_sysrq(int key, bool check_ + unsigned long flags; + + spin_lock_irqsave(&sysrq_key_table_lock, flags); ++ ++ set_sysrq_in_progress(1); ++ + /* + * Raise the apparent loglevel to maximum so that the sysrq header + * is shown to provide the user with positive feedback. 
We do not +@@ -541,6 +561,9 @@ void __handle_sysrq(int key, bool check_ + printk("\n"); + console_loglevel = orig_log_level; + } ++ ++ set_sysrq_in_progress(0); ++ + spin_unlock_irqrestore(&sysrq_key_table_lock, flags); + } + +Index: linux-3.2/include/linux/sysrq.h +=================================================================== +--- linux-3.2.orig/include/linux/sysrq.h ++++ linux-3.2/include/linux/sysrq.h +@@ -38,6 +38,11 @@ struct sysrq_key_op { + int enable_mask; + }; + ++#ifdef CONFIG_MAGIC_SYSRQ_FORCE_PRINTK ++extern int sysrq_in_progress; ++#else ++#define sysrq_in_progress 0 ++#endif + #ifdef CONFIG_MAGIC_SYSRQ + + /* Generic SysRq interface -- you may call it from any device driver, supplying +Index: linux-3.2/arch/x86/kvm/x86.c +=================================================================== +--- linux-3.2.orig/arch/x86/kvm/x86.c ++++ linux-3.2/arch/x86/kvm/x86.c +@@ -5185,6 +5185,13 @@ int kvm_arch_init(void *opaque) + goto out; + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { ++ printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n"); ++ return -EOPNOTSUPP; ++ } ++#endif ++ + r = kvm_mmu_module_init(); + if (r) + goto out; +Index: linux-3.2/drivers/scsi/fcoe/fcoe.c +=================================================================== +--- linux-3.2.orig/drivers/scsi/fcoe/fcoe.c ++++ linux-3.2/drivers/scsi/fcoe/fcoe.c +@@ -1147,7 +1147,7 @@ static void fcoe_percpu_thread_destroy(u + struct sk_buff *skb; + #ifdef CONFIG_SMP + struct fcoe_percpu_s *p0; +- unsigned targ_cpu = get_cpu(); ++ unsigned targ_cpu = get_cpu_light(); + #endif /* CONFIG_SMP */ + + FCOE_DBG("Destroying receive thread for CPU %d\n", cpu); +@@ -1203,7 +1203,7 @@ static void fcoe_percpu_thread_destroy(u + kfree_skb(skb); + spin_unlock_bh(&p->fcoe_rx_list.lock); + } +- put_cpu(); ++ put_cpu_light(); + #else + /* + * This a non-SMP scenario where the singular Rx thread is +@@ -1426,11 +1426,11 @@ err2: + static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen) + { + struct fcoe_percpu_s *fps; +- int rc; ++ int rc, cpu = get_cpu_light(); + +- fps = &get_cpu_var(fcoe_percpu); ++ fps = &per_cpu(fcoe_percpu, cpu); + rc = fcoe_get_paged_crc_eof(skb, tlen, fps); +- put_cpu_var(fcoe_percpu); ++ put_cpu_light(); + + return rc; + } +@@ -1624,6 +1624,7 @@ static inline int fcoe_filter_frames(str + stats->InvalidCRCCount++; + if (stats->InvalidCRCCount < 5) + printk(KERN_WARNING "fcoe: dropping frame with CRC error\n"); ++ put_cpu(); + return -EINVAL; + } + +@@ -1668,7 +1669,7 @@ static void fcoe_recv_frame(struct sk_bu + */ + hp = (struct fcoe_hdr *) skb_network_header(skb); + +- stats = per_cpu_ptr(lport->dev_stats, get_cpu()); ++ stats = per_cpu_ptr(lport->dev_stats, get_cpu_light()); + if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { + if (stats->ErrorFrames < 5) + printk(KERN_WARNING "fcoe: FCoE version " +@@ -1700,13 +1701,13 @@ static void fcoe_recv_frame(struct sk_bu + goto drop; + + if (!fcoe_filter_frames(lport, fp)) { +- put_cpu(); ++ put_cpu_light(); + fc_exch_recv(lport, fp); + return; + } + drop: + stats->ErrorFrames++; +- put_cpu(); ++ put_cpu_light(); + kfree_skb(skb); + } + +Index: linux-3.2/drivers/scsi/fcoe/fcoe_ctlr.c +=================================================================== +--- linux-3.2.orig/drivers/scsi/fcoe/fcoe_ctlr.c ++++ linux-3.2/drivers/scsi/fcoe/fcoe_ctlr.c +@@ -716,7 +716,7 @@ static unsigned long fcoe_ctlr_age_fcfs( + unsigned long sel_time = 0; + struct fcoe_dev_stats *stats; + +- stats = 
per_cpu_ptr(fip->lp->dev_stats, get_cpu()); ++ stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu_light()); + + list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { + deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2; +@@ -749,7 +749,7 @@ static unsigned long fcoe_ctlr_age_fcfs( + sel_time = fcf->time; + } + } +- put_cpu(); ++ put_cpu_light(); + if (sel_time && !fip->sel_fcf && !fip->sel_time) { + sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY); + fip->sel_time = sel_time; +Index: linux-3.2/drivers/scsi/libfc/fc_exch.c +=================================================================== +--- linux-3.2.orig/drivers/scsi/libfc/fc_exch.c ++++ linux-3.2/drivers/scsi/libfc/fc_exch.c +@@ -724,10 +724,10 @@ static struct fc_exch *fc_exch_em_alloc( + } + memset(ep, 0, sizeof(*ep)); + +- cpu = get_cpu(); ++ cpu = get_cpu_light(); + pool = per_cpu_ptr(mp->pool, cpu); + spin_lock_bh(&pool->lock); +- put_cpu(); ++ put_cpu_light(); + + /* peek cache of free slot */ + if (pool->left != FC_XID_UNKNOWN) { +Index: linux-3.2/arch/x86/crypto/aesni-intel_glue.c +=================================================================== +--- linux-3.2.orig/arch/x86/crypto/aesni-intel_glue.c ++++ linux-3.2/arch/x86/crypto/aesni-intel_glue.c +@@ -289,14 +289,14 @@ static int ecb_encrypt(struct blkcipher_ + err = blkcipher_walk_virt(desc, &walk); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { ++ kernel_fpu_begin(); + aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, +- nbytes & AES_BLOCK_MASK); ++ nbytes & AES_BLOCK_MASK); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } +- kernel_fpu_end(); + + return err; + } +@@ -313,14 +313,14 @@ static int ecb_decrypt(struct blkcipher_ + err = blkcipher_walk_virt(desc, &walk); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { ++ kernel_fpu_begin(); + aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } +- kernel_fpu_end(); + + return err; + } +@@ -359,14 +359,14 @@ static int cbc_encrypt(struct blkcipher_ + err = blkcipher_walk_virt(desc, &walk); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { ++ kernel_fpu_begin(); + aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK, walk.iv); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } +- kernel_fpu_end(); + + return err; + } +@@ -383,14 +383,14 @@ static int cbc_decrypt(struct blkcipher_ + err = blkcipher_walk_virt(desc, &walk); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { ++ kernel_fpu_begin(); + aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK, walk.iv); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } +- kernel_fpu_end(); + + return err; + } +@@ -445,18 +445,20 @@ static int ctr_crypt(struct blkcipher_de + err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { ++ kernel_fpu_begin(); + aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK, walk.iv); ++ 
kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + if (walk.nbytes) { ++ kernel_fpu_begin(); + ctr_crypt_final(ctx, &walk); ++ kernel_fpu_end(); + err = blkcipher_walk_done(desc, &walk, 0); + } +- kernel_fpu_end(); + + return err; + } +Index: linux-3.2/drivers/md/dm.c +=================================================================== +--- linux-3.2.orig/drivers/md/dm.c ++++ linux-3.2/drivers/md/dm.c +@@ -1648,14 +1648,14 @@ static void dm_request_fn(struct request + if (map_request(ti, clone, md)) + goto requeued; + +- BUG_ON(!irqs_disabled()); ++ BUG_ON_NORT(!irqs_disabled()); + spin_lock(q->queue_lock); + } + + goto out; + + requeued: +- BUG_ON(!irqs_disabled()); ++ BUG_ON_NORT(!irqs_disabled()); + spin_lock(q->queue_lock); + + delay_and_out: +Index: linux-3.2/arch/Kconfig +=================================================================== +--- linux-3.2.orig/arch/Kconfig ++++ linux-3.2/arch/Kconfig +@@ -6,6 +6,7 @@ config OPROFILE + tristate "OProfile system profiling" + depends on PROFILING + depends on HAVE_OPROFILE ++ depends on !PREEMPT_RT_FULL + select RING_BUFFER + select RING_BUFFER_ALLOW_SWAP + help +Index: linux-3.2/drivers/net/Kconfig +=================================================================== +--- linux-3.2.orig/drivers/net/Kconfig ++++ linux-3.2/drivers/net/Kconfig +@@ -154,6 +154,7 @@ config MACVTAP + + config NETCONSOLE + tristate "Network console logging support" ++ depends on !PREEMPT_RT_FULL + ---help--- + If you want to log kernel messages over the network, enable this. + See for details. +Index: linux-3.2/mm/Kconfig +=================================================================== +--- linux-3.2.orig/mm/Kconfig ++++ linux-3.2/mm/Kconfig +@@ -307,7 +307,7 @@ config NOMMU_INITIAL_TRIM_EXCESS + + config TRANSPARENT_HUGEPAGE + bool "Transparent Hugepage Support" +- depends on X86 && MMU ++ depends on X86 && MMU && !PREEMPT_RT_FULL + select COMPACTION + help + Transparent Hugepages allows the kernel to use huge pages and +Index: linux-3.2/init/Makefile +=================================================================== +--- linux-3.2.orig/init/Makefile ++++ linux-3.2/init/Makefile +@@ -29,4 +29,4 @@ silent_chk_compile.h = : + include/generated/compile.h: FORCE + @$($(quiet)chk_compile.h) + $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \ +- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)" ++ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)" +Index: linux-3.2/scripts/mkcompile_h +=================================================================== +--- linux-3.2.orig/scripts/mkcompile_h ++++ linux-3.2/scripts/mkcompile_h +@@ -4,7 +4,8 @@ TARGET=$1 + ARCH=$2 + SMP=$3 + PREEMPT=$4 +-CC=$5 ++RT=$5 ++CC=$6 + + vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; } + +@@ -57,6 +58,7 @@ UTS_VERSION="#$VERSION" + CONFIG_FLAGS="" + if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi + if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi ++if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi + UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP" + + # Truncate to maximum length diff --git a/debian/patches/features/all/rt/patch-3.2-rc1-52e4c2a05.patch b/debian/patches/features/all/rt/patch-3.2-rc1-52e4c2a05.patch new file mode 100644 index 000000000..4696575a3 --- /dev/null +++ b/debian/patches/features/all/rt/patch-3.2-rc1-52e4c2a05.patch @@ -0,0 +1,10310 @@ +diff --git a/Documentation/DocBook/drm.tmpl 
b/Documentation/DocBook/drm.tmpl +index c279158..196b8b9 100644 +--- a/Documentation/DocBook/drm.tmpl ++++ b/Documentation/DocBook/drm.tmpl +@@ -32,7 +32,7 @@ + The Linux DRM layer contains code intended to support the needs + of complex graphics devices, usually containing programmable + pipelines well suited to 3D graphics acceleration. Graphics +- drivers in the kernel can make use of DRM functions to make ++ drivers in the kernel may make use of DRM functions to make + tasks like memory management, interrupt handling and DMA easier, + and provide a uniform interface to applications. + +@@ -57,10 +57,10 @@ + existing drivers. + + +- First, we'll go over some typical driver initialization ++ First, we go over some typical driver initialization + requirements, like setting up command buffers, creating an + initial output configuration, and initializing core services. +- Subsequent sections will cover core internals in more detail, ++ Subsequent sections cover core internals in more detail, + providing implementation notes and examples. + + +@@ -74,7 +74,7 @@ + + + The core of every DRM driver is struct drm_driver. Drivers +- will typically statically initialize a drm_driver structure, ++ typically statically initialize a drm_driver structure, + then pass it to drm_init() at load time. + + +@@ -88,8 +88,8 @@ + + + static struct drm_driver driver = { +- /* don't use mtrr's here, the Xserver or user space app should +- * deal with them for intel hardware. ++ /* Don't use MTRRs here; the Xserver or userspace app should ++ * deal with them for Intel hardware. + */ + .driver_features = + DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | +@@ -154,8 +154,8 @@ + + + In the example above, taken from the i915 DRM driver, the driver +- sets several flags indicating what core features it supports. +- We'll go over the individual callbacks in later sections. Since ++ sets several flags indicating what core features it supports; ++ we go over the individual callbacks in later sections. Since + flags indicate which features your driver supports to the DRM + core, you need to set most of them prior to calling drm_init(). Some, + like DRIVER_MODESET can be set later based on user supplied parameters, +@@ -203,8 +203,8 @@ + DRIVER_HAVE_IRQDRIVER_IRQ_SHARED + + +- DRIVER_HAVE_IRQ indicates whether the driver has a IRQ +- handler, DRIVER_IRQ_SHARED indicates whether the device & ++ DRIVER_HAVE_IRQ indicates whether the driver has an IRQ ++ handler. DRIVER_IRQ_SHARED indicates whether the device & + handler support shared IRQs (note that this is required of + PCI drivers). + +@@ -214,8 +214,8 @@ + DRIVER_DMA_QUEUE + + +- If the driver queues DMA requests and completes them +- asynchronously, this flag should be set. Deprecated. ++ Should be set if the driver queues DMA requests and completes them ++ asynchronously. Deprecated. + + + +@@ -238,7 +238,7 @@ + + + In this specific case, the driver requires AGP and supports +- IRQs. DMA, as we'll see, is handled by device specific ioctls ++ IRQs. DMA, as discussed later, is handled by device-specific ioctls + in this case. It also supports the kernel mode setting APIs, though + unlike in the actual i915 driver source, this example unconditionally + exports KMS capability. +@@ -269,36 +269,34 @@ + initial output configuration. + + +- Note that the tasks performed at driver load time must not +- conflict with DRM client requirements. For instance, if user ++ If compatibility is a concern (e.g. 
with drivers converted over ++ to the new interfaces from the old ones), care must be taken to ++ prevent device initialization and control that is incompatible with ++ currently active userspace drivers. For instance, if user + level mode setting drivers are in use, it would be problematic + to perform output discovery & configuration at load time. +- Likewise, if pre-memory management aware user level drivers are ++ Likewise, if user-level drivers unaware of memory management are + in use, memory management and command buffer setup may need to +- be omitted. These requirements are driver specific, and care ++ be omitted. These requirements are driver-specific, and care + needs to be taken to keep both old and new applications and + libraries working. The i915 driver supports the "modeset" + module parameter to control whether advanced features are +- enabled at load time or in legacy fashion. If compatibility is +- a concern (e.g. with drivers converted over to the new interfaces +- from the old ones), care must be taken to prevent incompatible +- device initialization and control with the currently active +- userspace drivers. ++ enabled at load time or in legacy fashion. + + + + Driver private & performance counters + + The driver private hangs off the main drm_device structure and +- can be used for tracking various device specific bits of ++ can be used for tracking various device-specific bits of + information, like register offsets, command buffer status, + register state for suspend/resume, etc. At load time, a +- driver can simply allocate one and set drm_device.dev_priv +- appropriately; at unload the driver can free it and set +- drm_device.dev_priv to NULL. ++ driver may simply allocate one and set drm_device.dev_priv ++ appropriately; it should be freed and drm_device.dev_priv set ++ to NULL when the driver is unloaded. + + +- The DRM supports several counters which can be used for rough ++ The DRM supports several counters which may be used for rough + performance characterization. Note that the DRM stat counter + system is not often used by applications, and supporting + additional counters is completely optional. +@@ -307,15 +305,15 @@ + These interfaces are deprecated and should not be used. If performance + monitoring is desired, the developer should investigate and + potentially enhance the kernel perf and tracing infrastructure to export +- GPU related performance information to performance monitoring +- tools and applications. ++ GPU related performance information for consumption by performance ++ monitoring tools and applications. + + + + + Configuring the device + +- Obviously, device configuration will be device specific. ++ Obviously, device configuration is device-specific. + However, there are several common operations: finding a + device's PCI resources, mapping them, and potentially setting + up an IRQ handler. +@@ -323,10 +321,10 @@ + + Finding & mapping resources is fairly straightforward. The + DRM wrapper functions, drm_get_resource_start() and +- drm_get_resource_len() can be used to find BARs on the given ++ drm_get_resource_len(), may be used to find BARs on the given + drm_device struct. Once those values have been retrieved, the + driver load function can call drm_addmap() to create a new +- mapping for the BAR in question. Note you'll probably want a ++ mapping for the BAR in question. Note that you probably want a + drm_local_map_t in your driver private structure to track any + mappings you create. 
+ +@@ -335,20 +333,20 @@ + + if compatibility with other operating systems isn't a concern + (DRM drivers can run under various BSD variants and OpenSolaris), +- native Linux calls can be used for the above, e.g. pci_resource_* ++ native Linux calls may be used for the above, e.g. pci_resource_* + and iomap*/iounmap. See the Linux device driver book for more + info. + + +- Once you have a register map, you can use the DRM_READn() and ++ Once you have a register map, you may use the DRM_READn() and + DRM_WRITEn() macros to access the registers on your device, or +- use driver specific versions to offset into your MMIO space +- relative to a driver specific base pointer (see I915_READ for +- example). ++ use driver-specific versions to offset into your MMIO space ++ relative to a driver-specific base pointer (see I915_READ for ++ an example). + + + If your device supports interrupt generation, you may want to +- setup an interrupt handler at driver load time as well. This ++ set up an interrupt handler when the driver is loaded. This + is done using the drm_irq_install() function. If your device + supports vertical blank interrupts, it should call + drm_vblank_init() to initialize the core vblank handling code before +@@ -357,7 +355,7 @@ + + + +- Once your interrupt handler is registered (it'll use your ++ Once your interrupt handler is registered (it uses your + drm_driver.irq_handler as the actual interrupt handling + function), you can safely enable interrupts on your device, + assuming any other state your interrupt handler uses is also +@@ -371,10 +369,10 @@ + using the pci_map_rom() call, a convenience function that + takes care of mapping the actual ROM, whether it has been + shadowed into memory (typically at address 0xc0000) or exists +- on the PCI device in the ROM BAR. Note that once you've +- mapped the ROM and extracted any necessary information, be +- sure to unmap it; on many devices the ROM address decoder is +- shared with other BARs, so leaving it mapped can cause ++ on the PCI device in the ROM BAR. Note that after the ROM ++ has been mapped and any necessary information has been extracted, ++ it should be unmapped; on many devices, the ROM address decoder is ++ shared with other BARs, so leaving it mapped could cause + undesired behavior like hangs or memory corruption. + + +@@ -389,9 +387,9 @@ + should support a memory manager. + + +- If your driver supports memory management (it should!), you'll ++ If your driver supports memory management (it should!), you + need to set that up at load time as well. How you initialize +- it depends on which memory manager you're using, TTM or GEM. ++ it depends on which memory manager you're using: TTM or GEM. + + + TTM initialization +@@ -401,7 +399,7 @@ + and devices with dedicated video RAM (VRAM), i.e. most discrete + graphics devices. If your device has dedicated RAM, supporting + TTM is desirable. TTM also integrates tightly with your +- driver specific buffer execution function. See the radeon ++ driver-specific buffer execution function. See the radeon + driver for examples. + + +@@ -429,21 +427,21 @@ + created by the memory manager at runtime. Your global TTM should + have a type of TTM_GLOBAL_TTM_MEM. The size field for the global + object should be sizeof(struct ttm_mem_global), and the init and +- release hooks should point at your driver specific init and +- release routines, which will probably eventually call +- ttm_mem_global_init and ttm_mem_global_release respectively. 
++ release hooks should point at your driver-specific init and ++ release routines, which probably eventually call ++ ttm_mem_global_init and ttm_mem_global_release, respectively. + + + Once your global TTM accounting structure is set up and initialized +- (done by calling ttm_global_item_ref on the global object you +- just created), you'll need to create a buffer object TTM to ++ by calling ttm_global_item_ref() on it, ++ you need to create a buffer object TTM to + provide a pool for buffer object allocation by clients and the + kernel itself. The type of this object should be TTM_GLOBAL_TTM_BO, + and its size should be sizeof(struct ttm_bo_global). Again, +- driver specific init and release functions can be provided, +- likely eventually calling ttm_bo_global_init and +- ttm_bo_global_release, respectively. Also like the previous +- object, ttm_global_item_ref is used to create an initial reference ++ driver-specific init and release functions may be provided, ++ likely eventually calling ttm_bo_global_init() and ++ ttm_bo_global_release(), respectively. Also, like the previous ++ object, ttm_global_item_ref() is used to create an initial reference + count for the TTM, which will call your initialization function. + + +@@ -453,27 +451,26 @@ + GEM is an alternative to TTM, designed specifically for UMA + devices. It has simpler initialization and execution requirements + than TTM, but has no VRAM management capability. Core GEM +- initialization is comprised of a basic drm_mm_init call to create ++ is initialized by calling drm_mm_init() to create + a GTT DRM MM object, which provides an address space pool for +- object allocation. In a KMS configuration, the driver will +- need to allocate and initialize a command ring buffer following +- basic GEM initialization. Most UMA devices have a so-called ++ object allocation. In a KMS configuration, the driver ++ needs to allocate and initialize a command ring buffer following ++ core GEM initialization. A UMA device usually has what is called a + "stolen" memory region, which provides space for the initial + framebuffer and large, contiguous memory regions required by the +- device. This space is not typically managed by GEM, and must ++ device. This space is not typically managed by GEM, and it must + be initialized separately into its own DRM MM object. + + +- Initialization will be driver specific, and will depend on +- the architecture of the device. In the case of Intel ++ Initialization is driver-specific. In the case of Intel + integrated graphics chips like 965GM, GEM initialization can + be done by calling the internal GEM init function, + i915_gem_do_init(). Since the 965GM is a UMA device +- (i.e. it doesn't have dedicated VRAM), GEM will manage ++ (i.e. it doesn't have dedicated VRAM), GEM manages + making regular RAM available for GPU operations. Memory set + aside by the BIOS (called "stolen" memory by the i915 +- driver) will be managed by the DRM memrange allocator; the +- rest of the aperture will be managed by GEM. ++ driver) is managed by the DRM memrange allocator; the ++ rest of the aperture is managed by GEM. + + /* Basic memrange allocator for stolen space (aka vram) */ + drm_memrange_init(&dev_priv->vram, 0, prealloc_size); +@@ -483,7 +480,7 @@ + + + +- Once the memory manager has been set up, we can allocate the ++ Once the memory manager has been set up, we may allocate the + command buffer. In the i915 case, this is also done with a + GEM function, i915_gem_init_ringbuffer(). 
+ +@@ -493,16 +490,25 @@ + + Output configuration + +- The final initialization task is output configuration. This involves +- finding and initializing the CRTCs, encoders and connectors +- for your device, creating an initial configuration and +- registering a framebuffer console driver. ++ The final initialization task is output configuration. This involves: ++ ++ ++ Finding and initializing the CRTCs, encoders, and connectors ++ for the device. ++ ++ ++ Creating an initial configuration. ++ ++ ++ Registering a framebuffer console driver. ++ ++ + + + Output discovery and initialization + +- Several core functions exist to create CRTCs, encoders and +- connectors, namely drm_crtc_init(), drm_connector_init() and ++ Several core functions exist to create CRTCs, encoders, and ++ connectors, namely: drm_crtc_init(), drm_connector_init(), and + drm_encoder_init(), along with several "helper" functions to + perform common tasks. + +@@ -555,10 +561,10 @@ void intel_crt_init(struct drm_device *dev) + + + In the example above (again, taken from the i915 driver), a +- CRT connector and encoder combination is created. A device +- specific i2c bus is also created, for fetching EDID data and ++ CRT connector and encoder combination is created. A device-specific ++ i2c bus is also created for fetching EDID data and + performing monitor detection. Once the process is complete, +- the new connector is registered with sysfs, to make its ++ the new connector is registered with sysfs to make its + properties available to applications. + + +@@ -567,12 +573,12 @@ void intel_crt_init(struct drm_device *dev) + Since many PC-class graphics devices have similar display output + designs, the DRM provides a set of helper functions to make + output management easier. The core helper routines handle +- encoder re-routing and disabling of unused functions following +- mode set. Using the helpers is optional, but recommended for ++ encoder re-routing and the disabling of unused functions following ++ mode setting. Using the helpers is optional, but recommended for + devices with PC-style architectures (i.e. a set of display planes + for feeding pixels to encoders which are in turn routed to + connectors). Devices with more complex requirements needing +- finer grained management can opt to use the core callbacks ++ finer grained management may opt to use the core callbacks + directly. + + +@@ -580,17 +586,25 @@ void intel_crt_init(struct drm_device *dev) + + + +- For each encoder, CRTC and connector, several functions must +- be provided, depending on the object type. Encoder objects +- need to provide a DPMS (basically on/off) function, mode fixup +- (for converting requested modes into native hardware timings), +- and prepare, set and commit functions for use by the core DRM +- helper functions. Connector helpers need to provide mode fetch and +- validity functions as well as an encoder matching function for +- returning an ideal encoder for a given connector. The core +- connector functions include a DPMS callback, (deprecated) +- save/restore routines, detection, mode probing, property handling, +- and cleanup functions. ++ Each encoder object needs to provide: ++ ++ ++ A DPMS (basically on/off) function. ++ ++ ++ A mode-fixup function (for converting requested modes into ++ native hardware timings). ++ ++ ++ Functions (prepare, set, and commit) for use by the core DRM ++ helper functions. 
++ ++ ++ Connector helpers need to provide functions (mode-fetch, validity, ++ and encoder-matching) for returning an ideal encoder for a given ++ connector. The core connector functions include a DPMS callback, ++ save/restore routines (deprecated), detection, mode probing, ++ property handling, and cleanup functions. + + + +@@ -605,23 +619,34 @@ void intel_crt_init(struct drm_device *dev) + VBlank event handling + + The DRM core exposes two vertical blank related ioctls: +- DRM_IOCTL_WAIT_VBLANK and DRM_IOCTL_MODESET_CTL. ++ ++ ++ DRM_IOCTL_WAIT_VBLANK ++ ++ ++ This takes a struct drm_wait_vblank structure as its argument, ++ and it is used to block or request a signal when a specified ++ vblank event occurs. ++ ++ ++ ++ ++ DRM_IOCTL_MODESET_CTL ++ ++ ++ This should be called by application level drivers before and ++ after mode setting, since on many devices the vertical blank ++ counter is reset at that time. Internally, the DRM snapshots ++ the last vblank count when the ioctl is called with the ++ _DRM_PRE_MODESET command, so that the counter won't go backwards ++ (which is dealt with when _DRM_POST_MODESET is used). ++ ++ ++ ++ + + + +- DRM_IOCTL_WAIT_VBLANK takes a struct drm_wait_vblank structure +- as its argument, and is used to block or request a signal when a +- specified vblank event occurs. +- +- +- DRM_IOCTL_MODESET_CTL should be called by application level +- drivers before and after mode setting, since on many devices the +- vertical blank counter will be reset at that time. Internally, +- the DRM snapshots the last vblank count when the ioctl is called +- with the _DRM_PRE_MODESET command so that the counter won't go +- backwards (which is dealt with when _DRM_POST_MODESET is used). +- +- + To support the functions above, the DRM core provides several + helper functions for tracking vertical blank counters, and + requires drivers to provide several callbacks: +@@ -632,24 +657,24 @@ void intel_crt_init(struct drm_device *dev) + register. The enable and disable vblank callbacks should enable + and disable vertical blank interrupts, respectively. In the + absence of DRM clients waiting on vblank events, the core DRM +- code will use the disable_vblank() function to disable +- interrupts, which saves power. They'll be re-enabled again when ++ code uses the disable_vblank() function to disable ++ interrupts, which saves power. They are re-enabled again when + a client calls the vblank wait ioctl above. + + +- Devices that don't provide a count register can simply use an ++ A device that doesn't provide a count register may simply use an + internal atomic counter incremented on every vertical blank +- interrupt, and can make their enable and disable vblank +- functions into no-ops. ++ interrupt (and then treat the enable_vblank() and disable_vblank() ++ callbacks as no-ops). + + + + + Memory management + +- The memory manager lies at the heart of many DRM operations, and +- is also required to support advanced client features like OpenGL +- pbuffers. The DRM currently contains two memory managers, TTM ++ The memory manager lies at the heart of many DRM operations; it ++ is required to support advanced client features like OpenGL ++ pbuffers. The DRM currently contains two memory managers: TTM + and GEM. + + +@@ -679,41 +704,46 @@ void intel_crt_init(struct drm_device *dev) + + GEM-enabled drivers must provide gem_init_object() and + gem_free_object() callbacks to support the core memory +- allocation routines. 
They should also provide several driver +- specific ioctls to support command execution, pinning, buffer ++ allocation routines. They should also provide several driver-specific ++ ioctls to support command execution, pinning, buffer + read & write, mapping, and domain ownership transfers. + + +- On a fundamental level, GEM involves several operations: memory +- allocation and freeing, command execution, and aperture management +- at command execution time. Buffer object allocation is relatively ++ On a fundamental level, GEM involves several operations: ++ ++ Memory allocation and freeing ++ Command execution ++ Aperture management at command execution time ++ ++ Buffer object allocation is relatively + straightforward and largely provided by Linux's shmem layer, which + provides memory to back each object. When mapped into the GTT + or used in a command buffer, the backing pages for an object are + flushed to memory and marked write combined so as to be coherent +- with the GPU. Likewise, when the GPU finishes rendering to an object, +- if the CPU accesses it, it must be made coherent with the CPU's view ++ with the GPU. Likewise, if the CPU accesses an object after the GPU ++ has finished rendering to the object, then the object must be made ++ coherent with the CPU's view + of memory, usually involving GPU cache flushing of various kinds. +- This core CPU<->GPU coherency management is provided by the GEM +- set domain function, which evaluates an object's current domain and ++ This core CPU<->GPU coherency management is provided by a ++ device-specific ioctl, which evaluates an object's current domain and + performs any necessary flushing or synchronization to put the object + into the desired coherency domain (note that the object may be busy, +- i.e. an active render target; in that case the set domain function +- will block the client and wait for rendering to complete before ++ i.e. an active render target; in that case, setting the domain ++ blocks the client and waits for rendering to complete before + performing any necessary flushing operations). + + + Perhaps the most important GEM function is providing a command + execution interface to clients. Client programs construct command +- buffers containing references to previously allocated memory objects +- and submit them to GEM. At that point, GEM will take care to bind ++ buffers containing references to previously allocated memory objects, ++ and then submit them to GEM. At that point, GEM takes care to bind + all the objects into the GTT, execute the buffer, and provide + necessary synchronization between clients accessing the same buffers. + This often involves evicting some objects from the GTT and re-binding + others (a fairly expensive operation), and providing relocation + support which hides fixed GTT offsets from clients. Clients must + take care not to submit command buffers that reference more objects +- than can fit in the GTT or GEM will reject them and no rendering ++ than can fit in the GTT; otherwise, GEM will reject them and no rendering + will occur. Similarly, if several objects in the buffer require + fence registers to be allocated for correct rendering (e.g. 2D blits + on pre-965 chips), care must be taken not to require more fence +@@ -729,7 +759,7 @@ void intel_crt_init(struct drm_device *dev) + Output management + + At the core of the DRM output management code is a set of +- structures representing CRTCs, encoders and connectors. ++ structures representing CRTCs, encoders, and connectors. 
+ + + A CRTC is an abstraction representing a part of the chip that +@@ -765,21 +795,19 @@ void intel_crt_init(struct drm_device *dev) + + Framebuffer management + +- In order to set a mode on a given CRTC, encoder and connector +- configuration, clients need to provide a framebuffer object which +- will provide a source of pixels for the CRTC to deliver to the encoder(s) +- and ultimately the connector(s) in the configuration. A framebuffer +- is fundamentally a driver specific memory object, made into an opaque +- handle by the DRM addfb function. Once an fb has been created this +- way it can be passed to the KMS mode setting routines for use in +- a configuration. ++ Clients need to provide a framebuffer object which provides a source ++ of pixels for a CRTC to deliver to the encoder(s) and ultimately the ++ connector(s). A framebuffer is fundamentally a driver-specific memory ++ object, made into an opaque handle by the DRM's addfb() function. ++ Once a framebuffer has been created this way, it may be passed to the ++ KMS mode setting routines for use in a completed configuration. + + + + + Command submission & fencing + +- This should cover a few device specific command submission ++ This should cover a few device-specific command submission + implementations. + + +@@ -789,7 +817,7 @@ void intel_crt_init(struct drm_device *dev) + + The DRM core provides some suspend/resume code, but drivers + wanting full suspend/resume support should provide save() and +- restore() functions. These will be called at suspend, ++ restore() functions. These are called at suspend, + hibernate, or resume time, and should perform any state save or + restore required by your device across suspend or hibernate + states. +@@ -812,8 +840,8 @@ void intel_crt_init(struct drm_device *dev) + + The DRM core exports several interfaces to applications, + generally intended to be used through corresponding libdrm +- wrapper functions. In addition, drivers export device specific +- interfaces for use by userspace drivers & device aware ++ wrapper functions. In addition, drivers export device-specific ++ interfaces for use by userspace drivers & device-aware + applications through ioctls and sysfs files. + + +@@ -822,8 +850,8 @@ void intel_crt_init(struct drm_device *dev) + management, memory management, and output management. + + +- Cover generic ioctls and sysfs layout here. Only need high +- level info, since man pages will cover the rest. ++ Cover generic ioctls and sysfs layout here. We only need high-level ++ info, since man pages should cover the rest. + + + +diff --git a/Documentation/cgroups/freezer-subsystem.txt b/Documentation/cgroups/freezer-subsystem.txt +index c21d777..7e62de1 100644 +--- a/Documentation/cgroups/freezer-subsystem.txt ++++ b/Documentation/cgroups/freezer-subsystem.txt +@@ -33,9 +33,9 @@ demonstrate this problem using nested bash shells: + + From a second, unrelated bash shell: + $ kill -SIGSTOP 16690 +- $ kill -SIGCONT 16990 ++ $ kill -SIGCONT 16690 + +- ++ + + This happens because bash can observe both signals and choose how it + responds to them. 
+diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt +index 4f34432..edad99a 100644 +--- a/Documentation/sound/alsa/HD-Audio-Models.txt ++++ b/Documentation/sound/alsa/HD-Audio-Models.txt +@@ -349,6 +349,7 @@ STAC92HD83* + ref Reference board + mic-ref Reference board with power management for ports + dell-s14 Dell laptop ++ dell-vostro-3500 Dell Vostro 3500 laptop + hp HP laptops with (inverted) mute-LED + hp-dv7-4000 HP dv-7 4000 + auto BIOS setup (default) +diff --git a/Kbuild b/Kbuild +index 4caab4f..b8b708a 100644 +--- a/Kbuild ++++ b/Kbuild +@@ -92,7 +92,7 @@ always += missing-syscalls + targets += missing-syscalls + + quiet_cmd_syscalls = CALL $< +- cmd_syscalls = $(CONFIG_SHELL) $< $(CC) $(c_flags) ++ cmd_syscalls = $(CONFIG_SHELL) $< $(CC) $(c_flags) $(missing_syscalls_flags) + + missing-syscalls: scripts/checksyscalls.sh $(offsets-file) FORCE + $(call cmd,syscalls) +diff --git a/MAINTAINERS b/MAINTAINERS +index 4808256..071a996 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -1106,6 +1106,7 @@ F: drivers/media/video/s5p-fimc/ + ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT + M: Kyungmin Park + M: Kamil Debski ++M: Jeongtae Park + L: linux-arm-kernel@lists.infradead.org + L: linux-media@vger.kernel.org + S: Maintained +@@ -2342,6 +2343,13 @@ S: Supported + F: drivers/gpu/drm/i915 + F: include/drm/i915* + ++DRM DRIVERS FOR EXYNOS ++M: Inki Dae ++L: dri-devel@lists.freedesktop.org ++S: Supported ++F: drivers/gpu/drm/exynos ++F: include/drm/exynos* ++ + DSCC4 DRIVER + M: Francois Romieu + L: netdev@vger.kernel.org +@@ -6122,7 +6130,7 @@ F: sound/ + SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC) + M: Liam Girdwood + M: Mark Brown +-T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound-2.6.git ++T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git + L: alsa-devel@alsa-project.org (moderated for non-subscribers) + W: http://alsa-project.org/main/index.php/ASoC + S: Supported +diff --git a/arch/arm/boot/dts/tegra-ventana.dts b/arch/arm/boot/dts/tegra-ventana.dts +index 9b29a62..3f9abd6 100644 +--- a/arch/arm/boot/dts/tegra-ventana.dts ++++ b/arch/arm/boot/dts/tegra-ventana.dts +@@ -22,11 +22,10 @@ + sdhci@c8000400 { + cd-gpios = <&gpio 69 0>; /* gpio PI5 */ + wp-gpios = <&gpio 57 0>; /* gpio PH1 */ +- power-gpios = <&gpio 155 0>; /* gpio PT3 */ ++ power-gpios = <&gpio 70 0>; /* gpio PI6 */ + }; + + sdhci@c8000600 { +- power-gpios = <&gpio 70 0>; /* gpio PI6 */ + support-8bit; + }; + }; +diff --git a/arch/arm/mach-at91/at91cap9_devices.c b/arch/arm/mach-at91/at91cap9_devices.c +index a4401d6..adad70d 100644 +--- a/arch/arm/mach-at91/at91cap9_devices.c ++++ b/arch/arm/mach-at91/at91cap9_devices.c +@@ -98,7 +98,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {} + * USB HS Device (Gadget) + * -------------------------------------------------------------------- */ + +-#if defined(CONFIG_USB_GADGET_ATMEL_USBA) || defined(CONFIG_USB_GADGET_ATMEL_USBA_MODULE) ++#if defined(CONFIG_USB_ATMEL_USBA) || defined(CONFIG_USB_ATMEL_USBA_MODULE) + + static struct resource usba_udc_resources[] = { + [0] = { +@@ -1021,8 +1021,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {} + #if defined(CONFIG_SERIAL_ATMEL) + static struct resource dbgu_resources[] = { + [0] = { +- .start = AT91_VA_BASE_SYS + AT91_DBGU, +- .end = AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1, ++ .start = AT91_BASE_SYS + AT91_DBGU, ++ .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1, + .flags = 
IORESOURCE_MEM, + }, + [1] = { +@@ -1035,7 +1035,6 @@ static struct resource dbgu_resources[] = { + static struct atmel_uart_data dbgu_data = { + .use_dma_tx = 0, + .use_dma_rx = 0, /* DBGU not capable of receive DMA */ +- .regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU), + }; + + static u64 dbgu_dmamask = DMA_BIT_MASK(32); +diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c +index 01d8bbd..66591fa 100644 +--- a/arch/arm/mach-at91/at91rm9200_devices.c ++++ b/arch/arm/mach-at91/at91rm9200_devices.c +@@ -877,8 +877,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {} + #if defined(CONFIG_SERIAL_ATMEL) + static struct resource dbgu_resources[] = { + [0] = { +- .start = AT91_VA_BASE_SYS + AT91_DBGU, +- .end = AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1, ++ .start = AT91_BASE_SYS + AT91_DBGU, ++ .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { +@@ -891,7 +891,6 @@ static struct resource dbgu_resources[] = { + static struct atmel_uart_data dbgu_data = { + .use_dma_tx = 0, + .use_dma_rx = 0, /* DBGU not capable of receive DMA */ +- .regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU), + }; + + static u64 dbgu_dmamask = DMA_BIT_MASK(32); +diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c +index 24b6f8c..25e3464 100644 +--- a/arch/arm/mach-at91/at91sam9260_devices.c ++++ b/arch/arm/mach-at91/at91sam9260_devices.c +@@ -837,8 +837,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {} + #if defined(CONFIG_SERIAL_ATMEL) + static struct resource dbgu_resources[] = { + [0] = { +- .start = AT91_VA_BASE_SYS + AT91_DBGU, +- .end = AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1, ++ .start = AT91_BASE_SYS + AT91_DBGU, ++ .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { +@@ -851,7 +851,6 @@ static struct resource dbgu_resources[] = { + static struct atmel_uart_data dbgu_data = { + .use_dma_tx = 0, + .use_dma_rx = 0, /* DBGU not capable of receive DMA */ +- .regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU), + }; + + static u64 dbgu_dmamask = DMA_BIT_MASK(32); +diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c +index 3b70b38..ae78f4d 100644 +--- a/arch/arm/mach-at91/at91sam9261_devices.c ++++ b/arch/arm/mach-at91/at91sam9261_devices.c +@@ -816,8 +816,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {} + #if defined(CONFIG_SERIAL_ATMEL) + static struct resource dbgu_resources[] = { + [0] = { +- .start = AT91_VA_BASE_SYS + AT91_DBGU, +- .end = AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1, ++ .start = AT91_BASE_SYS + AT91_DBGU, ++ .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { +@@ -830,7 +830,6 @@ static struct resource dbgu_resources[] = { + static struct atmel_uart_data dbgu_data = { + .use_dma_tx = 0, + .use_dma_rx = 0, /* DBGU not capable of receive DMA */ +- .regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU), + }; + + static u64 dbgu_dmamask = DMA_BIT_MASK(32); +diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c +index 3faa1fd..ad017eb 100644 +--- a/arch/arm/mach-at91/at91sam9263_devices.c ++++ b/arch/arm/mach-at91/at91sam9263_devices.c +@@ -1196,8 +1196,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {} + + static struct resource dbgu_resources[] = { + [0] = { +- .start = AT91_VA_BASE_SYS + AT91_DBGU, +- .end = AT91_VA_BASE_SYS + AT91_DBGU + 
SZ_512 - 1, ++ .start = AT91_BASE_SYS + AT91_DBGU, ++ .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { +@@ -1210,7 +1210,6 @@ static struct resource dbgu_resources[] = { + static struct atmel_uart_data dbgu_data = { + .use_dma_tx = 0, + .use_dma_rx = 0, /* DBGU not capable of receive DMA */ +- .regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU), + }; + + static u64 dbgu_dmamask = DMA_BIT_MASK(32); +diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c +index 000b5e1..09a16d6 100644 +--- a/arch/arm/mach-at91/at91sam9g45_devices.c ++++ b/arch/arm/mach-at91/at91sam9g45_devices.c +@@ -197,7 +197,7 @@ void __init at91_add_device_usbh_ehci(struct at91_usbh_data *data) {} + * USB HS Device (Gadget) + * -------------------------------------------------------------------- */ + +-#if defined(CONFIG_USB_GADGET_ATMEL_USBA) || defined(CONFIG_USB_GADGET_ATMEL_USBA_MODULE) ++#if defined(CONFIG_USB_ATMEL_USBA) || defined(CONFIG_USB_ATMEL_USBA_MODULE) + static struct resource usba_udc_resources[] = { + [0] = { + .start = AT91SAM9G45_UDPHS_FIFO, +@@ -1332,8 +1332,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {} + #if defined(CONFIG_SERIAL_ATMEL) + static struct resource dbgu_resources[] = { + [0] = { +- .start = AT91_VA_BASE_SYS + AT91_DBGU, +- .end = AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1, ++ .start = AT91_BASE_SYS + AT91_DBGU, ++ .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { +@@ -1346,7 +1346,6 @@ static struct resource dbgu_resources[] = { + static struct atmel_uart_data dbgu_data = { + .use_dma_tx = 0, + .use_dma_rx = 0, +- .regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU), + }; + + static u64 dbgu_dmamask = DMA_BIT_MASK(32); +diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c +index 305a851..628eb56 100644 +--- a/arch/arm/mach-at91/at91sam9rl_devices.c ++++ b/arch/arm/mach-at91/at91sam9rl_devices.c +@@ -75,7 +75,7 @@ void __init at91_add_device_hdmac(void) {} + * USB HS Device (Gadget) + * -------------------------------------------------------------------- */ + +-#if defined(CONFIG_USB_GADGET_ATMEL_USBA) || defined(CONFIG_USB_GADGET_ATMEL_USBA_MODULE) ++#if defined(CONFIG_USB_ATMEL_USBA) || defined(CONFIG_USB_ATMEL_USBA_MODULE) + + static struct resource usba_udc_resources[] = { + [0] = { +@@ -908,8 +908,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {} + #if defined(CONFIG_SERIAL_ATMEL) + static struct resource dbgu_resources[] = { + [0] = { +- .start = AT91_VA_BASE_SYS + AT91_DBGU, +- .end = AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1, ++ .start = AT91_BASE_SYS + AT91_DBGU, ++ .end = AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { +@@ -922,7 +922,6 @@ static struct resource dbgu_resources[] = { + static struct atmel_uart_data dbgu_data = { + .use_dma_tx = 0, + .use_dma_rx = 0, /* DBGU not capable of receive DMA */ +- .regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU), + }; + + static u64 dbgu_dmamask = DMA_BIT_MASK(32); +diff --git a/arch/arm/mach-at91/board-yl-9200.c b/arch/arm/mach-at91/board-yl-9200.c +index 649b052..12a3f95 100644 +--- a/arch/arm/mach-at91/board-yl-9200.c ++++ b/arch/arm/mach-at91/board-yl-9200.c +@@ -384,7 +384,7 @@ static struct spi_board_info yl9200_spi_devices[] = { + #include