diff --git a/debian/changelog b/debian/changelog index ca55a6ed8..47ec54cdb 100644 --- a/debian/changelog +++ b/debian/changelog @@ -78,6 +78,7 @@ linux (3.10.5-1) UNRELEASED; urgency=low * atl1c: Fix misuse of netdev_alloc_skb in refilling rx ring (Closes: #718505) * [rt] genpatch.py: Accept missing series-rt + * [rt] Update to 3.10.4-rt1 and reenable -- Ben Hutchings Tue, 30 Jul 2013 18:09:20 +0200 diff --git a/debian/config/defines b/debian/config/defines index b9c3d64dd..bfdd5c1d3 100644 --- a/debian/config/defines +++ b/debian/config/defines @@ -28,7 +28,7 @@ featuresets: rt [featureset-rt_base] -enabled: false +enabled: true [description] part-long-up: This kernel is not suitable for SMP (multi-processor, diff --git a/debian/patches/features/all/rt/0002-kernel-SRCU-provide-a-static-initializer.patch b/debian/patches/features/all/rt/0002-kernel-SRCU-provide-a-static-initializer.patch new file mode 100644 index 000000000..d3904fad3 --- /dev/null +++ b/debian/patches/features/all/rt/0002-kernel-SRCU-provide-a-static-initializer.patch @@ -0,0 +1,134 @@ +From: Sebastian Andrzej Siewior +Date: Tue, 19 Mar 2013 14:44:30 +0100 +Subject: [PATCH] kernel/SRCU: provide a static initializer +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +There are macros for static initializer for the three out of four +possible notifier types, that are: + ATOMIC_NOTIFIER_HEAD() + BLOCKING_NOTIFIER_HEAD() + RAW_NOTIFIER_HEAD() + +This patch provides a static initilizer for the forth type to make it +complete. + +Signed-off-by: Sebastian Andrzej Siewior +--- + include/linux/notifier.h | 34 +++++++++++++++++++++++++--------- + include/linux/srcu.h | 8 ++++---- + 2 files changed, 29 insertions(+), 13 deletions(-) + +Index: linux-stable/include/linux/notifier.h +=================================================================== +--- linux-stable.orig/include/linux/notifier.h ++++ linux-stable/include/linux/notifier.h +@@ -6,7 +6,7 @@ + * + * Alan Cox + */ +- ++ + #ifndef _LINUX_NOTIFIER_H + #define _LINUX_NOTIFIER_H + #include +@@ -42,9 +42,7 @@ + * in srcu_notifier_call_chain(): no cache bounces and no memory barriers. + * As compensation, srcu_notifier_chain_unregister() is rather expensive. + * SRCU notifier chains should be used when the chain will be called very +- * often but notifier_blocks will seldom be removed. Also, SRCU notifier +- * chains are slightly more difficult to use because they require special +- * runtime initialization. ++ * often but notifier_blocks will seldom be removed. 
+ */ + + typedef int (*notifier_fn_t)(struct notifier_block *nb, +@@ -88,7 +86,7 @@ struct srcu_notifier_head { + (name)->head = NULL; \ + } while (0) + +-/* srcu_notifier_heads must be initialized and cleaned up dynamically */ ++/* srcu_notifier_heads must be cleaned up dynamically */ + extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); + #define srcu_cleanup_notifier_head(name) \ + cleanup_srcu_struct(&(name)->srcu); +@@ -101,7 +99,13 @@ extern void srcu_init_notifier_head(stru + .head = NULL } + #define RAW_NOTIFIER_INIT(name) { \ + .head = NULL } +-/* srcu_notifier_heads cannot be initialized statically */ ++ ++#define SRCU_NOTIFIER_INIT(name, pcpu) \ ++ { \ ++ .mutex = __MUTEX_INITIALIZER(name.mutex), \ ++ .head = NULL, \ ++ .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \ ++ } + + #define ATOMIC_NOTIFIER_HEAD(name) \ + struct atomic_notifier_head name = \ +@@ -113,6 +117,18 @@ extern void srcu_init_notifier_head(stru + struct raw_notifier_head name = \ + RAW_NOTIFIER_INIT(name) + ++#define _SRCU_NOTIFIER_HEAD(name, mod) \ ++ static DEFINE_PER_CPU(struct srcu_struct_array, \ ++ name##_head_srcu_array); \ ++ mod struct srcu_notifier_head name = \ ++ SRCU_NOTIFIER_INIT(name, name##_head_srcu_array) ++ ++#define SRCU_NOTIFIER_HEAD(name) \ ++ _SRCU_NOTIFIER_HEAD(name, ) ++ ++#define SRCU_NOTIFIER_HEAD_STATIC(name) \ ++ _SRCU_NOTIFIER_HEAD(name, static) ++ + #ifdef __KERNEL__ + + extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh, +@@ -182,12 +198,12 @@ static inline int notifier_to_errno(int + + /* + * Declared notifiers so far. I can imagine quite a few more chains +- * over time (eg laptop power reset chains, reboot chain (to clean ++ * over time (eg laptop power reset chains, reboot chain (to clean + * device units up), device [un]mount chain, module load/unload chain, +- * low memory chain, screenblank chain (for plug in modular screenblankers) ++ * low memory chain, screenblank chain (for plug in modular screenblankers) + * VC switch chains (for loadable kernel svgalib VC switch helpers) etc... + */ +- ++ + /* CPU notfiers are defined in include/linux/cpu.h. 
*/ + + /* netdevice notifiers are defined in include/linux/netdevice.h */ +Index: linux-stable/include/linux/srcu.h +=================================================================== +--- linux-stable.orig/include/linux/srcu.h ++++ linux-stable/include/linux/srcu.h +@@ -84,10 +84,10 @@ int init_srcu_struct(struct srcu_struct + + void process_srcu(struct work_struct *work); + +-#define __SRCU_STRUCT_INIT(name) \ ++#define __SRCU_STRUCT_INIT(name, pcpu_name) \ + { \ + .completed = -300, \ +- .per_cpu_ref = &name##_srcu_array, \ ++ .per_cpu_ref = &pcpu_name, \ + .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \ + .running = false, \ + .batch_queue = RCU_BATCH_INIT(name.batch_queue), \ +@@ -104,11 +104,11 @@ void process_srcu(struct work_struct *wo + */ + #define DEFINE_SRCU(name) \ + static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ +- struct srcu_struct name = __SRCU_STRUCT_INIT(name); ++ struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_array); + + #define DEFINE_STATIC_SRCU(name) \ + static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ +- static struct srcu_struct name = __SRCU_STRUCT_INIT(name); ++ static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_array); + + /** + * call_srcu() - Queue a callback for invocation after an SRCU grace period diff --git a/debian/patches/features/all/rt/0002-x86-highmem-add-a-already-used-pte-check.patch b/debian/patches/features/all/rt/0002-x86-highmem-add-a-already-used-pte-check.patch new file mode 100644 index 000000000..84d39dcff --- /dev/null +++ b/debian/patches/features/all/rt/0002-x86-highmem-add-a-already-used-pte-check.patch @@ -0,0 +1,26 @@ +From 65513f34449eedb6b84c24a3583266534c1627e4 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Mon, 11 Mar 2013 17:09:55 +0100 +Subject: [PATCH 2/6] x86/highmem: add a "already used pte" check +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +This is a copy from kmap_atomic_prot(). + +Signed-off-by: Sebastian Andrzej Siewior +--- + arch/x86/mm/iomap_32.c | 2 ++ + 1 file changed, 2 insertions(+) + +Index: linux-stable/arch/x86/mm/iomap_32.c +=================================================================== +--- linux-stable.orig/arch/x86/mm/iomap_32.c ++++ linux-stable/arch/x86/mm/iomap_32.c +@@ -65,6 +65,8 @@ void *kmap_atomic_prot_pfn(unsigned long + type = kmap_atomic_idx_push(); + idx = type + KM_TYPE_NR * smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); ++ WARN_ON(!pte_none(*(kmap_pte - idx))); ++ + #ifdef CONFIG_PREEMPT_RT_FULL + current->kmap_pte[type] = pte; + #endif diff --git a/debian/patches/features/all/rt/0003-arm-highmem-flush-tlb-on-unmap.patch b/debian/patches/features/all/rt/0003-arm-highmem-flush-tlb-on-unmap.patch new file mode 100644 index 000000000..146fdc04a --- /dev/null +++ b/debian/patches/features/all/rt/0003-arm-highmem-flush-tlb-on-unmap.patch @@ -0,0 +1,31 @@ +From e2ca4d092d9c6e6b07b465b4d81da207bbcc7437 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Mon, 11 Mar 2013 21:37:27 +0100 +Subject: [PATCH 3/6] arm/highmem: flush tlb on unmap +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +The tlb should be flushed on unmap and thus make the mapping entry +invalid. This is only done in the non-debug case which does not look +right. 
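
For context (an illustrative sketch, not part of either highmem patch): a typical atomic-kmap user looks like the fragment below. kmap_atomic() installs a per-CPU fixmap PTE, and if __kunmap_atomic() leaves that PTE and its TLB entry behind, the next user of the same slot can silently alias the old page; that is the hole the WARN_ON added above and the unconditional clear below guard against. The helper name is made up for illustration.

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Hypothetical helper, shown only to illustrate the kmap_atomic() pairing. */
    static void copy_from_highpage(struct page *page, void *dst, size_t len)
    {
            void *src = kmap_atomic(page);  /* installs a per-CPU fixmap PTE */

            memcpy(dst, src, len);
            kunmap_atomic(src);             /* this slot is reused by the next caller,
                                             * so its PTE must be cleared and the
                                             * stale TLB entry flushed here */
    }
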
+ +Signed-off-by: Sebastian Andrzej Siewior +--- + arch/arm/mm/highmem.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: linux-stable/arch/arm/mm/highmem.c +=================================================================== +--- linux-stable.orig/arch/arm/mm/highmem.c ++++ linux-stable/arch/arm/mm/highmem.c +@@ -95,10 +95,10 @@ void __kunmap_atomic(void *kvaddr) + __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); + #ifdef CONFIG_DEBUG_HIGHMEM + BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); +- set_top_pte(vaddr, __pte(0)); + #else + (void) idx; /* to kill a warning */ + #endif ++ set_top_pte(vaddr, __pte(0)); + kmap_atomic_idx_pop(); + } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) { + /* this address was obtained through kmap_high_get() */ diff --git a/debian/patches/features/all/rt/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch b/debian/patches/features/all/rt/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch new file mode 100644 index 000000000..d2f581faf --- /dev/null +++ b/debian/patches/features/all/rt/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch @@ -0,0 +1,47 @@ +From eef09918aff670a6162d2ae5fe87b393698ef57d Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner +Date: Fri, 1 Mar 2013 11:17:42 +0100 +Subject: [PATCH 5/6] futex: Ensure lock/unlock symetry versus pi_lock and + hash bucket lock +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +In exit_pi_state_list() we have the following locking construct: + + spin_lock(&hb->lock); + raw_spin_lock_irq(&curr->pi_lock); + + ... + spin_unlock(&hb->lock); + +In !RT this works, but on RT the migrate_enable() function which is +called from spin_unlock() sees atomic context due to the held pi_lock +and just decrements the migrate_disable_atomic counter of the +task. Now the next call to migrate_disable() sees the counter being +negative and issues a warning. That check should be in +migrate_enable() already. + +Fix this by dropping pi_lock before unlocking hb->lock and reaquire +pi_lock after that again. This is safe as the loop code reevaluates +head again under the pi_lock. + +Reported-by: Yong Zhang +Signed-off-by: Thomas Gleixner +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/futex.c | 2 ++ + 1 file changed, 2 insertions(+) + +Index: linux-stable/kernel/futex.c +=================================================================== +--- linux-stable.orig/kernel/futex.c ++++ linux-stable/kernel/futex.c +@@ -571,7 +571,9 @@ void exit_pi_state_list(struct task_stru + * task still owns the PI-state: + */ + if (head->next != next) { ++ raw_spin_unlock_irq(&curr->pi_lock); + spin_unlock(&hb->lock); ++ raw_spin_lock_irq(&curr->pi_lock); + continue; + } + diff --git a/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch b/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch new file mode 100644 index 000000000..849583411 --- /dev/null +++ b/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch @@ -0,0 +1,80 @@ +From b72b514282ffad0d665ea94932b968f388304079 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Thu, 21 Mar 2013 19:01:05 +0100 +Subject: [PATCH] HACK: printk: drop the logbuf_lock more often +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +The lock is hold with irgs off. 
The latency drops 500us+ on my arm bugs +with a "full" buffer after executing "dmesg" on the shell. + +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/printk.c | 27 ++++++++++++++++++++++++++- + 1 file changed, 26 insertions(+), 1 deletion(-) + +Index: linux-stable/kernel/printk.c +=================================================================== +--- linux-stable.orig/kernel/printk.c ++++ linux-stable/kernel/printk.c +@@ -1034,6 +1034,7 @@ static int syslog_print_all(char __user + { + char *text; + int len = 0; ++ int attempts = 0; + + text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL); + if (!text) +@@ -1045,7 +1046,14 @@ static int syslog_print_all(char __user + u64 seq; + u32 idx; + enum log_flags prev; +- ++ int num_msg; ++try_again: ++ attempts++; ++ if (attempts > 10) { ++ len = -EBUSY; ++ goto out; ++ } ++ num_msg = 0; + if (clear_seq < log_first_seq) { + /* messages are gone, move to first available one */ + clear_seq = log_first_seq; +@@ -1066,6 +1074,14 @@ static int syslog_print_all(char __user + prev = msg->flags; + idx = log_next(idx); + seq++; ++ num_msg++; ++ if (num_msg > 5) { ++ num_msg = 0; ++ raw_spin_unlock_irq(&logbuf_lock); ++ raw_spin_lock_irq(&logbuf_lock); ++ if (clear_seq < log_first_seq) ++ goto try_again; ++ } + } + + /* move first record forward until length fits into the buffer */ +@@ -1079,6 +1095,14 @@ static int syslog_print_all(char __user + prev = msg->flags; + idx = log_next(idx); + seq++; ++ num_msg++; ++ if (num_msg > 5) { ++ num_msg = 0; ++ raw_spin_unlock_irq(&logbuf_lock); ++ raw_spin_lock_irq(&logbuf_lock); ++ if (clear_seq < log_first_seq) ++ goto try_again; ++ } + } + + /* last message fitting into this dump */ +@@ -1120,6 +1144,7 @@ static int syslog_print_all(char __user + clear_seq = log_next_seq; + clear_idx = log_next_idx; + } ++out: + raw_spin_unlock_irq(&logbuf_lock); + + kfree(text); diff --git a/debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch b/debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch new file mode 100644 index 000000000..f1802c040 --- /dev/null +++ b/debian/patches/features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch @@ -0,0 +1,183 @@ +From: Steven Rostedt +Date: Wed, 13 Feb 2013 09:26:05 -0500 +Subject: [PATCH] acpi/rt: Convert acpi_gbl_hardware lock back to a raw_spinlock_t +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +We hit the following bug with 3.6-rt: + +[ 5.898990] BUG: scheduling while atomic: swapper/3/0/0x00000002 +[ 5.898991] no locks held by swapper/3/0. +[ 5.898993] Modules linked in: +[ 5.898996] Pid: 0, comm: swapper/3 Not tainted 3.6.11-rt28.19.el6rt.x86_64.debug #1 +[ 5.898997] Call Trace: +[ 5.899011] [] __schedule_bug+0x67/0x90 +[ 5.899028] [] __schedule+0x793/0x7a0 +[ 5.899032] [] ? debug_rt_mutex_print_deadlock+0x50/0x200 +[ 5.899034] [] schedule+0x29/0x70 +[ 5.899036] BUG: scheduling while atomic: swapper/7/0/0x00000002 +[ 5.899037] no locks held by swapper/7/0. +[ 5.899039] [] rt_spin_lock_slowlock+0xe5/0x2f0 +[ 5.899040] Modules linked in: +[ 5.899041] +[ 5.899045] [] ? _raw_spin_unlock_irqrestore+0x38/0x90 +[ 5.899046] Pid: 0, comm: swapper/7 Not tainted 3.6.11-rt28.19.el6rt.x86_64.debug #1 +[ 5.899047] Call Trace: +[ 5.899049] [] rt_spin_lock+0x16/0x40 +[ 5.899052] [] __schedule_bug+0x67/0x90 +[ 5.899054] [] ? 
notifier_call_chain+0x80/0x80 +[ 5.899056] [] __schedule+0x793/0x7a0 +[ 5.899059] [] acpi_os_acquire_lock+0x1f/0x23 +[ 5.899062] [] ? debug_rt_mutex_print_deadlock+0x50/0x200 +[ 5.899068] [] acpi_write_bit_register+0x33/0xb0 +[ 5.899071] [] schedule+0x29/0x70 +[ 5.899072] [] ? acpi_read_bit_register+0x33/0x51 +[ 5.899074] [] rt_spin_lock_slowlock+0xe5/0x2f0 +[ 5.899077] [] acpi_idle_enter_bm+0x8a/0x28e +[ 5.899079] [] ? _raw_spin_unlock_irqrestore+0x38/0x90 +[ 5.899081] [] ? this_cpu_load+0x1a/0x30 +[ 5.899083] [] rt_spin_lock+0x16/0x40 +[ 5.899087] [] cpuidle_enter+0x19/0x20 +[ 5.899088] [] ? notifier_call_chain+0x80/0x80 +[ 5.899090] [] cpuidle_enter_state+0x17/0x50 +[ 5.899092] [] acpi_os_acquire_lock+0x1f/0x23 +[ 5.899094] [] cpuidle899101] [] ? + +As the acpi code disables interrupts in acpi_idle_enter_bm, and calls +code that grabs the acpi lock, it causes issues as the lock is currently +in RT a sleeping lock. + +The lock was converted from a raw to a sleeping lock due to some +previous issues, and tests that showed it didn't seem to matter. +Unfortunately, it did matter for one of our boxes. + +This patch converts the lock back to a raw lock. I've run this code on a +few of my own machines, one being my laptop that uses the acpi quite +extensively. I've been able to suspend and resume without issues. + +[ tglx: Made the change exclusive for acpi_gbl_hardware_lock ] + +Signed-off-by: Steven Rostedt +Cc: John Kacur +Cc: Clark Williams +Link: http://lkml.kernel.org/r/1360765565.23152.5.camel@gandalf.local.home +Cc: stable-rt@vger.kernel.org +Signed-off-by: Thomas Gleixner +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/acpi/acpica/acglobal.h | 2 +- + drivers/acpi/acpica/hwregs.c | 4 ++-- + drivers/acpi/acpica/hwxface.c | 4 ++-- + drivers/acpi/acpica/utmutex.c | 4 ++-- + include/acpi/platform/aclinux.h | 14 ++++++++++++++ + 5 files changed, 21 insertions(+), 7 deletions(-) + +Index: linux-stable/drivers/acpi/acpica/acglobal.h +=================================================================== +--- linux-stable.orig/drivers/acpi/acpica/acglobal.h ++++ linux-stable/drivers/acpi/acpica/acglobal.h +@@ -223,7 +223,7 @@ ACPI_EXTERN u8 acpi_gbl_global_lock_pend + * interrupt level + */ + ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock; /* For GPE data structs and registers */ +-ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */ ++ACPI_EXTERN acpi_raw_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */ + ACPI_EXTERN acpi_spinlock acpi_gbl_reference_count_lock; + + /* Mutex for _OSI support */ +Index: linux-stable/drivers/acpi/acpica/hwregs.c +=================================================================== +--- linux-stable.orig/drivers/acpi/acpica/hwregs.c ++++ linux-stable/drivers/acpi/acpica/hwregs.c +@@ -269,14 +269,14 @@ acpi_status acpi_hw_clear_acpi_status(vo + ACPI_BITMASK_ALL_FIXED_STATUS, + ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address))); + +- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); ++ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); + + /* Clear the fixed events in PM1 A/B */ + + status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS, + ACPI_BITMASK_ALL_FIXED_STATUS); + +- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); ++ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); + + if (ACPI_FAILURE(status)) + goto exit; +Index: linux-stable/drivers/acpi/acpica/hwxface.c +=================================================================== +--- 
linux-stable.orig/drivers/acpi/acpica/hwxface.c ++++ linux-stable/drivers/acpi/acpica/hwxface.c +@@ -365,7 +365,7 @@ acpi_status acpi_write_bit_register(u32 + return_ACPI_STATUS(AE_BAD_PARAMETER); + } + +- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); ++ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); + + /* + * At this point, we know that the parent register is one of the +@@ -426,7 +426,7 @@ acpi_status acpi_write_bit_register(u32 + + unlock_and_exit: + +- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); ++ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); + return_ACPI_STATUS(status); + } + +Index: linux-stable/drivers/acpi/acpica/utmutex.c +=================================================================== +--- linux-stable.orig/drivers/acpi/acpica/utmutex.c ++++ linux-stable/drivers/acpi/acpica/utmutex.c +@@ -88,7 +88,7 @@ acpi_status acpi_ut_mutex_initialize(voi + return_ACPI_STATUS (status); + } + +- status = acpi_os_create_lock (&acpi_gbl_hardware_lock); ++ status = acpi_os_create_raw_lock (&acpi_gbl_hardware_lock); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } +@@ -141,7 +141,7 @@ void acpi_ut_mutex_terminate(void) + /* Delete the spinlocks */ + + acpi_os_delete_lock(acpi_gbl_gpe_lock); +- acpi_os_delete_lock(acpi_gbl_hardware_lock); ++ acpi_os_delete_raw_lock(acpi_gbl_hardware_lock); + acpi_os_delete_lock(acpi_gbl_reference_count_lock); + + /* Delete the reader/writer lock */ +Index: linux-stable/include/acpi/platform/aclinux.h +=================================================================== +--- linux-stable.orig/include/acpi/platform/aclinux.h ++++ linux-stable/include/acpi/platform/aclinux.h +@@ -72,6 +72,7 @@ + + #define acpi_cache_t struct kmem_cache + #define acpi_spinlock spinlock_t * ++#define acpi_raw_spinlock raw_spinlock_t * + #define acpi_cpu_flags unsigned long + + #else /* !__KERNEL__ */ +@@ -174,6 +175,19 @@ static inline void *acpi_os_acquire_obje + lock ? AE_OK : AE_NO_MEMORY; \ + }) + ++#define acpi_os_create_raw_lock(__handle) \ ++({ \ ++ raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \ ++ \ ++ if (lock) { \ ++ *(__handle) = lock; \ ++ raw_spin_lock_init(*(__handle)); \ ++ } \ ++ lock ? AE_OK : AE_NO_MEMORY; \ ++}) ++ ++#define acpi_os_delete_raw_lock(__handle) kfree(__handle) ++ + #endif /* __KERNEL__ */ + + #endif /* __ACLINUX_H__ */ diff --git a/debian/patches/features/all/rt/arch-use-pagefault-disabled.patch b/debian/patches/features/all/rt/arch-use-pagefault-disabled.patch new file mode 100644 index 000000000..5f4d77020 --- /dev/null +++ b/debian/patches/features/all/rt/arch-use-pagefault-disabled.patch @@ -0,0 +1,331 @@ +Subject: mm: Fixup all fault handlers to check current->pagefault_disable +From: Thomas Gleixner +Date: Thu, 17 Mar 2011 11:32:28 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Necessary for decoupling pagefault disable from preempt count. 
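
All of the per-architecture hunks below add the same condition to the early bail-out in the fault handler. A condensed sketch of that check is shown here; the pagefault_disabled member itself comes from a companion patch in this series, and the helper name is made up for illustration:

    #include <linux/sched.h>
    #include <linux/hardirq.h>

    /*
     * Do not service the fault from atomic context, without a user mm, or
     * while the current task has pagefaults explicitly disabled.
     */
    static inline bool demo_fault_disallowed(struct mm_struct *mm)
    {
            return in_atomic() || !mm || current->pagefault_disabled;
    }
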
+ +Signed-off-by: Thomas Gleixner +--- + arch/alpha/mm/fault.c | 2 +- + arch/arm/mm/fault.c | 2 +- + arch/avr32/mm/fault.c | 3 ++- + arch/cris/mm/fault.c | 2 +- + arch/frv/mm/fault.c | 2 +- + arch/ia64/mm/fault.c | 2 +- + arch/m32r/mm/fault.c | 2 +- + arch/m68k/mm/fault.c | 2 +- + arch/microblaze/mm/fault.c | 2 +- + arch/mips/mm/fault.c | 2 +- + arch/mn10300/mm/fault.c | 2 +- + arch/parisc/mm/fault.c | 2 +- + arch/powerpc/mm/fault.c | 2 +- + arch/s390/mm/fault.c | 6 ++++-- + arch/score/mm/fault.c | 2 +- + arch/sh/mm/fault.c | 2 +- + arch/sparc/mm/fault_32.c | 2 +- + arch/sparc/mm/fault_64.c | 2 +- + arch/tile/mm/fault.c | 2 +- + arch/um/kernel/trap.c | 2 +- + arch/x86/mm/fault.c | 2 +- + arch/xtensa/mm/fault.c | 2 +- + 22 files changed, 26 insertions(+), 23 deletions(-) + +Index: linux-stable/arch/alpha/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/alpha/mm/fault.c ++++ linux-stable/arch/alpha/mm/fault.c +@@ -108,7 +108,7 @@ do_page_fault(unsigned long address, uns + + /* If we're in an interrupt context, or have no user context, + we must not take the fault. */ +- if (!mm || in_atomic()) ++ if (!mm || in_atomic() || current->pagefault_disabled) + goto no_context; + + #ifdef CONFIG_ALPHA_LARGE_VMALLOC +Index: linux-stable/arch/arm/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/arm/mm/fault.c ++++ linux-stable/arch/arm/mm/fault.c +@@ -279,7 +279,7 @@ do_page_fault(unsigned long addr, unsign + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (in_atomic() || !mm || current->pagefault_disabled) + goto no_context; + + /* +Index: linux-stable/arch/avr32/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/avr32/mm/fault.c ++++ linux-stable/arch/avr32/mm/fault.c +@@ -81,7 +81,8 @@ asmlinkage void do_page_fault(unsigned l + * If we're in an interrupt or have no user context, we must + * not take the fault... + */ +- if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM)) ++ if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM) || ++ current->pagefault_disabled) + goto no_context; + + local_irq_enable(); +Index: linux-stable/arch/cris/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/cris/mm/fault.c ++++ linux-stable/arch/cris/mm/fault.c +@@ -114,7 +114,7 @@ do_page_fault(unsigned long address, str + * user context, we must not take the fault. + */ + +- if (in_atomic() || !mm) ++ if (in_atomic() || !mm || current->pagefault_disabled) + goto no_context; + + retry: +Index: linux-stable/arch/frv/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/frv/mm/fault.c ++++ linux-stable/arch/frv/mm/fault.c +@@ -78,7 +78,7 @@ asmlinkage void do_page_fault(int datamm + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (in_atomic() || !mm || current->pagefault_disabled) + goto no_context; + + down_read(&mm->mmap_sem); +Index: linux-stable/arch/ia64/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/ia64/mm/fault.c ++++ linux-stable/arch/ia64/mm/fault.c +@@ -98,7 +98,7 @@ ia64_do_page_fault (unsigned long addres + /* + * If we're in an interrupt or have no user context, we must not take the fault.. 
+ */ +- if (in_atomic() || !mm) ++ if (in_atomic() || !mm || current->pagefault_disabled) + goto no_context; + + #ifdef CONFIG_VIRTUAL_MEM_MAP +Index: linux-stable/arch/m32r/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/m32r/mm/fault.c ++++ linux-stable/arch/m32r/mm/fault.c +@@ -114,7 +114,7 @@ asmlinkage void do_page_fault(struct pt_ + * If we're in an interrupt or have no user context or are running in an + * atomic region then we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (in_atomic() || !mm || current->pagefault_disabled) + goto bad_area_nosemaphore; + + /* When running in the kernel we expect faults to occur only to +Index: linux-stable/arch/m68k/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/m68k/mm/fault.c ++++ linux-stable/arch/m68k/mm/fault.c +@@ -85,7 +85,7 @@ int do_page_fault(struct pt_regs *regs, + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (in_atomic() || !mm || current->pagefault_disabled) + goto no_context; + + retry: +Index: linux-stable/arch/microblaze/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/microblaze/mm/fault.c ++++ linux-stable/arch/microblaze/mm/fault.c +@@ -108,7 +108,7 @@ void do_page_fault(struct pt_regs *regs, + if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11) + is_write = 0; + +- if (unlikely(in_atomic() || !mm)) { ++ if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) { + if (kernel_mode(regs)) + goto bad_area_nosemaphore; + +Index: linux-stable/arch/mips/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/mips/mm/fault.c ++++ linux-stable/arch/mips/mm/fault.c +@@ -89,7 +89,7 @@ asmlinkage void __kprobes do_page_fault( + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (in_atomic() || !mm || current->pagefault_disabled) + goto bad_area_nosemaphore; + + retry: +Index: linux-stable/arch/mn10300/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/mn10300/mm/fault.c ++++ linux-stable/arch/mn10300/mm/fault.c +@@ -168,7 +168,7 @@ asmlinkage void do_page_fault(struct pt_ + * If we're in an interrupt or have no user + * context, we must not take the fault.. 
+ */ +- if (in_atomic() || !mm) ++ if (in_atomic() || !mm || current->pagefault_disabled) + goto no_context; + + retry: +Index: linux-stable/arch/parisc/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/parisc/mm/fault.c ++++ linux-stable/arch/parisc/mm/fault.c +@@ -177,7 +177,7 @@ void do_page_fault(struct pt_regs *regs, + int fault; + unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; + +- if (in_atomic() || !mm) ++ if (in_atomic() || !mm || current->pagefault_disabled) + goto no_context; + + retry: +Index: linux-stable/arch/powerpc/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/powerpc/mm/fault.c ++++ linux-stable/arch/powerpc/mm/fault.c +@@ -264,7 +264,7 @@ int __kprobes do_page_fault(struct pt_re + if (!arch_irq_disabled_regs(regs)) + local_irq_enable(); + +- if (in_atomic() || mm == NULL) { ++ if (in_atomic() || mm == NULL || current->pagefault_disabled) { + if (!user_mode(regs)) { + rc = SIGSEGV; + goto bail; +Index: linux-stable/arch/s390/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/s390/mm/fault.c ++++ linux-stable/arch/s390/mm/fault.c +@@ -296,7 +296,8 @@ static inline int do_exception(struct pt + * user context. + */ + fault = VM_FAULT_BADCONTEXT; +- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) ++ if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm || ++ tsk->pagefault_disabled)) + goto out; + + address = trans_exc_code & __FAIL_ADDR_MASK; +@@ -440,7 +441,8 @@ void __kprobes do_asce_exception(struct + clear_tsk_thread_flag(current, TIF_PER_TRAP); + + trans_exc_code = regs->int_parm_long; +- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) ++ if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm || ++ current->pagefault_disabled())); + goto no_context; + + down_read(&mm->mmap_sem); +Index: linux-stable/arch/score/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/score/mm/fault.c ++++ linux-stable/arch/score/mm/fault.c +@@ -72,7 +72,7 @@ asmlinkage void do_page_fault(struct pt_ + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (in_atomic() || !mm || current->pagefault_disabled) + goto bad_area_nosemaphore; + + down_read(&mm->mmap_sem); +Index: linux-stable/arch/sh/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/sh/mm/fault.c ++++ linux-stable/arch/sh/mm/fault.c +@@ -440,7 +440,7 @@ asmlinkage void __kprobes do_page_fault( + * If we're in an interrupt, have no user context or are running + * in an atomic region then we must not take the fault: + */ +- if (unlikely(in_atomic() || !mm)) { ++ if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) { + bad_area_nosemaphore(regs, error_code, address); + return; + } +Index: linux-stable/arch/sparc/mm/fault_32.c +=================================================================== +--- linux-stable.orig/arch/sparc/mm/fault_32.c ++++ linux-stable/arch/sparc/mm/fault_32.c +@@ -200,7 +200,7 @@ asmlinkage void do_sparc_fault(struct pt + * If we're in an interrupt or have no user + * context, we must not take the fault.. 
+ */ +- if (in_atomic() || !mm) ++ if (in_atomic() || !mm || current->pagefault_disabled) + goto no_context; + + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); +Index: linux-stable/arch/sparc/mm/fault_64.c +=================================================================== +--- linux-stable.orig/arch/sparc/mm/fault_64.c ++++ linux-stable/arch/sparc/mm/fault_64.c +@@ -321,7 +321,7 @@ asmlinkage void __kprobes do_sparc64_fau + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (in_atomic() || !mm || current->pagefault_enabled) + goto intr_or_no_mm; + + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); +Index: linux-stable/arch/tile/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/tile/mm/fault.c ++++ linux-stable/arch/tile/mm/fault.c +@@ -360,7 +360,7 @@ static int handle_page_fault(struct pt_r + * If we're in an interrupt, have no user context or are running in an + * atomic region then we must not take the fault. + */ +- if (in_atomic() || !mm) { ++ if (in_atomic() || !mm || current->pagefault_disabled) { + vma = NULL; /* happy compiler */ + goto bad_area_nosemaphore; + } +Index: linux-stable/arch/um/kernel/trap.c +=================================================================== +--- linux-stable.orig/arch/um/kernel/trap.c ++++ linux-stable/arch/um/kernel/trap.c +@@ -39,7 +39,7 @@ int handle_page_fault(unsigned long addr + * If the fault was during atomic operation, don't take the fault, just + * fail. + */ +- if (in_atomic()) ++ if (in_atomic() || current->pagefault_disabled) + goto out_nosemaphore; + + retry: +Index: linux-stable/arch/x86/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/x86/mm/fault.c ++++ linux-stable/arch/x86/mm/fault.c +@@ -1104,7 +1104,7 @@ __do_page_fault(struct pt_regs *regs, un + * If we're in an interrupt, have no user context or are running + * in an atomic region then we must not take the fault: + */ +- if (unlikely(in_atomic() || !mm)) { ++ if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) { + bad_area_nosemaphore(regs, error_code, address); + return; + } +Index: linux-stable/arch/xtensa/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/xtensa/mm/fault.c ++++ linux-stable/arch/xtensa/mm/fault.c +@@ -57,7 +57,7 @@ void do_page_fault(struct pt_regs *regs) + /* If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) { ++ if (in_atomic() || !mm || current->pagefault_disabled) { + bad_page_fault(regs, address, SIGSEGV); + return; + } diff --git a/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch b/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch new file mode 100644 index 000000000..7be13d30b --- /dev/null +++ b/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch @@ -0,0 +1,59 @@ +From: Benedikt Spranger +Date: Sat, 6 Mar 2010 17:47:10 +0100 +Subject: ARM: AT91: PIT: Remove irq handler when clock event is unused +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Setup and remove the interrupt handler in clock event mode selection. +This avoids calling the (shared) interrupt handler when the device is +not used. 
+ +Signed-off-by: Benedikt Spranger +Signed-off-by: Thomas Gleixner + +--- + arch/arm/mach-at91/at91rm9200_time.c | 1 + + arch/arm/mach-at91/at91sam926x_time.c | 5 ++++- + 2 files changed, 5 insertions(+), 1 deletion(-) + +Index: linux-stable/arch/arm/mach-at91/at91rm9200_time.c +=================================================================== +--- linux-stable.orig/arch/arm/mach-at91/at91rm9200_time.c ++++ linux-stable/arch/arm/mach-at91/at91rm9200_time.c +@@ -134,6 +134,7 @@ clkevt32k_mode(enum clock_event_mode mod + break; + case CLOCK_EVT_MODE_SHUTDOWN: + case CLOCK_EVT_MODE_UNUSED: ++ remove_irq(AT91_ID_SYS, &at91rm9200_timer_irq); + case CLOCK_EVT_MODE_RESUME: + irqmask = 0; + break; +Index: linux-stable/arch/arm/mach-at91/at91sam926x_time.c +=================================================================== +--- linux-stable.orig/arch/arm/mach-at91/at91sam926x_time.c ++++ linux-stable/arch/arm/mach-at91/at91sam926x_time.c +@@ -77,7 +77,7 @@ static struct clocksource pit_clk = { + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + }; + +- ++static struct irqaction at91sam926x_pit_irq; + /* + * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16) + */ +@@ -86,6 +86,8 @@ pit_clkevt_mode(enum clock_event_mode mo + { + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: ++ /* Set up irq handler */ ++ setup_irq(AT91_ID_SYS, &at91sam926x_pit_irq); + /* update clocksource counter */ + pit_cnt += pit_cycle * PIT_PICNT(pit_read(AT91_PIT_PIVR)); + pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN +@@ -98,6 +100,7 @@ pit_clkevt_mode(enum clock_event_mode mo + case CLOCK_EVT_MODE_UNUSED: + /* disable irq, leaving the clocksource active */ + pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN); ++ remove_irq(AT91_ID_SYS, &at91sam926x_pit_irq); + break; + case CLOCK_EVT_MODE_RESUME: + break; diff --git a/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch b/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch new file mode 100644 index 000000000..707d44e03 --- /dev/null +++ b/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch @@ -0,0 +1,35 @@ +From: Thomas Gleixner +Date: Sat, 1 May 2010 18:29:35 +0200 +Subject: ARM: at91: tclib: Default to tclib timer for RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +RT is not too happy about the shared timer interrupt in AT91 +devices. Default to tclib timer for RT. + +Signed-off-by: Thomas Gleixner + +--- + drivers/misc/Kconfig | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +Index: linux-stable/drivers/misc/Kconfig +=================================================================== +--- linux-stable.orig/drivers/misc/Kconfig ++++ linux-stable/drivers/misc/Kconfig +@@ -63,6 +63,7 @@ config ATMEL_PWM + config ATMEL_TCLIB + bool "Atmel AT32/AT91 Timer/Counter Library" + depends on (AVR32 || ARCH_AT91) ++ default y if PREEMPT_RT_FULL + help + Select this if you want a library to allocate the Timer/Counter + blocks found on many Atmel processors. This facilitates using +@@ -95,7 +96,7 @@ config ATMEL_TCB_CLKSRC_BLOCK + config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK + bool "TC Block use 32 KiHz clock" + depends on ATMEL_TCB_CLKSRC +- default y ++ default y if !PREEMPT_RT_FULL + help + Select this to use 32 KiHz base clock rate as TC block clock + source for clock events. 
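
The next patch converts the ARM boot_lock to a raw spinlock. For context (a sketch under the usual PREEMPT_RT rules, not code from the series): on RT a plain spinlock_t becomes a sleeping lock, which the idle task bringing up a secondary CPU must never block on, whereas a raw_spinlock_t keeps the non-RT spinning behaviour:

    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(demo_boot_lock);     /* still a spinning lock on RT */

    static void demo_secondary_sync(void)
    {
            raw_spin_lock(&demo_boot_lock);         /* safe to take from the idle thread */
            /* ... synchronise with the secondary CPU ... */
            raw_spin_unlock(&demo_boot_lock);
    }
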
diff --git a/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch b/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch new file mode 100644 index 000000000..752614fff --- /dev/null +++ b/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch @@ -0,0 +1,326 @@ +Subject: preempt-rt: Convert arm boot_lock to raw +From: Frank Rowand +Date: Mon, 19 Sep 2011 14:51:14 -0700 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + + +The arm boot_lock is used by the secondary processor startup code. The locking +task is the idle thread, which has idle->sched_class == &idle_sched_class. +idle_sched_class->enqueue_task == NULL, so if the idle task blocks on the +lock, the attempt to wake it when the lock becomes available will fail: + +try_to_wake_up() + ... + activate_task() + enqueue_task() + p->sched_class->enqueue_task(rq, p, flags) + +Fix by converting boot_lock to a raw spin lock. + +Signed-off-by: Frank Rowand +Link: http://lkml.kernel.org/r/4E77B952.3010606@am.sony.com +Signed-off-by: Thomas Gleixner +--- + arch/arm/mach-exynos/platsmp.c | 12 ++++++------ + arch/arm/mach-msm/platsmp.c | 10 +++++----- + arch/arm/mach-omap2/omap-smp.c | 10 +++++----- + arch/arm/mach-prima2/platsmp.c | 10 +++++----- + arch/arm/mach-spear/platsmp.c | 8 ++++---- + arch/arm/mach-ux500/platsmp.c | 10 +++++----- + arch/arm/plat-versatile/platsmp.c | 10 +++++----- + 7 files changed, 35 insertions(+), 35 deletions(-) + +Index: linux-stable/arch/arm/mach-exynos/platsmp.c +=================================================================== +--- linux-stable.orig/arch/arm/mach-exynos/platsmp.c ++++ linux-stable/arch/arm/mach-exynos/platsmp.c +@@ -71,7 +71,7 @@ static void __iomem *scu_base_addr(void) + return (void __iomem *)(S5P_VA_SCU); + } + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + static void __cpuinit exynos_secondary_init(unsigned int cpu) + { +@@ -84,8 +84,8 @@ static void __cpuinit exynos_secondary_i + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + static int __cpuinit exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -97,7 +97,7 @@ static int __cpuinit exynos_boot_seconda + * Set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * The secondary processor is waiting to be released from +@@ -126,7 +126,7 @@ static int __cpuinit exynos_boot_seconda + + if (timeout == 0) { + printk(KERN_ERR "cpu1 power enable failed"); +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + return -ETIMEDOUT; + } + } +@@ -165,7 +165,7 @@ static int __cpuinit exynos_boot_seconda + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? 
-ENOSYS : 0; + } +Index: linux-stable/arch/arm/mach-msm/platsmp.c +=================================================================== +--- linux-stable.orig/arch/arm/mach-msm/platsmp.c ++++ linux-stable/arch/arm/mach-msm/platsmp.c +@@ -30,7 +30,7 @@ + + extern void msm_secondary_startup(void); + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + static inline int get_core_count(void) + { +@@ -50,8 +50,8 @@ static void __cpuinit msm_secondary_init + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + static __cpuinit void prepare_cold_cpu(unsigned int cpu) +@@ -88,7 +88,7 @@ static int __cpuinit msm_boot_secondary( + * set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * The secondary processor is waiting to be released from +@@ -122,7 +122,7 @@ static int __cpuinit msm_boot_secondary( + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? -ENOSYS : 0; + } +Index: linux-stable/arch/arm/mach-omap2/omap-smp.c +=================================================================== +--- linux-stable.orig/arch/arm/mach-omap2/omap-smp.c ++++ linux-stable/arch/arm/mach-omap2/omap-smp.c +@@ -44,7 +44,7 @@ u16 pm44xx_errata; + /* SCU base address */ + static void __iomem *scu_base; + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + void __iomem *omap4_get_scu_base(void) + { +@@ -68,8 +68,8 @@ static void __cpuinit omap4_secondary_in + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + static int __cpuinit omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -83,7 +83,7 @@ static int __cpuinit omap4_boot_secondar + * Set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * Update the AuxCoreBoot0 with boot state for secondary core. +@@ -160,7 +160,7 @@ static int __cpuinit omap4_boot_secondar + * Now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return 0; + } +Index: linux-stable/arch/arm/mach-prima2/platsmp.c +=================================================================== +--- linux-stable.orig/arch/arm/mach-prima2/platsmp.c ++++ linux-stable/arch/arm/mach-prima2/platsmp.c +@@ -23,7 +23,7 @@ + static void __iomem *scu_base; + static void __iomem *rsc_base; + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + static struct map_desc scu_io_desc __initdata = { + .length = SZ_4K, +@@ -56,8 +56,8 @@ static void __cpuinit sirfsoc_secondary_ + /* + * Synchronise with the boot thread. 
+ */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + static struct of_device_id rsc_ids[] = { +@@ -95,7 +95,7 @@ static int __cpuinit sirfsoc_boot_second + /* make sure write buffer is drained */ + mb(); + +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * The secondary processor is waiting to be released from +@@ -128,7 +128,7 @@ static int __cpuinit sirfsoc_boot_second + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? -ENOSYS : 0; + } +Index: linux-stable/arch/arm/mach-spear/platsmp.c +=================================================================== +--- linux-stable.orig/arch/arm/mach-spear/platsmp.c ++++ linux-stable/arch/arm/mach-spear/platsmp.c +@@ -36,8 +36,8 @@ static void __cpuinit spear13xx_secondar + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + static int __cpuinit spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -48,7 +48,7 @@ static int __cpuinit spear13xx_boot_seco + * set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * The secondary processor is waiting to be released from +@@ -75,7 +75,7 @@ static int __cpuinit spear13xx_boot_seco + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? -ENOSYS : 0; + } +Index: linux-stable/arch/arm/mach-ux500/platsmp.c +=================================================================== +--- linux-stable.orig/arch/arm/mach-ux500/platsmp.c ++++ linux-stable/arch/arm/mach-ux500/platsmp.c +@@ -52,7 +52,7 @@ static void __iomem *scu_base_addr(void) + return NULL; + } + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + static void __cpuinit ux500_secondary_init(unsigned int cpu) + { +@@ -65,8 +65,8 @@ static void __cpuinit ux500_secondary_in + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + static int __cpuinit ux500_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -77,7 +77,7 @@ static int __cpuinit ux500_boot_secondar + * set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * The secondary processor is waiting to be released from +@@ -98,7 +98,7 @@ static int __cpuinit ux500_boot_secondar + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? 
-ENOSYS : 0; + } +Index: linux-stable/arch/arm/plat-versatile/platsmp.c +=================================================================== +--- linux-stable.orig/arch/arm/plat-versatile/platsmp.c ++++ linux-stable/arch/arm/plat-versatile/platsmp.c +@@ -31,7 +31,7 @@ static void __cpuinit write_pen_release( + outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1)); + } + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + void __cpuinit versatile_secondary_init(unsigned int cpu) + { +@@ -44,8 +44,8 @@ void __cpuinit versatile_secondary_init( + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + int __cpuinit versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -56,7 +56,7 @@ int __cpuinit versatile_boot_secondary(u + * Set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * This is really belt and braces; we hold unintended secondary +@@ -86,7 +86,7 @@ int __cpuinit versatile_boot_secondary(u + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? -ENOSYS : 0; + } diff --git a/debian/patches/features/all/rt/arm-disable-highmem-on-rt.patch b/debian/patches/features/all/rt/arm-disable-highmem-on-rt.patch new file mode 100644 index 000000000..32662ba6f --- /dev/null +++ b/debian/patches/features/all/rt/arm-disable-highmem-on-rt.patch @@ -0,0 +1,23 @@ +Subject: arm-disable-highmem-on-rt.patch +From: Thomas Gleixner +Date: Mon, 18 Jul 2011 17:09:28 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + arch/arm/Kconfig | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: linux-stable/arch/arm/Kconfig +=================================================================== +--- linux-stable.orig/arch/arm/Kconfig ++++ linux-stable/arch/arm/Kconfig +@@ -1704,7 +1704,7 @@ config HAVE_ARCH_PFN_VALID + + config HIGHMEM + bool "High Memory Support" +- depends on MMU ++ depends on MMU && !PREEMPT_RT_FULL + help + The address space of ARM processors is only 4 Gigabytes large + and it has to accommodate user address space, kernel address diff --git a/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch b/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch new file mode 100644 index 000000000..cdb13a985 --- /dev/null +++ b/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch @@ -0,0 +1,149 @@ +Subject: arm-enable-highmem-for-rt.patch +From: Thomas Gleixner +Date: Wed, 13 Feb 2013 11:03:11 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + arch/arm/Kconfig | 2 - + arch/arm/include/asm/switch_to.h | 9 ++++++++ + arch/arm/mm/highmem.c | 41 +++++++++++++++++++++++++++++++++++++-- + include/linux/highmem.h | 1 + 4 files changed, 50 insertions(+), 3 deletions(-) + +Index: linux-stable/arch/arm/Kconfig +=================================================================== +--- linux-stable.orig/arch/arm/Kconfig ++++ linux-stable/arch/arm/Kconfig +@@ -1704,7 +1704,7 @@ config HAVE_ARCH_PFN_VALID + + config HIGHMEM + bool "High Memory Support" +- depends on MMU && !PREEMPT_RT_FULL ++ depends on MMU + 
help + The address space of ARM processors is only 4 Gigabytes large + and it has to accommodate user address space, kernel address +Index: linux-stable/arch/arm/include/asm/switch_to.h +=================================================================== +--- linux-stable.orig/arch/arm/include/asm/switch_to.h ++++ linux-stable/arch/arm/include/asm/switch_to.h +@@ -3,6 +3,14 @@ + + #include + ++#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM ++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p); ++#else ++static inline void ++switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } ++#endif ++ ++ + /* + * switch_to(prev, next) should switch from task `prev' to `next' + * `prev' will never be the same as `next'. schedule() itself +@@ -12,6 +20,7 @@ extern struct task_struct *__switch_to(s + + #define switch_to(prev,next,last) \ + do { \ ++ switch_kmaps(prev, next); \ + last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ + } while (0) + +Index: linux-stable/arch/arm/mm/highmem.c +=================================================================== +--- linux-stable.orig/arch/arm/mm/highmem.c ++++ linux-stable/arch/arm/mm/highmem.c +@@ -38,6 +38,7 @@ EXPORT_SYMBOL(kunmap); + + void *kmap_atomic(struct page *page) + { ++ pte_t pte = mk_pte(page, kmap_prot); + unsigned int idx; + unsigned long vaddr; + void *kmap; +@@ -76,7 +77,10 @@ void *kmap_atomic(struct page *page) + * in place, so the contained TLB flush ensures the TLB is updated + * with the new mapping. + */ +- set_top_pte(vaddr, mk_pte(page, kmap_prot)); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = pte; ++#endif ++ set_top_pte(vaddr, pte); + + return (void *)vaddr; + } +@@ -93,6 +97,9 @@ void __kunmap_atomic(void *kvaddr) + + if (cache_is_vivt()) + __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = __pte(0); ++#endif + #ifdef CONFIG_DEBUG_HIGHMEM + BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); + #else +@@ -110,6 +117,7 @@ EXPORT_SYMBOL(__kunmap_atomic); + + void *kmap_atomic_pfn(unsigned long pfn) + { ++ pte_t pte = pfn_pte(pfn, kmap_prot); + unsigned long vaddr; + int idx, type; + +@@ -121,7 +129,10 @@ void *kmap_atomic_pfn(unsigned long pfn) + #ifdef CONFIG_DEBUG_HIGHMEM + BUG_ON(!pte_none(get_top_pte(vaddr))); + #endif +- set_top_pte(vaddr, pfn_pte(pfn, kmap_prot)); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = pte; ++#endif ++ set_top_pte(vaddr, pte); + + return (void *)vaddr; + } +@@ -135,3 +146,29 @@ struct page *kmap_atomic_to_page(const v + + return pte_page(get_top_pte(vaddr)); + } ++ ++#if defined CONFIG_PREEMPT_RT_FULL ++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) ++{ ++ int i; ++ ++ /* ++ * Clear @prev's kmap_atomic mappings ++ */ ++ for (i = 0; i < prev_p->kmap_idx; i++) { ++ int idx = i + KM_TYPE_NR * smp_processor_id(); ++ ++ set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx), __pte(0)); ++ } ++ /* ++ * Restore @next_p's kmap_atomic mappings ++ */ ++ for (i = 0; i < next_p->kmap_idx; i++) { ++ int idx = i + KM_TYPE_NR * smp_processor_id(); ++ ++ if (!pte_none(next_p->kmap_pte[i])) ++ set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx), ++ next_p->kmap_pte[i]); ++ } ++} ++#endif +Index: linux-stable/include/linux/highmem.h +=================================================================== +--- linux-stable.orig/include/linux/highmem.h ++++ linux-stable/include/linux/highmem.h +@@ -7,6 +7,7 @@ + #include + #include + #include 
++#include + + #include + diff --git a/debian/patches/features/all/rt/arm-preempt-lazy-support.patch b/debian/patches/features/all/rt/arm-preempt-lazy-support.patch new file mode 100644 index 000000000..de5e64dd6 --- /dev/null +++ b/debian/patches/features/all/rt/arm-preempt-lazy-support.patch @@ -0,0 +1,114 @@ +Subject: arm-preempt-lazy-support.patch +From: Thomas Gleixner +Date: Wed, 31 Oct 2012 12:04:11 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + arch/arm/Kconfig | 1 + + arch/arm/include/asm/thread_info.h | 3 +++ + arch/arm/kernel/asm-offsets.c | 1 + + arch/arm/kernel/entry-armv.S | 13 +++++++++++-- + arch/arm/kernel/signal.c | 3 ++- + 5 files changed, 18 insertions(+), 3 deletions(-) + +Index: linux-stable/arch/arm/Kconfig +=================================================================== +--- linux-stable.orig/arch/arm/Kconfig ++++ linux-stable/arch/arm/Kconfig +@@ -48,6 +48,7 @@ config ARM + select HAVE_MEMBLOCK + select HAVE_OPROFILE if (HAVE_PERF_EVENTS) + select HAVE_PERF_EVENTS ++ select HAVE_PREEMPT_LAZY + select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_SYSCALL_TRACEPOINTS + select HAVE_UID16 +Index: linux-stable/arch/arm/include/asm/thread_info.h +=================================================================== +--- linux-stable.orig/arch/arm/include/asm/thread_info.h ++++ linux-stable/arch/arm/include/asm/thread_info.h +@@ -50,6 +50,7 @@ struct cpu_context_save { + struct thread_info { + unsigned long flags; /* low level flags */ + int preempt_count; /* 0 => preemptable, <0 => bug */ ++ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */ + mm_segment_t addr_limit; /* address limit */ + struct task_struct *task; /* main task structure */ + struct exec_domain *exec_domain; /* execution domain */ +@@ -148,6 +149,7 @@ extern int vfp_restore_user_hwstate(stru + #define TIF_SIGPENDING 0 + #define TIF_NEED_RESCHED 1 + #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ ++#define TIF_NEED_RESCHED_LAZY 3 + #define TIF_SYSCALL_TRACE 8 + #define TIF_SYSCALL_AUDIT 9 + #define TIF_SYSCALL_TRACEPOINT 10 +@@ -161,6 +163,7 @@ extern int vfp_restore_user_hwstate(stru + #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) + #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) + #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) ++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) + #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) + #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) + #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) +Index: linux-stable/arch/arm/kernel/asm-offsets.c +=================================================================== +--- linux-stable.orig/arch/arm/kernel/asm-offsets.c ++++ linux-stable/arch/arm/kernel/asm-offsets.c +@@ -53,6 +53,7 @@ int main(void) + BLANK(); + DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); + DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); ++ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count)); + DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); + DEFINE(TI_TASK, offsetof(struct thread_info, task)); + DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain)); +Index: linux-stable/arch/arm/kernel/entry-armv.S +=================================================================== +--- linux-stable.orig/arch/arm/kernel/entry-armv.S ++++ linux-stable/arch/arm/kernel/entry-armv.S +@@ -204,11 +204,18 @@ __irq_svc: + #ifdef CONFIG_PREEMPT + 
get_thread_info tsk + ldr r8, [tsk, #TI_PREEMPT] @ get preempt count +- ldr r0, [tsk, #TI_FLAGS] @ get flags + teq r8, #0 @ if preempt count != 0 ++ bne 1f @ return from exeption ++ ldr r0, [tsk, #TI_FLAGS] @ get flags ++ tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set ++ blne svc_preempt @ preempt! ++ ++ ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count ++ teq r8, #0 @ if preempt lazy count != 0 + movne r0, #0 @ force flags to 0 +- tst r0, #_TIF_NEED_RESCHED ++ tst r0, #_TIF_NEED_RESCHED_LAZY + blne svc_preempt ++1: + #endif + + svc_exit r5, irq = 1 @ return from exception +@@ -223,6 +230,8 @@ svc_preempt: + 1: bl preempt_schedule_irq @ irq en/disable is done inside + ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS + tst r0, #_TIF_NEED_RESCHED ++ bne 1b ++ tst r0, #_TIF_NEED_RESCHED_LAZY + moveq pc, r8 @ go again + b 1b + #endif +Index: linux-stable/arch/arm/kernel/signal.c +=================================================================== +--- linux-stable.orig/arch/arm/kernel/signal.c ++++ linux-stable/arch/arm/kernel/signal.c +@@ -576,7 +576,8 @@ asmlinkage int + do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) + { + do { +- if (likely(thread_flags & _TIF_NEED_RESCHED)) { ++ if (likely(thread_flags & (_TIF_NEED_RESCHED | ++ _TIF_NEED_RESCHED_LAZY))) { + schedule(); + } else { + if (unlikely(!user_mode(regs))) diff --git a/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch b/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch new file mode 100644 index 000000000..fae12bbf7 --- /dev/null +++ b/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch @@ -0,0 +1,67 @@ +From: Steven Rostedt +Date: Fri, 3 Jul 2009 08:44:29 -0500 +Subject: ata: Do not disable interrupts in ide code for preempt-rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Use the local_irq_*_nort variants. 
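As a minimal sketch of the idiom, the *_nort helpers are assumed to expand roughly as follows (their real definitions come from other patches in this series, so treat the exact form as an assumption): on !RT they are the ordinary IRQ primitives, on RT they only save the flags so the section stays preemptible.

/* Sketch only; assumed shape of the helpers used in the hunks below.
 * The authoritative definitions live elsewhere in the patch series. */
#ifdef CONFIG_PREEMPT_RT_FULL
# define local_irq_save_nort(flags)	local_save_flags(flags)
# define local_irq_restore_nort(flags)	(void)(flags)
#else
# define local_irq_save_nort(flags)	local_irq_save(flags)
# define local_irq_restore_nort(flags)	local_irq_restore(flags)
#endif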
+ +Signed-off-by: Steven Rostedt +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + drivers/ata/libata-sff.c | 12 ++++++------ + 1 file changed, 6 insertions(+), 6 deletions(-) + +Index: linux-stable/drivers/ata/libata-sff.c +=================================================================== +--- linux-stable.orig/drivers/ata/libata-sff.c ++++ linux-stable/drivers/ata/libata-sff.c +@@ -678,9 +678,9 @@ unsigned int ata_sff_data_xfer_noirq(str + unsigned long flags; + unsigned int consumed; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + consumed = ata_sff_data_xfer32(dev, buf, buflen, rw); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + return consumed; + } +@@ -719,7 +719,7 @@ static void ata_pio_sector(struct ata_qu + unsigned long flags; + + /* FIXME: use a bounce buffer */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + buf = kmap_atomic(page); + + /* do the actual data transfer */ +@@ -727,7 +727,7 @@ static void ata_pio_sector(struct ata_qu + do_write); + + kunmap_atomic(buf); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } else { + buf = page_address(page); + ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, +@@ -864,7 +864,7 @@ next_sg: + unsigned long flags; + + /* FIXME: use bounce buffer */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + buf = kmap_atomic(page); + + /* do the actual data transfer */ +@@ -872,7 +872,7 @@ next_sg: + count, rw); + + kunmap_atomic(buf); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } else { + buf = page_address(page); + consumed = ap->ops->sff_data_xfer(dev, buf + offset, diff --git a/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch b/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch new file mode 100644 index 000000000..6b00169f1 --- /dev/null +++ b/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch @@ -0,0 +1,100 @@ +Subject: block: Shorten interrupt disabled regions +From: Thomas Gleixner +Date: Wed, 22 Jun 2011 19:47:02 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Moving the blk_sched_flush_plug() call out of the interrupt/preempt +disabled region in the scheduler allows us to replace +local_irq_save/restore(flags) by local_irq_disable/enable() in +blk_flush_plug(). + +Now instead of doing this we disable interrupts explicitely when we +lock the request_queue and reenable them when we drop the lock. That +allows interrupts to be handled when the plug list contains requests +for more than one queue. + +Aside of that this change makes the scope of the irq disabled region +more obvious. The current code confused the hell out of me when +looking at: + + local_irq_save(flags); + spin_lock(q->queue_lock); + ... + queue_unplugged(q...); + scsi_request_fn(); + spin_unlock(q->queue_lock); + spin_lock(shost->host_lock); + spin_unlock_irq(shost->host_lock); + +-------------------^^^ ???? + + spin_lock_irq(q->queue_lock); + spin_unlock(q->lock); + local_irq_restore(flags); + +Also add a comment to __blk_run_queue() documenting that +q->request_fn() can drop q->queue_lock and reenable interrupts, but +must return with q->queue_lock held and interrupts disabled. 
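Condensed, the flush loop after this change looks as sketched below (a summary of the hunks that follow, using the names from block/blk-core.c): interrupts are disabled only while a particular queue's lock is actually held, and queue_unplugged() drops both when moving to the next queue.

	/* summary sketch of blk_flush_plug_list() after the change */
	while (!list_empty(&list)) {
		rq = list_entry_rq(list.next);
		list_del_init(&rq->queuelist);
		if (rq->q != q) {
			if (q)
				queue_unplugged(q, depth, from_schedule); /* unlocks, IRQs on */
			q = rq->q;
			depth = 0;
			spin_lock_irq(q->queue_lock);	/* IRQs off only under this lock */
		}
		/* ... __elv_add_request(q, rq, ...), depth++ ... */
	}
	if (q)
		queue_unplugged(q, depth, from_schedule);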
+ +Signed-off-by: Thomas Gleixner +Cc: Peter Zijlstra +Cc: Tejun Heo +Cc: Jens Axboe +Cc: Linus Torvalds +Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de +Signed-off-by: Thomas Gleixner +--- + block/blk-core.c | 12 ++---------- + 1 file changed, 2 insertions(+), 10 deletions(-) + +Index: linux-stable/block/blk-core.c +=================================================================== +--- linux-stable.orig/block/blk-core.c ++++ linux-stable/block/blk-core.c +@@ -2907,7 +2907,7 @@ static void queue_unplugged(struct reque + blk_run_queue_async(q); + else + __blk_run_queue(q); +- spin_unlock(q->queue_lock); ++ spin_unlock_irq(q->queue_lock); + } + + static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) +@@ -2955,7 +2955,6 @@ EXPORT_SYMBOL(blk_check_plugged); + void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) + { + struct request_queue *q; +- unsigned long flags; + struct request *rq; + LIST_HEAD(list); + unsigned int depth; +@@ -2973,11 +2972,6 @@ void blk_flush_plug_list(struct blk_plug + q = NULL; + depth = 0; + +- /* +- * Save and disable interrupts here, to avoid doing it for every +- * queue lock we have to take. +- */ +- local_irq_save(flags); + while (!list_empty(&list)) { + rq = list_entry_rq(list.next); + list_del_init(&rq->queuelist); +@@ -2990,7 +2984,7 @@ void blk_flush_plug_list(struct blk_plug + queue_unplugged(q, depth, from_schedule); + q = rq->q; + depth = 0; +- spin_lock(q->queue_lock); ++ spin_lock_irq(q->queue_lock); + } + + /* +@@ -3017,8 +3011,6 @@ void blk_flush_plug_list(struct blk_plug + */ + if (q) + queue_unplugged(q, depth, from_schedule); +- +- local_irq_restore(flags); + } + + void blk_finish_plug(struct blk_plug *plug) diff --git a/debian/patches/features/all/rt/block-use-cpu-chill.patch b/debian/patches/features/all/rt/block-use-cpu-chill.patch new file mode 100644 index 000000000..cc6d03ea3 --- /dev/null +++ b/debian/patches/features/all/rt/block-use-cpu-chill.patch @@ -0,0 +1,48 @@ +Subject: block: Use cpu_chill() for retry loops +From: Thomas Gleixner +Date: Thu, 20 Dec 2012 18:28:26 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Retry loops on RT might loop forever when the modifying side was +preempted. Steven also observed a live lock when there was a +concurrent priority boosting going on. + +Use cpu_chill() instead of cpu_relax() to let the system +make progress. 
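As a stand-alone sketch of the retry idiom being converted (example_ctx, try_lock_inner() and unlock_inner() are hypothetical placeholders; cpu_chill() is assumed, per its introduction elsewhere in this series, to act like cpu_relax() on !RT and to sleep briefly on RT so the preempted lock holder can run):

/* Sketch only; placeholder names, not taken from the kernel tree. */
static void example_retry(struct example_ctx *ctx)
{
	unsigned long flags;

retry:
	spin_lock_irqsave(&ctx->lock, flags);
	if (!try_lock_inner(ctx)) {
		spin_unlock_irqrestore(&ctx->lock, flags);
		cpu_chill();	/* was cpu_relax(); pure busy-waiting can livelock on RT */
		goto retry;
	}
	/* ... work under both locks ... */
	unlock_inner(ctx);
	spin_unlock_irqrestore(&ctx->lock, flags);
}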
+ +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + block/blk-ioc.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +Index: linux-stable/block/blk-ioc.c +=================================================================== +--- linux-stable.orig/block/blk-ioc.c ++++ linux-stable/block/blk-ioc.c +@@ -8,6 +8,7 @@ + #include + #include /* for max_pfn/max_low_pfn */ + #include ++#include + + #include "blk.h" + +@@ -110,7 +111,7 @@ static void ioc_release_fn(struct work_s + spin_unlock(q->queue_lock); + } else { + spin_unlock_irqrestore(&ioc->lock, flags); +- cpu_relax(); ++ cpu_chill(); + spin_lock_irqsave_nested(&ioc->lock, flags, 1); + } + } +@@ -187,7 +188,7 @@ retry: + spin_unlock(icq->q->queue_lock); + } else { + spin_unlock_irqrestore(&ioc->lock, flags); +- cpu_relax(); ++ cpu_chill(); + goto retry; + } + } diff --git a/debian/patches/features/all/rt/bug-rt-dependend-variants.patch b/debian/patches/features/all/rt/bug-rt-dependend-variants.patch new file mode 100644 index 000000000..50761f9f0 --- /dev/null +++ b/debian/patches/features/all/rt/bug-rt-dependend-variants.patch @@ -0,0 +1,37 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:29:58 -0500 +Subject: bug: BUG_ON/WARN_ON variants dependend on RT/!RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + include/asm-generic/bug.h | 14 ++++++++++++++ + 1 file changed, 14 insertions(+) + +Index: linux-stable/include/asm-generic/bug.h +=================================================================== +--- linux-stable.orig/include/asm-generic/bug.h ++++ linux-stable/include/asm-generic/bug.h +@@ -202,6 +202,20 @@ extern void warn_slowpath_null(const cha + # define WARN_ON_SMP(x) ({0;}) + #endif + ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define BUG_ON_RT(c) BUG_ON(c) ++# define BUG_ON_NONRT(c) do { } while (0) ++# define WARN_ON_RT(condition) WARN_ON(condition) ++# define WARN_ON_NONRT(condition) do { } while (0) ++# define WARN_ON_ONCE_NONRT(condition) do { } while (0) ++#else ++# define BUG_ON_RT(c) do { } while (0) ++# define BUG_ON_NONRT(c) BUG_ON(c) ++# define WARN_ON_RT(condition) do { } while (0) ++# define WARN_ON_NONRT(condition) WARN_ON(condition) ++# define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition) ++#endif ++ + #endif /* __ASSEMBLY__ */ + + #endif diff --git a/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch b/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch new file mode 100644 index 000000000..d9535bfbf --- /dev/null +++ b/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch @@ -0,0 +1,143 @@ +From: Benedikt Spranger +Date: Mon, 8 Mar 2010 18:57:04 +0100 +Subject: clocksource: TCLIB: Allow higher clock rates for clock events +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +As default the TCLIB uses the 32KiHz base clock rate for clock events. +Add a compile time selection to allow higher clock resulution. 
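For orientation: with a 16-bit RC register a 32768 Hz clock gives roughly 30.5 us resolution and wraps after 65536/32768 = 2 s, which is where the wording removed below comes from. The user-space sketch below just evaluates the same (freq + HZ/2)/HZ formula the patch writes into RC; the 5 MHz figure is an arbitrary stand-in for a divided master clock, not a value from the hardware.

/* Stand-alone illustration of the RC/resolution arithmetic (plain C). */
#include <stdio.h>

int main(void)
{
	const unsigned hz = 100;			/* assumed kernel HZ */
	const double rates[] = { 32768.0, 5000000.0 };	/* slow clock vs. divided MCK */

	for (int i = 0; i < 2; i++) {
		unsigned long rc = (unsigned long)((rates[i] + hz / 2) / hz);

		printf("clk=%.0f Hz: RC=%lu, resolution=%.2f us, max 16-bit delay=%.3f s\n",
		       rates[i], rc, 1e6 / rates[i], 65536.0 / rates[i]);
	}
	return 0;
}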
+ +Signed-off-by: Benedikt Spranger +Signed-off-by: Thomas Gleixner + +--- + drivers/clocksource/tcb_clksrc.c | 30 ++++++++++++++++-------------- + drivers/misc/Kconfig | 12 ++++++++++-- + 2 files changed, 26 insertions(+), 16 deletions(-) + +Index: linux-stable/drivers/clocksource/tcb_clksrc.c +=================================================================== +--- linux-stable.orig/drivers/clocksource/tcb_clksrc.c ++++ linux-stable/drivers/clocksource/tcb_clksrc.c +@@ -23,8 +23,7 @@ + * this 32 bit free-running counter. the second channel is not used. + * + * - The third channel may be used to provide a 16-bit clockevent +- * source, used in either periodic or oneshot mode. This runs +- * at 32 KiHZ, and can handle delays of up to two seconds. ++ * source, used in either periodic or oneshot mode. + * + * A boot clocksource and clockevent source are also currently needed, + * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so +@@ -74,6 +73,7 @@ static struct clocksource clksrc = { + struct tc_clkevt_device { + struct clock_event_device clkevt; + struct clk *clk; ++ u32 freq; + void __iomem *regs; + }; + +@@ -82,13 +82,6 @@ static struct tc_clkevt_device *to_tc_cl + return container_of(clkevt, struct tc_clkevt_device, clkevt); + } + +-/* For now, we always use the 32K clock ... this optimizes for NO_HZ, +- * because using one of the divided clocks would usually mean the +- * tick rate can never be less than several dozen Hz (vs 0.5 Hz). +- * +- * A divided clock could be good for high resolution timers, since +- * 30.5 usec resolution can seem "low". +- */ + static u32 timer_clock; + + static void tc_mode(enum clock_event_mode m, struct clock_event_device *d) +@@ -111,11 +104,12 @@ static void tc_mode(enum clock_event_mod + case CLOCK_EVT_MODE_PERIODIC: + clk_enable(tcd->clk); + +- /* slow clock, count up to RC, then irq and restart */ ++ /* count up to RC, then irq and restart */ + __raw_writel(timer_clock + | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, + regs + ATMEL_TC_REG(2, CMR)); +- __raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC)); ++ __raw_writel((tcd->freq + HZ/2)/HZ, ++ tcaddr + ATMEL_TC_REG(2, RC)); + + /* Enable clock and interrupts on RC compare */ + __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER)); +@@ -128,7 +122,7 @@ static void tc_mode(enum clock_event_mod + case CLOCK_EVT_MODE_ONESHOT: + clk_enable(tcd->clk); + +- /* slow clock, count up to RC, then irq and stop */ ++ /* count up to RC, then irq and stop */ + __raw_writel(timer_clock | ATMEL_TC_CPCSTOP + | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, + regs + ATMEL_TC_REG(2, CMR)); +@@ -157,8 +151,12 @@ static struct tc_clkevt_device clkevt = + .name = "tc_clkevt", + .features = CLOCK_EVT_FEAT_PERIODIC + | CLOCK_EVT_FEAT_ONESHOT, ++#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK + /* Should be lower than at91rm9200's system timer */ + .rating = 125, ++#else ++ .rating = 200, ++#endif + .set_next_event = tc_next_event, + .set_mode = tc_mode, + }, +@@ -184,8 +182,9 @@ static struct irqaction tc_irqaction = { + .handler = ch2_irq, + }; + +-static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) ++static void __init setup_clkevents(struct atmel_tc *tc, int divisor_idx) + { ++ unsigned divisor = atmel_tc_divisors[divisor_idx]; + struct clk *t2_clk = tc->clk[2]; + int irq = tc->irq[2]; + +@@ -322,8 +321,11 @@ static int __init tcb_clksrc_init(void) + clocksource_register_hz(&clksrc, divided_rate); + + /* channel 2: periodic and oneshot timer support */ ++#ifdef 
CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK + setup_clkevents(tc, clk32k_divisor_idx); +- ++#else ++ setup_clkevents(tc, best_divisor_idx); ++#endif + return 0; + } + arch_initcall(tcb_clksrc_init); +Index: linux-stable/drivers/misc/Kconfig +=================================================================== +--- linux-stable.orig/drivers/misc/Kconfig ++++ linux-stable/drivers/misc/Kconfig +@@ -78,8 +78,7 @@ config ATMEL_TCB_CLKSRC + are combined to make a single 32-bit timer. + + When GENERIC_CLOCKEVENTS is defined, the third timer channel +- may be used as a clock event device supporting oneshot mode +- (delays of up to two seconds) based on the 32 KiHz clock. ++ may be used as a clock event device supporting oneshot mode. + + config ATMEL_TCB_CLKSRC_BLOCK + int +@@ -93,6 +92,15 @@ config ATMEL_TCB_CLKSRC_BLOCK + TC can be used for other purposes, such as PWM generation and + interval timing. + ++config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK ++ bool "TC Block use 32 KiHz clock" ++ depends on ATMEL_TCB_CLKSRC ++ default y ++ help ++ Select this to use 32 KiHz base clock rate as TC block clock ++ source for clock events. ++ ++ + config DUMMY_IRQ + tristate "Dummy IRQ handler" + default n diff --git a/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch b/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch new file mode 100644 index 000000000..e0c65e98b --- /dev/null +++ b/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch @@ -0,0 +1,186 @@ +Subject: completion: Use simple wait queues +From: Thomas Gleixner +Date: Fri, 11 Jan 2013 11:23:51 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Completions have no long lasting callbacks and therefor do not need +the complex waitqueue variant. Use simple waitqueues which reduces the +contention on the waitqueue lock. + +Signed-off-by: Thomas Gleixner +--- + include/linux/completion.h | 8 ++++---- + include/linux/uprobes.h | 1 + + kernel/sched/core.c | 44 +++++++++++++++++++++++++------------------- + 3 files changed, 30 insertions(+), 23 deletions(-) + +Index: linux-stable/include/linux/completion.h +=================================================================== +--- linux-stable.orig/include/linux/completion.h ++++ linux-stable/include/linux/completion.h +@@ -8,7 +8,7 @@ + * See kernel/sched.c for details. 
+ */ + +-#include ++#include + + /* + * struct completion - structure used to maintain state for a "completion" +@@ -24,11 +24,11 @@ + */ + struct completion { + unsigned int done; +- wait_queue_head_t wait; ++ struct swait_head wait; + }; + + #define COMPLETION_INITIALIZER(work) \ +- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } ++ { 0, SWAIT_HEAD_INITIALIZER((work).wait) } + + #define COMPLETION_INITIALIZER_ONSTACK(work) \ + ({ init_completion(&work); work; }) +@@ -73,7 +73,7 @@ struct completion { + static inline void init_completion(struct completion *x) + { + x->done = 0; +- init_waitqueue_head(&x->wait); ++ init_swait_head(&x->wait); + } + + extern void wait_for_completion(struct completion *); +Index: linux-stable/include/linux/uprobes.h +=================================================================== +--- linux-stable.orig/include/linux/uprobes.h ++++ linux-stable/include/linux/uprobes.h +@@ -26,6 +26,7 @@ + + #include + #include ++#include + + struct vm_area_struct; + struct mm_struct; +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -3028,7 +3028,10 @@ void migrate_disable(void) + } + + #ifdef CONFIG_SCHED_DEBUG +- WARN_ON_ONCE(p->migrate_disable_atomic); ++ if (unlikely(p->migrate_disable_atomic)) { ++ tracing_off(); ++ WARN_ON_ONCE(1); ++ } + #endif + + preempt_disable(); +@@ -3060,7 +3063,10 @@ void migrate_enable(void) + } + + #ifdef CONFIG_SCHED_DEBUG +- WARN_ON_ONCE(p->migrate_disable_atomic); ++ if (unlikely(p->migrate_disable_atomic)) { ++ tracing_off(); ++ WARN_ON_ONCE(1); ++ } + #endif + WARN_ON_ONCE(p->migrate_disable <= 0); + +@@ -3515,10 +3521,10 @@ void complete(struct completion *x) + { + unsigned long flags; + +- spin_lock_irqsave(&x->wait.lock, flags); ++ raw_spin_lock_irqsave(&x->wait.lock, flags); + x->done++; +- __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL); +- spin_unlock_irqrestore(&x->wait.lock, flags); ++ __swait_wake_locked(&x->wait, TASK_NORMAL, 1); ++ raw_spin_unlock_irqrestore(&x->wait.lock, flags); + } + EXPORT_SYMBOL(complete); + +@@ -3535,10 +3541,10 @@ void complete_all(struct completion *x) + { + unsigned long flags; + +- spin_lock_irqsave(&x->wait.lock, flags); ++ raw_spin_lock_irqsave(&x->wait.lock, flags); + x->done += UINT_MAX/2; +- __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL); +- spin_unlock_irqrestore(&x->wait.lock, flags); ++ __swait_wake_locked(&x->wait, TASK_NORMAL, 0); ++ raw_spin_unlock_irqrestore(&x->wait.lock, flags); + } + EXPORT_SYMBOL(complete_all); + +@@ -3547,20 +3553,20 @@ do_wait_for_common(struct completion *x, + long (*action)(long), long timeout, int state) + { + if (!x->done) { +- DECLARE_WAITQUEUE(wait, current); ++ DEFINE_SWAITER(wait); + +- __add_wait_queue_tail_exclusive(&x->wait, &wait); ++ swait_prepare_locked(&x->wait, &wait); + do { + if (signal_pending_state(state, current)) { + timeout = -ERESTARTSYS; + break; + } + __set_current_state(state); +- spin_unlock_irq(&x->wait.lock); ++ raw_spin_unlock_irq(&x->wait.lock); + timeout = action(timeout); +- spin_lock_irq(&x->wait.lock); ++ raw_spin_lock_irq(&x->wait.lock); + } while (!x->done && timeout); +- __remove_wait_queue(&x->wait, &wait); ++ swait_finish_locked(&x->wait, &wait); + if (!x->done) + return timeout; + } +@@ -3574,9 +3580,9 @@ __wait_for_common(struct completion *x, + { + might_sleep(); + +- spin_lock_irq(&x->wait.lock); ++ raw_spin_lock_irq(&x->wait.lock); + timeout = do_wait_for_common(x, 
action, timeout, state); +- spin_unlock_irq(&x->wait.lock); ++ raw_spin_unlock_irq(&x->wait.lock); + return timeout; + } + +@@ -3752,12 +3758,12 @@ bool try_wait_for_completion(struct comp + unsigned long flags; + int ret = 1; + +- spin_lock_irqsave(&x->wait.lock, flags); ++ raw_spin_lock_irqsave(&x->wait.lock, flags); + if (!x->done) + ret = 0; + else + x->done--; +- spin_unlock_irqrestore(&x->wait.lock, flags); ++ raw_spin_unlock_irqrestore(&x->wait.lock, flags); + return ret; + } + EXPORT_SYMBOL(try_wait_for_completion); +@@ -3775,10 +3781,10 @@ bool completion_done(struct completion * + unsigned long flags; + int ret = 1; + +- spin_lock_irqsave(&x->wait.lock, flags); ++ raw_spin_lock_irqsave(&x->wait.lock, flags); + if (!x->done) + ret = 0; +- spin_unlock_irqrestore(&x->wait.lock, flags); ++ raw_spin_unlock_irqrestore(&x->wait.lock, flags); + return ret; + } + EXPORT_SYMBOL(completion_done); diff --git a/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch b/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch new file mode 100644 index 000000000..a003485c6 --- /dev/null +++ b/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch @@ -0,0 +1,23 @@ +Subject: cond-resched-lock-rt-tweak.patch +From: Thomas Gleixner +Date: Sun, 17 Jul 2011 22:51:33 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/sched.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: linux-stable/include/linux/sched.h +=================================================================== +--- linux-stable.orig/include/linux/sched.h ++++ linux-stable/include/linux/sched.h +@@ -2483,7 +2483,7 @@ extern int _cond_resched(void); + + extern int __cond_resched_lock(spinlock_t *lock); + +-#ifdef CONFIG_PREEMPT_COUNT ++#if defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT_FULL) + #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET + #else + #define PREEMPT_LOCK_OFFSET 0 diff --git a/debian/patches/features/all/rt/cond-resched-softirq-rt.patch b/debian/patches/features/all/rt/cond-resched-softirq-rt.patch new file mode 100644 index 000000000..cdb19ea58 --- /dev/null +++ b/debian/patches/features/all/rt/cond-resched-softirq-rt.patch @@ -0,0 +1,52 @@ +Subject: cond-resched-softirq-fix.patch +From: Thomas Gleixner +Date: Thu, 14 Jul 2011 09:56:44 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/sched.h | 4 ++++ + kernel/sched/core.c | 2 ++ + 2 files changed, 6 insertions(+) + +Index: linux-stable/include/linux/sched.h +=================================================================== +--- linux-stable.orig/include/linux/sched.h ++++ linux-stable/include/linux/sched.h +@@ -2494,12 +2494,16 @@ extern int __cond_resched_lock(spinlock_ + __cond_resched_lock(lock); \ + }) + ++#ifndef CONFIG_PREEMPT_RT_FULL + extern int __cond_resched_softirq(void); + + #define cond_resched_softirq() ({ \ + __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ + __cond_resched_softirq(); \ + }) ++#else ++# define cond_resched_softirq() cond_resched() ++#endif + + /* + * Does a critical section need to be broken due to another +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -4443,6 +4443,7 @@ int __cond_resched_lock(spinlock_t *lock + } + 
EXPORT_SYMBOL(__cond_resched_lock); + ++#ifndef CONFIG_PREEMPT_RT_FULL + int __sched __cond_resched_softirq(void) + { + BUG_ON(!in_softirq()); +@@ -4456,6 +4457,7 @@ int __sched __cond_resched_softirq(void) + return 0; + } + EXPORT_SYMBOL(__cond_resched_softirq); ++#endif + + /** + * yield - yield the current processor to other threads. diff --git a/debian/patches/features/all/rt/cpsw-net-cpsw-Use-fallback-for-active_slave.patch b/debian/patches/features/all/rt/cpsw-net-cpsw-Use-fallback-for-active_slave.patch new file mode 100644 index 000000000..be8079ae0 --- /dev/null +++ b/debian/patches/features/all/rt/cpsw-net-cpsw-Use-fallback-for-active_slave.patch @@ -0,0 +1,32 @@ +From: Sebastian Andrzej Siewior +Date: Tue, 16 Apr 2013 12:34:09 +0200 +Subject: [PATCH] net/cpsw: Use fallback for active_slave +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +In case the .dts has not been yet updated we also try to look for the +old "cpts_active_slave" property. + +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/net/ethernet/ti/cpsw.c | 8 ++++++-- + 1 file changed, 6 insertions(+), 2 deletions(-) + +Index: linux-stable/drivers/net/ethernet/ti/cpsw.c +=================================================================== +--- linux-stable.orig/drivers/net/ethernet/ti/cpsw.c ++++ linux-stable/drivers/net/ethernet/ti/cpsw.c +@@ -1466,8 +1466,12 @@ static int cpsw_probe_dt(struct cpsw_pla + + if (of_property_read_u32(node, "active_slave", &prop)) { + pr_err("Missing active_slave property in the DT.\n"); +- ret = -EINVAL; +- goto error_ret; ++ if (of_property_read_u32(node, "cpts_active_slave", &prop)) { ++ ret = -EINVAL; ++ goto error_ret; ++ } else { ++ pr_err("Using old cpts_active_slave as fallback.\n"); ++ } + } + data->active_slave = prop; + diff --git a/debian/patches/features/all/rt/cpsw-net-cpsw-use-a-lock-around-source-testing.patch b/debian/patches/features/all/rt/cpsw-net-cpsw-use-a-lock-around-source-testing.patch new file mode 100644 index 000000000..99ffc9402 --- /dev/null +++ b/debian/patches/features/all/rt/cpsw-net-cpsw-use-a-lock-around-source-testing.patch @@ -0,0 +1,65 @@ +From: Sebastian Andrzej Siewior +Date: Wed, 24 Apr 2013 20:01:22 +0200 +Subject: [PATCH] net/cpsw: use a lock around source testing +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +For some reason on RT it happens that the TX interrupt fires over and +over again but according to tx_stat there is nothing going on. Same goes +for RX but not that often. +With this lock around it this is gone. However I still see from time to +time interrupts where each source is set to 0. 
+ +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/net/ethernet/ti/cpsw.c | 11 +++++++++-- + 1 file changed, 9 insertions(+), 2 deletions(-) + +Index: linux-stable/drivers/net/ethernet/ti/cpsw.c +=================================================================== +--- linux-stable.orig/drivers/net/ethernet/ti/cpsw.c ++++ linux-stable/drivers/net/ethernet/ti/cpsw.c +@@ -490,7 +490,7 @@ void cpsw_rx_handler(void *token, int le + skb_put(skb, len); + cpts_rx_timestamp(priv->cpts, skb); + skb->protocol = eth_type_trans(skb, ndev); +- netif_receive_skb(skb); ++ netif_rx(skb); + priv->stats.rx_bytes += len; + priv->stats.rx_packets++; + } else { +@@ -507,19 +507,24 @@ void cpsw_rx_handler(void *token, int le + static irqreturn_t cpsw_interrupt(int irq, void *dev_id) + { + struct cpsw_priv *priv = dev_id; ++ unsigned long flags; + u32 rx, tx, rx_thresh; + ++ spin_lock_irqsave(&priv->lock, flags); + rx_thresh = __raw_readl(&priv->wr_regs->rx_thresh_stat); + rx = __raw_readl(&priv->wr_regs->rx_stat); + tx = __raw_readl(&priv->wr_regs->tx_stat); +- if (!rx_thresh && !rx && !tx) ++ if (!rx_thresh && !rx && !tx) { ++ spin_unlock_irqrestore(&priv->lock, flags); + return IRQ_NONE; ++ } + + cpsw_intr_disable(priv); + if (priv->irq_enabled == true) { + cpsw_disable_irq(priv); + priv->irq_enabled = false; + } ++ spin_unlock_irqrestore(&priv->lock, flags); + + if (netif_running(priv->ndev)) { + napi_schedule(&priv->napi); +@@ -541,7 +546,9 @@ static int cpsw_poll(struct napi_struct + { + struct cpsw_priv *priv = napi_to_priv(napi); + int num_tx, num_rx; ++ unsigned long flags; + ++ spin_lock_irqsave(&priv->lock, flags); + num_tx = cpdma_chan_process(priv->txch, 128); + if (num_tx) + cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); diff --git a/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch b/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch new file mode 100644 index 000000000..927f27ac2 --- /dev/null +++ b/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch @@ -0,0 +1,128 @@ +Subject: cpu: Make hotplug.lock a "sleeping" spinlock on RT +From: Steven Rostedt +Date: Fri, 02 Mar 2012 10:36:57 -0500 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Tasks can block on hotplug.lock in pin_current_cpu(), but their state +might be != RUNNING. So the mutex wakeup will set the state +unconditionally to RUNNING. That might cause spurious unexpected +wakeups. We could provide a state preserving mutex_lock() function, +but this is semantically backwards. So instead we convert the +hotplug.lock() to a spinlock for RT, which has the state preserving +semantics already. 
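A minimal sketch of the failure mode described above, assuming the caller sits between its own set_current_state() and schedule() when it enters a pinned section via pin_current_cpu():

/* Sketch only; illustrates the spurious wakeup, not real kernel code. */
static void example_sleeper(void)
{
	set_current_state(TASK_UNINTERRUPTIBLE); /* caller arranges its own sleep */

	pin_current_cpu();	/* may block on hotplug.lock; a mutex_lock() there
				 * wakes the task back to TASK_RUNNING unconditionally */
	unpin_current_cpu();

	schedule();		/* now returns immediately instead of sleeping; the
				 * rtmutex-based spinlock preserves the saved state */
}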
+ +Signed-off-by: Steven Rostedt +Cc: Carsten Emde +Cc: John Kacur +Cc: Peter Zijlstra +Cc: Clark Williams +Cc: stable-rt@vger.kernel.org +Link: http://lkml.kernel.org/r/1330702617.25686.265.camel@gandalf.stny.rr.com +Signed-off-by: Thomas Gleixner +--- + kernel/cpu.c | 35 ++++++++++++++++++++++++++--------- + 1 file changed, 26 insertions(+), 9 deletions(-) + +Index: linux-stable/kernel/cpu.c +=================================================================== +--- linux-stable.orig/kernel/cpu.c ++++ linux-stable/kernel/cpu.c +@@ -51,7 +51,12 @@ static int cpu_hotplug_disabled; + + static struct { + struct task_struct *active_writer; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* Makes the lock keep the task's state */ ++ spinlock_t lock; ++#else + struct mutex lock; /* Synchronizes accesses to refcount, */ ++#endif + /* + * Also blocks the new readers during + * an ongoing cpu hotplug operation. +@@ -59,10 +64,22 @@ static struct { + int refcount; + } cpu_hotplug = { + .active_writer = NULL, ++#ifdef CONFIG_PREEMPT_RT_FULL ++ .lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock), ++#else + .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock), ++#endif + .refcount = 0, + }; + ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define hotplug_lock() rt_spin_lock(&cpu_hotplug.lock) ++# define hotplug_unlock() rt_spin_unlock(&cpu_hotplug.lock) ++#else ++# define hotplug_lock() mutex_lock(&cpu_hotplug.lock) ++# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock) ++#endif ++ + struct hotplug_pcp { + struct task_struct *unplug; + int refcount; +@@ -92,8 +109,8 @@ retry: + return; + } + preempt_enable(); +- mutex_lock(&cpu_hotplug.lock); +- mutex_unlock(&cpu_hotplug.lock); ++ hotplug_lock(); ++ hotplug_unlock(); + preempt_disable(); + goto retry; + } +@@ -165,9 +182,9 @@ void get_online_cpus(void) + might_sleep(); + if (cpu_hotplug.active_writer == current) + return; +- mutex_lock(&cpu_hotplug.lock); ++ hotplug_lock(); + cpu_hotplug.refcount++; +- mutex_unlock(&cpu_hotplug.lock); ++ hotplug_unlock(); + + } + EXPORT_SYMBOL_GPL(get_online_cpus); +@@ -176,14 +193,14 @@ void put_online_cpus(void) + { + if (cpu_hotplug.active_writer == current) + return; +- mutex_lock(&cpu_hotplug.lock); + ++ hotplug_lock(); + if (WARN_ON(!cpu_hotplug.refcount)) + cpu_hotplug.refcount++; /* try to fix things up */ + + if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer)) + wake_up_process(cpu_hotplug.active_writer); +- mutex_unlock(&cpu_hotplug.lock); ++ hotplug_unlock(); + + } + EXPORT_SYMBOL_GPL(put_online_cpus); +@@ -215,11 +232,11 @@ static void cpu_hotplug_begin(void) + cpu_hotplug.active_writer = current; + + for (;;) { +- mutex_lock(&cpu_hotplug.lock); ++ hotplug_lock(); + if (likely(!cpu_hotplug.refcount)) + break; + __set_current_state(TASK_UNINTERRUPTIBLE); +- mutex_unlock(&cpu_hotplug.lock); ++ hotplug_unlock(); + schedule(); + } + } +@@ -227,7 +244,7 @@ static void cpu_hotplug_begin(void) + static void cpu_hotplug_done(void) + { + cpu_hotplug.active_writer = NULL; +- mutex_unlock(&cpu_hotplug.lock); ++ hotplug_unlock(); + } + + /* diff --git a/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch b/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch new file mode 100644 index 000000000..dbe18bfc5 --- /dev/null +++ b/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch @@ -0,0 +1,554 @@ +From: Steven Rostedt +Date: Mon, 16 Jul 2012 08:07:43 +0000 +Subject: cpu/rt: Rework cpu down for PREEMPT_RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Bringing a CPU 
down is a pain with the PREEMPT_RT kernel because +tasks can be preempted in many more places than in non-RT. In +order to handle per_cpu variables, tasks may be pinned to a CPU +for a while, and even sleep. But these tasks need to be off the CPU +if that CPU is going down. + +Several synchronization methods have been tried, but when stressed +they failed. This is a new approach. + +A sync_tsk thread is still created and tasks may still block on a +lock when the CPU is going down, but how that works is a bit different. +When cpu_down() starts, it will create the sync_tsk and wait on it +to inform that current tasks that are pinned on the CPU are no longer +pinned. But new tasks that are about to be pinned will still be allowed +to do so at this time. + +Then the notifiers are called. Several notifiers will bring down tasks +that will enter these locations. Some of these tasks will take locks +of other tasks that are on the CPU. If we don't let those other tasks +continue, but make them block until CPU down is done, the tasks that +the notifiers are waiting on will never complete as they are waiting +for the locks held by the tasks that are blocked. + +Thus we still let the task pin the CPU until the notifiers are done. +After the notifiers run, we then make new tasks entering the pinned +CPU sections grab a mutex and wait. This mutex is now a per CPU mutex +in the hotplug_pcp descriptor. + +To help things along, a new function in the scheduler code is created +called migrate_me(). This function will try to migrate the current task +off the CPU this is going down if possible. When the sync_tsk is created, +all tasks will then try to migrate off the CPU going down. There are +several cases that this wont work, but it helps in most cases. + +After the notifiers are called and if a task can't migrate off but enters +the pin CPU sections, it will be forced to wait on the hotplug_pcp mutex +until the CPU down is complete. Then the scheduler will force the migration +anyway. + +Also, I found that THREAD_BOUND need to also be accounted for in the +pinned CPU, and the migrate_disable no longer treats them special. +This helps fix issues with ksoftirqd and workqueue that unbind on CPU down. 
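Condensed into the call sequence the hunks below implement, the resulting _cpu_down() flow is roughly as follows (a sketch of ordering only, not additional code):

/* Ordering sketch; error handling and the surrounding hotplug lock omitted. */
static int sketch_cpu_down(unsigned int cpu)
{
	cpu_unplug_begin(cpu);	/* start sync_unplug/N, drain current pinners,
				 * but still admit new ones so notifiers can't deadlock */

	/* CPU_DOWN_PREPARE notifiers and smpboot_park_threads() run here */

	cpu_unplug_sync(cpu);	/* set grab_lock: new pinners block on the per-cpu
				 * lock, then wait for remaining pinned sections */

	/* __stop_machine(take_cpu_down, ...) actually takes the CPU down */

	cpu_unplug_done(cpu);	/* release waiters, tell_sched_cpu_down_done(cpu) */
	return 0;
}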
+ +Signed-off-by: Steven Rostedt +Signed-off-by: Thomas Gleixner + +--- + include/linux/sched.h | 7 + + kernel/cpu.c | 240 +++++++++++++++++++++++++++++++++++++++++--------- + kernel/sched/core.c | 82 ++++++++++++++++- + 3 files changed, 284 insertions(+), 45 deletions(-) + +Index: linux-stable/include/linux/sched.h +=================================================================== +--- linux-stable.orig/include/linux/sched.h ++++ linux-stable/include/linux/sched.h +@@ -1808,6 +1808,10 @@ extern void do_set_cpus_allowed(struct t + + extern int set_cpus_allowed_ptr(struct task_struct *p, + const struct cpumask *new_mask); ++int migrate_me(void); ++void tell_sched_cpu_down_begin(int cpu); ++void tell_sched_cpu_down_done(int cpu); ++ + #else + static inline void do_set_cpus_allowed(struct task_struct *p, + const struct cpumask *new_mask) +@@ -1820,6 +1824,9 @@ static inline int set_cpus_allowed_ptr(s + return -EINVAL; + return 0; + } ++static inline int migrate_me(void) { return 0; } ++static inline void tell_sched_cpu_down_begin(int cpu) { } ++static inline void tell_sched_cpu_down_done(int cpu) { } + #endif + + #ifdef CONFIG_NO_HZ_COMMON +Index: linux-stable/kernel/cpu.c +=================================================================== +--- linux-stable.orig/kernel/cpu.c ++++ linux-stable/kernel/cpu.c +@@ -51,12 +51,7 @@ static int cpu_hotplug_disabled; + + static struct { + struct task_struct *active_writer; +-#ifdef CONFIG_PREEMPT_RT_FULL +- /* Makes the lock keep the task's state */ +- spinlock_t lock; +-#else + struct mutex lock; /* Synchronizes accesses to refcount, */ +-#endif + /* + * Also blocks the new readers during + * an ongoing cpu hotplug operation. +@@ -64,28 +59,46 @@ static struct { + int refcount; + } cpu_hotplug = { + .active_writer = NULL, +-#ifdef CONFIG_PREEMPT_RT_FULL +- .lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock), +-#else + .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock), +-#endif + .refcount = 0, + }; + +-#ifdef CONFIG_PREEMPT_RT_FULL +-# define hotplug_lock() rt_spin_lock(&cpu_hotplug.lock) +-# define hotplug_unlock() rt_spin_unlock(&cpu_hotplug.lock) +-#else +-# define hotplug_lock() mutex_lock(&cpu_hotplug.lock) +-# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock) +-#endif +- ++/** ++ * hotplug_pcp - per cpu hotplug descriptor ++ * @unplug: set when pin_current_cpu() needs to sync tasks ++ * @sync_tsk: the task that waits for tasks to finish pinned sections ++ * @refcount: counter of tasks in pinned sections ++ * @grab_lock: set when the tasks entering pinned sections should wait ++ * @synced: notifier for @sync_tsk to tell cpu_down it's finished ++ * @mutex: the mutex to make tasks wait (used when @grab_lock is true) ++ * @mutex_init: zero if the mutex hasn't been initialized yet. ++ * ++ * Although @unplug and @sync_tsk may point to the same task, the @unplug ++ * is used as a flag and still exists after @sync_tsk has exited and ++ * @sync_tsk set to NULL. 
++ */ + struct hotplug_pcp { + struct task_struct *unplug; ++ struct task_struct *sync_tsk; + int refcount; ++ int grab_lock; + struct completion synced; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ spinlock_t lock; ++#else ++ struct mutex mutex; ++#endif ++ int mutex_init; + }; + ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define hotplug_lock(hp) rt_spin_lock(&(hp)->lock) ++# define hotplug_unlock(hp) rt_spin_unlock(&(hp)->lock) ++#else ++# define hotplug_lock(hp) mutex_lock(&(hp)->mutex) ++# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex) ++#endif ++ + static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp); + + /** +@@ -99,18 +112,39 @@ static DEFINE_PER_CPU(struct hotplug_pcp + void pin_current_cpu(void) + { + struct hotplug_pcp *hp; ++ int force = 0; + + retry: + hp = &__get_cpu_var(hotplug_pcp); + +- if (!hp->unplug || hp->refcount || preempt_count() > 1 || ++ if (!hp->unplug || hp->refcount || force || preempt_count() > 1 || + hp->unplug == current) { + hp->refcount++; + return; + } +- preempt_enable(); +- hotplug_lock(); +- hotplug_unlock(); ++ if (hp->grab_lock) { ++ preempt_enable(); ++ hotplug_lock(hp); ++ hotplug_unlock(hp); ++ } else { ++ preempt_enable(); ++ /* ++ * Try to push this task off of this CPU. ++ */ ++ if (!migrate_me()) { ++ preempt_disable(); ++ hp = &__get_cpu_var(hotplug_pcp); ++ if (!hp->grab_lock) { ++ /* ++ * Just let it continue it's already pinned ++ * or about to sleep. ++ */ ++ force = 1; ++ goto retry; ++ } ++ preempt_enable(); ++ } ++ } + preempt_disable(); + goto retry; + } +@@ -131,26 +165,84 @@ void unpin_current_cpu(void) + wake_up_process(hp->unplug); + } + +-/* +- * FIXME: Is this really correct under all circumstances ? +- */ ++static void wait_for_pinned_cpus(struct hotplug_pcp *hp) ++{ ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ while (hp->refcount) { ++ schedule_preempt_disabled(); ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ } ++} ++ + static int sync_unplug_thread(void *data) + { + struct hotplug_pcp *hp = data; + + preempt_disable(); + hp->unplug = current; ++ wait_for_pinned_cpus(hp); ++ ++ /* ++ * This thread will synchronize the cpu_down() with threads ++ * that have pinned the CPU. When the pinned CPU count reaches ++ * zero, we inform the cpu_down code to continue to the next step. ++ */ + set_current_state(TASK_UNINTERRUPTIBLE); +- while (hp->refcount) { +- schedule_preempt_disabled(); ++ preempt_enable(); ++ complete(&hp->synced); ++ ++ /* ++ * If all succeeds, the next step will need tasks to wait till ++ * the CPU is offline before continuing. To do this, the grab_lock ++ * is set and tasks going into pin_current_cpu() will block on the ++ * mutex. But we still need to wait for those that are already in ++ * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop() ++ * will kick this thread out. ++ */ ++ while (!hp->grab_lock && !kthread_should_stop()) { ++ schedule(); ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ } ++ ++ /* Make sure grab_lock is seen before we see a stale completion */ ++ smp_mb(); ++ ++ /* ++ * Now just before cpu_down() enters stop machine, we need to make ++ * sure all tasks that are in pinned CPU sections are out, and new ++ * tasks will now grab the lock, keeping them from entering pinned ++ * CPU sections. 
++ */ ++ if (!kthread_should_stop()) { ++ preempt_disable(); ++ wait_for_pinned_cpus(hp); ++ preempt_enable(); ++ complete(&hp->synced); ++ } ++ ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ while (!kthread_should_stop()) { ++ schedule(); + set_current_state(TASK_UNINTERRUPTIBLE); + } + set_current_state(TASK_RUNNING); +- preempt_enable(); +- complete(&hp->synced); ++ ++ /* ++ * Force this thread off this CPU as it's going down and ++ * we don't want any more work on this CPU. ++ */ ++ current->flags &= ~PF_NO_SETAFFINITY; ++ do_set_cpus_allowed(current, cpu_present_mask); ++ migrate_me(); + return 0; + } + ++static void __cpu_unplug_sync(struct hotplug_pcp *hp) ++{ ++ wake_up_process(hp->sync_tsk); ++ wait_for_completion(&hp->synced); ++} ++ + /* + * Start the sync_unplug_thread on the target cpu and wait for it to + * complete. +@@ -158,23 +250,83 @@ static int sync_unplug_thread(void *data + static int cpu_unplug_begin(unsigned int cpu) + { + struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); +- struct task_struct *tsk; ++ int err; ++ ++ /* Protected by cpu_hotplug.lock */ ++ if (!hp->mutex_init) { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ spin_lock_init(&hp->lock); ++#else ++ mutex_init(&hp->mutex); ++#endif ++ hp->mutex_init = 1; ++ } ++ ++ /* Inform the scheduler to migrate tasks off this CPU */ ++ tell_sched_cpu_down_begin(cpu); + + init_completion(&hp->synced); +- tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu); +- if (IS_ERR(tsk)) +- return (PTR_ERR(tsk)); +- kthread_bind(tsk, cpu); +- wake_up_process(tsk); +- wait_for_completion(&hp->synced); ++ ++ hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu); ++ if (IS_ERR(hp->sync_tsk)) { ++ err = PTR_ERR(hp->sync_tsk); ++ hp->sync_tsk = NULL; ++ return err; ++ } ++ kthread_bind(hp->sync_tsk, cpu); ++ ++ /* ++ * Wait for tasks to get out of the pinned sections, ++ * it's still OK if new tasks enter. Some CPU notifiers will ++ * wait for tasks that are going to enter these sections and ++ * we must not have them block. ++ */ ++ __cpu_unplug_sync(hp); ++ + return 0; + } + ++static void cpu_unplug_sync(unsigned int cpu) ++{ ++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); ++ ++ init_completion(&hp->synced); ++ /* The completion needs to be initialzied before setting grab_lock */ ++ smp_wmb(); ++ ++ /* Grab the mutex before setting grab_lock */ ++ hotplug_lock(hp); ++ hp->grab_lock = 1; ++ ++ /* ++ * The CPU notifiers have been completed. ++ * Wait for tasks to get out of pinned CPU sections and have new ++ * tasks block until the CPU is completely down. 
++ */ ++ __cpu_unplug_sync(hp); ++ ++ /* All done with the sync thread */ ++ kthread_stop(hp->sync_tsk); ++ hp->sync_tsk = NULL; ++} ++ + static void cpu_unplug_done(unsigned int cpu) + { + struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); + + hp->unplug = NULL; ++ /* Let all tasks know cpu unplug is finished before cleaning up */ ++ smp_wmb(); ++ ++ if (hp->sync_tsk) ++ kthread_stop(hp->sync_tsk); ++ ++ if (hp->grab_lock) { ++ hotplug_unlock(hp); ++ /* protected by cpu_hotplug.lock */ ++ hp->grab_lock = 0; ++ } ++ tell_sched_cpu_down_done(cpu); + } + + void get_online_cpus(void) +@@ -182,9 +334,9 @@ void get_online_cpus(void) + might_sleep(); + if (cpu_hotplug.active_writer == current) + return; +- hotplug_lock(); ++ mutex_lock(&cpu_hotplug.lock); + cpu_hotplug.refcount++; +- hotplug_unlock(); ++ mutex_unlock(&cpu_hotplug.lock); + + } + EXPORT_SYMBOL_GPL(get_online_cpus); +@@ -194,14 +346,13 @@ void put_online_cpus(void) + if (cpu_hotplug.active_writer == current) + return; + +- hotplug_lock(); ++ mutex_lock(&cpu_hotplug.lock); + if (WARN_ON(!cpu_hotplug.refcount)) + cpu_hotplug.refcount++; /* try to fix things up */ + + if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer)) + wake_up_process(cpu_hotplug.active_writer); +- hotplug_unlock(); +- ++ mutex_unlock(&cpu_hotplug.lock); + } + EXPORT_SYMBOL_GPL(put_online_cpus); + +@@ -232,11 +383,11 @@ static void cpu_hotplug_begin(void) + cpu_hotplug.active_writer = current; + + for (;;) { +- hotplug_lock(); ++ mutex_lock(&cpu_hotplug.lock); + if (likely(!cpu_hotplug.refcount)) + break; + __set_current_state(TASK_UNINTERRUPTIBLE); +- hotplug_unlock(); ++ mutex_unlock(&cpu_hotplug.lock); + schedule(); + } + } +@@ -244,7 +395,7 @@ static void cpu_hotplug_begin(void) + static void cpu_hotplug_done(void) + { + cpu_hotplug.active_writer = NULL; +- hotplug_unlock(); ++ mutex_unlock(&cpu_hotplug.lock); + } + + /* +@@ -445,6 +596,9 @@ static int __ref _cpu_down(unsigned int + } + smpboot_park_threads(cpu); + ++ /* Notifiers are done. Don't let any more tasks pin this CPU. */ ++ cpu_unplug_sync(cpu); ++ + err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); + if (err) { + /* CPU didn't die: tell everyone. Can't complain. 
*/ +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -2963,7 +2963,7 @@ void migrate_disable(void) + { + struct task_struct *p = current; + +- if (in_atomic() || p->flags & PF_NO_SETAFFINITY) { ++ if (in_atomic()) { + #ifdef CONFIG_SCHED_DEBUG + p->migrate_disable_atomic++; + #endif +@@ -2994,7 +2994,7 @@ void migrate_enable(void) + unsigned long flags; + struct rq *rq; + +- if (in_atomic() || p->flags & PF_NO_SETAFFINITY) { ++ if (in_atomic()) { + #ifdef CONFIG_SCHED_DEBUG + p->migrate_disable_atomic--; + #endif +@@ -4966,6 +4966,84 @@ void do_set_cpus_allowed(struct task_str + cpumask_copy(&p->cpus_allowed, new_mask); + } + ++static DEFINE_PER_CPU(struct cpumask, sched_cpumasks); ++static DEFINE_MUTEX(sched_down_mutex); ++static cpumask_t sched_down_cpumask; ++ ++void tell_sched_cpu_down_begin(int cpu) ++{ ++ mutex_lock(&sched_down_mutex); ++ cpumask_set_cpu(cpu, &sched_down_cpumask); ++ mutex_unlock(&sched_down_mutex); ++} ++ ++void tell_sched_cpu_down_done(int cpu) ++{ ++ mutex_lock(&sched_down_mutex); ++ cpumask_clear_cpu(cpu, &sched_down_cpumask); ++ mutex_unlock(&sched_down_mutex); ++} ++ ++/** ++ * migrate_me - try to move the current task off this cpu ++ * ++ * Used by the pin_current_cpu() code to try to get tasks ++ * to move off the current CPU as it is going down. ++ * It will only move the task if the task isn't pinned to ++ * the CPU (with migrate_disable, affinity or NO_SETAFFINITY) ++ * and the task has to be in a RUNNING state. Otherwise the ++ * movement of the task will wake it up (change its state ++ * to running) when the task did not expect it. ++ * ++ * Returns 1 if it succeeded in moving the current task ++ * 0 otherwise. ++ */ ++int migrate_me(void) ++{ ++ struct task_struct *p = current; ++ struct migration_arg arg; ++ struct cpumask *cpumask; ++ struct cpumask *mask; ++ unsigned long flags; ++ unsigned int dest_cpu; ++ struct rq *rq; ++ ++ /* ++ * We can not migrate tasks bounded to a CPU or tasks not ++ * running. The movement of the task will wake it up. ++ */ ++ if (p->flags & PF_NO_SETAFFINITY || p->state) ++ return 0; ++ ++ mutex_lock(&sched_down_mutex); ++ rq = task_rq_lock(p, &flags); ++ ++ cpumask = &__get_cpu_var(sched_cpumasks); ++ mask = &p->cpus_allowed; ++ ++ cpumask_andnot(cpumask, mask, &sched_down_cpumask); ++ ++ if (!cpumask_weight(cpumask)) { ++ /* It's only on this CPU? 
*/ ++ task_rq_unlock(rq, p, &flags); ++ mutex_unlock(&sched_down_mutex); ++ return 0; ++ } ++ ++ dest_cpu = cpumask_any_and(cpu_active_mask, cpumask); ++ ++ arg.task = p; ++ arg.dest_cpu = dest_cpu; ++ ++ task_rq_unlock(rq, p, &flags); ++ ++ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); ++ tlb_migrate_finish(p->mm); ++ mutex_unlock(&sched_down_mutex); ++ ++ return 1; ++} ++ + /* + * This is how migration works: + * diff --git a/debian/patches/features/all/rt/cpu-rt-variants.patch b/debian/patches/features/all/rt/cpu-rt-variants.patch new file mode 100644 index 000000000..fda028686 --- /dev/null +++ b/debian/patches/features/all/rt/cpu-rt-variants.patch @@ -0,0 +1,29 @@ +Subject: cpu-rt-variants.patch +From: Thomas Gleixner +Date: Fri, 17 Jun 2011 15:42:38 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/smp.h | 8 ++++++++ + 1 file changed, 8 insertions(+) + +Index: linux-stable/include/linux/smp.h +=================================================================== +--- linux-stable.orig/include/linux/smp.h ++++ linux-stable/include/linux/smp.h +@@ -223,6 +223,14 @@ static inline void kick_all_cpus_sync(vo + #define get_cpu() ({ preempt_disable(); smp_processor_id(); }) + #define put_cpu() preempt_enable() + ++#ifndef CONFIG_PREEMPT_RT_FULL ++# define get_cpu_light() get_cpu() ++# define put_cpu_light() put_cpu() ++#else ++# define get_cpu_light() ({ migrate_disable(); smp_processor_id(); }) ++# define put_cpu_light() migrate_enable() ++#endif ++ + /* + * Callback to arch code if there's nosmp or maxcpus=0 on the + * boot command line: diff --git a/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch b/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch new file mode 100644 index 000000000..eb7a22183 --- /dev/null +++ b/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch @@ -0,0 +1,39 @@ +Subject: cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT +From: Thomas Gleixner +Date: Wed, 14 Dec 2011 01:03:49 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +We can't deal with the cpumask allocations which happen in atomic +context (see arch/x86/kernel/apic/io_apic.c) on RT right now. + +Signed-off-by: Thomas Gleixner +--- + arch/x86/Kconfig | 2 +- + lib/Kconfig | 1 + + 2 files changed, 2 insertions(+), 1 deletion(-) + +Index: linux-stable/arch/x86/Kconfig +=================================================================== +--- linux-stable.orig/arch/x86/Kconfig ++++ linux-stable/arch/x86/Kconfig +@@ -799,7 +799,7 @@ config IOMMU_HELPER + config MAXSMP + bool "Enable Maximum number of SMP Processors and NUMA Nodes" + depends on X86_64 && SMP && DEBUG_KERNEL +- select CPUMASK_OFFSTACK ++ select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL + ---help--- + Enable maximum number of CPUS and NUMA Nodes for this architecture. + If unsure, say N. +Index: linux-stable/lib/Kconfig +=================================================================== +--- linux-stable.orig/lib/Kconfig ++++ linux-stable/lib/Kconfig +@@ -315,6 +315,7 @@ config CHECK_SIGNATURE + + config CPUMASK_OFFSTACK + bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS ++ depends on !PREEMPT_RT_FULL + help + Use dynamic allocation for cpumask_var_t, instead of putting + them on the stack. 
This is a bit more expensive, but avoids diff --git a/debian/patches/features/all/rt/debugobjects-rt.patch b/debian/patches/features/all/rt/debugobjects-rt.patch new file mode 100644 index 000000000..873bccdc1 --- /dev/null +++ b/debian/patches/features/all/rt/debugobjects-rt.patch @@ -0,0 +1,26 @@ +Subject: debugobjects-rt.patch +From: Thomas Gleixner +Date: Sun, 17 Jul 2011 21:41:35 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + lib/debugobjects.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +Index: linux-stable/lib/debugobjects.c +=================================================================== +--- linux-stable.orig/lib/debugobjects.c ++++ linux-stable/lib/debugobjects.c +@@ -308,7 +308,10 @@ __debug_object_init(void *addr, struct d + struct debug_obj *obj; + unsigned long flags; + +- fill_pool(); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (preempt_count() == 0 && !irqs_disabled()) ++#endif ++ fill_pool(); + + db = get_bucket((unsigned long) addr); + diff --git a/debian/patches/features/all/rt/dm-make-rt-aware.patch b/debian/patches/features/all/rt/dm-make-rt-aware.patch new file mode 100644 index 000000000..ea3038f99 --- /dev/null +++ b/debian/patches/features/all/rt/dm-make-rt-aware.patch @@ -0,0 +1,37 @@ +Subject: dm: Make rt aware +From: Thomas Gleixner +Date: Mon, 14 Nov 2011 23:06:09 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Use the BUG_ON_NORT variant for the irq_disabled() checks. RT has +interrupts legitimately enabled here as we cant deadlock against the +irq thread due to the "sleeping spinlocks" conversion. + +Reported-by: Luis Claudio R. Goncalves +Cc: stable-rt@vger.kernel.org +Signed-off-by: Thomas Gleixner +--- + drivers/md/dm.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +Index: linux-stable/drivers/md/dm.c +=================================================================== +--- linux-stable.orig/drivers/md/dm.c ++++ linux-stable/drivers/md/dm.c +@@ -1706,14 +1706,14 @@ static void dm_request_fn(struct request + if (map_request(ti, clone, md)) + goto requeued; + +- BUG_ON(!irqs_disabled()); ++ BUG_ON_NONRT(!irqs_disabled()); + spin_lock(q->queue_lock); + } + + goto out; + + requeued: +- BUG_ON(!irqs_disabled()); ++ BUG_ON_NONRT(!irqs_disabled()); + spin_lock(q->queue_lock); + + delay_and_out: diff --git a/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch b/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch new file mode 100644 index 000000000..13b8686b6 --- /dev/null +++ b/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch @@ -0,0 +1,28 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:29:24 -0500 +Subject: drivers/net: Use disable_irq_nosync() in 8139too +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Use disable_irq_nosync() instead of disable_irq() as this might be +called in atomic context with netpoll. 
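The distinction the patch relies on: disable_irq() waits in synchronize_irq() for any running handler to finish, which is not safe from the atomic netpoll path, while disable_irq_nosync() only masks the line. A sketch of the pattern, with example_interrupt() standing in for the driver's real ISR (rtl8139_interrupt in the hunk below):

/* Sketch only; placeholder names for the netpoll poll_controller pattern. */
static irqreturn_t example_interrupt(int irq, void *dev_id);

static void example_poll_controller(struct net_device *dev)
{
	const int irq = dev->irq;

	disable_irq_nosync(irq);	/* mask only; must not wait for handlers here */
	example_interrupt(irq, dev);	/* run the ISR by hand, netpoll style */
	enable_irq(irq);
}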
+ +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + drivers/net/ethernet/realtek/8139too.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: linux-stable/drivers/net/ethernet/realtek/8139too.c +=================================================================== +--- linux-stable.orig/drivers/net/ethernet/realtek/8139too.c ++++ linux-stable/drivers/net/ethernet/realtek/8139too.c +@@ -2213,7 +2213,7 @@ static void rtl8139_poll_controller(stru + struct rtl8139_private *tp = netdev_priv(dev); + const int irq = tp->pci_dev->irq; + +- disable_irq(irq); ++ disable_irq_nosync(irq); + rtl8139_interrupt(irq, dev); + enable_irq(irq); + } diff --git a/debian/patches/features/all/rt/drivers-net-fix-livelock-issues.patch b/debian/patches/features/all/rt/drivers-net-fix-livelock-issues.patch new file mode 100644 index 000000000..f82a8c271 --- /dev/null +++ b/debian/patches/features/all/rt/drivers-net-fix-livelock-issues.patch @@ -0,0 +1,141 @@ +From: Thomas Gleixner +Date: Sat, 20 Jun 2009 11:36:54 +0200 +Subject: drivers/net: fix livelock issues +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Preempt-RT runs into a live lock issue with the NETDEV_TX_LOCKED micro +optimization. The reason is that the softirq thread is rescheduling +itself on that return value. Depending on priorities it starts to +monoplize the CPU and livelock on UP systems. + +Remove it. + +Signed-off-by: Thomas Gleixner + +--- + drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 6 +----- + drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 3 +-- + drivers/net/ethernet/chelsio/cxgb/sge.c | 3 +-- + drivers/net/ethernet/neterion/s2io.c | 7 +------ + drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 6 ++---- + drivers/net/ethernet/tehuti/tehuti.c | 9 ++------- + drivers/net/rionet.c | 6 +----- + 7 files changed, 9 insertions(+), 31 deletions(-) + +Index: linux-stable/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +=================================================================== +--- linux-stable.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c ++++ linux-stable/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +@@ -2168,11 +2168,7 @@ static netdev_tx_t atl1c_xmit_frame(stru + } + + tpd_req = atl1c_cal_tpd_req(skb); +- if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) { +- if (netif_msg_pktdata(adapter)) +- dev_info(&adapter->pdev->dev, "tx locked\n"); +- return NETDEV_TX_LOCKED; +- } ++ spin_lock_irqsave(&adapter->tx_lock, flags); + + if (atl1c_tpd_avail(adapter, type) < tpd_req) { + /* no enough descriptor, just stop queue */ +Index: linux-stable/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +=================================================================== +--- linux-stable.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c ++++ linux-stable/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +@@ -1799,8 +1799,7 @@ static netdev_tx_t atl1e_xmit_frame(stru + return NETDEV_TX_OK; + } + tpd_req = atl1e_cal_tdp_req(skb); +- if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) +- return NETDEV_TX_LOCKED; ++ spin_lock_irqsave(&adapter->tx_lock, flags); + + if (atl1e_tpd_avail(adapter) < tpd_req) { + /* no enough descriptor, just stop queue */ +Index: linux-stable/drivers/net/ethernet/chelsio/cxgb/sge.c +=================================================================== +--- linux-stable.orig/drivers/net/ethernet/chelsio/cxgb/sge.c ++++ linux-stable/drivers/net/ethernet/chelsio/cxgb/sge.c +@@ -1665,8 +1665,7 @@ static int t1_sge_tx(struct sk_buff 
*skb + struct cmdQ *q = &sge->cmdQ[qid]; + unsigned int credits, pidx, genbit, count, use_sched_skb = 0; + +- if (!spin_trylock(&q->lock)) +- return NETDEV_TX_LOCKED; ++ spin_lock(&q->lock); + + reclaim_completed_tx(sge, q); + +Index: linux-stable/drivers/net/ethernet/neterion/s2io.c +=================================================================== +--- linux-stable.orig/drivers/net/ethernet/neterion/s2io.c ++++ linux-stable/drivers/net/ethernet/neterion/s2io.c +@@ -4089,12 +4089,7 @@ static netdev_tx_t s2io_xmit(struct sk_b + [skb->priority & (MAX_TX_FIFOS - 1)]; + fifo = &mac_control->fifos[queue]; + +- if (do_spin_lock) +- spin_lock_irqsave(&fifo->tx_lock, flags); +- else { +- if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags))) +- return NETDEV_TX_LOCKED; +- } ++ spin_lock_irqsave(&fifo->tx_lock, flags); + + if (sp->config.multiq) { + if (__netif_subqueue_stopped(dev, fifo->fifo_no)) { +Index: linux-stable/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +=================================================================== +--- linux-stable.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c ++++ linux-stable/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +@@ -2109,10 +2109,8 @@ static int pch_gbe_xmit_frame(struct sk_ + struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring; + unsigned long flags; + +- if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) { +- /* Collision - tell upper layer to requeue */ +- return NETDEV_TX_LOCKED; +- } ++ spin_lock_irqsave(&tx_ring->tx_lock, flags); ++ + if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) { + netif_stop_queue(netdev); + spin_unlock_irqrestore(&tx_ring->tx_lock, flags); +Index: linux-stable/drivers/net/ethernet/tehuti/tehuti.c +=================================================================== +--- linux-stable.orig/drivers/net/ethernet/tehuti/tehuti.c ++++ linux-stable/drivers/net/ethernet/tehuti/tehuti.c +@@ -1629,13 +1629,8 @@ static netdev_tx_t bdx_tx_transmit(struc + unsigned long flags; + + ENTER; +- local_irq_save(flags); +- if (!spin_trylock(&priv->tx_lock)) { +- local_irq_restore(flags); +- DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n", +- BDX_DRV_NAME, ndev->name); +- return NETDEV_TX_LOCKED; +- } ++ ++ spin_lock_irqsave(&priv->tx_lock, flags); + + /* build tx descriptor */ + BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */ +Index: linux-stable/drivers/net/rionet.c +=================================================================== +--- linux-stable.orig/drivers/net/rionet.c ++++ linux-stable/drivers/net/rionet.c +@@ -174,11 +174,7 @@ static int rionet_start_xmit(struct sk_b + unsigned long flags; + int add_num = 1; + +- local_irq_save(flags); +- if (!spin_trylock(&rnet->tx_lock)) { +- local_irq_restore(flags); +- return NETDEV_TX_LOCKED; +- } ++ spin_lock_irqsave(&rnet->tx_lock, flags); + + if (is_multicast_ether_addr(eth->h_dest)) + add_num = nets[rnet->mport->id].nact; diff --git a/debian/patches/features/all/rt/drivers-net-gianfar-make-rt-aware.patch b/debian/patches/features/all/rt/drivers-net-gianfar-make-rt-aware.patch new file mode 100644 index 000000000..ec9551594 --- /dev/null +++ b/debian/patches/features/all/rt/drivers-net-gianfar-make-rt-aware.patch @@ -0,0 +1,58 @@ +From: Thomas Gleixner +Date: Thu, 1 Apr 2010 20:20:57 +0200 +Subject: drivers: net: gianfar: Make RT aware +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +The adjust_link() disables interrupts before taking the queue +locks. 
On RT those locks are converted to "sleeping" locks and +therefor the local_irq_save/restore must be converted to +local_irq_save/restore_nort. + +Reported-by: Xianghua Xiao +Signed-off-by: Thomas Gleixner +Tested-by: Xianghua Xiao + +--- + drivers/net/ethernet/freescale/gianfar.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +Index: linux-stable/drivers/net/ethernet/freescale/gianfar.c +=================================================================== +--- linux-stable.orig/drivers/net/ethernet/freescale/gianfar.c ++++ linux-stable/drivers/net/ethernet/freescale/gianfar.c +@@ -1674,7 +1674,7 @@ void stop_gfar(struct net_device *dev) + + + /* Lock it down */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + lock_tx_qs(priv); + lock_rx_qs(priv); + +@@ -1682,7 +1682,7 @@ void stop_gfar(struct net_device *dev) + + unlock_rx_qs(priv); + unlock_tx_qs(priv); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + /* Free the IRQs */ + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { +@@ -2983,7 +2983,7 @@ static void adjust_link(struct net_devic + struct phy_device *phydev = priv->phydev; + int new_state = 0; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + lock_tx_qs(priv); + + if (phydev->link) { +@@ -3052,7 +3052,7 @@ static void adjust_link(struct net_devic + if (new_state && netif_msg_link(priv)) + phy_print_status(phydev); + unlock_tx_qs(priv); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + + /* Update the hash table based on the current list of multicast diff --git a/debian/patches/features/all/rt/drivers-net-tulip-add-missing-pci-disable.patch b/debian/patches/features/all/rt/drivers-net-tulip-add-missing-pci-disable.patch new file mode 100644 index 000000000..9776725a1 --- /dev/null +++ b/debian/patches/features/all/rt/drivers-net-tulip-add-missing-pci-disable.patch @@ -0,0 +1,26 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:30:18 -0500 +Subject: drivers/net: tulip_remove_one needs to call pci_disable_device() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Otherwise the device is not completely shut down. + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + drivers/net/ethernet/dec/tulip/tulip_core.c | 1 + + 1 file changed, 1 insertion(+) + +Index: linux-stable/drivers/net/ethernet/dec/tulip/tulip_core.c +=================================================================== +--- linux-stable.orig/drivers/net/ethernet/dec/tulip/tulip_core.c ++++ linux-stable/drivers/net/ethernet/dec/tulip/tulip_core.c +@@ -1943,6 +1943,7 @@ static void tulip_remove_one(struct pci_ + pci_iounmap(pdev, tp->base_addr); + free_netdev (dev); + pci_release_regions (pdev); ++ pci_disable_device (pdev); + pci_set_drvdata (pdev, NULL); + + /* pci_power_off (pdev, -1); */ diff --git a/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch b/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch new file mode 100644 index 000000000..91f051f87 --- /dev/null +++ b/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch @@ -0,0 +1,51 @@ +From: Steven Rostedt +Date: Fri, 3 Jul 2009 08:30:00 -0500 +Subject: drivers/net: vortex fix locking issues +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Argh, cut and paste wasn't enough... + +Use this patch instead. It needs an irq disable. But, believe it or not, +on SMP this is actually better. 
If the irq is shared (as it is in Mark's +case), we don't stop the irq of other devices from being handled on +another CPU (unfortunately for Mark, he pinned all interrupts to one CPU). + +Signed-off-by: Steven Rostedt +Signed-off-by: Thomas Gleixner + + drivers/net/ethernet/3com/3c59x.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +Signed-off-by: Ingo Molnar + +Index: linux-stable/drivers/net/ethernet/3com/3c59x.c +=================================================================== +--- linux-stable.orig/drivers/net/ethernet/3com/3c59x.c ++++ linux-stable/drivers/net/ethernet/3com/3c59x.c +@@ -842,9 +842,9 @@ static void poll_vortex(struct net_devic + { + struct vortex_private *vp = netdev_priv(dev); + unsigned long flags; +- local_irq_save(flags); ++ local_irq_save_nort(flags); + (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + #endif + +@@ -1918,12 +1918,12 @@ static void vortex_tx_timeout(struct net + * Block interrupts because vortex_interrupt does a bare spin_lock() + */ + unsigned long flags; +- local_irq_save(flags); ++ local_irq_save_nort(flags); + if (vp->full_bus_master_tx) + boomerang_interrupt(dev->irq, dev); + else + vortex_interrupt(dev->irq, dev); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + } + diff --git a/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch b/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch new file mode 100644 index 000000000..4323d7619 --- /dev/null +++ b/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch @@ -0,0 +1,42 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:29:30 -0500 +Subject: drivers: random: Reduce preempt disabled region +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +No need to keep preemption disabled across the whole function. 
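A minimal sketch of the general shape, with placeholder names (example_count, example_mix_sample()), not the actual random.c code: only the per-CPU rate-limit check stays inside the preempt-disabled window, and the expensive sampling runs preemptible:

  static DEFINE_PER_CPU(unsigned int, example_count);

  static void example_mix_sample(unsigned long j, cycles_t c, unsigned int num);  /* placeholder */

  static void example_add_randomness(unsigned int num)
  {
          preempt_disable();
          /* the per-CPU counter is the only thing that needs preemption off */
          if ((__this_cpu_inc_return(example_count) - 1) & 0xfff) {
                  preempt_enable();
                  return;
          }
          preempt_enable();

          /* the slow sampling/mixing path runs fully preemptible */
          example_mix_sample(jiffies, get_cycles(), num);
  }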
+ +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + drivers/char/random.c | 9 +++++---- + 1 file changed, 5 insertions(+), 4 deletions(-) + +Index: linux-stable/drivers/char/random.c +=================================================================== +--- linux-stable.orig/drivers/char/random.c ++++ linux-stable/drivers/char/random.c +@@ -676,9 +676,12 @@ static void add_timer_randomness(struct + preempt_disable(); + /* if over the trickle threshold, use only 1 in 4096 samples */ + if (input_pool.entropy_count > trickle_thresh && +- ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff)) +- goto out; ++ ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff)) { ++ preempt_enable(); ++ return; ++ } + ++ preempt_enable(); + sample.jiffies = jiffies; + sample.cycles = get_cycles(); + sample.num = num; +@@ -719,8 +722,6 @@ static void add_timer_randomness(struct + credit_entropy_bits(&input_pool, + min_t(int, fls(delta>>1), 11)); + } +-out: +- preempt_enable(); + } + + void add_input_randomness(unsigned int type, unsigned int code, diff --git a/debian/patches/features/all/rt/drivers-serial-call-flush_to_ldisc-when-the-irq-is-t.patch b/debian/patches/features/all/rt/drivers-serial-call-flush_to_ldisc-when-the-irq-is-t.patch new file mode 100644 index 000000000..ac61d5aeb --- /dev/null +++ b/debian/patches/features/all/rt/drivers-serial-call-flush_to_ldisc-when-the-irq-is-t.patch @@ -0,0 +1,36 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:30:01 -0500 +Subject: serial: 8250: Call flush_to_ldisc when the irq is threaded +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Ingo Molnar + +--- + drivers/tty/tty_buffer.c | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +Index: linux-stable/drivers/tty/tty_buffer.c +=================================================================== +--- linux-stable.orig/drivers/tty/tty_buffer.c ++++ linux-stable/drivers/tty/tty_buffer.c +@@ -517,10 +517,15 @@ void tty_flip_buffer_push(struct tty_por + buf->tail->commit = buf->tail->used; + spin_unlock_irqrestore(&buf->lock, flags); + ++#ifndef CONFIG_PREEMPT_RT_FULL + if (port->low_latency) + flush_to_ldisc(&buf->work); + else + schedule_work(&buf->work); ++#else ++ flush_to_ldisc(&buf->work); ++#endif ++ + } + EXPORT_SYMBOL(tty_flip_buffer_push); + +@@ -545,4 +550,3 @@ void tty_buffer_init(struct tty_port *po + buf->memory_used = 0; + INIT_WORK(&buf->work, flush_to_ldisc); + } +- diff --git a/debian/patches/features/all/rt/drivers-serial-cleanup-locking-for-rt.patch b/debian/patches/features/all/rt/drivers-serial-cleanup-locking-for-rt.patch new file mode 100644 index 000000000..c462d45c1 --- /dev/null +++ b/debian/patches/features/all/rt/drivers-serial-cleanup-locking-for-rt.patch @@ -0,0 +1,45 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:30:01 -0500 +Subject: serial: 8250: Clean up the locking for -rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + drivers/tty/serial/8250/8250_core.c | 15 +++++---------- + 1 file changed, 5 insertions(+), 10 deletions(-) + +Index: linux-stable/drivers/tty/serial/8250/8250_core.c +=================================================================== +--- linux-stable.orig/drivers/tty/serial/8250/8250_core.c ++++ linux-stable/drivers/tty/serial/8250/8250_core.c +@@ -2860,14 +2860,10 @@ serial8250_console_write(struct console + + touch_nmi_watchdog(); + +- local_irq_save(flags); +- if 
(port->sysrq) { +- /* serial8250_handle_irq() already took the lock */ +- locked = 0; +- } else if (oops_in_progress) { +- locked = spin_trylock(&port->lock); +- } else +- spin_lock(&port->lock); ++ if (port->sysrq || oops_in_progress) ++ locked = spin_trylock_irqsave(&port->lock, flags); ++ else ++ spin_lock_irqsave(&port->lock, flags); + + /* + * First save the IER then disable the interrupts +@@ -2899,8 +2895,7 @@ serial8250_console_write(struct console + serial8250_modem_status(up); + + if (locked) +- spin_unlock(&port->lock); +- local_irq_restore(flags); ++ spin_unlock_irqrestore(&port->lock, flags); + } + + static int __init serial8250_console_setup(struct console *co, char *options) diff --git a/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch b/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch new file mode 100644 index 000000000..6eddefb4d --- /dev/null +++ b/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch @@ -0,0 +1,41 @@ +Subject: drivers-tty-fix-omap-lock-crap.patch +From: Thomas Gleixner +Date: Thu, 28 Jul 2011 13:32:57 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + drivers/tty/serial/omap-serial.c | 12 ++++-------- + 1 file changed, 4 insertions(+), 8 deletions(-) + +Index: linux-stable/drivers/tty/serial/omap-serial.c +=================================================================== +--- linux-stable.orig/drivers/tty/serial/omap-serial.c ++++ linux-stable/drivers/tty/serial/omap-serial.c +@@ -1169,13 +1169,10 @@ serial_omap_console_write(struct console + + pm_runtime_get_sync(up->dev); + +- local_irq_save(flags); +- if (up->port.sysrq) +- locked = 0; +- else if (oops_in_progress) +- locked = spin_trylock(&up->port.lock); ++ if (up->port.sysrq || oops_in_progress) ++ locked = spin_trylock_irqsave(&up->port.lock, flags); + else +- spin_lock(&up->port.lock); ++ spin_lock_irqsave(&up->port.lock, flags); + + /* + * First save the IER then disable the interrupts +@@ -1204,8 +1201,7 @@ serial_omap_console_write(struct console + pm_runtime_mark_last_busy(up->dev); + pm_runtime_put_autosuspend(up->dev); + if (locked) +- spin_unlock(&up->port.lock); +- local_irq_restore(flags); ++ spin_unlock_irqrestore(&up->port.lock, flags); + } + + static int __init diff --git a/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch b/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch new file mode 100644 index 000000000..00a0c3d1b --- /dev/null +++ b/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch @@ -0,0 +1,47 @@ +Subject: drivers-tty-pl011-irq-disable-madness.patch +From: Thomas Gleixner +Date: Tue, 08 Jan 2013 21:36:51 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + drivers/tty/serial/amba-pl011.c | 15 ++++++++++----- + 1 file changed, 10 insertions(+), 5 deletions(-) + +Index: linux-stable/drivers/tty/serial/amba-pl011.c +=================================================================== +--- linux-stable.orig/drivers/tty/serial/amba-pl011.c ++++ linux-stable/drivers/tty/serial/amba-pl011.c +@@ -1923,13 +1923,19 @@ pl011_console_write(struct console *co, + + clk_enable(uap->clk); + +- local_irq_save(flags); ++ /* ++ * local_irq_save(flags); ++ * ++ * This local_irq_save() is nonsense. If we come in via sysrq ++ * handling then interrupts are already disabled. 
Aside of ++ * that the port.sysrq check is racy on SMP regardless. ++ */ + if (uap->port.sysrq) + locked = 0; + else if (oops_in_progress) +- locked = spin_trylock(&uap->port.lock); ++ locked = spin_trylock_irqsave(&uap->port.lock, flags); + else +- spin_lock(&uap->port.lock); ++ spin_lock_irqsave(&uap->port.lock, flags); + + /* + * First save the CR then disable the interrupts +@@ -1951,8 +1957,7 @@ pl011_console_write(struct console *co, + writew(old_cr, uap->port.membase + UART011_CR); + + if (locked) +- spin_unlock(&uap->port.lock); +- local_irq_restore(flags); ++ spin_unlock_irqrestore(&uap->port.lock, flags); + + clk_disable(uap->clk); + } diff --git a/debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch b/debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch new file mode 100644 index 000000000..cb2c68281 --- /dev/null +++ b/debian/patches/features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch @@ -0,0 +1,62 @@ +From d841118ac80c5bfb18f47984bc40687eed08b714 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Thu, 25 Apr 2013 18:12:52 +0200 +Subject: [PATCH] drm/i915: drop trace_i915_gem_ring_dispatch on rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +This tracepoint is responsible for: + +|[<814cc358>] __schedule_bug+0x4d/0x59 +|[<814d24cc>] __schedule+0x88c/0x930 +|[<814d3b90>] ? _raw_spin_unlock_irqrestore+0x40/0x50 +|[<814d3b95>] ? _raw_spin_unlock_irqrestore+0x45/0x50 +|[<810b57b5>] ? task_blocks_on_rt_mutex+0x1f5/0x250 +|[<814d27d9>] schedule+0x29/0x70 +|[<814d3423>] rt_spin_lock_slowlock+0x15b/0x278 +|[<814d3786>] rt_spin_lock+0x26/0x30 +|[] gen6_gt_force_wake_get+0x29/0x60 [i915] +|[] gen6_ring_get_irq+0x5f/0x100 [i915] +|[] ftrace_raw_event_i915_gem_ring_dispatch+0xe3/0x100 [i915] +|[] i915_gem_do_execbuffer.isra.13+0xbd3/0x1430 [i915] +|[<810f8943>] ? trace_buffer_unlock_commit+0x43/0x60 +|[<8113e8d2>] ? ftrace_raw_event_kmem_alloc+0xd2/0x180 +|[<8101d063>] ? native_sched_clock+0x13/0x80 +|[] i915_gem_execbuffer2+0x99/0x280 [i915] +|[] drm_ioctl+0x4c3/0x570 [drm] +|[<8101d0d9>] ? sched_clock+0x9/0x10 +|[] ? i915_gem_execbuffer+0x480/0x480 [i915] +|[<810f1c18>] ? rb_commit+0x68/0xa0 +|[<810f1c6c>] ? ring_buffer_unlock_commit+0x1c/0xa0 +|[<81197467>] do_vfs_ioctl+0x97/0x540 +|[<81021318>] ? ftrace_raw_event_sys_enter+0xd8/0x130 +|[<811979a1>] sys_ioctl+0x91/0xb0 +|[<814db931>] tracesys+0xe1/0xe6 + +Chris Wilson does not like to move i915_trace_irq_get() out of the macro + +|No. This enables the IRQ, as well as making a number of +|very expensively serialised read, unconditionally. + +so it is gone now on RT. 
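The change itself is small; as a sketch of the pattern (placeholder names, not the i915 code): the tracepoint is simply compiled out when PREEMPT_RT_BASE is set, so the sleeping-lock path reached from inside the trace event never runs on RT:

  struct example_ring;                              /* placeholder type */

  static void example_dispatch(struct example_ring *ring, u32 seqno)
  {
  #ifndef CONFIG_PREEMPT_RT_BASE
          trace_example_dispatch(ring, seqno);      /* placeholder tracepoint */
  #endif
          example_submit(ring, seqno);              /* placeholder submit path */
  }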
+ +Cc: stable-rt@vger.kernel.org +Reported-by: Joakim Hernberg +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 ++ + 1 file changed, 2 insertions(+) + +Index: linux-stable/drivers/gpu/drm/i915/i915_gem_execbuffer.c +=================================================================== +--- linux-stable.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c ++++ linux-stable/drivers/gpu/drm/i915/i915_gem_execbuffer.c +@@ -1071,7 +1071,9 @@ i915_gem_do_execbuffer(struct drm_device + goto err; + } + ++#ifndef CONFIG_PREEMPT_RT_BASE + trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); ++#endif + + i915_gem_execbuffer_move_to_active(&eb->objects, ring); + i915_gem_execbuffer_retire_commands(dev, file, ring); diff --git a/debian/patches/features/all/rt/early-printk-consolidate.patch b/debian/patches/features/all/rt/early-printk-consolidate.patch new file mode 100644 index 000000000..b94b93599 --- /dev/null +++ b/debian/patches/features/all/rt/early-printk-consolidate.patch @@ -0,0 +1,49 @@ +Subject: early-printk-consolidate.patch +From: Thomas Gleixner +Date: Sat, 23 Jul 2011 11:04:08 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + arch/sparc/kernel/setup_32.c | 1 + + arch/sparc/kernel/setup_64.c | 8 +++++++- + 2 files changed, 8 insertions(+), 1 deletion(-) + +Index: linux-stable/arch/sparc/kernel/setup_32.c +=================================================================== +--- linux-stable.orig/arch/sparc/kernel/setup_32.c ++++ linux-stable/arch/sparc/kernel/setup_32.c +@@ -309,6 +309,7 @@ void __init setup_arch(char **cmdline_p) + + boot_flags_init(*cmdline_p); + ++ early_console = &prom_early_console; + register_console(&prom_early_console); + + printk("ARCH: "); +Index: linux-stable/arch/sparc/kernel/setup_64.c +=================================================================== +--- linux-stable.orig/arch/sparc/kernel/setup_64.c ++++ linux-stable/arch/sparc/kernel/setup_64.c +@@ -551,6 +551,12 @@ static void __init init_sparc64_elf_hwca + pause_patch(); + } + ++static inline void register_prom_console(void) ++{ ++ early_console = &prom_early_console; ++ register_console(&prom_early_console); ++} ++ + void __init setup_arch(char **cmdline_p) + { + /* Initialize PROM console and command line. 
*/ +@@ -562,7 +568,7 @@ void __init setup_arch(char **cmdline_p) + #ifdef CONFIG_EARLYFB + if (btext_find_display()) + #endif +- register_console(&prom_early_console); ++ register_prom_console(); + + if (tlb_type == hypervisor) + printk("ARCH: SUN4V\n"); diff --git a/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch b/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch new file mode 100644 index 000000000..8149ef638 --- /dev/null +++ b/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch @@ -0,0 +1,29 @@ +Subject: epoll.patch +From: Thomas Gleixner +Date: Fri, 08 Jul 2011 16:35:35 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + fs/eventpoll.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +Index: linux-stable/fs/eventpoll.c +=================================================================== +--- linux-stable.orig/fs/eventpoll.c ++++ linux-stable/fs/eventpoll.c +@@ -500,12 +500,12 @@ static int ep_poll_wakeup_proc(void *pri + */ + static void ep_poll_safewake(wait_queue_head_t *wq) + { +- int this_cpu = get_cpu(); ++ int this_cpu = get_cpu_light(); + + ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS, + ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu); + +- put_cpu(); ++ put_cpu_light(); + } + + static void ep_remove_wait_queue(struct eppoll_entry *pwq) diff --git a/debian/patches/features/all/rt/filemap-fix-up.patch b/debian/patches/features/all/rt/filemap-fix-up.patch new file mode 100644 index 000000000..b053c765a --- /dev/null +++ b/debian/patches/features/all/rt/filemap-fix-up.patch @@ -0,0 +1,25 @@ +Subject: filemap-fix-up.patch +From: Thomas Gleixner +Date: Fri, 17 Jun 2011 18:56:24 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +Wrecked-off-by: Peter Zijlstra +Link: http://lkml.kernel.org/n/tip-m6yuzd6ul717hlnl2gj6p3ou@git.kernel.org +--- + mm/filemap.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: linux-stable/mm/filemap.c +=================================================================== +--- linux-stable.orig/mm/filemap.c ++++ linux-stable/mm/filemap.c +@@ -1974,7 +1974,7 @@ size_t iov_iter_copy_from_user_atomic(st + char *kaddr; + size_t copied; + +- BUG_ON(!in_atomic()); ++ BUG_ON(!pagefault_disabled()); + kaddr = kmap_atomic(page); + if (likely(i->nr_segs == 1)) { + int left; diff --git a/debian/patches/features/all/rt/fix-rt-int3-x86_32-3.2-rt.patch b/debian/patches/features/all/rt/fix-rt-int3-x86_32-3.2-rt.patch new file mode 100644 index 000000000..05f5fd55b --- /dev/null +++ b/debian/patches/features/all/rt/fix-rt-int3-x86_32-3.2-rt.patch @@ -0,0 +1,115 @@ +From: Steven Rostedt +Subject: x86: Do not disable preemption in int3 on 32bit +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Preemption must be disabled before enabling interrupts in do_trap +on x86_64 because the stack in use for int3 and debug is a per CPU +stack set by th IST. But 32bit does not have an IST and the stack +still belongs to the current task and there is no problem in scheduling +out the task. + +Keep preemption enabled on X86_32 when enabling interrupts for +do_trap(). + +The name of the function is changed from preempt_conditional_sti/cli() +to conditional_sti/cli_ist(), to annotate that this function is used +when the stack is on the IST. 
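Condensed from the hunk below, the enable-side helper shows the asymmetry: interrupts are conditionally re-enabled on both architectures, but the preempt count is only raised where a per-CPU IST stack is in use:

  static inline void conditional_sti_ist(struct pt_regs *regs)
  {
  #ifdef CONFIG_X86_64
          inc_preempt_count();    /* int3/debug run on a per-CPU IST stack */
  #endif
          if (regs->flags & X86_EFLAGS_IF)
                  local_irq_enable();
  }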
+ +Cc: stable-rt@vger.kernel.org +Signed-off-by: Steven Rostedt +Signed-off-by: Thomas Gleixner + +--- + arch/x86/kernel/traps.c | 32 +++++++++++++++++++++++--------- + 1 file changed, 23 insertions(+), 9 deletions(-) + +Index: linux-stable/arch/x86/kernel/traps.c +=================================================================== +--- linux-stable.orig/arch/x86/kernel/traps.c ++++ linux-stable/arch/x86/kernel/traps.c +@@ -85,9 +85,21 @@ static inline void conditional_sti(struc + local_irq_enable(); + } + +-static inline void preempt_conditional_sti(struct pt_regs *regs) ++static inline void conditional_sti_ist(struct pt_regs *regs) + { ++#ifdef CONFIG_X86_64 ++ /* ++ * X86_64 uses a per CPU stack on the IST for certain traps ++ * like int3. The task can not be preempted when using one ++ * of these stacks, thus preemption must be disabled, otherwise ++ * the stack can be corrupted if the task is scheduled out, ++ * and another task comes in and uses this stack. ++ * ++ * On x86_32 the task keeps its own stack and it is OK if the ++ * task schedules out. ++ */ + inc_preempt_count(); ++#endif + if (regs->flags & X86_EFLAGS_IF) + local_irq_enable(); + } +@@ -98,11 +110,13 @@ static inline void conditional_cli(struc + local_irq_disable(); + } + +-static inline void preempt_conditional_cli(struct pt_regs *regs) ++static inline void conditional_cli_ist(struct pt_regs *regs) + { + if (regs->flags & X86_EFLAGS_IF) + local_irq_disable(); ++#ifdef CONFIG_X86_64 + dec_preempt_count(); ++#endif + } + + static int __kprobes +@@ -235,9 +249,9 @@ dotraplinkage void do_stack_segment(stru + prev_state = exception_enter(); + if (notify_die(DIE_TRAP, "stack segment", regs, error_code, + X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) { +- preempt_conditional_sti(regs); ++ conditional_sti_ist(regs); + do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL); +- preempt_conditional_cli(regs); ++ conditional_cli_ist(regs); + } + exception_exit(prev_state); + } +@@ -340,9 +354,9 @@ dotraplinkage void __kprobes notrace do_ + * as we may switch to the interrupt stack. 
+ */ + debug_stack_usage_inc(); +- preempt_conditional_sti(regs); ++ conditional_sti_ist(regs); + do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL); +- preempt_conditional_cli(regs); ++ conditional_cli_ist(regs); + debug_stack_usage_dec(); + exit: + exception_exit(prev_state); +@@ -448,12 +462,12 @@ dotraplinkage void __kprobes do_debug(st + debug_stack_usage_inc(); + + /* It's safe to allow irq's after DR6 has been saved */ +- preempt_conditional_sti(regs); ++ conditional_sti_ist(regs); + + if (regs->flags & X86_VM_MASK) { + handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, + X86_TRAP_DB); +- preempt_conditional_cli(regs); ++ conditional_cli_ist(regs); + debug_stack_usage_dec(); + goto exit; + } +@@ -473,7 +487,7 @@ dotraplinkage void __kprobes do_debug(st + si_code = get_si_code(tsk->thread.debugreg6); + if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp) + send_sigtrap(tsk, regs, error_code, si_code); +- preempt_conditional_cli(regs); ++ conditional_cli_ist(regs); + debug_stack_usage_dec(); + + exit: diff --git a/debian/patches/features/all/rt/fs-block-rt-support.patch b/debian/patches/features/all/rt/fs-block-rt-support.patch new file mode 100644 index 000000000..09e2ceeca --- /dev/null +++ b/debian/patches/features/all/rt/fs-block-rt-support.patch @@ -0,0 +1,23 @@ +Subject: fs-block-rt-support.patch +From: Thomas Gleixner +Date: Tue, 14 Jun 2011 17:05:09 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + block/blk-core.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: linux-stable/block/blk-core.c +=================================================================== +--- linux-stable.orig/block/blk-core.c ++++ linux-stable/block/blk-core.c +@@ -232,7 +232,7 @@ EXPORT_SYMBOL(blk_delay_queue); + **/ + void blk_start_queue(struct request_queue *q) + { +- WARN_ON(!irqs_disabled()); ++ WARN_ON_NONRT(!irqs_disabled()); + + queue_flag_clear(QUEUE_FLAG_STOPPED, q); + __blk_run_queue(q); diff --git a/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch b/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch new file mode 100644 index 000000000..6e305d9fd --- /dev/null +++ b/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch @@ -0,0 +1,121 @@ +Subject: fs: dcache: Use cpu_chill() in trylock loops +From: Thomas Gleixner +Date: Wed, 07 Mar 2012 21:00:34 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Retry loops on RT might loop forever when the modifying side was +preempted. Use cpu_chill() instead of cpu_relax() to let the system +make progress. 
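A minimal sketch of the converted loop shape (placeholder locks, not taken from the patch). cpu_chill() is provided elsewhere in this patch queue and amounts to a short sleep on RT, so a preempted lock holder gets to run instead of being starved by a busy-waiting cpu_relax() loop:

  static void example_lock_pair(spinlock_t *outer, spinlock_t *inner)
  {
  again:
          spin_lock(outer);
          if (!spin_trylock(inner)) {
                  spin_unlock(outer);
                  cpu_chill();            /* was: cpu_relax() */
                  goto again;
          }
          /* both locks held here */
          spin_unlock(inner);
          spin_unlock(outer);
  }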
+ +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + fs/autofs4/autofs_i.h | 1 + + fs/autofs4/expire.c | 2 +- + fs/dcache.c | 7 ++++--- + fs/namespace.c | 7 ++++--- + 4 files changed, 10 insertions(+), 7 deletions(-) + +Index: linux-stable/fs/autofs4/autofs_i.h +=================================================================== +--- linux-stable.orig/fs/autofs4/autofs_i.h ++++ linux-stable/fs/autofs4/autofs_i.h +@@ -34,6 +34,7 @@ + #include + #include + #include ++#include + #include + #include + +Index: linux-stable/fs/autofs4/expire.c +=================================================================== +--- linux-stable.orig/fs/autofs4/expire.c ++++ linux-stable/fs/autofs4/expire.c +@@ -157,7 +157,7 @@ again: + parent = p->d_parent; + if (!spin_trylock(&parent->d_lock)) { + spin_unlock(&p->d_lock); +- cpu_relax(); ++ cpu_chill(); + goto relock; + } + spin_unlock(&p->d_lock); +Index: linux-stable/fs/dcache.c +=================================================================== +--- linux-stable.orig/fs/dcache.c ++++ linux-stable/fs/dcache.c +@@ -37,6 +37,7 @@ + #include + #include + #include ++#include + #include "internal.h" + #include "mount.h" + +@@ -453,7 +454,7 @@ static inline struct dentry *dentry_kill + if (inode && !spin_trylock(&inode->i_lock)) { + relock: + spin_unlock(&dentry->d_lock); +- cpu_relax(); ++ cpu_chill(); + return dentry; /* try again with same dentry */ + } + if (IS_ROOT(dentry)) +@@ -835,7 +836,7 @@ relock: + + if (!spin_trylock(&dentry->d_lock)) { + spin_unlock(&dcache_lru_lock); +- cpu_relax(); ++ cpu_chill(); + goto relock; + } + +@@ -2072,7 +2073,7 @@ again: + if (dentry->d_count == 1) { + if (!spin_trylock(&inode->i_lock)) { + spin_unlock(&dentry->d_lock); +- cpu_relax(); ++ cpu_chill(); + goto again; + } + dentry->d_flags &= ~DCACHE_CANT_MOUNT; +Index: linux-stable/fs/namespace.c +=================================================================== +--- linux-stable.orig/fs/namespace.c ++++ linux-stable/fs/namespace.c +@@ -23,6 +23,7 @@ + #include + #include + #include ++#include + #include "pnode.h" + #include "internal.h" + +@@ -317,7 +318,7 @@ int __mnt_want_write(struct vfsmount *m) + smp_mb(); + while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) { + preempt_enable(); +- cpu_relax(); ++ cpu_chill(); + preempt_disable(); + } + /* +@@ -1289,7 +1290,7 @@ static int do_umount(struct mount *mnt, + return retval; + } + +-/* ++/* + * Is the caller allowed to modify his namespace? + */ + static inline bool may_mount(void) +@@ -1717,7 +1718,7 @@ static int do_loopback(struct path *path + + err = -EINVAL; + if (mnt_ns_loop(&old_path)) +- goto out; ++ goto out; + + mp = lock_mount(path); + err = PTR_ERR(mp); diff --git a/debian/patches/features/all/rt/fs-fscache-remove-spin_lock-from-the-condition-in-wh.patch b/debian/patches/features/all/rt/fs-fscache-remove-spin_lock-from-the-condition-in-wh.patch new file mode 100644 index 000000000..f57629311 --- /dev/null +++ b/debian/patches/features/all/rt/fs-fscache-remove-spin_lock-from-the-condition-in-wh.patch @@ -0,0 +1,47 @@ +From d32f5420ac164141683c51e8a8fce666423de492 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Wed, 3 Apr 2013 12:43:30 +0200 +Subject: [PATCH] fs/fscache: remove spin_lock() from the condition in while() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +The spinlock() within the condition in while() will cause a compile error +if it is not a function. 
This is not a problem on mainline but it does not +look pretty and there is no reason to do it that way. +That patch writes it a little differently and avoids the double condition. + +Signed-off-by: Sebastian Andrzej Siewior +--- + fs/fscache/page.c | 13 ++++++++----- + 1 file changed, 8 insertions(+), 5 deletions(-) + +Index: linux-stable/fs/fscache/page.c +=================================================================== +--- linux-stable.orig/fs/fscache/page.c ++++ linux-stable/fs/fscache/page.c +@@ -796,11 +796,13 @@ void fscache_invalidate_writes(struct fs + + _enter(""); + +- while (spin_lock(&cookie->stores_lock), +- n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, +- ARRAY_SIZE(results), +- FSCACHE_COOKIE_PENDING_TAG), +- n > 0) { ++ spin_lock(&cookie->stores_lock); ++ while (1) { ++ n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, ++ ARRAY_SIZE(results), ++ FSCACHE_COOKIE_PENDING_TAG); ++ if (n == 0) ++ break; + for (i = n - 1; i >= 0; i--) { + page = results[i]; + radix_tree_delete(&cookie->stores, page->index); +@@ -810,6 +812,7 @@ void fscache_invalidate_writes(struct fs + + for (i = n - 1; i >= 0; i--) + page_cache_release(results[i]); ++ spin_lock(&cookie->stores_lock); + } + + spin_unlock(&cookie->stores_lock); diff --git a/debian/patches/features/all/rt/fs-jbd-pull-plug-when-waiting-for-space.patch b/debian/patches/features/all/rt/fs-jbd-pull-plug-when-waiting-for-space.patch new file mode 100644 index 000000000..81e9ad33c --- /dev/null +++ b/debian/patches/features/all/rt/fs-jbd-pull-plug-when-waiting-for-space.patch @@ -0,0 +1,32 @@ +From: Mike Galbraith +Date: Wed, 11 Jul 2012 22:05:20 +0000 +Subject: fs, jbd: pull your plug when waiting for space +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +With an -rt kernel, and a heavy sync IO load, tasks can jam +up on journal locks without unplugging, which can lead to +terminal IO starvation. Unplug and schedule when waiting for space. + +Signed-off-by: Mike Galbraith +Cc: Steven Rostedt +Cc: Theodore Tso +Link: http://lkml.kernel.org/r/1341812414.7370.73.camel@marge.simpson.net +Signed-off-by: Thomas Gleixner + +--- + fs/jbd/checkpoint.c | 2 ++ + 1 file changed, 2 insertions(+) + +Index: linux-stable/fs/jbd/checkpoint.c +=================================================================== +--- linux-stable.orig/fs/jbd/checkpoint.c ++++ linux-stable/fs/jbd/checkpoint.c +@@ -129,6 +129,8 @@ void __log_wait_for_space(journal_t *jou + if (journal->j_flags & JFS_ABORT) + return; + spin_unlock(&journal->j_state_lock); ++ if (current->plug) ++ io_schedule(); + mutex_lock(&journal->j_checkpoint_mutex); + + /* diff --git a/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch b/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch new file mode 100644 index 000000000..1275f927d --- /dev/null +++ b/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch @@ -0,0 +1,105 @@ +From: Thomas Gleixner +Date: Fri, 18 Mar 2011 10:11:25 +0100 +Subject: fs: jbd/jbd2: Make state lock and journal head lock rt safe +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +bit_spin_locks break under RT. 
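Condensed from the jbd_common.h hunk below: a bit spinlock spins with preemption disabled and cannot be turned into a sleeping lock, so on RT a real spinlock_t placed next to the buffer_head state is taken instead:

  static inline void jbd_lock_bh_state(struct buffer_head *bh)
  {
  #ifndef CONFIG_PREEMPT_RT_BASE
          bit_spin_lock(BH_State, &bh->b_state);
  #else
          spin_lock(&bh->b_state_lock);   /* b_state_lock is added to struct buffer_head in this patch */
  #endif
  }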
+ +Based on a previous patch from Steven Rostedt +Signed-off-by: Thomas Gleixner + +-- + + include/linux/buffer_head.h | 10 ++++++++++ + include/linux/jbd_common.h | 24 ++++++++++++++++++++++++ + 2 files changed, 34 insertions(+) + +Index: linux-stable/include/linux/buffer_head.h +=================================================================== +--- linux-stable.orig/include/linux/buffer_head.h ++++ linux-stable/include/linux/buffer_head.h +@@ -76,6 +76,11 @@ struct buffer_head { + atomic_t b_count; /* users using this buffer_head */ + #ifdef CONFIG_PREEMPT_RT_BASE + spinlock_t b_uptodate_lock; ++#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \ ++ defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE) ++ spinlock_t b_state_lock; ++ spinlock_t b_journal_head_lock; ++#endif + #endif + }; + +@@ -107,6 +112,11 @@ static inline void buffer_head_init_lock + { + #ifdef CONFIG_PREEMPT_RT_BASE + spin_lock_init(&bh->b_uptodate_lock); ++#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \ ++ defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE) ++ spin_lock_init(&bh->b_state_lock); ++ spin_lock_init(&bh->b_journal_head_lock); ++#endif + #endif + } + +Index: linux-stable/include/linux/jbd_common.h +=================================================================== +--- linux-stable.orig/include/linux/jbd_common.h ++++ linux-stable/include/linux/jbd_common.h +@@ -39,32 +39,56 @@ static inline struct journal_head *bh2jh + + static inline void jbd_lock_bh_state(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_lock(BH_State, &bh->b_state); ++#else ++ spin_lock(&bh->b_state_lock); ++#endif + } + + static inline int jbd_trylock_bh_state(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + return bit_spin_trylock(BH_State, &bh->b_state); ++#else ++ return spin_trylock(&bh->b_state_lock); ++#endif + } + + static inline int jbd_is_locked_bh_state(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + return bit_spin_is_locked(BH_State, &bh->b_state); ++#else ++ return spin_is_locked(&bh->b_state_lock); ++#endif + } + + static inline void jbd_unlock_bh_state(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_unlock(BH_State, &bh->b_state); ++#else ++ spin_unlock(&bh->b_state_lock); ++#endif + } + + static inline void jbd_lock_bh_journal_head(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_lock(BH_JournalHead, &bh->b_state); ++#else ++ spin_lock(&bh->b_journal_head_lock); ++#endif + } + + static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_unlock(BH_JournalHead, &bh->b_state); ++#else ++ spin_unlock(&bh->b_journal_head_lock); ++#endif + } + + #endif diff --git a/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch b/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch new file mode 100644 index 000000000..4945c412f --- /dev/null +++ b/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch @@ -0,0 +1,33 @@ +From: Thomas Gleixner +Date: Sun, 19 Jul 2009 08:44:27 -0500 +Subject: fs: namespace preemption fix +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +On RT we cannot loop with preemption disabled here as +mnt_make_readonly() might have been preempted. We can safely enable +preemption while waiting for MNT_WRITE_HOLD to be cleared. Safe on !RT +as well. 
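Condensed, the resulting wait loop in __mnt_want_write() re-enables preemption for each spin so the holder of MNT_WRITE_HOLD can actually clear it, and leaves preemption disabled again on exit as the surrounding code expects:

  while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
          preempt_enable();
          cpu_relax();
          preempt_disable();
  }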
+ +Signed-off-by: Thomas Gleixner + +--- + fs/namespace.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +Index: linux-stable/fs/namespace.c +=================================================================== +--- linux-stable.orig/fs/namespace.c ++++ linux-stable/fs/namespace.c +@@ -315,8 +315,11 @@ int __mnt_want_write(struct vfsmount *m) + * incremented count after it has set MNT_WRITE_HOLD. + */ + smp_mb(); +- while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) ++ while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) { ++ preempt_enable(); + cpu_relax(); ++ preempt_disable(); ++ } + /* + * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will + * be set to match its requirements. So we must not load that until diff --git a/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch b/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch new file mode 100644 index 000000000..ad2b9a5dc --- /dev/null +++ b/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch @@ -0,0 +1,62 @@ +From: Mike Galbraith +Date: Fri, 3 Jul 2009 08:44:12 -0500 +Subject: fs: ntfs: disable interrupt only on !RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +On Sat, 2007-10-27 at 11:44 +0200, Ingo Molnar wrote: +> * Nick Piggin wrote: +> +> > > [10138.175796] [] show_trace+0x12/0x14 +> > > [10138.180291] [] dump_stack+0x16/0x18 +> > > [10138.184769] [] native_smp_call_function_mask+0x138/0x13d +> > > [10138.191117] [] smp_call_function+0x1e/0x24 +> > > [10138.196210] [] on_each_cpu+0x25/0x50 +> > > [10138.200807] [] flush_tlb_all+0x1e/0x20 +> > > [10138.205553] [] kmap_high+0x1b6/0x417 +> > > [10138.210118] [] kmap+0x4d/0x4f +> > > [10138.214102] [] ntfs_end_buffer_async_read+0x228/0x2f9 +> > > [10138.220163] [] end_bio_bh_io_sync+0x26/0x3f +> > > [10138.225352] [] bio_endio+0x42/0x6d +> > > [10138.229769] [] __end_that_request_first+0x115/0x4ac +> > > [10138.235682] [] end_that_request_chunk+0x8/0xa +> > > [10138.241052] [] ide_end_request+0x55/0x10a +> > > [10138.246058] [] ide_dma_intr+0x6f/0xac +> > > [10138.250727] [] ide_intr+0x93/0x1e0 +> > > [10138.255125] [] handle_IRQ_event+0x5c/0xc9 +> > +> > Looks like ntfs is kmap()ing from interrupt context. Should be using +> > kmap_atomic instead, I think. +> +> it's not atomic interrupt context but irq thread context - and -rt +> remaps kmap_atomic() to kmap() internally. + +Hm. Looking at the change to mm/bounce.c, perhaps I should do this +instead? + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + fs/ntfs/aops.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +Index: linux-stable/fs/ntfs/aops.c +=================================================================== +--- linux-stable.orig/fs/ntfs/aops.c ++++ linux-stable/fs/ntfs/aops.c +@@ -144,13 +144,13 @@ static void ntfs_end_buffer_async_read(s + recs = PAGE_CACHE_SIZE / rec_size; + /* Should have been verified before we got here... 
*/ + BUG_ON(!recs); +- local_irq_save(flags); ++ local_irq_save_nort(flags); + kaddr = kmap_atomic(page); + for (i = 0; i < recs; i++) + post_read_mst_fixup((NTFS_RECORD*)(kaddr + + i * rec_size), rec_size); + kunmap_atomic(kaddr); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + flush_dcache_page(page); + if (likely(page_uptodate && !PageError(page))) + SetPageUptodate(page); diff --git a/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch b/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch new file mode 100644 index 000000000..0487fe069 --- /dev/null +++ b/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch @@ -0,0 +1,168 @@ +From: Thomas Gleixner +Date: Fri, 18 Mar 2011 09:18:52 +0100 +Subject: buffer_head: Replace bh_uptodate_lock for -rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Wrap the bit_spin_lock calls into a separate inline and add the RT +replacements with a real spinlock. + +Signed-off-by: Thomas Gleixner +--- + fs/buffer.c | 21 +++++++-------------- + fs/ntfs/aops.c | 10 +++------- + include/linux/buffer_head.h | 34 ++++++++++++++++++++++++++++++++++ + 3 files changed, 44 insertions(+), 21 deletions(-) + +Index: linux-stable/fs/buffer.c +=================================================================== +--- linux-stable.orig/fs/buffer.c ++++ linux-stable/fs/buffer.c +@@ -288,8 +288,7 @@ static void end_buffer_async_read(struct + * decide that the page is now completely done. + */ + first = page_buffers(page); +- local_irq_save(flags); +- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); ++ flags = bh_uptodate_lock_irqsave(first); + clear_buffer_async_read(bh); + unlock_buffer(bh); + tmp = bh; +@@ -302,8 +301,7 @@ static void end_buffer_async_read(struct + } + tmp = tmp->b_this_page; + } while (tmp != bh); +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); ++ bh_uptodate_unlock_irqrestore(first, flags); + + /* + * If none of the buffers had errors and they are all +@@ -315,9 +313,7 @@ static void end_buffer_async_read(struct + return; + + still_busy: +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); +- return; ++ bh_uptodate_unlock_irqrestore(first, flags); + } + + /* +@@ -351,8 +347,7 @@ void end_buffer_async_write(struct buffe + } + + first = page_buffers(page); +- local_irq_save(flags); +- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); ++ flags = bh_uptodate_lock_irqsave(first); + + clear_buffer_async_write(bh); + unlock_buffer(bh); +@@ -364,15 +359,12 @@ void end_buffer_async_write(struct buffe + } + tmp = tmp->b_this_page; + } +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); ++ bh_uptodate_unlock_irqrestore(first, flags); + end_page_writeback(page); + return; + + still_busy: +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); +- return; ++ bh_uptodate_unlock_irqrestore(first, flags); + } + EXPORT_SYMBOL(end_buffer_async_write); + +@@ -3275,6 +3267,7 @@ struct buffer_head *alloc_buffer_head(gf + struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); + if (ret) { + INIT_LIST_HEAD(&ret->b_assoc_buffers); ++ buffer_head_init_locks(ret); + preempt_disable(); + __this_cpu_inc(bh_accounting.nr); + recalc_bh_state(); +Index: linux-stable/fs/ntfs/aops.c +=================================================================== +--- linux-stable.orig/fs/ntfs/aops.c ++++ linux-stable/fs/ntfs/aops.c +@@ -108,8 
+108,7 @@ static void ntfs_end_buffer_async_read(s + "0x%llx.", (unsigned long long)bh->b_blocknr); + } + first = page_buffers(page); +- local_irq_save(flags); +- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); ++ flags = bh_uptodate_lock_irqsave(first); + clear_buffer_async_read(bh); + unlock_buffer(bh); + tmp = bh; +@@ -124,8 +123,7 @@ static void ntfs_end_buffer_async_read(s + } + tmp = tmp->b_this_page; + } while (tmp != bh); +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); ++ bh_uptodate_unlock_irqrestore(first, flags); + /* + * If none of the buffers had errors then we can set the page uptodate, + * but we first have to perform the post read mst fixups, if the +@@ -160,9 +158,7 @@ static void ntfs_end_buffer_async_read(s + unlock_page(page); + return; + still_busy: +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); +- return; ++ bh_uptodate_unlock_irqrestore(first, flags); + } + + /** +Index: linux-stable/include/linux/buffer_head.h +=================================================================== +--- linux-stable.orig/include/linux/buffer_head.h ++++ linux-stable/include/linux/buffer_head.h +@@ -74,8 +74,42 @@ struct buffer_head { + struct address_space *b_assoc_map; /* mapping this buffer is + associated with */ + atomic_t b_count; /* users using this buffer_head */ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ spinlock_t b_uptodate_lock; ++#endif + }; + ++static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh) ++{ ++ unsigned long flags; ++ ++#ifndef CONFIG_PREEMPT_RT_BASE ++ local_irq_save(flags); ++ bit_spin_lock(BH_Uptodate_Lock, &bh->b_state); ++#else ++ spin_lock_irqsave(&bh->b_uptodate_lock, flags); ++#endif ++ return flags; ++} ++ ++static inline void ++bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags) ++{ ++#ifndef CONFIG_PREEMPT_RT_BASE ++ bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state); ++ local_irq_restore(flags); ++#else ++ spin_unlock_irqrestore(&bh->b_uptodate_lock, flags); ++#endif ++} ++ ++static inline void buffer_head_init_locks(struct buffer_head *bh) ++{ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ spin_lock_init(&bh->b_uptodate_lock); ++#endif ++} ++ + /* + * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() + * and buffer_foo() functions. 
diff --git a/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch b/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch new file mode 100644 index 000000000..c800e01a2 --- /dev/null +++ b/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch @@ -0,0 +1,90 @@ +Subject: ftrace-migrate-disable-tracing.patch +From: Thomas Gleixner +Date: Sun, 17 Jul 2011 21:56:42 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/ftrace_event.h | 2 ++ + kernel/trace/trace.c | 11 +++++++---- + kernel/trace/trace_events.c | 1 + + kernel/trace/trace_output.c | 5 +++++ + 4 files changed, 15 insertions(+), 4 deletions(-) + +Index: linux-stable/include/linux/ftrace_event.h +=================================================================== +--- linux-stable.orig/include/linux/ftrace_event.h ++++ linux-stable/include/linux/ftrace_event.h +@@ -56,6 +56,8 @@ struct trace_entry { + unsigned char flags; + unsigned char preempt_count; + int pid; ++ unsigned short migrate_disable; ++ unsigned short padding; + }; + + #define FTRACE_MAX_EVENT \ +Index: linux-stable/kernel/trace/trace.c +=================================================================== +--- linux-stable.orig/kernel/trace/trace.c ++++ linux-stable/kernel/trace/trace.c +@@ -368,7 +368,7 @@ int __trace_puts(unsigned long ip, const + + local_save_flags(irq_flags); + buffer = global_trace.trace_buffer.buffer; +- event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, ++ event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, + irq_flags, preempt_count()); + if (!event) + return 0; +@@ -1419,6 +1419,8 @@ tracing_generic_entry_update(struct trac + ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | + ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | + (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); ++ ++ entry->migrate_disable = (tsk) ? 
tsk->migrate_disable & 0xFF : 0; + } + EXPORT_SYMBOL_GPL(tracing_generic_entry_update); + +@@ -2322,9 +2324,10 @@ static void print_lat_help_header(struct + seq_puts(m, "# | / _----=> need-resched \n"); + seq_puts(m, "# || / _---=> hardirq/softirq \n"); + seq_puts(m, "# ||| / _--=> preempt-depth \n"); +- seq_puts(m, "# |||| / delay \n"); +- seq_puts(m, "# cmd pid ||||| time | caller \n"); +- seq_puts(m, "# \\ / ||||| \\ | / \n"); ++ seq_puts(m, "# |||| / _--=> migrate-disable\n"); ++ seq_puts(m, "# ||||| / delay \n"); ++ seq_puts(m, "# cmd pid |||||| time | caller \n"); ++ seq_puts(m, "# \\ / ||||| \\ | / \n"); + } + + static void print_event_info(struct trace_buffer *buf, struct seq_file *m) +Index: linux-stable/kernel/trace/trace_events.c +=================================================================== +--- linux-stable.orig/kernel/trace/trace_events.c ++++ linux-stable/kernel/trace/trace_events.c +@@ -154,6 +154,7 @@ static int trace_define_common_fields(vo + __common_field(unsigned char, flags); + __common_field(unsigned char, preempt_count); + __common_field(int, pid); ++ __common_field(unsigned short, migrate_disable); + + return ret; + } +Index: linux-stable/kernel/trace/trace_output.c +=================================================================== +--- linux-stable.orig/kernel/trace/trace_output.c ++++ linux-stable/kernel/trace/trace_output.c +@@ -635,6 +635,11 @@ int trace_print_lat_fmt(struct trace_seq + else + ret = trace_seq_putc(s, '.'); + ++ if (entry->migrate_disable) ++ ret = trace_seq_printf(s, "%x", entry->migrate_disable); ++ else ++ ret = trace_seq_putc(s, '.'); ++ + return ret; + } + diff --git a/debian/patches/features/all/rt/futex-requeue-pi-fix.patch b/debian/patches/features/all/rt/futex-requeue-pi-fix.patch new file mode 100644 index 000000000..3cd80df17 --- /dev/null +++ b/debian/patches/features/all/rt/futex-requeue-pi-fix.patch @@ -0,0 +1,119 @@ +From: Steven Rostedt +Subject: futex: Fix bug on when a requeued RT task times out +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Requeue with timeout causes a bug with PREEMPT_RT_FULL. + +The bug comes from a timed out condition. + + + TASK 1 TASK 2 + ------ ------ + futex_wait_requeue_pi() + futex_wait_queue_me() + + + double_lock_hb(); + + raw_spin_lock(pi_lock); + if (current->pi_blocked_on) { + } else { + current->pi_blocked_on = PI_WAKE_INPROGRESS; + run_spin_unlock(pi_lock); + spin_lock(hb->lock); <-- blocked! + + + plist_for_each_entry_safe(this) { + rt_mutex_start_proxy_lock(); + task_blocks_on_rt_mutex(); + BUG_ON(task->pi_blocked_on)!!!! + +The BUG_ON() actually has a check for PI_WAKE_INPROGRESS, but the +problem is that, after TASK 1 sets PI_WAKE_INPROGRESS, it then tries to +grab the hb->lock, which it fails to do so. As the hb->lock is a mutex, +it will block and set the "pi_blocked_on" to the hb->lock. + +When TASK 2 goes to requeue it, the check for PI_WAKE_INPROGESS fails +because the task1's pi_blocked_on is no longer set to that, but instead, +set to the hb->lock. + +The fix: + +When calling rt_mutex_start_proxy_lock() a check is made to see +if the proxy tasks pi_blocked_on is set. If so, exit out early. +Otherwise set it to a new flag PI_REQUEUE_INPROGRESS, which notifies +the proxy task that it is being requeued, and will handle things +appropriately. 
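Condensed from the rtmutex hunk below, the guard added to rt_mutex_start_proxy_lock(): check under the task's pi_lock whether it already blocked elsewhere (i.e. it timed out and is waking), and either back out with -EAGAIN or mark it with PI_REQUEUE_INPROGRESS before it is requeued:

  raw_spin_lock_irq(&task->pi_lock);
  if (task->pi_blocked_on) {
          /* the waiter woke up concurrently; do not requeue it */
          raw_spin_unlock_irq(&task->pi_lock);
          raw_spin_unlock(&lock->wait_lock);
          return -EAGAIN;
  }
  task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
  raw_spin_unlock_irq(&task->pi_lock);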
+ +Cc: stable-rt@vger.kernel.org +Signed-off-by: Steven Rostedt +Signed-off-by: Thomas Gleixner + + +--- + kernel/rtmutex.c | 32 +++++++++++++++++++++++++++++++- + kernel/rtmutex_common.h | 1 + + 2 files changed, 32 insertions(+), 1 deletion(-) + +Index: linux-stable/kernel/rtmutex.c +=================================================================== +--- linux-stable.orig/kernel/rtmutex.c ++++ linux-stable/kernel/rtmutex.c +@@ -70,7 +70,8 @@ static void fixup_rt_mutex_waiters(struc + + static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter) + { +- return waiter && waiter != PI_WAKEUP_INPROGRESS; ++ return waiter && waiter != PI_WAKEUP_INPROGRESS && ++ waiter != PI_REQUEUE_INPROGRESS; + } + + /* +@@ -982,6 +983,35 @@ int rt_mutex_start_proxy_lock(struct rt_ + return 1; + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* ++ * In PREEMPT_RT there's an added race. ++ * If the task, that we are about to requeue, times out, ++ * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue ++ * to skip this task. But right after the task sets ++ * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then ++ * block on the spin_lock(&hb->lock), which in RT is an rtmutex. ++ * This will replace the PI_WAKEUP_INPROGRESS with the actual ++ * lock that it blocks on. We *must not* place this task ++ * on this proxy lock in that case. ++ * ++ * To prevent this race, we first take the task's pi_lock ++ * and check if it has updated its pi_blocked_on. If it has, ++ * we assume that it woke up and we return -EAGAIN. ++ * Otherwise, we set the task's pi_blocked_on to ++ * PI_REQUEUE_INPROGRESS, so that if the task is waking up ++ * it will know that we are in the process of requeuing it. ++ */ ++ raw_spin_lock_irq(&task->pi_lock); ++ if (task->pi_blocked_on) { ++ raw_spin_unlock_irq(&task->pi_lock); ++ raw_spin_unlock(&lock->wait_lock); ++ return -EAGAIN; ++ } ++ task->pi_blocked_on = PI_REQUEUE_INPROGRESS; ++ raw_spin_unlock_irq(&task->pi_lock); ++#endif ++ + ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock); + + if (ret && !rt_mutex_owner(lock)) { +Index: linux-stable/kernel/rtmutex_common.h +=================================================================== +--- linux-stable.orig/kernel/rtmutex_common.h ++++ linux-stable/kernel/rtmutex_common.h +@@ -104,6 +104,7 @@ static inline struct task_struct *rt_mut + * PI-futex support (proxy locking functions, etc.): + */ + #define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1) ++#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2) + + extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); + extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, diff --git a/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch b/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch new file mode 100644 index 000000000..afa77fdfb --- /dev/null +++ b/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch @@ -0,0 +1,42 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:29:57 -0500 +Subject: genirq: disable irqpoll on -rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Creates long latencies for no value + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + kernel/irq/spurious.c | 10 ++++++++++ + 1 file changed, 10 insertions(+) + +Index: linux-stable/kernel/irq/spurious.c +=================================================================== +--- linux-stable.orig/kernel/irq/spurious.c ++++ linux-stable/kernel/irq/spurious.c +@@ -340,6 
+340,11 @@ MODULE_PARM_DESC(noirqdebug, "Disable ir + + static int __init irqfixup_setup(char *str) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ printk(KERN_WARNING "irqfixup boot option not supported " ++ "w/ CONFIG_PREEMPT_RT_BASE\n"); ++ return 1; ++#endif + irqfixup = 1; + printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n"); + printk(KERN_WARNING "This may impact system performance.\n"); +@@ -352,6 +357,11 @@ module_param(irqfixup, int, 0644); + + static int __init irqpoll_setup(char *str) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ printk(KERN_WARNING "irqpoll boot option not supported " ++ "w/ CONFIG_PREEMPT_RT_BASE\n"); ++ return 1; ++#endif + irqfixup = 2; + printk(KERN_WARNING "Misrouted IRQ fixup and polling support " + "enabled\n"); diff --git a/debian/patches/features/all/rt/genirq-force-threading.patch b/debian/patches/features/all/rt/genirq-force-threading.patch new file mode 100644 index 000000000..cec878cbe --- /dev/null +++ b/debian/patches/features/all/rt/genirq-force-threading.patch @@ -0,0 +1,51 @@ +Subject: genirq-force-threading.patch +From: Thomas Gleixner +Date: Sun, 03 Apr 2011 11:57:29 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/interrupt.h | 8 ++++++-- + kernel/irq/manage.c | 2 ++ + 2 files changed, 8 insertions(+), 2 deletions(-) + +Index: linux-stable/include/linux/interrupt.h +=================================================================== +--- linux-stable.orig/include/linux/interrupt.h ++++ linux-stable/include/linux/interrupt.h +@@ -383,9 +383,13 @@ static inline int disable_irq_wake(unsig + + + #ifdef CONFIG_IRQ_FORCED_THREADING +-extern bool force_irqthreads; ++# ifndef CONFIG_PREEMPT_RT_BASE ++ extern bool force_irqthreads; ++# else ++# define force_irqthreads (true) ++# endif + #else +-#define force_irqthreads (0) ++#define force_irqthreads (false) + #endif + + #ifndef __ARCH_SET_SOFTIRQ_PENDING +Index: linux-stable/kernel/irq/manage.c +=================================================================== +--- linux-stable.orig/kernel/irq/manage.c ++++ linux-stable/kernel/irq/manage.c +@@ -22,6 +22,7 @@ + #include "internals.h" + + #ifdef CONFIG_IRQ_FORCED_THREADING ++# ifndef CONFIG_PREEMPT_RT_BASE + __read_mostly bool force_irqthreads; + + static int __init setup_forced_irqthreads(char *arg) +@@ -30,6 +31,7 @@ static int __init setup_forced_irqthread + return 0; + } + early_param("threadirqs", setup_forced_irqthreads); ++# endif + #endif + + /** diff --git a/debian/patches/features/all/rt/genirq-nodebug-shirq.patch b/debian/patches/features/all/rt/genirq-nodebug-shirq.patch new file mode 100644 index 000000000..92cda948c --- /dev/null +++ b/debian/patches/features/all/rt/genirq-nodebug-shirq.patch @@ -0,0 +1,23 @@ +From: Thomas Gleixner +Date: Sun, 18 Mar 2011 10:22:04 +0100 +Subject: genirq: Disable DEBUG_SHIRQ for rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + lib/Kconfig.debug | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: linux-stable/lib/Kconfig.debug +=================================================================== +--- linux-stable.orig/lib/Kconfig.debug ++++ linux-stable/lib/Kconfig.debug +@@ -164,7 +164,7 @@ config DEBUG_KERNEL + + config DEBUG_SHIRQ + bool "Debug shared IRQ handlers" +- depends on DEBUG_KERNEL && GENERIC_HARDIRQS ++ depends on DEBUG_KERNEL && GENERIC_HARDIRQS && !PREEMPT_RT_BASE + help + Enable this to 
generate a spurious interrupt as soon as a shared + interrupt handler is registered, and just before one is deregistered. diff --git a/debian/patches/features/all/rt/gpu-i915-allow-the-user-not-to-do-the-wbinvd.patch b/debian/patches/features/all/rt/gpu-i915-allow-the-user-not-to-do-the-wbinvd.patch new file mode 100644 index 000000000..b8703259c --- /dev/null +++ b/debian/patches/features/all/rt/gpu-i915-allow-the-user-not-to-do-the-wbinvd.patch @@ -0,0 +1,62 @@ +From b580b7eedc8ee3990b118003c4793291387c40ac Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Fri, 21 Jun 2013 11:38:28 +0200 +Subject: [PATCH] gpu: i915: allow the user not to do the wbinvd +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +The wbinvd() renders the system with i915 unusable on RT. Using this +expensive instruction avoids GPU trouble according to + https://bugs.freedesktop.org/show_bug.cgi?id=62191 + +As a workaround for RT it is recommended to pin each GPU related process +to the same CPU and then disable this instruction via the module +paramter. + +Cc: stable-rt@vger.kernel.org +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/gpu/drm/i915/i915_gem.c | 17 +++++++++++++++-- + 1 file changed, 15 insertions(+), 2 deletions(-) + +Index: linux-stable/drivers/gpu/drm/i915/i915_gem.c +=================================================================== +--- linux-stable.orig/drivers/gpu/drm/i915/i915_gem.c ++++ linux-stable/drivers/gpu/drm/i915/i915_gem.c +@@ -35,6 +35,7 @@ + #include + #include + #include ++#include + + static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); + static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); +@@ -2676,6 +2677,10 @@ static inline int fence_number(struct dr + return fence - dev_priv->fence_regs; + } + ++static bool do_wbinvd = true; ++module_param(do_wbinvd, bool, 0644); ++MODULE_PARM_DESC(do_wbinvd, "Do expensive synchronization. Say no after you pin each GPU process to the same CPU in order to lower the latency."); ++ + static void i915_gem_write_fence__ipi(void *data) + { + wbinvd(); +@@ -2699,8 +2704,16 @@ static void i915_gem_object_update_fence + * on each processor in order to manually flush all memory + * transactions before updating the fence register. + */ +- if (HAS_LLC(obj->base.dev)) +- on_each_cpu(i915_gem_write_fence__ipi, NULL, 1); ++ if (HAS_LLC(obj->base.dev)) { ++ if (do_wbinvd) { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ pr_err_once("WARNING! The i915 invalidates all caches which increases the latency."); ++ pr_err_once("As a workaround use 'i915.do_wbinvd=no' and PIN each process doing "); ++ pr_err_once("any kind of GPU activity to the same CPU to avoid problems."); ++#endif ++ on_each_cpu(i915_gem_write_fence__ipi, NULL, 1); ++ } ++ } + i915_gem_write_fence(dev, fence_reg, enable ? 
obj : NULL); + + if (enable) { diff --git a/debian/patches/features/all/rt/harirq-h-define-softirq_count-as-oul-to-kill-build-warning.patch b/debian/patches/features/all/rt/harirq-h-define-softirq_count-as-oul-to-kill-build-warning.patch new file mode 100644 index 000000000..4178f9326 --- /dev/null +++ b/debian/patches/features/all/rt/harirq-h-define-softirq_count-as-oul-to-kill-build-warning.patch @@ -0,0 +1,38 @@ +Subject: hardirq.h: Define softirq_count() as OUL to kill build warning +From: Yong Zhang +Date: Thu, 13 Oct 2011 17:19:09 +0800 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +kernel/lockdep.c: In function ‘print_bad_irq_dependency’: +kernel/lockdep.c:1476:3: warning: format ‘%lu’ expects type ‘long unsigned int’, but argument 7 has type ‘unsigned int’ +kernel/lockdep.c: In function ‘print_usage_bug’: +kernel/lockdep.c:2193:3: warning: format ‘%lu’ expects type ‘long unsigned int’, but argument 7 has type ‘unsigned int’ + +kernel/lockdep.i show this: + printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n", + curr->comm, task_pid_nr(curr), + curr->hardirq_context, ((current_thread_info()->preempt_count) & (((1UL << (10))-1) << ((0 + 8) + 8))) >> ((0 + 8) + 8), + curr->softirq_context, (0U) >> (0 + 8), + curr->hardirqs_enabled, + curr->softirqs_enabled); + +Signed-off-by: Yong Zhang +Link: http://lkml.kernel.org/r/20111013091909.GA32739@zhy +Signed-off-by: Thomas Gleixner +--- + include/linux/hardirq.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: linux-stable/include/linux/hardirq.h +=================================================================== +--- linux-stable.orig/include/linux/hardirq.h ++++ linux-stable/include/linux/hardirq.h +@@ -85,7 +85,7 @@ + # define softirq_count() (preempt_count() & SOFTIRQ_MASK) + # define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) + #else +-# define softirq_count() (0U) ++# define softirq_count() (0UL) + extern int in_serving_softirq(void); + #endif + diff --git a/debian/patches/features/all/rt/hotplug-call-cpu_unplug_begin-a-little-early.patch b/debian/patches/features/all/rt/hotplug-call-cpu_unplug_begin-a-little-early.patch new file mode 100644 index 000000000..2e8af538a --- /dev/null +++ b/debian/patches/features/all/rt/hotplug-call-cpu_unplug_begin-a-little-early.patch @@ -0,0 +1,62 @@ +Subject: hotplug: Call cpu_unplug_begin() before DOWN_PREPARE +From: Yong Zhang +Date: Sun, 16 Oct 2011 18:56:44 +0800 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +cpu_unplug_begin() should be called before CPU_DOWN_PREPARE, because +at CPU_DOWN_PREPARE cpu_active is cleared and sched_domain is +rebuilt. Otherwise the 'sync_unplug' thread will be running on the cpu +on which it's created and not bound on the cpu which is about to go +down. + +I found that by an incorrect warning on smp_processor_id() called by +sync_unplug/1, and trace shows below: +(echo 1 > /sys/device/system/cpu/cpu1/online) + bash-1664 [000] 83.136620: _cpu_down: Bind sync_unplug to cpu 1 + bash-1664 [000] 83.136623: sched_wait_task: comm=sync_unplug/1 pid=1724 prio=120 + bash-1664 [000] 83.136624: _cpu_down: Wake sync_unplug + bash-1664 [000] 83.136629: sched_wakeup: comm=sync_unplug/1 pid=1724 prio=120 success=1 target_cpu=000 + +Wants to be folded back.... 
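
(For clarity, the call order this patch establishes in _cpu_down() looks roughly like the sketch
below, condensed from the hunk that follows; most error handling is omitted.)

    cpu_hotplug_begin();
    err = cpu_unplug_begin(cpu);    /* sync_unplug/<cpu> is created and bound
                                     * while <cpu> is still marked active    */
    if (err)
        goto out_cancel;

    err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
    /* only now may cpu_active be cleared and the sched domains rebuilt */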
+ +Signed-off-by: Yong Zhang +Link: http://lkml.kernel.org/r/1318762607-2261-3-git-send-email-yong.zhang0@gmail.com +Signed-off-by: Thomas Gleixner +--- + kernel/cpu.c | 16 +++++++--------- + 1 file changed, 7 insertions(+), 9 deletions(-) + +Index: linux-stable/kernel/cpu.c +=================================================================== +--- linux-stable.orig/kernel/cpu.c ++++ linux-stable/kernel/cpu.c +@@ -411,22 +411,20 @@ static int __ref _cpu_down(unsigned int + return -EBUSY; + } + +- err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); ++ cpu_hotplug_begin(); ++ err = cpu_unplug_begin(cpu); + if (err) { +- nr_calls--; +- __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL); +- printk("%s: attempt to take down CPU %u failed\n", +- __func__, cpu); ++ printk("cpu_unplug_begin(%d) failed\n", cpu); + goto out_cancel; + } + +- cpu_hotplug_begin(); +- err = cpu_unplug_begin(cpu); ++ err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); + if (err) { + nr_calls--; + __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL); +- printk("cpu_unplug_begin(%d) failed\n", cpu); +- goto out_cancel; ++ printk("%s: attempt to take down CPU %u failed\n", ++ __func__, cpu); ++ goto out_release; + } + smpboot_park_threads(cpu); + diff --git a/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch b/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch new file mode 100644 index 000000000..e22d81086 --- /dev/null +++ b/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch @@ -0,0 +1,232 @@ +Subject: hotplug: Lightweight get online cpus +From: Thomas Gleixner +Date: Wed, 15 Jun 2011 12:36:06 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +get_online_cpus() is a heavy weight function which involves a global +mutex. migrate_disable() wants a simpler construct which prevents only +a CPU from going doing while a task is in a migrate disabled section. + +Implement a per cpu lockless mechanism, which serializes only in the +real unplug case on a global mutex. That serialization affects only +tasks on the cpu which should be brought down. + +Signed-off-by: Thomas Gleixner +--- + include/linux/cpu.h | 12 +++- + kernel/cpu.c | 126 ++++++++++++++++++++++++++++++++++++++++++++++++++-- + 2 files changed, 131 insertions(+), 7 deletions(-) + +Index: linux-stable/include/linux/cpu.h +=================================================================== +--- linux-stable.orig/include/linux/cpu.h ++++ linux-stable/include/linux/cpu.h +@@ -1,15 +1,15 @@ + /* + * include/linux/cpu.h - generic cpu definition + * +- * This is mainly for topological representation. We define the +- * basic 'struct cpu' here, which can be embedded in per-arch ++ * This is mainly for topological representation. We define the ++ * basic 'struct cpu' here, which can be embedded in per-arch + * definitions of processors. + * + * Basic handling of the devices is done in drivers/base/cpu.c +- * and system devices are handled in drivers/base/sys.c. ++ * and system devices are handled in drivers/base/sys.c. + * + * CPUs are exported via sysfs in the class/cpu/devices/ +- * directory. ++ * directory. 
+ */ + #ifndef _LINUX_CPU_H_ + #define _LINUX_CPU_H_ +@@ -177,6 +177,8 @@ extern void get_online_cpus(void); + extern void put_online_cpus(void); + extern void cpu_hotplug_disable(void); + extern void cpu_hotplug_enable(void); ++extern void pin_current_cpu(void); ++extern void unpin_current_cpu(void); + #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri) + #define register_hotcpu_notifier(nb) register_cpu_notifier(nb) + #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) +@@ -202,6 +204,8 @@ static inline void cpu_hotplug_driver_un + #define put_online_cpus() do { } while (0) + #define cpu_hotplug_disable() do { } while (0) + #define cpu_hotplug_enable() do { } while (0) ++static inline void pin_current_cpu(void) { } ++static inline void unpin_current_cpu(void) { } + #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) + /* These aren't inline functions due to a GCC bug. */ + #define register_hotcpu_notifier(nb) ({ (void)(nb); 0; }) +Index: linux-stable/kernel/cpu.c +=================================================================== +--- linux-stable.orig/kernel/cpu.c ++++ linux-stable/kernel/cpu.c +@@ -63,6 +63,101 @@ static struct { + .refcount = 0, + }; + ++struct hotplug_pcp { ++ struct task_struct *unplug; ++ int refcount; ++ struct completion synced; ++}; ++ ++static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp); ++ ++/** ++ * pin_current_cpu - Prevent the current cpu from being unplugged ++ * ++ * Lightweight version of get_online_cpus() to prevent cpu from being ++ * unplugged when code runs in a migration disabled region. ++ * ++ * Must be called with preemption disabled (preempt_count = 1)! ++ */ ++void pin_current_cpu(void) ++{ ++ struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp); ++ ++retry: ++ if (!hp->unplug || hp->refcount || preempt_count() > 1 || ++ hp->unplug == current) { ++ hp->refcount++; ++ return; ++ } ++ preempt_enable(); ++ mutex_lock(&cpu_hotplug.lock); ++ mutex_unlock(&cpu_hotplug.lock); ++ preempt_disable(); ++ goto retry; ++} ++ ++/** ++ * unpin_current_cpu - Allow unplug of current cpu ++ * ++ * Must be called with preemption or interrupts disabled! ++ */ ++void unpin_current_cpu(void) ++{ ++ struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp); ++ ++ WARN_ON(hp->refcount <= 0); ++ ++ /* This is safe. sync_unplug_thread is pinned to this cpu */ ++ if (!--hp->refcount && hp->unplug && hp->unplug != current) ++ wake_up_process(hp->unplug); ++} ++ ++/* ++ * FIXME: Is this really correct under all circumstances ? ++ */ ++static int sync_unplug_thread(void *data) ++{ ++ struct hotplug_pcp *hp = data; ++ ++ preempt_disable(); ++ hp->unplug = current; ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ while (hp->refcount) { ++ schedule_preempt_disabled(); ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ } ++ set_current_state(TASK_RUNNING); ++ preempt_enable(); ++ complete(&hp->synced); ++ return 0; ++} ++ ++/* ++ * Start the sync_unplug_thread on the target cpu and wait for it to ++ * complete. 
++ */ ++static int cpu_unplug_begin(unsigned int cpu) ++{ ++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); ++ struct task_struct *tsk; ++ ++ init_completion(&hp->synced); ++ tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d\n", cpu); ++ if (IS_ERR(tsk)) ++ return (PTR_ERR(tsk)); ++ kthread_bind(tsk, cpu); ++ wake_up_process(tsk); ++ wait_for_completion(&hp->synced); ++ return 0; ++} ++ ++static void cpu_unplug_done(unsigned int cpu) ++{ ++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); ++ ++ hp->unplug = NULL; ++} ++ + void get_online_cpus(void) + { + might_sleep(); +@@ -285,13 +380,14 @@ static int __ref take_cpu_down(void *_pa + /* Requires cpu_add_remove_lock to be held */ + static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) + { +- int err, nr_calls = 0; ++ int mycpu, err, nr_calls = 0; + void *hcpu = (void *)(long)cpu; + unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; + struct take_cpu_down_param tcd_param = { + .mod = mod, + .hcpu = hcpu, + }; ++ cpumask_var_t cpumask; + + if (num_online_cpus() == 1) + return -EBUSY; +@@ -299,7 +395,20 @@ static int __ref _cpu_down(unsigned int + if (!cpu_online(cpu)) + return -EINVAL; + +- cpu_hotplug_begin(); ++ /* Move the downtaker off the unplug cpu */ ++ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL)) ++ return -ENOMEM; ++ cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu)); ++ set_cpus_allowed_ptr(current, cpumask); ++ free_cpumask_var(cpumask); ++ preempt_disable(); ++ mycpu = smp_processor_id(); ++ if (mycpu == cpu) { ++ printk(KERN_ERR "Yuck! Still on unplug CPU\n!"); ++ preempt_enable(); ++ return -EBUSY; ++ } ++ preempt_enable(); + + err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); + if (err) { +@@ -307,7 +416,16 @@ static int __ref _cpu_down(unsigned int + __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL); + printk("%s: attempt to take down CPU %u failed\n", + __func__, cpu); +- goto out_release; ++ goto out_cancel; ++ } ++ ++ cpu_hotplug_begin(); ++ err = cpu_unplug_begin(cpu); ++ if (err) { ++ nr_calls--; ++ __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL); ++ printk("cpu_unplug_begin(%d) failed\n", cpu); ++ goto out_cancel; + } + smpboot_park_threads(cpu); + +@@ -339,6 +457,8 @@ static int __ref _cpu_down(unsigned int + check_for_tasks(cpu); + + out_release: ++ cpu_unplug_done(cpu); ++out_cancel: + cpu_hotplug_done(); + if (!err) + cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu); diff --git a/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch b/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch new file mode 100644 index 000000000..4f7f93fac --- /dev/null +++ b/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch @@ -0,0 +1,27 @@ +Subject: hotplug: sync_unplug: No "\n" in task name +From: Yong Zhang +Date: Sun, 16 Oct 2011 18:56:43 +0800 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Otherwise the output will look a little odd. 
+ +Signed-off-by: Yong Zhang +Link: http://lkml.kernel.org/r/1318762607-2261-2-git-send-email-yong.zhang0@gmail.com +Signed-off-by: Thomas Gleixner +--- + kernel/cpu.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: linux-stable/kernel/cpu.c +=================================================================== +--- linux-stable.orig/kernel/cpu.c ++++ linux-stable/kernel/cpu.c +@@ -142,7 +142,7 @@ static int cpu_unplug_begin(unsigned int + struct task_struct *tsk; + + init_completion(&hp->synced); +- tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d\n", cpu); ++ tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu); + if (IS_ERR(tsk)) + return (PTR_ERR(tsk)); + kthread_bind(tsk, cpu); diff --git a/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch b/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch new file mode 100644 index 000000000..505f04d60 --- /dev/null +++ b/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch @@ -0,0 +1,39 @@ +Subject: hotplug-use-migrate-disable.patch +From: Thomas Gleixner +Date: Sun, 17 Jul 2011 19:35:29 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/cpu.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +Index: linux-stable/kernel/cpu.c +=================================================================== +--- linux-stable.orig/kernel/cpu.c ++++ linux-stable/kernel/cpu.c +@@ -403,14 +403,13 @@ static int __ref _cpu_down(unsigned int + cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu)); + set_cpus_allowed_ptr(current, cpumask); + free_cpumask_var(cpumask); +- preempt_disable(); ++ migrate_disable(); + mycpu = smp_processor_id(); + if (mycpu == cpu) { + printk(KERN_ERR "Yuck! Still on unplug CPU\n!"); +- preempt_enable(); ++ migrate_enable(); + return -EBUSY; + } +- preempt_enable(); + + err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); + if (err) { +@@ -461,6 +460,7 @@ static int __ref _cpu_down(unsigned int + out_release: + cpu_unplug_done(cpu); + out_cancel: ++ migrate_enable(); + cpu_hotplug_done(); + if (!err) + cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu); diff --git a/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch b/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch new file mode 100644 index 000000000..978450ea5 --- /dev/null +++ b/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch @@ -0,0 +1,465 @@ +From: Thomas Gleixner +Date: Fri, 3 Jul 2009 08:44:31 -0500 +Subject: hrtimer: fixup hrtimer callback changes for preempt-rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +In preempt-rt we can not call the callbacks which take sleeping locks +from the timer interrupt context. + +Bring back the softirq split for now, until we fixed the signal +delivery problem for real. 
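
(To show how a timer owner opts back into hard-interrupt execution under this split, here is a
minimal kernel-module sketch. It assumes this RT patch is applied -- the irqsafe field does not
exist in mainline 3.10 -- and every name in it is hypothetical.)

    #include <linux/module.h>
    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    static struct hrtimer demo_timer;

    /*
     * With irqsafe set the callback still runs from hrtimer_interrupt(),
     * so on -rt it must not take sleeping locks; without irqsafe the
     * callback is queued on base->expired and run from HRTIMER_SOFTIRQ.
     */
    static enum hrtimer_restart demo_fn(struct hrtimer *t)
    {
        return HRTIMER_NORESTART;
    }

    static int __init demo_init(void)
    {
        hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        demo_timer.function = demo_fn;
        demo_timer.irqsafe = 1;    /* field added by this patch */
        hrtimer_start(&demo_timer, ktime_set(1, 0), HRTIMER_MODE_REL);
        return 0;
    }

    static void __exit demo_exit(void)
    {
        hrtimer_cancel(&demo_timer);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");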
+ +Signed-off-by: Thomas Gleixner +Signed-off-by: Ingo Molnar + +--- + include/linux/hrtimer.h | 3 + kernel/hrtimer.c | 216 ++++++++++++++++++++++++++++++++++++++++------- + kernel/sched/core.c | 1 + kernel/sched/rt.c | 1 + kernel/time/tick-sched.c | 1 + kernel/watchdog.c | 1 + 6 files changed, 195 insertions(+), 28 deletions(-) + +Index: linux-stable/include/linux/hrtimer.h +=================================================================== +--- linux-stable.orig/include/linux/hrtimer.h ++++ linux-stable/include/linux/hrtimer.h +@@ -111,6 +111,8 @@ struct hrtimer { + enum hrtimer_restart (*function)(struct hrtimer *); + struct hrtimer_clock_base *base; + unsigned long state; ++ struct list_head cb_entry; ++ int irqsafe; + #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST + ktime_t praecox; + #endif +@@ -150,6 +152,7 @@ struct hrtimer_clock_base { + int index; + clockid_t clockid; + struct timerqueue_head active; ++ struct list_head expired; + ktime_t resolution; + ktime_t (*get_time)(void); + ktime_t softirq_time; +Index: linux-stable/kernel/hrtimer.c +=================================================================== +--- linux-stable.orig/kernel/hrtimer.c ++++ linux-stable/kernel/hrtimer.c +@@ -607,8 +607,7 @@ static int hrtimer_reprogram(struct hrti + * When the callback is running, we do not reprogram the clock event + * device. The timer callback is either running on a different CPU or + * the callback is executed in the hrtimer_interrupt context. The +- * reprogramming is handled either by the softirq, which called the +- * callback or at the end of the hrtimer_interrupt. ++ * reprogramming is handled at the end of the hrtimer_interrupt. + */ + if (hrtimer_callback_running(timer)) + return 0; +@@ -643,6 +642,9 @@ static int hrtimer_reprogram(struct hrti + return res; + } + ++static void __run_hrtimer(struct hrtimer *timer, ktime_t *now); ++static int hrtimer_rt_defer(struct hrtimer *timer); ++ + /* + * Initialize the high resolution related parts of cpu_base + */ +@@ -659,9 +661,18 @@ static inline void hrtimer_init_hres(str + * and expiry check is done in the hrtimer_interrupt or in the softirq. 
+ */ + static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, +- struct hrtimer_clock_base *base) ++ struct hrtimer_clock_base *base, ++ int wakeup) + { +- return base->cpu_base->hres_active && hrtimer_reprogram(timer, base); ++ if (!(base->cpu_base->hres_active && hrtimer_reprogram(timer, base))) ++ return 0; ++ if (!wakeup) ++ return -ETIME; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ if (!hrtimer_rt_defer(timer)) ++ return -ETIME; ++#endif ++ return 1; + } + + static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base) +@@ -746,12 +757,18 @@ static inline int hrtimer_switch_to_hres + static inline void + hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { } + static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, +- struct hrtimer_clock_base *base) ++ struct hrtimer_clock_base *base, ++ int wakeup) + { + return 0; + } + static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { } + static inline void retrigger_next_event(void *arg) { } ++static inline int hrtimer_reprogram(struct hrtimer *timer, ++ struct hrtimer_clock_base *base) ++{ ++ return 0; ++} + + #endif /* CONFIG_HIGH_RES_TIMERS */ + +@@ -885,9 +902,9 @@ void hrtimer_wait_for_timer(const struct + { + struct hrtimer_clock_base *base = timer->base; + +- if (base && base->cpu_base && !hrtimer_hres_active(base->cpu_base)) ++ if (base && base->cpu_base && !timer->irqsafe) + wait_event(base->cpu_base->wait, +- !(timer->state & HRTIMER_STATE_CALLBACK)); ++ !(timer->state & HRTIMER_STATE_CALLBACK)); + } + + #else +@@ -937,6 +954,11 @@ static void __remove_hrtimer(struct hrti + if (!(timer->state & HRTIMER_STATE_ENQUEUED)) + goto out; + ++ if (unlikely(!list_empty(&timer->cb_entry))) { ++ list_del_init(&timer->cb_entry); ++ goto out; ++ } ++ + next_timer = timerqueue_getnext(&base->active); + timerqueue_del(&base->active, &timer->node); + if (&timer->node == next_timer) { +@@ -1044,9 +1066,19 @@ int __hrtimer_start_range_ns(struct hrti + * + * XXX send_remote_softirq() ? + */ +- if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases) +- && hrtimer_enqueue_reprogram(timer, new_base)) { +- if (wakeup) { ++ if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)) { ++ ret = hrtimer_enqueue_reprogram(timer, new_base, wakeup); ++ if (ret < 0) { ++ /* ++ * In case we failed to reprogram the timer (mostly ++ * because out current timer is already elapsed), ++ * remove it again and report a failure. This avoids ++ * stale base->first entries. ++ */ ++ debug_deactivate(timer); ++ __remove_hrtimer(timer, new_base, ++ timer->state & HRTIMER_STATE_CALLBACK, 0); ++ } else if (ret > 0) { + /* + * We need to drop cpu_base->lock to avoid a + * lock ordering issue vs. rq->lock. 
+@@ -1054,9 +1086,7 @@ int __hrtimer_start_range_ns(struct hrti + raw_spin_unlock(&new_base->cpu_base->lock); + raise_softirq_irqoff(HRTIMER_SOFTIRQ); + local_irq_restore(flags); +- return ret; +- } else { +- __raise_softirq_irqoff(HRTIMER_SOFTIRQ); ++ return 0; + } + } + +@@ -1225,6 +1255,7 @@ static void __hrtimer_init(struct hrtime + + base = hrtimer_clockid_to_base(clock_id); + timer->base = &cpu_base->clock_base[base]; ++ INIT_LIST_HEAD(&timer->cb_entry); + timerqueue_init(&timer->node); + + #ifdef CONFIG_TIMER_STATS +@@ -1308,10 +1339,128 @@ static void __run_hrtimer(struct hrtimer + timer->state &= ~HRTIMER_STATE_CALLBACK; + } + +-#ifdef CONFIG_HIGH_RES_TIMERS +- + static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer); + ++#ifdef CONFIG_PREEMPT_RT_BASE ++static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer, ++ struct hrtimer_clock_base *base) ++{ ++ /* ++ * Note, we clear the callback flag before we requeue the ++ * timer otherwise we trigger the callback_running() check ++ * in hrtimer_reprogram(). ++ */ ++ timer->state &= ~HRTIMER_STATE_CALLBACK; ++ ++ if (restart != HRTIMER_NORESTART) { ++ BUG_ON(hrtimer_active(timer)); ++ /* ++ * Enqueue the timer, if it's the leftmost timer then ++ * we need to reprogram it. ++ */ ++ if (!enqueue_hrtimer(timer, base)) ++ return; ++ ++#ifndef CONFIG_HIGH_RES_TIMERS ++ } ++#else ++ if (base->cpu_base->hres_active && ++ hrtimer_reprogram(timer, base)) ++ goto requeue; ++ ++ } else if (hrtimer_active(timer)) { ++ /* ++ * If the timer was rearmed on another CPU, reprogram ++ * the event device. ++ */ ++ if (&timer->node == base->active.next && ++ base->cpu_base->hres_active && ++ hrtimer_reprogram(timer, base)) ++ goto requeue; ++ } ++ return; ++ ++requeue: ++ /* ++ * Timer is expired. Thus move it from tree to pending list ++ * again. ++ */ ++ __remove_hrtimer(timer, base, timer->state, 0); ++ list_add_tail(&timer->cb_entry, &base->expired); ++#endif ++} ++ ++/* ++ * The changes in mainline which removed the callback modes from ++ * hrtimer are not yet working with -rt. The non wakeup_process() ++ * based callbacks which involve sleeping locks need to be treated ++ * seperately. ++ */ ++static void hrtimer_rt_run_pending(void) ++{ ++ enum hrtimer_restart (*fn)(struct hrtimer *); ++ struct hrtimer_cpu_base *cpu_base; ++ struct hrtimer_clock_base *base; ++ struct hrtimer *timer; ++ int index, restart; ++ ++ local_irq_disable(); ++ cpu_base = &per_cpu(hrtimer_bases, smp_processor_id()); ++ ++ raw_spin_lock(&cpu_base->lock); ++ ++ for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) { ++ base = &cpu_base->clock_base[index]; ++ ++ while (!list_empty(&base->expired)) { ++ timer = list_first_entry(&base->expired, ++ struct hrtimer, cb_entry); ++ ++ /* ++ * Same as the above __run_hrtimer function ++ * just we run with interrupts enabled. 
++ */ ++ debug_hrtimer_deactivate(timer); ++ __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0); ++ timer_stats_account_hrtimer(timer); ++ fn = timer->function; ++ ++ raw_spin_unlock_irq(&cpu_base->lock); ++ restart = fn(timer); ++ raw_spin_lock_irq(&cpu_base->lock); ++ ++ hrtimer_rt_reprogram(restart, timer, base); ++ } ++ } ++ ++ raw_spin_unlock_irq(&cpu_base->lock); ++ ++ wake_up_timer_waiters(cpu_base); ++} ++ ++static int hrtimer_rt_defer(struct hrtimer *timer) ++{ ++ if (timer->irqsafe) ++ return 0; ++ ++ __remove_hrtimer(timer, timer->base, timer->state, 0); ++ list_add_tail(&timer->cb_entry, &timer->base->expired); ++ return 1; ++} ++ ++#else ++ ++static inline void hrtimer_rt_run_pending(void) ++{ ++ hrtimer_peek_ahead_timers(); ++} ++ ++static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; } ++ ++#endif ++ ++#ifdef CONFIG_HIGH_RES_TIMERS ++ + /* + * High resolution timer interrupt + * Called with interrupts disabled +@@ -1320,7 +1469,7 @@ void hrtimer_interrupt(struct clock_even + { + struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); + ktime_t expires_next, now, entry_time, delta; +- int i, retries = 0; ++ int i, retries = 0, raise = 0; + + BUG_ON(!cpu_base->hres_active); + cpu_base->nr_events++; +@@ -1389,7 +1538,10 @@ retry: + break; + } + +- __run_hrtimer(timer, &basenow); ++ if (!hrtimer_rt_defer(timer)) ++ __run_hrtimer(timer, &basenow); ++ else ++ raise = 1; + } + } + +@@ -1404,6 +1556,10 @@ retry: + if (expires_next.tv64 == KTIME_MAX || + !tick_program_event(expires_next, 0)) { + cpu_base->hang_detected = 0; ++ ++ if (raise) ++ raise_softirq_irqoff(HRTIMER_SOFTIRQ); ++ + return; + } + +@@ -1483,18 +1639,18 @@ void hrtimer_peek_ahead_timers(void) + __hrtimer_peek_ahead_timers(); + local_irq_restore(flags); + } +- +-static void run_hrtimer_softirq(struct softirq_action *h) +-{ +- hrtimer_peek_ahead_timers(); +-} +- + #else /* CONFIG_HIGH_RES_TIMERS */ + + static inline void __hrtimer_peek_ahead_timers(void) { } + + #endif /* !CONFIG_HIGH_RES_TIMERS */ + ++ ++static void run_hrtimer_softirq(struct softirq_action *h) ++{ ++ hrtimer_rt_run_pending(); ++} ++ + /* + * Called from timer softirq every jiffy, expire hrtimers: + * +@@ -1527,7 +1683,7 @@ void hrtimer_run_queues(void) + struct timerqueue_node *node; + struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); + struct hrtimer_clock_base *base; +- int index, gettime = 1; ++ int index, gettime = 1, raise = 0; + + if (hrtimer_hres_active()) + return; +@@ -1552,12 +1708,16 @@ void hrtimer_run_queues(void) + hrtimer_get_expires_tv64(timer)) + break; + +- __run_hrtimer(timer, &base->softirq_time); ++ if (!hrtimer_rt_defer(timer)) ++ __run_hrtimer(timer, &base->softirq_time); ++ else ++ raise = 1; + } + raw_spin_unlock(&cpu_base->lock); + } + +- wake_up_timer_waiters(cpu_base); ++ if (raise) ++ raise_softirq_irqoff(HRTIMER_SOFTIRQ); + } + + /* +@@ -1579,6 +1739,7 @@ static enum hrtimer_restart hrtimer_wake + void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) + { + sl->timer.function = hrtimer_wakeup; ++ sl->timer.irqsafe = 1; + sl->task = task; + } + EXPORT_SYMBOL_GPL(hrtimer_init_sleeper); +@@ -1715,6 +1876,7 @@ static void __cpuinit init_hrtimers_cpu( + for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { + cpu_base->clock_base[i].cpu_base = cpu_base; + timerqueue_init_head(&cpu_base->clock_base[i].active); ++ INIT_LIST_HEAD(&cpu_base->clock_base[i].expired); + } + + hrtimer_init_hres(cpu_base); +@@ -1833,9 +1995,7 @@ void __init hrtimers_init(void) + 
hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE, + (void *)(long)smp_processor_id()); + register_cpu_notifier(&hrtimers_nb); +-#ifdef CONFIG_HIGH_RES_TIMERS + open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq); +-#endif + } + + /** +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -489,6 +489,7 @@ static void init_rq_hrtick(struct rq *rq + + hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + rq->hrtick_timer.function = hrtick; ++ rq->hrtick_timer.irqsafe = 1; + } + #else /* CONFIG_SCHED_HRTICK */ + static inline void hrtick_clear(struct rq *rq) +Index: linux-stable/kernel/sched/rt.c +=================================================================== +--- linux-stable.orig/kernel/sched/rt.c ++++ linux-stable/kernel/sched/rt.c +@@ -43,6 +43,7 @@ void init_rt_bandwidth(struct rt_bandwid + + hrtimer_init(&rt_b->rt_period_timer, + CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ rt_b->rt_period_timer.irqsafe = 1; + rt_b->rt_period_timer.function = sched_rt_period_timer; + } + +Index: linux-stable/kernel/time/tick-sched.c +=================================================================== +--- linux-stable.orig/kernel/time/tick-sched.c ++++ linux-stable/kernel/time/tick-sched.c +@@ -1133,6 +1133,7 @@ void tick_setup_sched_timer(void) + * Emulate tick processing via per-CPU hrtimers: + */ + hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); ++ ts->sched_timer.irqsafe = 1; + ts->sched_timer.function = tick_sched_timer; + + /* Get the next period (per cpu) */ +Index: linux-stable/kernel/watchdog.c +=================================================================== +--- linux-stable.orig/kernel/watchdog.c ++++ linux-stable/kernel/watchdog.c +@@ -357,6 +357,7 @@ static void watchdog_enable(unsigned int + /* kick off the timer for the hardlockup detector */ + hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer->function = watchdog_timer_fn; ++ hrtimer->irqsafe = 1; + + if (!watchdog_enabled) { + kthread_park(current); diff --git a/debian/patches/features/all/rt/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch b/debian/patches/features/all/rt/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch new file mode 100644 index 000000000..fd9f62a80 --- /dev/null +++ b/debian/patches/features/all/rt/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch @@ -0,0 +1,40 @@ +Subject: hrtimer: Raise softirq if hrtimer irq stalled +From: Watanabe +Date: Sun, 28 Oct 2012 11:13:44 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +When the hrtimer stall detection hits the softirq is not raised. 
+ +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + kernel/hrtimer.c | 9 ++++----- + 1 file changed, 4 insertions(+), 5 deletions(-) + +Index: linux-stable/kernel/hrtimer.c +=================================================================== +--- linux-stable.orig/kernel/hrtimer.c ++++ linux-stable/kernel/hrtimer.c +@@ -1551,11 +1551,7 @@ retry: + if (expires_next.tv64 == KTIME_MAX || + !tick_program_event(expires_next, 0)) { + cpu_base->hang_detected = 0; +- +- if (raise) +- raise_softirq_irqoff(HRTIMER_SOFTIRQ); +- +- return; ++ goto out; + } + + /* +@@ -1599,6 +1595,9 @@ retry: + tick_program_event(expires_next, 1); + printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n", + ktime_to_ns(delta)); ++out: ++ if (raise) ++ raise_softirq_irqoff(HRTIMER_SOFTIRQ); + } + + /* diff --git a/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch b/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch new file mode 100644 index 000000000..81ede1975 --- /dev/null +++ b/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch @@ -0,0 +1,204 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:29:34 -0500 +Subject: hrtimers: prepare full preemption +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Make cancellation of a running callback in softirq context safe +against preemption. + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + include/linux/hrtimer.h | 10 ++++++++++ + kernel/hrtimer.c | 33 ++++++++++++++++++++++++++++++++- + kernel/itimer.c | 1 + + kernel/posix-timers.c | 33 +++++++++++++++++++++++++++++++++ + 4 files changed, 76 insertions(+), 1 deletion(-) + +Index: linux-stable/include/linux/hrtimer.h +=================================================================== +--- linux-stable.orig/include/linux/hrtimer.h ++++ linux-stable/include/linux/hrtimer.h +@@ -193,6 +193,9 @@ struct hrtimer_cpu_base { + unsigned long nr_hangs; + ktime_t max_hang_time; + #endif ++#ifdef CONFIG_PREEMPT_RT_BASE ++ wait_queue_head_t wait; ++#endif + struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; + }; + +@@ -388,6 +391,13 @@ static inline int hrtimer_restart(struct + return hrtimer_start_expires(timer, HRTIMER_MODE_ABS); + } + ++/* Softirq preemption could deadlock timer removal */ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ extern void hrtimer_wait_for_timer(const struct hrtimer *timer); ++#else ++# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0) ++#endif ++ + /* Query timers: */ + extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer); + extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp); +Index: linux-stable/kernel/hrtimer.c +=================================================================== +--- linux-stable.orig/kernel/hrtimer.c ++++ linux-stable/kernel/hrtimer.c +@@ -863,6 +863,32 @@ u64 hrtimer_forward(struct hrtimer *time + } + EXPORT_SYMBOL_GPL(hrtimer_forward); + ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define wake_up_timer_waiters(b) wake_up(&(b)->wait) ++ ++/** ++ * hrtimer_wait_for_timer - Wait for a running timer ++ * ++ * @timer: timer to wait for ++ * ++ * The function waits in case the timers callback function is ++ * currently executed on the waitqueue of the timer base. The ++ * waitqueue is woken up after the timer callback function has ++ * finished execution. 
++ */ ++void hrtimer_wait_for_timer(const struct hrtimer *timer) ++{ ++ struct hrtimer_clock_base *base = timer->base; ++ ++ if (base && base->cpu_base && !hrtimer_hres_active(base->cpu_base)) ++ wait_event(base->cpu_base->wait, ++ !(timer->state & HRTIMER_STATE_CALLBACK)); ++} ++ ++#else ++# define wake_up_timer_waiters(b) do { } while (0) ++#endif ++ + /* + * enqueue_hrtimer - internal function to (re)start a timer + * +@@ -1115,7 +1141,7 @@ int hrtimer_cancel(struct hrtimer *timer + + if (ret >= 0) + return ret; +- cpu_relax(); ++ hrtimer_wait_for_timer(timer); + } + } + EXPORT_SYMBOL_GPL(hrtimer_cancel); +@@ -1532,6 +1558,8 @@ void hrtimer_run_queues(void) + } + raw_spin_unlock(&cpu_base->lock); + } ++ ++ wake_up_timer_waiters(cpu_base); + } + + /* +@@ -1692,6 +1720,9 @@ static void __cpuinit init_hrtimers_cpu( + } + + hrtimer_init_hres(cpu_base); ++#ifdef CONFIG_PREEMPT_RT_BASE ++ init_waitqueue_head(&cpu_base->wait); ++#endif + } + + #ifdef CONFIG_HOTPLUG_CPU +Index: linux-stable/kernel/itimer.c +=================================================================== +--- linux-stable.orig/kernel/itimer.c ++++ linux-stable/kernel/itimer.c +@@ -213,6 +213,7 @@ again: + /* We are sharing ->siglock with it_real_fn() */ + if (hrtimer_try_to_cancel(timer) < 0) { + spin_unlock_irq(&tsk->sighand->siglock); ++ hrtimer_wait_for_timer(&tsk->signal->real_timer); + goto again; + } + expires = timeval_to_ktime(value->it_value); +Index: linux-stable/kernel/posix-timers.c +=================================================================== +--- linux-stable.orig/kernel/posix-timers.c ++++ linux-stable/kernel/posix-timers.c +@@ -818,6 +818,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_ + return overrun; + } + ++/* ++ * Protected by RCU! ++ */ ++static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr) ++{ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (kc->timer_set == common_timer_set) ++ hrtimer_wait_for_timer(&timr->it.real.timer); ++ else ++ /* FIXME: Whacky hack for posix-cpu-timers */ ++ schedule_timeout(1); ++#endif ++} ++ + /* Set a POSIX.1b interval timer. */ + /* timr->it_lock is taken. */ + static int +@@ -895,6 +909,7 @@ retry: + if (!timr) + return -EINVAL; + ++ rcu_read_lock(); + kc = clockid_to_kclock(timr->it_clock); + if (WARN_ON_ONCE(!kc || !kc->timer_set)) + error = -EINVAL; +@@ -903,9 +918,12 @@ retry: + + unlock_timer(timr, flag); + if (error == TIMER_RETRY) { ++ timer_wait_for_callback(kc, timr); + rtn = NULL; // We already got the old time... 
++ rcu_read_unlock(); + goto retry; + } ++ rcu_read_unlock(); + + if (old_setting && !error && + copy_to_user(old_setting, &old_spec, sizeof (old_spec))) +@@ -943,10 +961,15 @@ retry_delete: + if (!timer) + return -EINVAL; + ++ rcu_read_lock(); + if (timer_delete_hook(timer) == TIMER_RETRY) { + unlock_timer(timer, flags); ++ timer_wait_for_callback(clockid_to_kclock(timer->it_clock), ++ timer); ++ rcu_read_unlock(); + goto retry_delete; + } ++ rcu_read_unlock(); + + spin_lock(¤t->sighand->siglock); + list_del(&timer->list); +@@ -972,8 +995,18 @@ static void itimer_delete(struct k_itime + retry_delete: + spin_lock_irqsave(&timer->it_lock, flags); + ++ /* On RT we can race with a deletion */ ++ if (!timer->it_signal) { ++ unlock_timer(timer, flags); ++ return; ++ } ++ + if (timer_delete_hook(timer) == TIMER_RETRY) { ++ rcu_read_lock(); + unlock_timer(timer, flags); ++ timer_wait_for_callback(clockid_to_kclock(timer->it_clock), ++ timer); ++ rcu_read_unlock(); + goto retry_delete; + } + list_del(&timer->list); diff --git a/debian/patches/features/all/rt/hwlatdetect.patch b/debian/patches/features/all/rt/hwlatdetect.patch new file mode 100644 index 000000000..c98aec402 --- /dev/null +++ b/debian/patches/features/all/rt/hwlatdetect.patch @@ -0,0 +1,1353 @@ +Subject: hwlatdetect.patch +From: Carsten Emde +Date: Tue, 19 Jul 2011 13:53:12 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Jon Masters developed this wonderful SMI detector. For details please +consult Documentation/hwlat_detector.txt. It could be ported to Linux +3.0 RT without any major change. + +Signed-off-by: Carsten Emde + +--- + Documentation/hwlat_detector.txt | 64 ++ + drivers/misc/Kconfig | 29 + drivers/misc/Makefile | 1 + drivers/misc/hwlat_detector.c | 1212 +++++++++++++++++++++++++++++++++++++++ + 4 files changed, 1306 insertions(+) + +Index: linux-stable/Documentation/hwlat_detector.txt +=================================================================== +--- /dev/null ++++ linux-stable/Documentation/hwlat_detector.txt +@@ -0,0 +1,64 @@ ++Introduction: ++------------- ++ ++The module hwlat_detector is a special purpose kernel module that is used to ++detect large system latencies induced by the behavior of certain underlying ++hardware or firmware, independent of Linux itself. The code was developed ++originally to detect SMIs (System Management Interrupts) on x86 systems, ++however there is nothing x86 specific about this patchset. It was ++originally written for use by the "RT" patch since the Real Time ++kernel is highly latency sensitive. ++ ++SMIs are usually not serviced by the Linux kernel, which typically does not ++even know that they are occuring. SMIs are instead are set up by BIOS code ++and are serviced by BIOS code, usually for "critical" events such as ++management of thermal sensors and fans. Sometimes though, SMIs are used for ++other tasks and those tasks can spend an inordinate amount of time in the ++handler (sometimes measured in milliseconds). Obviously this is a problem if ++you are trying to keep event service latencies down in the microsecond range. ++ ++The hardware latency detector works by hogging all of the cpus for configurable ++amounts of time (by calling stop_machine()), polling the CPU Time Stamp Counter ++for some period, then looking for gaps in the TSC data. Any gap indicates a ++time when the polling was interrupted and since the machine is stopped and ++interrupts turned off the only thing that could do that would be an SMI. 
++ ++Note that the SMI detector should *NEVER* be used in a production environment. ++It is intended to be run manually to determine if the hardware platform has a ++problem with long system firmware service routines. ++ ++Usage: ++------ ++ ++Loading the module hwlat_detector passing the parameter "enabled=1" (or by ++setting the "enable" entry in "hwlat_detector" debugfs toggled on) is the only ++step required to start the hwlat_detector. It is possible to redefine the ++threshold in microseconds (us) above which latency spikes will be taken ++into account (parameter "threshold="). ++ ++Example: ++ ++ # modprobe hwlat_detector enabled=1 threshold=100 ++ ++After the module is loaded, it creates a directory named "hwlat_detector" under ++the debugfs mountpoint, "/debug/hwlat_detector" for this text. It is necessary ++to have debugfs mounted, which might be on /sys/debug on your system. ++ ++The /debug/hwlat_detector interface contains the following files: ++ ++count - number of latency spikes observed since last reset ++enable - a global enable/disable toggle (0/1), resets count ++max - maximum hardware latency actually observed (usecs) ++sample - a pipe from which to read current raw sample data ++ in the format ++ (can be opened O_NONBLOCK for a single sample) ++threshold - minimum latency value to be considered (usecs) ++width - time period to sample with CPUs held (usecs) ++ must be less than the total window size (enforced) ++window - total period of sampling, width being inside (usecs) ++ ++By default we will set width to 500,000 and window to 1,000,000, meaning that ++we will sample every 1,000,000 usecs (1s) for 500,000 usecs (0.5s). If we ++observe any latencies that exceed the threshold (initially 100 usecs), ++then we write to a global sample ring buffer of 8K samples, which is ++consumed by reading from the "sample" (pipe) debugfs file interface. +Index: linux-stable/drivers/misc/Kconfig +=================================================================== +--- linux-stable.orig/drivers/misc/Kconfig ++++ linux-stable/drivers/misc/Kconfig +@@ -130,6 +130,35 @@ config IBM_ASM + for information on the specific driver level and support statement + for your IBM server. + ++config HWLAT_DETECTOR ++ tristate "Testing module to detect hardware-induced latencies" ++ depends on DEBUG_FS ++ depends on RING_BUFFER ++ default m ++ ---help--- ++ A simple hardware latency detector. Use this module to detect ++ large latencies introduced by the behavior of the underlying ++ system firmware external to Linux. We do this using periodic ++ use of stop_machine to grab all available CPUs and measure ++ for unexplainable gaps in the CPU timestamp counter(s). By ++ default, the module is not enabled until the "enable" file ++ within the "hwlat_detector" debugfs directory is toggled. ++ ++ This module is often used to detect SMI (System Management ++ Interrupts) on x86 systems, though is not x86 specific. To ++ this end, we default to using a sample window of 1 second, ++ during which we will sample for 0.5 seconds. If an SMI or ++ similar event occurs during that time, it is recorded ++ into an 8K samples global ring buffer until retreived. ++ ++ WARNING: This software should never be enabled (it can be built ++ but should not be turned on after it is loaded) in a production ++ environment where high latencies are a concern since the ++ sampling mechanism actually introduces latencies for ++ regular tasks while the CPU(s) are being held. 
++ ++ If unsure, say N ++ + config PHANTOM + tristate "Sensable PHANToM (PCI)" + depends on PCI +Index: linux-stable/drivers/misc/Makefile +=================================================================== +--- linux-stable.orig/drivers/misc/Makefile ++++ linux-stable/drivers/misc/Makefile +@@ -53,3 +53,4 @@ obj-$(CONFIG_INTEL_MEI) += mei/ + obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/ + obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o + obj-$(CONFIG_SRAM) += sram.o ++obj-$(CONFIG_HWLAT_DETECTOR) += hwlat_detector.o +Index: linux-stable/drivers/misc/hwlat_detector.c +=================================================================== +--- /dev/null ++++ linux-stable/drivers/misc/hwlat_detector.c +@@ -0,0 +1,1212 @@ ++/* ++ * hwlat_detector.c - A simple Hardware Latency detector. ++ * ++ * Use this module to detect large system latencies induced by the behavior of ++ * certain underlying system hardware or firmware, independent of Linux itself. ++ * The code was developed originally to detect the presence of SMIs on Intel ++ * and AMD systems, although there is no dependency upon x86 herein. ++ * ++ * The classical example usage of this module is in detecting the presence of ++ * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a ++ * somewhat special form of hardware interrupt spawned from earlier CPU debug ++ * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge ++ * LPC (or other device) to generate a special interrupt under certain ++ * circumstances, for example, upon expiration of a special SMI timer device, ++ * due to certain external thermal readings, on certain I/O address accesses, ++ * and other situations. An SMI hits a special CPU pin, triggers a special ++ * SMI mode (complete with special memory map), and the OS is unaware. ++ * ++ * Although certain hardware-inducing latencies are necessary (for example, ++ * a modern system often requires an SMI handler for correct thermal control ++ * and remote management) they can wreak havoc upon any OS-level performance ++ * guarantees toward low-latency, especially when the OS is not even made ++ * aware of the presence of these interrupts. For this reason, we need a ++ * somewhat brute force mechanism to detect these interrupts. In this case, ++ * we do it by hogging all of the CPU(s) for configurable timer intervals, ++ * sampling the built-in CPU timer, looking for discontiguous readings. ++ * ++ * WARNING: This implementation necessarily introduces latencies. Therefore, ++ * you should NEVER use this module in a production environment ++ * requiring any kind of low-latency performance guarantee(s). ++ * ++ * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. ++ * ++ * Includes useful feedback from Clark Williams ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define BUF_SIZE_DEFAULT 262144UL /* 8K*(sizeof(entry)) */ ++#define BUF_FLAGS (RB_FL_OVERWRITE) /* no block on full */ ++#define U64STR_SIZE 22 /* 20 digits max */ ++ ++#define VERSION "1.0.0" ++#define BANNER "hwlat_detector: " ++#define DRVNAME "hwlat_detector" ++#define DEFAULT_SAMPLE_WINDOW 1000000 /* 1s */ ++#define DEFAULT_SAMPLE_WIDTH 500000 /* 0.5s */ ++#define DEFAULT_LAT_THRESHOLD 10 /* 10us */ ++ ++/* Module metadata */ ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Jon Masters "); ++MODULE_DESCRIPTION("A simple hardware latency detector"); ++MODULE_VERSION(VERSION); ++ ++/* Module parameters */ ++ ++static int debug; ++static int enabled; ++static int threshold; ++ ++module_param(debug, int, 0); /* enable debug */ ++module_param(enabled, int, 0); /* enable detector */ ++module_param(threshold, int, 0); /* latency threshold */ ++ ++/* Buffering and sampling */ ++ ++static struct ring_buffer *ring_buffer; /* sample buffer */ ++static DEFINE_MUTEX(ring_buffer_mutex); /* lock changes */ ++static unsigned long buf_size = BUF_SIZE_DEFAULT; ++static struct task_struct *kthread; /* sampling thread */ ++ ++/* DebugFS filesystem entries */ ++ ++static struct dentry *debug_dir; /* debugfs directory */ ++static struct dentry *debug_max; /* maximum TSC delta */ ++static struct dentry *debug_count; /* total detect count */ ++static struct dentry *debug_sample_width; /* sample width us */ ++static struct dentry *debug_sample_window; /* sample window us */ ++static struct dentry *debug_sample; /* raw samples us */ ++static struct dentry *debug_threshold; /* threshold us */ ++static struct dentry *debug_enable; /* enable/disable */ ++ ++/* Individual samples and global state */ ++ ++struct sample; /* latency sample */ ++struct data; /* Global state */ ++ ++/* Sampling functions */ ++static int __buffer_add_sample(struct sample *sample); ++static struct sample *buffer_get_sample(struct sample *sample); ++static int get_sample(void *unused); ++ ++/* Threading and state */ ++static int kthread_fn(void *unused); ++static int start_kthread(void); ++static int stop_kthread(void); ++static void __reset_stats(void); ++static int init_stats(void); ++ ++/* Debugfs interface */ ++static ssize_t simple_data_read(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos, const u64 *entry); ++static ssize_t simple_data_write(struct file *filp, const char __user *ubuf, ++ size_t cnt, loff_t *ppos, u64 *entry); ++static int debug_sample_fopen(struct inode *inode, struct file *filp); ++static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos); ++static int debug_sample_release(struct inode *inode, struct file *filp); ++static int debug_enable_fopen(struct inode *inode, struct file *filp); ++static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos); ++static ssize_t debug_enable_fwrite(struct file *file, ++ const char __user *user_buffer, ++ size_t user_size, loff_t *offset); ++ ++/* Initialization functions */ ++static int init_debugfs(void); ++static void free_debugfs(void); ++static int detector_init(void); ++static void detector_exit(void); ++ ++/* Individual latency samples are stored here when detected and packed into ++ * the ring_buffer circular buffer, where they are overwritten when ++ * more than buf_size/sizeof(sample) samples are received. 
*/ ++struct sample { ++ u64 seqnum; /* unique sequence */ ++ u64 duration; /* ktime delta */ ++ struct timespec timestamp; /* wall time */ ++ unsigned long lost; ++}; ++ ++/* keep the global state somewhere. Mostly used under stop_machine. */ ++static struct data { ++ ++ struct mutex lock; /* protect changes */ ++ ++ u64 count; /* total since reset */ ++ u64 max_sample; /* max hardware latency */ ++ u64 threshold; /* sample threshold level */ ++ ++ u64 sample_window; /* total sampling window (on+off) */ ++ u64 sample_width; /* active sampling portion of window */ ++ ++ atomic_t sample_open; /* whether the sample file is open */ ++ ++ wait_queue_head_t wq; /* waitqeue for new sample values */ ++ ++} data; ++ ++/** ++ * __buffer_add_sample - add a new latency sample recording to the ring buffer ++ * @sample: The new latency sample value ++ * ++ * This receives a new latency sample and records it in a global ring buffer. ++ * No additional locking is used in this case - suited for stop_machine use. ++ */ ++static int __buffer_add_sample(struct sample *sample) ++{ ++ return ring_buffer_write(ring_buffer, ++ sizeof(struct sample), sample); ++} ++ ++/** ++ * buffer_get_sample - remove a hardware latency sample from the ring buffer ++ * @sample: Pre-allocated storage for the sample ++ * ++ * This retrieves a hardware latency sample from the global circular buffer ++ */ ++static struct sample *buffer_get_sample(struct sample *sample) ++{ ++ struct ring_buffer_event *e = NULL; ++ struct sample *s = NULL; ++ unsigned int cpu = 0; ++ ++ if (!sample) ++ return NULL; ++ ++ mutex_lock(&ring_buffer_mutex); ++ for_each_online_cpu(cpu) { ++ e = ring_buffer_consume(ring_buffer, cpu, NULL, &sample->lost); ++ if (e) ++ break; ++ } ++ ++ if (e) { ++ s = ring_buffer_event_data(e); ++ memcpy(sample, s, sizeof(struct sample)); ++ } else ++ sample = NULL; ++ mutex_unlock(&ring_buffer_mutex); ++ ++ return sample; ++} ++ ++/** ++ * get_sample - sample the CPU TSC and look for likely hardware latencies ++ * @unused: This is not used but is a part of the stop_machine API ++ * ++ * Used to repeatedly capture the CPU TSC (or similar), looking for potential ++ * hardware-induced latency. Called under stop_machine, with data.lock held. ++ */ ++static int get_sample(void *unused) ++{ ++ ktime_t start, t1, t2; ++ s64 diff, total = 0; ++ u64 sample = 0; ++ int ret = 1; ++ ++ start = ktime_get(); /* start timestamp */ ++ ++ do { ++ ++ t1 = ktime_get(); /* we'll look for a discontinuity */ ++ t2 = ktime_get(); ++ ++ total = ktime_to_us(ktime_sub(t2, start)); /* sample width */ ++ diff = ktime_to_us(ktime_sub(t2, t1)); /* current diff */ ++ ++ /* This shouldn't happen */ ++ if (diff < 0) { ++ printk(KERN_ERR BANNER "time running backwards\n"); ++ goto out; ++ } ++ ++ if (diff > sample) ++ sample = diff; /* only want highest value */ ++ ++ } while (total <= data.sample_width); ++ ++ /* If we exceed the threshold value, we have found a hardware latency */ ++ if (sample > data.threshold) { ++ struct sample s; ++ ++ data.count++; ++ s.seqnum = data.count; ++ s.duration = sample; ++ s.timestamp = CURRENT_TIME; ++ __buffer_add_sample(&s); ++ ++ /* Keep a running maximum ever recorded hardware latency */ ++ if (sample > data.max_sample) ++ data.max_sample = sample; ++ } ++ ++ ret = 0; ++out: ++ return ret; ++} ++ ++/* ++ * kthread_fn - The CPU time sampling/hardware latency detection kernel thread ++ * @unused: A required part of the kthread API. ++ * ++ * Used to periodically sample the CPU TSC via a call to get_sample. 
We ++ * use stop_machine, whith does (intentionally) introduce latency since we ++ * need to ensure nothing else might be running (and thus pre-empting). ++ * Obviously this should never be used in production environments. ++ * ++ * stop_machine will schedule us typically only on CPU0 which is fine for ++ * almost every real-world hardware latency situation - but we might later ++ * generalize this if we find there are any actualy systems with alternate ++ * SMI delivery or other non CPU0 hardware latencies. ++ */ ++static int kthread_fn(void *unused) ++{ ++ int err = 0; ++ u64 interval = 0; ++ ++ while (!kthread_should_stop()) { ++ ++ mutex_lock(&data.lock); ++ ++ err = stop_machine(get_sample, unused, 0); ++ if (err) { ++ /* Houston, we have a problem */ ++ mutex_unlock(&data.lock); ++ goto err_out; ++ } ++ ++ wake_up(&data.wq); /* wake up reader(s) */ ++ ++ interval = data.sample_window - data.sample_width; ++ do_div(interval, USEC_PER_MSEC); /* modifies interval value */ ++ ++ mutex_unlock(&data.lock); ++ ++ if (msleep_interruptible(interval)) ++ goto out; ++ } ++ goto out; ++err_out: ++ printk(KERN_ERR BANNER "could not call stop_machine, disabling\n"); ++ enabled = 0; ++out: ++ return err; ++ ++} ++ ++/** ++ * start_kthread - Kick off the hardware latency sampling/detector kthread ++ * ++ * This starts a kernel thread that will sit and sample the CPU timestamp ++ * counter (TSC or similar) and look for potential hardware latencies. ++ */ ++static int start_kthread(void) ++{ ++ kthread = kthread_run(kthread_fn, NULL, ++ DRVNAME); ++ if (IS_ERR(kthread)) { ++ printk(KERN_ERR BANNER "could not start sampling thread\n"); ++ enabled = 0; ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++/** ++ * stop_kthread - Inform the hardware latency samping/detector kthread to stop ++ * ++ * This kicks the running hardware latency sampling/detector kernel thread and ++ * tells it to stop sampling now. Use this on unload and at system shutdown. ++ */ ++static int stop_kthread(void) ++{ ++ int ret; ++ ++ ret = kthread_stop(kthread); ++ ++ return ret; ++} ++ ++/** ++ * __reset_stats - Reset statistics for the hardware latency detector ++ * ++ * We use data to store various statistics and global state. We call this ++ * function in order to reset those when "enable" is toggled on or off, and ++ * also at initialization. Should be called with data.lock held. ++ */ ++static void __reset_stats(void) ++{ ++ data.count = 0; ++ data.max_sample = 0; ++ ring_buffer_reset(ring_buffer); /* flush out old sample entries */ ++} ++ ++/** ++ * init_stats - Setup global state statistics for the hardware latency detector ++ * ++ * We use data to store various statistics and global state. We also use ++ * a global ring buffer (ring_buffer) to keep raw samples of detected hardware ++ * induced system latencies. This function initializes these structures and ++ * allocates the global ring buffer also. 
++ */ ++static int init_stats(void) ++{ ++ int ret = -ENOMEM; ++ ++ mutex_init(&data.lock); ++ init_waitqueue_head(&data.wq); ++ atomic_set(&data.sample_open, 0); ++ ++ ring_buffer = ring_buffer_alloc(buf_size, BUF_FLAGS); ++ ++ if (WARN(!ring_buffer, KERN_ERR BANNER ++ "failed to allocate ring buffer!\n")) ++ goto out; ++ ++ __reset_stats(); ++ data.threshold = DEFAULT_LAT_THRESHOLD; /* threshold us */ ++ data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */ ++ data.sample_width = DEFAULT_SAMPLE_WIDTH; /* width us */ ++ ++ ret = 0; ++ ++out: ++ return ret; ++ ++} ++ ++/* ++ * simple_data_read - Wrapper read function for global state debugfs entries ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * @entry: The entry to read from ++ * ++ * This function provides a generic read implementation for the global state ++ * "data" structure debugfs filesystem entries. It would be nice to use ++ * simple_attr_read directly, but we need to make sure that the data.lock ++ * spinlock is held during the actual read (even though we likely won't ever ++ * actually race here as the updater runs under a stop_machine context). ++ */ ++static ssize_t simple_data_read(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos, const u64 *entry) ++{ ++ char buf[U64STR_SIZE]; ++ u64 val = 0; ++ int len = 0; ++ ++ memset(buf, 0, sizeof(buf)); ++ ++ if (!entry) ++ return -EFAULT; ++ ++ mutex_lock(&data.lock); ++ val = *entry; ++ mutex_unlock(&data.lock); ++ ++ len = snprintf(buf, sizeof(buf), "%llu\n", (unsigned long long)val); ++ ++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); ++ ++} ++ ++/* ++ * simple_data_write - Wrapper write function for global state debugfs entries ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to write value from ++ * @cnt: The maximum number of bytes to write ++ * @ppos: The current "file" position ++ * @entry: The entry to write to ++ * ++ * This function provides a generic write implementation for the global state ++ * "data" structure debugfs filesystem entries. It would be nice to use ++ * simple_attr_write directly, but we need to make sure that the data.lock ++ * spinlock is held during the actual write (even though we likely won't ever ++ * actually race here as the updater runs under a stop_machine context). ++ */ ++static ssize_t simple_data_write(struct file *filp, const char __user *ubuf, ++ size_t cnt, loff_t *ppos, u64 *entry) ++{ ++ char buf[U64STR_SIZE]; ++ int csize = min(cnt, sizeof(buf)); ++ u64 val = 0; ++ int err = 0; ++ ++ memset(buf, '\0', sizeof(buf)); ++ if (copy_from_user(buf, ubuf, csize)) ++ return -EFAULT; ++ ++ buf[U64STR_SIZE-1] = '\0'; /* just in case */ ++ err = strict_strtoull(buf, 10, &val); ++ if (err) ++ return -EINVAL; ++ ++ mutex_lock(&data.lock); ++ *entry = val; ++ mutex_unlock(&data.lock); ++ ++ return csize; ++} ++ ++/** ++ * debug_count_fopen - Open function for "count" debugfs entry ++ * @inode: The in-kernel inode representation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function provides an open implementation for the "count" debugfs ++ * interface to the hardware latency detector. 
++ */ ++static int debug_count_fopen(struct inode *inode, struct file *filp) ++{ ++ return 0; ++} ++ ++/** ++ * debug_count_fread - Read function for "count" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * ++ * This function provides a read implementation for the "count" debugfs ++ * interface to the hardware latency detector. Can be used to read the ++ * number of latency readings exceeding the configured threshold since ++ * the detector was last reset (e.g. by writing a zero into "count"). ++ */ ++static ssize_t debug_count_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ return simple_data_read(filp, ubuf, cnt, ppos, &data.count); ++} ++ ++/** ++ * debug_count_fwrite - Write function for "count" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that contains the value to write ++ * @cnt: The maximum number of bytes to write to "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function provides a write implementation for the "count" debugfs ++ * interface to the hardware latency detector. Can be used to write a ++ * desired value, especially to zero the total count. ++ */ ++static ssize_t debug_count_fwrite(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, ++ loff_t *ppos) ++{ ++ return simple_data_write(filp, ubuf, cnt, ppos, &data.count); ++} ++ ++/** ++ * debug_enable_fopen - Dummy open function for "enable" debugfs interface ++ * @inode: The in-kernel inode representation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function provides an open implementation for the "enable" debugfs ++ * interface to the hardware latency detector. ++ */ ++static int debug_enable_fopen(struct inode *inode, struct file *filp) ++{ ++ return 0; ++} ++ ++/** ++ * debug_enable_fread - Read function for "enable" debugfs interface ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * ++ * This function provides a read implementation for the "enable" debugfs ++ * interface to the hardware latency detector. Can be used to determine ++ * whether the detector is currently enabled ("0\n" or "1\n" returned). ++ */ ++static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ char buf[4]; ++ ++ if ((cnt < sizeof(buf)) || (*ppos)) ++ return 0; ++ ++ buf[0] = enabled ? '1' : '0'; ++ buf[1] = '\n'; ++ buf[2] = '\0'; ++ if (copy_to_user(ubuf, buf, strlen(buf))) ++ return -EFAULT; ++ return *ppos = strlen(buf); ++} ++ ++/** ++ * debug_enable_fwrite - Write function for "enable" debugfs interface ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that contains the value to write ++ * @cnt: The maximum number of bytes to write to "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function provides a write implementation for the "enable" debugfs ++ * interface to the hardware latency detector. 
Can be used to enable or ++ * disable the detector, which will have the side-effect of possibly ++ * also resetting the global stats and kicking off the measuring ++ * kthread (on an enable) or the converse (upon a disable). ++ */ ++static ssize_t debug_enable_fwrite(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, ++ loff_t *ppos) ++{ ++ char buf[4]; ++ int csize = min(cnt, sizeof(buf)); ++ long val = 0; ++ int err = 0; ++ ++ memset(buf, '\0', sizeof(buf)); ++ if (copy_from_user(buf, ubuf, csize)) ++ return -EFAULT; ++ ++ buf[sizeof(buf)-1] = '\0'; /* just in case */ ++ err = strict_strtoul(buf, 10, &val); ++ if (0 != err) ++ return -EINVAL; ++ ++ if (val) { ++ if (enabled) ++ goto unlock; ++ enabled = 1; ++ __reset_stats(); ++ if (start_kthread()) ++ return -EFAULT; ++ } else { ++ if (!enabled) ++ goto unlock; ++ enabled = 0; ++ err = stop_kthread(); ++ if (err) { ++ printk(KERN_ERR BANNER "cannot stop kthread\n"); ++ return -EFAULT; ++ } ++ wake_up(&data.wq); /* reader(s) should return */ ++ } ++unlock: ++ return csize; ++} ++ ++/** ++ * debug_max_fopen - Open function for "max" debugfs entry ++ * @inode: The in-kernel inode representation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function provides an open implementation for the "max" debugfs ++ * interface to the hardware latency detector. ++ */ ++static int debug_max_fopen(struct inode *inode, struct file *filp) ++{ ++ return 0; ++} ++ ++/** ++ * debug_max_fread - Read function for "max" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * ++ * This function provides a read implementation for the "max" debugfs ++ * interface to the hardware latency detector. Can be used to determine ++ * the maximum latency value observed since it was last reset. ++ */ ++static ssize_t debug_max_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ return simple_data_read(filp, ubuf, cnt, ppos, &data.max_sample); ++} ++ ++/** ++ * debug_max_fwrite - Write function for "max" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that contains the value to write ++ * @cnt: The maximum number of bytes to write to "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function provides a write implementation for the "max" debugfs ++ * interface to the hardware latency detector. Can be used to reset the ++ * maximum or set it to some other desired value - if, then, subsequent ++ * measurements exceed this value, the maximum will be updated. ++ */ ++static ssize_t debug_max_fwrite(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, ++ loff_t *ppos) ++{ ++ return simple_data_write(filp, ubuf, cnt, ppos, &data.max_sample); ++} ++ ++ ++/** ++ * debug_sample_fopen - An open function for "sample" debugfs interface ++ * @inode: The in-kernel inode representation of this debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function handles opening the "sample" file within the hardware ++ * latency detector debugfs directory interface. This file is used to read ++ * raw samples from the global ring_buffer and allows the user to see a ++ * running latency history. 
Can be opened blocking or non-blocking, ++ * affecting whether it behaves as a buffer read pipe, or does not. ++ * Implements simple locking to prevent multiple simultaneous use. ++ */ ++static int debug_sample_fopen(struct inode *inode, struct file *filp) ++{ ++ if (!atomic_add_unless(&data.sample_open, 1, 1)) ++ return -EBUSY; ++ else ++ return 0; ++} ++ ++/** ++ * debug_sample_fread - A read function for "sample" debugfs interface ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that will contain the samples read ++ * @cnt: The maximum bytes to read from the debugfs "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function handles reading from the "sample" file within the hardware ++ * latency detector debugfs directory interface. This file is used to read ++ * raw samples from the global ring_buffer and allows the user to see a ++ * running latency history. By default this will block pending a new ++ * value written into the sample buffer, unless there are already a ++ * number of value(s) waiting in the buffer, or the sample file was ++ * previously opened in a non-blocking mode of operation. ++ */ ++static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ int len = 0; ++ char buf[64]; ++ struct sample *sample = NULL; ++ ++ if (!enabled) ++ return 0; ++ ++ sample = kzalloc(sizeof(struct sample), GFP_KERNEL); ++ if (!sample) ++ return -ENOMEM; ++ ++ while (!buffer_get_sample(sample)) { ++ ++ DEFINE_WAIT(wait); ++ ++ if (filp->f_flags & O_NONBLOCK) { ++ len = -EAGAIN; ++ goto out; ++ } ++ ++ prepare_to_wait(&data.wq, &wait, TASK_INTERRUPTIBLE); ++ schedule(); ++ finish_wait(&data.wq, &wait); ++ ++ if (signal_pending(current)) { ++ len = -EINTR; ++ goto out; ++ } ++ ++ if (!enabled) { /* enable was toggled */ ++ len = 0; ++ goto out; ++ } ++ } ++ ++ len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\n", ++ sample->timestamp.tv_sec, ++ sample->timestamp.tv_nsec, ++ sample->duration); ++ ++ ++ /* handling partial reads is more trouble than it's worth */ ++ if (len > cnt) ++ goto out; ++ ++ if (copy_to_user(ubuf, buf, len)) ++ len = -EFAULT; ++ ++out: ++ kfree(sample); ++ return len; ++} ++ ++/** ++ * debug_sample_release - Release function for "sample" debugfs interface ++ * @inode: The in-kernel inode represenation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function completes the close of the debugfs interface "sample" file. ++ * Frees the sample_open "lock" so that other users may open the interface. ++ */ ++static int debug_sample_release(struct inode *inode, struct file *filp) ++{ ++ atomic_dec(&data.sample_open); ++ ++ return 0; ++} ++ ++/** ++ * debug_threshold_fopen - Open function for "threshold" debugfs entry ++ * @inode: The in-kernel inode representation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function provides an open implementation for the "threshold" debugfs ++ * interface to the hardware latency detector. 
++ */ ++static int debug_threshold_fopen(struct inode *inode, struct file *filp) ++{ ++ return 0; ++} ++ ++/** ++ * debug_threshold_fread - Read function for "threshold" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * ++ * This function provides a read implementation for the "threshold" debugfs ++ * interface to the hardware latency detector. It can be used to determine ++ * the current threshold level at which a latency will be recorded in the ++ * global ring buffer, typically on the order of 10us. ++ */ ++static ssize_t debug_threshold_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ return simple_data_read(filp, ubuf, cnt, ppos, &data.threshold); ++} ++ ++/** ++ * debug_threshold_fwrite - Write function for "threshold" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that contains the value to write ++ * @cnt: The maximum number of bytes to write to "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function provides a write implementation for the "threshold" debugfs ++ * interface to the hardware latency detector. It can be used to configure ++ * the threshold level at which any subsequently detected latencies will ++ * be recorded into the global ring buffer. ++ */ ++static ssize_t debug_threshold_fwrite(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, ++ loff_t *ppos) ++{ ++ int ret; ++ ++ ret = simple_data_write(filp, ubuf, cnt, ppos, &data.threshold); ++ ++ if (enabled) ++ wake_up_process(kthread); ++ ++ return ret; ++} ++ ++/** ++ * debug_width_fopen - Open function for "width" debugfs entry ++ * @inode: The in-kernel inode representation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function provides an open implementation for the "width" debugfs ++ * interface to the hardware latency detector. ++ */ ++static int debug_width_fopen(struct inode *inode, struct file *filp) ++{ ++ return 0; ++} ++ ++/** ++ * debug_width_fread - Read function for "width" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * ++ * This function provides a read implementation for the "width" debugfs ++ * interface to the hardware latency detector. It can be used to determine ++ * for how many us of the total window us we will actively sample for any ++ * hardware-induced latecy periods. Obviously, it is not possible to ++ * sample constantly and have the system respond to a sample reader, or, ++ * worse, without having the system appear to have gone out to lunch. 
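++ *
++ * As a worked example with the defaults: a 1000000 us window and a
++ * 500000 us width mean each window busy-samples for roughly half a
++ * second under stop_machine, after which kthread_fn() sleeps for
++ * (window - width) / USEC_PER_MSEC = 500 ms before the next window.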
++ */ ++static ssize_t debug_width_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_width); ++} ++ ++/** ++ * debug_width_fwrite - Write function for "width" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that contains the value to write ++ * @cnt: The maximum number of bytes to write to "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function provides a write implementation for the "width" debugfs ++ * interface to the hardware latency detector. It can be used to configure ++ * for how many us of the total window us we will actively sample for any ++ * hardware-induced latency periods. Obviously, it is not possible to ++ * sample constantly and have the system respond to a sample reader, or, ++ * worse, without having the system appear to have gone out to lunch. It ++ * is enforced that width is less that the total window size. ++ */ ++static ssize_t debug_width_fwrite(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, ++ loff_t *ppos) ++{ ++ char buf[U64STR_SIZE]; ++ int csize = min(cnt, sizeof(buf)); ++ u64 val = 0; ++ int err = 0; ++ ++ memset(buf, '\0', sizeof(buf)); ++ if (copy_from_user(buf, ubuf, csize)) ++ return -EFAULT; ++ ++ buf[U64STR_SIZE-1] = '\0'; /* just in case */ ++ err = strict_strtoull(buf, 10, &val); ++ if (0 != err) ++ return -EINVAL; ++ ++ mutex_lock(&data.lock); ++ if (val < data.sample_window) ++ data.sample_width = val; ++ else { ++ mutex_unlock(&data.lock); ++ return -EINVAL; ++ } ++ mutex_unlock(&data.lock); ++ ++ if (enabled) ++ wake_up_process(kthread); ++ ++ return csize; ++} ++ ++/** ++ * debug_window_fopen - Open function for "window" debugfs entry ++ * @inode: The in-kernel inode representation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function provides an open implementation for the "window" debugfs ++ * interface to the hardware latency detector. The window is the total time ++ * in us that will be considered one sample period. Conceptually, windows ++ * occur back-to-back and contain a sample width period during which ++ * actual sampling occurs. ++ */ ++static int debug_window_fopen(struct inode *inode, struct file *filp) ++{ ++ return 0; ++} ++ ++/** ++ * debug_window_fread - Read function for "window" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * ++ * This function provides a read implementation for the "window" debugfs ++ * interface to the hardware latency detector. The window is the total time ++ * in us that will be considered one sample period. Conceptually, windows ++ * occur back-to-back and contain a sample width period during which ++ * actual sampling occurs. Can be used to read the total window size. 
++ */ ++static ssize_t debug_window_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_window); ++} ++ ++/** ++ * debug_window_fwrite - Write function for "window" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that contains the value to write ++ * @cnt: The maximum number of bytes to write to "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function provides a write implementation for the "window" debufds ++ * interface to the hardware latency detetector. The window is the total time ++ * in us that will be considered one sample period. Conceptually, windows ++ * occur back-to-back and contain a sample width period during which ++ * actual sampling occurs. Can be used to write a new total window size. It ++ * is enfoced that any value written must be greater than the sample width ++ * size, or an error results. ++ */ ++static ssize_t debug_window_fwrite(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, ++ loff_t *ppos) ++{ ++ char buf[U64STR_SIZE]; ++ int csize = min(cnt, sizeof(buf)); ++ u64 val = 0; ++ int err = 0; ++ ++ memset(buf, '\0', sizeof(buf)); ++ if (copy_from_user(buf, ubuf, csize)) ++ return -EFAULT; ++ ++ buf[U64STR_SIZE-1] = '\0'; /* just in case */ ++ err = strict_strtoull(buf, 10, &val); ++ if (0 != err) ++ return -EINVAL; ++ ++ mutex_lock(&data.lock); ++ if (data.sample_width < val) ++ data.sample_window = val; ++ else { ++ mutex_unlock(&data.lock); ++ return -EINVAL; ++ } ++ mutex_unlock(&data.lock); ++ ++ return csize; ++} ++ ++/* ++ * Function pointers for the "count" debugfs file operations ++ */ ++static const struct file_operations count_fops = { ++ .open = debug_count_fopen, ++ .read = debug_count_fread, ++ .write = debug_count_fwrite, ++ .owner = THIS_MODULE, ++}; ++ ++/* ++ * Function pointers for the "enable" debugfs file operations ++ */ ++static const struct file_operations enable_fops = { ++ .open = debug_enable_fopen, ++ .read = debug_enable_fread, ++ .write = debug_enable_fwrite, ++ .owner = THIS_MODULE, ++}; ++ ++/* ++ * Function pointers for the "max" debugfs file operations ++ */ ++static const struct file_operations max_fops = { ++ .open = debug_max_fopen, ++ .read = debug_max_fread, ++ .write = debug_max_fwrite, ++ .owner = THIS_MODULE, ++}; ++ ++/* ++ * Function pointers for the "sample" debugfs file operations ++ */ ++static const struct file_operations sample_fops = { ++ .open = debug_sample_fopen, ++ .read = debug_sample_fread, ++ .release = debug_sample_release, ++ .owner = THIS_MODULE, ++}; ++ ++/* ++ * Function pointers for the "threshold" debugfs file operations ++ */ ++static const struct file_operations threshold_fops = { ++ .open = debug_threshold_fopen, ++ .read = debug_threshold_fread, ++ .write = debug_threshold_fwrite, ++ .owner = THIS_MODULE, ++}; ++ ++/* ++ * Function pointers for the "width" debugfs file operations ++ */ ++static const struct file_operations width_fops = { ++ .open = debug_width_fopen, ++ .read = debug_width_fread, ++ .write = debug_width_fwrite, ++ .owner = THIS_MODULE, ++}; ++ ++/* ++ * Function pointers for the "window" debugfs file operations ++ */ ++static const struct file_operations window_fops = { ++ .open = debug_window_fopen, ++ .read = debug_window_fread, ++ .write = debug_window_fwrite, ++ .owner = THIS_MODULE, ++}; ++ ++/** ++ * init_debugfs - A function to initialize the debugfs interface files ++ * ++ * This 
function creates entries in debugfs for "hwlat_detector", including ++ * files to read values from the detector, current samples, and the ++ * maximum sample that has been captured since the hardware latency ++ * dectector was started. ++ */ ++static int init_debugfs(void) ++{ ++ int ret = -ENOMEM; ++ ++ debug_dir = debugfs_create_dir(DRVNAME, NULL); ++ if (!debug_dir) ++ goto err_debug_dir; ++ ++ debug_sample = debugfs_create_file("sample", 0444, ++ debug_dir, NULL, ++ &sample_fops); ++ if (!debug_sample) ++ goto err_sample; ++ ++ debug_count = debugfs_create_file("count", 0444, ++ debug_dir, NULL, ++ &count_fops); ++ if (!debug_count) ++ goto err_count; ++ ++ debug_max = debugfs_create_file("max", 0444, ++ debug_dir, NULL, ++ &max_fops); ++ if (!debug_max) ++ goto err_max; ++ ++ debug_sample_window = debugfs_create_file("window", 0644, ++ debug_dir, NULL, ++ &window_fops); ++ if (!debug_sample_window) ++ goto err_window; ++ ++ debug_sample_width = debugfs_create_file("width", 0644, ++ debug_dir, NULL, ++ &width_fops); ++ if (!debug_sample_width) ++ goto err_width; ++ ++ debug_threshold = debugfs_create_file("threshold", 0644, ++ debug_dir, NULL, ++ &threshold_fops); ++ if (!debug_threshold) ++ goto err_threshold; ++ ++ debug_enable = debugfs_create_file("enable", 0644, ++ debug_dir, &enabled, ++ &enable_fops); ++ if (!debug_enable) ++ goto err_enable; ++ ++ else { ++ ret = 0; ++ goto out; ++ } ++ ++err_enable: ++ debugfs_remove(debug_threshold); ++err_threshold: ++ debugfs_remove(debug_sample_width); ++err_width: ++ debugfs_remove(debug_sample_window); ++err_window: ++ debugfs_remove(debug_max); ++err_max: ++ debugfs_remove(debug_count); ++err_count: ++ debugfs_remove(debug_sample); ++err_sample: ++ debugfs_remove(debug_dir); ++err_debug_dir: ++out: ++ return ret; ++} ++ ++/** ++ * free_debugfs - A function to cleanup the debugfs file interface ++ */ ++static void free_debugfs(void) ++{ ++ /* could also use a debugfs_remove_recursive */ ++ debugfs_remove(debug_enable); ++ debugfs_remove(debug_threshold); ++ debugfs_remove(debug_sample_width); ++ debugfs_remove(debug_sample_window); ++ debugfs_remove(debug_max); ++ debugfs_remove(debug_count); ++ debugfs_remove(debug_sample); ++ debugfs_remove(debug_dir); ++} ++ ++/** ++ * detector_init - Standard module initialization code ++ */ ++static int detector_init(void) ++{ ++ int ret = -ENOMEM; ++ ++ printk(KERN_INFO BANNER "version %s\n", VERSION); ++ ++ ret = init_stats(); ++ if (0 != ret) ++ goto out; ++ ++ ret = init_debugfs(); ++ if (0 != ret) ++ goto err_stats; ++ ++ if (enabled) ++ ret = start_kthread(); ++ ++ goto out; ++ ++err_stats: ++ ring_buffer_free(ring_buffer); ++out: ++ return ret; ++ ++} ++ ++/** ++ * detector_exit - Standard module cleanup code ++ */ ++static void detector_exit(void) ++{ ++ int err; ++ ++ if (enabled) { ++ enabled = 0; ++ err = stop_kthread(); ++ if (err) ++ printk(KERN_ERR BANNER "cannot stop kthread\n"); ++ } ++ ++ free_debugfs(); ++ ring_buffer_free(ring_buffer); /* free up the ring buffer */ ++ ++} ++ ++module_init(detector_init); ++module_exit(detector_exit); diff --git a/debian/patches/features/all/rt/i2c-omap-drop-the-lock-hard-irq-context.patch b/debian/patches/features/all/rt/i2c-omap-drop-the-lock-hard-irq-context.patch new file mode 100644 index 000000000..e511d8af3 --- /dev/null +++ b/debian/patches/features/all/rt/i2c-omap-drop-the-lock-hard-irq-context.patch @@ -0,0 +1,37 @@ +From 5145351047b216cca13aaca99f939a9a594c6c4d Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Thu, 21 Mar 
2013 11:35:49 +0100 +Subject: [PATCH 2/3] i2c/omap: drop the lock hard irq context +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +The lock is taken while reading two registers. On RT the first lock is +taken in hard irq where it might sleep and in the threaded irq. +The threaded irq runs in oneshot mode so the hard irq does not run until +the thread the completes so there is no reason to grab the lock. + +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/i2c/busses/i2c-omap.c | 5 +---- + 1 file changed, 1 insertion(+), 4 deletions(-) + +Index: linux-stable/drivers/i2c/busses/i2c-omap.c +=================================================================== +--- linux-stable.orig/drivers/i2c/busses/i2c-omap.c ++++ linux-stable/drivers/i2c/busses/i2c-omap.c +@@ -881,15 +881,12 @@ omap_i2c_isr(int irq, void *dev_id) + u16 mask; + u16 stat; + +- spin_lock(&dev->lock); +- mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG); + stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG); ++ mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG); + + if (stat & mask) + ret = IRQ_WAKE_THREAD; + +- spin_unlock(&dev->lock); +- + return ret; + } + diff --git a/debian/patches/features/all/rt/i915_compile_fix.patch b/debian/patches/features/all/rt/i915_compile_fix.patch new file mode 100644 index 000000000..0b36c7d30 --- /dev/null +++ b/debian/patches/features/all/rt/i915_compile_fix.patch @@ -0,0 +1,25 @@ +From: Sebastian Andrzej Siewior +Subject: gpu/i915: don't open code these things +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +The opencode part is gone in 1f83fee0 ("drm/i915: clear up wedged transitions") +the owner check is still there. + +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/gpu/drm/i915/i915_gem.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: linux-stable/drivers/gpu/drm/i915/i915_gem.c +=================================================================== +--- linux-stable.orig/drivers/gpu/drm/i915/i915_gem.c ++++ linux-stable/drivers/gpu/drm/i915/i915_gem.c +@@ -4412,7 +4412,7 @@ static bool mutex_is_locked_by(struct mu + if (!mutex_is_locked(mutex)) + return false; + +-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) ++#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)) && !defined(CONFIG_PREEMPT_RT_BASE) + return mutex->owner == task; + #else + /* Since UP may be pre-empted, we cannot assume that we own the lock */ diff --git a/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch b/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch new file mode 100644 index 000000000..4b1575e68 --- /dev/null +++ b/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch @@ -0,0 +1,184 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:30:16 -0500 +Subject: ide: Do not disable interrupts for PREEMPT-RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Use the local_irq_*_nort variants. 
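+
+On a non-RT configuration the _nort helpers map straight back to the
+plain local_irq_save()/local_irq_restore(), so behaviour there is
+unchanged; with PREEMPT_RT_FULL they only save the flags word and leave
+hard interrupts enabled. Roughly (illustrative sketch only, the real
+definitions are introduced elsewhere in this patch queue):
+
+  #ifdef CONFIG_PREEMPT_RT_FULL
+  # define local_irq_save_nort(flags)	local_save_flags(flags)
+  # define local_irq_restore_nort(flags)	(void)(flags)
+  # define local_irq_disable_nort()	do { } while (0)
+  #else
+  # define local_irq_save_nort(flags)	local_irq_save(flags)
+  # define local_irq_restore_nort(flags)	local_irq_restore(flags)
+  # define local_irq_disable_nort()	local_irq_disable()
+  #endif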
+ +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + drivers/ide/alim15x3.c | 4 ++-- + drivers/ide/hpt366.c | 4 ++-- + drivers/ide/ide-io-std.c | 8 ++++---- + drivers/ide/ide-io.c | 2 +- + drivers/ide/ide-iops.c | 4 ++-- + drivers/ide/ide-probe.c | 4 ++-- + drivers/ide/ide-taskfile.c | 6 +++--- + 7 files changed, 16 insertions(+), 16 deletions(-) + +Index: linux-stable/drivers/ide/alim15x3.c +=================================================================== +--- linux-stable.orig/drivers/ide/alim15x3.c ++++ linux-stable/drivers/ide/alim15x3.c +@@ -234,7 +234,7 @@ static int init_chipset_ali15x3(struct p + + isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + + if (m5229_revision < 0xC2) { + /* +@@ -325,7 +325,7 @@ out: + } + pci_dev_put(north); + pci_dev_put(isa_dev); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + return 0; + } + +Index: linux-stable/drivers/ide/hpt366.c +=================================================================== +--- linux-stable.orig/drivers/ide/hpt366.c ++++ linux-stable/drivers/ide/hpt366.c +@@ -1241,7 +1241,7 @@ static int init_dma_hpt366(ide_hwif_t *h + + dma_old = inb(base + 2); + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + + dma_new = dma_old; + pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma); +@@ -1252,7 +1252,7 @@ static int init_dma_hpt366(ide_hwif_t *h + if (dma_new != dma_old) + outb(dma_new, base + 2); + +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n", + hwif->name, base, base + 7); +Index: linux-stable/drivers/ide/ide-io-std.c +=================================================================== +--- linux-stable.orig/drivers/ide/ide-io-std.c ++++ linux-stable/drivers/ide/ide-io-std.c +@@ -175,7 +175,7 @@ void ide_input_data(ide_drive_t *drive, + unsigned long uninitialized_var(flags); + + if ((io_32bit & 2) && !mmio) { +- local_irq_save(flags); ++ local_irq_save_nort(flags); + ata_vlb_sync(io_ports->nsect_addr); + } + +@@ -186,7 +186,7 @@ void ide_input_data(ide_drive_t *drive, + insl(data_addr, buf, words); + + if ((io_32bit & 2) && !mmio) +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + if (((len + 1) & 3) < 2) + return; +@@ -219,7 +219,7 @@ void ide_output_data(ide_drive_t *drive, + unsigned long uninitialized_var(flags); + + if ((io_32bit & 2) && !mmio) { +- local_irq_save(flags); ++ local_irq_save_nort(flags); + ata_vlb_sync(io_ports->nsect_addr); + } + +@@ -230,7 +230,7 @@ void ide_output_data(ide_drive_t *drive, + outsl(data_addr, buf, words); + + if ((io_32bit & 2) && !mmio) +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + if (((len + 1) & 3) < 2) + return; +Index: linux-stable/drivers/ide/ide-io.c +=================================================================== +--- linux-stable.orig/drivers/ide/ide-io.c ++++ linux-stable/drivers/ide/ide-io.c +@@ -659,7 +659,7 @@ void ide_timer_expiry (unsigned long dat + /* disable_irq_nosync ?? 
*/ + disable_irq(hwif->irq); + /* local CPU only, as if we were handling an interrupt */ +- local_irq_disable(); ++ local_irq_disable_nort(); + if (hwif->polling) { + startstop = handler(drive); + } else if (drive_is_ready(drive)) { +Index: linux-stable/drivers/ide/ide-iops.c +=================================================================== +--- linux-stable.orig/drivers/ide/ide-iops.c ++++ linux-stable/drivers/ide/ide-iops.c +@@ -129,12 +129,12 @@ int __ide_wait_stat(ide_drive_t *drive, + if ((stat & ATA_BUSY) == 0) + break; + +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + *rstat = stat; + return -EBUSY; + } + } +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + /* + * Allow status to settle, then read it again. +Index: linux-stable/drivers/ide/ide-probe.c +=================================================================== +--- linux-stable.orig/drivers/ide/ide-probe.c ++++ linux-stable/drivers/ide/ide-probe.c +@@ -196,10 +196,10 @@ static void do_identify(ide_drive_t *dri + int bswap = 1; + + /* local CPU only; some systems need this */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + /* read 512 bytes of id info */ + hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + drive->dev_flags |= IDE_DFLAG_ID_READ; + #ifdef DEBUG +Index: linux-stable/drivers/ide/ide-taskfile.c +=================================================================== +--- linux-stable.orig/drivers/ide/ide-taskfile.c ++++ linux-stable/drivers/ide/ide-taskfile.c +@@ -251,7 +251,7 @@ void ide_pio_bytes(ide_drive_t *drive, s + + page_is_high = PageHighMem(page); + if (page_is_high) +- local_irq_save(flags); ++ local_irq_save_nort(flags); + + buf = kmap_atomic(page) + offset; + +@@ -272,7 +272,7 @@ void ide_pio_bytes(ide_drive_t *drive, s + kunmap_atomic(buf); + + if (page_is_high) +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + len -= nr_bytes; + } +@@ -415,7 +415,7 @@ static ide_startstop_t pre_task_out_intr + } + + if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0) +- local_irq_disable(); ++ local_irq_disable_nort(); + + ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE); + diff --git a/debian/patches/features/all/rt/idle-state.patch b/debian/patches/features/all/rt/idle-state.patch new file mode 100644 index 000000000..f85a60037 --- /dev/null +++ b/debian/patches/features/all/rt/idle-state.patch @@ -0,0 +1,22 @@ +Subject: sched: Init idle->on_rq in init_idle() +From: Thomas Gleixner +Date: Wed, 09 Jan 2013 23:03:29 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/sched/core.c | 1 + + 1 file changed, 1 insertion(+) + +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -5034,6 +5034,7 @@ void __cpuinit init_idle(struct task_str + rcu_read_unlock(); + + rq->curr = rq->idle = idle; ++ idle->on_rq = 1; + #if defined(CONFIG_SMP) + idle->on_cpu = 1; + #endif diff --git a/debian/patches/features/all/rt/idr-use-local-lock-for-protection.patch b/debian/patches/features/all/rt/idr-use-local-lock-for-protection.patch new file mode 100644 index 000000000..bf686bd87 --- /dev/null +++ b/debian/patches/features/all/rt/idr-use-local-lock-for-protection.patch @@ -0,0 +1,101 @@ +From: Thomas Gleixner +Subject: idr: Use local lock instead of preempt enable/disable 
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +We need to protect the per cpu variable and prevent migration. + +Signed-off-by: Thomas Gleixner +--- + include/linux/idr.h | 4 ++++ + lib/idr.c | 37 ++++++++++++++++++++++++++++++++++--- + 2 files changed, 38 insertions(+), 3 deletions(-) + +Index: linux-stable/include/linux/idr.h +=================================================================== +--- linux-stable.orig/include/linux/idr.h ++++ linux-stable/include/linux/idr.h +@@ -92,10 +92,14 @@ void idr_init(struct idr *idp); + * Each idr_preload() should be matched with an invocation of this + * function. See idr_preload() for details. + */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++void idr_preload_end(void); ++#else + static inline void idr_preload_end(void) + { + preempt_enable(); + } ++#endif + + /** + * idr_find - return pointer for given id +Index: linux-stable/lib/idr.c +=================================================================== +--- linux-stable.orig/lib/idr.c ++++ linux-stable/lib/idr.c +@@ -37,6 +37,7 @@ + #include + #include + #include ++#include + + #define MAX_IDR_SHIFT (sizeof(int) * 8 - 1) + #define MAX_IDR_BIT (1U << MAX_IDR_SHIFT) +@@ -389,6 +390,36 @@ int __idr_get_new_above(struct idr *idp, + } + EXPORT_SYMBOL(__idr_get_new_above); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static DEFINE_LOCAL_IRQ_LOCK(idr_lock); ++ ++static inline void idr_preload_lock(void) ++{ ++ local_lock(idr_lock); ++} ++ ++static inline void idr_preload_unlock(void) ++{ ++ local_unlock(idr_lock); ++} ++ ++void idr_preload_end(void) ++{ ++ idr_preload_unlock(); ++} ++EXPORT_SYMBOL(idr_preload_end); ++#else ++static inline void idr_preload_lock(void) ++{ ++ preempt_disable(); ++} ++ ++static inline void idr_preload_unlock(void) ++{ ++ preempt_enable(); ++} ++#endif ++ + /** + * idr_preload - preload for idr_alloc() + * @gfp_mask: allocation mask to use for preloading +@@ -423,7 +454,7 @@ void idr_preload(gfp_t gfp_mask) + WARN_ON_ONCE(in_interrupt()); + might_sleep_if(gfp_mask & __GFP_WAIT); + +- preempt_disable(); ++ idr_preload_lock(); + + /* + * idr_alloc() is likely to succeed w/o full idr_layer buffer and +@@ -435,9 +466,9 @@ void idr_preload(gfp_t gfp_mask) + while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) { + struct idr_layer *new; + +- preempt_enable(); ++ idr_preload_unlock(); + new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); +- preempt_disable(); ++ idr_preload_lock(); + if (!new) + break; + diff --git a/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch b/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch new file mode 100644 index 000000000..3c96073f3 --- /dev/null +++ b/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch @@ -0,0 +1,43 @@ +From: Sven-Thorsten Dietrich +Date: Fri, 3 Jul 2009 08:30:35 -0500 +Subject: infiniband: Mellanox IB driver patch use _nort() primitives +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Fixes in_atomic stack-dump, when Mellanox module is loaded into the RT +Kernel. + +Michael S. Tsirkin sayeth: +"Basically, if you just make spin_lock_irqsave (and spin_lock_irq) not disable +interrupts for non-raw spinlocks, I think all of infiniband will be fine without +changes." 
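+
+The reason is that with PREEMPT_RT_FULL a non-raw spinlock_t is a
+sleeping lock and its _irqsave forms no longer touch the hardware
+interrupt state at all; conceptually (sketch, not the literal RT
+definition):
+
+  /* non-raw spinlock_t under PREEMPT_RT_FULL, roughly: */
+  #define spin_lock_irqsave(lock, flags) \
+  	do { (flags) = 0; spin_lock(lock); } while (0)
+
+Only the explicit local_irq_save()/restore() calls around those locks
+still disable interrupts, which is why they are switched to the _nort
+variants below.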
+ +Signed-off-by: Sven-Thorsten Dietrich +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +Index: linux-stable/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +=================================================================== +--- linux-stable.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c ++++ linux-stable/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +@@ -783,7 +783,7 @@ void ipoib_mcast_restart_task(struct wor + + ipoib_mcast_stop_thread(dev, 0); + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + netif_addr_lock(dev); + spin_lock(&priv->lock); + +@@ -865,7 +865,7 @@ void ipoib_mcast_restart_task(struct wor + + spin_unlock(&priv->lock); + netif_addr_unlock(dev); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + /* We have to cancel outside of the spinlock */ + list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { diff --git a/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch b/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch new file mode 100644 index 000000000..63966820c --- /dev/null +++ b/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch @@ -0,0 +1,47 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:30:16 -0500 +Subject: input: gameport: Do not disable interrupts on PREEMPT_RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Use the _nort() primitives. + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + drivers/input/gameport/gameport.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +Index: linux-stable/drivers/input/gameport/gameport.c +=================================================================== +--- linux-stable.orig/drivers/input/gameport/gameport.c ++++ linux-stable/drivers/input/gameport/gameport.c +@@ -87,12 +87,12 @@ static int gameport_measure_speed(struct + tx = 1 << 30; + + for(i = 0; i < 50; i++) { +- local_irq_save(flags); ++ local_irq_save_nort(flags); + GET_TIME(t1); + for (t = 0; t < 50; t++) gameport_read(gameport); + GET_TIME(t2); + GET_TIME(t3); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + udelay(i * 10); + if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t; + } +@@ -111,11 +111,11 @@ static int gameport_measure_speed(struct + tx = 1 << 30; + + for(i = 0; i < 50; i++) { +- local_irq_save(flags); ++ local_irq_save_nort(flags); + rdtscl(t1); + for (t = 0; t < 50; t++) gameport_read(gameport); + rdtscl(t2); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + udelay(i * 10); + if (t2 - t1 < tx) tx = t2 - t1; + } diff --git a/debian/patches/features/all/rt/ipc-make-rt-aware.patch b/debian/patches/features/all/rt/ipc-make-rt-aware.patch new file mode 100644 index 000000000..a850adc02 --- /dev/null +++ b/debian/patches/features/all/rt/ipc-make-rt-aware.patch @@ -0,0 +1,88 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:30:12 -0500 +Subject: ipc: Make the ipc code -rt aware +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +RT serializes the code with the (rt)spinlock but keeps preemption +enabled. Some parts of the code need to be atomic nevertheless. + +Protect it with preempt_disable/enable_rt pairts. 
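+
+The _rt pair is a no-op on the mainline preemption models and only
+becomes a real preempt_disable()/preempt_enable() when PREEMPT_RT_BASE
+is enabled; roughly (illustrative sketch, see the RT preempt.h changes
+for the actual definitions):
+
+  #ifdef CONFIG_PREEMPT_RT_BASE
+  # define preempt_disable_rt()	preempt_disable()
+  # define preempt_enable_rt()	preempt_enable()
+  #else
+  # define preempt_disable_rt()	barrier()
+  # define preempt_enable_rt()	barrier()
+  #endif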
+ +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + ipc/mqueue.c | 5 +++++ + ipc/msg.c | 16 ++++++++++++++++ + 2 files changed, 21 insertions(+) + +Index: linux-stable/ipc/mqueue.c +=================================================================== +--- linux-stable.orig/ipc/mqueue.c ++++ linux-stable/ipc/mqueue.c +@@ -921,12 +921,17 @@ static inline void pipelined_send(struct + struct msg_msg *message, + struct ext_wait_queue *receiver) + { ++ /* ++ * Keep them in one critical section for PREEMPT_RT: ++ */ ++ preempt_disable_rt(); + receiver->msg = message; + list_del(&receiver->list); + receiver->state = STATE_PENDING; + wake_up_process(receiver->task); + smp_wmb(); + receiver->state = STATE_READY; ++ preempt_enable_rt(); + } + + /* pipelined_receive() - if there is task waiting in sys_mq_timedsend() +Index: linux-stable/ipc/msg.c +=================================================================== +--- linux-stable.orig/ipc/msg.c ++++ linux-stable/ipc/msg.c +@@ -252,10 +252,18 @@ static void expunge_all(struct msg_queue + struct msg_receiver *msr, *t; + + list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) { ++ /* ++ * Make sure that the wakeup doesnt preempt ++ * this CPU prematurely. (on PREEMPT_RT) ++ */ ++ preempt_disable_rt(); ++ + msr->r_msg = NULL; + wake_up_process(msr->r_tsk); + smp_mb(); + msr->r_msg = ERR_PTR(res); ++ ++ preempt_enable_rt(); + } + } + +@@ -597,6 +605,12 @@ static inline int pipelined_send(struct + !security_msg_queue_msgrcv(msq, msg, msr->r_tsk, + msr->r_msgtype, msr->r_mode)) { + ++ /* ++ * Make sure that the wakeup doesnt preempt ++ * this CPU prematurely. (on PREEMPT_RT) ++ */ ++ preempt_disable_rt(); ++ + list_del(&msr->r_list); + if (msr->r_maxsize < msg->m_ts) { + msr->r_msg = NULL; +@@ -610,9 +624,11 @@ static inline int pipelined_send(struct + wake_up_process(msr->r_tsk); + smp_mb(); + msr->r_msg = msg; ++ preempt_enable_rt(); + + return 1; + } ++ preempt_enable_rt(); + } + } + return 0; diff --git a/debian/patches/features/all/rt/ipc-mqueue-add-a-critical-section-to-avoid-a-deadlock.patch b/debian/patches/features/all/rt/ipc-mqueue-add-a-critical-section-to-avoid-a-deadlock.patch new file mode 100644 index 000000000..6f1f4843b --- /dev/null +++ b/debian/patches/features/all/rt/ipc-mqueue-add-a-critical-section-to-avoid-a-deadlock.patch @@ -0,0 +1,67 @@ +Subject: ipc/mqueue: Add a critical section to avoid a deadlock +From: KOBAYASHI Yoshitake +Date: Sat, 23 Jul 2011 11:57:36 +0900 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +(Repost for v3.0-rt1 and changed the distination addreses) +I have tested the following patch on v3.0-rt1 with PREEMPT_RT_FULL. +In POSIX message queue, if a sender process uses SCHED_FIFO and +has a higher priority than a receiver process, the sender will +be stuck at ipc/mqueue.c:452 + + 452 while (ewp->state == STATE_PENDING) + 453 cpu_relax(); + +Description of the problem + (receiver process) + 1. receiver changes sender's state to STATE_PENDING (mqueue.c:846) + 2. wake up sender process and "switch to sender" (mqueue.c:847) + Note: This context switch only happens in PREEMPT_RT_FULL kernel. + (sender process) + 3. sender check the own state in above loop (mqueue.c:452-453) + *. 
receiver will never wake up and cannot change sender's state to + STATE_READY because sender has higher priority + + +Signed-off-by: Yoshitake Kobayashi +Cc: viro@zeniv.linux.org.uk +Cc: dchinner@redhat.com +Cc: npiggin@kernel.dk +Cc: hch@lst.de +Cc: arnd@arndb.de +Link: http://lkml.kernel.org/r/4E2A38A0.1090601@toshiba.co.jp +Signed-off-by: Thomas Gleixner +--- + ipc/mqueue.c | 19 ++++++++++++------- + 1 file changed, 12 insertions(+), 7 deletions(-) + +Index: linux-stable/ipc/mqueue.c +=================================================================== +--- linux-stable.orig/ipc/mqueue.c ++++ linux-stable/ipc/mqueue.c +@@ -945,13 +945,18 @@ static inline void pipelined_receive(str + wake_up_interruptible(&info->wait_q); + return; + } +- if (msg_insert(sender->msg, info)) +- return; +- list_del(&sender->list); +- sender->state = STATE_PENDING; +- wake_up_process(sender->task); +- smp_wmb(); +- sender->state = STATE_READY; ++ /* ++ * Keep them in one critical section for PREEMPT_RT: ++ */ ++ preempt_disable_rt(); ++ if (!msg_insert(sender->msg, info)) { ++ list_del(&sender->list); ++ sender->state = STATE_PENDING; ++ wake_up_process(sender->task); ++ smp_wmb(); ++ sender->state = STATE_READY; ++ } ++ preempt_enable_rt(); + } + + SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, diff --git a/debian/patches/features/all/rt/ipc-sem-rework-semaphore-wakeups.patch b/debian/patches/features/all/rt/ipc-sem-rework-semaphore-wakeups.patch new file mode 100644 index 000000000..4258c2c56 --- /dev/null +++ b/debian/patches/features/all/rt/ipc-sem-rework-semaphore-wakeups.patch @@ -0,0 +1,139 @@ +Subject: ipc/sem: Rework semaphore wakeups +From: Peter Zijlstra +Date: Wed, 14 Sep 2011 11:57:04 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Subject: ipc/sem: Rework semaphore wakeups +From: Peter Zijlstra +Date: Tue Sep 13 15:09:40 CEST 2011 + +Current sysv sems have a weird ass wakeup scheme that involves keeping +preemption disabled over a potential O(n^2) loop and busy waiting on +that on other CPUs. + +Kill this and simply wake the task directly from under the sem_lock. + +This was discovered by a migrate_disable() debug feature that +disallows: + + spin_lock(); + preempt_disable(); + spin_unlock() + preempt_enable(); + +Cc: Manfred Spraul +Suggested-by: Thomas Gleixner +Reported-by: Mike Galbraith +Signed-off-by: Peter Zijlstra +Cc: Manfred Spraul +Link: http://lkml.kernel.org/r/1315994224.5040.1.camel@twins +Signed-off-by: Thomas Gleixner +--- + ipc/sem.c | 24 +++++++++++++++++------- + 1 file changed, 17 insertions(+), 7 deletions(-) + +Index: linux-stable/ipc/sem.c +=================================================================== +--- linux-stable.orig/ipc/sem.c ++++ linux-stable/ipc/sem.c +@@ -155,7 +155,7 @@ static int sysvipc_sem_proc_show(struct + * sem_array.sem_pending{,last}, + * sem_array.sem_undo: sem_lock() for read/write + * sem_undo.proc_next: only "current" is allowed to read/write that field. 
+- * ++ * + */ + + #define sc_semmsl sem_ctls[0] +@@ -498,7 +498,7 @@ static int try_atomic_semop (struct sem_ + curr = sma->sem_base + sop->sem_num; + sem_op = sop->sem_op; + result = curr->semval; +- ++ + if (!sem_op && result) + goto would_block; + +@@ -525,7 +525,7 @@ static int try_atomic_semop (struct sem_ + un->semadj[sop->sem_num] -= sop->sem_op; + sop--; + } +- ++ + return 0; + + out_of_range: +@@ -557,6 +557,13 @@ undo: + static void wake_up_sem_queue_prepare(struct list_head *pt, + struct sem_queue *q, int error) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct task_struct *p = q->sleeper; ++ get_task_struct(p); ++ q->status = error; ++ wake_up_process(p); ++ put_task_struct(p); ++#else + if (list_empty(pt)) { + /* + * Hold preempt off so that we don't get preempted and have the +@@ -568,6 +575,7 @@ static void wake_up_sem_queue_prepare(st + q->pid = error; + + list_add_tail(&q->list, pt); ++#endif + } + + /** +@@ -581,6 +589,7 @@ static void wake_up_sem_queue_prepare(st + */ + static void wake_up_sem_queue_do(struct list_head *pt) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + struct sem_queue *q, *t; + int did_something; + +@@ -593,6 +602,7 @@ static void wake_up_sem_queue_do(struct + } + if (did_something) + preempt_enable(); ++#endif + } + + static void unlink_queue(struct sem_array *sma, struct sem_queue *q) +@@ -947,7 +957,7 @@ static int semctl_nolock(struct ipc_name + err = security_sem_semctl(NULL, cmd); + if (err) + return err; +- ++ + memset(&seminfo,0,sizeof(seminfo)); + seminfo.semmni = ns->sc_semmni; + seminfo.semmns = ns->sc_semmns; +@@ -967,7 +977,7 @@ static int semctl_nolock(struct ipc_name + } + max_id = ipc_get_maxid(&sem_ids(ns)); + up_read(&sem_ids(ns).rw_mutex); +- if (copy_to_user(p, &seminfo, sizeof(struct seminfo))) ++ if (copy_to_user(p, &seminfo, sizeof(struct seminfo))) + return -EFAULT; + return (max_id < 0) ? 0: max_id; + } +@@ -1642,7 +1652,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, + /* We need to sleep on this operation, so we put the current + * task into the pending queue and go to sleep. + */ +- ++ + queue.sops = sops; + queue.nsops = nsops; + queue.undo = un; +@@ -1765,7 +1775,7 @@ int copy_semundo(unsigned long clone_fla + return error; + atomic_inc(&undo_list->refcnt); + tsk->sysvsem.undo_list = undo_list; +- } else ++ } else + tsk->sysvsem.undo_list = NULL; + + return 0; diff --git a/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch b/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch new file mode 100644 index 000000000..cc90f6be4 --- /dev/null +++ b/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch @@ -0,0 +1,155 @@ +Subject: genirq: Allow disabling of softirq processing in irq thread context +From: Thomas Gleixner +Date: Tue, 31 Jan 2012 13:01:27 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +The processing of softirqs in irq thread context is a performance gain +for the non-rt workloads of a system, but it's counterproductive for +interrupts which are explicitely related to the realtime +workload. Allow such interrupts to prevent softirq processing in their +thread context. 
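+
+A driver with such realtime requirements would just pass the new flag
+when requesting the interrupt, e.g. (illustrative only, this patch does
+not convert any in-tree user; "my_handler" and "my-rt-dev" are made-up
+names):
+
+  ret = request_irq(irq, my_handler, IRQF_NO_SOFTIRQ_CALL,
+  		    "my-rt-dev", dev);
+
+When the handler is force-threaded (the default on RT), its thread then
+exits through _local_bh_enable() instead of local_bh_enable(), so
+pending softirqs raised elsewhere are not processed in its context.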
+ +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + include/linux/interrupt.h | 2 ++ + include/linux/irq.h | 5 ++++- + kernel/irq/manage.c | 13 ++++++++++++- + kernel/irq/settings.h | 12 ++++++++++++ + kernel/softirq.c | 7 +++++++ + 5 files changed, 37 insertions(+), 2 deletions(-) + +Index: linux-stable/include/linux/interrupt.h +=================================================================== +--- linux-stable.orig/include/linux/interrupt.h ++++ linux-stable/include/linux/interrupt.h +@@ -58,6 +58,7 @@ + * IRQF_NO_THREAD - Interrupt cannot be threaded + * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device + * resume time. ++ * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT) + */ + #define IRQF_DISABLED 0x00000020 + #define IRQF_SHARED 0x00000080 +@@ -71,6 +72,7 @@ + #define IRQF_FORCE_RESUME 0x00008000 + #define IRQF_NO_THREAD 0x00010000 + #define IRQF_EARLY_RESUME 0x00020000 ++#define IRQF_NO_SOFTIRQ_CALL 0x00040000 + + #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) + +Index: linux-stable/include/linux/irq.h +=================================================================== +--- linux-stable.orig/include/linux/irq.h ++++ linux-stable/include/linux/irq.h +@@ -70,6 +70,7 @@ typedef void (*irq_preflow_handler_t)(st + * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context + * IRQ_NESTED_TRHEAD - Interrupt nests into another thread + * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable ++ * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT) + */ + enum { + IRQ_TYPE_NONE = 0x00000000, +@@ -94,12 +95,14 @@ enum { + IRQ_NESTED_THREAD = (1 << 15), + IRQ_NOTHREAD = (1 << 16), + IRQ_PER_CPU_DEVID = (1 << 17), ++ IRQ_NO_SOFTIRQ_CALL = (1 << 18), + }; + + #define IRQF_MODIFY_MASK \ + (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ + IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ +- IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID) ++ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ ++ IRQ_NO_SOFTIRQ_CALL) + + #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) + +Index: linux-stable/kernel/irq/manage.c +=================================================================== +--- linux-stable.orig/kernel/irq/manage.c ++++ linux-stable/kernel/irq/manage.c +@@ -783,7 +783,15 @@ irq_forced_thread_fn(struct irq_desc *de + local_bh_disable(); + ret = action->thread_fn(action->irq, action->dev_id); + irq_finalize_oneshot(desc, action); +- local_bh_enable(); ++ /* ++ * Interrupts which have real time requirements can be set up ++ * to avoid softirq processing in the thread handler. This is ++ * safe as these interrupts do not raise soft interrupts. 
++ */ ++ if (irq_settings_no_softirq_call(desc)) ++ _local_bh_enable(); ++ else ++ local_bh_enable(); + return ret; + } + +@@ -1128,6 +1136,9 @@ __setup_irq(unsigned int irq, struct irq + irqd_set(&desc->irq_data, IRQD_NO_BALANCING); + } + ++ if (new->flags & IRQF_NO_SOFTIRQ_CALL) ++ irq_settings_set_no_softirq_call(desc); ++ + /* Set default affinity mask once everything is setup */ + setup_affinity(irq, desc, mask); + +Index: linux-stable/kernel/irq/settings.h +=================================================================== +--- linux-stable.orig/kernel/irq/settings.h ++++ linux-stable/kernel/irq/settings.h +@@ -14,6 +14,7 @@ enum { + _IRQ_NO_BALANCING = IRQ_NO_BALANCING, + _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD, + _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID, ++ _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL, + _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, + }; + +@@ -26,6 +27,7 @@ enum { + #define IRQ_NOAUTOEN GOT_YOU_MORON + #define IRQ_NESTED_THREAD GOT_YOU_MORON + #define IRQ_PER_CPU_DEVID GOT_YOU_MORON ++#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON + #undef IRQF_MODIFY_MASK + #define IRQF_MODIFY_MASK GOT_YOU_MORON + +@@ -36,6 +38,16 @@ irq_settings_clr_and_set(struct irq_desc + desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK); + } + ++static inline bool irq_settings_no_softirq_call(struct irq_desc *desc) ++{ ++ return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL; ++} ++ ++static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc) ++{ ++ desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL; ++} ++ + static inline bool irq_settings_is_per_cpu(struct irq_desc *desc) + { + return desc->status_use_accessors & _IRQ_PER_CPU; +Index: linux-stable/kernel/softirq.c +=================================================================== +--- linux-stable.orig/kernel/softirq.c ++++ linux-stable/kernel/softirq.c +@@ -425,6 +425,13 @@ void local_bh_enable_ip(unsigned long ip + } + EXPORT_SYMBOL(local_bh_enable_ip); + ++void _local_bh_enable(void) ++{ ++ current->softirq_nestcnt--; ++ migrate_enable(); ++} ++EXPORT_SYMBOL(_local_bh_enable); ++ + /* For tracing */ + int notrace __in_softirq(void) + { diff --git a/debian/patches/features/all/rt/jump-label-rt.patch b/debian/patches/features/all/rt/jump-label-rt.patch new file mode 100644 index 000000000..6173bb6db --- /dev/null +++ b/debian/patches/features/all/rt/jump-label-rt.patch @@ -0,0 +1,24 @@ +Subject: jump-label-rt.patch +From: Thomas Gleixner +Date: Wed, 13 Jul 2011 11:03:16 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/jump_label.h | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +Index: linux-stable/include/linux/jump_label.h +=================================================================== +--- linux-stable.orig/include/linux/jump_label.h ++++ linux-stable/include/linux/jump_label.h +@@ -50,7 +50,8 @@ + #include + #include + +-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) ++#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) && \ ++ !defined(CONFIG_PREEMPT_BASE) + + struct static_key { + atomic_t enabled; diff --git a/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch b/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch new file mode 100644 index 000000000..f72f081e1 --- /dev/null +++ b/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch @@ -0,0 +1,51 @@ +Subject: kconfig-disable-a-few-options-rt.patch +From: Thomas Gleixner +Date: Sun, 
24 Jul 2011 12:11:43 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Disable stuff which is known to have issues on RT + +Signed-off-by: Thomas Gleixner +--- + arch/Kconfig | 1 + + drivers/net/Kconfig | 1 + + mm/Kconfig | 2 +- + 3 files changed, 3 insertions(+), 1 deletion(-) + +Index: linux-stable/arch/Kconfig +=================================================================== +--- linux-stable.orig/arch/Kconfig ++++ linux-stable/arch/Kconfig +@@ -6,6 +6,7 @@ config OPROFILE + tristate "OProfile system profiling" + depends on PROFILING + depends on HAVE_OPROFILE ++ depends on !PREEMPT_RT_FULL + select RING_BUFFER + select RING_BUFFER_ALLOW_SWAP + help +Index: linux-stable/drivers/net/Kconfig +=================================================================== +--- linux-stable.orig/drivers/net/Kconfig ++++ linux-stable/drivers/net/Kconfig +@@ -164,6 +164,7 @@ config VXLAN + + config NETCONSOLE + tristate "Network console logging support" ++ depends on !PREEMPT_RT_FULL + ---help--- + If you want to log kernel messages over the network, enable this. + See for details. +Index: linux-stable/mm/Kconfig +=================================================================== +--- linux-stable.orig/mm/Kconfig ++++ linux-stable/mm/Kconfig +@@ -384,7 +384,7 @@ config NOMMU_INITIAL_TRIM_EXCESS + + config TRANSPARENT_HUGEPAGE + bool "Transparent Hugepage Support" +- depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE ++ depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL + select COMPACTION + help + Transparent Hugepages allows the kernel to use huge pages and diff --git a/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch b/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch new file mode 100644 index 000000000..9192bde72 --- /dev/null +++ b/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch @@ -0,0 +1,63 @@ +Subject: kconfig-preempt-rt-full.patch +From: Thomas Gleixner +Date: Wed, 29 Jun 2011 14:58:57 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + init/Makefile | 2 +- + kernel/Kconfig.preempt | 8 ++++++++ + scripts/mkcompile_h | 4 +++- + 3 files changed, 12 insertions(+), 2 deletions(-) + +Index: linux-stable/init/Makefile +=================================================================== +--- linux-stable.orig/init/Makefile ++++ linux-stable/init/Makefile +@@ -33,4 +33,4 @@ silent_chk_compile.h = : + include/generated/compile.h: FORCE + @$($(quiet)chk_compile.h) + $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \ +- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)" ++ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)" +Index: linux-stable/kernel/Kconfig.preempt +=================================================================== +--- linux-stable.orig/kernel/Kconfig.preempt ++++ linux-stable/kernel/Kconfig.preempt +@@ -73,6 +73,14 @@ config PREEMPT_RTB + enables changes which are preliminary for the full preemptiple + RT kernel. 
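The hunk that follows adds the PREEMPT_RT_FULL choice that the remaining patches in this series test for. As a rough, hypothetical illustration (assuming nothing beyond the new symbol and the standard IS_ENABLED() helper; the function and messages are made up), kernel code can gate RT-specific behaviour at compile time like this:

#include <linux/kconfig.h>
#include <linux/printk.h>

/* Hypothetical example: report which preemption model was built in.
 * IS_ENABLED() folds to 0 or 1 at compile time, so the unused branch
 * is discarded by the compiler. */
static void demo_report_preempt_model(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL))
		pr_info("running a PREEMPT_RT_FULL kernel\n");
	else
		pr_info("running a non-RT preemption model\n");
}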
+ ++config PREEMPT_RT_FULL ++ bool "Fully Preemptible Kernel (RT)" ++ depends on IRQ_FORCED_THREADING ++ select PREEMPT_RT_BASE ++ select PREEMPT_RCU ++ help ++ All and everything ++ + endchoice + + config PREEMPT_COUNT +Index: linux-stable/scripts/mkcompile_h +=================================================================== +--- linux-stable.orig/scripts/mkcompile_h ++++ linux-stable/scripts/mkcompile_h +@@ -4,7 +4,8 @@ TARGET=$1 + ARCH=$2 + SMP=$3 + PREEMPT=$4 +-CC=$5 ++RT=$5 ++CC=$6 + + vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; } + +@@ -57,6 +58,7 @@ UTS_VERSION="#$VERSION" + CONFIG_FLAGS="" + if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi + if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi ++if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi + UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP" + + # Truncate to maximum length diff --git a/debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch b/debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch new file mode 100644 index 000000000..d9c607762 --- /dev/null +++ b/debian/patches/features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch @@ -0,0 +1,91 @@ +From 24136a819693ae36039d6b4286bf1f775e062bcc Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Fri, 7 Jun 2013 22:37:06 +0200 +Subject: [PATCH] kernel/cpu: fix cpu down problem if kthread's cpu is + going down +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +If kthread is pinned to CPUx and CPUx is going down then we get into +trouble: +- first the unplug thread is created +- it will set itself to hp->unplug. As a result, every task that is + going to take a lock, has to leave the CPU. +- the CPU_DOWN_PREPARE notifier are started. The worker thread will + start a new process for the "high priority worker". + Now kthread would like to take a lock but since it can't leave the CPU + it will never complete its task. + +We could fire the unplug thread after the notifier but then the cpu is +no longer marked "online" and the unplug thread will run on CPU0 which +was fixed before :) + +So instead the unplug thread is started and kept waiting until the +notfier complete their work. + +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/cpu.c | 16 ++++++++++++++-- + 1 file changed, 14 insertions(+), 2 deletions(-) + +Index: linux-stable/kernel/cpu.c +=================================================================== +--- linux-stable.orig/kernel/cpu.c ++++ linux-stable/kernel/cpu.c +@@ -83,6 +83,7 @@ struct hotplug_pcp { + int refcount; + int grab_lock; + struct completion synced; ++ struct completion unplug_wait; + #ifdef CONFIG_PREEMPT_RT_FULL + spinlock_t lock; + #else +@@ -178,6 +179,7 @@ static int sync_unplug_thread(void *data + { + struct hotplug_pcp *hp = data; + ++ wait_for_completion(&hp->unplug_wait); + preempt_disable(); + hp->unplug = current; + wait_for_pinned_cpus(hp); +@@ -243,6 +245,14 @@ static void __cpu_unplug_sync(struct hot + wait_for_completion(&hp->synced); + } + ++static void __cpu_unplug_wait(unsigned int cpu) ++{ ++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); ++ ++ complete(&hp->unplug_wait); ++ wait_for_completion(&hp->synced); ++} ++ + /* + * Start the sync_unplug_thread on the target cpu and wait for it to + * complete. 
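A stripped-down sketch of the two-completion handshake this patch sets up (all names here are placeholders; only the completion and kthread primitives mirror the real code in kernel/cpu.c, and error handling is reduced to the minimum):

#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/err.h>

/* Placeholder state mirroring the two completions in struct hotplug_pcp. */
struct demo_hp {
	struct completion unplug_wait;	/* notifiers have finished */
	struct completion synced;	/* unplug work has finished */
};

static int demo_unplug_thread(void *data)
{
	struct demo_hp *hp = data;

	/* Created early, but parked until the controller signals that
	 * the CPU_DOWN_PREPARE notifiers are done. */
	wait_for_completion(&hp->unplug_wait);
	/* ... perform the actual unplug work here ... */
	complete(&hp->synced);
	return 0;
}

static int demo_cpu_down(struct demo_hp *hp, int cpu)
{
	struct task_struct *tsk;

	init_completion(&hp->unplug_wait);
	init_completion(&hp->synced);

	tsk = kthread_create(demo_unplug_thread, hp, "demo_unplug/%d", cpu);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);
	wake_up_process(tsk);		/* starts, then blocks on unplug_wait */

	/* ... run the down-prepare notifiers for this CPU ... */

	complete(&hp->unplug_wait);	/* let the unplug thread proceed */
	wait_for_completion(&hp->synced);
	return 0;
}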
+@@ -266,6 +276,7 @@ static int cpu_unplug_begin(unsigned int + tell_sched_cpu_down_begin(cpu); + + init_completion(&hp->synced); ++ init_completion(&hp->unplug_wait); + + hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu); + if (IS_ERR(hp->sync_tsk)) { +@@ -281,8 +292,7 @@ static int cpu_unplug_begin(unsigned int + * wait for tasks that are going to enter these sections and + * we must not have them block. + */ +- __cpu_unplug_sync(hp); +- ++ wake_up_process(hp->sync_tsk); + return 0; + } + +@@ -594,6 +604,8 @@ static int __ref _cpu_down(unsigned int + __func__, cpu); + goto out_release; + } ++ ++ __cpu_unplug_wait(cpu); + smpboot_park_threads(cpu); + + /* Notifiers are done. Don't let any more tasks pin this CPU. */ diff --git a/debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch b/debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch new file mode 100644 index 000000000..86849fb65 --- /dev/null +++ b/debian/patches/features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch @@ -0,0 +1,62 @@ +From 4c6df3d78817c20a147c0291f6600d002c0910d3 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Fri, 14 Jun 2013 17:16:35 +0200 +Subject: [PATCH] kernel/hotplug: restore original cpu mask oncpu/down +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +If a task which is allowed to run only on CPU X puts CPU Y down then it +will be allowed on all CPUs but the on CPU Y after it comes back from +kernel. This patch ensures that we don't lose the initial setting unless +the CPU the task is running is going down. + +Cc: stable-rt@vger.kernel.org +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/cpu.c | 13 ++++++++++++- + 1 file changed, 12 insertions(+), 1 deletion(-) + +Index: linux-stable/kernel/cpu.c +=================================================================== +--- linux-stable.orig/kernel/cpu.c ++++ linux-stable/kernel/cpu.c +@@ -568,6 +568,7 @@ static int __ref _cpu_down(unsigned int + .hcpu = hcpu, + }; + cpumask_var_t cpumask; ++ cpumask_var_t cpumask_org; + + if (num_online_cpus() == 1) + return -EBUSY; +@@ -578,6 +579,12 @@ static int __ref _cpu_down(unsigned int + /* Move the downtaker off the unplug cpu */ + if (!alloc_cpumask_var(&cpumask, GFP_KERNEL)) + return -ENOMEM; ++ if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) { ++ free_cpumask_var(cpumask); ++ return -ENOMEM; ++ } ++ ++ cpumask_copy(cpumask_org, tsk_cpus_allowed(current)); + cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu)); + set_cpus_allowed_ptr(current, cpumask); + free_cpumask_var(cpumask); +@@ -586,7 +593,8 @@ static int __ref _cpu_down(unsigned int + if (mycpu == cpu) { + printk(KERN_ERR "Yuck! 
Still on unplug CPU\n!"); + migrate_enable(); +- return -EBUSY; ++ err = -EBUSY; ++ goto restore_cpus; + } + + cpu_hotplug_begin(); +@@ -645,6 +653,9 @@ out_cancel: + cpu_hotplug_done(); + if (!err) + cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu); ++restore_cpus: ++ set_cpus_allowed_ptr(current, cpumask_org); ++ free_cpumask_var(cpumask_org); + return err; + } + diff --git a/debian/patches/features/all/rt/kgb-serial-hackaround.patch b/debian/patches/features/all/rt/kgb-serial-hackaround.patch new file mode 100644 index 000000000..ae221f4be --- /dev/null +++ b/debian/patches/features/all/rt/kgb-serial-hackaround.patch @@ -0,0 +1,109 @@ +From: Jason Wessel +Date: Thu, 28 Jul 2011 12:42:23 -0500 +Subject: kgdb/serial: Short term workaround +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +On 07/27/2011 04:37 PM, Thomas Gleixner wrote: +> - KGDB (not yet disabled) is reportedly unusable on -rt right now due +> to missing hacks in the console locking which I dropped on purpose. +> + +To work around this in the short term you can use this patch, in +addition to the clocksource watchdog patch that Thomas brewed up. + +Comments are welcome of course. Ultimately the right solution is to +change separation between the console and the HW to have a polled mode ++ work queue so as not to introduce any kind of latency. + +Thanks, +Jason. + +--- + drivers/tty/serial/8250/8250_core.c | 3 ++- + include/linux/kdb.h | 3 ++- + kernel/debug/kdb/kdb_io.c | 6 ++---- + 3 files changed, 6 insertions(+), 6 deletions(-) + +Index: linux-stable/drivers/tty/serial/8250/8250_core.c +=================================================================== +--- linux-stable.orig/drivers/tty/serial/8250/8250_core.c ++++ linux-stable/drivers/tty/serial/8250/8250_core.c +@@ -38,6 +38,7 @@ + #include + #include + #include ++#include + #ifdef CONFIG_SPARC + #include + #endif +@@ -2869,7 +2870,7 @@ serial8250_console_write(struct console + + touch_nmi_watchdog(); + +- if (port->sysrq || oops_in_progress) ++ if (port->sysrq || oops_in_progress || in_kdb_printk()) + locked = spin_trylock_irqsave(&port->lock, flags); + else + spin_lock_irqsave(&port->lock, flags); +Index: linux-stable/include/linux/kdb.h +=================================================================== +--- linux-stable.orig/include/linux/kdb.h ++++ linux-stable/include/linux/kdb.h +@@ -115,7 +115,7 @@ extern int kdb_trap_printk; + extern __printf(1, 0) int vkdb_printf(const char *fmt, va_list args); + extern __printf(1, 2) int kdb_printf(const char *, ...); + typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...); +- ++#define in_kdb_printk() (kdb_trap_printk) + extern void kdb_init(int level); + + /* Access to kdb specific polling devices */ +@@ -150,6 +150,7 @@ extern int kdb_register_repeat(char *, k + extern int kdb_unregister(char *); + #else /* ! CONFIG_KGDB_KDB */ + static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) 
{ return 0; } ++#define in_kdb_printk() (0) + static inline void kdb_init(int level) {} + static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, + char *help, short minlen) { return 0; } +Index: linux-stable/kernel/debug/kdb/kdb_io.c +=================================================================== +--- linux-stable.orig/kernel/debug/kdb/kdb_io.c ++++ linux-stable/kernel/debug/kdb/kdb_io.c +@@ -554,7 +554,6 @@ int vkdb_printf(const char *fmt, va_list + int linecount; + int colcount; + int logging, saved_loglevel = 0; +- int saved_trap_printk; + int got_printf_lock = 0; + int retlen = 0; + int fnd, len; +@@ -565,8 +564,6 @@ int vkdb_printf(const char *fmt, va_list + unsigned long uninitialized_var(flags); + + preempt_disable(); +- saved_trap_printk = kdb_trap_printk; +- kdb_trap_printk = 0; + + /* Serialize kdb_printf if multiple cpus try to write at once. + * But if any cpu goes recursive in kdb, just print the output, +@@ -833,7 +830,6 @@ kdb_print_out: + } else { + __release(kdb_printf_lock); + } +- kdb_trap_printk = saved_trap_printk; + preempt_enable(); + return retlen; + } +@@ -843,9 +839,11 @@ int kdb_printf(const char *fmt, ...) + va_list ap; + int r; + ++ kdb_trap_printk++; + va_start(ap, fmt); + r = vkdb_printf(fmt, ap); + va_end(ap); ++ kdb_trap_printk--; + + return r; + } diff --git a/debian/patches/features/all/rt/latency-hist.patch b/debian/patches/features/all/rt/latency-hist.patch new file mode 100644 index 000000000..8167900de --- /dev/null +++ b/debian/patches/features/all/rt/latency-hist.patch @@ -0,0 +1,1826 @@ +Subject: latency-hist.patch +From: Carsten Emde +Date: Tue, 19 Jul 2011 14:03:41 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +This patch provides a recording mechanism to store data of potential +sources of system latencies. The recordings separately determine the +latency caused by a delayed timer expiration, by a delayed wakeup of the +related user space program and by the sum of both. The histograms can be +enabled and reset individually. The data are accessible via the debug +filesystem. For details please consult Documentation/trace/histograms.txt. + +Signed-off-by: Carsten Emde +Signed-off-by: Thomas Gleixner + +--- + Documentation/trace/histograms.txt | 186 +++++ + include/linux/hrtimer.h | 3 + include/linux/sched.h | 6 + include/trace/events/hist.h | 69 ++ + include/trace/events/latency_hist.h | 29 + kernel/hrtimer.c | 23 + kernel/trace/Kconfig | 104 +++ + kernel/trace/Makefile | 4 + kernel/trace/latency_hist.c | 1177 ++++++++++++++++++++++++++++++++++++ + kernel/trace/trace_irqsoff.c | 11 + 10 files changed, 1612 insertions(+) + +Index: linux-stable/Documentation/trace/histograms.txt +=================================================================== +--- /dev/null ++++ linux-stable/Documentation/trace/histograms.txt +@@ -0,0 +1,186 @@ ++ Using the Linux Kernel Latency Histograms ++ ++ ++This document gives a short explanation how to enable, configure and use ++latency histograms. Latency histograms are primarily relevant in the ++context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT) ++and are used in the quality management of the Linux real-time ++capabilities. ++ ++ ++* Purpose of latency histograms ++ ++A latency histogram continuously accumulates the frequencies of latency ++data. 
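(As a plain-C aside, not part of this documentation file: the accumulation described here boils down to one bucket per microsecond plus two overflow counters. The real implementation is the latency_hist() function added to kernel/trace/latency_hist.c later in this patch; the names below are placeholders.)

#define DEMO_MAX_ENTRY_NUM 10240	/* one bucket per microsecond */

struct demo_hist {
	unsigned long long below;	/* samples below the histogram range */
	unsigned long long above;	/* samples at or above 10240 us */
	unsigned long long bucket[DEMO_MAX_ENTRY_NUM];
};

/* Record one latency sample, given in microseconds. */
static void demo_hist_record(struct demo_hist *h, long latency_us)
{
	if (latency_us < 0)
		h->below++;
	else if (latency_us >= DEMO_MAX_ENTRY_NUM)
		h->above++;
	else
		h->bucket[latency_us]++;
}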
There are two types of histograms ++- potential sources of latencies ++- effective latencies ++ ++ ++* Potential sources of latencies ++ ++Potential sources of latencies are code segments where interrupts, ++preemption or both are disabled (aka critical sections). To create ++histograms of potential sources of latency, the kernel stores the time ++stamp at the start of a critical section, determines the time elapsed ++when the end of the section is reached, and increments the frequency ++counter of that latency value - irrespective of whether any concurrently ++running process is affected by latency or not. ++- Configuration items (in the Kernel hacking/Tracers submenu) ++ CONFIG_INTERRUPT_OFF_LATENCY ++ CONFIG_PREEMPT_OFF_LATENCY ++ ++ ++* Effective latencies ++ ++Effective latencies are actually occuring during wakeup of a process. To ++determine effective latencies, the kernel stores the time stamp when a ++process is scheduled to be woken up, and determines the duration of the ++wakeup time shortly before control is passed over to this process. Note ++that the apparent latency in user space may be somewhat longer, since the ++process may be interrupted after control is passed over to it but before ++the execution in user space takes place. Simply measuring the interval ++between enqueuing and wakeup may also not appropriate in cases when a ++process is scheduled as a result of a timer expiration. The timer may have ++missed its deadline, e.g. due to disabled interrupts, but this latency ++would not be registered. Therefore, the offsets of missed timers are ++recorded in a separate histogram. If both wakeup latency and missed timer ++offsets are configured and enabled, a third histogram may be enabled that ++records the overall latency as a sum of the timer latency, if any, and the ++wakeup latency. This histogram is called "timerandwakeup". ++- Configuration items (in the Kernel hacking/Tracers submenu) ++ CONFIG_WAKEUP_LATENCY ++ CONFIG_MISSED_TIMER_OFSETS ++ ++ ++* Usage ++ ++The interface to the administration of the latency histograms is located ++in the debugfs file system. To mount it, either enter ++ ++mount -t sysfs nodev /sys ++mount -t debugfs nodev /sys/kernel/debug ++ ++from shell command line level, or add ++ ++nodev /sys sysfs defaults 0 0 ++nodev /sys/kernel/debug debugfs defaults 0 0 ++ ++to the file /etc/fstab. All latency histogram related files are then ++available in the directory /sys/kernel/debug/tracing/latency_hist. A ++particular histogram type is enabled by writing non-zero to the related ++variable in the /sys/kernel/debug/tracing/latency_hist/enable directory. ++Select "preemptirqsoff" for the histograms of potential sources of ++latencies and "wakeup" for histograms of effective latencies etc. The ++histogram data - one per CPU - are available in the files ++ ++/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx ++/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx ++/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx ++/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx ++/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx ++/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx ++/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx ++ ++The histograms are reset by writing non-zero to the file "reset" in a ++particular latency directory. 
To reset all latency data, use ++ ++#!/bin/sh ++ ++TRACINGDIR=/sys/kernel/debug/tracing ++HISTDIR=$TRACINGDIR/latency_hist ++ ++if test -d $HISTDIR ++then ++ cd $HISTDIR ++ for i in `find . | grep /reset$` ++ do ++ echo 1 >$i ++ done ++fi ++ ++ ++* Data format ++ ++Latency data are stored with a resolution of one microsecond. The ++maximum latency is 10,240 microseconds. The data are only valid, if the ++overflow register is empty. Every output line contains the latency in ++microseconds in the first row and the number of samples in the second ++row. To display only lines with a positive latency count, use, for ++example, ++ ++grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0 ++ ++#Minimum latency: 0 microseconds. ++#Average latency: 0 microseconds. ++#Maximum latency: 25 microseconds. ++#Total samples: 3104770694 ++#There are 0 samples greater or equal than 10240 microseconds ++#usecs samples ++ 0 2984486876 ++ 1 49843506 ++ 2 58219047 ++ 3 5348126 ++ 4 2187960 ++ 5 3388262 ++ 6 959289 ++ 7 208294 ++ 8 40420 ++ 9 4485 ++ 10 14918 ++ 11 18340 ++ 12 25052 ++ 13 19455 ++ 14 5602 ++ 15 969 ++ 16 47 ++ 17 18 ++ 18 14 ++ 19 1 ++ 20 3 ++ 21 2 ++ 22 5 ++ 23 2 ++ 25 1 ++ ++ ++* Wakeup latency of a selected process ++ ++To only collect wakeup latency data of a particular process, write the ++PID of the requested process to ++ ++/sys/kernel/debug/tracing/latency_hist/wakeup/pid ++ ++PIDs are not considered, if this variable is set to 0. ++ ++ ++* Details of the process with the highest wakeup latency so far ++ ++Selected data of the process that suffered from the highest wakeup ++latency that occurred in a particular CPU are available in the file ++ ++/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx. ++ ++In addition, other relevant system data at the time when the ++latency occurred are given. ++ ++The format of the data is (all in one line): ++ () \ ++<- ++ ++The value of is only relevant in the combined timer ++and wakeup latency recording. In the wakeup recording, it is ++always 0, in the missed_timer_offsets recording, it is the same ++as . ++ ++When retrospectively searching for the origin of a latency and ++tracing was not enabled, it may be helpful to know the name and ++some basic data of the task that (finally) was switching to the ++late real-tlme task. In addition to the victim's data, also the ++data of the possible culprit are therefore displayed after the ++"<-" symbol. ++ ++Finally, the timestamp of the time when the latency occurred ++in . after the most recent system boot ++is provided. ++ ++These data are also reset when the wakeup histogram is reset. 
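In addition to the grep example above, a small user-space reader can do the same filtering; this sketch is not part of the patch and only assumes the file layout documented in this text (summary and header lines start with '#', data lines carry the latency in microseconds followed by the sample count):

#include <stdio.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] :
		"/sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0";
	char line[256];
	long usecs;
	unsigned long long samples;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (line[0] == '#')
			continue;	/* skip the summary/header lines */
		if (sscanf(line, "%ld %llu", &usecs, &samples) == 2 && samples)
			printf("%6ld %16llu\n", usecs, samples);
	}
	fclose(f);
	return 0;
}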
+Index: linux-stable/include/linux/hrtimer.h +=================================================================== +--- linux-stable.orig/include/linux/hrtimer.h ++++ linux-stable/include/linux/hrtimer.h +@@ -111,6 +111,9 @@ struct hrtimer { + enum hrtimer_restart (*function)(struct hrtimer *); + struct hrtimer_clock_base *base; + unsigned long state; ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ ktime_t praecox; ++#endif + #ifdef CONFIG_TIMER_STATS + int start_pid; + void *start_site; +Index: linux-stable/include/linux/sched.h +=================================================================== +--- linux-stable.orig/include/linux/sched.h ++++ linux-stable/include/linux/sched.h +@@ -1406,6 +1406,12 @@ struct task_struct { + unsigned long trace; + /* bitmask and counter of trace recursion */ + unsigned long trace_recursion; ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ u64 preempt_timestamp_hist; ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ long timer_offset; ++#endif ++#endif + #endif /* CONFIG_TRACING */ + #ifdef CONFIG_MEMCG /* memcg uses this to do batch job */ + struct memcg_batch_info { +Index: linux-stable/include/trace/events/hist.h +=================================================================== +--- /dev/null ++++ linux-stable/include/trace/events/hist.h +@@ -0,0 +1,69 @@ ++#undef TRACE_SYSTEM ++#define TRACE_SYSTEM hist ++ ++#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ) ++#define _TRACE_HIST_H ++ ++#include "latency_hist.h" ++#include ++ ++#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST) ++#define trace_preemptirqsoff_hist(a,b) ++#else ++TRACE_EVENT(preemptirqsoff_hist, ++ ++ TP_PROTO(int reason, int starthist), ++ ++ TP_ARGS(reason, starthist), ++ ++ TP_STRUCT__entry( ++ __field(int, reason ) ++ __field(int, starthist ) ++ ), ++ ++ TP_fast_assign( ++ __entry->reason = reason; ++ __entry->starthist = starthist; ++ ), ++ ++ TP_printk("reason=%s starthist=%s", getaction(__entry->reason), ++ __entry->starthist ? "start" : "stop") ++); ++#endif ++ ++#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST ++#define trace_hrtimer_interrupt(a,b,c,d) ++#else ++TRACE_EVENT(hrtimer_interrupt, ++ ++ TP_PROTO(int cpu, long long offset, struct task_struct *curr, struct task_struct *task), ++ ++ TP_ARGS(cpu, offset, curr, task), ++ ++ TP_STRUCT__entry( ++ __field(int, cpu ) ++ __field(long long, offset ) ++ __array(char, ccomm, TASK_COMM_LEN) ++ __field(int, cprio ) ++ __array(char, tcomm, TASK_COMM_LEN) ++ __field(int, tprio ) ++ ), ++ ++ TP_fast_assign( ++ __entry->cpu = cpu; ++ __entry->offset = offset; ++ memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN); ++ __entry->cprio = curr->prio; ++ memcpy(__entry->tcomm, task != NULL ? task->comm : "", task != NULL ? TASK_COMM_LEN : 7); ++ __entry->tprio = task != NULL ? 
task->prio : -1; ++ ), ++ ++ TP_printk("cpu=%d offset=%lld curr=%s[%d] thread=%s[%d]", ++ __entry->cpu, __entry->offset, __entry->ccomm, __entry->cprio, __entry->tcomm, __entry->tprio) ++); ++#endif ++ ++#endif /* _TRACE_HIST_H */ ++ ++/* This part must be outside protection */ ++#include +Index: linux-stable/include/trace/events/latency_hist.h +=================================================================== +--- /dev/null ++++ linux-stable/include/trace/events/latency_hist.h +@@ -0,0 +1,29 @@ ++#ifndef _LATENCY_HIST_H ++#define _LATENCY_HIST_H ++ ++enum hist_action { ++ IRQS_ON, ++ PREEMPT_ON, ++ TRACE_STOP, ++ IRQS_OFF, ++ PREEMPT_OFF, ++ TRACE_START, ++}; ++ ++static char *actions[] = { ++ "IRQS_ON", ++ "PREEMPT_ON", ++ "TRACE_STOP", ++ "IRQS_OFF", ++ "PREEMPT_OFF", ++ "TRACE_START", ++}; ++ ++static inline char *getaction(int action) ++{ ++ if (action >= 0 && action <= sizeof(actions)/sizeof(actions[0])) ++ return(actions[action]); ++ return("unknown"); ++} ++ ++#endif /* _LATENCY_HIST_H */ +Index: linux-stable/kernel/hrtimer.c +=================================================================== +--- linux-stable.orig/kernel/hrtimer.c ++++ linux-stable/kernel/hrtimer.c +@@ -51,6 +51,7 @@ + #include + + #include ++#include + + /* + * The timer bases: +@@ -989,6 +990,17 @@ int __hrtimer_start_range_ns(struct hrti + #endif + } + ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ { ++ ktime_t now = new_base->get_time(); ++ ++ if (ktime_to_ns(tim) < ktime_to_ns(now)) ++ timer->praecox = now; ++ else ++ timer->praecox = ktime_set(0, 0); ++ } ++#endif ++ + hrtimer_set_expires_range_ns(timer, tim, delta_ns); + + timer_stats_hrtimer_set_start_info(timer); +@@ -1267,6 +1279,8 @@ static void __run_hrtimer(struct hrtimer + + #ifdef CONFIG_HIGH_RES_TIMERS + ++static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer); ++ + /* + * High resolution timer interrupt + * Called with interrupts disabled +@@ -1310,6 +1324,15 @@ retry: + + timer = container_of(node, struct hrtimer, node); + ++ trace_hrtimer_interrupt(raw_smp_processor_id(), ++ ktime_to_ns(ktime_sub(ktime_to_ns(timer->praecox) ? ++ timer->praecox : hrtimer_get_expires(timer), ++ basenow)), ++ current, ++ timer->function == hrtimer_wakeup ? ++ container_of(timer, struct hrtimer_sleeper, ++ timer)->task : NULL); ++ + /* + * The immediate goal for using the softexpires is + * minimizing wakeups, not running timers at the +Index: linux-stable/kernel/trace/Kconfig +=================================================================== +--- linux-stable.orig/kernel/trace/Kconfig ++++ linux-stable/kernel/trace/Kconfig +@@ -192,6 +192,24 @@ config IRQSOFF_TRACER + enabled. This option and the preempt-off timing option can be + used together or separately.) + ++config INTERRUPT_OFF_HIST ++ bool "Interrupts-off Latency Histogram" ++ depends on IRQSOFF_TRACER ++ help ++ This option generates continuously updated histograms (one per cpu) ++ of the duration of time periods with interrupts disabled. The ++ histograms are disabled by default. To enable them, write a non-zero ++ number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff ++ ++ If PREEMPT_OFF_HIST is also selected, additional histograms (one ++ per cpu) are generated that accumulate the duration of time periods ++ when both interrupts and preemption are disabled. 
The histogram data ++ will be located in the debug file system at ++ ++ /sys/kernel/debug/tracing/latency_hist/irqsoff ++ + config PREEMPT_TRACER + bool "Preemption-off Latency Tracer" + default n +@@ -216,6 +234,24 @@ config PREEMPT_TRACER + enabled. This option and the irqs-off timing option can be + used together or separately.) + ++config PREEMPT_OFF_HIST ++ bool "Preemption-off Latency Histogram" ++ depends on PREEMPT_TRACER ++ help ++ This option generates continuously updated histograms (one per cpu) ++ of the duration of time periods with preemption disabled. The ++ histograms are disabled by default. To enable them, write a non-zero ++ number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff ++ ++ If INTERRUPT_OFF_HIST is also selected, additional histograms (one ++ per cpu) are generated that accumulate the duration of time periods ++ when both interrupts and preemption are disabled. The histogram data ++ will be located in the debug file system at ++ ++ /sys/kernel/debug/tracing/latency_hist/preemptoff ++ + config SCHED_TRACER + bool "Scheduling Latency Tracer" + select GENERIC_TRACER +@@ -226,6 +262,74 @@ config SCHED_TRACER + This tracer tracks the latency of the highest priority task + to be scheduled in, starting from the point it has woken up. + ++config WAKEUP_LATENCY_HIST ++ bool "Scheduling Latency Histogram" ++ depends on SCHED_TRACER ++ help ++ This option generates continuously updated histograms (one per cpu) ++ of the scheduling latency of the highest priority task. ++ The histograms are disabled by default. To enable them, write a ++ non-zero number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/wakeup ++ ++ Two different algorithms are used, one to determine the latency of ++ processes that exclusively use the highest priority of the system and ++ another one to determine the latency of processes that share the ++ highest system priority with other processes. The former is used to ++ improve hardware and system software, the latter to optimize the ++ priority design of a given system. The histogram data will be ++ located in the debug file system at ++ ++ /sys/kernel/debug/tracing/latency_hist/wakeup ++ ++ and ++ ++ /sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio ++ ++ If both Scheduling Latency Histogram and Missed Timer Offsets ++ Histogram are selected, additional histogram data will be collected ++ that contain, in addition to the wakeup latency, the timer latency, in ++ case the wakeup was triggered by an expired timer. These histograms ++ are available in the ++ ++ /sys/kernel/debug/tracing/latency_hist/timerandwakeup ++ ++ directory. They reflect the apparent interrupt and scheduling latency ++ and are best suitable to determine the worst-case latency of a given ++ system. To enable these histograms, write a non-zero number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup ++ ++config MISSED_TIMER_OFFSETS_HIST ++ depends on HIGH_RES_TIMERS ++ select GENERIC_TRACER ++ bool "Missed Timer Offsets Histogram" ++ help ++ Generate a histogram of missed timer offsets in microseconds. The ++ histograms are disabled by default. 
To enable them, write a non-zero ++ number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/missed_timer_offsets ++ ++ The histogram data will be located in the debug file system at ++ ++ /sys/kernel/debug/tracing/latency_hist/missed_timer_offsets ++ ++ If both Scheduling Latency Histogram and Missed Timer Offsets ++ Histogram are selected, additional histogram data will be collected ++ that contain, in addition to the wakeup latency, the timer latency, in ++ case the wakeup was triggered by an expired timer. These histograms ++ are available in the ++ ++ /sys/kernel/debug/tracing/latency_hist/timerandwakeup ++ ++ directory. They reflect the apparent interrupt and scheduling latency ++ and are best suitable to determine the worst-case latency of a given ++ system. To enable these histograms, write a non-zero number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup ++ + config ENABLE_DEFAULT_TRACERS + bool "Trace process context switches and events" + depends on !GENERIC_TRACER +Index: linux-stable/kernel/trace/Makefile +=================================================================== +--- linux-stable.orig/kernel/trace/Makefile ++++ linux-stable/kernel/trace/Makefile +@@ -34,6 +34,10 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace_f + obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o + obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o + obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o ++obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o ++obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o ++obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o ++obj-$(CONFIG_MISSED_TIMER_OFFSETS_HIST) += latency_hist.o + obj-$(CONFIG_NOP_TRACER) += trace_nop.o + obj-$(CONFIG_STACK_TRACER) += trace_stack.o + obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o +Index: linux-stable/kernel/trace/latency_hist.c +=================================================================== +--- /dev/null ++++ linux-stable/kernel/trace/latency_hist.c +@@ -0,0 +1,1177 @@ ++/* ++ * kernel/trace/latency_hist.c ++ * ++ * Add support for histograms of preemption-off latency and ++ * interrupt-off latency and wakeup latency, it depends on ++ * Real-Time Preemption Support. ++ * ++ * Copyright (C) 2005 MontaVista Software, Inc. ++ * Yi Yang ++ * ++ * Converted to work with the new latency tracer. ++ * Copyright (C) 2008 Red Hat, Inc. 
++ * Steven Rostedt ++ * ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "trace.h" ++#include ++ ++#define NSECS_PER_USECS 1000L ++ ++#define CREATE_TRACE_POINTS ++#include ++ ++enum { ++ IRQSOFF_LATENCY = 0, ++ PREEMPTOFF_LATENCY, ++ PREEMPTIRQSOFF_LATENCY, ++ WAKEUP_LATENCY, ++ WAKEUP_LATENCY_SHAREDPRIO, ++ MISSED_TIMER_OFFSETS, ++ TIMERANDWAKEUP_LATENCY, ++ MAX_LATENCY_TYPE, ++}; ++ ++#define MAX_ENTRY_NUM 10240 ++ ++struct hist_data { ++ atomic_t hist_mode; /* 0 log, 1 don't log */ ++ long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */ ++ long min_lat; ++ long max_lat; ++ unsigned long long below_hist_bound_samples; ++ unsigned long long above_hist_bound_samples; ++ long long accumulate_lat; ++ unsigned long long total_samples; ++ unsigned long long hist_array[MAX_ENTRY_NUM]; ++}; ++ ++struct enable_data { ++ int latency_type; ++ int enabled; ++}; ++ ++static char *latency_hist_dir_root = "latency_hist"; ++ ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++static DEFINE_PER_CPU(struct hist_data, irqsoff_hist); ++static char *irqsoff_hist_dir = "irqsoff"; ++static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start); ++static DEFINE_PER_CPU(int, hist_irqsoff_counting); ++#endif ++ ++#ifdef CONFIG_PREEMPT_OFF_HIST ++static DEFINE_PER_CPU(struct hist_data, preemptoff_hist); ++static char *preemptoff_hist_dir = "preemptoff"; ++static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start); ++static DEFINE_PER_CPU(int, hist_preemptoff_counting); ++#endif ++ ++#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST) ++static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist); ++static char *preemptirqsoff_hist_dir = "preemptirqsoff"; ++static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start); ++static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting); ++#endif ++ ++#if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST) ++static notrace void probe_preemptirqsoff_hist(void *v, int reason, int start); ++static struct enable_data preemptirqsoff_enabled_data = { ++ .latency_type = PREEMPTIRQSOFF_LATENCY, ++ .enabled = 0, ++}; ++#endif ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++struct maxlatproc_data { ++ char comm[FIELD_SIZEOF(struct task_struct, comm)]; ++ char current_comm[FIELD_SIZEOF(struct task_struct, comm)]; ++ int pid; ++ int current_pid; ++ int prio; ++ int current_prio; ++ long latency; ++ long timeroffset; ++ cycle_t timestamp; ++}; ++#endif ++ ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist); ++static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio); ++static char *wakeup_latency_hist_dir = "wakeup"; ++static char *wakeup_latency_hist_dir_sharedprio = "sharedprio"; ++static notrace void probe_wakeup_latency_hist_start(void *v, ++ struct task_struct *p, int success); ++static notrace void probe_wakeup_latency_hist_stop(void *v, ++ struct task_struct *prev, struct task_struct *next); ++static notrace void probe_sched_migrate_task(void *, ++ struct task_struct *task, int cpu); ++static struct enable_data wakeup_latency_enabled_data = { ++ .latency_type = WAKEUP_LATENCY, ++ .enabled = 0, ++}; ++static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc); ++static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio); ++static DEFINE_PER_CPU(struct task_struct *, wakeup_task); ++static DEFINE_PER_CPU(int, wakeup_sharedprio); ++static 
unsigned long wakeup_pid; ++#endif ++ ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets); ++static char *missed_timer_offsets_dir = "missed_timer_offsets"; ++static notrace void probe_hrtimer_interrupt(void *v, int cpu, ++ long long offset, struct task_struct *curr, struct task_struct *task); ++static struct enable_data missed_timer_offsets_enabled_data = { ++ .latency_type = MISSED_TIMER_OFFSETS, ++ .enabled = 0, ++}; ++static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc); ++static unsigned long missed_timer_offsets_pid; ++#endif ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++static DEFINE_PER_CPU(struct hist_data, timerandwakeup_latency_hist); ++static char *timerandwakeup_latency_hist_dir = "timerandwakeup"; ++static struct enable_data timerandwakeup_enabled_data = { ++ .latency_type = TIMERANDWAKEUP_LATENCY, ++ .enabled = 0, ++}; ++static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc); ++#endif ++ ++void notrace latency_hist(int latency_type, int cpu, long latency, ++ long timeroffset, cycle_t stop, ++ struct task_struct *p) ++{ ++ struct hist_data *my_hist; ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ struct maxlatproc_data *mp = NULL; ++#endif ++ ++ if (cpu < 0 || cpu >= NR_CPUS || latency_type < 0 || ++ latency_type >= MAX_LATENCY_TYPE) ++ return; ++ ++ switch (latency_type) { ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ case IRQSOFF_LATENCY: ++ my_hist = &per_cpu(irqsoff_hist, cpu); ++ break; ++#endif ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ case PREEMPTOFF_LATENCY: ++ my_hist = &per_cpu(preemptoff_hist, cpu); ++ break; ++#endif ++#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST) ++ case PREEMPTIRQSOFF_LATENCY: ++ my_hist = &per_cpu(preemptirqsoff_hist, cpu); ++ break; ++#endif ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ case WAKEUP_LATENCY: ++ my_hist = &per_cpu(wakeup_latency_hist, cpu); ++ mp = &per_cpu(wakeup_maxlatproc, cpu); ++ break; ++ case WAKEUP_LATENCY_SHAREDPRIO: ++ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu); ++ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu); ++ break; ++#endif ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ case MISSED_TIMER_OFFSETS: ++ my_hist = &per_cpu(missed_timer_offsets, cpu); ++ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu); ++ break; ++#endif ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ case TIMERANDWAKEUP_LATENCY: ++ my_hist = &per_cpu(timerandwakeup_latency_hist, cpu); ++ mp = &per_cpu(timerandwakeup_maxlatproc, cpu); ++ break; ++#endif ++ ++ default: ++ return; ++ } ++ ++ latency += my_hist->offset; ++ ++ if (atomic_read(&my_hist->hist_mode) == 0) ++ return; ++ ++ if (latency < 0 || latency >= MAX_ENTRY_NUM) { ++ if (latency < 0) ++ my_hist->below_hist_bound_samples++; ++ else ++ my_hist->above_hist_bound_samples++; ++ } else ++ my_hist->hist_array[latency]++; ++ ++ if (unlikely(latency > my_hist->max_lat || ++ my_hist->min_lat == LONG_MAX)) { ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ if (latency_type == WAKEUP_LATENCY || ++ latency_type == WAKEUP_LATENCY_SHAREDPRIO || ++ latency_type == MISSED_TIMER_OFFSETS || ++ latency_type == TIMERANDWAKEUP_LATENCY) { ++ strncpy(mp->comm, p->comm, sizeof(mp->comm)); ++ strncpy(mp->current_comm, current->comm, ++ sizeof(mp->current_comm)); ++ mp->pid = task_pid_nr(p); ++ 
mp->current_pid = task_pid_nr(current); ++ mp->prio = p->prio; ++ mp->current_prio = current->prio; ++ mp->latency = latency; ++ mp->timeroffset = timeroffset; ++ mp->timestamp = stop; ++ } ++#endif ++ my_hist->max_lat = latency; ++ } ++ if (unlikely(latency < my_hist->min_lat)) ++ my_hist->min_lat = latency; ++ my_hist->total_samples++; ++ my_hist->accumulate_lat += latency; ++} ++ ++static void *l_start(struct seq_file *m, loff_t *pos) ++{ ++ loff_t *index_ptr = NULL; ++ loff_t index = *pos; ++ struct hist_data *my_hist = m->private; ++ ++ if (index == 0) { ++ char minstr[32], avgstr[32], maxstr[32]; ++ ++ atomic_dec(&my_hist->hist_mode); ++ ++ if (likely(my_hist->total_samples)) { ++ long avg = (long) div64_s64(my_hist->accumulate_lat, ++ my_hist->total_samples); ++ snprintf(minstr, sizeof(minstr), "%ld", ++ my_hist->min_lat - my_hist->offset); ++ snprintf(avgstr, sizeof(avgstr), "%ld", ++ avg - my_hist->offset); ++ snprintf(maxstr, sizeof(maxstr), "%ld", ++ my_hist->max_lat - my_hist->offset); ++ } else { ++ strcpy(minstr, ""); ++ strcpy(avgstr, minstr); ++ strcpy(maxstr, minstr); ++ } ++ ++ seq_printf(m, "#Minimum latency: %s microseconds\n" ++ "#Average latency: %s microseconds\n" ++ "#Maximum latency: %s microseconds\n" ++ "#Total samples: %llu\n" ++ "#There are %llu samples lower than %ld" ++ " microseconds.\n" ++ "#There are %llu samples greater or equal" ++ " than %ld microseconds.\n" ++ "#usecs\t%16s\n", ++ minstr, avgstr, maxstr, ++ my_hist->total_samples, ++ my_hist->below_hist_bound_samples, ++ -my_hist->offset, ++ my_hist->above_hist_bound_samples, ++ MAX_ENTRY_NUM - my_hist->offset, ++ "samples"); ++ } ++ if (index < MAX_ENTRY_NUM) { ++ index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL); ++ if (index_ptr) ++ *index_ptr = index; ++ } ++ ++ return index_ptr; ++} ++ ++static void *l_next(struct seq_file *m, void *p, loff_t *pos) ++{ ++ loff_t *index_ptr = p; ++ struct hist_data *my_hist = m->private; ++ ++ if (++*pos >= MAX_ENTRY_NUM) { ++ atomic_inc(&my_hist->hist_mode); ++ return NULL; ++ } ++ *index_ptr = *pos; ++ return index_ptr; ++} ++ ++static void l_stop(struct seq_file *m, void *p) ++{ ++ kfree(p); ++} ++ ++static int l_show(struct seq_file *m, void *p) ++{ ++ int index = *(loff_t *) p; ++ struct hist_data *my_hist = m->private; ++ ++ seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset, ++ my_hist->hist_array[index]); ++ return 0; ++} ++ ++static struct seq_operations latency_hist_seq_op = { ++ .start = l_start, ++ .next = l_next, ++ .stop = l_stop, ++ .show = l_show ++}; ++ ++static int latency_hist_open(struct inode *inode, struct file *file) ++{ ++ int ret; ++ ++ ret = seq_open(file, &latency_hist_seq_op); ++ if (!ret) { ++ struct seq_file *seq = file->private_data; ++ seq->private = inode->i_private; ++ } ++ return ret; ++} ++ ++static struct file_operations latency_hist_fops = { ++ .open = latency_hist_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = seq_release, ++}; ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++static void clear_maxlatprocdata(struct maxlatproc_data *mp) ++{ ++ mp->comm[0] = mp->current_comm[0] = '\0'; ++ mp->prio = mp->current_prio = mp->pid = mp->current_pid = ++ mp->latency = mp->timeroffset = -1; ++ mp->timestamp = 0; ++} ++#endif ++ ++static void hist_reset(struct hist_data *hist) ++{ ++ atomic_dec(&hist->hist_mode); ++ ++ memset(hist->hist_array, 0, sizeof(hist->hist_array)); ++ hist->below_hist_bound_samples = 0ULL; ++ hist->above_hist_bound_samples = 0ULL; ++ 
hist->min_lat = LONG_MAX; ++ hist->max_lat = LONG_MIN; ++ hist->total_samples = 0ULL; ++ hist->accumulate_lat = 0LL; ++ ++ atomic_inc(&hist->hist_mode); ++} ++ ++static ssize_t ++latency_hist_reset(struct file *file, const char __user *a, ++ size_t size, loff_t *off) ++{ ++ int cpu; ++ struct hist_data *hist = NULL; ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ struct maxlatproc_data *mp = NULL; ++#endif ++ off_t latency_type = (off_t) file->private_data; ++ ++ for_each_online_cpu(cpu) { ++ ++ switch (latency_type) { ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ case PREEMPTOFF_LATENCY: ++ hist = &per_cpu(preemptoff_hist, cpu); ++ break; ++#endif ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ case IRQSOFF_LATENCY: ++ hist = &per_cpu(irqsoff_hist, cpu); ++ break; ++#endif ++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) ++ case PREEMPTIRQSOFF_LATENCY: ++ hist = &per_cpu(preemptirqsoff_hist, cpu); ++ break; ++#endif ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ case WAKEUP_LATENCY: ++ hist = &per_cpu(wakeup_latency_hist, cpu); ++ mp = &per_cpu(wakeup_maxlatproc, cpu); ++ break; ++ case WAKEUP_LATENCY_SHAREDPRIO: ++ hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu); ++ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu); ++ break; ++#endif ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ case MISSED_TIMER_OFFSETS: ++ hist = &per_cpu(missed_timer_offsets, cpu); ++ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu); ++ break; ++#endif ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ case TIMERANDWAKEUP_LATENCY: ++ hist = &per_cpu(timerandwakeup_latency_hist, cpu); ++ mp = &per_cpu(timerandwakeup_maxlatproc, cpu); ++ break; ++#endif ++ } ++ ++ hist_reset(hist); ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ if (latency_type == WAKEUP_LATENCY || ++ latency_type == WAKEUP_LATENCY_SHAREDPRIO || ++ latency_type == MISSED_TIMER_OFFSETS || ++ latency_type == TIMERANDWAKEUP_LATENCY) ++ clear_maxlatprocdata(mp); ++#endif ++ } ++ ++ return size; ++} ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++static ssize_t ++show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) ++{ ++ char buf[64]; ++ int r; ++ unsigned long *this_pid = file->private_data; ++ ++ r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid); ++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); ++} ++ ++static ssize_t do_pid(struct file *file, const char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ char buf[64]; ++ unsigned long pid; ++ unsigned long *this_pid = file->private_data; ++ ++ if (cnt >= sizeof(buf)) ++ return -EINVAL; ++ ++ if (copy_from_user(&buf, ubuf, cnt)) ++ return -EFAULT; ++ ++ buf[cnt] = '\0'; ++ ++ if (strict_strtoul(buf, 10, &pid)) ++ return(-EINVAL); ++ ++ *this_pid = pid; ++ ++ return cnt; ++} ++#endif ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++static ssize_t ++show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) ++{ ++ int r; ++ struct maxlatproc_data *mp = file->private_data; ++ int strmaxlen = (TASK_COMM_LEN * 2) + (8 * 8); ++ unsigned long long t; ++ unsigned long usecs, secs; ++ char *buf; ++ ++ if (mp->pid == -1 || mp->current_pid == -1) { ++ buf = "(none)\n"; ++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, ++ strlen(buf)); ++ } ++ ++ buf = kmalloc(strmaxlen, GFP_KERNEL); ++ if (buf == NULL) ++ return -ENOMEM; 
++ ++ t = ns2usecs(mp->timestamp); ++ usecs = do_div(t, USEC_PER_SEC); ++ secs = (unsigned long) t; ++ r = snprintf(buf, strmaxlen, ++ "%d %d %ld (%ld) %s <- %d %d %s %lu.%06lu\n", mp->pid, ++ MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->timeroffset, mp->comm, ++ mp->current_pid, MAX_RT_PRIO-1 - mp->current_prio, mp->current_comm, ++ secs, usecs); ++ r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); ++ kfree(buf); ++ return r; ++} ++#endif ++ ++static ssize_t ++show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) ++{ ++ char buf[64]; ++ struct enable_data *ed = file->private_data; ++ int r; ++ ++ r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled); ++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); ++} ++ ++static ssize_t ++do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) ++{ ++ char buf[64]; ++ long enable; ++ struct enable_data *ed = file->private_data; ++ ++ if (cnt >= sizeof(buf)) ++ return -EINVAL; ++ ++ if (copy_from_user(&buf, ubuf, cnt)) ++ return -EFAULT; ++ ++ buf[cnt] = 0; ++ ++ if (strict_strtol(buf, 10, &enable)) ++ return(-EINVAL); ++ ++ if ((enable && ed->enabled) || (!enable && !ed->enabled)) ++ return cnt; ++ ++ if (enable) { ++ int ret; ++ ++ switch (ed->latency_type) { ++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) ++ case PREEMPTIRQSOFF_LATENCY: ++ ret = register_trace_preemptirqsoff_hist( ++ probe_preemptirqsoff_hist, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_preemptirqsoff_hist " ++ "to trace_preemptirqsoff_hist\n"); ++ return ret; ++ } ++ break; ++#endif ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ case WAKEUP_LATENCY: ++ ret = register_trace_sched_wakeup( ++ probe_wakeup_latency_hist_start, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_wakeup_latency_hist_start " ++ "to trace_sched_wakeup\n"); ++ return ret; ++ } ++ ret = register_trace_sched_wakeup_new( ++ probe_wakeup_latency_hist_start, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_wakeup_latency_hist_start " ++ "to trace_sched_wakeup_new\n"); ++ unregister_trace_sched_wakeup( ++ probe_wakeup_latency_hist_start, NULL); ++ return ret; ++ } ++ ret = register_trace_sched_switch( ++ probe_wakeup_latency_hist_stop, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_wakeup_latency_hist_stop " ++ "to trace_sched_switch\n"); ++ unregister_trace_sched_wakeup( ++ probe_wakeup_latency_hist_start, NULL); ++ unregister_trace_sched_wakeup_new( ++ probe_wakeup_latency_hist_start, NULL); ++ return ret; ++ } ++ ret = register_trace_sched_migrate_task( ++ probe_sched_migrate_task, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_sched_migrate_task " ++ "to trace_sched_migrate_task\n"); ++ unregister_trace_sched_wakeup( ++ probe_wakeup_latency_hist_start, NULL); ++ unregister_trace_sched_wakeup_new( ++ probe_wakeup_latency_hist_start, NULL); ++ unregister_trace_sched_switch( ++ probe_wakeup_latency_hist_stop, NULL); ++ return ret; ++ } ++ break; ++#endif ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ case MISSED_TIMER_OFFSETS: ++ ret = register_trace_hrtimer_interrupt( ++ probe_hrtimer_interrupt, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_hrtimer_interrupt " ++ "to trace_hrtimer_interrupt\n"); ++ return ret; ++ } ++ break; ++#endif ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ case TIMERANDWAKEUP_LATENCY: ++ if 
(!wakeup_latency_enabled_data.enabled || ++ !missed_timer_offsets_enabled_data.enabled) ++ return -EINVAL; ++ break; ++#endif ++ default: ++ break; ++ } ++ } else { ++ switch (ed->latency_type) { ++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) ++ case PREEMPTIRQSOFF_LATENCY: ++ { ++ int cpu; ++ ++ unregister_trace_preemptirqsoff_hist( ++ probe_preemptirqsoff_hist, NULL); ++ for_each_online_cpu(cpu) { ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ per_cpu(hist_irqsoff_counting, ++ cpu) = 0; ++#endif ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ per_cpu(hist_preemptoff_counting, ++ cpu) = 0; ++#endif ++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) ++ per_cpu(hist_preemptirqsoff_counting, ++ cpu) = 0; ++#endif ++ } ++ } ++ break; ++#endif ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ case WAKEUP_LATENCY: ++ { ++ int cpu; ++ ++ unregister_trace_sched_wakeup( ++ probe_wakeup_latency_hist_start, NULL); ++ unregister_trace_sched_wakeup_new( ++ probe_wakeup_latency_hist_start, NULL); ++ unregister_trace_sched_switch( ++ probe_wakeup_latency_hist_stop, NULL); ++ unregister_trace_sched_migrate_task( ++ probe_sched_migrate_task, NULL); ++ ++ for_each_online_cpu(cpu) { ++ per_cpu(wakeup_task, cpu) = NULL; ++ per_cpu(wakeup_sharedprio, cpu) = 0; ++ } ++ } ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ timerandwakeup_enabled_data.enabled = 0; ++#endif ++ break; ++#endif ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ case MISSED_TIMER_OFFSETS: ++ unregister_trace_hrtimer_interrupt( ++ probe_hrtimer_interrupt, NULL); ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ timerandwakeup_enabled_data.enabled = 0; ++#endif ++ break; ++#endif ++ default: ++ break; ++ } ++ } ++ ed->enabled = enable; ++ return cnt; ++} ++ ++static const struct file_operations latency_hist_reset_fops = { ++ .open = tracing_open_generic, ++ .write = latency_hist_reset, ++}; ++ ++static const struct file_operations enable_fops = { ++ .open = tracing_open_generic, ++ .read = show_enable, ++ .write = do_enable, ++}; ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++static const struct file_operations pid_fops = { ++ .open = tracing_open_generic, ++ .read = show_pid, ++ .write = do_pid, ++}; ++ ++static const struct file_operations maxlatproc_fops = { ++ .open = tracing_open_generic, ++ .read = show_maxlatproc, ++}; ++#endif ++ ++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) ++static notrace void probe_preemptirqsoff_hist(void *v, int reason, ++ int starthist) ++{ ++ int cpu = raw_smp_processor_id(); ++ int time_set = 0; ++ ++ if (starthist) { ++ cycle_t uninitialized_var(start); ++ ++ if (!preempt_count() && !irqs_disabled()) ++ return; ++ ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ if ((reason == IRQS_OFF || reason == TRACE_START) && ++ !per_cpu(hist_irqsoff_counting, cpu)) { ++ per_cpu(hist_irqsoff_counting, cpu) = 1; ++ start = ftrace_now(cpu); ++ time_set++; ++ per_cpu(hist_irqsoff_start, cpu) = start; ++ } ++#endif ++ ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ if ((reason == PREEMPT_OFF || reason == TRACE_START) && ++ !per_cpu(hist_preemptoff_counting, cpu)) { ++ per_cpu(hist_preemptoff_counting, cpu) = 1; ++ if (!(time_set++)) ++ start = ftrace_now(cpu); ++ per_cpu(hist_preemptoff_start, cpu) = start; ++ } ++#endif ++ ++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) ++ if (per_cpu(hist_irqsoff_counting, cpu) && ++ per_cpu(hist_preemptoff_counting, cpu) && ++ !per_cpu(hist_preemptirqsoff_counting, cpu)) { ++ 
per_cpu(hist_preemptirqsoff_counting, cpu) = 1; ++ if (!time_set) ++ start = ftrace_now(cpu); ++ per_cpu(hist_preemptirqsoff_start, cpu) = start; ++ } ++#endif ++ } else { ++ cycle_t uninitialized_var(stop); ++ ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ if ((reason == IRQS_ON || reason == TRACE_STOP) && ++ per_cpu(hist_irqsoff_counting, cpu)) { ++ cycle_t start = per_cpu(hist_irqsoff_start, cpu); ++ ++ stop = ftrace_now(cpu); ++ time_set++; ++ if (start) { ++ long latency = ((long) (stop - start)) / ++ NSECS_PER_USECS; ++ ++ latency_hist(IRQSOFF_LATENCY, cpu, latency, 0, ++ stop, NULL); ++ } ++ per_cpu(hist_irqsoff_counting, cpu) = 0; ++ } ++#endif ++ ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ if ((reason == PREEMPT_ON || reason == TRACE_STOP) && ++ per_cpu(hist_preemptoff_counting, cpu)) { ++ cycle_t start = per_cpu(hist_preemptoff_start, cpu); ++ ++ if (!(time_set++)) ++ stop = ftrace_now(cpu); ++ if (start) { ++ long latency = ((long) (stop - start)) / ++ NSECS_PER_USECS; ++ ++ latency_hist(PREEMPTOFF_LATENCY, cpu, latency, ++ 0, stop, NULL); ++ } ++ per_cpu(hist_preemptoff_counting, cpu) = 0; ++ } ++#endif ++ ++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) ++ if ((!per_cpu(hist_irqsoff_counting, cpu) || ++ !per_cpu(hist_preemptoff_counting, cpu)) && ++ per_cpu(hist_preemptirqsoff_counting, cpu)) { ++ cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu); ++ ++ if (!time_set) ++ stop = ftrace_now(cpu); ++ if (start) { ++ long latency = ((long) (stop - start)) / ++ NSECS_PER_USECS; ++ ++ latency_hist(PREEMPTIRQSOFF_LATENCY, cpu, ++ latency, 0, stop, NULL); ++ } ++ per_cpu(hist_preemptirqsoff_counting, cpu) = 0; ++ } ++#endif ++ } ++} ++#endif ++ ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++static DEFINE_RAW_SPINLOCK(wakeup_lock); ++static notrace void probe_sched_migrate_task(void *v, struct task_struct *task, ++ int cpu) ++{ ++ int old_cpu = task_cpu(task); ++ ++ if (cpu != old_cpu) { ++ unsigned long flags; ++ struct task_struct *cpu_wakeup_task; ++ ++ raw_spin_lock_irqsave(&wakeup_lock, flags); ++ ++ cpu_wakeup_task = per_cpu(wakeup_task, old_cpu); ++ if (task == cpu_wakeup_task) { ++ put_task_struct(cpu_wakeup_task); ++ per_cpu(wakeup_task, old_cpu) = NULL; ++ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task; ++ get_task_struct(cpu_wakeup_task); ++ } ++ ++ raw_spin_unlock_irqrestore(&wakeup_lock, flags); ++ } ++} ++ ++static notrace void probe_wakeup_latency_hist_start(void *v, ++ struct task_struct *p, int success) ++{ ++ unsigned long flags; ++ struct task_struct *curr = current; ++ int cpu = task_cpu(p); ++ struct task_struct *cpu_wakeup_task; ++ ++ raw_spin_lock_irqsave(&wakeup_lock, flags); ++ ++ cpu_wakeup_task = per_cpu(wakeup_task, cpu); ++ ++ if (wakeup_pid) { ++ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) || ++ p->prio == curr->prio) ++ per_cpu(wakeup_sharedprio, cpu) = 1; ++ if (likely(wakeup_pid != task_pid_nr(p))) ++ goto out; ++ } else { ++ if (likely(!rt_task(p)) || ++ (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) || ++ p->prio > curr->prio) ++ goto out; ++ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) || ++ p->prio == curr->prio) ++ per_cpu(wakeup_sharedprio, cpu) = 1; ++ } ++ ++ if (cpu_wakeup_task) ++ put_task_struct(cpu_wakeup_task); ++ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p; ++ get_task_struct(cpu_wakeup_task); ++ cpu_wakeup_task->preempt_timestamp_hist = ++ ftrace_now(raw_smp_processor_id()); ++out: ++ raw_spin_unlock_irqrestore(&wakeup_lock, flags); ++} ++ ++static notrace void 
probe_wakeup_latency_hist_stop(void *v, ++ struct task_struct *prev, struct task_struct *next) ++{ ++ unsigned long flags; ++ int cpu = task_cpu(next); ++ long latency; ++ cycle_t stop; ++ struct task_struct *cpu_wakeup_task; ++ ++ raw_spin_lock_irqsave(&wakeup_lock, flags); ++ ++ cpu_wakeup_task = per_cpu(wakeup_task, cpu); ++ ++ if (cpu_wakeup_task == NULL) ++ goto out; ++ ++ /* Already running? */ ++ if (unlikely(current == cpu_wakeup_task)) ++ goto out_reset; ++ ++ if (next != cpu_wakeup_task) { ++ if (next->prio < cpu_wakeup_task->prio) ++ goto out_reset; ++ ++ if (next->prio == cpu_wakeup_task->prio) ++ per_cpu(wakeup_sharedprio, cpu) = 1; ++ ++ goto out; ++ } ++ ++ if (current->prio == cpu_wakeup_task->prio) ++ per_cpu(wakeup_sharedprio, cpu) = 1; ++ ++ /* ++ * The task we are waiting for is about to be switched to. ++ * Calculate latency and store it in histogram. ++ */ ++ stop = ftrace_now(raw_smp_processor_id()); ++ ++ latency = ((long) (stop - next->preempt_timestamp_hist)) / ++ NSECS_PER_USECS; ++ ++ if (per_cpu(wakeup_sharedprio, cpu)) { ++ latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop, ++ next); ++ per_cpu(wakeup_sharedprio, cpu) = 0; ++ } else { ++ latency_hist(WAKEUP_LATENCY, cpu, latency, 0, stop, next); ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ if (timerandwakeup_enabled_data.enabled) { ++ latency_hist(TIMERANDWAKEUP_LATENCY, cpu, ++ next->timer_offset + latency, next->timer_offset, ++ stop, next); ++ } ++#endif ++ } ++ ++out_reset: ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ next->timer_offset = 0; ++#endif ++ put_task_struct(cpu_wakeup_task); ++ per_cpu(wakeup_task, cpu) = NULL; ++out: ++ raw_spin_unlock_irqrestore(&wakeup_lock, flags); ++} ++#endif ++ ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++static notrace void probe_hrtimer_interrupt(void *v, int cpu, ++ long long latency_ns, struct task_struct *curr, struct task_struct *task) ++{ ++ if (latency_ns <= 0 && task != NULL && rt_task(task) && ++ (task->prio < curr->prio || ++ (task->prio == curr->prio && ++ !cpumask_test_cpu(cpu, &task->cpus_allowed)))) { ++ long latency; ++ cycle_t now; ++ ++ if (missed_timer_offsets_pid) { ++ if (likely(missed_timer_offsets_pid != ++ task_pid_nr(task))) ++ return; ++ } ++ ++ now = ftrace_now(cpu); ++ latency = (long) div_s64(-latency_ns, NSECS_PER_USECS); ++ latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now, ++ task); ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ task->timer_offset = latency; ++#endif ++ } ++} ++#endif ++ ++static __init int latency_hist_init(void) ++{ ++ struct dentry *latency_hist_root = NULL; ++ struct dentry *dentry; ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ struct dentry *dentry_sharedprio; ++#endif ++ struct dentry *entry; ++ struct dentry *enable_root; ++ int i = 0; ++ struct hist_data *my_hist; ++ char name[64]; ++ char *cpufmt = "CPU%d"; ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ char *cpufmt_maxlatproc = "max_latency-CPU%d"; ++ struct maxlatproc_data *mp = NULL; ++#endif ++ ++ dentry = tracing_init_dentry(); ++ latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry); ++ enable_root = debugfs_create_dir("enable", latency_hist_root); ++ ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root); ++ for_each_possible_cpu(i) { ++ sprintf(name, cpufmt, i); ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(irqsoff_hist, i), &latency_hist_fops); ++ my_hist = &per_cpu(irqsoff_hist, i); ++ atomic_set(&my_hist->hist_mode, 
1); ++ my_hist->min_lat = LONG_MAX; ++ } ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops); ++#endif ++ ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ dentry = debugfs_create_dir(preemptoff_hist_dir, ++ latency_hist_root); ++ for_each_possible_cpu(i) { ++ sprintf(name, cpufmt, i); ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(preemptoff_hist, i), &latency_hist_fops); ++ my_hist = &per_cpu(preemptoff_hist, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = LONG_MAX; ++ } ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops); ++#endif ++ ++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) ++ dentry = debugfs_create_dir(preemptirqsoff_hist_dir, ++ latency_hist_root); ++ for_each_possible_cpu(i) { ++ sprintf(name, cpufmt, i); ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops); ++ my_hist = &per_cpu(preemptirqsoff_hist, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = LONG_MAX; ++ } ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops); ++#endif ++ ++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) ++ entry = debugfs_create_file("preemptirqsoff", 0644, ++ enable_root, (void *)&preemptirqsoff_enabled_data, ++ &enable_fops); ++#endif ++ ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ dentry = debugfs_create_dir(wakeup_latency_hist_dir, ++ latency_hist_root); ++ dentry_sharedprio = debugfs_create_dir( ++ wakeup_latency_hist_dir_sharedprio, dentry); ++ for_each_possible_cpu(i) { ++ sprintf(name, cpufmt, i); ++ ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(wakeup_latency_hist, i), ++ &latency_hist_fops); ++ my_hist = &per_cpu(wakeup_latency_hist, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = LONG_MAX; ++ ++ entry = debugfs_create_file(name, 0444, dentry_sharedprio, ++ &per_cpu(wakeup_latency_hist_sharedprio, i), ++ &latency_hist_fops); ++ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = LONG_MAX; ++ ++ sprintf(name, cpufmt_maxlatproc, i); ++ ++ mp = &per_cpu(wakeup_maxlatproc, i); ++ entry = debugfs_create_file(name, 0444, dentry, mp, ++ &maxlatproc_fops); ++ clear_maxlatprocdata(mp); ++ ++ mp = &per_cpu(wakeup_maxlatproc_sharedprio, i); ++ entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp, ++ &maxlatproc_fops); ++ clear_maxlatprocdata(mp); ++ } ++ entry = debugfs_create_file("pid", 0644, dentry, ++ (void *)&wakeup_pid, &pid_fops); ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)WAKEUP_LATENCY, &latency_hist_reset_fops); ++ entry = debugfs_create_file("reset", 0644, dentry_sharedprio, ++ (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops); ++ entry = debugfs_create_file("wakeup", 0644, ++ enable_root, (void *)&wakeup_latency_enabled_data, ++ &enable_fops); ++#endif ++ ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ dentry = debugfs_create_dir(missed_timer_offsets_dir, ++ latency_hist_root); ++ for_each_possible_cpu(i) { ++ sprintf(name, cpufmt, i); ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(missed_timer_offsets, i), &latency_hist_fops); ++ my_hist = &per_cpu(missed_timer_offsets, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = LONG_MAX; ++ ++ sprintf(name, cpufmt_maxlatproc, i); ++ mp = 
&per_cpu(missed_timer_offsets_maxlatproc, i); ++ entry = debugfs_create_file(name, 0444, dentry, mp, ++ &maxlatproc_fops); ++ clear_maxlatprocdata(mp); ++ } ++ entry = debugfs_create_file("pid", 0644, dentry, ++ (void *)&missed_timer_offsets_pid, &pid_fops); ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops); ++ entry = debugfs_create_file("missed_timer_offsets", 0644, ++ enable_root, (void *)&missed_timer_offsets_enabled_data, ++ &enable_fops); ++#endif ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ dentry = debugfs_create_dir(timerandwakeup_latency_hist_dir, ++ latency_hist_root); ++ for_each_possible_cpu(i) { ++ sprintf(name, cpufmt, i); ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(timerandwakeup_latency_hist, i), ++ &latency_hist_fops); ++ my_hist = &per_cpu(timerandwakeup_latency_hist, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = LONG_MAX; ++ ++ sprintf(name, cpufmt_maxlatproc, i); ++ mp = &per_cpu(timerandwakeup_maxlatproc, i); ++ entry = debugfs_create_file(name, 0444, dentry, mp, ++ &maxlatproc_fops); ++ clear_maxlatprocdata(mp); ++ } ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)TIMERANDWAKEUP_LATENCY, &latency_hist_reset_fops); ++ entry = debugfs_create_file("timerandwakeup", 0644, ++ enable_root, (void *)&timerandwakeup_enabled_data, ++ &enable_fops); ++#endif ++ return 0; ++} ++ ++__initcall(latency_hist_init); +Index: linux-stable/kernel/trace/trace_irqsoff.c +=================================================================== +--- linux-stable.orig/kernel/trace/trace_irqsoff.c ++++ linux-stable/kernel/trace/trace_irqsoff.c +@@ -17,6 +17,7 @@ + #include + + #include "trace.h" ++#include + + static struct trace_array *irqsoff_trace __read_mostly; + static int tracer_enabled __read_mostly; +@@ -439,11 +440,13 @@ void start_critical_timings(void) + { + if (preempt_trace() || irq_trace()) + start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); ++ trace_preemptirqsoff_hist(TRACE_START, 1); + } + EXPORT_SYMBOL_GPL(start_critical_timings); + + void stop_critical_timings(void) + { ++ trace_preemptirqsoff_hist(TRACE_STOP, 0); + if (preempt_trace() || irq_trace()) + stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); + } +@@ -453,6 +456,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings) + #ifdef CONFIG_PROVE_LOCKING + void time_hardirqs_on(unsigned long a0, unsigned long a1) + { ++ trace_preemptirqsoff_hist(IRQS_ON, 0); + if (!preempt_trace() && irq_trace()) + stop_critical_timing(a0, a1); + } +@@ -461,6 +465,7 @@ void time_hardirqs_off(unsigned long a0, + { + if (!preempt_trace() && irq_trace()) + start_critical_timing(a0, a1); ++ trace_preemptirqsoff_hist(IRQS_OFF, 1); + } + + #else /* !CONFIG_PROVE_LOCKING */ +@@ -486,6 +491,7 @@ inline void print_irqtrace_events(struct + */ + void trace_hardirqs_on(void) + { ++ trace_preemptirqsoff_hist(IRQS_ON, 0); + if (!preempt_trace() && irq_trace()) + stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); + } +@@ -495,11 +501,13 @@ void trace_hardirqs_off(void) + { + if (!preempt_trace() && irq_trace()) + start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); ++ trace_preemptirqsoff_hist(IRQS_OFF, 1); + } + EXPORT_SYMBOL(trace_hardirqs_off); + + void trace_hardirqs_on_caller(unsigned long caller_addr) + { ++ trace_preemptirqsoff_hist(IRQS_ON, 0); + if (!preempt_trace() && irq_trace()) + stop_critical_timing(CALLER_ADDR0, caller_addr); + } +@@ -509,6 +517,7 @@ void 
trace_hardirqs_off_caller(unsigned + { + if (!preempt_trace() && irq_trace()) + start_critical_timing(CALLER_ADDR0, caller_addr); ++ trace_preemptirqsoff_hist(IRQS_OFF, 1); + } + EXPORT_SYMBOL(trace_hardirqs_off_caller); + +@@ -518,12 +527,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller) + #ifdef CONFIG_PREEMPT_TRACER + void trace_preempt_on(unsigned long a0, unsigned long a1) + { ++ trace_preemptirqsoff_hist(PREEMPT_ON, 0); + if (preempt_trace() && !irq_trace()) + stop_critical_timing(a0, a1); + } + + void trace_preempt_off(unsigned long a0, unsigned long a1) + { ++ trace_preemptirqsoff_hist(PREEMPT_ON, 1); + if (preempt_trace() && !irq_trace()) + start_critical_timing(a0, a1); + } diff --git a/debian/patches/features/all/rt/lglocks-rt.patch b/debian/patches/features/all/rt/lglocks-rt.patch new file mode 100644 index 000000000..05f07bf5a --- /dev/null +++ b/debian/patches/features/all/rt/lglocks-rt.patch @@ -0,0 +1,178 @@ +Subject: lglocks-rt.patch +From: Thomas Gleixner +Date: Wed, 15 Jun 2011 11:02:21 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/lglock.h | 19 +++++++++++++++-- + kernel/lglock.c | 54 ++++++++++++++++++++++++++++++++----------------- + 2 files changed, 53 insertions(+), 20 deletions(-) + +Index: linux-stable/include/linux/lglock.h +=================================================================== +--- linux-stable.orig/include/linux/lglock.h ++++ linux-stable/include/linux/lglock.h +@@ -42,22 +42,37 @@ + #endif + + struct lglock { ++#ifndef CONFIG_PREEMPT_RT_FULL + arch_spinlock_t __percpu *lock; ++#else ++ struct rt_mutex __percpu *lock; ++#endif + #ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lock_class_key lock_key; + struct lockdep_map lock_dep_map; + #endif + }; + +-#define DEFINE_LGLOCK(name) \ ++#ifndef CONFIG_PREEMPT_RT_FULL ++# define DEFINE_LGLOCK(name) \ + static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \ + = __ARCH_SPIN_LOCK_UNLOCKED; \ + struct lglock name = { .lock = &name ## _lock } + +-#define DEFINE_STATIC_LGLOCK(name) \ ++# define DEFINE_STATIC_LGLOCK(name) \ + static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \ + = __ARCH_SPIN_LOCK_UNLOCKED; \ + static struct lglock name = { .lock = &name ## _lock } ++#else ++ ++# define DEFINE_LGLOCK(name) \ ++ static DEFINE_PER_CPU(struct rt_mutex, name ## _lock); \ ++ struct lglock name = { .lock = &name ## _lock } ++ ++# define DEFINE_STATIC_LGLOCK(name) \ ++ static DEFINE_PER_CPU(struct rt_mutex, name ## _lock); \ ++ static struct lglock name = { .lock = &name ## _lock } ++#endif + + void lg_lock_init(struct lglock *lg, char *name); + void lg_local_lock(struct lglock *lg); +Index: linux-stable/kernel/lglock.c +=================================================================== +--- linux-stable.orig/kernel/lglock.c ++++ linux-stable/kernel/lglock.c +@@ -4,6 +4,15 @@ + #include + #include + ++#ifndef CONFIG_PREEMPT_RT_FULL ++# define lg_lock_ptr arch_spinlock_t ++# define lg_do_lock(l) arch_spin_lock(l) ++# define lg_do_unlock(l) arch_spin_unlock(l) ++#else ++# define lg_lock_ptr struct rt_mutex ++# define lg_do_lock(l) __rt_spin_lock(l) ++# define lg_do_unlock(l) __rt_spin_unlock(l) ++#endif + /* + * Note there is no uninit, so lglocks cannot be defined in + * modules (but it's fine to use them from there) +@@ -12,51 +21,60 @@ + + void lg_lock_init(struct lglock *lg, char *name) + { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ int i; ++ ++ for_each_possible_cpu(i) { ++ struct rt_mutex *lock = per_cpu_ptr(lg->lock, 
i); ++ ++ rt_mutex_init(lock); ++ } ++#endif + LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0); + } + EXPORT_SYMBOL(lg_lock_init); + + void lg_local_lock(struct lglock *lg) + { +- arch_spinlock_t *lock; ++ lg_lock_ptr *lock; + +- preempt_disable(); ++ migrate_disable(); + rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_); + lock = this_cpu_ptr(lg->lock); +- arch_spin_lock(lock); ++ lg_do_lock(lock); + } + EXPORT_SYMBOL(lg_local_lock); + + void lg_local_unlock(struct lglock *lg) + { +- arch_spinlock_t *lock; ++ lg_lock_ptr *lock; + + rwlock_release(&lg->lock_dep_map, 1, _RET_IP_); + lock = this_cpu_ptr(lg->lock); +- arch_spin_unlock(lock); +- preempt_enable(); ++ lg_do_unlock(lock); ++ migrate_enable(); + } + EXPORT_SYMBOL(lg_local_unlock); + + void lg_local_lock_cpu(struct lglock *lg, int cpu) + { +- arch_spinlock_t *lock; ++ lg_lock_ptr *lock; + +- preempt_disable(); ++ preempt_disable_nort(); + rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_); + lock = per_cpu_ptr(lg->lock, cpu); +- arch_spin_lock(lock); ++ lg_do_lock(lock); + } + EXPORT_SYMBOL(lg_local_lock_cpu); + + void lg_local_unlock_cpu(struct lglock *lg, int cpu) + { +- arch_spinlock_t *lock; ++ lg_lock_ptr *lock; + + rwlock_release(&lg->lock_dep_map, 1, _RET_IP_); + lock = per_cpu_ptr(lg->lock, cpu); +- arch_spin_unlock(lock); +- preempt_enable(); ++ lg_do_unlock(lock); ++ preempt_enable_nort(); + } + EXPORT_SYMBOL(lg_local_unlock_cpu); + +@@ -64,12 +82,12 @@ void lg_global_lock(struct lglock *lg) + { + int i; + +- preempt_disable(); ++ preempt_disable_nort(); + rwlock_acquire(&lg->lock_dep_map, 0, 0, _RET_IP_); + for_each_possible_cpu(i) { +- arch_spinlock_t *lock; ++ lg_lock_ptr *lock; + lock = per_cpu_ptr(lg->lock, i); +- arch_spin_lock(lock); ++ lg_do_lock(lock); + } + } + EXPORT_SYMBOL(lg_global_lock); +@@ -80,10 +98,10 @@ void lg_global_unlock(struct lglock *lg) + + rwlock_release(&lg->lock_dep_map, 1, _RET_IP_); + for_each_possible_cpu(i) { +- arch_spinlock_t *lock; ++ lg_lock_ptr *lock; + lock = per_cpu_ptr(lg->lock, i); +- arch_spin_unlock(lock); ++ lg_do_unlock(lock); + } +- preempt_enable(); ++ preempt_enable_nort(); + } + EXPORT_SYMBOL(lg_global_unlock); diff --git a/debian/patches/features/all/rt/list-add-list-last-entry.patch b/debian/patches/features/all/rt/list-add-list-last-entry.patch new file mode 100644 index 000000000..1df058f09 --- /dev/null +++ b/debian/patches/features/all/rt/list-add-list-last-entry.patch @@ -0,0 +1,32 @@ +Subject: list-add-list-last-entry.patch +From: Peter Zijlstra +Date: Tue, 21 Jun 2011 11:22:36 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/list.h | 11 +++++++++++ + 1 file changed, 11 insertions(+) + +Index: linux-stable/include/linux/list.h +=================================================================== +--- linux-stable.orig/include/linux/list.h ++++ linux-stable/include/linux/list.h +@@ -373,6 +373,17 @@ static inline void list_splice_tail_init + (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL) + + /** ++ * list_last_entry - get the last element from a list ++ * @ptr: the list head to take the element from. ++ * @type: the type of the struct this is embedded in. ++ * @member: the name of the list_struct within the struct. ++ * ++ * Note, that list is expected to be not empty. 
++ */ ++#define list_last_entry(ptr, type, member) \ ++ list_entry((ptr)->prev, type, member) ++ ++/** + * list_for_each - iterate over a list + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. diff --git a/debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch b/debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch new file mode 100644 index 000000000..140509a71 --- /dev/null +++ b/debian/patches/features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch @@ -0,0 +1,113 @@ +From: Paul Gortmaker +Date: Fri, 21 Jun 2013 15:07:25 -0400 +Subject: [PATCH] list_bl.h: make list head locking RT safe +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +As per changes in include/linux/jbd_common.h for avoiding the +bit_spin_locks on RT ("fs: jbd/jbd2: Make state lock and journal +head lock rt safe") we do the same thing here. + +We use the non atomic __set_bit and __clear_bit inside the scope of +the lock to preserve the ability of the existing LIST_DEBUG code to +use the zero'th bit in the sanity checks. + +As a bit spinlock, we had no lockdep visibility into the usage +of the list head locking. Now, if we were to implement it as a +standard non-raw spinlock, we would see: + +BUG: sleeping function called from invalid context at kernel/rtmutex.c:658 +in_atomic(): 1, irqs_disabled(): 0, pid: 122, name: udevd +5 locks held by udevd/122: + #0: (&sb->s_type->i_mutex_key#7/1){+.+.+.}, at: [] lock_rename+0xe8/0xf0 + #1: (rename_lock){+.+...}, at: [] d_move+0x2c/0x60 + #2: (&dentry->d_lock){+.+...}, at: [] dentry_lock_for_move+0xf3/0x130 + #3: (&dentry->d_lock/2){+.+...}, at: [] dentry_lock_for_move+0xc4/0x130 + #4: (&dentry->d_lock/3){+.+...}, at: [] dentry_lock_for_move+0xd7/0x130 +Pid: 122, comm: udevd Not tainted 3.4.47-rt62 #7 +Call Trace: + [] __might_sleep+0x134/0x1f0 + [] rt_spin_lock+0x24/0x60 + [] __d_shrink+0x5c/0xa0 + [] __d_drop+0x1d/0x40 + [] __d_move+0x8e/0x320 + [] d_move+0x3e/0x60 + [] vfs_rename+0x198/0x4c0 + [] sys_renameat+0x213/0x240 + [] ? _raw_spin_unlock+0x35/0x60 + [] ? do_page_fault+0x1ec/0x4b0 + [] ? retint_swapgs+0xe/0x13 + [] ? trace_hardirqs_on_thunk+0x3a/0x3f + [] sys_rename+0x1b/0x20 + [] system_call_fastpath+0x1a/0x1f + +Since we are only taking the lock during short lived list operations, +lets assume for now that it being raw won't be a significant latency +concern. 
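A minimal usage sketch may help here (illustrative only; demo_bl_add() is a hypothetical caller, not part of this patch or of the upstream API): callers of hlist_bl_lock()/hlist_bl_unlock() stay unchanged, only the lock implementation in the hunk below differs between !RT (bit spinlock) and RT (raw spinlock plus the non-atomic bit operations), so bit 0 of the head pointer is still set inside the critical section for the LIST_DEBUG sanity checks.

#include <linux/list_bl.h>

/* Illustrative sketch -- hypothetical caller, not part of the patch. */
static void demo_bl_add(struct hlist_bl_head *head, struct hlist_bl_node *node)
{
        hlist_bl_lock(head);            /* bit spinlock on !RT, raw spinlock on RT */
        hlist_bl_add_head(node, head);  /* bit 0 of head->first stays set for LIST_DEBUG */
        hlist_bl_unlock(head);
}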
+ +Cc: stable-rt@vger.kernel.org +Signed-off-by: Paul Gortmaker +Signed-off-by: Sebastian Andrzej Siewior +--- + include/linux/list_bl.h | 24 ++++++++++++++++++++++-- + 1 file changed, 22 insertions(+), 2 deletions(-) + +Index: linux-stable/include/linux/list_bl.h +=================================================================== +--- linux-stable.orig/include/linux/list_bl.h ++++ linux-stable/include/linux/list_bl.h +@@ -2,6 +2,7 @@ + #define _LINUX_LIST_BL_H + + #include ++#include + #include + + /* +@@ -32,13 +33,22 @@ + + struct hlist_bl_head { + struct hlist_bl_node *first; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ raw_spinlock_t lock; ++#endif + }; + + struct hlist_bl_node { + struct hlist_bl_node *next, **pprev; + }; +-#define INIT_HLIST_BL_HEAD(ptr) \ +- ((ptr)->first = NULL) ++ ++static inline void INIT_HLIST_BL_HEAD(struct hlist_bl_head *h) ++{ ++ h->first = NULL; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ raw_spin_lock_init(&h->lock); ++#endif ++} + + static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h) + { +@@ -117,12 +127,22 @@ static inline void hlist_bl_del_init(str + + static inline void hlist_bl_lock(struct hlist_bl_head *b) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_lock(0, (unsigned long *)b); ++#else ++ raw_spin_lock(&b->lock); ++ __set_bit(0, (unsigned long *)b); ++#endif + } + + static inline void hlist_bl_unlock(struct hlist_bl_head *b) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + __bit_spin_unlock(0, (unsigned long *)b); ++#else ++ __clear_bit(0, (unsigned long *)b); ++ raw_spin_unlock(&b->lock); ++#endif + } + + static inline bool hlist_bl_is_locked(struct hlist_bl_head *b) diff --git a/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch b/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch new file mode 100644 index 000000000..9cd360e92 --- /dev/null +++ b/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch @@ -0,0 +1,57 @@ +From: Thomas Gleixner +Date: Tue, 21 Jul 2009 22:34:14 +0200 +Subject: rt: local_irq_* variants depending on RT/!RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Add local_irq_*_(no)rt variant which are mainly used to break +interrupt disabled sections on PREEMPT_RT or to explicitely disable +interrupts on PREEMPT_RT. 
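A short, hedged usage sketch (hypothetical demo_* code, not taken from the patch set): a section that only needs hard interrupts disabled on !RT uses the _nort variants added in the irqflags.h hunk below, so it stays preemptible under PREEMPT_RT_FULL, where local_irq_save_nort() only saves the flags.

#include <linux/irqflags.h>

/* Illustrative sketch -- hypothetical counter, not part of the patch. */
static unsigned long demo_counter;

static void demo_bump_counter(void)
{
        unsigned long flags;

        local_irq_save_nort(flags);     /* real local_irq_save() on !RT, local_save_flags() on RT */
        demo_counter++;
        local_irq_restore_nort(flags);  /* restores flags on !RT, no-op on RT */
}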
+ +Signed-off-by: Thomas Gleixner + +--- + include/linux/interrupt.h | 2 +- + include/linux/irqflags.h | 19 +++++++++++++++++++ + 2 files changed, 20 insertions(+), 1 deletion(-) + +Index: linux-stable/include/linux/interrupt.h +=================================================================== +--- linux-stable.orig/include/linux/interrupt.h ++++ linux-stable/include/linux/interrupt.h +@@ -211,7 +211,7 @@ extern void devm_free_irq(struct device + #ifdef CONFIG_LOCKDEP + # define local_irq_enable_in_hardirq() do { } while (0) + #else +-# define local_irq_enable_in_hardirq() local_irq_enable() ++# define local_irq_enable_in_hardirq() local_irq_enable_nort() + #endif + + extern void disable_irq_nosync(unsigned int irq); +Index: linux-stable/include/linux/irqflags.h +=================================================================== +--- linux-stable.orig/include/linux/irqflags.h ++++ linux-stable/include/linux/irqflags.h +@@ -147,4 +147,23 @@ + + #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ + ++/* ++ * local_irq* variants depending on RT/!RT ++ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define local_irq_disable_nort() do { } while (0) ++# define local_irq_enable_nort() do { } while (0) ++# define local_irq_save_nort(flags) do { local_save_flags(flags); } while (0) ++# define local_irq_restore_nort(flags) do { (void)(flags); } while (0) ++# define local_irq_disable_rt() local_irq_disable() ++# define local_irq_enable_rt() local_irq_enable() ++#else ++# define local_irq_disable_nort() local_irq_disable() ++# define local_irq_enable_nort() local_irq_enable() ++# define local_irq_save_nort(flags) local_irq_save(flags) ++# define local_irq_restore_nort(flags) local_irq_restore(flags) ++# define local_irq_disable_rt() do { } while (0) ++# define local_irq_enable_rt() do { } while (0) ++#endif ++ + #endif diff --git a/debian/patches/features/all/rt/local-var.patch b/debian/patches/features/all/rt/local-var.patch new file mode 100644 index 000000000..553054362 --- /dev/null +++ b/debian/patches/features/all/rt/local-var.patch @@ -0,0 +1,26 @@ +Subject: local-var.patch +From: Thomas Gleixner +Date: Fri, 24 Jun 2011 18:40:37 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/percpu.h | 5 +++++ + 1 file changed, 5 insertions(+) + +Index: linux-stable/include/linux/percpu.h +=================================================================== +--- linux-stable.orig/include/linux/percpu.h ++++ linux-stable/include/linux/percpu.h +@@ -48,6 +48,11 @@ + preempt_enable(); \ + } while (0) + ++#define get_local_var(var) get_cpu_var(var) ++#define put_local_var(var) put_cpu_var(var) ++#define get_local_ptr(var) get_cpu_ptr(var) ++#define put_local_ptr(var) put_cpu_ptr(var) ++ + /* minimum unit size, also is the maximum supported allocation size */ + #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) + diff --git a/debian/patches/features/all/rt/local-vars-migrate-disable.patch b/debian/patches/features/all/rt/local-vars-migrate-disable.patch new file mode 100644 index 000000000..e507b67ed --- /dev/null +++ b/debian/patches/features/all/rt/local-vars-migrate-disable.patch @@ -0,0 +1,49 @@ +Subject: local-vars-migrate-disable.patch +From: Thomas Gleixner +Date: Tue, 28 Jun 2011 20:42:16 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/percpu.h | 28 ++++++++++++++++++++++++---- + 1 file changed, 24 insertions(+), 4 
deletions(-) + +Index: linux-stable/include/linux/percpu.h +=================================================================== +--- linux-stable.orig/include/linux/percpu.h ++++ linux-stable/include/linux/percpu.h +@@ -48,10 +48,30 @@ + preempt_enable(); \ + } while (0) + +-#define get_local_var(var) get_cpu_var(var) +-#define put_local_var(var) put_cpu_var(var) +-#define get_local_ptr(var) get_cpu_ptr(var) +-#define put_local_ptr(var) put_cpu_ptr(var) ++#ifndef CONFIG_PREEMPT_RT_FULL ++# define get_local_var(var) get_cpu_var(var) ++# define put_local_var(var) put_cpu_var(var) ++# define get_local_ptr(var) get_cpu_ptr(var) ++# define put_local_ptr(var) put_cpu_ptr(var) ++#else ++# define get_local_var(var) (*({ \ ++ migrate_disable(); \ ++ &__get_cpu_var(var); })) ++ ++# define put_local_var(var) do { \ ++ (void)&(var); \ ++ migrate_enable(); \ ++} while (0) ++ ++# define get_local_ptr(var) ({ \ ++ migrate_disable(); \ ++ this_cpu_ptr(var); }) ++ ++# define put_local_ptr(var) do { \ ++ (void)(var); \ ++ migrate_enable(); \ ++} while (0) ++#endif + + /* minimum unit size, also is the maximum supported allocation size */ + #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) diff --git a/debian/patches/features/all/rt/localversion.patch b/debian/patches/features/all/rt/localversion.patch new file mode 100644 index 000000000..cdbd566cd --- /dev/null +++ b/debian/patches/features/all/rt/localversion.patch @@ -0,0 +1,18 @@ +Subject: localversion.patch +From: Thomas Gleixner +Date: Fri, 08 Jul 2011 20:25:16 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +Signed-off-by: Peter Zijlstra +Link: http://lkml.kernel.org/n/tip-8vdw4bfcsds27cvox6rpb334@git.kernel.org +--- + localversion-rt | 1 + + 1 file changed, 1 insertion(+) + +Index: linux-stable/localversion-rt +=================================================================== +--- /dev/null ++++ linux-stable/localversion-rt +@@ -0,0 +1 @@ ++-rt1 diff --git a/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch b/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch new file mode 100644 index 000000000..5cdc57a32 --- /dev/null +++ b/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch @@ -0,0 +1,61 @@ +Subject: lockdep-rt.patch +From: Thomas Gleixner +Date: Sun, 17 Jul 2011 18:51:23 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/irqflags.h | 10 +++++++--- + kernel/lockdep.c | 2 ++ + 2 files changed, 9 insertions(+), 3 deletions(-) + +Index: linux-stable/include/linux/irqflags.h +=================================================================== +--- linux-stable.orig/include/linux/irqflags.h ++++ linux-stable/include/linux/irqflags.h +@@ -25,8 +25,6 @@ + # define trace_softirqs_enabled(p) ((p)->softirqs_enabled) + # define trace_hardirq_enter() do { current->hardirq_context++; } while (0) + # define trace_hardirq_exit() do { current->hardirq_context--; } while (0) +-# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) +-# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) + # define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, + #else + # define trace_hardirqs_on() do { } while (0) +@@ -39,9 +37,15 @@ + # define trace_softirqs_enabled(p) 0 + # define trace_hardirq_enter() do { } while (0) + # define trace_hardirq_exit() do { } while (0) ++# define 
INIT_TRACE_IRQFLAGS ++#endif ++ ++#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL) ++# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) ++# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) ++#else + # define lockdep_softirq_enter() do { } while (0) + # define lockdep_softirq_exit() do { } while (0) +-# define INIT_TRACE_IRQFLAGS + #endif + + #if defined(CONFIG_IRQSOFF_TRACER) || \ +Index: linux-stable/kernel/lockdep.c +=================================================================== +--- linux-stable.orig/kernel/lockdep.c ++++ linux-stable/kernel/lockdep.c +@@ -3541,6 +3541,7 @@ static void check_flags(unsigned long fl + } + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * We dont accurately track softirq state in e.g. + * hardirq contexts (such as on 4KSTACKS), so only +@@ -3555,6 +3556,7 @@ static void check_flags(unsigned long fl + DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); + } + } ++#endif + + if (!debug_locks) + print_irqtrace_events(current); diff --git a/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch b/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch new file mode 100644 index 000000000..b7d768edd --- /dev/null +++ b/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch @@ -0,0 +1,59 @@ +Subject: lockdep: Selftest: Only do hardirq context test for raw spinlock +From: Yong Zhang +Date: Mon, 16 Apr 2012 15:01:56 +0800 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +From: Yong Zhang + +On -rt there is no softirq context any more and rwlock is sleepable, +disable softirq context test and rwlock+irq test. 
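An illustrative sketch of the scenario that is no longer valid on RT and therefore no longer tested (hypothetical code, not taken from the selftests): rwlock_t becomes a sleeping lock there, so acquiring it with interrupts disabled, as the irq-context testcases do, only remains meaningful for raw spinlocks.

#include <linux/spinlock.h>

/* Illustrative sketch -- hypothetical testcase shape, not part of the patch. */
static DEFINE_RWLOCK(demo_rwlock);

static void demo_irq_context_case(void)
{
        unsigned long flags;

        local_irq_save(flags);          /* interrupts off, as in the irq-context tests */
        read_lock(&demo_rwlock);        /* fine on !RT; a sleeping lock on RT */
        read_unlock(&demo_rwlock);
        local_irq_restore(flags);
}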
+ +Signed-off-by: Yong Zhang +Cc: Yong Zhang +Link: http://lkml.kernel.org/r/1334559716-18447-3-git-send-email-yong.zhang0@gmail.com +Signed-off-by: Thomas Gleixner +--- + lib/locking-selftest.c | 23 +++++++++++++++++++++++ + 1 file changed, 23 insertions(+) + +Index: linux-stable/lib/locking-selftest.c +=================================================================== +--- linux-stable.orig/lib/locking-selftest.c ++++ linux-stable/lib/locking-selftest.c +@@ -1175,6 +1175,7 @@ void locking_selftest(void) + + printk(" --------------------------------------------------------------------------\n"); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * irq-context testcases: + */ +@@ -1187,6 +1188,28 @@ void locking_selftest(void) + + DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion); + // DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2); ++#else ++ /* On -rt, we only do hardirq context test for raw spinlock */ ++ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12); ++ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21); ++ ++ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12); ++ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21); ++ ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321); ++ ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321); ++#endif + + if (unexpected_testcase_failures) { + printk("-----------------------------------------------------------------\n"); diff --git a/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch b/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch new file mode 100644 index 000000000..8b8a08137 --- /dev/null +++ b/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch @@ -0,0 +1,93 @@ +From: Thomas Gleixner +Date: Tue, 6 Apr 2010 16:51:31 +0200 +Subject: md: raid5: Make raid5_percpu handling RT aware +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +__raid_run_ops() disables preemption with get_cpu() around the access +to the raid5_percpu variables. That causes scheduling while atomic +spews on RT. + +Serialize the access to the percpu data with a lock and keep the code +preemptible. 
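A condensed sketch of the pattern the patch applies (illustrative only; struct demo_percpu and demo_run() are hypothetical, and get_cpu_light()/put_cpu_light() are assumed to be provided elsewhere in this patch series): the per-CPU state is serialized by its own spinlock instead of by disabled preemption, so the section remains preemptible on RT.

#include <linux/percpu.h>
#include <linux/spinlock.h>

/* Illustrative sketch -- hypothetical per-CPU state, not part of the patch. */
struct demo_percpu {
        spinlock_t lock;                /* protection for -RT */
        unsigned long scratch;
};

static void demo_run(struct demo_percpu __percpu *pcpu)
{
        struct demo_percpu *p;
        int cpu;

        cpu = get_cpu_light();          /* instead of get_cpu() */
        p = per_cpu_ptr(pcpu, cpu);
        spin_lock(&p->lock);
        p->scratch++;                   /* per-CPU work, now preemptible on RT */
        spin_unlock(&p->lock);
        put_cpu_light();
}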
+ +Reported-by: Udo van den Heuvel +Signed-off-by: Thomas Gleixner +Tested-by: Udo van den Heuvel + +--- + drivers/md/raid5.c | 13 ++++++++----- + drivers/md/raid5.h | 1 + + 2 files changed, 9 insertions(+), 5 deletions(-) + +Index: linux-stable/drivers/md/raid5.c +=================================================================== +--- linux-stable.orig/drivers/md/raid5.c ++++ linux-stable/drivers/md/raid5.c +@@ -1418,8 +1418,9 @@ static void raid_run_ops(struct stripe_h + struct raid5_percpu *percpu; + unsigned long cpu; + +- cpu = get_cpu(); ++ cpu = get_cpu_light(); + percpu = per_cpu_ptr(conf->percpu, cpu); ++ spin_lock(&percpu->lock); + if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { + ops_run_biofill(sh); + overlap_clear++; +@@ -1471,7 +1472,8 @@ static void raid_run_ops(struct stripe_h + if (test_and_clear_bit(R5_Overlap, &dev->flags)) + wake_up(&sh->raid_conf->wait_for_overlap); + } +- put_cpu(); ++ spin_unlock(&percpu->lock); ++ put_cpu_light(); + } + + static int grow_one_stripe(struct r5conf *conf) +@@ -1907,7 +1909,7 @@ static void raid5_end_write_request(stru + } + + static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous); +- ++ + static void raid5_build_block(struct stripe_head *sh, int i, int previous) + { + struct r5dev *dev = &sh->dev[i]; +@@ -4315,7 +4317,7 @@ static void make_request(struct mddev *m + previous, + &dd_idx, NULL); + pr_debug("raid456: make_request, sector %llu logical %llu\n", +- (unsigned long long)new_sector, ++ (unsigned long long)new_sector, + (unsigned long long)logical_sector); + + sh = get_active_stripe(conf, new_sector, previous, +@@ -5111,6 +5113,7 @@ static int raid5_alloc_percpu(struct r5c + break; + } + per_cpu_ptr(conf->percpu, cpu)->scribble = scribble; ++ spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock); + } + #ifdef CONFIG_HOTPLUG_CPU + conf->cpu_notify.notifier_call = raid456_cpu_notify; +@@ -5285,7 +5288,7 @@ static int only_parity(int raid_disk, in + return 1; + break; + case ALGORITHM_PARITY_0_6: +- if (raid_disk == 0 || ++ if (raid_disk == 0 || + raid_disk == raid_disks - 1) + return 1; + break; +Index: linux-stable/drivers/md/raid5.h +=================================================================== +--- linux-stable.orig/drivers/md/raid5.h ++++ linux-stable/drivers/md/raid5.h +@@ -425,6 +425,7 @@ struct r5conf { + int recovery_disabled; + /* per cpu variables */ + struct raid5_percpu { ++ spinlock_t lock; /* Protection for -RT */ + struct page *spare_page; /* Used when checking P/Q in raid6 */ + void *scribble; /* space for constructing buffer + * lists and performing address diff --git a/debian/patches/features/all/rt/might-sleep-check-for-idle.patch b/debian/patches/features/all/rt/might-sleep-check-for-idle.patch new file mode 100644 index 000000000..5ee0c7645 --- /dev/null +++ b/debian/patches/features/all/rt/might-sleep-check-for-idle.patch @@ -0,0 +1,26 @@ +Subject: sched: Check for idle task in might_sleep() +From: Thomas Gleixner +Date: Wed, 09 Jan 2013 23:34:08 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Idle is not allowed to call sleeping functions ever! 
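A small illustration of what the added condition catches (hypothetical demo function, shown only to make the rule concrete): with the check in the hunk below, an annotated sleeping call made from the idle task now triggers the might_sleep warning even though preemption and interrupts would otherwise look legal.

#include <linux/kernel.h>

/* Illustrative sketch -- hypothetical offender, not part of the patch. */
static void demo_idle_hook(void)
{
        might_sleep();  /* now warns when current is the idle task */
}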
+ +Signed-off-by: Thomas Gleixner +--- + kernel/sched/core.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -7481,7 +7481,8 @@ void __might_sleep(const char *file, int + static unsigned long prev_jiffy; /* ratelimiting */ + + rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */ +- if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) || ++ if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && ++ !is_idle_task(current)) || + system_state != SYSTEM_RUNNING || oops_in_progress) + return; + if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) diff --git a/debian/patches/features/all/rt/migrate-disable-rt-variant.patch b/debian/patches/features/all/rt/migrate-disable-rt-variant.patch new file mode 100644 index 000000000..9d0d0bb60 --- /dev/null +++ b/debian/patches/features/all/rt/migrate-disable-rt-variant.patch @@ -0,0 +1,30 @@ +Subject: migrate-disable-rt-variant.patch +From: Thomas Gleixner +Date: Sun, 17 Jul 2011 19:48:20 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/preempt.h | 4 ++++ + 1 file changed, 4 insertions(+) + +Index: linux-stable/include/linux/preempt.h +=================================================================== +--- linux-stable.orig/include/linux/preempt.h ++++ linux-stable/include/linux/preempt.h +@@ -143,11 +143,15 @@ extern void migrate_enable(void); + # define preempt_enable_rt() preempt_enable() + # define preempt_disable_nort() barrier() + # define preempt_enable_nort() barrier() ++# define migrate_disable_rt() migrate_disable() ++# define migrate_enable_rt() migrate_enable() + #else + # define preempt_disable_rt() barrier() + # define preempt_enable_rt() barrier() + # define preempt_disable_nort() preempt_disable() + # define preempt_enable_nort() preempt_enable() ++# define migrate_disable_rt() barrier() ++# define migrate_enable_rt() barrier() + #endif + + #ifdef CONFIG_PREEMPT_NOTIFIERS diff --git a/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch b/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch new file mode 100644 index 000000000..ddb583a6d --- /dev/null +++ b/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch @@ -0,0 +1,23 @@ +Subject: mips-disable-highmem-on-rt.patch +From: Thomas Gleixner +Date: Mon, 18 Jul 2011 17:10:12 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + arch/mips/Kconfig | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: linux-stable/arch/mips/Kconfig +=================================================================== +--- linux-stable.orig/arch/mips/Kconfig ++++ linux-stable/arch/mips/Kconfig +@@ -2114,7 +2114,7 @@ config CPU_R4400_WORKAROUNDS + # + config HIGHMEM + bool "High Memory Support" +- depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM ++ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !PREEMPT_RT_FULL + + config CPU_SUPPORTS_HIGHMEM + bool diff --git a/debian/patches/features/all/rt/mips-enable-interrupts-in-signal.patch b/debian/patches/features/all/rt/mips-enable-interrupts-in-signal.patch new file mode 100644 index 000000000..74fabaa2c --- /dev/null +++ 
b/debian/patches/features/all/rt/mips-enable-interrupts-in-signal.patch @@ -0,0 +1,22 @@ +Subject: mips-enable-interrupts-in-signal.patch +From: Thomas Gleixner +Date: Mon, 18 Jul 2011 21:32:10 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + arch/mips/kernel/signal.c | 1 + + 1 file changed, 1 insertion(+) + +Index: linux-stable/arch/mips/kernel/signal.c +=================================================================== +--- linux-stable.orig/arch/mips/kernel/signal.c ++++ linux-stable/arch/mips/kernel/signal.c +@@ -572,6 +572,7 @@ asmlinkage void do_notify_resume(struct + __u32 thread_info_flags) + { + local_irq_enable(); ++ preempt_check_resched(); + + /* deal with pending signal delivery */ + if (thread_info_flags & _TIF_SIGPENDING) diff --git a/debian/patches/features/all/rt/mm-allow-slab-rt.patch b/debian/patches/features/all/rt/mm-allow-slab-rt.patch new file mode 100644 index 000000000..25eddad10 --- /dev/null +++ b/debian/patches/features/all/rt/mm-allow-slab-rt.patch @@ -0,0 +1,32 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:44:03 -0500 +Subject: mm: Allow only slab on RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + init/Kconfig | 2 ++ + 1 file changed, 2 insertions(+) + +Index: linux-stable/init/Kconfig +=================================================================== +--- linux-stable.orig/init/Kconfig ++++ linux-stable/init/Kconfig +@@ -1541,6 +1541,7 @@ config SLAB + + config SLUB + bool "SLUB (Unqueued Allocator)" ++ depends on !PREEMPT_RT_FULL + help + SLUB is a slab allocator that minimizes cache line usage + instead of managing queues of cached objects (SLAB approach). +@@ -1552,6 +1553,7 @@ config SLUB + config SLOB + depends on EXPERT + bool "SLOB (Simple Allocator)" ++ depends on !PREEMPT_RT_FULL + help + SLOB replaces the stock allocator with a drastically simpler + allocator. SLOB is generally more space efficient but diff --git a/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch b/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch new file mode 100644 index 000000000..0bbb3a629 --- /dev/null +++ b/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch @@ -0,0 +1,30 @@ +Subject: mm: bounce: Use local_irq_save_nort +From: Thomas Gleixner +Date: Wed, 09 Jan 2013 10:33:09 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +kmap_atomic() is preemptible on RT. 
+ +Signed-off-by: Thomas Gleixner +--- + mm/bounce.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +Index: linux-stable/mm/bounce.c +=================================================================== +--- linux-stable.orig/mm/bounce.c ++++ linux-stable/mm/bounce.c +@@ -51,11 +51,11 @@ static void bounce_copy_vec(struct bio_v + unsigned long flags; + unsigned char *vto; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + vto = kmap_atomic(to->bv_page); + memcpy(vto + to->bv_offset, vfrom, to->bv_len); + kunmap_atomic(vto); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + + #else /* CONFIG_HIGHMEM */ diff --git a/debian/patches/features/all/rt/mm-cgroup-page-bit-spinlock.patch b/debian/patches/features/all/rt/mm-cgroup-page-bit-spinlock.patch new file mode 100644 index 000000000..ba9c136fc --- /dev/null +++ b/debian/patches/features/all/rt/mm-cgroup-page-bit-spinlock.patch @@ -0,0 +1,96 @@ +From: Thomas Gleixner +Date: Wed, 19 Aug 2009 09:56:42 +0200 +Subject: mm: Replace cgroup_page bit spinlock +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Bit spinlocks are not working on RT. Replace them. + +Signed-off-by: Thomas Gleixner + +--- + include/linux/page_cgroup.h | 15 +++++++++++++++ + mm/page_cgroup.c | 11 +++++++++++ + 2 files changed, 26 insertions(+) + +Index: linux-stable/include/linux/page_cgroup.h +=================================================================== +--- linux-stable.orig/include/linux/page_cgroup.h ++++ linux-stable/include/linux/page_cgroup.h +@@ -24,6 +24,9 @@ enum { + */ + struct page_cgroup { + unsigned long flags; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ spinlock_t pcg_lock; ++#endif + struct mem_cgroup *mem_cgroup; + }; + +@@ -74,12 +77,20 @@ static inline void lock_page_cgroup(stru + * Don't take this lock in IRQ context. + * This lock is for pc->mem_cgroup, USED, MIGRATION + */ ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_lock(PCG_LOCK, &pc->flags); ++#else ++ spin_lock(&pc->pcg_lock); ++#endif + } + + static inline void unlock_page_cgroup(struct page_cgroup *pc) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_unlock(PCG_LOCK, &pc->flags); ++#else ++ spin_unlock(&pc->pcg_lock); ++#endif + } + + #else /* CONFIG_MEMCG */ +@@ -102,6 +113,10 @@ static inline void __init page_cgroup_in + { + } + ++static inline void page_cgroup_lock_init(struct page_cgroup *pc) ++{ ++} ++ + #endif /* CONFIG_MEMCG */ + + #include +Index: linux-stable/mm/page_cgroup.c +=================================================================== +--- linux-stable.orig/mm/page_cgroup.c ++++ linux-stable/mm/page_cgroup.c +@@ -13,6 +13,14 @@ + + static unsigned long total_usage; + ++static void page_cgroup_lock_init(struct page_cgroup *pc, int nr_pages) ++{ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ for (; nr_pages; nr_pages--, pc++) ++ spin_lock_init(&pc->pcg_lock); ++#endif ++} ++ + #if !defined(CONFIG_SPARSEMEM) + + +@@ -60,6 +68,7 @@ static int __init alloc_node_page_cgroup + return -ENOMEM; + NODE_DATA(nid)->node_page_cgroup = base; + total_usage += table_size; ++ page_cgroup_lock_init(base, nr_pages); + return 0; + } + +@@ -150,6 +159,8 @@ static int __meminit init_section_page_c + return -ENOMEM; + } + ++ page_cgroup_lock_init(base, PAGES_PER_SECTION); ++ + /* + * The passed "pfn" may not be aligned to SECTION. For the calculation + * we need to apply a mask. 
diff --git a/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch b/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch new file mode 100644 index 000000000..cb5b53c12 --- /dev/null +++ b/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch @@ -0,0 +1,116 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:29:51 -0500 +Subject: mm: convert swap to percpu locked +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + mm/swap.c | 30 ++++++++++++++++++------------ + 1 file changed, 18 insertions(+), 12 deletions(-) + +Index: linux-stable/mm/swap.c +=================================================================== +--- linux-stable.orig/mm/swap.c ++++ linux-stable/mm/swap.c +@@ -31,6 +31,7 @@ + #include + #include + #include ++#include + + #include "internal.h" + +@@ -41,6 +42,9 @@ static DEFINE_PER_CPU(struct pagevec[NR_ + static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs); + static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs); + ++static DEFINE_LOCAL_IRQ_LOCK(rotate_lock); ++static DEFINE_LOCAL_IRQ_LOCK(swapvec_lock); ++ + /* + * This path almost never happens for VM activity - pages are normally + * freed via pagevecs. But it gets used by networking. +@@ -355,11 +359,11 @@ void rotate_reclaimable_page(struct page + unsigned long flags; + + page_cache_get(page); +- local_irq_save(flags); ++ local_lock_irqsave(rotate_lock, flags); + pvec = &__get_cpu_var(lru_rotate_pvecs); + if (!pagevec_add(pvec, page)) + pagevec_move_tail(pvec); +- local_irq_restore(flags); ++ local_unlock_irqrestore(rotate_lock, flags); + } + } + +@@ -404,12 +408,13 @@ static void activate_page_drain(int cpu) + void activate_page(struct page *page) + { + if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { +- struct pagevec *pvec = &get_cpu_var(activate_page_pvecs); ++ struct pagevec *pvec = &get_locked_var(swapvec_lock, ++ activate_page_pvecs); + + page_cache_get(page); + if (!pagevec_add(pvec, page)) + pagevec_lru_move_fn(pvec, __activate_page, NULL); +- put_cpu_var(activate_page_pvecs); ++ put_locked_var(swapvec_lock, activate_page_pvecs); + } + } + +@@ -457,13 +462,13 @@ EXPORT_SYMBOL(mark_page_accessed); + */ + void __lru_cache_add(struct page *page, enum lru_list lru) + { +- struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru]; ++ struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvecs)[lru]; + + page_cache_get(page); + if (!pagevec_space(pvec)) + __pagevec_lru_add(pvec, lru); + pagevec_add(pvec, page); +- put_cpu_var(lru_add_pvecs); ++ put_locked_var(swapvec_lock, lru_add_pvecs); + } + EXPORT_SYMBOL(__lru_cache_add); + +@@ -598,9 +603,9 @@ void lru_add_drain_cpu(int cpu) + unsigned long flags; + + /* No harm done if a racing interrupt already did this */ +- local_irq_save(flags); ++ local_lock_irqsave(rotate_lock, flags); + pagevec_move_tail(pvec); +- local_irq_restore(flags); ++ local_unlock_irqrestore(rotate_lock, flags); + } + + pvec = &per_cpu(lru_deactivate_pvecs, cpu); +@@ -628,18 +633,19 @@ void deactivate_page(struct page *page) + return; + + if (likely(get_page_unless_zero(page))) { +- struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs); ++ struct pagevec *pvec = &get_locked_var(swapvec_lock, ++ lru_deactivate_pvecs); + + if (!pagevec_add(pvec, page)) + pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); +- put_cpu_var(lru_deactivate_pvecs); ++ put_locked_var(swapvec_lock, lru_deactivate_pvecs); + } + } + 
+ void lru_add_drain(void) + { +- lru_add_drain_cpu(get_cpu()); +- put_cpu(); ++ lru_add_drain_cpu(local_lock_cpu(swapvec_lock)); ++ local_unlock_cpu(swapvec_lock); + } + + static void lru_add_drain_per_cpu(struct work_struct *dummy) diff --git a/debian/patches/features/all/rt/mm-disable-slab-on-rt.patch b/debian/patches/features/all/rt/mm-disable-slab-on-rt.patch new file mode 100644 index 000000000..088ed3012 --- /dev/null +++ b/debian/patches/features/all/rt/mm-disable-slab-on-rt.patch @@ -0,0 +1,31 @@ +--- + init/Kconfig | 1 + + mm/slab.h | 2 +- + 2 files changed, 2 insertions(+), 1 deletion(-) +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Index: linux-stable/init/Kconfig +=================================================================== +--- linux-stable.orig/init/Kconfig ++++ linux-stable/init/Kconfig +@@ -1535,6 +1535,7 @@ choice + + config SLAB + bool "SLAB" ++ depends on !PREEMPT_RT_FULL + help + The regular slab allocator that is established and known to work + well in all environments. It organizes cache hot objects in +Index: linux-stable/mm/slab.h +=================================================================== +--- linux-stable.orig/mm/slab.h ++++ linux-stable/mm/slab.h +@@ -245,7 +245,7 @@ static inline struct kmem_cache *cache_f + * The slab lists for all objects. + */ + struct kmem_cache_node { +- spinlock_t list_lock; ++ raw_spinlock_t list_lock; + + #ifdef CONFIG_SLAB + struct list_head slabs_partial; /* partial list first, better asm code */ diff --git a/debian/patches/features/all/rt/mm-enable-slub.patch b/debian/patches/features/all/rt/mm-enable-slub.patch new file mode 100644 index 000000000..84f0a7357 --- /dev/null +++ b/debian/patches/features/all/rt/mm-enable-slub.patch @@ -0,0 +1,395 @@ +Subject: mm: Enable SLUB for RT +From: Thomas Gleixner +Date: Thu, 25 Oct 2012 10:32:35 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Make SLUB RT aware and remove the restriction in Kconfig. + +Signed-off-by: Thomas Gleixner +--- + init/Kconfig | 1 + mm/slub.c | 119 +++++++++++++++++++++++++++++++++++++++++++++-------------- + 2 files changed, 91 insertions(+), 29 deletions(-) + +Index: linux-stable/init/Kconfig +=================================================================== +--- linux-stable.orig/init/Kconfig ++++ linux-stable/init/Kconfig +@@ -1542,7 +1542,6 @@ config SLAB + + config SLUB + bool "SLUB (Unqueued Allocator)" +- depends on !PREEMPT_RT_FULL + help + SLUB is a slab allocator that minimizes cache line usage + instead of managing queues of cached objects (SLAB approach). 
+Index: linux-stable/mm/slub.c +=================================================================== +--- linux-stable.orig/mm/slub.c ++++ linux-stable/mm/slub.c +@@ -1071,7 +1071,7 @@ static noinline struct kmem_cache_node * + { + struct kmem_cache_node *n = get_node(s, page_to_nid(page)); + +- spin_lock_irqsave(&n->list_lock, *flags); ++ raw_spin_lock_irqsave(&n->list_lock, *flags); + slab_lock(page); + + if (!check_slab(s, page)) +@@ -1119,7 +1119,7 @@ out: + + fail: + slab_unlock(page); +- spin_unlock_irqrestore(&n->list_lock, *flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, *flags); + slab_fix(s, "Object at 0x%p not freed", object); + return NULL; + } +@@ -1254,6 +1254,12 @@ static inline void slab_free_hook(struct + + #endif /* CONFIG_SLUB_DEBUG */ + ++struct slub_free_list { ++ raw_spinlock_t lock; ++ struct list_head list; ++}; ++static DEFINE_PER_CPU(struct slub_free_list, slub_free_list); ++ + /* + * Slab allocation and freeing + */ +@@ -1278,7 +1284,11 @@ static struct page *allocate_slab(struct + + flags &= gfp_allowed_mask; + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (system_state == SYSTEM_RUNNING) ++#else + if (flags & __GFP_WAIT) ++#endif + local_irq_enable(); + + flags |= s->allocflags; +@@ -1318,7 +1328,11 @@ static struct page *allocate_slab(struct + kmemcheck_mark_unallocated_pages(page, pages); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (system_state == SYSTEM_RUNNING) ++#else + if (flags & __GFP_WAIT) ++#endif + local_irq_disable(); + if (!page) + return NULL; +@@ -1415,6 +1429,16 @@ static void __free_slab(struct kmem_cach + __free_memcg_kmem_pages(page, order); + } + ++static void free_delayed(struct kmem_cache *s, struct list_head *h) ++{ ++ while(!list_empty(h)) { ++ struct page *page = list_first_entry(h, struct page, lru); ++ ++ list_del(&page->lru); ++ __free_slab(s, page); ++ } ++} ++ + #define need_reserve_slab_rcu \ + (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head)) + +@@ -1449,6 +1473,12 @@ static void free_slab(struct kmem_cache + } + + call_rcu(head, rcu_free_slab); ++ } else if (irqs_disabled()) { ++ struct slub_free_list *f = &__get_cpu_var(slub_free_list); ++ ++ raw_spin_lock(&f->lock); ++ list_add(&page->lru, &f->list); ++ raw_spin_unlock(&f->lock); + } else + __free_slab(s, page); + } +@@ -1553,7 +1583,7 @@ static void *get_partial_node(struct kme + if (!n || !n->nr_partial) + return NULL; + +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + list_for_each_entry_safe(page, page2, &n->partial, lru) { + void *t; + +@@ -1577,7 +1607,7 @@ static void *get_partial_node(struct kme + break; + + } +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + return object; + } + +@@ -1819,7 +1849,7 @@ redo: + * that acquire_slab() will see a slab page that + * is frozen + */ +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + } + } else { + m = M_FULL; +@@ -1830,7 +1860,7 @@ redo: + * slabs from diagnostic functions will not see + * any frozen slabs. 
+ */ +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + } + } + +@@ -1865,7 +1895,7 @@ redo: + goto redo; + + if (lock) +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + + if (m == M_FREE) { + stat(s, DEACTIVATE_EMPTY); +@@ -1896,10 +1926,10 @@ static void unfreeze_partials(struct kme + n2 = get_node(s, page_to_nid(page)); + if (n != n2) { + if (n) +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + + n = n2; +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + } + + do { +@@ -1928,7 +1958,7 @@ static void unfreeze_partials(struct kme + } + + if (n) +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + + while (discard_page) { + page = discard_page; +@@ -1964,14 +1994,21 @@ static void put_cpu_partial(struct kmem_ + pobjects = oldpage->pobjects; + pages = oldpage->pages; + if (drain && pobjects > s->cpu_partial) { ++ struct slub_free_list *f; + unsigned long flags; ++ LIST_HEAD(tofree); + /* + * partial array is full. Move the existing + * set to the per node partial list. + */ + local_irq_save(flags); + unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); ++ f = &__get_cpu_var(slub_free_list); ++ raw_spin_lock(&f->lock); ++ list_splice_init(&f->list, &tofree); ++ raw_spin_unlock(&f->lock); + local_irq_restore(flags); ++ free_delayed(s, &tofree); + oldpage = NULL; + pobjects = 0; + pages = 0; +@@ -2033,7 +2070,22 @@ static bool has_cpu_slab(int cpu, void * + + static void flush_all(struct kmem_cache *s) + { ++ LIST_HEAD(tofree); ++ int cpu; ++ + on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC); ++ for_each_online_cpu(cpu) { ++ struct slub_free_list *f; ++ ++ if (!has_cpu_slab(cpu, s)) ++ continue; ++ ++ f = &per_cpu(slub_free_list, cpu); ++ raw_spin_lock_irq(&f->lock); ++ list_splice_init(&f->list, &tofree); ++ raw_spin_unlock_irq(&f->lock); ++ free_delayed(s, &tofree); ++ } + } + + /* +@@ -2061,10 +2113,10 @@ static unsigned long count_partial(struc + unsigned long x = 0; + struct page *page; + +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + list_for_each_entry(page, &n->partial, lru) + x += get_count(page); +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + return x; + } + +@@ -2207,9 +2259,11 @@ static inline void *get_freelist(struct + static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, + unsigned long addr, struct kmem_cache_cpu *c) + { ++ struct slub_free_list *f; + void *freelist; + struct page *page; + unsigned long flags; ++ LIST_HEAD(tofree); + + local_irq_save(flags); + #ifdef CONFIG_PREEMPT +@@ -2272,7 +2326,13 @@ load_freelist: + VM_BUG_ON(!c->page->frozen); + c->freelist = get_freepointer(s, freelist); + c->tid = next_tid(c->tid); ++out: ++ f = &__get_cpu_var(slub_free_list); ++ raw_spin_lock(&f->lock); ++ list_splice_init(&f->list, &tofree); ++ raw_spin_unlock(&f->lock); + local_irq_restore(flags); ++ free_delayed(s, &tofree); + return freelist; + + new_slab: +@@ -2290,9 +2350,7 @@ new_slab: + if (unlikely(!freelist)) { + if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit()) + slab_out_of_memory(s, gfpflags, node); +- +- local_irq_restore(flags); +- return NULL; ++ goto out; + } + + page = c->page; +@@ -2306,8 +2364,7 @@ new_slab: + deactivate_slab(s, page, get_freepointer(s, freelist)); + c->page = NULL; + c->freelist = NULL; +- local_irq_restore(flags); +- return freelist; ++ goto out; + } + + /* +@@ -2484,7 +2541,7 @@ static void __slab_free(struct kmem_cach + + do { + 
if (unlikely(n)) { +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + n = NULL; + } + prior = page->freelist; +@@ -2514,7 +2571,7 @@ static void __slab_free(struct kmem_cach + * Otherwise the list_lock will synchronize with + * other processors updating the list of slabs. + */ +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + + } + } +@@ -2555,7 +2612,7 @@ static void __slab_free(struct kmem_cach + add_partial(n, page, DEACTIVATE_TO_TAIL); + stat(s, FREE_ADD_PARTIAL); + } +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + return; + + slab_empty: +@@ -2569,7 +2626,7 @@ slab_empty: + /* Slab must be on the full list */ + remove_full(s, page); + +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + stat(s, FREE_SLAB); + discard_slab(s, page); + } +@@ -2771,7 +2828,7 @@ static void + init_kmem_cache_node(struct kmem_cache_node *n) + { + n->nr_partial = 0; +- spin_lock_init(&n->list_lock); ++ raw_spin_lock_init(&n->list_lock); + INIT_LIST_HEAD(&n->partial); + #ifdef CONFIG_SLUB_DEBUG + atomic_long_set(&n->nr_slabs, 0); +@@ -3393,7 +3450,7 @@ int kmem_cache_shrink(struct kmem_cache + for (i = 0; i < objects; i++) + INIT_LIST_HEAD(slabs_by_inuse + i); + +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + + /* + * Build lists indexed by the items in use in each slab. +@@ -3414,7 +3471,7 @@ int kmem_cache_shrink(struct kmem_cache + for (i = objects - 1; i > 0; i--) + list_splice(slabs_by_inuse + i, n->partial.prev); + +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + + /* Release empty slabs */ + list_for_each_entry_safe(page, t, slabs_by_inuse, lru) +@@ -3590,6 +3647,12 @@ void __init kmem_cache_init(void) + { + static __initdata struct kmem_cache boot_kmem_cache, + boot_kmem_cache_node; ++ int cpu; ++ ++ for_each_possible_cpu(cpu) { ++ raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock); ++ INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list); ++ } + + if (debug_guardpage_minorder()) + slub_max_order = 0; +@@ -3894,7 +3957,7 @@ static int validate_slab_node(struct kme + struct page *page; + unsigned long flags; + +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + + list_for_each_entry(page, &n->partial, lru) { + validate_slab_slab(s, page, map); +@@ -3917,7 +3980,7 @@ static int validate_slab_node(struct kme + atomic_long_read(&n->nr_slabs)); + + out: +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + return count; + } + +@@ -4107,12 +4170,12 @@ static int list_locations(struct kmem_ca + if (!atomic_long_read(&n->nr_slabs)) + continue; + +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + list_for_each_entry(page, &n->partial, lru) + process_slab(&t, s, page, alloc, map); + list_for_each_entry(page, &n->full, lru) + process_slab(&t, s, page, alloc, map); +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + } + + for (i = 0; i < t.count; i++) { diff --git a/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch b/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch new file mode 100644 index 000000000..92ec4d1d3 --- /dev/null +++ b/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch @@ -0,0 +1,89 
@@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:30:13 -0500 +Subject: [PATCH] mm: make vmstat -rt aware +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + include/linux/vmstat.h | 4 ++++ + mm/vmstat.c | 6 ++++++ + 2 files changed, 10 insertions(+) + +Index: linux-stable/include/linux/vmstat.h +=================================================================== +--- linux-stable.orig/include/linux/vmstat.h ++++ linux-stable/include/linux/vmstat.h +@@ -29,7 +29,9 @@ DECLARE_PER_CPU(struct vm_event_state, v + + static inline void __count_vm_event(enum vm_event_item item) + { ++ preempt_disable_rt(); + __this_cpu_inc(vm_event_states.event[item]); ++ preempt_enable_rt(); + } + + static inline void count_vm_event(enum vm_event_item item) +@@ -39,7 +41,9 @@ static inline void count_vm_event(enum v + + static inline void __count_vm_events(enum vm_event_item item, long delta) + { ++ preempt_disable_rt(); + __this_cpu_add(vm_event_states.event[item], delta); ++ preempt_enable_rt(); + } + + static inline void count_vm_events(enum vm_event_item item, long delta) +Index: linux-stable/mm/vmstat.c +=================================================================== +--- linux-stable.orig/mm/vmstat.c ++++ linux-stable/mm/vmstat.c +@@ -214,6 +214,7 @@ void __mod_zone_page_state(struct zone * + long x; + long t; + ++ preempt_disable_rt(); + x = delta + __this_cpu_read(*p); + + t = __this_cpu_read(pcp->stat_threshold); +@@ -223,6 +224,7 @@ void __mod_zone_page_state(struct zone * + x = 0; + } + __this_cpu_write(*p, x); ++ preempt_enable_rt(); + } + EXPORT_SYMBOL(__mod_zone_page_state); + +@@ -255,6 +257,7 @@ void __inc_zone_state(struct zone *zone, + s8 __percpu *p = pcp->vm_stat_diff + item; + s8 v, t; + ++ preempt_disable_rt(); + v = __this_cpu_inc_return(*p); + t = __this_cpu_read(pcp->stat_threshold); + if (unlikely(v > t)) { +@@ -263,6 +266,7 @@ void __inc_zone_state(struct zone *zone, + zone_page_state_add(v + overstep, zone, item); + __this_cpu_write(*p, -overstep); + } ++ preempt_enable_rt(); + } + + void __inc_zone_page_state(struct page *page, enum zone_stat_item item) +@@ -277,6 +281,7 @@ void __dec_zone_state(struct zone *zone, + s8 __percpu *p = pcp->vm_stat_diff + item; + s8 v, t; + ++ preempt_disable_rt(); + v = __this_cpu_dec_return(*p); + t = __this_cpu_read(pcp->stat_threshold); + if (unlikely(v < - t)) { +@@ -285,6 +290,7 @@ void __dec_zone_state(struct zone *zone, + zone_page_state_add(v - overstep, zone, item); + __this_cpu_write(*p, overstep); + } ++ preempt_enable_rt(); + } + + void __dec_zone_page_state(struct page *page, enum zone_stat_item item) diff --git a/debian/patches/features/all/rt/mm-page-alloc-fix.patch b/debian/patches/features/all/rt/mm-page-alloc-fix.patch new file mode 100644 index 000000000..4502a9d20 --- /dev/null +++ b/debian/patches/features/all/rt/mm-page-alloc-fix.patch @@ -0,0 +1,25 @@ +Subject: mm-page-alloc-fix.patch +From: Thomas Gleixner +Date: Thu, 21 Jul 2011 16:47:49 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + mm/page_alloc.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +Index: linux-stable/mm/page_alloc.c +=================================================================== +--- linux-stable.orig/mm/page_alloc.c ++++ linux-stable/mm/page_alloc.c +@@ -2240,8 +2240,8 @@ __alloc_pages_direct_compact(gfp_t gfp_m + struct page *page; + + /* 
Page migration frees to the PCP lists but we want merging */ +- drain_pages(get_cpu()); +- put_cpu(); ++ drain_pages(get_cpu_light()); ++ put_cpu_light(); + + page = get_page_from_freelist(gfp_mask, nodemask, + order, zonelist, high_zoneidx, diff --git a/debian/patches/features/all/rt/mm-page-alloc-use-list-last-entry.patch b/debian/patches/features/all/rt/mm-page-alloc-use-list-last-entry.patch new file mode 100644 index 000000000..9e0f872c4 --- /dev/null +++ b/debian/patches/features/all/rt/mm-page-alloc-use-list-last-entry.patch @@ -0,0 +1,23 @@ +Subject: mm-page-alloc-use-list-last-entry.patch +From: Peter Zijlstra +Date: Tue, 21 Jun 2011 11:24:35 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + mm/page_alloc.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: linux-stable/mm/page_alloc.c +=================================================================== +--- linux-stable.orig/mm/page_alloc.c ++++ linux-stable/mm/page_alloc.c +@@ -667,7 +667,7 @@ static void free_pcppages_bulk(struct zo + do { + int mt; /* migratetype of the to-be-freed page */ + +- page = list_entry(list->prev, struct page, lru); ++ page = list_last_entry(list, struct page, lru); + /* must delete as __free_one_page list manipulates */ + list_del(&page->lru); + mt = get_freepage_migratetype(page); diff --git a/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch b/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch new file mode 100644 index 000000000..720cea8a7 --- /dev/null +++ b/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch @@ -0,0 +1,30 @@ +Subject: mm: page_alloc: Use local_lock_on() instead of plain spinlock +From: Thomas Gleixner +Date: Thu, 27 Sep 2012 11:11:46 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +The plain spinlock while sufficient does not update the local_lock +internals. Use a proper local_lock function instead to ease debugging. 
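A minimal userspace sketch of that rationale, assuming a pthread mutex as the stand-in lock: the local_lock keeps debug bookkeeping (owner, nesting) next to the real lock, and taking the embedded lock directly still serializes correctly but leaves that bookkeeping stale — which is exactly what makes debugging harder. All names and fields here are illustrative, not the kernel's local_lock implementation.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct local_lock {
    pthread_mutex_t lock;    /* the real lock (the spinlock inside the kernel type) */
    pthread_t owner;         /* debug state only the wrapper keeps up to date */
    int nestcnt;
};

static struct local_lock pa_lock = { .lock = PTHREAD_MUTEX_INITIALIZER };

static void local_lock_on(struct local_lock *l)
{
    pthread_mutex_lock(&l->lock);
    l->owner = pthread_self();
    l->nestcnt = 1;
}

static void local_unlock_on(struct local_lock *l)
{
    assert(l->nestcnt == 1 && pthread_equal(l->owner, pthread_self()));
    l->nestcnt = 0;
    pthread_mutex_unlock(&l->lock);
}

int main(void)
{
    local_lock_on(&pa_lock);             /* owner/nestcnt reflect reality */
    local_unlock_on(&pa_lock);

    pthread_mutex_lock(&pa_lock.lock);   /* "plain spinlock": serializes fine... */
    printf("lock held, but nestcnt=%d - bookkeeping went stale\n",
           pa_lock.nestcnt);
    pthread_mutex_unlock(&pa_lock.lock); /* ...while the debug state lies */
    return 0;
}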
+ +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + mm/page_alloc.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +Index: linux-stable/mm/page_alloc.c +=================================================================== +--- linux-stable.orig/mm/page_alloc.c ++++ linux-stable/mm/page_alloc.c +@@ -226,9 +226,9 @@ static DEFINE_LOCAL_IRQ_LOCK(pa_lock); + + #ifdef CONFIG_PREEMPT_RT_BASE + # define cpu_lock_irqsave(cpu, flags) \ +- spin_lock_irqsave(&per_cpu(pa_lock, cpu).lock, flags) ++ local_lock_irqsave_on(pa_lock, flags, cpu) + # define cpu_unlock_irqrestore(cpu, flags) \ +- spin_unlock_irqrestore(&per_cpu(pa_lock, cpu).lock, flags) ++ local_unlock_irqrestore_on(pa_lock, flags, cpu) + #else + # define cpu_lock_irqsave(cpu, flags) local_irq_save(flags) + # define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags) diff --git a/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch b/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch new file mode 100644 index 000000000..1717e2794 --- /dev/null +++ b/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch @@ -0,0 +1,222 @@ +From: Peter Zijlstra +Date: Fri, 3 Jul 2009 08:44:37 -0500 +Subject: mm: page_alloc reduce lock sections further +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Split out the pages which are to be freed into a separate list and +call free_pages_bulk() outside of the percpu page allocator locks. + +Signed-off-by: Peter Zijlstra +Signed-off-by: Thomas Gleixner +--- + mm/page_alloc.c | 98 +++++++++++++++++++++++++++++++++++++++----------------- + 1 file changed, 69 insertions(+), 29 deletions(-) + +Index: linux-stable/mm/page_alloc.c +=================================================================== +--- linux-stable.orig/mm/page_alloc.c ++++ linux-stable/mm/page_alloc.c +@@ -634,7 +634,7 @@ static inline int free_pages_check(struc + } + + /* +- * Frees a number of pages from the PCP lists ++ * Frees a number of pages which have been collected from the pcp lists. + * Assumes all pages on list are in same zone, and of same order. + * count is the number of pages to free. + * +@@ -645,16 +645,50 @@ static inline int free_pages_check(struc + * pinned" detection logic. + */ + static void free_pcppages_bulk(struct zone *zone, int count, +- struct per_cpu_pages *pcp) ++ struct list_head *list) + { +- int migratetype = 0; +- int batch_free = 0; + int to_free = count; ++ unsigned long flags; + +- spin_lock(&zone->lock); ++ spin_lock_irqsave(&zone->lock, flags); + zone->all_unreclaimable = 0; + zone->pages_scanned = 0; + ++ while (!list_empty(list)) { ++ struct page *page = list_first_entry(list, struct page, lru); ++ int mt; /* migratetype of the to-be-freed page */ ++ ++ /* must delete as __free_one_page list manipulates */ ++ list_del(&page->lru); ++ ++ mt = get_freepage_migratetype(page); ++ /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ ++ __free_one_page(page, zone, 0, mt); ++ trace_mm_page_pcpu_drain(page, 0, mt); ++ if (likely(!is_migrate_isolate_page(page))) { ++ __mod_zone_page_state(zone, NR_FREE_PAGES, 1); ++ if (is_migrate_cma(mt)) ++ __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1); ++ } ++ ++ to_free--; ++ } ++ WARN_ON(to_free != 0); ++ spin_unlock_irqrestore(&zone->lock, flags); ++} ++ ++/* ++ * Moves a number of pages from the PCP lists to free list which ++ * is freed outside of the locked region. 
++ * ++ * Assumes all pages on list are in same zone, and of same order. ++ * count is the number of pages to free. ++ */ ++static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src, ++ struct list_head *dst) ++{ ++ int migratetype = 0, batch_free = 0; ++ + while (to_free) { + struct page *page; + struct list_head *list; +@@ -670,7 +704,7 @@ static void free_pcppages_bulk(struct zo + batch_free++; + if (++migratetype == MIGRATE_PCPTYPES) + migratetype = 0; +- list = &pcp->lists[migratetype]; ++ list = &src->lists[migratetype]; + } while (list_empty(list)); + + /* This is the only non-empty list. Free them all. */ +@@ -678,36 +712,26 @@ static void free_pcppages_bulk(struct zo + batch_free = to_free; + + do { +- int mt; /* migratetype of the to-be-freed page */ +- + page = list_last_entry(list, struct page, lru); +- /* must delete as __free_one_page list manipulates */ + list_del(&page->lru); +- mt = get_freepage_migratetype(page); +- /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ +- __free_one_page(page, zone, 0, mt); +- trace_mm_page_pcpu_drain(page, 0, mt); +- if (likely(!is_migrate_isolate_page(page))) { +- __mod_zone_page_state(zone, NR_FREE_PAGES, 1); +- if (is_migrate_cma(mt)) +- __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1); +- } ++ list_add(&page->lru, dst); + } while (--to_free && --batch_free && !list_empty(list)); + } +- spin_unlock(&zone->lock); + } + + static void free_one_page(struct zone *zone, struct page *page, int order, + int migratetype) + { +- spin_lock(&zone->lock); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&zone->lock, flags); + zone->all_unreclaimable = 0; + zone->pages_scanned = 0; + + __free_one_page(page, zone, order, migratetype); + if (unlikely(!is_migrate_isolate(migratetype))) + __mod_zone_freepage_state(zone, 1 << order, migratetype); +- spin_unlock(&zone->lock); ++ spin_unlock_irqrestore(&zone->lock, flags); + } + + static bool free_pages_prepare(struct page *page, unsigned int order) +@@ -1191,6 +1215,7 @@ static int rmqueue_bulk(struct zone *zon + void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) + { + unsigned long flags; ++ LIST_HEAD(dst); + int to_drain; + + local_lock_irqsave(pa_lock, flags); +@@ -1199,10 +1224,11 @@ void drain_zone_pages(struct zone *zone, + else + to_drain = pcp->count; + if (to_drain > 0) { +- free_pcppages_bulk(zone, to_drain, pcp); ++ isolate_pcp_pages(to_drain, pcp, &dst); + pcp->count -= to_drain; + } + local_unlock_irqrestore(pa_lock, flags); ++ free_pcppages_bulk(zone, to_drain, &dst); + } + #endif + +@@ -1221,16 +1247,21 @@ static void drain_pages(unsigned int cpu + for_each_populated_zone(zone) { + struct per_cpu_pageset *pset; + struct per_cpu_pages *pcp; ++ LIST_HEAD(dst); ++ int count; + + cpu_lock_irqsave(cpu, flags); + pset = per_cpu_ptr(zone->pageset, cpu); + + pcp = &pset->pcp; +- if (pcp->count) { +- free_pcppages_bulk(zone, pcp->count, pcp); ++ count = pcp->count; ++ if (count) { ++ isolate_pcp_pages(count, pcp, &dst); + pcp->count = 0; + } + cpu_unlock_irqrestore(cpu, flags); ++ if (count) ++ free_pcppages_bulk(zone, count, &dst); + } + } + +@@ -1368,8 +1399,15 @@ void free_hot_cold_page(struct page *pag + list_add(&page->lru, &pcp->lists[migratetype]); + pcp->count++; + if (pcp->count >= pcp->high) { +- free_pcppages_bulk(zone, pcp->batch, pcp); ++ LIST_HEAD(dst); ++ int count; ++ ++ isolate_pcp_pages(pcp->batch, pcp, &dst); + pcp->count -= pcp->batch; ++ count = pcp->batch; ++ local_unlock_irqrestore(pa_lock, flags); ++ free_pcppages_bulk(zone, count, &dst); ++ 
return; + } + + out: +@@ -6072,20 +6110,22 @@ static int __meminit __zone_pcp_update(v + { + struct zone *zone = data; + int cpu; +- unsigned long batch = zone_batchsize(zone), flags; ++ unsigned long flags; + + for_each_possible_cpu(cpu) { + struct per_cpu_pageset *pset; + struct per_cpu_pages *pcp; ++ LIST_HEAD(dst); + + pset = per_cpu_ptr(zone->pageset, cpu); + pcp = &pset->pcp; + + cpu_lock_irqsave(cpu, flags); +- if (pcp->count > 0) +- free_pcppages_bulk(zone, pcp->count, pcp); ++ if (pcp->count > 0) { ++ isolate_pcp_pages(pcp->count, pcp, &dst); ++ free_pcppages_bulk(zone, pcp->count, &dst); ++ } + drain_zonestat(zone, pset); +- setup_pageset(pset, batch); + cpu_unlock_irqrestore(cpu, flags); + } + return 0; diff --git a/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch new file mode 100644 index 000000000..825343cee --- /dev/null +++ b/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch @@ -0,0 +1,230 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:29:37 -0500 +Subject: mm: page_alloc: rt-friendly per-cpu pages +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +rt-friendly per-cpu pages: convert the irqs-off per-cpu locking +method into a preemptible, explicit-per-cpu-locks method. + +Contains fixes from: + Peter Zijlstra + Thomas Gleixner + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + mm/page_alloc.c | 61 +++++++++++++++++++++++++++++++++++++------------------- + 1 file changed, 41 insertions(+), 20 deletions(-) + +Index: linux-stable/mm/page_alloc.c +=================================================================== +--- linux-stable.orig/mm/page_alloc.c ++++ linux-stable/mm/page_alloc.c +@@ -60,6 +60,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -221,6 +222,18 @@ EXPORT_SYMBOL(nr_node_ids); + EXPORT_SYMBOL(nr_online_nodes); + #endif + ++static DEFINE_LOCAL_IRQ_LOCK(pa_lock); ++ ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define cpu_lock_irqsave(cpu, flags) \ ++ spin_lock_irqsave(&per_cpu(pa_lock, cpu).lock, flags) ++# define cpu_unlock_irqrestore(cpu, flags) \ ++ spin_unlock_irqrestore(&per_cpu(pa_lock, cpu).lock, flags) ++#else ++# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags) ++# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags) ++#endif ++ + int page_group_by_mobility_disabled __read_mostly; + + void set_pageblock_migratetype(struct page *page, int migratetype) +@@ -731,12 +744,12 @@ static void __free_pages_ok(struct page + if (!free_pages_prepare(page, order)) + return; + +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + __count_vm_events(PGFREE, 1 << order); + migratetype = get_pageblock_migratetype(page); + set_freepage_migratetype(page, migratetype); + free_one_page(page_zone(page), page, order, migratetype); +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + } + + /* +@@ -1180,7 +1193,7 @@ void drain_zone_pages(struct zone *zone, + unsigned long flags; + int to_drain; + +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + if (pcp->count >= pcp->batch) + to_drain = pcp->batch; + else +@@ -1189,7 +1202,7 @@ void drain_zone_pages(struct zone *zone, + free_pcppages_bulk(zone, to_drain, pcp); + pcp->count -= to_drain; + } +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + } + #endif + +@@ -1209,7 +1222,7 @@ static void drain_pages(unsigned int cpu + 
struct per_cpu_pageset *pset; + struct per_cpu_pages *pcp; + +- local_irq_save(flags); ++ cpu_lock_irqsave(cpu, flags); + pset = per_cpu_ptr(zone->pageset, cpu); + + pcp = &pset->pcp; +@@ -1217,7 +1230,7 @@ static void drain_pages(unsigned int cpu + free_pcppages_bulk(zone, pcp->count, pcp); + pcp->count = 0; + } +- local_irq_restore(flags); ++ cpu_unlock_irqrestore(cpu, flags); + } + } + +@@ -1270,7 +1283,12 @@ void drain_all_pages(void) + else + cpumask_clear_cpu(cpu, &cpus_with_pcps); + } ++#ifndef CONFIG_PREEMPT_RT_BASE + on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1); ++#else ++ for_each_cpu(cpu, &cpus_with_pcps) ++ drain_pages(cpu); ++#endif + } + + #ifdef CONFIG_HIBERNATION +@@ -1325,7 +1343,7 @@ void free_hot_cold_page(struct page *pag + + migratetype = get_pageblock_migratetype(page); + set_freepage_migratetype(page, migratetype); +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + __count_vm_event(PGFREE); + + /* +@@ -1355,7 +1373,7 @@ void free_hot_cold_page(struct page *pag + } + + out: +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + } + + /* +@@ -1485,7 +1503,7 @@ again: + struct per_cpu_pages *pcp; + struct list_head *list; + +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + pcp = &this_cpu_ptr(zone->pageset)->pcp; + list = &pcp->lists[migratetype]; + if (list_empty(list)) { +@@ -1517,18 +1535,20 @@ again: + */ + WARN_ON_ONCE(order > 1); + } +- spin_lock_irqsave(&zone->lock, flags); ++ local_spin_lock_irqsave(pa_lock, &zone->lock, flags); + page = __rmqueue(zone, order, migratetype); +- spin_unlock(&zone->lock); +- if (!page) ++ if (!page) { ++ spin_unlock(&zone->lock); + goto failed; ++ } + __mod_zone_freepage_state(zone, -(1 << order), + get_pageblock_migratetype(page)); ++ spin_unlock(&zone->lock); + } + + __count_zone_vm_events(PGALLOC, zone, 1 << order); + zone_statistics(preferred_zone, zone, gfp_flags); +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + + VM_BUG_ON(bad_range(zone, page)); + if (prep_new_page(page, order, gfp_flags)) +@@ -1536,7 +1556,7 @@ again: + return page; + + failed: +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + return NULL; + } + +@@ -5233,6 +5253,7 @@ static int page_alloc_cpu_notify(struct + void __init page_alloc_init(void) + { + hotcpu_notifier(page_alloc_cpu_notify, 0); ++ local_irq_lock_init(pa_lock); + } + + /* +@@ -5471,11 +5492,11 @@ int __meminit init_per_zone_wmark_min(vo + module_init(init_per_zone_wmark_min) + + /* +- * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so ++ * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so + * that we can call two helper functions whenever min_free_kbytes + * changes. 
+ */ +-int min_free_kbytes_sysctl_handler(ctl_table *table, int write, ++int min_free_kbytes_sysctl_handler(ctl_table *table, int write, + void __user *buffer, size_t *length, loff_t *ppos) + { + proc_dointvec(table, write, buffer, length, ppos); +@@ -6060,12 +6081,12 @@ static int __meminit __zone_pcp_update(v + pset = per_cpu_ptr(zone->pageset, cpu); + pcp = &pset->pcp; + +- local_irq_save(flags); ++ cpu_lock_irqsave(cpu, flags); + if (pcp->count > 0) + free_pcppages_bulk(zone, pcp->count, pcp); + drain_zonestat(zone, pset); + setup_pageset(pset, batch); +- local_irq_restore(flags); ++ cpu_unlock_irqrestore(cpu, flags); + } + return 0; + } +@@ -6083,7 +6104,7 @@ void zone_pcp_reset(struct zone *zone) + struct per_cpu_pageset *pset; + + /* avoid races with drain_pages() */ +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + if (zone->pageset != &boot_pageset) { + for_each_online_cpu(cpu) { + pset = per_cpu_ptr(zone->pageset, cpu); +@@ -6092,7 +6113,7 @@ void zone_pcp_reset(struct zone *zone) + free_percpu(zone->pageset); + zone->pageset = &boot_pageset; + } +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + } + + #ifdef CONFIG_MEMORY_HOTREMOVE diff --git a/debian/patches/features/all/rt/mm-prepare-pf-disable-discoupling.patch b/debian/patches/features/all/rt/mm-prepare-pf-disable-discoupling.patch new file mode 100644 index 000000000..67653ab59 --- /dev/null +++ b/debian/patches/features/all/rt/mm-prepare-pf-disable-discoupling.patch @@ -0,0 +1,127 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:30:37 -0500 +Subject: mm: Prepare decoupling the page fault disabling logic +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Add a pagefault_disabled variable to task_struct to allow decoupling +the pagefault-disabled logic from the preempt count. + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + include/linux/sched.h | 1 + + include/linux/uaccess.h | 33 +++------------------------------ + kernel/fork.c | 1 + + mm/memory.c | 29 +++++++++++++++++++++++++++++ + 4 files changed, 34 insertions(+), 30 deletions(-) + +Index: linux-stable/include/linux/sched.h +=================================================================== +--- linux-stable.orig/include/linux/sched.h ++++ linux-stable/include/linux/sched.h +@@ -1260,6 +1260,7 @@ struct task_struct { + /* mutex deadlock detection */ + struct mutex_waiter *blocked_on; + #endif ++ int pagefault_disabled; + #ifdef CONFIG_TRACE_IRQFLAGS + unsigned int irq_events; + unsigned long hardirq_enable_ip; +Index: linux-stable/include/linux/uaccess.h +=================================================================== +--- linux-stable.orig/include/linux/uaccess.h ++++ linux-stable/include/linux/uaccess.h +@@ -6,37 +6,10 @@ + + /* + * These routines enable/disable the pagefault handler in that +- * it will not take any locks and go straight to the fixup table. +- * +- * They have great resemblance to the preempt_disable/enable calls +- * and in fact they are identical; this is because currently there is +- * no other way to make the pagefault handlers do this. So we do +- * disable preemption but we don't necessarily care about that. ++ * it will not take any MM locks and go straight to the fixup table. + */ +-static inline void pagefault_disable(void) +-{ +- inc_preempt_count(); +- /* +- * make sure to have issued the store before a pagefault +- * can hit. 
+- */ +- barrier(); +-} +- +-static inline void pagefault_enable(void) +-{ +- /* +- * make sure to issue those last loads/stores before enabling +- * the pagefault handler again. +- */ +- barrier(); +- dec_preempt_count(); +- /* +- * make sure we do.. +- */ +- barrier(); +- preempt_check_resched(); +-} ++extern void pagefault_disable(void); ++extern void pagefault_enable(void); + + #ifndef ARCH_HAS_NOCACHE_UACCESS + +Index: linux-stable/kernel/fork.c +=================================================================== +--- linux-stable.orig/kernel/fork.c ++++ linux-stable/kernel/fork.c +@@ -1293,6 +1293,7 @@ static struct task_struct *copy_process( + p->hardirq_context = 0; + p->softirq_context = 0; + #endif ++ p->pagefault_disabled = 0; + #ifdef CONFIG_LOCKDEP + p->lockdep_depth = 0; /* no locks held yet */ + p->curr_chain_key = 0; +Index: linux-stable/mm/memory.c +=================================================================== +--- linux-stable.orig/mm/memory.c ++++ linux-stable/mm/memory.c +@@ -3748,6 +3748,35 @@ unlock: + return 0; + } + ++void pagefault_disable(void) ++{ ++ inc_preempt_count(); ++ current->pagefault_disabled++; ++ /* ++ * make sure to have issued the store before a pagefault ++ * can hit. ++ */ ++ barrier(); ++} ++EXPORT_SYMBOL(pagefault_disable); ++ ++void pagefault_enable(void) ++{ ++ /* ++ * make sure to issue those last loads/stores before enabling ++ * the pagefault handler again. ++ */ ++ barrier(); ++ current->pagefault_disabled--; ++ dec_preempt_count(); ++ /* ++ * make sure we do.. ++ */ ++ barrier(); ++ preempt_check_resched(); ++} ++EXPORT_SYMBOL(pagefault_enable); ++ + /* + * By the time we get here, we already hold the mm semaphore + */ diff --git a/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch b/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch new file mode 100644 index 000000000..76abd9d11 --- /dev/null +++ b/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch @@ -0,0 +1,74 @@ +Subject: mm: Protect activate_mm() by preempt_[disable&enable]_rt() +From: Yong Zhang +Date: Tue, 15 May 2012 13:53:56 +0800 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +User preempt_*_rt instead of local_irq_*_rt or otherwise there will be +warning on ARM like below: + +WARNING: at build/linux/kernel/smp.c:459 smp_call_function_many+0x98/0x264() +Modules linked in: +[] (unwind_backtrace+0x0/0xe4) from [] (warn_slowpath_common+0x4c/0x64) +[] (warn_slowpath_common+0x4c/0x64) from [] (warn_slowpath_null+0x18/0x1c) +[] (warn_slowpath_null+0x18/0x1c) from [](smp_call_function_many+0x98/0x264) +[] (smp_call_function_many+0x98/0x264) from [] (smp_call_function+0x44/0x6c) +[] (smp_call_function+0x44/0x6c) from [] (__new_context+0xbc/0x124) +[] (__new_context+0xbc/0x124) from [] (flush_old_exec+0x460/0x5e4) +[] (flush_old_exec+0x460/0x5e4) from [] (load_elf_binary+0x2e0/0x11ac) +[] (load_elf_binary+0x2e0/0x11ac) from [] (search_binary_handler+0x94/0x2a4) +[] (search_binary_handler+0x94/0x2a4) from [] (do_execve+0x254/0x364) +[] (do_execve+0x254/0x364) from [] (sys_execve+0x34/0x54) +[] (sys_execve+0x34/0x54) from [] (ret_fast_syscall+0x0/0x30) +---[ end trace 0000000000000002 ]--- + +The reason is that ARM need irq enabled when doing activate_mm(). +According to mm-protect-activate-switch-mm.patch, actually +preempt_[disable|enable]_rt() is sufficient. 
+ +Inspired-by: Steven Rostedt +Signed-off-by: Yong Zhang +Cc: Steven Rostedt +Link: http://lkml.kernel.org/r/1337061236-1766-1-git-send-email-yong.zhang0@gmail.com +Signed-off-by: Thomas Gleixner +--- + fs/exec.c | 2 ++ + mm/mmu_context.c | 2 ++ + 2 files changed, 4 insertions(+) + +Index: linux-stable/fs/exec.c +=================================================================== +--- linux-stable.orig/fs/exec.c ++++ linux-stable/fs/exec.c +@@ -836,10 +836,12 @@ static int exec_mmap(struct mm_struct *m + } + } + task_lock(tsk); ++ preempt_disable_rt(); + active_mm = tsk->active_mm; + tsk->mm = mm; + tsk->active_mm = mm; + activate_mm(active_mm, mm); ++ preempt_enable_rt(); + task_unlock(tsk); + arch_pick_mmap_layout(mm); + if (old_mm) { +Index: linux-stable/mm/mmu_context.c +=================================================================== +--- linux-stable.orig/mm/mmu_context.c ++++ linux-stable/mm/mmu_context.c +@@ -23,6 +23,7 @@ void use_mm(struct mm_struct *mm) + struct task_struct *tsk = current; + + task_lock(tsk); ++ preempt_disable_rt(); + active_mm = tsk->active_mm; + if (active_mm != mm) { + atomic_inc(&mm->mm_count); +@@ -30,6 +31,7 @@ void use_mm(struct mm_struct *mm) + } + tsk->mm = mm; + switch_mm(active_mm, mm, tsk); ++ preempt_enable_rt(); + task_unlock(tsk); + + if (active_mm != mm) diff --git a/debian/patches/features/all/rt/mm-remove-preempt-count-from-pf.patch b/debian/patches/features/all/rt/mm-remove-preempt-count-from-pf.patch new file mode 100644 index 000000000..b57ad3c56 --- /dev/null +++ b/debian/patches/features/all/rt/mm-remove-preempt-count-from-pf.patch @@ -0,0 +1,37 @@ +From: Thomas Gleixner +Date: Sat, 25 Jul 2009 22:06:27 +0200 +Subject: mm: Remove preempt count from pagefault disable/enable +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Now that all users are cleaned up, we can remove the preemption count. + +Signed-off-by: Thomas Gleixner +--- + mm/memory.c | 7 ------- + 1 file changed, 7 deletions(-) + +Index: linux-stable/mm/memory.c +=================================================================== +--- linux-stable.orig/mm/memory.c ++++ linux-stable/mm/memory.c +@@ -3751,7 +3751,6 @@ unlock: + #ifdef CONFIG_PREEMPT_RT_FULL + void pagefault_disable(void) + { +- inc_preempt_count(); + current->pagefault_disabled++; + /* + * make sure to have issued the store before a pagefault +@@ -3769,12 +3768,6 @@ void pagefault_enable(void) + */ + barrier(); + current->pagefault_disabled--; +- dec_preempt_count(); +- /* +- * make sure we do.. +- */ +- barrier(); +- preempt_check_resched(); + } + EXPORT_SYMBOL(pagefault_enable); + #endif diff --git a/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch b/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch new file mode 100644 index 000000000..9e95e4bdb --- /dev/null +++ b/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch @@ -0,0 +1,289 @@ +Subject: mm, rt: kmap_atomic scheduling +From: Peter Zijlstra +Date: Thu, 28 Jul 2011 10:43:51 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +In fact, with migrate_disable() existing one could play games with +kmap_atomic. You could save/restore the kmap_atomic slots on context +switch (if there are any in use of course), this should be esp easy now +that we have a kmap_atomic stack. + +Something like the below.. 
it wants replacing all the preempt_disable() +stuff with pagefault_disable() && migrate_disable() of course, but then +you can flip kmaps around like below. + +Signed-off-by: Peter Zijlstra +[dvhart@linux.intel.com: build fix] +Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins + +[tglx@linutronix.de: Get rid of the per cpu variable and store the idx + and the pte content right away in the task struct. + Shortens the context switch code. ] +--- + arch/x86/kernel/process_32.c | 32 ++++++++++++++++++++++++++++++++ + arch/x86/mm/highmem_32.c | 9 ++++++++- + arch/x86/mm/iomap_32.c | 9 ++++++++- + include/linux/highmem.h | 27 +++++++++++++++++++++++---- + include/linux/sched.h | 7 +++++++ + mm/highmem.c | 6 ++++-- + mm/memory.c | 2 ++ + 7 files changed, 84 insertions(+), 8 deletions(-) + +Index: linux-stable/arch/x86/kernel/process_32.c +=================================================================== +--- linux-stable.orig/arch/x86/kernel/process_32.c ++++ linux-stable/arch/x86/kernel/process_32.c +@@ -36,6 +36,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -214,6 +215,35 @@ start_thread(struct pt_regs *regs, unsig + } + EXPORT_SYMBOL_GPL(start_thread); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) ++{ ++ int i; ++ ++ /* ++ * Clear @prev's kmap_atomic mappings ++ */ ++ for (i = 0; i < prev_p->kmap_idx; i++) { ++ int idx = i + KM_TYPE_NR * smp_processor_id(); ++ pte_t *ptep = kmap_pte - idx; ++ ++ kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx)); ++ } ++ /* ++ * Restore @next_p's kmap_atomic mappings ++ */ ++ for (i = 0; i < next_p->kmap_idx; i++) { ++ int idx = i + KM_TYPE_NR * smp_processor_id(); ++ ++ if (!pte_none(next_p->kmap_pte[i])) ++ set_pte(kmap_pte - idx, next_p->kmap_pte[i]); ++ } ++} ++#else ++static inline void ++switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } ++#endif ++ + + /* + * switch_to(x,y) should switch tasks from x to y. +@@ -293,6 +323,8 @@ __switch_to(struct task_struct *prev_p, + task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) + __switch_to_xtra(prev_p, next_p, tss); + ++ switch_kmaps(prev_p, next_p); ++ + /* + * Leave lazy mode, flushing any hypercalls made here. + * This must be done before restoring TLS segments so +Index: linux-stable/arch/x86/mm/highmem_32.c +=================================================================== +--- linux-stable.orig/arch/x86/mm/highmem_32.c ++++ linux-stable/arch/x86/mm/highmem_32.c +@@ -31,6 +31,7 @@ EXPORT_SYMBOL(kunmap); + */ + void *kmap_atomic_prot(struct page *page, pgprot_t prot) + { ++ pte_t pte = mk_pte(page, prot); + unsigned long vaddr; + int idx, type; + +@@ -44,7 +45,10 @@ void *kmap_atomic_prot(struct page *page + idx = type + KM_TYPE_NR*smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + BUG_ON(!pte_none(*(kmap_pte-idx))); +- set_pte(kmap_pte-idx, mk_pte(page, prot)); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = pte; ++#endif ++ set_pte(kmap_pte-idx, pte); + arch_flush_lazy_mmu_mode(); + + return (void *)vaddr; +@@ -87,6 +91,9 @@ void __kunmap_atomic(void *kvaddr) + * is a bad idea also, in case the page changes cacheability + * attributes or becomes a protected page in a hypervisor. 
+ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = __pte(0); ++#endif + kpte_clear_flush(kmap_pte-idx, vaddr); + kmap_atomic_idx_pop(); + arch_flush_lazy_mmu_mode(); +Index: linux-stable/arch/x86/mm/iomap_32.c +=================================================================== +--- linux-stable.orig/arch/x86/mm/iomap_32.c ++++ linux-stable/arch/x86/mm/iomap_32.c +@@ -56,6 +56,7 @@ EXPORT_SYMBOL_GPL(iomap_free); + + void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) + { ++ pte_t pte = pfn_pte(pfn, prot); + unsigned long vaddr; + int idx, type; + +@@ -64,7 +65,10 @@ void *kmap_atomic_prot_pfn(unsigned long + type = kmap_atomic_idx_push(); + idx = type + KM_TYPE_NR * smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); +- set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = pte; ++#endif ++ set_pte(kmap_pte - idx, pte); + arch_flush_lazy_mmu_mode(); + + return (void *)vaddr; +@@ -110,6 +114,9 @@ iounmap_atomic(void __iomem *kvaddr) + * is a bad idea also, in case the page changes cacheability + * attributes or becomes a protected page in a hypervisor. + */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = __pte(0); ++#endif + kpte_clear_flush(kmap_pte-idx, vaddr); + kmap_atomic_idx_pop(); + } +Index: linux-stable/include/linux/highmem.h +=================================================================== +--- linux-stable.orig/include/linux/highmem.h ++++ linux-stable/include/linux/highmem.h +@@ -85,32 +85,51 @@ static inline void __kunmap_atomic(void + + #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) + ++#ifndef CONFIG_PREEMPT_RT_FULL + DECLARE_PER_CPU(int, __kmap_atomic_idx); ++#endif + + static inline int kmap_atomic_idx_push(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1; + +-#ifdef CONFIG_DEBUG_HIGHMEM ++# ifdef CONFIG_DEBUG_HIGHMEM + WARN_ON_ONCE(in_irq() && !irqs_disabled()); + BUG_ON(idx > KM_TYPE_NR); +-#endif ++# endif + return idx; ++#else ++ current->kmap_idx++; ++ BUG_ON(current->kmap_idx > KM_TYPE_NR); ++ return current->kmap_idx - 1; ++#endif + } + + static inline int kmap_atomic_idx(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + return __this_cpu_read(__kmap_atomic_idx) - 1; ++#else ++ return current->kmap_idx - 1; ++#endif + } + + static inline void kmap_atomic_idx_pop(void) + { +-#ifdef CONFIG_DEBUG_HIGHMEM ++#ifndef CONFIG_PREEMPT_RT_FULL ++# ifdef CONFIG_DEBUG_HIGHMEM + int idx = __this_cpu_dec_return(__kmap_atomic_idx); + + BUG_ON(idx < 0); +-#else ++# else + __this_cpu_dec(__kmap_atomic_idx); ++# endif ++#else ++ current->kmap_idx--; ++# ifdef CONFIG_DEBUG_HIGHMEM ++ BUG_ON(current->kmap_idx < 0); ++# endif + #endif + } + +Index: linux-stable/include/linux/sched.h +=================================================================== +--- linux-stable.orig/include/linux/sched.h ++++ linux-stable/include/linux/sched.h +@@ -23,6 +23,7 @@ struct sched_param { + #include + #include + ++#include + #include + #include + #include +@@ -1446,6 +1447,12 @@ struct task_struct { + struct rcu_head put_rcu; + int softirq_nestcnt; + #endif ++#ifdef CONFIG_PREEMPT_RT_FULL ++# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32 ++ int kmap_idx; ++ pte_t kmap_pte[KM_TYPE_NR]; ++# endif ++#endif + }; + + #ifdef CONFIG_NUMA_BALANCING +Index: linux-stable/mm/highmem.c +=================================================================== +--- linux-stable.orig/mm/highmem.c ++++ linux-stable/mm/highmem.c +@@ -29,10 +29,11 @@ + #include + 
#include + +- ++#ifndef CONFIG_PREEMPT_RT_FULL + #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) + DEFINE_PER_CPU(int, __kmap_atomic_idx); + #endif ++#endif + + /* + * Virtual_count is not a pure "count". +@@ -47,8 +48,9 @@ DEFINE_PER_CPU(int, __kmap_atomic_idx); + unsigned long totalhigh_pages __read_mostly; + EXPORT_SYMBOL(totalhigh_pages); + +- ++#ifndef CONFIG_PREEMPT_RT_FULL + EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx); ++#endif + + unsigned int nr_free_highpages (void) + { +Index: linux-stable/mm/memory.c +=================================================================== +--- linux-stable.orig/mm/memory.c ++++ linux-stable/mm/memory.c +@@ -3751,6 +3751,7 @@ unlock: + #ifdef CONFIG_PREEMPT_RT_FULL + void pagefault_disable(void) + { ++ migrate_disable(); + current->pagefault_disabled++; + /* + * make sure to have issued the store before a pagefault +@@ -3768,6 +3769,7 @@ void pagefault_enable(void) + */ + barrier(); + current->pagefault_disabled--; ++ migrate_enable(); + } + EXPORT_SYMBOL(pagefault_enable); + #endif diff --git a/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch b/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch new file mode 100644 index 000000000..80fbe46f4 --- /dev/null +++ b/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch @@ -0,0 +1,41 @@ +From: Thomas Gleixner +Date: Fri, 3 Jul 2009 08:44:34 -0500 +Subject: mm: scatterlist dont disable irqs on RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + lib/scatterlist.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +Index: linux-stable/lib/scatterlist.c +=================================================================== +--- linux-stable.orig/lib/scatterlist.c ++++ linux-stable/lib/scatterlist.c +@@ -533,7 +533,7 @@ void sg_miter_stop(struct sg_mapping_ite + flush_kernel_dcache_page(miter->page); + + if (miter->__flags & SG_MITER_ATOMIC) { +- WARN_ON_ONCE(preemptible()); ++ WARN_ON_ONCE(!pagefault_disabled()); + kunmap_atomic(miter->addr); + } else + kunmap(miter->page); +@@ -573,7 +573,7 @@ static size_t sg_copy_buffer(struct scat + + sg_miter_start(&miter, sgl, nents, sg_flags); + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + + while (sg_miter_next(&miter) && offset < buflen) { + unsigned int len; +@@ -590,7 +590,7 @@ static size_t sg_copy_buffer(struct scat + + sg_miter_stop(&miter); + +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + return offset; + } + diff --git a/debian/patches/features/all/rt/mm-shrink-the-page-frame-to-rt-size.patch b/debian/patches/features/all/rt/mm-shrink-the-page-frame-to-rt-size.patch new file mode 100644 index 000000000..12544ce8a --- /dev/null +++ b/debian/patches/features/all/rt/mm-shrink-the-page-frame-to-rt-size.patch @@ -0,0 +1,147 @@ +From: Peter Zijlstra +Date: Fri, 3 Jul 2009 08:44:54 -0500 +Subject: mm: shrink the page frame to !-rt size +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +He below is a boot-tested hack to shrink the page frame size back to +normal. + +Should be a net win since there should be many less PTE-pages than +page-frames. 
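The idea, restated: on PREEMPT_RT_FULL the spinlock is rtmutex-backed and too large to embed in every struct page, so the patch stores only a pointer and allocates the lock in pte_lock_init(), giving the page back if that allocation fails. A rough userspace model of the trade-off — struct layouts, sizes and names are made up for illustration, not the kernel's:

#include <stdio.h>
#include <stdlib.h>

struct rt_spinlock { char blob[64]; };   /* stand-in for a sleeping spinlock */

struct page_nonrt {                       /* lock embedded: every page pays */
    unsigned long flags;
    struct rt_spinlock ptl;
};

struct page_rt {                          /* pointer only: just PTE pages pay */
    unsigned long flags;
    struct rt_spinlock *ptl;
};

/* mirrors pte_lock_init() in the hunk below: allocate the lock, and release
 * the page if that allocation fails */
static struct page_rt *pte_lock_init(struct page_rt *page)
{
    page->ptl = malloc(sizeof(*page->ptl));
    if (!page->ptl) {
        free(page);                       /* __free_page() in the kernel */
        return NULL;
    }
    return page;
}

int main(void)
{
    printf("embedded lock: %zu bytes/page, pointer: %zu bytes/page\n",
           sizeof(struct page_nonrt), sizeof(struct page_rt));

    struct page_rt *pte = calloc(1, sizeof(*pte));
    if (pte)
        pte = pte_lock_init(pte);         /* caller pattern from the patch */
    if (pte) {
        free(pte->ptl);                   /* pte_lock_deinit() */
        free(pte);
    }
    return 0;
}

Only page frames that actually back page tables carry the extra allocation, which is why the description expects a net win.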
+ +Signed-off-by: Peter Zijlstra +Signed-off-by: Thomas Gleixner + +--- + include/linux/mm.h | 46 +++++++++++++++++++++++++++++++++++++++------- + include/linux/mm_types.h | 4 ++++ + mm/memory.c | 32 ++++++++++++++++++++++++++++++++ + 3 files changed, 75 insertions(+), 7 deletions(-) + +Index: linux-stable/include/linux/mm.h +=================================================================== +--- linux-stable.orig/include/linux/mm.h ++++ linux-stable/include/linux/mm.h +@@ -1241,27 +1241,59 @@ static inline pmd_t *pmd_alloc(struct mm + * overflow into the next struct page (as it might with DEBUG_SPINLOCK). + * When freeing, reset page->mapping so free_pages_check won't complain. + */ ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + #define __pte_lockptr(page) &((page)->ptl) +-#define pte_lock_init(_page) do { \ +- spin_lock_init(__pte_lockptr(_page)); \ +-} while (0) ++ ++static inline struct page *pte_lock_init(struct page *page) ++{ ++ spin_lock_init(__pte_lockptr(page)); ++ return page; ++} ++ + #define pte_lock_deinit(page) ((page)->mapping = NULL) ++ ++#else /* !PREEMPT_RT_FULL */ ++ ++/* ++ * On PREEMPT_RT_FULL the spinlock_t's are too large to embed in the ++ * page frame, hence it only has a pointer and we need to dynamically ++ * allocate the lock when we allocate PTE-pages. ++ * ++ * This is an overall win, since only a small fraction of the pages ++ * will be PTE pages under normal circumstances. ++ */ ++ ++#define __pte_lockptr(page) ((page)->ptl) ++ ++extern struct page *pte_lock_init(struct page *page); ++extern void pte_lock_deinit(struct page *page); ++ ++#endif /* PREEMPT_RT_FULL */ ++ + #define pte_lockptr(mm, pmd) ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));}) + #else /* !USE_SPLIT_PTLOCKS */ + /* + * We use mm->page_table_lock to guard all pagetable pages of the mm. + */ +-#define pte_lock_init(page) do {} while (0) ++static inline struct page *pte_lock_init(struct page *page) { return page; } + #define pte_lock_deinit(page) do {} while (0) + #define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;}) + #endif /* USE_SPLIT_PTLOCKS */ + +-static inline void pgtable_page_ctor(struct page *page) ++static inline struct page *__pgtable_page_ctor(struct page *page) + { +- pte_lock_init(page); +- inc_zone_page_state(page, NR_PAGETABLE); ++ page = pte_lock_init(page); ++ if (page) ++ inc_zone_page_state(page, NR_PAGETABLE); ++ return page; + } + ++#define pgtable_page_ctor(page) \ ++do { \ ++ page = __pgtable_page_ctor(page); \ ++} while (0) ++ + static inline void pgtable_page_dtor(struct page *page) + { + pte_lock_deinit(page); +Index: linux-stable/include/linux/mm_types.h +=================================================================== +--- linux-stable.orig/include/linux/mm_types.h ++++ linux-stable/include/linux/mm_types.h +@@ -142,7 +142,11 @@ struct page { + * system if PG_buddy is set. 
+ */ + #if USE_SPLIT_PTLOCKS ++# ifndef CONFIG_PREEMPT_RT_FULL + spinlock_t ptl; ++# else ++ spinlock_t *ptl; ++# endif + #endif + struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */ + struct page *first_page; /* Compound tail pages */ +Index: linux-stable/mm/memory.c +=================================================================== +--- linux-stable.orig/mm/memory.c ++++ linux-stable/mm/memory.c +@@ -4319,3 +4319,35 @@ void copy_user_huge_page(struct page *ds + } + } + #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ ++ ++#if defined(CONFIG_PREEMPT_RT_FULL) && (USE_SPLIT_PTLOCKS > 0) ++/* ++ * Heinous hack, relies on the caller doing something like: ++ * ++ * pte = alloc_pages(PGALLOC_GFP, 0); ++ * if (pte) ++ * pgtable_page_ctor(pte); ++ * return pte; ++ * ++ * This ensures we release the page and return NULL when the ++ * lock allocation fails. ++ */ ++struct page *pte_lock_init(struct page *page) ++{ ++ page->ptl = kmalloc(sizeof(spinlock_t), GFP_KERNEL); ++ if (page->ptl) { ++ spin_lock_init(__pte_lockptr(page)); ++ } else { ++ __free_page(page); ++ page = NULL; ++ } ++ return page; ++} ++ ++void pte_lock_deinit(struct page *page) ++{ ++ kfree(page->ptl); ++ page->mapping = NULL; ++} ++ ++#endif diff --git a/debian/patches/features/all/rt/mm-slab-more-lock-breaks.patch b/debian/patches/features/all/rt/mm-slab-more-lock-breaks.patch new file mode 100644 index 000000000..0ff505b9c --- /dev/null +++ b/debian/patches/features/all/rt/mm-slab-more-lock-breaks.patch @@ -0,0 +1,230 @@ +From: Peter Zijlstra +Date: Fri, 3 Jul 2009 08:44:43 -0500 +Subject: mm: More lock breaks in slab.c +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Handle __free_pages outside of the locked regions. This reduces the +lock contention on the percpu slab locks in -rt significantly. 
+ +Signed-off-by: Peter Zijlstra +Signed-off-by: Thomas Gleixner + +--- + mm/slab.c | 80 +++++++++++++++++++++++++++++++++++++++++++++++++------------- + 1 file changed, 64 insertions(+), 16 deletions(-) + +Index: linux-stable/mm/slab.c +=================================================================== +--- linux-stable.orig/mm/slab.c ++++ linux-stable/mm/slab.c +@@ -634,6 +634,7 @@ static void slab_set_debugobj_lock_class + #endif + + static DEFINE_PER_CPU(struct delayed_work, slab_reap_work); ++static DEFINE_PER_CPU(struct list_head, slab_free_list); + static DEFINE_LOCAL_IRQ_LOCK(slab_lock); + + #ifndef CONFIG_PREEMPT_RT_BASE +@@ -666,6 +667,34 @@ static void unlock_slab_on(unsigned int + } + #endif + ++static void free_delayed(struct list_head *h) ++{ ++ while(!list_empty(h)) { ++ struct page *page = list_first_entry(h, struct page, lru); ++ ++ list_del(&page->lru); ++ __free_pages(page, page->index); ++ } ++} ++ ++static void unlock_l3_and_free_delayed(spinlock_t *list_lock) ++{ ++ LIST_HEAD(tmp); ++ ++ list_splice_init(&__get_cpu_var(slab_free_list), &tmp); ++ local_spin_unlock_irq(slab_lock, list_lock); ++ free_delayed(&tmp); ++} ++ ++static void unlock_slab_and_free_delayed(unsigned long flags) ++{ ++ LIST_HEAD(tmp); ++ ++ list_splice_init(&__get_cpu_var(slab_free_list), &tmp); ++ local_unlock_irqrestore(slab_lock, flags); ++ free_delayed(&tmp); ++} ++ + static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) + { + return cachep->array[smp_processor_id()]; +@@ -1242,6 +1271,7 @@ static void __cpuinit cpuup_canceled(lon + + if (!cpumask_empty(mask)) { + local_spin_unlock_irq(slab_lock, &n->list_lock); ++ unlock_l3_and_free_delayed(&n->list_lock); + goto free_array_cache; + } + +@@ -1256,6 +1286,7 @@ static void __cpuinit cpuup_canceled(lon + n->alien = NULL; + + local_spin_unlock_irq(slab_lock, &n->list_lock); ++ unlock_l3_and_free_delayed(&n->list_lock); + + kfree(shared); + if (alien) { +@@ -1546,6 +1577,8 @@ void __init kmem_cache_init(void) + use_alien_caches = 0; + + local_irq_lock_init(slab_lock); ++ for_each_possible_cpu(i) ++ INIT_LIST_HEAD(&per_cpu(slab_free_list, i)); + + for (i = 0; i < NUM_INIT_LISTS; i++) + kmem_cache_node_init(&init_kmem_cache_node[i]); +@@ -1824,12 +1857,14 @@ static void *kmem_getpages(struct kmem_c + /* + * Interface to system's page release. 
+ */ +-static void kmem_freepages(struct kmem_cache *cachep, void *addr) ++static void kmem_freepages(struct kmem_cache *cachep, void *addr, bool delayed) + { + unsigned long i = (1 << cachep->gfporder); +- struct page *page = virt_to_page(addr); ++ struct page *page, *basepage = virt_to_page(addr); + const unsigned long nr_freed = i; + ++ page = basepage; ++ + kmemcheck_free_shadow(page, cachep->gfporder); + + if (cachep->flags & SLAB_RECLAIM_ACCOUNT) +@@ -1848,7 +1883,12 @@ static void kmem_freepages(struct kmem_c + memcg_release_pages(cachep, cachep->gfporder); + if (current->reclaim_state) + current->reclaim_state->reclaimed_slab += nr_freed; +- free_memcg_kmem_pages((unsigned long)addr, cachep->gfporder); ++ if (!delayed) { ++ free_memcg_kmem_pages((unsigned long)addr, cachep->gfporder); ++ } else { ++ basepage->index = cachep->gfporder; ++ list_add(&basepage->lru, &__get_cpu_var(slab_free_list)); ++ } + } + + static void kmem_rcu_free(struct rcu_head *head) +@@ -1856,7 +1896,7 @@ static void kmem_rcu_free(struct rcu_hea + struct slab_rcu *slab_rcu = (struct slab_rcu *)head; + struct kmem_cache *cachep = slab_rcu->cachep; + +- kmem_freepages(cachep, slab_rcu->addr); ++ kmem_freepages(cachep, slab_rcu->addr, false); + if (OFF_SLAB(cachep)) + kmem_cache_free(cachep->slabp_cache, slab_rcu); + } +@@ -2073,7 +2113,8 @@ static void slab_destroy_debugcheck(stru + * Before calling the slab must have been unlinked from the cache. The + * cache-lock is not held/needed. + */ +-static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp) ++static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp, ++ bool delayed) + { + void *addr = slabp->s_mem - slabp->colouroff; + +@@ -2086,7 +2127,7 @@ static void slab_destroy(struct kmem_cac + slab_rcu->addr = addr; + call_rcu(&slab_rcu->head, kmem_rcu_free); + } else { +- kmem_freepages(cachep, addr); ++ kmem_freepages(cachep, addr, delayed); + if (OFF_SLAB(cachep)) + kmem_cache_free(cachep->slabp_cache, slabp); + } +@@ -2497,9 +2538,15 @@ static void do_drain(void *arg) + __do_drain(arg, smp_processor_id()); + } + #else +-static void do_drain(void *arg, int this_cpu) ++static void do_drain(void *arg, int cpu) + { +- __do_drain(arg, this_cpu); ++ LIST_HEAD(tmp); ++ ++ lock_slab_on(cpu); ++ __do_drain(arg, cpu); ++ list_splice_init(&per_cpu(slab_free_list, cpu), &tmp); ++ unlock_slab_on(cpu); ++ free_delayed(&tmp); + } + #endif + +@@ -2557,7 +2604,7 @@ static int drain_freelist(struct kmem_ca + */ + n->free_objects -= cache->num; + local_spin_unlock_irq(slab_lock, &n->list_lock); +- slab_destroy(cache, slabp); ++ slab_destroy(cache, slabp, false); + nr_freed++; + } + out: +@@ -2872,7 +2919,7 @@ static int cache_grow(struct kmem_cache + spin_unlock(&n->list_lock); + return 1; + opps1: +- kmem_freepages(cachep, objp); ++ kmem_freepages(cachep, objp, false); + failed: + if (local_flags & __GFP_WAIT) + local_lock_irq(slab_lock); +@@ -3554,7 +3601,7 @@ static void free_block(struct kmem_cache + * a different cache, refer to comments before + * alloc_slabmgmt. 
+ */ +- slab_destroy(cachep, slabp); ++ slab_destroy(cachep, slabp, true); + } else { + list_add(&slabp->list, &n->slabs_free); + } +@@ -3822,7 +3869,7 @@ void kmem_cache_free(struct kmem_cache * + debug_check_no_obj_freed(objp, cachep->object_size); + local_lock_irqsave(slab_lock, flags); + __cache_free(cachep, objp, _RET_IP_); +- local_unlock_irqrestore(slab_lock, flags); ++ unlock_slab_and_free_delayed(flags); + + trace_kmem_cache_free(_RET_IP_, objp); + } +@@ -3853,7 +3900,7 @@ void kfree(const void *objp) + debug_check_no_obj_freed(objp, c->object_size); + local_lock_irqsave(slab_lock, flags); + __cache_free(c, (void *)objp, _RET_IP_); +- local_unlock_irqrestore(slab_lock, flags); ++ unlock_slab_and_free_delayed(flags); + } + EXPORT_SYMBOL(kfree); + +@@ -3903,7 +3950,8 @@ static int alloc_kmemlist(struct kmem_ca + } + n->free_limit = (1 + nr_cpus_node(node)) * + cachep->batchcount + cachep->num; +- local_spin_unlock_irq(slab_lock, &n->list_lock); ++ unlock_l3_and_free_delayed(&n->list_lock); ++ + kfree(shared); + free_alien_cache(new_alien); + continue; +@@ -4011,8 +4059,8 @@ static int __do_tune_cpucache(struct kme + local_spin_lock_irq(slab_lock, + &cachep->node[cpu_to_mem(i)]->list_lock); + free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i)); +- local_spin_unlock_irq(slab_lock, +-x &cachep->node[cpu_to_mem(i)]->list_lock); ++ ++ unlock_l3_and_free_delayed(&cachep->nodelists[cpu_to_mem(i)]->list_lock); + kfree(ccold); + } + kfree(new); diff --git a/debian/patches/features/all/rt/mm-slab-move-debug-out.patch b/debian/patches/features/all/rt/mm-slab-move-debug-out.patch new file mode 100644 index 000000000..15b2aa5ba --- /dev/null +++ b/debian/patches/features/all/rt/mm-slab-move-debug-out.patch @@ -0,0 +1,40 @@ +Subject: mm-slab-move-debug-out.patch +From: Thomas Gleixner +Date: Mon, 20 Jun 2011 10:42:04 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + mm/slab.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +Index: linux-stable/mm/slab.c +=================================================================== +--- linux-stable.orig/mm/slab.c ++++ linux-stable/mm/slab.c +@@ -3771,10 +3771,10 @@ void kmem_cache_free(struct kmem_cache * + if (!cachep) + return; + +- local_irq_save(flags); + debug_check_no_locks_freed(objp, cachep->object_size); + if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) + debug_check_no_obj_freed(objp, cachep->object_size); ++ local_irq_save(flags); + __cache_free(cachep, objp, _RET_IP_); + local_irq_restore(flags); + +@@ -3800,12 +3800,12 @@ void kfree(const void *objp) + + if (unlikely(ZERO_OR_NULL_PTR(objp))) + return; +- local_irq_save(flags); + kfree_debugcheck(objp); + c = virt_to_cache(objp); + debug_check_no_locks_freed(objp, c->object_size); + + debug_check_no_obj_freed(objp, c->object_size); ++ local_irq_save(flags); + __cache_free(c, (void *)objp, _RET_IP_); + local_irq_restore(flags); + } diff --git a/debian/patches/features/all/rt/mm-slab-wrap-functions.patch b/debian/patches/features/all/rt/mm-slab-wrap-functions.patch new file mode 100644 index 000000000..484da365f --- /dev/null +++ b/debian/patches/features/all/rt/mm-slab-wrap-functions.patch @@ -0,0 +1,449 @@ +Subject: mm-slab-wrap-functions.patch +From: Thomas Gleixner +Date: Sat, 18 Jun 2011 19:44:43 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + mm/slab.c | 157 
++++++++++++++++++++++++++++++++++++++++++-------------------- + 1 file changed, 108 insertions(+), 49 deletions(-) + +Index: linux-stable/mm/slab.c +=================================================================== +--- linux-stable.orig/mm/slab.c ++++ linux-stable/mm/slab.c +@@ -116,6 +116,7 @@ + #include + #include + #include ++#include + + #include + +@@ -633,6 +634,37 @@ static void slab_set_debugobj_lock_class + #endif + + static DEFINE_PER_CPU(struct delayed_work, slab_reap_work); ++static DEFINE_LOCAL_IRQ_LOCK(slab_lock); ++ ++#ifndef CONFIG_PREEMPT_RT_BASE ++# define slab_on_each_cpu(func, cp) on_each_cpu(func, cp, 1) ++#else ++/* ++ * execute func() for all CPUs. On PREEMPT_RT we dont actually have ++ * to run on the remote CPUs - we only have to take their CPU-locks. ++ * (This is a rare operation, so cacheline bouncing is not an issue.) ++ */ ++static void ++slab_on_each_cpu(void (*func)(void *arg, int this_cpu), void *arg) ++{ ++ unsigned int i; ++ ++ get_cpu_light(); ++ for_each_online_cpu(i) ++ func(arg, i); ++ put_cpu_light(); ++} ++ ++static void lock_slab_on(unsigned int cpu) ++{ ++ local_lock_irq_on(slab_lock, cpu); ++} ++ ++static void unlock_slab_on(unsigned int cpu) ++{ ++ local_unlock_irq_on(slab_lock, cpu); ++} ++#endif + + static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) + { +@@ -1073,9 +1105,10 @@ static void reap_alien(struct kmem_cache + if (n->alien) { + struct array_cache *ac = n->alien[node]; + +- if (ac && ac->avail && spin_trylock_irq(&ac->lock)) { ++ if (ac && ac->avail && ++ local_spin_trylock_irq(slab_lock, &ac->lock)) { + __drain_alien_cache(cachep, ac, node); +- spin_unlock_irq(&ac->lock); ++ local_spin_unlock_irq(slab_lock, &ac->lock); + } + } + } +@@ -1090,9 +1123,9 @@ static void drain_alien_cache(struct kme + for_each_online_node(i) { + ac = alien[i]; + if (ac) { +- spin_lock_irqsave(&ac->lock, flags); ++ local_spin_lock_irqsave(slab_lock, &ac->lock, flags); + __drain_alien_cache(cachep, ac, i); +- spin_unlock_irqrestore(&ac->lock, flags); ++ local_spin_unlock_irqrestore(slab_lock, &ac->lock, flags); + } + } + } +@@ -1171,11 +1204,11 @@ static int init_cache_node_node(int node + cachep->node[node] = n; + } + +- spin_lock_irq(&cachep->node[node]->list_lock); ++ local_spin_lock_irq(slab_lock, &cachep->nodelists[node]->list_lock); + cachep->node[node]->free_limit = + (1 + nr_cpus_node(node)) * + cachep->batchcount + cachep->num; +- spin_unlock_irq(&cachep->node[node]->list_lock); ++ local_spin_unlock_irq(slab_lock, &cachep->nodelists[node]->list_lock); + } + return 0; + } +@@ -1200,7 +1233,7 @@ static void __cpuinit cpuup_canceled(lon + if (!n) + goto free_array_cache; + +- spin_lock_irq(&n->list_lock); ++ local_spin_lock_irq(slab_lock, &n->list_lock); + + /* Free limit for this kmem_cache_node */ + n->free_limit -= cachep->batchcount; +@@ -1208,7 +1241,7 @@ static void __cpuinit cpuup_canceled(lon + free_block(cachep, nc->entry, nc->avail, node); + + if (!cpumask_empty(mask)) { +- spin_unlock_irq(&n->list_lock); ++ local_spin_unlock_irq(slab_lock, &n->list_lock); + goto free_array_cache; + } + +@@ -1222,7 +1255,7 @@ static void __cpuinit cpuup_canceled(lon + alien = n->alien; + n->alien = NULL; + +- spin_unlock_irq(&n->list_lock); ++ local_spin_unlock_irq(slab_lock, &n->list_lock); + + kfree(shared); + if (alien) { +@@ -1296,7 +1329,7 @@ static int __cpuinit cpuup_prepare(long + n = cachep->node[node]; + BUG_ON(!n); + +- spin_lock_irq(&n->list_lock); ++ local_spin_lock_irq(slab_lock, &n->list_lock); + if (!n->shared) { + 
/* + * We are serialised from CPU_DEAD or +@@ -1311,7 +1344,7 @@ static int __cpuinit cpuup_prepare(long + alien = NULL; + } + #endif +- spin_unlock_irq(&n->list_lock); ++ local_spin_unlock_irq(slab_lock, &n->list_lock); + kfree(shared); + free_alien_cache(alien); + if (cachep->flags & SLAB_DEBUG_OBJECTS) +@@ -1512,6 +1545,8 @@ void __init kmem_cache_init(void) + if (num_possible_nodes() == 1) + use_alien_caches = 0; + ++ local_irq_lock_init(slab_lock); ++ + for (i = 0; i < NUM_INIT_LISTS; i++) + kmem_cache_node_init(&init_kmem_cache_node[i]); + +@@ -2408,7 +2443,7 @@ __kmem_cache_create (struct kmem_cache * + #if DEBUG + static void check_irq_off(void) + { +- BUG_ON(!irqs_disabled()); ++ BUG_ON_NONRT(!irqs_disabled()); + } + + static void check_irq_on(void) +@@ -2443,26 +2478,37 @@ static void drain_array(struct kmem_cach + struct array_cache *ac, + int force, int node); + +-static void do_drain(void *arg) ++static void __do_drain(void *arg, unsigned int cpu) + { + struct kmem_cache *cachep = arg; + struct array_cache *ac; +- int node = numa_mem_id(); ++ int node = cpu_to_mem(cpu); + +- check_irq_off(); +- ac = cpu_cache_get(cachep); ++ ac = cpu_cache_get_on_cpu(cachep, cpu); + spin_lock(&cachep->node[node]->list_lock); + free_block(cachep, ac->entry, ac->avail, node); + spin_unlock(&cachep->node[node]->list_lock); + ac->avail = 0; + } + ++#ifndef CONFIG_PREEMPT_RT_BASE ++static void do_drain(void *arg) ++{ ++ __do_drain(arg, smp_processor_id()); ++} ++#else ++static void do_drain(void *arg, int this_cpu) ++{ ++ __do_drain(arg, this_cpu); ++} ++#endif ++ + static void drain_cpu_caches(struct kmem_cache *cachep) + { + struct kmem_cache_node *n; + int node; + +- on_each_cpu(do_drain, cachep, 1); ++ slab_on_each_cpu(do_drain, cachep); + check_irq_on(); + for_each_online_node(node) { + n = cachep->node[node]; +@@ -2493,10 +2539,10 @@ static int drain_freelist(struct kmem_ca + nr_freed = 0; + while (nr_freed < tofree && !list_empty(&n->slabs_free)) { + +- spin_lock_irq(&n->list_lock); ++ local_spin_lock_irq(slab_lock, &n->list_lock); + p = n->slabs_free.prev; + if (p == &n->slabs_free) { +- spin_unlock_irq(&n->list_lock); ++ local_spin_unlock_irq(slab_lock, &n->list_lock); + goto out; + } + +@@ -2510,7 +2556,7 @@ static int drain_freelist(struct kmem_ca + * to the cache. + */ + n->free_objects -= cache->num; +- spin_unlock_irq(&n->list_lock); ++ local_spin_unlock_irq(slab_lock, &n->list_lock); + slab_destroy(cache, slabp); + nr_freed++; + } +@@ -2785,7 +2831,7 @@ static int cache_grow(struct kmem_cache + offset *= cachep->colour_off; + + if (local_flags & __GFP_WAIT) +- local_irq_enable(); ++ local_unlock_irq(slab_lock); + + /* + * The test for missing atomic flag is performed here, rather than +@@ -2815,7 +2861,7 @@ static int cache_grow(struct kmem_cache + cache_init_objs(cachep, slabp); + + if (local_flags & __GFP_WAIT) +- local_irq_disable(); ++ local_lock_irq(slab_lock); + check_irq_off(); + spin_lock(&n->list_lock); + +@@ -2829,7 +2875,7 @@ opps1: + kmem_freepages(cachep, objp); + failed: + if (local_flags & __GFP_WAIT) +- local_irq_disable(); ++ local_lock_irq(slab_lock); + return 0; + } + +@@ -3243,11 +3289,11 @@ retry: + * set and go into memory reserves if necessary. 
+ */ + if (local_flags & __GFP_WAIT) +- local_irq_enable(); ++ local_unlock_irq(slab_lock); + kmem_flagcheck(cache, flags); + obj = kmem_getpages(cache, local_flags, numa_mem_id()); + if (local_flags & __GFP_WAIT) +- local_irq_disable(); ++ local_lock_irq(slab_lock); + if (obj) { + /* + * Insert into the appropriate per node queues +@@ -3368,7 +3414,7 @@ slab_alloc_node(struct kmem_cache *cache + cachep = memcg_kmem_get_cache(cachep, flags); + + cache_alloc_debugcheck_before(cachep, flags); +- local_irq_save(save_flags); ++ local_lock_irqsave(slab_lock, save_flags); + + if (nodeid == NUMA_NO_NODE) + nodeid = slab_node; +@@ -3393,7 +3439,7 @@ slab_alloc_node(struct kmem_cache *cache + /* ___cache_alloc_node can fall back to other nodes */ + ptr = ____cache_alloc_node(cachep, flags, nodeid); + out: +- local_irq_restore(save_flags); ++ local_unlock_irqrestore(slab_lock, save_flags); + ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); + kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags, + flags); +@@ -3455,9 +3501,9 @@ slab_alloc(struct kmem_cache *cachep, gf + cachep = memcg_kmem_get_cache(cachep, flags); + + cache_alloc_debugcheck_before(cachep, flags); +- local_irq_save(save_flags); ++ local_lock_irqsave(slab_lock, save_flags); + objp = __do_cache_alloc(cachep, flags); +- local_irq_restore(save_flags); ++ local_unlock_irqrestore(slab_lock, save_flags); + objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); + kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags, + flags); +@@ -3774,9 +3820,9 @@ void kmem_cache_free(struct kmem_cache * + debug_check_no_locks_freed(objp, cachep->object_size); + if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) + debug_check_no_obj_freed(objp, cachep->object_size); +- local_irq_save(flags); ++ local_lock_irqsave(slab_lock, flags); + __cache_free(cachep, objp, _RET_IP_); +- local_irq_restore(flags); ++ local_unlock_irqrestore(slab_lock, flags); + + trace_kmem_cache_free(_RET_IP_, objp); + } +@@ -3805,9 +3851,9 @@ void kfree(const void *objp) + debug_check_no_locks_freed(objp, c->object_size); + + debug_check_no_obj_freed(objp, c->object_size); +- local_irq_save(flags); ++ local_lock_irqsave(slab_lock, flags); + __cache_free(c, (void *)objp, _RET_IP_); +- local_irq_restore(flags); ++ local_unlock_irqrestore(slab_lock, flags); + } + EXPORT_SYMBOL(kfree); + +@@ -3844,7 +3890,7 @@ static int alloc_kmemlist(struct kmem_ca + if (n) { + struct array_cache *shared = n->shared; + +- spin_lock_irq(&n->list_lock); ++ local_spin_lock_irq(slab_lock, &n->list_lock); + + if (shared) + free_block(cachep, shared->entry, +@@ -3857,7 +3903,7 @@ static int alloc_kmemlist(struct kmem_ca + } + n->free_limit = (1 + nr_cpus_node(node)) * + cachep->batchcount + cachep->num; +- spin_unlock_irq(&n->list_lock); ++ local_spin_unlock_irq(slab_lock, &n->list_lock); + kfree(shared); + free_alien_cache(new_alien); + continue; +@@ -3904,17 +3950,28 @@ struct ccupdate_struct { + struct array_cache *new[0]; + }; + +-static void do_ccupdate_local(void *info) ++static void __do_ccupdate_local(void *info, int cpu) + { + struct ccupdate_struct *new = info; + struct array_cache *old; + +- check_irq_off(); +- old = cpu_cache_get(new->cachep); ++ old = cpu_cache_get_on_cpu(new->cachep, cpu); ++ ++ new->cachep->array[cpu] = new->new[cpu]; ++ new->new[cpu] = old; ++} + +- new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()]; +- new->new[smp_processor_id()] = old; ++#ifndef CONFIG_PREEMPT_RT_BASE ++static void do_ccupdate_local(void 
*info) ++{ ++ __do_ccupdate_local(info, smp_processor_id()); + } ++#else ++static void do_ccupdate_local(void *info, int cpu) ++{ ++ __do_ccupdate_local(info, cpu); ++} ++#endif + + /* Always called with the slab_mutex held */ + static int __do_tune_cpucache(struct kmem_cache *cachep, int limit, +@@ -3940,7 +3997,7 @@ static int __do_tune_cpucache(struct kme + } + new->cachep = cachep; + +- on_each_cpu(do_ccupdate_local, (void *)new, 1); ++ slab_on_each_cpu(do_ccupdate_local, (void *)new); + + check_irq_on(); + cachep->batchcount = batchcount; +@@ -3951,9 +4008,11 @@ static int __do_tune_cpucache(struct kme + struct array_cache *ccold = new->new[i]; + if (!ccold) + continue; +- spin_lock_irq(&cachep->node[cpu_to_mem(i)]->list_lock); ++ local_spin_lock_irq(slab_lock, ++ &cachep->node[cpu_to_mem(i)]->list_lock); + free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i)); +- spin_unlock_irq(&cachep->node[cpu_to_mem(i)]->list_lock); ++ local_spin_unlock_irq(slab_lock, ++x &cachep->node[cpu_to_mem(i)]->list_lock); + kfree(ccold); + } + kfree(new); +@@ -4068,7 +4127,7 @@ static void drain_array(struct kmem_cach + if (ac->touched && !force) { + ac->touched = 0; + } else { +- spin_lock_irq(&n->list_lock); ++ local_spin_lock_irq(slab_lock, &n->list_lock); + if (ac->avail) { + tofree = force ? ac->avail : (ac->limit + 4) / 5; + if (tofree > ac->avail) +@@ -4078,7 +4137,7 @@ static void drain_array(struct kmem_cach + memmove(ac->entry, &(ac->entry[tofree]), + sizeof(void *) * ac->avail); + } +- spin_unlock_irq(&n->list_lock); ++ local_spin_unlock_irq(slab_lock, &n->list_lock); + } + } + +@@ -4171,7 +4230,7 @@ void get_slabinfo(struct kmem_cache *cac + continue; + + check_irq_on(); +- spin_lock_irq(&n->list_lock); ++ local_spin_lock_irq(slab_lock, &n->list_lock); + + list_for_each_entry(slabp, &n->slabs_full, list) { + if (slabp->inuse != cachep->num && !error) +@@ -4196,7 +4255,7 @@ void get_slabinfo(struct kmem_cache *cac + if (n->shared) + shared_avail += n->shared->avail; + +- spin_unlock_irq(&n->list_lock); ++ local_spin_unlock_irq(slab_lock, &n->list_lock); + } + num_slabs += active_slabs; + num_objs = num_slabs * cachep->num; +@@ -4396,13 +4455,13 @@ static int leaks_show(struct seq_file *m + continue; + + check_irq_on(); +- spin_lock_irq(&n->list_lock); ++ local_spin_lock_irq(slab_lock, &n->list_lock); + + list_for_each_entry(slabp, &n->slabs_full, list) + handle_slab(x, cachep, slabp); + list_for_each_entry(slabp, &n->slabs_partial, list) + handle_slab(x, cachep, slabp); +- spin_unlock_irq(&n->list_lock); ++ local_spin_unlock_irq(slab_lock, &n->list_lock); + } + name = cachep->name; + if (x[0] == x[1]) { diff --git a/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch b/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch new file mode 100644 index 000000000..7f54811cf --- /dev/null +++ b/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch @@ -0,0 +1,67 @@ +Subject: mm-vmalloc.patch +From: Thomas Gleixner +Date: Tue, 12 Jul 2011 11:39:36 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + mm/vmalloc.c | 14 ++++++++------ + 1 file changed, 8 insertions(+), 6 deletions(-) + +Index: linux-stable/mm/vmalloc.c +=================================================================== +--- linux-stable.orig/mm/vmalloc.c ++++ linux-stable/mm/vmalloc.c +@@ -792,7 +792,7 @@ static struct vmap_block *new_vmap_block + struct vmap_block *vb; + struct vmap_area *va; + unsigned 
long vb_idx; +- int node, err; ++ int node, err, cpu; + + node = numa_node_id(); + +@@ -831,12 +831,13 @@ static struct vmap_block *new_vmap_block + BUG_ON(err); + radix_tree_preload_end(); + +- vbq = &get_cpu_var(vmap_block_queue); ++ cpu = get_cpu_light(); ++ vbq = &__get_cpu_var(vmap_block_queue); + vb->vbq = vbq; + spin_lock(&vbq->lock); + list_add_rcu(&vb->free_list, &vbq->free); + spin_unlock(&vbq->lock); +- put_cpu_var(vmap_block_queue); ++ put_cpu_light(); + + return vb; + } +@@ -910,7 +911,7 @@ static void *vb_alloc(unsigned long size + struct vmap_block *vb; + unsigned long addr = 0; + unsigned int order; +- int purge = 0; ++ int purge = 0, cpu; + + BUG_ON(size & ~PAGE_MASK); + BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); +@@ -926,7 +927,8 @@ static void *vb_alloc(unsigned long size + + again: + rcu_read_lock(); +- vbq = &get_cpu_var(vmap_block_queue); ++ cpu = get_cpu_light(); ++ vbq = &__get_cpu_var(vmap_block_queue); + list_for_each_entry_rcu(vb, &vbq->free, free_list) { + int i; + +@@ -963,7 +965,7 @@ next: + if (purge) + purge_fragmented_blocks_thiscpu(); + +- put_cpu_var(vmap_block_queue); ++ put_cpu_light(); + rcu_read_unlock(); + + if (!addr) { diff --git a/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch b/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch new file mode 100644 index 000000000..20f98e178 --- /dev/null +++ b/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch @@ -0,0 +1,42 @@ +Subject: mmci: Remove bogus local_irq_save() +From: Thomas Gleixner +Date: Wed, 09 Jan 2013 12:11:12 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +On !RT interrupt runs with interrupts disabled. On RT it's in a +thread, so no need to disable interrupts at all. + +Signed-off-by: Thomas Gleixner +--- + drivers/mmc/host/mmci.c | 5 ----- + 1 file changed, 5 deletions(-) + +Index: linux-stable/drivers/mmc/host/mmci.c +=================================================================== +--- linux-stable.orig/drivers/mmc/host/mmci.c ++++ linux-stable/drivers/mmc/host/mmci.c +@@ -976,15 +976,12 @@ static irqreturn_t mmci_pio_irq(int irq, + struct sg_mapping_iter *sg_miter = &host->sg_miter; + struct variant_data *variant = host->variant; + void __iomem *base = host->base; +- unsigned long flags; + u32 status; + + status = readl(base + MMCISTATUS); + + dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status); + +- local_irq_save(flags); +- + do { + unsigned int remain, len; + char *buffer; +@@ -1024,8 +1021,6 @@ static irqreturn_t mmci_pio_irq(int irq, + + sg_miter_stop(sg_miter); + +- local_irq_restore(flags); +- + /* + * If we have less than the fifo 'half-full' threshold to transfer, + * trigger a PIO interrupt as soon as any data is available. diff --git a/debian/patches/features/all/rt/move_sched_delayed_work_to_helper.patch b/debian/patches/features/all/rt/move_sched_delayed_work_to_helper.patch new file mode 100644 index 000000000..817544e27 --- /dev/null +++ b/debian/patches/features/all/rt/move_sched_delayed_work_to_helper.patch @@ -0,0 +1,97 @@ +Date: Wed, 26 Jun 2013 15:28:11 -0400 +From: Steven Rostedt +To: linux-kernel@vger.kernel.org, + linux-rt-users +Cc: Thomas Gleixner , + Carsten Emde , + Sebastian Andrzej Siewior , + Clark Williams , + "Paul E. 
McKenney" +Subject: [RFC][PATCH RT 5/6] rt,ntp: Move call to schedule_delayed_work() to helper thread +References: <20130626192806.107564905@goodmis.org> +Content-Disposition: inline; filename=ntp-sched-delay-thread.patch +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +The ntp code for notify_cmos_timer() is called from a hard interrupt +context. schedule_delayed_work() under PREEMPT_RT_FULL calls spinlocks +that have been converted to mutexes, thus calling schedule_delayed_work() +from interrupt is not safe. + +Add a helper thread that does the call to schedule_delayed_work and wake +up that thread instead of calling schedule_delayed_work() directly. +This is only for CONFIG_PREEMPT_RT_FULL, otherwise the code still calls +schedule_delayed_work() directly in irq context. + +Note: There's a few places in the kernel that do this. Perhaps the RT +code should have a dedicated thread that does the checks. Just register +a notifier on boot up for your check and wake up the thread when +needed. This will be a todo. + +Signed-off-by: Steven Rostedt + +--- + kernel/time/ntp.c | 40 ++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 40 insertions(+) + +Index: linux-stable/kernel/time/ntp.c +=================================================================== +--- linux-stable.orig/kernel/time/ntp.c ++++ linux-stable/kernel/time/ntp.c +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -516,10 +517,49 @@ static void sync_cmos_clock(struct work_ + schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next)); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * RT can not call schedule_delayed_work from real interrupt context. ++ * Need to make a thread to do the real work. 
++ */ ++static struct task_struct *cmos_delay_thread; ++static bool do_cmos_delay; ++ ++static int run_cmos_delay(void *ignore) ++{ ++ while (!kthread_should_stop()) { ++ set_current_state(TASK_INTERRUPTIBLE); ++ if (do_cmos_delay) { ++ do_cmos_delay = false; ++ schedule_delayed_work(&sync_cmos_work, 0); ++ } ++ schedule(); ++ } ++ __set_current_state(TASK_RUNNING); ++ return 0; ++} ++ ++static void notify_cmos_timer(void) ++{ ++ do_cmos_delay = true; ++ /* Make visible before waking up process */ ++ smp_wmb(); ++ wake_up_process(cmos_delay_thread); ++} ++ ++static __init int create_cmos_delay_thread(void) ++{ ++ cmos_delay_thread = kthread_run(run_cmos_delay, NULL, "kcmosdelayd"); ++ BUG_ON(!cmos_delay_thread); ++ return 0; ++} ++early_initcall(create_cmos_delay_thread); ++#else + static void notify_cmos_timer(void) + { + schedule_delayed_work(&sync_cmos_work, 0); + } ++#endif /* CONFIG_PREEMPT_RT_FULL */ + + #else + static inline void notify_cmos_timer(void) { } diff --git a/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch b/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch new file mode 100644 index 000000000..cff446561 --- /dev/null +++ b/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch @@ -0,0 +1,20 @@ +Subject: mutex-no-spin-on-rt.patch +From: Thomas Gleixner +Date: Sun, 17 Jul 2011 21:51:45 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/Kconfig.locks | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: linux-stable/kernel/Kconfig.locks +=================================================================== +--- linux-stable.orig/kernel/Kconfig.locks ++++ linux-stable/kernel/Kconfig.locks +@@ -222,4 +222,4 @@ endif + + config MUTEX_SPIN_ON_OWNER + def_bool y +- depends on SMP && !DEBUG_MUTEXES ++ depends on SMP && !DEBUG_MUTEXES && !PREEMPT_RT_FULL diff --git a/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch b/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch new file mode 100644 index 000000000..5e723790a --- /dev/null +++ b/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch @@ -0,0 +1,50 @@ +Subject: net: Another local_irq_disable/kmalloc headache +From: Thomas Gleixner +Date: Wed, 26 Sep 2012 16:21:08 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Replace it by a local lock. 
Though that's pretty inefficient :( + +Signed-off-by: Thomas Gleixner +--- + net/core/skbuff.c | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +Index: linux-stable/net/core/skbuff.c +=================================================================== +--- linux-stable.orig/net/core/skbuff.c ++++ linux-stable/net/core/skbuff.c +@@ -60,6 +60,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -367,6 +368,7 @@ struct netdev_alloc_cache { + unsigned int pagecnt_bias; + }; + static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache); ++static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock); + + static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) + { +@@ -375,7 +377,7 @@ static void *__netdev_alloc_frag(unsigne + int order; + unsigned long flags; + +- local_irq_save(flags); ++ local_lock_irqsave(netdev_alloc_lock, flags); + nc = &__get_cpu_var(netdev_alloc_cache); + if (unlikely(!nc->frag.page)) { + refill: +@@ -409,7 +411,7 @@ recycle: + nc->frag.offset += fragsz; + nc->pagecnt_bias--; + end: +- local_irq_restore(flags); ++ local_unlock_irqrestore(netdev_alloc_lock, flags); + return data; + } + diff --git a/debian/patches/features/all/rt/net-dev-make-devnet-rename-seq-static.patch b/debian/patches/features/all/rt/net-dev-make-devnet-rename-seq-static.patch new file mode 100644 index 000000000..a2cef32e8 --- /dev/null +++ b/debian/patches/features/all/rt/net-dev-make-devnet-rename-seq-static.patch @@ -0,0 +1,38 @@ +Subject: net-dev-make-devnet-rename-seq-static.patch +From: Thomas Gleixner +Date: Tue, 23 Jul 2013 12:55:50 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/netdevice.h | 3 --- + net/core/dev.c | 2 +- + 2 files changed, 1 insertion(+), 4 deletions(-) + +Index: linux-stable/include/linux/netdevice.h +=================================================================== +--- linux-stable.orig/include/linux/netdevice.h ++++ linux-stable/include/linux/netdevice.h +@@ -1601,9 +1601,6 @@ extern int call_netdevice_notifiers(unsi + + extern rwlock_t dev_base_lock; /* Device list lock */ + +-extern seqcount_t devnet_rename_seq; /* Device rename seq */ +- +- + #define for_each_netdev(net, d) \ + list_for_each_entry(d, &(net)->dev_base_head, dev_list) + #define for_each_netdev_reverse(net, d) \ +Index: linux-stable/net/core/dev.c +=================================================================== +--- linux-stable.orig/net/core/dev.c ++++ linux-stable/net/core/dev.c +@@ -166,7 +166,7 @@ static struct list_head offload_base __r + DEFINE_RWLOCK(dev_base_lock); + EXPORT_SYMBOL(dev_base_lock); + +-seqcount_t devnet_rename_seq; ++static seqcount_t devnet_rename_seq; + + static inline void dev_base_seq_inc(struct net *net) + { diff --git a/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch b/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch new file mode 100644 index 000000000..e949457db --- /dev/null +++ b/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch @@ -0,0 +1,78 @@ +Subject: net: netfilter: Serialize xt_write_recseq sections on RT +From: Thomas Gleixner +Date: Sun, 28 Oct 2012 11:18:08 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +The netfilter code relies only on the implicit semantics of +local_bh_disable() for serializing wt_write_recseq sections. 
RT breaks +that and needs explicit serialization here. + +Reported-by: Peter LaDow +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + include/linux/netfilter/x_tables.h | 7 +++++++ + net/netfilter/core.c | 6 ++++++ + 2 files changed, 13 insertions(+) + +Index: linux-stable/include/linux/netfilter/x_tables.h +=================================================================== +--- linux-stable.orig/include/linux/netfilter/x_tables.h ++++ linux-stable/include/linux/netfilter/x_tables.h +@@ -3,6 +3,7 @@ + + + #include ++#include + #include + + /** +@@ -284,6 +285,8 @@ extern void xt_free_table_info(struct xt + */ + DECLARE_PER_CPU(seqcount_t, xt_recseq); + ++DECLARE_LOCAL_IRQ_LOCK(xt_write_lock); ++ + /** + * xt_write_recseq_begin - start of a write section + * +@@ -298,6 +301,9 @@ static inline unsigned int xt_write_recs + { + unsigned int addend; + ++ /* RT protection */ ++ local_lock(xt_write_lock); ++ + /* + * Low order bit of sequence is set if we already + * called xt_write_recseq_begin(). +@@ -328,6 +334,7 @@ static inline void xt_write_recseq_end(u + /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */ + smp_wmb(); + __this_cpu_add(xt_recseq.sequence, addend); ++ local_unlock(xt_write_lock); + } + + /* +Index: linux-stable/net/netfilter/core.c +=================================================================== +--- linux-stable.orig/net/netfilter/core.c ++++ linux-stable/net/netfilter/core.c +@@ -21,11 +21,17 @@ + #include + #include + #include ++#include + #include + #include + + #include "nf_internals.h" + ++#ifdef CONFIG_PREEMPT_RT_BASE ++DEFINE_LOCAL_IRQ_LOCK(xt_write_lock); ++EXPORT_PER_CPU_SYMBOL(xt_write_lock); ++#endif ++ + static DEFINE_MUTEX(afinfo_mutex); + + const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly; diff --git a/debian/patches/features/all/rt/net-flip-lock-dep-thingy.patch b/debian/patches/features/all/rt/net-flip-lock-dep-thingy.patch new file mode 100644 index 000000000..2bcb33930 --- /dev/null +++ b/debian/patches/features/all/rt/net-flip-lock-dep-thingy.patch @@ -0,0 +1,114 @@ +Subject: net-flip-lock-dep-thingy.patch +From: Thomas Gleixner +Date: Tue, 28 Jun 2011 10:59:58 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +======================================================= +[ INFO: possible circular locking dependency detected ] +3.0.0-rc3+ #26 +------------------------------------------------------- +ip/1104 is trying to acquire lock: + (local_softirq_lock){+.+...}, at: [] __local_lock+0x25/0x68 + +but task is already holding lock: + (sk_lock-AF_INET){+.+...}, at: [] lock_sock+0x10/0x12 + +which lock already depends on the new lock. 
+ + +the existing dependency chain (in reverse order) is: + +-> #1 (sk_lock-AF_INET){+.+...}: + [] lock_acquire+0x103/0x12e + [] lock_sock_nested+0x82/0x92 + [] lock_sock+0x10/0x12 + [] tcp_close+0x1b/0x355 + [] inet_release+0xc3/0xcd + [] sock_release+0x1f/0x74 + [] sock_close+0x27/0x2b + [] fput+0x11d/0x1e3 + [] filp_close+0x70/0x7b + [] sys_close+0xf8/0x13d + [] system_call_fastpath+0x16/0x1b + +-> #0 (local_softirq_lock){+.+...}: + [] __lock_acquire+0xacc/0xdc8 + [] lock_acquire+0x103/0x12e + [] _raw_spin_lock+0x3b/0x4a + [] __local_lock+0x25/0x68 + [] local_bh_disable+0x36/0x3b + [] _raw_write_lock_bh+0x16/0x4f + [] tcp_close+0x159/0x355 + [] inet_release+0xc3/0xcd + [] sock_release+0x1f/0x74 + [] sock_close+0x27/0x2b + [] fput+0x11d/0x1e3 + [] filp_close+0x70/0x7b + [] sys_close+0xf8/0x13d + [] system_call_fastpath+0x16/0x1b + +other info that might help us debug this: + + Possible unsafe locking scenario: + + CPU0 CPU1 + ---- ---- + lock(sk_lock-AF_INET); + lock(local_softirq_lock); + lock(sk_lock-AF_INET); + lock(local_softirq_lock); + + *** DEADLOCK *** + +1 lock held by ip/1104: + #0: (sk_lock-AF_INET){+.+...}, at: [] lock_sock+0x10/0x12 + +stack backtrace: +Pid: 1104, comm: ip Not tainted 3.0.0-rc3+ #26 +Call Trace: + [] print_circular_bug+0x1f8/0x209 + [] __lock_acquire+0xacc/0xdc8 + [] ? __local_lock+0x25/0x68 + [] lock_acquire+0x103/0x12e + [] ? __local_lock+0x25/0x68 + [] ? get_parent_ip+0x11/0x41 + [] _raw_spin_lock+0x3b/0x4a + [] ? __local_lock+0x25/0x68 + [] ? get_parent_ip+0x28/0x41 + [] __local_lock+0x25/0x68 + [] local_bh_disable+0x36/0x3b + [] ? lock_sock+0x10/0x12 + [] _raw_write_lock_bh+0x16/0x4f + [] tcp_close+0x159/0x355 + [] inet_release+0xc3/0xcd + [] sock_release+0x1f/0x74 + [] sock_close+0x27/0x2b + [] fput+0x11d/0x1e3 + [] filp_close+0x70/0x7b + [] sys_close+0xf8/0x13d + [] system_call_fastpath+0x16/0x1b + + +Signed-off-by: Thomas Gleixner +--- + net/core/sock.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +Index: linux-stable/net/core/sock.c +=================================================================== +--- linux-stable.orig/net/core/sock.c ++++ linux-stable/net/core/sock.c +@@ -2288,12 +2288,11 @@ void lock_sock_nested(struct sock *sk, i + if (sk->sk_lock.owned) + __lock_sock(sk); + sk->sk_lock.owned = 1; +- spin_unlock(&sk->sk_lock.slock); ++ spin_unlock_bh(&sk->sk_lock.slock); + /* + * The sk_lock has mutex_lock() semantics here: + */ + mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); +- local_bh_enable(); + } + EXPORT_SYMBOL(lock_sock_nested); + diff --git a/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch b/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch new file mode 100644 index 000000000..8d4da0f66 --- /dev/null +++ b/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch @@ -0,0 +1,107 @@ +From: Sebastian Andrzej Siewior +Date: Wed, 20 Mar 2013 18:06:20 +0100 +Subject: [PATCH] net: Add a mutex around devnet_rename_seq +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +On RT write_seqcount_begin() disables preemption and device_rename() +allocates memory with GFP_KERNEL and grabs later the sysfs_mutex +mutex. Serialize with a mutex and add use the non preemption disabling +__write_seqcount_begin(). + +To avoid writer starvation, let the reader grab the mutex and release +it when it detects a writer in progress. This keeps the normal case +(no reader on the fly) fast. 
+ +[ tglx: Instead of replacing the seqcount by a mutex, add the mutex ] + +Signed-off-by: Sebastian Andrzej Siewior +Signed-off-by: Thomas Gleixner +--- + net/core/dev.c | 34 ++++++++++++++++++++-------------- + 1 file changed, 20 insertions(+), 14 deletions(-) + +Index: linux-stable/net/core/dev.c +=================================================================== +--- linux-stable.orig/net/core/dev.c ++++ linux-stable/net/core/dev.c +@@ -167,6 +167,7 @@ DEFINE_RWLOCK(dev_base_lock); + EXPORT_SYMBOL(dev_base_lock); + + static seqcount_t devnet_rename_seq; ++static DEFINE_MUTEX(devnet_rename_mutex); + + static inline void dev_base_seq_inc(struct net *net) + { +@@ -818,7 +819,8 @@ retry: + strcpy(name, dev->name); + rcu_read_unlock(); + if (read_seqcount_retry(&devnet_rename_seq, seq)) { +- cond_resched(); ++ mutex_lock(&devnet_rename_mutex); ++ mutex_unlock(&devnet_rename_mutex); + goto retry; + } + +@@ -1084,30 +1086,28 @@ int dev_change_name(struct net_device *d + if (dev->flags & IFF_UP) + return -EBUSY; + +- write_seqcount_begin(&devnet_rename_seq); ++ mutex_lock(&devnet_rename_mutex); ++ __write_seqcount_begin(&devnet_rename_seq); + +- if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { +- write_seqcount_end(&devnet_rename_seq); +- return 0; +- } ++ if (strncmp(newname, dev->name, IFNAMSIZ) == 0) ++ goto outunlock; + + memcpy(oldname, dev->name, IFNAMSIZ); + + err = dev_get_valid_name(net, dev, newname); +- if (err < 0) { +- write_seqcount_end(&devnet_rename_seq); +- return err; +- } ++ if (err < 0) ++ goto outunlock; + + rollback: + ret = device_rename(&dev->dev, dev->name); + if (ret) { + memcpy(dev->name, oldname, IFNAMSIZ); +- write_seqcount_end(&devnet_rename_seq); +- return ret; ++ err = ret; ++ goto outunlock; + } + +- write_seqcount_end(&devnet_rename_seq); ++ __write_seqcount_end(&devnet_rename_seq); ++ mutex_unlock(&devnet_rename_mutex); + + write_lock_bh(&dev_base_lock); + hlist_del_rcu(&dev->name_hlist); +@@ -1126,7 +1126,8 @@ rollback: + /* err >= 0 after dev_alloc_name() or stores the first errno */ + if (err >= 0) { + err = ret; +- write_seqcount_begin(&devnet_rename_seq); ++ mutex_lock(&devnet_rename_mutex); ++ __write_seqcount_begin(&devnet_rename_seq); + memcpy(dev->name, oldname, IFNAMSIZ); + goto rollback; + } else { +@@ -1136,6 +1137,11 @@ rollback: + } + + return err; ++ ++outunlock: ++ __write_seqcount_end(&devnet_rename_seq); ++ mutex_unlock(&devnet_rename_mutex); ++ return err; + } + + /** diff --git a/debian/patches/features/all/rt/net-netif-rx-ni-use-local-bh-disable.patch b/debian/patches/features/all/rt/net-netif-rx-ni-use-local-bh-disable.patch new file mode 100644 index 000000000..88477cb20 --- /dev/null +++ b/debian/patches/features/all/rt/net-netif-rx-ni-use-local-bh-disable.patch @@ -0,0 +1,34 @@ +Subject: net: Use local_bh_disable in netif_rx_ni() +From: Thomas Gleixner +Date: Sun, 28 Oct 2012 15:12:49 +0000 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +This code triggers the new WARN in __raise_softirq_irqsoff() though it +actually looks at the softirq pending bit and calls into the softirq +code, but that fits not well with the context related softirq model of +RT. It's correct on mainline though, but going through +local_bh_disable/enable here is not going to hurt badly. 
+ +Signed-off-by: Thomas Gleixner +--- + net/core/dev.c | 6 ++---- + 1 file changed, 2 insertions(+), 4 deletions(-) + +Index: linux-stable/net/core/dev.c +=================================================================== +--- linux-stable.orig/net/core/dev.c ++++ linux-stable/net/core/dev.c +@@ -3204,11 +3204,9 @@ int netif_rx_ni(struct sk_buff *skb) + { + int err; + +- migrate_disable(); ++ local_bh_disable(); + err = netif_rx(skb); +- if (local_softirq_pending()) +- thread_do_softirq(); +- migrate_enable(); ++ local_bh_enable(); + + return err; + } diff --git a/debian/patches/features/all/rt/net-netif_rx_ni-migrate-disable.patch b/debian/patches/features/all/rt/net-netif_rx_ni-migrate-disable.patch new file mode 100644 index 000000000..b6d8b7232 --- /dev/null +++ b/debian/patches/features/all/rt/net-netif_rx_ni-migrate-disable.patch @@ -0,0 +1,28 @@ +Subject: net-netif_rx_ni-migrate-disable.patch +From: Thomas Gleixner +Date: Sun, 17 Jul 2011 16:29:27 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + net/core/dev.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +Index: linux-stable/net/core/dev.c +=================================================================== +--- linux-stable.orig/net/core/dev.c ++++ linux-stable/net/core/dev.c +@@ -3201,11 +3201,11 @@ int netif_rx_ni(struct sk_buff *skb) + { + int err; + +- preempt_disable(); ++ migrate_disable(); + err = netif_rx(skb); + if (local_softirq_pending()) + thread_do_softirq(); +- preempt_enable(); ++ migrate_enable(); + + return err; + } diff --git a/debian/patches/features/all/rt/net-tx-action-avoid-livelock-on-rt.patch b/debian/patches/features/all/rt/net-tx-action-avoid-livelock-on-rt.patch new file mode 100644 index 000000000..b4fd6fd54 --- /dev/null +++ b/debian/patches/features/all/rt/net-tx-action-avoid-livelock-on-rt.patch @@ -0,0 +1,95 @@ +Subject: net: Avoid livelock in net_tx_action() on RT +From: Steven Rostedt +Date: Thu, 06 Oct 2011 10:48:39 -0400 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +qdisc_lock is taken w/o disabling interrupts or bottom halfs. So code +holding a qdisc_lock() can be interrupted and softirqs can run on the +return of interrupt in !RT. + +The spin_trylock() in net_tx_action() makes sure, that the softirq +does not deadlock. When the lock can't be acquired q is requeued and +the NET_TX softirq is raised. That causes the softirq to run over and +over. + +That works in mainline as do_softirq() has a retry loop limit and +leaves the softirq processing in the interrupt return path and +schedules ksoftirqd. The task which holds qdisc_lock cannot be +preempted, so the lock is released and either ksoftirqd or the next +softirq in the return from interrupt path can proceed. Though it's a +bit strange to actually run MAX_SOFTIRQ_RESTART (10) loops before it +decides to bail out even if it's clear in the first iteration :) + +On RT all softirq processing is done in a FIFO thread and we don't +have a loop limit, so ksoftirqd preempts the lock holder forever and +unqueues and requeues until the reset button is hit. + +Due to the forced threading of ksoftirqd on RT we actually cannot +deadlock on qdisc_lock because it's a "sleeping lock". So it's safe to +replace the spin_trylock() with a spin_lock(). When contended, +ksoftirqd is scheduled out and the lock holder can proceed. 
+ +[ tglx: Massaged changelog and code comments ] + +Solved-by: Thomas Gleixner +Signed-off-by: Steven Rostedt +Tested-by: Carsten Emde +Cc: Clark Williams +Cc: John Kacur +Cc: Luis Claudio R. Goncalves +Signed-off-by: Thomas Gleixner + +--- + net/core/dev.c | 32 +++++++++++++++++++++++++++++++- + 1 file changed, 31 insertions(+), 1 deletion(-) + +Index: linux-stable/net/core/dev.c +=================================================================== +--- linux-stable.orig/net/core/dev.c ++++ linux-stable/net/core/dev.c +@@ -3211,6 +3211,36 @@ int netif_rx_ni(struct sk_buff *skb) + } + EXPORT_SYMBOL(netif_rx_ni); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * RT runs ksoftirqd as a real time thread and the root_lock is a ++ * "sleeping spinlock". If the trylock fails then we can go into an ++ * infinite loop when ksoftirqd preempted the task which actually ++ * holds the lock, because we requeue q and raise NET_TX softirq ++ * causing ksoftirqd to loop forever. ++ * ++ * It's safe to use spin_lock on RT here as softirqs run in thread ++ * context and cannot deadlock against the thread which is holding ++ * root_lock. ++ * ++ * On !RT the trylock might fail, but there we bail out from the ++ * softirq loop after 10 attempts which we can't do on RT. And the ++ * task holding root_lock cannot be preempted, so the only downside of ++ * that trylock is that we need 10 loops to decide that we should have ++ * given up in the first one :) ++ */ ++static inline int take_root_lock(spinlock_t *lock) ++{ ++ spin_lock(lock); ++ return 1; ++} ++#else ++static inline int take_root_lock(spinlock_t *lock) ++{ ++ return spin_trylock(lock); ++} ++#endif ++ + static void net_tx_action(struct softirq_action *h) + { + struct softnet_data *sd = &__get_cpu_var(softnet_data); +@@ -3249,7 +3279,7 @@ static void net_tx_action(struct softirq + head = head->next_sched; + + root_lock = qdisc_lock(q); +- if (spin_trylock(root_lock)) { ++ if (take_root_lock(root_lock)) { + smp_mb__before_clear_bit(); + clear_bit(__QDISC_STATE_SCHED, + &q->state); diff --git a/debian/patches/features/all/rt/net-use-cpu-chill.patch b/debian/patches/features/all/rt/net-use-cpu-chill.patch new file mode 100644 index 000000000..16f91bc1a --- /dev/null +++ b/debian/patches/features/all/rt/net-use-cpu-chill.patch @@ -0,0 +1,67 @@ +Subject: net: Use cpu_chill() instead of cpu_relax() +From: Thomas Gleixner +Date: Wed, 07 Mar 2012 21:10:04 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Retry loops on RT might loop forever when the modifying side was +preempted. Use cpu_chill() instead of cpu_relax() to let the system +make progress. + +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + net/packet/af_packet.c | 5 +++-- + net/rds/ib_rdma.c | 3 ++- + 2 files changed, 5 insertions(+), 3 deletions(-) + +Index: linux-stable/net/packet/af_packet.c +=================================================================== +--- linux-stable.orig/net/packet/af_packet.c ++++ linux-stable/net/packet/af_packet.c +@@ -88,6 +88,7 @@ + #include + #include + #include ++#include + + #ifdef CONFIG_INET + #include +@@ -602,7 +603,7 @@ static void prb_retire_rx_blk_timer_expi + if (BLOCK_NUM_PKTS(pbd)) { + while (atomic_read(&pkc->blk_fill_in_prog)) { + /* Waiting for skb_copy_bits to finish... 
*/ +- cpu_relax(); ++ cpu_chill(); + } + } + +@@ -853,7 +854,7 @@ static void prb_retire_current_block(str + if (!(status & TP_STATUS_BLK_TMO)) { + while (atomic_read(&pkc->blk_fill_in_prog)) { + /* Waiting for skb_copy_bits to finish... */ +- cpu_relax(); ++ cpu_chill(); + } + } + prb_close_block(pkc, pbd, po, status); +Index: linux-stable/net/rds/ib_rdma.c +=================================================================== +--- linux-stable.orig/net/rds/ib_rdma.c ++++ linux-stable/net/rds/ib_rdma.c +@@ -34,6 +34,7 @@ + #include + #include + #include ++#include + + #include "rds.h" + #include "ib.h" +@@ -286,7 +287,7 @@ static inline void wait_clean_list_grace + for_each_online_cpu(cpu) { + flag = &per_cpu(clean_list_grace, cpu); + while (test_bit(CLEAN_LIST_BUSY_BIT, flag)) +- cpu_relax(); ++ cpu_chill(); + } + } + diff --git a/debian/patches/features/all/rt/net-use-cpu-light-in-ip-send-unicast-reply.patch b/debian/patches/features/all/rt/net-use-cpu-light-in-ip-send-unicast-reply.patch new file mode 100644 index 000000000..5611c97fb --- /dev/null +++ b/debian/patches/features/all/rt/net-use-cpu-light-in-ip-send-unicast-reply.patch @@ -0,0 +1,33 @@ +Subject: net: Use get_cpu_light() in ip_send_unicast_reply() +From: Thomas Gleixner +Date: Mon, 01 Oct 2012 17:12:35 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + net/ipv4/ip_output.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +Index: linux-stable/net/ipv4/ip_output.c +=================================================================== +--- linux-stable.orig/net/ipv4/ip_output.c ++++ linux-stable/net/ipv4/ip_output.c +@@ -1508,7 +1508,8 @@ void ip_send_unicast_reply(struct net *n + if (IS_ERR(rt)) + return; + +- inet = &get_cpu_var(unicast_sock); ++ get_cpu_light(); ++ inet = &__get_cpu_var(unicast_sock); + + inet->tos = arg->tos; + sk = &inet->sk; +@@ -1532,7 +1533,7 @@ void ip_send_unicast_reply(struct net *n + ip_push_pending_frames(sk, &fl4); + } + +- put_cpu_var(unicast_sock); ++ put_cpu_light(); + + ip_rt_put(rt); + } diff --git a/debian/patches/features/all/rt/net-wireless-warn-nort.patch b/debian/patches/features/all/rt/net-wireless-warn-nort.patch new file mode 100644 index 000000000..b372e4b03 --- /dev/null +++ b/debian/patches/features/all/rt/net-wireless-warn-nort.patch @@ -0,0 +1,23 @@ +Subject: net-wireless-warn-nort.patch +From: Thomas Gleixner +Date: Thu, 21 Jul 2011 21:05:33 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + net/mac80211/rx.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: linux-stable/net/mac80211/rx.c +=================================================================== +--- linux-stable.orig/net/mac80211/rx.c ++++ linux-stable/net/mac80211/rx.c +@@ -3234,7 +3234,7 @@ void ieee80211_rx(struct ieee80211_hw *h + struct ieee80211_supported_band *sband; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + +- WARN_ON_ONCE(softirq_count() == 0); ++ WARN_ON_ONCE_NONRT(softirq_count() == 0); + + if (WARN_ON(status->band >= IEEE80211_NUM_BANDS)) + goto drop; diff --git a/debian/patches/features/all/rt/oleg-signal-rt-fix.patch b/debian/patches/features/all/rt/oleg-signal-rt-fix.patch new file mode 100644 index 000000000..a2e1c242f --- /dev/null +++ b/debian/patches/features/all/rt/oleg-signal-rt-fix.patch @@ -0,0 +1,151 @@ +From: Oleg Nesterov +Subject: signal/x86: Delay calling signals in 
atomic +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +On x86_64 we must disable preemption before we enable interrupts +for stack faults, int3 and debugging, because the current task is using +a per CPU debug stack defined by the IST. If we schedule out, another task +can come in and use the same stack and cause the stack to be corrupted +and crash the kernel on return. + +When CONFIG_PREEMPT_RT_FULL is enabled, spin_locks become mutexes, and +one of these is the spin lock used in signal handling. + +Some of the debug code (int3) causes do_trap() to send a signal. +This function calls a spin lock that has been converted to a mutex +and has the possibility to sleep. If this happens, the above issues with +the corrupted stack is possible. + +Instead of calling the signal right away, for PREEMPT_RT and x86_64, +the signal information is stored on the stacks task_struct and +TIF_NOTIFY_RESUME is set. Then on exit of the trap, the signal resume +code will send the signal when preemption is enabled. + +[ rostedt: Switched from #ifdef CONFIG_PREEMPT_RT_FULL to + ARCH_RT_DELAYS_SIGNAL_SEND and added comments to the code. ] + +Cc: stable-rt@vger.kernel.org +Signed-off-by: Oleg Nesterov +Signed-off-by: Steven Rostedt +Signed-off-by: Thomas Gleixner +--- + + arch/x86/include/asm/signal.h | 13 +++++++++++++ + arch/x86/kernel/signal.c | 8 ++++++++ + include/linux/sched.h | 4 ++++ + kernel/signal.c | 37 +++++++++++++++++++++++++++++++++++-- + 4 files changed, 60 insertions(+), 2 deletions(-) + +Index: linux-stable/arch/x86/include/asm/signal.h +=================================================================== +--- linux-stable.orig/arch/x86/include/asm/signal.h ++++ linux-stable/arch/x86/include/asm/signal.h +@@ -23,6 +23,19 @@ typedef struct { + unsigned long sig[_NSIG_WORDS]; + } sigset_t; + ++/* ++ * Because some traps use the IST stack, we must keep preemption ++ * disabled while calling do_trap(), but do_trap() may call ++ * force_sig_info() which will grab the signal spin_locks for the ++ * task, which in PREEMPT_RT_FULL are mutexes. By defining ++ * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set ++ * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the ++ * trap. 
++ */ ++#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_X86_64) ++#define ARCH_RT_DELAYS_SIGNAL_SEND ++#endif ++ + #ifndef CONFIG_COMPAT + typedef sigset_t compat_sigset_t; + #endif +Index: linux-stable/arch/x86/kernel/signal.c +=================================================================== +--- linux-stable.orig/arch/x86/kernel/signal.c ++++ linux-stable/arch/x86/kernel/signal.c +@@ -743,6 +743,14 @@ do_notify_resume(struct pt_regs *regs, v + mce_notify_process(); + #endif /* CONFIG_X86_64 && CONFIG_X86_MCE */ + ++#ifdef ARCH_RT_DELAYS_SIGNAL_SEND ++ if (unlikely(current->forced_info.si_signo)) { ++ struct task_struct *t = current; ++ force_sig_info(t->forced_info.si_signo, &t->forced_info, t); ++ t->forced_info.si_signo = 0; ++ } ++#endif ++ + if (thread_info_flags & _TIF_UPROBE) + uprobe_notify_resume(regs); + +Index: linux-stable/include/linux/sched.h +=================================================================== +--- linux-stable.orig/include/linux/sched.h ++++ linux-stable/include/linux/sched.h +@@ -1220,6 +1220,10 @@ struct task_struct { + sigset_t blocked, real_blocked; + sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ + struct sigpending pending; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* TODO: move me into ->restart_block ? */ ++ struct siginfo forced_info; ++#endif + + unsigned long sas_ss_sp; + size_t sas_ss_size; +Index: linux-stable/kernel/signal.c +=================================================================== +--- linux-stable.orig/kernel/signal.c ++++ linux-stable/kernel/signal.c +@@ -1305,8 +1305,8 @@ int do_send_sig_info(int sig, struct sig + * We don't want to have recursive SIGSEGV's etc, for example, + * that is why we also clear SIGNAL_UNKILLABLE. + */ +-int +-force_sig_info(int sig, struct siginfo *info, struct task_struct *t) ++static int ++do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t) + { + unsigned long int flags; + int ret, blocked, ignored; +@@ -1331,6 +1331,39 @@ force_sig_info(int sig, struct siginfo * + return ret; + } + ++int force_sig_info(int sig, struct siginfo *info, struct task_struct *t) ++{ ++/* ++ * On some archs, PREEMPT_RT has to delay sending a signal from a trap ++ * since it can not enable preemption, and the signal code's spin_locks ++ * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will ++ * send the signal on exit of the trap. ++ */ ++#ifdef ARCH_RT_DELAYS_SIGNAL_SEND ++ if (in_atomic()) { ++ if (WARN_ON_ONCE(t != current)) ++ return 0; ++ if (WARN_ON_ONCE(t->forced_info.si_signo)) ++ return 0; ++ ++ if (is_si_special(info)) { ++ WARN_ON_ONCE(info != SEND_SIG_PRIV); ++ t->forced_info.si_signo = sig; ++ t->forced_info.si_errno = 0; ++ t->forced_info.si_code = SI_KERNEL; ++ t->forced_info.si_pid = 0; ++ t->forced_info.si_uid = 0; ++ } else { ++ t->forced_info = *info; ++ } ++ ++ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); ++ return 0; ++ } ++#endif ++ return do_force_sig_info(sig, info, t); ++} ++ + /* + * Nuke all other threads in the group. 
+ */ diff --git a/debian/patches/features/all/rt/panic-disable-random-on-rt.patch b/debian/patches/features/all/rt/panic-disable-random-on-rt.patch new file mode 100644 index 000000000..74ceebc69 --- /dev/null +++ b/debian/patches/features/all/rt/panic-disable-random-on-rt.patch @@ -0,0 +1,24 @@ +From: Thomas Gleixner +Subject: panic: skip get_random_bytes for RT_FULL in init_oops_id +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +--- + kernel/panic.c | 2 ++ + 1 file changed, 2 insertions(+) + +Index: linux-stable/kernel/panic.c +=================================================================== +--- linux-stable.orig/kernel/panic.c ++++ linux-stable/kernel/panic.c +@@ -363,9 +363,11 @@ static u64 oops_id; + + static int init_oops_id(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + if (!oops_id) + get_random_bytes(&oops_id, sizeof(oops_id)); + else ++#endif + oops_id++; + + return 0; diff --git a/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch b/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch new file mode 100644 index 000000000..64c8ba85f --- /dev/null +++ b/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch @@ -0,0 +1,183 @@ +Subject: rcu: Make ksoftirqd do RCU quiescent states +From: "Paul E. McKenney" +Date: Wed, 5 Oct 2011 11:45:18 -0700 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Implementing RCU-bh in terms of RCU-preempt makes the system vulnerable +to network-based denial-of-service attacks. This patch therefore +makes __do_softirq() invoke rcu_bh_qs(), but only when __do_softirq() +is running in ksoftirqd context. A wrapper layer in interposed so that +other calls to __do_softirq() avoid invoking rcu_bh_qs(). The underlying +function __do_softirq_common() does the actual work. + +The reason that rcu_bh_qs() is bad in these non-ksoftirqd contexts is +that there might be a local_bh_enable() inside an RCU-preempt read-side +critical section. This local_bh_enable() can invoke __do_softirq() +directly, so if __do_softirq() were to invoke rcu_bh_qs() (which just +calls rcu_preempt_qs() in the PREEMPT_RT_FULL case), there would be +an illegal RCU-preempt quiescent state in the middle of an RCU-preempt +read-side critical section. Therefore, quiescent states can only happen +in cases where __do_softirq() is invoked directly from ksoftirqd. + +Signed-off-by: Paul E. 
McKenney +Link: http://lkml.kernel.org/r/20111005184518.GA21601@linux.vnet.ibm.com +Signed-off-by: Thomas Gleixner + +--- + include/linux/rcupdate.h | 6 ------ + kernel/rcutree.c | 9 ++++++++- + kernel/rcutree_plugin.h | 8 +++++++- + kernel/softirq.c | 20 +++++++++++++------- + 4 files changed, 28 insertions(+), 15 deletions(-) + +Index: linux-stable/include/linux/rcupdate.h +=================================================================== +--- linux-stable.orig/include/linux/rcupdate.h ++++ linux-stable/include/linux/rcupdate.h +@@ -228,13 +228,7 @@ static inline int rcu_preempt_depth(void + + /* Internal to kernel */ + extern void rcu_sched_qs(int cpu); +- +-#ifndef CONFIG_PREEMPT_RT_FULL + extern void rcu_bh_qs(int cpu); +-#else +-static inline void rcu_bh_qs(int cpu) { } +-#endif +- + extern void rcu_check_callbacks(int cpu, int user); + struct notifier_block; + extern void rcu_idle_enter(void); +Index: linux-stable/kernel/rcutree.c +=================================================================== +--- linux-stable.orig/kernel/rcutree.c ++++ linux-stable/kernel/rcutree.c +@@ -182,7 +182,14 @@ void rcu_sched_qs(int cpu) + rdp->passed_quiesce = 1; + } + +-#ifndef CONFIG_PREEMPT_RT_FULL ++#ifdef CONFIG_PREEMPT_RT_FULL ++static void rcu_preempt_qs(int cpu); ++ ++void rcu_bh_qs(int cpu) ++{ ++ rcu_preempt_qs(cpu); ++} ++#else + void rcu_bh_qs(int cpu) + { + struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); +Index: linux-stable/kernel/rcutree_plugin.h +=================================================================== +--- linux-stable.orig/kernel/rcutree_plugin.h ++++ linux-stable/kernel/rcutree_plugin.h +@@ -1530,7 +1530,7 @@ static void __cpuinit rcu_prepare_kthrea + + #endif /* #else #ifdef CONFIG_RCU_BOOST */ + +-#if !defined(CONFIG_RCU_FAST_NO_HZ) ++#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) + + /* + * Check to see if any future RCU-related work will need to be done +@@ -1546,6 +1546,9 @@ int rcu_needs_cpu(int cpu, unsigned long + *delta_jiffies = ULONG_MAX; + return rcu_cpu_has_callbacks(cpu, NULL); + } ++#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */ ++ ++#if !defined(CONFIG_RCU_FAST_NO_HZ) + + /* + * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up +@@ -1637,6 +1640,8 @@ static bool rcu_try_advance_all_cbs(void + return cbs_ready; + } + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + /* + * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready + * to invoke. If the CPU has callbacks, try to advance them. Tell the +@@ -1675,6 +1680,7 @@ int rcu_needs_cpu(int cpu, unsigned long + } + return 0; + } ++#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */ + + /* + * Prepare a CPU for idle from an RCU perspective. 
The first major task +Index: linux-stable/kernel/softirq.c +=================================================================== +--- linux-stable.orig/kernel/softirq.c ++++ linux-stable/kernel/softirq.c +@@ -142,7 +142,7 @@ static void wakeup_softirqd(void) + wake_up_process(tsk); + } + +-static void handle_pending_softirqs(u32 pending, int cpu) ++static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs) + { + struct softirq_action *h = softirq_vec; + unsigned int prev_count = preempt_count(); +@@ -165,7 +165,8 @@ static void handle_pending_softirqs(u32 + prev_count, (unsigned int) preempt_count()); + preempt_count() = prev_count; + } +- rcu_bh_qs(cpu); ++ if (need_rcu_bh_qs) ++ rcu_bh_qs(cpu); + } + local_irq_disable(); + } +@@ -331,7 +332,7 @@ restart: + /* Reset the pending bitmask before enabling irqs */ + set_softirq_pending(0); + +- handle_pending_softirqs(pending, cpu); ++ handle_pending_softirqs(pending, cpu, 1); + + pending = local_softirq_pending(); + if (pending) { +@@ -384,7 +385,12 @@ static void ksoftirqd_clr_sched_params(u + static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock); + static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner); + +-static void __do_softirq(void); ++static void __do_softirq_common(int need_rcu_bh_qs); ++ ++void __do_softirq(void) ++{ ++ __do_softirq_common(0); ++} + + void __init softirq_early_init(void) + { +@@ -455,7 +461,7 @@ EXPORT_SYMBOL(in_serving_softirq); + * Called with bh and local interrupts disabled. For full RT cpu must + * be pinned. + */ +-static void __do_softirq(void) ++static void __do_softirq_common(int need_rcu_bh_qs) + { + u32 pending = local_softirq_pending(); + int cpu = smp_processor_id(); +@@ -469,7 +475,7 @@ static void __do_softirq(void) + + lockdep_softirq_enter(); + +- handle_pending_softirqs(pending, cpu); ++ handle_pending_softirqs(pending, cpu, need_rcu_bh_qs); + + pending = local_softirq_pending(); + if (pending) +@@ -508,7 +514,7 @@ static int __thread_do_softirq(int cpu) + * schedule! + */ + if (local_softirq_pending()) +- __do_softirq(); ++ __do_softirq_common(cpu >= 0); + local_unlock(local_softirq_lock); + unpin_current_cpu(); + preempt_disable(); diff --git a/debian/patches/features/all/rt/pci-access-use-__wake_up_all_locked.patch b/debian/patches/features/all/rt/pci-access-use-__wake_up_all_locked.patch new file mode 100644 index 000000000..916416ebf --- /dev/null +++ b/debian/patches/features/all/rt/pci-access-use-__wake_up_all_locked.patch @@ -0,0 +1,28 @@ +Subject: pci: Use __wake_up_all_locked pci_unblock_user_cfg_access() +From: Thomas Gleixner +Date: Thu, 01 Dec 2011 00:07:16 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +The waitqueue is protected by the pci_lock, so we can just avoid to +lock the waitqueue lock itself. 
That prevents the +might_sleep()/scheduling while atomic problem on RT + +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + drivers/pci/access.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: linux-stable/drivers/pci/access.c +=================================================================== +--- linux-stable.orig/drivers/pci/access.c ++++ linux-stable/drivers/pci/access.c +@@ -465,7 +465,7 @@ void pci_cfg_access_unlock(struct pci_de + WARN_ON(!dev->block_cfg_access); + + dev->block_cfg_access = 0; +- wake_up_all(&pci_cfg_wait); ++ wake_up_all_locked(&pci_cfg_wait); + raw_spin_unlock_irqrestore(&pci_lock, flags); + } + EXPORT_SYMBOL_GPL(pci_cfg_access_unlock); diff --git a/debian/patches/features/all/rt/percpu-rwsem-compilefix.patch b/debian/patches/features/all/rt/percpu-rwsem-compilefix.patch new file mode 100644 index 000000000..7ab9bea61 --- /dev/null +++ b/debian/patches/features/all/rt/percpu-rwsem-compilefix.patch @@ -0,0 +1,30 @@ +From 49faecbc581de038b423d7abbebe0d7b50ed15ef Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Mon, 8 Apr 2013 16:08:46 +0200 +Subject: [PATCH] percpu-rwsem: compile fix +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +The shortcut on mainline skip lockdep. No idea why this is a good thing. + +Signed-off-by: Sebastian Andrzej Siewior +--- + lib/percpu-rwsem.c | 4 ++++ + 1 file changed, 4 insertions(+) + +Index: linux-stable/lib/percpu-rwsem.c +=================================================================== +--- linux-stable.orig/lib/percpu-rwsem.c ++++ linux-stable/lib/percpu-rwsem.c +@@ -84,8 +84,12 @@ void percpu_down_read(struct percpu_rw_s + + down_read(&brw->rw_sem); + atomic_inc(&brw->slow_read_ctr); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ up_read(&brw->rw_sem); ++#else + /* avoid up_read()->rwsem_release() */ + __up_read(&brw->rw_sem); ++#endif + } + + void percpu_up_read(struct percpu_rw_semaphore *brw) diff --git a/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch b/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch new file mode 100644 index 000000000..093a02b60 --- /dev/null +++ b/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch @@ -0,0 +1,71 @@ +From: Yong Zhang +Date: Wed, 11 Jul 2012 22:05:21 +0000 +Subject: perf: Make swevent hrtimer run in irq instead of softirq +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Otherwise we get a deadlock like below: + +[ 1044.042749] BUG: scheduling while atomic: ksoftirqd/21/141/0x00010003 +[ 1044.042752] INFO: lockdep is turned off. +[ 1044.042754] Modules linked in: +[ 1044.042757] Pid: 141, comm: ksoftirqd/21 Tainted: G W 3.4.0-rc2-rt3-23676-ga723175-dirty #29 +[ 1044.042759] Call Trace: +[ 1044.042761] [] __schedule_bug+0x65/0x80 +[ 1044.042770] [] __schedule+0x83c/0xa70 +[ 1044.042775] [] ? prepare_to_wait+0x32/0xb0 +[ 1044.042779] [] schedule+0x2e/0xa0 +[ 1044.042782] [] hrtimer_wait_for_timer+0x6d/0xb0 +[ 1044.042786] [] ? 
wake_up_bit+0x40/0x40 +[ 1044.042790] [] hrtimer_cancel+0x20/0x40 +[ 1044.042794] [] perf_swevent_cancel_hrtimer+0x3c/0x50 +[ 1044.042798] [] task_clock_event_stop+0x11/0x40 +[ 1044.042802] [] task_clock_event_del+0xe/0x10 +[ 1044.042805] [] event_sched_out+0x118/0x1d0 +[ 1044.042809] [] group_sched_out+0x29/0x90 +[ 1044.042813] [] __perf_event_disable+0x18e/0x200 +[ 1044.042817] [] remote_function+0x63/0x70 +[ 1044.042821] [] generic_smp_call_function_single_interrupt+0xce/0x120 +[ 1044.042826] [] smp_call_function_single_interrupt+0x27/0x40 +[ 1044.042831] [] call_function_single_interrupt+0x6c/0x80 +[ 1044.042833] [] ? perf_event_overflow+0x20/0x20 +[ 1044.042840] [] ? _raw_spin_unlock_irq+0x30/0x70 +[ 1044.042844] [] ? _raw_spin_unlock_irq+0x36/0x70 +[ 1044.042848] [] run_hrtimer_softirq+0xc2/0x200 +[ 1044.042853] [] ? perf_event_overflow+0x20/0x20 +[ 1044.042857] [] __do_softirq_common+0xf5/0x3a0 +[ 1044.042862] [] __thread_do_softirq+0x15d/0x200 +[ 1044.042865] [] run_ksoftirqd+0xfa/0x210 +[ 1044.042869] [] ? __thread_do_softirq+0x200/0x200 +[ 1044.042873] [] ? __thread_do_softirq+0x200/0x200 +[ 1044.042877] [] kthread+0xb6/0xc0 +[ 1044.042881] [] ? _raw_spin_unlock_irq+0x3b/0x70 +[ 1044.042886] [] kernel_thread_helper+0x4/0x10 +[ 1044.042889] [] ? finish_task_switch+0x8c/0x110 +[ 1044.042894] [] ? _raw_spin_unlock_irq+0x3b/0x70 +[ 1044.042897] [] ? retint_restore_args+0xe/0xe +[ 1044.042900] [] ? kthreadd+0x1e0/0x1e0 +[ 1044.042902] [] ? gs_change+0xb/0xb + +Signed-off-by: Yong Zhang +Cc: Peter Zijlstra +Cc: Steven Rostedt +Link: http://lkml.kernel.org/r/1341476476-5666-1-git-send-email-yong.zhang0@gmail.com +Signed-off-by: Thomas Gleixner +Signed-off-by: Steven Rostedt + +--- + kernel/events/core.c | 1 + + 1 file changed, 1 insertion(+) + +Index: linux-stable/kernel/events/core.c +=================================================================== +--- linux-stable.orig/kernel/events/core.c ++++ linux-stable/kernel/events/core.c +@@ -5715,6 +5715,7 @@ static void perf_swevent_init_hrtimer(st + + hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hwc->hrtimer.function = perf_swevent_hrtimer; ++ hwc->hrtimer.irqsafe = 1; + + /* + * Since hrtimers have a fixed rate, we can do a static freq->period diff --git a/debian/patches/features/all/rt/perf-move-irq-work-to-softirq-in-rt.patch b/debian/patches/features/all/rt/perf-move-irq-work-to-softirq-in-rt.patch new file mode 100644 index 000000000..7bc31f841 --- /dev/null +++ b/debian/patches/features/all/rt/perf-move-irq-work-to-softirq-in-rt.patch @@ -0,0 +1,78 @@ +Subject: x86-no-perf-irq-work-rt.patch +From: Thomas Gleixner +Date: Wed, 13 Jul 2011 14:05:05 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + arch/x86/kernel/irq_work.c | 2 ++ + kernel/irq_work.c | 5 ++++- + kernel/timer.c | 6 +++++- + 3 files changed, 11 insertions(+), 2 deletions(-) + +Index: linux-stable/arch/x86/kernel/irq_work.c +=================================================================== +--- linux-stable.orig/arch/x86/kernel/irq_work.c ++++ linux-stable/arch/x86/kernel/irq_work.c +@@ -18,6 +18,7 @@ void smp_irq_work_interrupt(struct pt_re + irq_exit(); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + void arch_irq_work_raise(void) + { + #ifdef CONFIG_X86_LOCAL_APIC +@@ -28,3 +29,4 @@ void arch_irq_work_raise(void) + apic_wait_icr_idle(); + #endif + } ++#endif +Index: linux-stable/kernel/irq_work.c +=================================================================== 
+--- linux-stable.orig/kernel/irq_work.c ++++ linux-stable/kernel/irq_work.c +@@ -119,8 +119,9 @@ static void __irq_work_run(void) + if (llist_empty(this_list)) + return; + ++#ifndef CONFIG_PREEMPT_RT_FULL + BUG_ON(!irqs_disabled()); +- ++#endif + llnode = llist_del_all(this_list); + while (llnode != NULL) { + work = llist_entry(llnode, struct irq_work, llnode); +@@ -152,7 +153,9 @@ static void __irq_work_run(void) + */ + void irq_work_run(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + BUG_ON(!in_irq()); ++#endif + __irq_work_run(); + } + EXPORT_SYMBOL_GPL(irq_work_run); +Index: linux-stable/kernel/timer.c +=================================================================== +--- linux-stable.orig/kernel/timer.c ++++ linux-stable/kernel/timer.c +@@ -1425,7 +1425,7 @@ void update_process_times(int user_tick) + scheduler_tick(); + run_local_timers(); + rcu_check_callbacks(cpu, user_tick); +-#ifdef CONFIG_IRQ_WORK ++#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL) + if (in_irq()) + irq_work_run(); + #endif +@@ -1439,6 +1439,10 @@ static void run_timer_softirq(struct sof + { + struct tvec_base *base = __this_cpu_read(tvec_bases); + ++#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL) ++ irq_work_run(); ++#endif ++ + hrtimer_run_pending(); + + if (time_after_eq(jiffies, base->timer_jiffies)) diff --git a/debian/patches/features/all/rt/peter_zijlstra-frob-migrate_disable-2.patch b/debian/patches/features/all/rt/peter_zijlstra-frob-migrate_disable-2.patch new file mode 100644 index 000000000..c1f0eb2b3 --- /dev/null +++ b/debian/patches/features/all/rt/peter_zijlstra-frob-migrate_disable-2.patch @@ -0,0 +1,187 @@ +Subject: sched: Generic migrate_disable +From: Peter Zijlstra +Date: Thu Aug 11 15:14:58 CEST 2011 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Make migrate_disable() be a preempt_disable() for !rt kernels. This +allows generic code to use it but still enforces that these code +sections stay relatively small. + +A preemptible migrate_disable() accessible for general use would allow +people growing arbitrary per-cpu crap instead of clean these things +up. 
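
As a rough illustration of the mapping described above, and not part of the patch itself, the following standalone C sketch models how migrate_disable()/migrate_enable() can collapse to plain preempt-count operations on a !PREEMPT_RT_FULL build. The counter and the fake_* helpers are invented for the example; only the macro mapping mirrors what the patch description states.

#include <stdio.h>

static int fake_preempt_count;                  /* stand-in for the CPU's preempt count */

static void fake_preempt_disable(void) { fake_preempt_count++; }
static void fake_preempt_enable(void)  { fake_preempt_count--; }

#ifndef CONFIG_PREEMPT_RT_FULL
/* !RT mapping: disabling preemption already pins the task to its CPU */
# define migrate_disable() fake_preempt_disable()
# define migrate_enable()  fake_preempt_enable()
#endif

int main(void)
{
        migrate_disable();
        printf("inside section, preempt count = %d\n", fake_preempt_count); /* 1 */
        migrate_enable();
        printf("after section, preempt count = %d\n", fake_preempt_count);  /* 0 */
        return 0;
}

Generic code can therefore call migrate_disable() unconditionally; only an RT kernel pays for the heavier per-task implementation shown in the hunks that follow.
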
+ +Signed-off-by: Peter Zijlstra +Link: http://lkml.kernel.org/n/tip-275i87sl8e1jcamtchmehonm@git.kernel.org +--- + include/linux/preempt.h | 21 +++++++++------------ + include/linux/sched.h | 13 +++++++++++++ + include/linux/smp.h | 9 ++------- + kernel/sched/core.c | 6 ++++-- + kernel/trace/trace.c | 2 +- + lib/smp_processor_id.c | 2 +- + 6 files changed, 30 insertions(+), 23 deletions(-) + +Index: linux-stable/include/linux/preempt.h +=================================================================== +--- linux-stable.orig/include/linux/preempt.h ++++ linux-stable/include/linux/preempt.h +@@ -130,28 +130,25 @@ do { \ + + #endif /* CONFIG_PREEMPT_COUNT */ + +-#ifdef CONFIG_SMP +-extern void migrate_disable(void); +-extern void migrate_enable(void); +-#else +-# define migrate_disable() barrier() +-# define migrate_enable() barrier() +-#endif +- + #ifdef CONFIG_PREEMPT_RT_FULL + # define preempt_disable_rt() preempt_disable() + # define preempt_enable_rt() preempt_enable() + # define preempt_disable_nort() barrier() + # define preempt_enable_nort() barrier() +-# define migrate_disable_rt() migrate_disable() +-# define migrate_enable_rt() migrate_enable() ++# ifdef CONFIG_SMP ++ extern void migrate_disable(void); ++ extern void migrate_enable(void); ++# else /* CONFIG_SMP */ ++# define migrate_disable() barrier() ++# define migrate_enable() barrier() ++# endif /* CONFIG_SMP */ + #else + # define preempt_disable_rt() barrier() + # define preempt_enable_rt() barrier() + # define preempt_disable_nort() preempt_disable() + # define preempt_enable_nort() preempt_enable() +-# define migrate_disable_rt() barrier() +-# define migrate_enable_rt() barrier() ++# define migrate_disable() preempt_disable() ++# define migrate_enable() preempt_enable() + #endif + + #ifdef CONFIG_PREEMPT_NOTIFIERS +Index: linux-stable/include/linux/sched.h +=================================================================== +--- linux-stable.orig/include/linux/sched.h ++++ linux-stable/include/linux/sched.h +@@ -1074,7 +1074,9 @@ struct task_struct { + #endif + + unsigned int policy; ++#ifdef CONFIG_PREEMPT_RT_FULL + int migrate_disable; ++#endif + int nr_cpus_allowed; + cpumask_t cpus_allowed; + +@@ -2614,11 +2616,22 @@ static inline void set_task_cpu(struct t + + #endif /* CONFIG_SMP */ + ++static inline int __migrate_disabled(struct task_struct *p) ++{ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ return p->migrate_disable; ++#else ++ return 0; ++#endif ++} ++ + /* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ + static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p) + { ++#ifdef CONFIG_PREEMPT_RT_FULL + if (p->migrate_disable) + return cpumask_of(task_cpu(p)); ++#endif + + return &p->cpus_allowed; + } +Index: linux-stable/include/linux/smp.h +=================================================================== +--- linux-stable.orig/include/linux/smp.h ++++ linux-stable/include/linux/smp.h +@@ -223,13 +223,8 @@ static inline void kick_all_cpus_sync(vo + #define get_cpu() ({ preempt_disable(); smp_processor_id(); }) + #define put_cpu() preempt_enable() + +-#ifndef CONFIG_PREEMPT_RT_FULL +-# define get_cpu_light() get_cpu() +-# define put_cpu_light() put_cpu() +-#else +-# define get_cpu_light() ({ migrate_disable(); smp_processor_id(); }) +-# define put_cpu_light() migrate_enable() +-#endif ++#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); }) ++#define put_cpu_light() migrate_enable() + + /* + * Callback to arch code if there's nosmp or maxcpus=0 on the +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -4828,7 +4828,7 @@ void __cpuinit init_idle(struct task_str + #ifdef CONFIG_SMP + void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) + { +- if (!p->migrate_disable) { ++ if (!__migrate_disabled(p)) { + if (p->sched_class && p->sched_class->set_cpus_allowed) + p->sched_class->set_cpus_allowed(p, new_mask); + p->nr_cpus_allowed = cpumask_weight(new_mask); +@@ -4879,7 +4879,7 @@ int set_cpus_allowed_ptr(struct task_str + do_set_cpus_allowed(p, new_mask); + + /* Can the task run on the task's current CPU? If so, we're done */ +- if (cpumask_test_cpu(task_cpu(p), new_mask) || p->migrate_disable) ++ if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p)) + goto out; + + dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); +@@ -4898,6 +4898,7 @@ out: + } + EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); + ++#ifdef CONFIG_PREEMPT_RT_FULL + void migrate_disable(void) + { + struct task_struct *p = current; +@@ -4990,6 +4991,7 @@ void migrate_enable(void) + preempt_enable(); + } + EXPORT_SYMBOL(migrate_enable); ++#endif /* CONFIG_PREEMPT_RT_FULL */ + + /* + * Move (not current) task off this cpu, onto dest cpu. We're doing +Index: linux-stable/kernel/trace/trace.c +=================================================================== +--- linux-stable.orig/kernel/trace/trace.c ++++ linux-stable/kernel/trace/trace.c +@@ -1420,7 +1420,7 @@ tracing_generic_entry_update(struct trac + ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | + (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); + +- entry->migrate_disable = (tsk) ? tsk->migrate_disable & 0xFF : 0; ++ entry->migrate_disable = (tsk) ? 
__migrate_disabled(tsk) & 0xFF : 0; + } + EXPORT_SYMBOL_GPL(tracing_generic_entry_update); + +Index: linux-stable/lib/smp_processor_id.c +=================================================================== +--- linux-stable.orig/lib/smp_processor_id.c ++++ linux-stable/lib/smp_processor_id.c +@@ -41,7 +41,7 @@ notrace unsigned int debug_smp_processor + + printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x %08x] " + "code: %s/%d\n", preempt_count() - 1, +- current->migrate_disable, current->comm, current->pid); ++ __migrate_disabled(current), current->comm, current->pid); + print_symbol("caller is %s\n", (long)__builtin_return_address(0)); + dump_stack(); + diff --git a/debian/patches/features/all/rt/peter_zijlstra-frob-migrate_disable.patch b/debian/patches/features/all/rt/peter_zijlstra-frob-migrate_disable.patch new file mode 100644 index 000000000..7aa00ab87 --- /dev/null +++ b/debian/patches/features/all/rt/peter_zijlstra-frob-migrate_disable.patch @@ -0,0 +1,70 @@ +Subject: sched: Optimize migrate_disable +From: Peter Zijlstra +Date: Thu Aug 11 15:03:35 CEST 2011 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Change from task_rq_lock() to raw_spin_lock(&rq->lock) to avoid a few +atomic ops. See comment on why it should be safe. + +Signed-off-by: Peter Zijlstra +Link: http://lkml.kernel.org/n/tip-cbz6hkl5r5mvwtx5s3tor2y6@git.kernel.org +--- + kernel/sched/core.c | 24 ++++++++++++++++++++---- + 1 file changed, 20 insertions(+), 4 deletions(-) + +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -4918,7 +4918,19 @@ void migrate_disable(void) + preempt_enable(); + return; + } +- rq = task_rq_lock(p, &flags); ++ ++ /* ++ * Since this is always current we can get away with only locking ++ * rq->lock, the ->cpus_allowed value can normally only be changed ++ * while holding both p->pi_lock and rq->lock, but seeing that this ++ * it current, we cannot actually be waking up, so all code that ++ * relies on serialization against p->pi_lock is out of scope. ++ * ++ * Taking rq->lock serializes us against things like ++ * set_cpus_allowed_ptr() that can still happen concurrently. ++ */ ++ rq = this_rq(); ++ raw_spin_lock_irqsave(&rq->lock, flags); + p->migrate_disable = 1; + mask = tsk_cpus_allowed(p); + +@@ -4929,7 +4941,7 @@ void migrate_disable(void) + p->sched_class->set_cpus_allowed(p, mask); + p->nr_cpus_allowed = cpumask_weight(mask); + } +- task_rq_unlock(rq, p, &flags); ++ raw_spin_unlock_irqrestore(&rq->lock, flags); + preempt_enable(); + } + EXPORT_SYMBOL(migrate_disable); +@@ -4957,7 +4969,11 @@ void migrate_enable(void) + return; + } + +- rq = task_rq_lock(p, &flags); ++ /* ++ * See comment in migrate_disable(). 
++ */ ++ rq = this_rq(); ++ raw_spin_lock_irqsave(&rq->lock, flags); + p->migrate_disable = 0; + mask = tsk_cpus_allowed(p); + +@@ -4969,7 +4985,7 @@ void migrate_enable(void) + p->nr_cpus_allowed = cpumask_weight(mask); + } + +- task_rq_unlock(rq, p, &flags); ++ raw_spin_unlock_irqrestore(&rq->lock, flags); + unpin_current_cpu(); + preempt_enable(); + } diff --git a/debian/patches/features/all/rt/peter_zijlstra-frob-pagefault_disable.patch b/debian/patches/features/all/rt/peter_zijlstra-frob-pagefault_disable.patch new file mode 100644 index 000000000..75eb4c918 --- /dev/null +++ b/debian/patches/features/all/rt/peter_zijlstra-frob-pagefault_disable.patch @@ -0,0 +1,377 @@ +Subject: mm: pagefault_disabled() +From: Peter Zijlstra +Date: Thu Aug 11 15:31:31 CEST 2011 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Wrap the test for pagefault_disabled() into a helper, this allows us +to remove the need for current->pagefault_disabled on !-rt kernels. + +Signed-off-by: Peter Zijlstra +Link: http://lkml.kernel.org/n/tip-3yy517m8zsi9fpsf14xfaqkw@git.kernel.org +--- + arch/alpha/mm/fault.c | 2 +- + arch/arm/mm/fault.c | 2 +- + arch/avr32/mm/fault.c | 3 +-- + arch/cris/mm/fault.c | 2 +- + arch/frv/mm/fault.c | 2 +- + arch/ia64/mm/fault.c | 2 +- + arch/m32r/mm/fault.c | 2 +- + arch/m68k/mm/fault.c | 2 +- + arch/microblaze/mm/fault.c | 2 +- + arch/mips/mm/fault.c | 2 +- + arch/mn10300/mm/fault.c | 2 +- + arch/parisc/mm/fault.c | 2 +- + arch/s390/mm/fault.c | 8 ++++---- + arch/score/mm/fault.c | 2 +- + arch/sh/mm/fault.c | 2 +- + arch/sparc/mm/fault_32.c | 2 +- + arch/sparc/mm/fault_64.c | 2 +- + arch/tile/mm/fault.c | 2 +- + arch/um/kernel/trap.c | 2 +- + arch/x86/mm/fault.c | 2 +- + arch/xtensa/mm/fault.c | 2 +- + include/linux/sched.h | 14 ++++++++++++++ + kernel/fork.c | 2 ++ + 23 files changed, 40 insertions(+), 25 deletions(-) + +Index: linux-stable/arch/alpha/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/alpha/mm/fault.c ++++ linux-stable/arch/alpha/mm/fault.c +@@ -108,7 +108,7 @@ do_page_fault(unsigned long address, uns + + /* If we're in an interrupt context, or have no user context, + we must not take the fault. */ +- if (!mm || in_atomic() || current->pagefault_disabled) ++ if (!mm || pagefault_disabled()) + goto no_context; + + #ifdef CONFIG_ALPHA_LARGE_VMALLOC +Index: linux-stable/arch/arm/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/arm/mm/fault.c ++++ linux-stable/arch/arm/mm/fault.c +@@ -279,7 +279,7 @@ do_page_fault(unsigned long addr, unsign + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm || current->pagefault_disabled) ++ if (!mm || pagefault_disabled()) + goto no_context; + + /* +Index: linux-stable/arch/avr32/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/avr32/mm/fault.c ++++ linux-stable/arch/avr32/mm/fault.c +@@ -81,8 +81,7 @@ asmlinkage void do_page_fault(unsigned l + * If we're in an interrupt or have no user context, we must + * not take the fault... 
+ */ +- if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM) || +- current->pagefault_disabled) ++ if (!mm || regs->sr & SYSREG_BIT(GM) || pagefault_disabled()) + goto no_context; + + local_irq_enable(); +Index: linux-stable/arch/cris/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/cris/mm/fault.c ++++ linux-stable/arch/cris/mm/fault.c +@@ -114,7 +114,7 @@ do_page_fault(unsigned long address, str + * user context, we must not take the fault. + */ + +- if (in_atomic() || !mm || current->pagefault_disabled) ++ if (!mm || pagefault_disabled()) + goto no_context; + + retry: +Index: linux-stable/arch/frv/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/frv/mm/fault.c ++++ linux-stable/arch/frv/mm/fault.c +@@ -78,7 +78,7 @@ asmlinkage void do_page_fault(int datamm + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm || current->pagefault_disabled) ++ if (!mm || pagefault_disabled()) + goto no_context; + + down_read(&mm->mmap_sem); +Index: linux-stable/arch/ia64/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/ia64/mm/fault.c ++++ linux-stable/arch/ia64/mm/fault.c +@@ -98,7 +98,7 @@ ia64_do_page_fault (unsigned long addres + /* + * If we're in an interrupt or have no user context, we must not take the fault.. + */ +- if (in_atomic() || !mm || current->pagefault_disabled) ++ if (!mm || pagefault_disabled()) + goto no_context; + + #ifdef CONFIG_VIRTUAL_MEM_MAP +Index: linux-stable/arch/m32r/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/m32r/mm/fault.c ++++ linux-stable/arch/m32r/mm/fault.c +@@ -114,7 +114,7 @@ asmlinkage void do_page_fault(struct pt_ + * If we're in an interrupt or have no user context or are running in an + * atomic region then we must not take the fault.. + */ +- if (in_atomic() || !mm || current->pagefault_disabled) ++ if (!mm || pagefault_disabled()) + goto bad_area_nosemaphore; + + /* When running in the kernel we expect faults to occur only to +Index: linux-stable/arch/m68k/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/m68k/mm/fault.c ++++ linux-stable/arch/m68k/mm/fault.c +@@ -85,7 +85,7 @@ int do_page_fault(struct pt_regs *regs, + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm || current->pagefault_disabled) ++ if (!mm || pagefault_disabled()) + goto no_context; + + retry: +Index: linux-stable/arch/microblaze/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/microblaze/mm/fault.c ++++ linux-stable/arch/microblaze/mm/fault.c +@@ -108,7 +108,7 @@ void do_page_fault(struct pt_regs *regs, + if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11) + is_write = 0; + +- if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) { ++ if (unlikely(!mm || pagefault_disabled())) { + if (kernel_mode(regs)) + goto bad_area_nosemaphore; + +Index: linux-stable/arch/mips/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/mips/mm/fault.c ++++ linux-stable/arch/mips/mm/fault.c +@@ -89,7 +89,7 @@ asmlinkage void __kprobes do_page_fault( + * If we're in an interrupt or have no user + * context, we must not take the fault.. 
+ */ +- if (in_atomic() || !mm || current->pagefault_disabled) ++ if (!mm || pagefault_disabled()) + goto bad_area_nosemaphore; + + retry: +Index: linux-stable/arch/mn10300/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/mn10300/mm/fault.c ++++ linux-stable/arch/mn10300/mm/fault.c +@@ -168,7 +168,7 @@ asmlinkage void do_page_fault(struct pt_ + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm || current->pagefault_disabled) ++ if (!mm || pagefault_disabled()) + goto no_context; + + retry: +Index: linux-stable/arch/parisc/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/parisc/mm/fault.c ++++ linux-stable/arch/parisc/mm/fault.c +@@ -177,7 +177,7 @@ void do_page_fault(struct pt_regs *regs, + int fault; + unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; + +- if (in_atomic() || !mm || current->pagefault_disabled) ++ if (!mm || pagefault_disabled()) + goto no_context; + + retry: +Index: linux-stable/arch/s390/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/s390/mm/fault.c ++++ linux-stable/arch/s390/mm/fault.c +@@ -296,8 +296,8 @@ static inline int do_exception(struct pt + * user context. + */ + fault = VM_FAULT_BADCONTEXT; +- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm || +- tsk->pagefault_disabled)) ++ if (unlikely(!user_space_fault(trans_exc_code) || ++ !mm || pagefault_disabled())) + goto out; + + address = trans_exc_code & __FAIL_ADDR_MASK; +@@ -441,8 +441,8 @@ void __kprobes do_asce_exception(struct + clear_tsk_thread_flag(current, TIF_PER_TRAP); + + trans_exc_code = regs->int_parm_long; +- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm || +- current->pagefault_disabled())); ++ if (unlikely(!user_space_fault(trans_exc_code) || !mm || ++ pagefault_disabled())) + goto no_context; + + down_read(&mm->mmap_sem); +Index: linux-stable/arch/score/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/score/mm/fault.c ++++ linux-stable/arch/score/mm/fault.c +@@ -72,7 +72,7 @@ asmlinkage void do_page_fault(struct pt_ + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm || current->pagefault_disabled) ++ if (!mm || pagefault_disabled()) + goto bad_area_nosemaphore; + + down_read(&mm->mmap_sem); +Index: linux-stable/arch/sh/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/sh/mm/fault.c ++++ linux-stable/arch/sh/mm/fault.c +@@ -440,7 +440,7 @@ asmlinkage void __kprobes do_page_fault( + * If we're in an interrupt, have no user context or are running + * in an atomic region then we must not take the fault: + */ +- if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) { ++ if (unlikely(!mm || pagefault_disabled())) { + bad_area_nosemaphore(regs, error_code, address); + return; + } +Index: linux-stable/arch/sparc/mm/fault_32.c +=================================================================== +--- linux-stable.orig/arch/sparc/mm/fault_32.c ++++ linux-stable/arch/sparc/mm/fault_32.c +@@ -200,7 +200,7 @@ asmlinkage void do_sparc_fault(struct pt + * If we're in an interrupt or have no user + * context, we must not take the fault.. 
+ */ +- if (in_atomic() || !mm || current->pagefault_disabled) ++ if (!mm || pagefault_disabled()) + goto no_context; + + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); +Index: linux-stable/arch/sparc/mm/fault_64.c +=================================================================== +--- linux-stable.orig/arch/sparc/mm/fault_64.c ++++ linux-stable/arch/sparc/mm/fault_64.c +@@ -321,7 +321,7 @@ asmlinkage void __kprobes do_sparc64_fau + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm || current->pagefault_enabled) ++ if (!mm || pagefault_disabled()) + goto intr_or_no_mm; + + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); +Index: linux-stable/arch/tile/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/tile/mm/fault.c ++++ linux-stable/arch/tile/mm/fault.c +@@ -360,7 +360,7 @@ static int handle_page_fault(struct pt_r + * If we're in an interrupt, have no user context or are running in an + * atomic region then we must not take the fault. + */ +- if (in_atomic() || !mm || current->pagefault_disabled) { ++ if (!mm || pagefault_disabled()) { + vma = NULL; /* happy compiler */ + goto bad_area_nosemaphore; + } +Index: linux-stable/arch/um/kernel/trap.c +=================================================================== +--- linux-stable.orig/arch/um/kernel/trap.c ++++ linux-stable/arch/um/kernel/trap.c +@@ -39,7 +39,7 @@ int handle_page_fault(unsigned long addr + * If the fault was during atomic operation, don't take the fault, just + * fail. + */ +- if (in_atomic() || current->pagefault_disabled) ++ if (pagefault_disabled()) + goto out_nosemaphore; + + retry: +Index: linux-stable/arch/x86/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/x86/mm/fault.c ++++ linux-stable/arch/x86/mm/fault.c +@@ -1104,7 +1104,7 @@ __do_page_fault(struct pt_regs *regs, un + * If we're in an interrupt, have no user context or are running + * in an atomic region then we must not take the fault: + */ +- if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) { ++ if (unlikely(!mm || pagefault_disabled())) { + bad_area_nosemaphore(regs, error_code, address); + return; + } +Index: linux-stable/arch/xtensa/mm/fault.c +=================================================================== +--- linux-stable.orig/arch/xtensa/mm/fault.c ++++ linux-stable/arch/xtensa/mm/fault.c +@@ -57,7 +57,7 @@ void do_page_fault(struct pt_regs *regs) + /* If we're in an interrupt or have no user + * context, we must not take the fault.. 
+ */ +- if (in_atomic() || !mm || current->pagefault_disabled) { ++ if (!mm || pagefault_disabled()) { + bad_page_fault(regs, address, SIGSEGV); + return; + } +Index: linux-stable/include/linux/sched.h +=================================================================== +--- linux-stable.orig/include/linux/sched.h ++++ linux-stable/include/linux/sched.h +@@ -52,6 +52,7 @@ struct sched_param { + #include + #include + #include ++#include + + #include + +@@ -1260,7 +1261,9 @@ struct task_struct { + /* mutex deadlock detection */ + struct mutex_waiter *blocked_on; + #endif ++#ifdef CONFIG_PREEMPT_RT_FULL + int pagefault_disabled; ++#endif + #ifdef CONFIG_TRACE_IRQFLAGS + unsigned int irq_events; + unsigned long hardirq_enable_ip; +@@ -1440,6 +1443,17 @@ static inline void set_numabalancing_sta + } + #endif + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; } ++#else ++static inline bool cur_pf_disabled(void) { return false; } ++#endif ++ ++static inline bool pagefault_disabled(void) ++{ ++ return in_atomic() || cur_pf_disabled(); ++} ++ + static inline struct pid *task_pid(struct task_struct *task) + { + return task->pids[PIDTYPE_PID].pid; +Index: linux-stable/kernel/fork.c +=================================================================== +--- linux-stable.orig/kernel/fork.c ++++ linux-stable/kernel/fork.c +@@ -1293,7 +1293,9 @@ static struct task_struct *copy_process( + p->hardirq_context = 0; + p->softirq_context = 0; + #endif ++#ifdef CONFIG_PREEMPT_RT_FULL + p->pagefault_disabled = 0; ++#endif + #ifdef CONFIG_LOCKDEP + p->lockdep_depth = 0; /* no locks held yet */ + p->curr_chain_key = 0; diff --git a/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch b/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch new file mode 100644 index 000000000..67cf51b78 --- /dev/null +++ b/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch @@ -0,0 +1,169 @@ +Subject: rcu: Frob softirq test +From: Peter Zijlstra +Date: Sat Aug 13 00:23:17 CEST 2011 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +With RT_FULL we get the below wreckage: + +[ 126.060484] ======================================================= +[ 126.060486] [ INFO: possible circular locking dependency detected ] +[ 126.060489] 3.0.1-rt10+ #30 +[ 126.060490] ------------------------------------------------------- +[ 126.060492] irq/24-eth0/1235 is trying to acquire lock: +[ 126.060495] (&(lock)->wait_lock#2){+.+...}, at: [] rt_mutex_slowunlock+0x16/0x55 +[ 126.060503] +[ 126.060504] but task is already holding lock: +[ 126.060506] (&p->pi_lock){-...-.}, at: [] try_to_wake_up+0x35/0x429 +[ 126.060511] +[ 126.060511] which lock already depends on the new lock. 
+[ 126.060513] +[ 126.060514] +[ 126.060514] the existing dependency chain (in reverse order) is: +[ 126.060516] +[ 126.060516] -> #1 (&p->pi_lock){-...-.}: +[ 126.060519] [] lock_acquire+0x145/0x18a +[ 126.060524] [] _raw_spin_lock_irqsave+0x4b/0x85 +[ 126.060527] [] task_blocks_on_rt_mutex+0x36/0x20f +[ 126.060531] [] rt_mutex_slowlock+0xd1/0x15a +[ 126.060534] [] rt_mutex_lock+0x2d/0x2f +[ 126.060537] [] rcu_boost+0xad/0xde +[ 126.060541] [] rcu_boost_kthread+0x7d/0x9b +[ 126.060544] [] kthread+0x99/0xa1 +[ 126.060547] [] kernel_thread_helper+0x4/0x10 +[ 126.060551] +[ 126.060552] -> #0 (&(lock)->wait_lock#2){+.+...}: +[ 126.060555] [] __lock_acquire+0x1157/0x1816 +[ 126.060558] [] lock_acquire+0x145/0x18a +[ 126.060561] [] _raw_spin_lock+0x40/0x73 +[ 126.060564] [] rt_mutex_slowunlock+0x16/0x55 +[ 126.060566] [] rt_mutex_unlock+0x27/0x29 +[ 126.060569] [] rcu_read_unlock_special+0x17e/0x1c4 +[ 126.060573] [] __rcu_read_unlock+0x48/0x89 +[ 126.060576] [] select_task_rq_rt+0xc7/0xd5 +[ 126.060580] [] try_to_wake_up+0x175/0x429 +[ 126.060583] [] wake_up_process+0x15/0x17 +[ 126.060585] [] wakeup_softirqd+0x24/0x26 +[ 126.060590] [] irq_exit+0x49/0x55 +[ 126.060593] [] smp_apic_timer_interrupt+0x8a/0x98 +[ 126.060597] [] apic_timer_interrupt+0x13/0x20 +[ 126.060600] [] irq_forced_thread_fn+0x1b/0x44 +[ 126.060603] [] irq_thread+0xde/0x1af +[ 126.060606] [] kthread+0x99/0xa1 +[ 126.060608] [] kernel_thread_helper+0x4/0x10 +[ 126.060611] +[ 126.060612] other info that might help us debug this: +[ 126.060614] +[ 126.060615] Possible unsafe locking scenario: +[ 126.060616] +[ 126.060617] CPU0 CPU1 +[ 126.060619] ---- ---- +[ 126.060620] lock(&p->pi_lock); +[ 126.060623] lock(&(lock)->wait_lock); +[ 126.060625] lock(&p->pi_lock); +[ 126.060627] lock(&(lock)->wait_lock); +[ 126.060629] +[ 126.060629] *** DEADLOCK *** +[ 126.060630] +[ 126.060632] 1 lock held by irq/24-eth0/1235: +[ 126.060633] #0: (&p->pi_lock){-...-.}, at: [] try_to_wake_up+0x35/0x429 +[ 126.060638] +[ 126.060638] stack backtrace: +[ 126.060641] Pid: 1235, comm: irq/24-eth0 Not tainted 3.0.1-rt10+ #30 +[ 126.060643] Call Trace: +[ 126.060644] [] print_circular_bug+0x289/0x29a +[ 126.060651] [] __lock_acquire+0x1157/0x1816 +[ 126.060655] [] ? trace_hardirqs_off_caller+0x1f/0x99 +[ 126.060658] [] ? rt_mutex_slowunlock+0x16/0x55 +[ 126.060661] [] lock_acquire+0x145/0x18a +[ 126.060664] [] ? rt_mutex_slowunlock+0x16/0x55 +[ 126.060668] [] _raw_spin_lock+0x40/0x73 +[ 126.060671] [] ? rt_mutex_slowunlock+0x16/0x55 +[ 126.060674] [] ? rcu_report_qs_rsp+0x87/0x8c +[ 126.060677] [] rt_mutex_slowunlock+0x16/0x55 +[ 126.060680] [] ? rcu_read_unlock_special+0x9b/0x1c4 +[ 126.060683] [] rt_mutex_unlock+0x27/0x29 +[ 126.060687] [] rcu_read_unlock_special+0x17e/0x1c4 +[ 126.060690] [] __rcu_read_unlock+0x48/0x89 +[ 126.060693] [] select_task_rq_rt+0xc7/0xd5 +[ 126.060696] [] ? select_task_rq_rt+0x27/0xd5 +[ 126.060701] [] ? clockevents_program_event+0x8e/0x90 +[ 126.060704] [] try_to_wake_up+0x175/0x429 +[ 126.060708] [] ? tick_program_event+0x1f/0x21 +[ 126.060711] [] wake_up_process+0x15/0x17 +[ 126.060715] [] wakeup_softirqd+0x24/0x26 +[ 126.060718] [] irq_exit+0x49/0x55 +[ 126.060721] [] smp_apic_timer_interrupt+0x8a/0x98 +[ 126.060724] [] apic_timer_interrupt+0x13/0x20 +[ 126.060726] [] ? migrate_disable+0x75/0x12d +[ 126.060733] [] ? local_bh_disable+0xe/0x1f +[ 126.060736] [] ? local_bh_disable+0x1d/0x1f +[ 126.060739] [] irq_forced_thread_fn+0x1b/0x44 +[ 126.060742] [] ? 
_raw_spin_unlock_irq+0x3b/0x59 +[ 126.060745] [] irq_thread+0xde/0x1af +[ 126.060748] [] ? irq_thread_fn+0x3a/0x3a +[ 126.060751] [] ? irq_finalize_oneshot+0xd1/0xd1 +[ 126.060754] [] ? irq_finalize_oneshot+0xd1/0xd1 +[ 126.060757] [] kthread+0x99/0xa1 +[ 126.060761] [] kernel_thread_helper+0x4/0x10 +[ 126.060764] [] ? finish_task_switch+0x87/0x10a +[ 126.060768] [] ? retint_restore_args+0xe/0xe +[ 126.060771] [] ? __init_kthread_worker+0x8c/0x8c +[ 126.060774] [] ? gs_change+0xb/0xb + +Because irq_exit() does: + +void irq_exit(void) +{ + account_system_vtime(current); + trace_hardirq_exit(); + sub_preempt_count(IRQ_EXIT_OFFSET); + if (!in_interrupt() && local_softirq_pending()) + invoke_softirq(); + + ... +} + +Which triggers a wakeup, which uses RCU, now if the interrupted task has +t->rcu_read_unlock_special set, the rcu usage from the wakeup will end +up in rcu_read_unlock_special(). rcu_read_unlock_special() will test +for in_irq(), which will fail as we just decremented preempt_count +with IRQ_EXIT_OFFSET, and in_sering_softirq(), which for +PREEMPT_RT_FULL reads: + +int in_serving_softirq(void) +{ + int res; + + preempt_disable(); + res = __get_cpu_var(local_softirq_runner) == current; + preempt_enable(); + return res; +} + +Which will thus also fail, resulting in the above wreckage. + +The 'somewhat' ugly solution is to open-code the preempt_count() test +in rcu_read_unlock_special(). + +Also, we're not at all sure how ->rcu_read_unlock_special gets set +here... so this is very likely a bandaid and more thought is required. + +Cc: Paul E. McKenney +Signed-off-by: Peter Zijlstra +--- + kernel/rcutree_plugin.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: linux-stable/kernel/rcutree_plugin.h +=================================================================== +--- linux-stable.orig/kernel/rcutree_plugin.h ++++ linux-stable/kernel/rcutree_plugin.h +@@ -362,7 +362,7 @@ void rcu_read_unlock_special(struct task + } + + /* Hardware IRQ handlers cannot block. */ +- if (in_irq() || in_serving_softirq()) { ++ if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) { + local_irq_restore(flags); + return; + } diff --git a/debian/patches/features/all/rt/peterz-raw_pagefault_disable.patch b/debian/patches/features/all/rt/peterz-raw_pagefault_disable.patch new file mode 100644 index 000000000..abe006bb3 --- /dev/null +++ b/debian/patches/features/all/rt/peterz-raw_pagefault_disable.patch @@ -0,0 +1,152 @@ +Subject: mm: raw_pagefault_disable +From: Peter Zijlstra +Date: Fri Aug 05 17:16:58 CEST 2011 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Adding migrate_disable() to pagefault_disable() to preserve the +per-cpu thing for kmap_atomic might not have been the best of choices. +But short of adding preempt_disable/migrate_disable foo all over the +kmap code it still seems the best way. + +It does however yield the below borkage as well as wreck !-rt builds +since !-rt does rely on pagefault_disable() not preempting. So fix all +that up by adding raw_pagefault_disable(). + + [] warn_slowpath_common+0x85/0x9d + [] warn_slowpath_fmt+0x46/0x48 + [] ? _raw_spin_lock+0x6c/0x73 + [] ? watchdog_overflow_callback+0x9b/0xd0 + [] watchdog_overflow_callback+0xb7/0xd0 + [] __perf_event_overflow+0x11c/0x1fe + [] ? perf_event_update_userpage+0x149/0x151 + [] ? 
perf_event_task_disable+0x7c/0x7c + [] perf_event_overflow+0x14/0x16 + [] x86_pmu_handle_irq+0xcb/0x108 + [] perf_event_nmi_handler+0x46/0x91 + [] notifier_call_chain+0x79/0xa6 + [] __atomic_notifier_call_chain+0x66/0x98 + [] ? notifier_call_chain+0xa6/0xa6 + [] atomic_notifier_call_chain+0x14/0x16 + [] notify_die+0x2e/0x30 + [] do_nmi+0x7e/0x22b + [] nmi+0x1a/0x2c + [] ? sub_preempt_count+0x4b/0xaa + <> [] delay_tsc+0xac/0xd1 + [] __delay+0xf/0x11 + [] do_raw_spin_lock+0xd2/0x13c + [] _raw_spin_lock_irqsave+0x6b/0x85 + [] ? task_rq_lock+0x35/0x8d + [] task_rq_lock+0x35/0x8d + [] migrate_disable+0x65/0x12c + [] pagefault_disable+0xe/0x1f + [] dump_trace+0x21f/0x2e2 + [] show_trace_log_lvl+0x54/0x5d + [] show_trace+0x15/0x17 + [] dump_stack+0x77/0x80 + [] spin_bug+0x9c/0xa3 + [] ? task_rq_lock+0x50/0x8d + [] do_raw_spin_lock+0x47/0x13c + [] _raw_spin_lock+0x60/0x73 + [] ? task_rq_lock+0x50/0x8d + [] task_rq_lock+0x50/0x8d + [] migrate_disable+0x65/0x12c + [] pagefault_disable+0xe/0x1f + [] dump_trace+0x21f/0x2e2 + [] save_stack_trace+0x2f/0x4c + [] save_trace+0x3f/0xaf + [] mark_lock+0x228/0x530 + [] __lock_acquire+0x662/0x1812 + [] ? native_sched_clock+0x37/0x6d + [] ? trace_hardirqs_off_caller+0x1f/0x99 + [] ? sched_rt_period_timer+0xbd/0x218 + [] lock_acquire+0x145/0x18a + [] ? sched_rt_period_timer+0xbd/0x218 + [] _raw_spin_lock+0x40/0x73 + [] ? sched_rt_period_timer+0xbd/0x218 + [] sched_rt_period_timer+0xbd/0x218 + [] __run_hrtimer+0x1e4/0x347 + [] ? can_migrate_task.clone.82+0x14a/0x14a + [] hrtimer_interrupt+0xee/0x1d6 + [] ? add_preempt_count+0xae/0xb2 + [] smp_apic_timer_interrupt+0x85/0x98 + [] apic_timer_interrupt+0x13/0x20 + + +Signed-off-by: Peter Zijlstra +Link: http://lkml.kernel.org/n/tip-31keae8mkjiv8esq4rl76cib@git.kernel.org +--- + include/linux/uaccess.h | 30 ++++++++++++++++++++++++++++-- + mm/memory.c | 2 ++ + 2 files changed, 30 insertions(+), 2 deletions(-) + +Index: linux-stable/include/linux/uaccess.h +=================================================================== +--- linux-stable.orig/include/linux/uaccess.h ++++ linux-stable/include/linux/uaccess.h +@@ -8,8 +8,34 @@ + * These routines enable/disable the pagefault handler in that + * it will not take any MM locks and go straight to the fixup table. 
+ */ ++static inline void raw_pagefault_disable(void) ++{ ++ inc_preempt_count(); ++ barrier(); ++} ++ ++static inline void raw_pagefault_enable(void) ++{ ++ barrier(); ++ dec_preempt_count(); ++ barrier(); ++ preempt_check_resched(); ++} ++ ++#ifndef CONFIG_PREEMPT_RT_FULL ++static inline void pagefault_disable(void) ++{ ++ raw_pagefault_disable(); ++} ++ ++static inline void pagefault_enable(void) ++{ ++ raw_pagefault_enable(); ++} ++#else + extern void pagefault_disable(void); + extern void pagefault_enable(void); ++#endif + + #ifndef ARCH_HAS_NOCACHE_UACCESS + +@@ -50,9 +76,9 @@ static inline unsigned long __copy_from_ + mm_segment_t old_fs = get_fs(); \ + \ + set_fs(KERNEL_DS); \ +- pagefault_disable(); \ ++ raw_pagefault_disable(); \ + ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \ +- pagefault_enable(); \ ++ raw_pagefault_enable(); \ + set_fs(old_fs); \ + ret; \ + }) +Index: linux-stable/mm/memory.c +=================================================================== +--- linux-stable.orig/mm/memory.c ++++ linux-stable/mm/memory.c +@@ -3748,6 +3748,7 @@ unlock: + return 0; + } + ++#ifdef CONFIG_PREEMPT_RT_FULL + void pagefault_disable(void) + { + inc_preempt_count(); +@@ -3776,6 +3777,7 @@ void pagefault_enable(void) + preempt_check_resched(); + } + EXPORT_SYMBOL(pagefault_enable); ++#endif + + /* + * By the time we get here, we already hold the mm semaphore diff --git a/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch b/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch new file mode 100644 index 000000000..565c9021a --- /dev/null +++ b/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch @@ -0,0 +1,189 @@ +Subject: crypto: Convert crypto notifier chain to SRCU +From: Peter Zijlstra +Date: Fri, 05 Oct 2012 09:03:24 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +The crypto notifier deadlocks on RT. Though this can be a real deadlock +on mainline as well due to fifo fair rwsems. + +The involved parties here are: + +[ 82.172678] swapper/0 S 0000000000000001 0 1 0 0x00000000 +[ 82.172682] ffff88042f18fcf0 0000000000000046 ffff88042f18fc80 ffffffff81491238 +[ 82.172685] 0000000000011cc0 0000000000011cc0 ffff88042f18c040 ffff88042f18ffd8 +[ 82.172688] 0000000000011cc0 0000000000011cc0 ffff88042f18ffd8 0000000000011cc0 +[ 82.172689] Call Trace: +[ 82.172697] [] ? _raw_spin_unlock_irqrestore+0x6c/0x7a +[ 82.172701] [] schedule+0x64/0x66 +[ 82.172704] [] schedule_timeout+0x27/0xd0 +[ 82.172708] [] ? unpin_current_cpu+0x1a/0x6c +[ 82.172713] [] ? migrate_enable+0x12f/0x141 +[ 82.172716] [] wait_for_common+0xbb/0x11f +[ 82.172719] [] ? try_to_wake_up+0x182/0x182 +[ 82.172722] [] wait_for_completion_interruptible+0x1d/0x2e +[ 82.172726] [] crypto_wait_for_test+0x49/0x6b +[ 82.172728] [] crypto_register_alg+0x53/0x5a +[ 82.172730] [] crypto_register_algs+0x33/0x72 +[ 82.172734] [] ? aes_init+0x12/0x12 +[ 82.172737] [] aesni_init+0x64/0x66 +[ 82.172741] [] do_one_initcall+0x7f/0x13b +[ 82.172744] [] kernel_init+0x199/0x22c +[ 82.172747] [] ? loglevel+0x31/0x31 +[ 82.172752] [] kernel_thread_helper+0x4/0x10 +[ 82.172755] [] ? retint_restore_args+0x13/0x13 +[ 82.172759] [] ? start_kernel+0x3ca/0x3ca +[ 82.172761] [] ? 
gs_change+0x13/0x13 + +[ 82.174186] cryptomgr_test S 0000000000000001 0 41 2 0x00000000 +[ 82.174189] ffff88042c971980 0000000000000046 ffffffff81d74830 0000000000000292 +[ 82.174192] 0000000000011cc0 0000000000011cc0 ffff88042c96eb80 ffff88042c971fd8 +[ 82.174195] 0000000000011cc0 0000000000011cc0 ffff88042c971fd8 0000000000011cc0 +[ 82.174195] Call Trace: +[ 82.174198] [] schedule+0x64/0x66 +[ 82.174201] [] schedule_timeout+0x27/0xd0 +[ 82.174204] [] ? unpin_current_cpu+0x1a/0x6c +[ 82.174206] [] ? migrate_enable+0x12f/0x141 +[ 82.174209] [] wait_for_common+0xbb/0x11f +[ 82.174212] [] ? try_to_wake_up+0x182/0x182 +[ 82.174215] [] wait_for_completion_interruptible+0x1d/0x2e +[ 82.174218] [] cryptomgr_notify+0x280/0x385 +[ 82.174221] [] notifier_call_chain+0x6b/0x98 +[ 82.174224] [] ? rt_down_read+0x10/0x12 +[ 82.174227] [] __blocking_notifier_call_chain+0x70/0x8d +[ 82.174230] [] blocking_notifier_call_chain+0x14/0x16 +[ 82.174234] [] crypto_probing_notify+0x24/0x50 +[ 82.174236] [] crypto_alg_mod_lookup+0x3e/0x74 +[ 82.174238] [] crypto_alloc_base+0x36/0x8f +[ 82.174241] [] cryptd_alloc_ablkcipher+0x6e/0xb5 +[ 82.174243] [] ? kzalloc.clone.5+0xe/0x10 +[ 82.174246] [] ablk_init_common+0x1d/0x38 +[ 82.174249] [] ablk_ecb_init+0x15/0x17 +[ 82.174251] [] __crypto_alloc_tfm+0xc7/0x114 +[ 82.174254] [] ? crypto_lookup_skcipher+0x1f/0xe4 +[ 82.174256] [] crypto_alloc_ablkcipher+0x60/0xa5 +[ 82.174258] [] alg_test_skcipher+0x24/0x9b +[ 82.174261] [] ? finish_task_switch+0x3f/0xfa +[ 82.174263] [] alg_test+0x16f/0x1d7 +[ 82.174267] [] ? cryptomgr_probe+0xac/0xac +[ 82.174269] [] cryptomgr_test+0x2c/0x47 +[ 82.174272] [] kthread+0x7e/0x86 +[ 82.174275] [] ? finish_task_switch+0xaf/0xfa +[ 82.174278] [] kernel_thread_helper+0x4/0x10 +[ 82.174281] [] ? retint_restore_args+0x13/0x13 +[ 82.174284] [] ? __init_kthread_worker+0x8c/0x8c +[ 82.174287] [] ? gs_change+0x13/0x13 + +[ 82.174329] cryptomgr_probe D 0000000000000002 0 47 2 0x00000000 +[ 82.174332] ffff88042c991b70 0000000000000046 ffff88042c991bb0 0000000000000006 +[ 82.174335] 0000000000011cc0 0000000000011cc0 ffff88042c98ed00 ffff88042c991fd8 +[ 82.174338] 0000000000011cc0 0000000000011cc0 ffff88042c991fd8 0000000000011cc0 +[ 82.174338] Call Trace: +[ 82.174342] [] schedule+0x64/0x66 +[ 82.174344] [] __rt_mutex_slowlock+0x85/0xbe +[ 82.174347] [] rt_mutex_slowlock+0xec/0x159 +[ 82.174351] [] rt_mutex_fastlock.clone.8+0x29/0x2f +[ 82.174353] [] rt_mutex_lock+0x33/0x37 +[ 82.174356] [] __rt_down_read+0x50/0x5a +[ 82.174358] [] ? rt_down_read+0x10/0x12 +[ 82.174360] [] rt_down_read+0x10/0x12 +[ 82.174363] [] __blocking_notifier_call_chain+0x58/0x8d +[ 82.174366] [] blocking_notifier_call_chain+0x14/0x16 +[ 82.174369] [] crypto_probing_notify+0x24/0x50 +[ 82.174372] [] crypto_wait_for_test+0x22/0x6b +[ 82.174374] [] crypto_register_instance+0xb4/0xc0 +[ 82.174377] [] cryptd_create+0x378/0x3b6 +[ 82.174379] [] ? __crypto_lookup_template+0x5b/0x63 +[ 82.174382] [] cryptomgr_probe+0x45/0xac +[ 82.174385] [] ? crypto_alloc_pcomp+0x1b/0x1b +[ 82.174388] [] kthread+0x7e/0x86 +[ 82.174391] [] ? finish_task_switch+0xaf/0xfa +[ 82.174394] [] kernel_thread_helper+0x4/0x10 +[ 82.174398] [] ? retint_restore_args+0x13/0x13 +[ 82.174401] [] ? __init_kthread_worker+0x8c/0x8c +[ 82.174403] [] ? gs_change+0x13/0x13 + +cryptomgr_test spawns the cryptomgr_probe thread from the notifier +call. The probe thread fires the same notifier as the test thread and +deadlocks on the rwsem on RT. 
+ +Now this is a potential deadlock in mainline as well, because we have +fifo fair rwsems. If another thread blocks with a down_write() on the +notifier chain before the probe thread issues the down_read() it will +block the probe thread and the whole party is dead locked. + +Signed-off-by: Peter Zijlstra +Signed-off-by: Thomas Gleixner +--- + crypto/algapi.c | 4 ++-- + crypto/api.c | 6 +++--- + crypto/internal.h | 4 ++-- + 3 files changed, 7 insertions(+), 7 deletions(-) + +Index: linux-stable/crypto/algapi.c +=================================================================== +--- linux-stable.orig/crypto/algapi.c ++++ linux-stable/crypto/algapi.c +@@ -684,13 +684,13 @@ EXPORT_SYMBOL_GPL(crypto_spawn_tfm2); + + int crypto_register_notifier(struct notifier_block *nb) + { +- return blocking_notifier_chain_register(&crypto_chain, nb); ++ return srcu_notifier_chain_register(&crypto_chain, nb); + } + EXPORT_SYMBOL_GPL(crypto_register_notifier); + + int crypto_unregister_notifier(struct notifier_block *nb) + { +- return blocking_notifier_chain_unregister(&crypto_chain, nb); ++ return srcu_notifier_chain_unregister(&crypto_chain, nb); + } + EXPORT_SYMBOL_GPL(crypto_unregister_notifier); + +Index: linux-stable/crypto/api.c +=================================================================== +--- linux-stable.orig/crypto/api.c ++++ linux-stable/crypto/api.c +@@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(crypto_alg_list); + DECLARE_RWSEM(crypto_alg_sem); + EXPORT_SYMBOL_GPL(crypto_alg_sem); + +-BLOCKING_NOTIFIER_HEAD(crypto_chain); ++SRCU_NOTIFIER_HEAD(crypto_chain); + EXPORT_SYMBOL_GPL(crypto_chain); + + struct crypto_alg *crypto_mod_get(struct crypto_alg *alg) +@@ -231,10 +231,10 @@ int crypto_probing_notify(unsigned long + { + int ok; + +- ok = blocking_notifier_call_chain(&crypto_chain, val, v); ++ ok = srcu_notifier_call_chain(&crypto_chain, val, v); + if (ok == NOTIFY_DONE) { + request_module("cryptomgr"); +- ok = blocking_notifier_call_chain(&crypto_chain, val, v); ++ ok = srcu_notifier_call_chain(&crypto_chain, val, v); + } + + return ok; +Index: linux-stable/crypto/internal.h +=================================================================== +--- linux-stable.orig/crypto/internal.h ++++ linux-stable/crypto/internal.h +@@ -48,7 +48,7 @@ struct crypto_larval { + + extern struct list_head crypto_alg_list; + extern struct rw_semaphore crypto_alg_sem; +-extern struct blocking_notifier_head crypto_chain; ++extern struct srcu_notifier_head crypto_chain; + + #ifdef CONFIG_PROC_FS + void __init crypto_init_proc(void); +@@ -142,7 +142,7 @@ static inline int crypto_is_moribund(str + + static inline void crypto_notify(unsigned long val, void *v) + { +- blocking_notifier_call_chain(&crypto_chain, val, v); ++ srcu_notifier_call_chain(&crypto_chain, val, v); + } + + #endif /* _CRYPTO_INTERNAL_H */ diff --git a/debian/patches/features/all/rt/pid-h-include-atomic-h.patch b/debian/patches/features/all/rt/pid-h-include-atomic-h.patch new file mode 100644 index 000000000..4e62af1de --- /dev/null +++ b/debian/patches/features/all/rt/pid-h-include-atomic-h.patch @@ -0,0 +1,22 @@ +Subject: rwsem-inlcude-fix.patch +From: Thomas Gleixner +Date: Fri, 15 Jul 2011 21:24:27 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/pid.h | 1 + + 1 file changed, 1 insertion(+) + +Index: linux-stable/include/linux/pid.h +=================================================================== +--- linux-stable.orig/include/linux/pid.h 
++++ linux-stable/include/linux/pid.h +@@ -2,6 +2,7 @@ + #define _LINUX_PID_H + + #include ++#include + + enum pid_type + { diff --git a/debian/patches/features/all/rt/ping-sysrq.patch b/debian/patches/features/all/rt/ping-sysrq.patch new file mode 100644 index 000000000..4c3dc42a8 --- /dev/null +++ b/debian/patches/features/all/rt/ping-sysrq.patch @@ -0,0 +1,130 @@ +Subject: net: sysrq via icmp +From: Carsten Emde +Date: Tue, 19 Jul 2011 13:51:17 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +There are (probably rare) situations when a system crashed and the system +console becomes unresponsive but the network icmp layer still is alive. +Wouldn't it be wonderful, if we then could submit a sysreq command via ping? + +This patch provides this facility. Please consult the updated documentation +Documentation/sysrq.txt for details. + +Signed-off-by: Carsten Emde + +--- + Documentation/sysrq.txt | 11 +++++++++-- + include/net/netns/ipv4.h | 1 + + net/ipv4/icmp.c | 30 ++++++++++++++++++++++++++++++ + net/ipv4/sysctl_net_ipv4.c | 7 +++++++ + 4 files changed, 47 insertions(+), 2 deletions(-) + +Index: linux-stable/Documentation/sysrq.txt +=================================================================== +--- linux-stable.orig/Documentation/sysrq.txt ++++ linux-stable/Documentation/sysrq.txt +@@ -57,10 +57,17 @@ On PowerPC - Press 'ALT - Print Screen ( + On other - If you know of the key combos for other architectures, please + let me know so I can add them to this section. + +-On all - write a character to /proc/sysrq-trigger. e.g.: +- ++On all - write a character to /proc/sysrq-trigger, e.g.: + echo t > /proc/sysrq-trigger + ++On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g. ++ echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq ++ Send an ICMP echo request with this pattern plus the particular ++ SysRq command key. Example: ++ # ping -c1 -s57 -p0102030468 ++ will trigger the SysRq-H (help) command. ++ ++ + * What are the 'command' keys? + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + 'b' - Will immediately reboot the system without syncing or unmounting +Index: linux-stable/include/net/netns/ipv4.h +=================================================================== +--- linux-stable.orig/include/net/netns/ipv4.h ++++ linux-stable/include/net/netns/ipv4.h +@@ -57,6 +57,7 @@ struct netns_ipv4 { + + int sysctl_icmp_echo_ignore_all; + int sysctl_icmp_echo_ignore_broadcasts; ++ int sysctl_icmp_echo_sysrq; + int sysctl_icmp_ignore_bogus_error_responses; + int sysctl_icmp_ratelimit; + int sysctl_icmp_ratemask; +Index: linux-stable/net/ipv4/icmp.c +=================================================================== +--- linux-stable.orig/net/ipv4/icmp.c ++++ linux-stable/net/ipv4/icmp.c +@@ -69,6 +69,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -768,6 +769,30 @@ static void icmp_redirect(struct sk_buff + } + + /* ++ * 32bit and 64bit have different timestamp length, so we check for ++ * the cookie at offset 20 and verify it is repeated at offset 50 ++ */ ++#define CO_POS0 20 ++#define CO_POS1 50 ++#define CO_SIZE sizeof(int) ++#define ICMP_SYSRQ_SIZE 57 ++ ++/* ++ * We got a ICMP_SYSRQ_SIZE sized ping request. Check for the cookie ++ * pattern and if it matches send the next byte as a trigger to sysrq. 
++ */ ++static void icmp_check_sysrq(struct net *net, struct sk_buff *skb) ++{ ++ int cookie = htonl(net->ipv4.sysctl_icmp_echo_sysrq); ++ char *p = skb->data; ++ ++ if (!memcmp(&cookie, p + CO_POS0, CO_SIZE) && ++ !memcmp(&cookie, p + CO_POS1, CO_SIZE) && ++ p[CO_POS0 + CO_SIZE] == p[CO_POS1 + CO_SIZE]) ++ handle_sysrq(p[CO_POS0 + CO_SIZE]); ++} ++ ++/* + * Handle ICMP_ECHO ("ping") requests. + * + * RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo +@@ -794,6 +819,11 @@ static void icmp_echo(struct sk_buff *sk + icmp_param.data_len = skb->len; + icmp_param.head_len = sizeof(struct icmphdr); + icmp_reply(&icmp_param, skb); ++ ++ if (skb->len == ICMP_SYSRQ_SIZE && ++ net->ipv4.sysctl_icmp_echo_sysrq) { ++ icmp_check_sysrq(net, skb); ++ } + } + } + +Index: linux-stable/net/ipv4/sysctl_net_ipv4.c +=================================================================== +--- linux-stable.orig/net/ipv4/sysctl_net_ipv4.c ++++ linux-stable/net/ipv4/sysctl_net_ipv4.c +@@ -790,6 +790,13 @@ static struct ctl_table ipv4_net_table[] + .proc_handler = proc_dointvec + }, + { ++ .procname = "icmp_echo_sysrq", ++ .data = &init_net.ipv4.sysctl_icmp_echo_sysrq, ++ .maxlen = sizeof(int), ++ .mode = 0644, ++ .proc_handler = proc_dointvec ++ }, ++ { + .procname = "icmp_ignore_bogus_error_responses", + .data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses, + .maxlen = sizeof(int), diff --git a/debian/patches/features/all/rt/posix-timers-avoid-wakeups-when-no-timers-are-active.patch b/debian/patches/features/all/rt/posix-timers-avoid-wakeups-when-no-timers-are-active.patch new file mode 100644 index 000000000..04b197040 --- /dev/null +++ b/debian/patches/features/all/rt/posix-timers-avoid-wakeups-when-no-timers-are-active.patch @@ -0,0 +1,60 @@ +From: Thomas Gleixner +Date: Fri, 3 Jul 2009 08:44:44 -0500 +Subject: posix-timers: Avoid wakeups when no timers are active +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Waking the thread even when no timers are scheduled is useless. 
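
To make that fast path concrete, here is a small userspace model, not kernel code: the struct layout and helper names below are invented, and only the shape of the check mirrors the __fastpath_timer_check() added by this patch. The worker is woken only when the task actually has armed CPU timers.

#include <stdbool.h>
#include <stdio.h>

struct cputime { long utime, stime, sum_exec; };

struct task {
        int exiting;
        struct cputime task_expires;    /* per-thread timer expiries */
        struct cputime signal_expires;  /* process-wide timer expiries */
};

static bool cputime_zero(const struct cputime *c)
{
        return !c->utime && !c->stime && !c->sum_exec;
}

/* no wakeup needed if the task is exiting or has nothing armed */
static bool needs_timer_work(const struct task *t)
{
        if (t->exiting)
                return false;
        return !cputime_zero(&t->task_expires) ||
               !cputime_zero(&t->signal_expires);
}

int main(void)
{
        struct task idle  = { 0 };
        struct task armed = { .task_expires = { .utime = 42 } };

        printf("idle task needs work:  %d\n", needs_timer_work(&idle));  /* 0 */
        printf("armed task needs work: %d\n", needs_timer_work(&armed)); /* 1 */
        return 0;
}
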
+ +Signed-off-by: Thomas Gleixner + +--- + kernel/posix-cpu-timers.c | 21 ++++++++++++++++++--- + 1 file changed, 18 insertions(+), 3 deletions(-) + +Index: linux-stable/kernel/posix-cpu-timers.c +=================================================================== +--- linux-stable.orig/kernel/posix-cpu-timers.c ++++ linux-stable/kernel/posix-cpu-timers.c +@@ -1446,6 +1446,21 @@ wait_to_die: + return 0; + } + ++static inline int __fastpath_timer_check(struct task_struct *tsk) ++{ ++ /* tsk == current, ensure it is safe to use ->signal/sighand */ ++ if (unlikely(tsk->exit_state)) ++ return 0; ++ ++ if (!task_cputime_zero(&tsk->cputime_expires)) ++ return 1; ++ ++ if (!task_cputime_zero(&tsk->signal->cputime_expires)) ++ return 1; ++ ++ return 0; ++} ++ + void run_posix_cpu_timers(struct task_struct *tsk) + { + unsigned long cpu = smp_processor_id(); +@@ -1458,7 +1473,7 @@ void run_posix_cpu_timers(struct task_st + tasklist = per_cpu(posix_timer_tasklist, cpu); + + /* check to see if we're already queued */ +- if (!tsk->posix_timer_list) { ++ if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) { + get_task_struct(tsk); + if (tasklist) { + tsk->posix_timer_list = tasklist; +@@ -1470,9 +1485,9 @@ void run_posix_cpu_timers(struct task_st + tsk->posix_timer_list = tsk; + } + per_cpu(posix_timer_tasklist, cpu) = tsk; ++ ++ wake_up_process(per_cpu(posix_timer_task, cpu)); + } +- /* XXX signal the thread somehow */ +- wake_up_process(per_cpu(posix_timer_task, cpu)); + } + + /* diff --git a/debian/patches/features/all/rt/posix-timers-no-broadcast.patch b/debian/patches/features/all/rt/posix-timers-no-broadcast.patch new file mode 100644 index 000000000..e24210734 --- /dev/null +++ b/debian/patches/features/all/rt/posix-timers-no-broadcast.patch @@ -0,0 +1,36 @@ +From: Thomas Gleixner +Date: Fri, 3 Jul 2009 08:29:20 -0500 +Subject: posix-timers: Prevent broadcast signals +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Posix timers should not send broadcast signals and kernel only +signals. Prevent it. 
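Here "kernel only" and core-dumping signals are the ones matched by the kernel's sig_kernel_only() and sig_kernel_coredump() macros, and the hunk below refuses them (plus out-of-range numbers) as timer notification signals. A rough, self-contained rendering of that filter, with the two predicates open-coded as approximations rather than the kernel's real bitmask macros:

    #include <signal.h>
    #include <stdbool.h>

    /* Approximations of sig_kernel_only() and sig_kernel_coredump(); the
     * kernel derives these from bitmasks, this is illustration only. */
    static bool kernel_only_sig(int sig)
    {
        return sig == SIGKILL || sig == SIGSTOP;
    }

    static bool coredump_sig(int sig)
    {
        return sig == SIGQUIT || sig == SIGILL  || sig == SIGTRAP ||
               sig == SIGABRT || sig == SIGFPE  || sig == SIGSEGV ||
               sig == SIGBUS  || sig == SIGSYS  || sig == SIGXCPU ||
               sig == SIGXFSZ;
    }

    /* The rule good_sigevent() applies to signal-based notification
     * after this patch. */
    static bool valid_timer_signal(int sig)
    {
        return sig > 0 && sig <= SIGRTMAX &&
               !kernel_only_sig(sig) && !coredump_sig(sig);
    }

    int main(void)
    {
        return valid_timer_signal(SIGUSR1) && !valid_timer_signal(SIGKILL) ? 0 : 1;
    }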
+ +Signed-off-by: Thomas Gleixner + +--- + kernel/posix-timers.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +Index: linux-stable/kernel/posix-timers.c +=================================================================== +--- linux-stable.orig/kernel/posix-timers.c ++++ linux-stable/kernel/posix-timers.c +@@ -497,6 +497,7 @@ static enum hrtimer_restart posix_timer_ + static struct pid *good_sigevent(sigevent_t * event) + { + struct task_struct *rtn = current->group_leader; ++ int sig = event->sigev_signo; + + if ((event->sigev_notify & SIGEV_THREAD_ID ) && + (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) || +@@ -505,7 +506,8 @@ static struct pid *good_sigevent(sigeven + return NULL; + + if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) && +- ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX))) ++ (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) || ++ sig_kernel_coredump(sig))) + return NULL; + + return task_pid(rtn); diff --git a/debian/patches/features/all/rt/posix-timers-shorten-cpu-timers-thread.patch b/debian/patches/features/all/rt/posix-timers-shorten-cpu-timers-thread.patch new file mode 100644 index 000000000..862b3e9bb --- /dev/null +++ b/debian/patches/features/all/rt/posix-timers-shorten-cpu-timers-thread.patch @@ -0,0 +1,29 @@ +From: Arnaldo Carvalho de Melo +Date: Fri, 3 Jul 2009 08:30:00 -0500 +Subject: posix-timers: Shorten posix_cpu_timers/ kernel thread names +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Shorten the softirq kernel thread names because they always overflow the +limited comm length, appearing as "posix_cpu_timer" CPU# times. + +Signed-off-by: Arnaldo Carvalho de Melo +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + kernel/posix-cpu-timers.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: linux-stable/kernel/posix-cpu-timers.c +=================================================================== +--- linux-stable.orig/kernel/posix-cpu-timers.c ++++ linux-stable/kernel/posix-cpu-timers.c +@@ -1489,7 +1489,7 @@ static int posix_cpu_thread_call(struct + switch (action) { + case CPU_UP_PREPARE: + p = kthread_create(posix_cpu_timers_thread, hcpu, +- "posix_cpu_timers/%d",cpu); ++ "posixcputmr/%d",cpu); + if (IS_ERR(p)) + return NOTIFY_BAD; + p->flags |= PF_NOFREEZE; diff --git a/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch b/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch new file mode 100644 index 000000000..cd259481f --- /dev/null +++ b/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch @@ -0,0 +1,331 @@ +From: John Stultz +Date: Fri, 3 Jul 2009 08:29:58 -0500 +Subject: posix-timers: thread posix-cpu-timers on -rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +posix-cpu-timer code takes non -rt safe locks in hard irq +context. Move it to a thread. 
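Concretely, the hard-irq path is reduced to "queue the task and wake a per-CPU kthread", and the kthread then does the expiry work in a context where sleeping locks are legal. The patch below implements this with a per-CPU task list and a SCHED_FIFO thread; as a self-contained model of just the handoff (userspace pthreads, invented names, ignoring the per-CPU and reference-counting details; build with cc -pthread):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;
    static bool pending, stop;

    static void expire_timers(void)     /* stands in for __run_posix_cpu_timers() */
    {
        puts("expiring CPU timers in thread context");
    }

    static void irq_side_hook(void)     /* stands in for run_posix_cpu_timers() */
    {
        pthread_mutex_lock(&lock);
        pending = true;                 /* only queue and kick, no real work here */
        pthread_cond_signal(&kick);
        pthread_mutex_unlock(&lock);
    }

    static void *timer_thread(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        for (;;) {
            while (!pending && !stop)
                pthread_cond_wait(&kick, &lock);
            if (pending) {
                pending = false;
                pthread_mutex_unlock(&lock);
                expire_timers();        /* may sleep, may take slow locks */
                pthread_mutex_lock(&lock);
                continue;
            }
            break;
        }
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t tid;

        pthread_create(&tid, NULL, timer_thread, NULL);
        irq_side_hook();
        pthread_mutex_lock(&lock);
        stop = true;
        pthread_cond_signal(&kick);
        pthread_mutex_unlock(&lock);
        pthread_join(tid, NULL);
        return 0;
    }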
+ +[ 3.0 fixes from Peter Zijlstra ] + +Signed-off-by: John Stultz +Signed-off-by: Thomas Gleixner + +--- + include/linux/init_task.h | 7 + + include/linux/sched.h | 3 + init/main.c | 3 + kernel/fork.c | 3 + kernel/posix-cpu-timers.c | 183 ++++++++++++++++++++++++++++++++++++++++++++-- + 5 files changed, 192 insertions(+), 7 deletions(-) + +Index: linux-stable/include/linux/init_task.h +=================================================================== +--- linux-stable.orig/include/linux/init_task.h ++++ linux-stable/include/linux/init_task.h +@@ -143,6 +143,12 @@ extern struct task_group root_task_group + # define INIT_PERF_EVENTS(tsk) + #endif + ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define INIT_TIMER_LIST .posix_timer_list = NULL, ++#else ++# define INIT_TIMER_LIST ++#endif ++ + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN + # define INIT_VTIME(tsk) \ + .vtime_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.vtime_lock), \ +@@ -208,6 +214,7 @@ extern struct task_group root_task_group + .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ + .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ + .timer_slack_ns = 50000, /* 50 usec default slack */ \ ++ INIT_TIMER_LIST \ + .pids = { \ + [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ + [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ +Index: linux-stable/include/linux/sched.h +=================================================================== +--- linux-stable.orig/include/linux/sched.h ++++ linux-stable/include/linux/sched.h +@@ -1185,6 +1185,9 @@ struct task_struct { + + struct task_cputime cputime_expires; + struct list_head cpu_timers[3]; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct task_struct *posix_timer_list; ++#endif + + /* process credentials */ + const struct cred __rcu *real_cred; /* objective and real subjective task +Index: linux-stable/init/main.c +=================================================================== +--- linux-stable.orig/init/main.c ++++ linux-stable/init/main.c +@@ -6,7 +6,7 @@ + * GK 2/5/95 - Changed to support mounting root fs via NFS + * Added initrd & change_root: Werner Almesberger & Hans Lermen, Feb '96 + * Moan early if gcc is old, avoiding bogus kernels - Paul Gortmaker, May '96 +- * Simplified starting of init: Michael A. Griffith ++ * Simplified starting of init: Michael A. Griffith + */ + + #define DEBUG /* Enable initcall_debug */ +@@ -74,6 +74,7 @@ + #include + #include + #include ++#include + + #include + #include +Index: linux-stable/kernel/fork.c +=================================================================== +--- linux-stable.orig/kernel/fork.c ++++ linux-stable/kernel/fork.c +@@ -1113,6 +1113,9 @@ void mm_init_owner(struct mm_struct *mm, + */ + static void posix_cpu_timers_init(struct task_struct *tsk) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ tsk->posix_timer_list = NULL; ++#endif + tsk->cputime_expires.prof_exp = 0; + tsk->cputime_expires.virt_exp = 0; + tsk->cputime_expires.sched_exp = 0; +Index: linux-stable/kernel/posix-cpu-timers.c +=================================================================== +--- linux-stable.orig/kernel/posix-cpu-timers.c ++++ linux-stable/kernel/posix-cpu-timers.c +@@ -3,6 +3,7 @@ + */ + + #include ++#include + #include + #include + #include +@@ -722,7 +723,7 @@ static int posix_cpu_timer_set(struct k_ + /* + * Disarm any old timer after extracting its expiry time. + */ +- BUG_ON(!irqs_disabled()); ++ BUG_ON_NONRT(!irqs_disabled()); + + ret = 0; + old_incr = timer->it.cpu.incr; +@@ -1225,7 +1226,7 @@ void posix_cpu_timer_schedule(struct k_i + /* + * Now re-arm for the new expiry time. 
+ */ +- BUG_ON(!irqs_disabled()); ++ BUG_ON_NONRT(!irqs_disabled()); + arm_timer(timer); + spin_unlock(&p->sighand->siglock); + +@@ -1292,10 +1293,11 @@ static inline int fastpath_timer_check(s + sig = tsk->signal; + if (sig->cputimer.running) { + struct task_cputime group_sample; ++ unsigned long flags; + +- raw_spin_lock(&sig->cputimer.lock); ++ raw_spin_lock_irqsave(&sig->cputimer.lock, flags); + group_sample = sig->cputimer.cputime; +- raw_spin_unlock(&sig->cputimer.lock); ++ raw_spin_unlock_irqrestore(&sig->cputimer.lock, flags); + + if (task_cputime_expired(&group_sample, &sig->cputime_expires)) + return 1; +@@ -1309,13 +1311,13 @@ static inline int fastpath_timer_check(s + * already updated our counts. We need to check if any timers fire now. + * Interrupts are disabled. + */ +-void run_posix_cpu_timers(struct task_struct *tsk) ++static void __run_posix_cpu_timers(struct task_struct *tsk) + { + LIST_HEAD(firing); + struct k_itimer *timer, *next; + unsigned long flags; + +- BUG_ON(!irqs_disabled()); ++ BUG_ON_NONRT(!irqs_disabled()); + + /* + * The fast path checks that there are no expired thread or thread +@@ -1380,6 +1382,175 @@ void run_posix_cpu_timers(struct task_st + posix_cpu_timer_kick_nohz(); + } + ++#ifdef CONFIG_PREEMPT_RT_BASE ++#include ++#include ++DEFINE_PER_CPU(struct task_struct *, posix_timer_task); ++DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist); ++ ++static int posix_cpu_timers_thread(void *data) ++{ ++ int cpu = (long)data; ++ ++ BUG_ON(per_cpu(posix_timer_task,cpu) != current); ++ ++ while (!kthread_should_stop()) { ++ struct task_struct *tsk = NULL; ++ struct task_struct *next = NULL; ++ ++ if (cpu_is_offline(cpu)) ++ goto wait_to_die; ++ ++ /* grab task list */ ++ raw_local_irq_disable(); ++ tsk = per_cpu(posix_timer_tasklist, cpu); ++ per_cpu(posix_timer_tasklist, cpu) = NULL; ++ raw_local_irq_enable(); ++ ++ /* its possible the list is empty, just return */ ++ if (!tsk) { ++ set_current_state(TASK_INTERRUPTIBLE); ++ schedule(); ++ __set_current_state(TASK_RUNNING); ++ continue; ++ } ++ ++ /* Process task list */ ++ while (1) { ++ /* save next */ ++ next = tsk->posix_timer_list; ++ ++ /* run the task timers, clear its ptr and ++ * unreference it ++ */ ++ __run_posix_cpu_timers(tsk); ++ tsk->posix_timer_list = NULL; ++ put_task_struct(tsk); ++ ++ /* check if this is the last on the list */ ++ if (next == tsk) ++ break; ++ tsk = next; ++ } ++ } ++ return 0; ++ ++wait_to_die: ++ /* Wait for kthread_stop */ ++ set_current_state(TASK_INTERRUPTIBLE); ++ while (!kthread_should_stop()) { ++ schedule(); ++ set_current_state(TASK_INTERRUPTIBLE); ++ } ++ __set_current_state(TASK_RUNNING); ++ return 0; ++} ++ ++void run_posix_cpu_timers(struct task_struct *tsk) ++{ ++ unsigned long cpu = smp_processor_id(); ++ struct task_struct *tasklist; ++ ++ BUG_ON(!irqs_disabled()); ++ if(!per_cpu(posix_timer_task, cpu)) ++ return; ++ /* get per-cpu references */ ++ tasklist = per_cpu(posix_timer_tasklist, cpu); ++ ++ /* check to see if we're already queued */ ++ if (!tsk->posix_timer_list) { ++ get_task_struct(tsk); ++ if (tasklist) { ++ tsk->posix_timer_list = tasklist; ++ } else { ++ /* ++ * The list is terminated by a self-pointing ++ * task_struct ++ */ ++ tsk->posix_timer_list = tsk; ++ } ++ per_cpu(posix_timer_tasklist, cpu) = tsk; ++ } ++ /* XXX signal the thread somehow */ ++ wake_up_process(per_cpu(posix_timer_task, cpu)); ++} ++ ++/* ++ * posix_cpu_thread_call - callback that gets triggered when a CPU is added. 
++ * Here we can start up the necessary migration thread for the new CPU. ++ */ ++static int posix_cpu_thread_call(struct notifier_block *nfb, ++ unsigned long action, void *hcpu) ++{ ++ int cpu = (long)hcpu; ++ struct task_struct *p; ++ struct sched_param param; ++ ++ switch (action) { ++ case CPU_UP_PREPARE: ++ p = kthread_create(posix_cpu_timers_thread, hcpu, ++ "posix_cpu_timers/%d",cpu); ++ if (IS_ERR(p)) ++ return NOTIFY_BAD; ++ p->flags |= PF_NOFREEZE; ++ kthread_bind(p, cpu); ++ /* Must be high prio to avoid getting starved */ ++ param.sched_priority = MAX_RT_PRIO-1; ++ sched_setscheduler(p, SCHED_FIFO, ¶m); ++ per_cpu(posix_timer_task,cpu) = p; ++ break; ++ case CPU_ONLINE: ++ /* Strictly unneccessary, as first user will wake it. */ ++ wake_up_process(per_cpu(posix_timer_task,cpu)); ++ break; ++#ifdef CONFIG_HOTPLUG_CPU ++ case CPU_UP_CANCELED: ++ /* Unbind it from offline cpu so it can run. Fall thru. */ ++ kthread_bind(per_cpu(posix_timer_task, cpu), ++ cpumask_any(cpu_online_mask)); ++ kthread_stop(per_cpu(posix_timer_task,cpu)); ++ per_cpu(posix_timer_task,cpu) = NULL; ++ break; ++ case CPU_DEAD: ++ kthread_stop(per_cpu(posix_timer_task,cpu)); ++ per_cpu(posix_timer_task,cpu) = NULL; ++ break; ++#endif ++ } ++ return NOTIFY_OK; ++} ++ ++/* Register at highest priority so that task migration (migrate_all_tasks) ++ * happens before everything else. ++ */ ++static struct notifier_block posix_cpu_thread_notifier = { ++ .notifier_call = posix_cpu_thread_call, ++ .priority = 10 ++}; ++ ++static int __init posix_cpu_thread_init(void) ++{ ++ void *hcpu = (void *)(long)smp_processor_id(); ++ /* Start one for boot CPU. */ ++ unsigned long cpu; ++ ++ /* init the per-cpu posix_timer_tasklets */ ++ for_each_possible_cpu(cpu) ++ per_cpu(posix_timer_tasklist, cpu) = NULL; ++ ++ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu); ++ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu); ++ register_cpu_notifier(&posix_cpu_thread_notifier); ++ return 0; ++} ++early_initcall(posix_cpu_thread_init); ++#else /* CONFIG_PREEMPT_RT_BASE */ ++void run_posix_cpu_timers(struct task_struct *tsk) ++{ ++ __run_posix_cpu_timers(tsk); ++} ++#endif /* CONFIG_PREEMPT_RT_BASE */ ++ + /* + * Set one of the process-wide special case CPU timers or RLIMIT_CPU. + * The tsk->sighand->siglock must be held by the caller. 
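One detail of run_posix_cpu_timers() above that is easy to miss: the per-CPU list is singly linked through ->posix_timer_list and terminated by an entry that points to itself, so NULL can keep meaning "not queued" while the thread's walker still knows where the list ends. A small stand-alone illustration of that idiom (plain C, invented names, not kernel code):

    #include <assert.h>
    #include <stddef.h>

    struct node {
        struct node *next;  /* NULL = not queued; points to itself at the tail */
        int id;
    };

    /* Push in front of the current head; the first node ever queued points
     * to itself so a walker can detect the end of the list. */
    static struct node *push(struct node *head, struct node *n)
    {
        n->next = head ? head : n;
        return n;
    }

    /* Walk and unlink every node, mirroring the loop in the timer thread. */
    static void drain(struct node *head)
    {
        while (head) {
            struct node *next = head->next;

            head->next = NULL;          /* back to "not queued" */
            head = (next == head) ? NULL : next;
        }
    }

    int main(void)
    {
        struct node a = { NULL, 1 }, b = { NULL, 2 };

        drain(push(push(NULL, &a), &b));
        assert(a.next == NULL && b.next == NULL);
        return 0;
    }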
diff --git a/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch b/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch new file mode 100644 index 000000000..2e9fb5bc1 --- /dev/null +++ b/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch @@ -0,0 +1,23 @@ +Subject: power-disable-highmem-on-rt.patch +From: Thomas Gleixner +Date: Mon, 18 Jul 2011 17:08:34 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + arch/powerpc/Kconfig | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: linux-stable/arch/powerpc/Kconfig +=================================================================== +--- linux-stable.orig/arch/powerpc/Kconfig ++++ linux-stable/arch/powerpc/Kconfig +@@ -286,7 +286,7 @@ menu "Kernel options" + + config HIGHMEM + bool "High memory support" +- depends on PPC32 ++ depends on PPC32 && !PREEMPT_RT_FULL + + source kernel/Kconfig.hz + source kernel/Kconfig.preempt diff --git a/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch b/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch new file mode 100644 index 000000000..21e8ecbb2 --- /dev/null +++ b/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch @@ -0,0 +1,26 @@ +From: Thomas Gleixner +Subject: Powerpc: Use generic rwsem on RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + arch/powerpc/Kconfig | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +Index: linux-stable/arch/powerpc/Kconfig +=================================================================== +--- linux-stable.orig/arch/powerpc/Kconfig ++++ linux-stable/arch/powerpc/Kconfig +@@ -60,10 +60,11 @@ config LOCKDEP_SUPPORT + + config RWSEM_GENERIC_SPINLOCK + bool ++ default y if PREEMPT_RT_FULL + + config RWSEM_XCHGADD_ALGORITHM + bool +- default y ++ default y if !PREEMPT_RT_FULL + + config GENERIC_LOCKBREAK + bool diff --git a/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch b/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch new file mode 100644 index 000000000..ecd667b1f --- /dev/null +++ b/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch @@ -0,0 +1,313 @@ +Subject: powerpc-preempt-lazy-support.patch +From: Thomas Gleixner +Date: Thu, 01 Nov 2012 10:14:11 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + arch/powerpc/Kconfig | 1 + arch/powerpc/include/asm/thread_info.h | 8 ++++- + arch/powerpc/kernel/asm-offsets.c | 1 + arch/powerpc/kernel/entry_32.S | 17 ++++++++--- + arch/powerpc/kernel/entry_64.S | 48 +++++++++++++++++++-------------- + 5 files changed, 49 insertions(+), 26 deletions(-) + +Index: linux-stable/arch/powerpc/Kconfig +=================================================================== +--- linux-stable.orig/arch/powerpc/Kconfig ++++ linux-stable/arch/powerpc/Kconfig +@@ -133,6 +133,7 @@ config PPC + select GENERIC_CLOCKEVENTS + select GENERIC_STRNCPY_FROM_USER + select GENERIC_STRNLEN_USER ++ select HAVE_PREEMPT_LAZY + select HAVE_MOD_ARCH_SPECIFIC + select MODULES_USE_ELF_RELA + select CLONE_BACKWARDS +Index: linux-stable/arch/powerpc/include/asm/thread_info.h +=================================================================== +--- linux-stable.orig/arch/powerpc/include/asm/thread_info.h ++++ linux-stable/arch/powerpc/include/asm/thread_info.h +@@ 
-43,6 +43,8 @@ struct thread_info { + int cpu; /* cpu we're on */ + int preempt_count; /* 0 => preemptable, + <0 => BUG */ ++ int preempt_lazy_count; /* 0 => preemptable, ++ <0 => BUG */ + struct restart_block restart_block; + unsigned long local_flags; /* private flags for thread */ + +@@ -107,6 +109,7 @@ static inline struct thread_info *curren + #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation + for stack store? */ + #define TIF_MEMDIE 17 /* is terminating due to OOM killer */ ++#define TIF_NEED_RESCHED_LAZY 18 /* lazy rescheduling necessary */ + + /* as above, but as bit values */ + #define _TIF_SYSCALL_TRACE (1< +@@ -239,12 +239,12 @@ END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECK + RFI + b . /* prevent speculative execution */ + +-syscall_error: ++syscall_error: + oris r5,r5,0x1000 /* Set SO bit in CR */ + neg r3,r3 + std r5,_CCR(r1) + b .Lsyscall_error_cont +- ++ + /* Traced system call support */ + syscall_dotrace: + bl .save_nvgprs +@@ -270,7 +270,7 @@ syscall_dotrace: + syscall_enosys: + li r3,-ENOSYS + b syscall_exit +- ++ + syscall_exit_work: + #ifdef CONFIG_PPC_BOOK3S + mtmsrd r10,1 /* Restore RI */ +@@ -333,7 +333,7 @@ _GLOBAL(save_nvgprs) + std r0,_TRAP(r1) + blr + +- ++ + /* + * The sigsuspend and rt_sigsuspend system calls can call do_signal + * and thus put the process into the stopped state where we might +@@ -406,7 +406,7 @@ DSCR_DEFAULT: + * the fork code also. + * + * The code which creates the new task context is in 'copy_thread' +- * in arch/powerpc/kernel/process.c ++ * in arch/powerpc/kernel/process.c + */ + .align 7 + _GLOBAL(_switch) +@@ -637,7 +637,7 @@ _GLOBAL(ret_from_except_lite) + andi. r0,r4,_TIF_USER_WORK_MASK + beq restore + +- andi. r0,r4,_TIF_NEED_RESCHED ++ andi. r0,r4,_TIF_NEED_RESCHED_MASK + beq 1f + bl .restore_interrupts + SCHEDULE_USER +@@ -687,10 +687,18 @@ resume_kernel: + + #ifdef CONFIG_PREEMPT + /* Check if we need to preempt */ ++ lwz r8,TI_PREEMPT(r9) ++ cmpwi 0,r8,0 /* if non-zero, just restore regs and return */ ++ bne restore + andi. r0,r4,_TIF_NEED_RESCHED ++ bne+ check_count ++ ++ andi. r0,r4,_TIF_NEED_RESCHED_LAZY + beq+ restore ++ lwz r8,TI_PREEMPT_LAZY(r9) ++ + /* Check that preempt_count() == 0 and interrupts are enabled */ +- lwz r8,TI_PREEMPT(r9) ++check_count: + cmpwi cr1,r8,0 + ld r0,SOFTE(r1) + cmpdi r0,0 +@@ -707,7 +715,7 @@ resume_kernel: + /* Re-test flags and eventually loop */ + CURRENT_THREAD_INFO(r9, r1) + ld r4,TI_FLAGS(r9) +- andi. r0,r4,_TIF_NEED_RESCHED ++ andi. r0,r4,_TIF_NEED_RESCHED_MASK + bne 1b + + /* +@@ -874,7 +882,7 @@ restore_check_irq_replay: + bl .__check_irq_replay + cmpwi cr0,r3,0 + beq restore_no_replay +- ++ + /* + * We need to re-emit an interrupt. We do so by re-using our + * existing exception frame. We first change the trap value, +@@ -916,7 +924,7 @@ restore_check_irq_replay: + b .ret_from_except + #endif /* CONFIG_PPC_DOORBELL */ + 1: b .ret_from_except /* What else to do here ? */ +- ++ + unrecov_restore: + addi r3,r1,STACK_FRAME_OVERHEAD + bl .unrecoverable_exception +@@ -928,7 +936,7 @@ unrecov_restore: + * called with the MMU off. + * + * In addition, we need to be in 32b mode, at least for now. +- * ++ * + * Note: r3 is an input parameter to rtas, so don't trash it... 
+ */ + _GLOBAL(enter_rtas) +@@ -962,7 +970,7 @@ _GLOBAL(enter_rtas) + li r0,0 + mtcr r0 + +-#ifdef CONFIG_BUG ++#ifdef CONFIG_BUG + /* There is no way it is acceptable to get here with interrupts enabled, + * check it with the asm equivalent of WARN_ON + */ +@@ -970,7 +978,7 @@ _GLOBAL(enter_rtas) + 1: tdnei r0,0 + EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING + #endif +- ++ + /* Hard-disable interrupts */ + mfmsr r6 + rldicl r7,r6,48,1 +@@ -984,7 +992,7 @@ _GLOBAL(enter_rtas) + std r1,PACAR1(r13) + std r6,PACASAVEDMSR(r13) + +- /* Setup our real return addr */ ++ /* Setup our real return addr */ + LOAD_REG_ADDR(r4,.rtas_return_loc) + clrldi r4,r4,2 /* convert to realmode address */ + mtlr r4 +@@ -992,7 +1000,7 @@ _GLOBAL(enter_rtas) + li r0,0 + ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI + andc r0,r6,r0 +- ++ + li r9,1 + rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG) + ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI +@@ -1003,7 +1011,7 @@ _GLOBAL(enter_rtas) + LOAD_REG_ADDR(r4, rtas) + ld r5,RTASENTRY(r4) /* get the rtas->entry value */ + ld r4,RTASBASE(r4) /* get the rtas->base value */ +- ++ + mtspr SPRN_SRR0,r5 + mtspr SPRN_SRR1,r6 + rfid +@@ -1021,9 +1029,9 @@ _STATIC(rtas_return_loc) + mfmsr r6 + li r0,MSR_RI + andc r6,r6,r0 +- sync ++ sync + mtmsrd r6 +- ++ + ld r1,PACAR1(r4) /* Restore our SP */ + ld r4,PACASAVEDMSR(r4) /* Restore our MSR */ + +@@ -1121,7 +1129,7 @@ _GLOBAL(enter_prom) + REST_10GPRS(22, r1) + ld r4,_CCR(r1) + mtcr r4 +- ++ + addi r1,r1,PROM_FRAME_SIZE + ld r0,16(r1) + mtlr r0 diff --git a/debian/patches/features/all/rt/preempt-lazy-support.patch b/debian/patches/features/all/rt/preempt-lazy-support.patch new file mode 100644 index 000000000..41874e43c --- /dev/null +++ b/debian/patches/features/all/rt/preempt-lazy-support.patch @@ -0,0 +1,604 @@ +Subject: sched: Add support for lazy preemption +From: Thomas Gleixner +Date: Fri, 26 Oct 2012 18:50:54 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +It has become an obsession to mitigate the determinism vs. throughput +loss of RT. Looking at the mainline semantics of preemption points +gives a hint why RT sucks throughput wise for ordinary SCHED_OTHER +tasks. One major issue is the wakeup of tasks which are right away +preempting the waking task while the waking task holds a lock on which +the woken task will block right after having preempted the wakee. In +mainline this is prevented due to the implicit preemption disable of +spin/rw_lock held regions. On RT this is not possible due to the fully +preemptible nature of sleeping spinlocks. + +Though for a SCHED_OTHER task preempting another SCHED_OTHER task this +is really not a correctness issue. RT folks are concerned about +SCHED_FIFO/RR tasks preemption and not about the purely fairness +driven SCHED_OTHER preemption latencies. + +So I introduced a lazy preemption mechanism which only applies to +SCHED_OTHER tasks preempting another SCHED_OTHER task. Aside of the +existing preempt_count each tasks sports now a preempt_lazy_count +which is manipulated on lock acquiry and release. This is slightly +incorrect as for lazyness reasons I coupled this on +migrate_disable/enable so some other mechanisms get the same treatment +(e.g. get_cpu_light). + +Now on the scheduler side instead of setting NEED_RESCHED this sets +NEED_RESCHED_LAZY in case of a SCHED_OTHER/SCHED_OTHER preemption and +therefor allows to exit the waking task the lock held region before +the woken task preempts. 
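Stripped of the scheduler plumbing, the policy is: a fair task preempting another fair task only sets a lazy flag, which is honoured once the lazy-preempt count (coupled to the lock-held/migrate-disabled region) drops back to zero, while anything involving the RT classes still sets the hard flag and preempts immediately. A compilable sketch of just that decision, with invented names (the real patch uses TIF_NEED_RESCHED_LAZY, resched_task_lazy() and preempt_lazy_count):

    #include <stdbool.h>
    #include <stdio.h>

    enum sched_class_id { CLASS_FAIR, CLASS_RT };

    struct task {
        enum sched_class_id cls;
        bool need_resched;          /* hard request, honoured immediately */
        bool need_resched_lazy;     /* deferred request */
    };

    /* Called when 'woken' becomes eligible to preempt 'curr'. */
    static void request_resched(struct task *curr, const struct task *woken)
    {
        if (curr->cls == CLASS_FAIR && woken->cls == CLASS_FAIR)
            curr->need_resched_lazy = true;
        else
            curr->need_resched = true;
    }

    /* What a preemption point checks; lazy requests wait for the lazy count. */
    static bool should_preempt(const struct task *curr, int preempt_lazy_count)
    {
        if (curr->need_resched)
            return true;
        return curr->need_resched_lazy && preempt_lazy_count == 0;
    }

    int main(void)
    {
        struct task curr  = { CLASS_FAIR, false, false };
        struct task woken = { CLASS_FAIR, false, false };

        request_resched(&curr, &woken);
        printf("while lock held: %d, after release: %d\n",
               should_preempt(&curr, 1), should_preempt(&curr, 0));
        return 0;
    }

Deferring the reschedule this way lets the waking task leave its lock-held region before the woken task actually runs.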
That also works better for cross CPU wakeups +as the other side can stay in the adaptive spinning loop. + +For RT class preemption there is no change. This simply sets +NEED_RESCHED and forgoes the lazy preemption counter. + + Initial test do not expose any observable latency increasement, but +history shows that I've been proven wrong before :) + +The lazy preemption mode is per default on, but with +CONFIG_SCHED_DEBUG enabled it can be disabled via: + + # echo NO_PREEMPT_LAZY >/sys/kernel/debug/sched_features + +and reenabled via + + # echo PREEMPT_LAZY >/sys/kernel/debug/sched_features + +The test results so far are very machine and workload dependent, but +there is a clear trend that it enhances the non RT workload +performance. + +Signed-off-by: Thomas Gleixner +--- + include/linux/ftrace_event.h | 1 + include/linux/preempt.h | 38 ++++++++++++++++++++++++++- + include/linux/sched.h | 51 ++++++++++++++++++++++++++++++++---- + kernel/Kconfig.preempt | 6 ++++ + kernel/sched/core.c | 60 ++++++++++++++++++++++++++++++++++++++++++- + kernel/sched/fair.c | 16 +++++------ + kernel/sched/features.h | 3 ++ + kernel/sched/sched.h | 9 ++++++ + kernel/trace/trace.c | 41 +++++++++++++++++------------ + kernel/trace/trace.h | 2 + + kernel/trace/trace_output.c | 13 +++++++-- + 11 files changed, 206 insertions(+), 34 deletions(-) + +Index: linux-stable/include/linux/ftrace_event.h +=================================================================== +--- linux-stable.orig/include/linux/ftrace_event.h ++++ linux-stable/include/linux/ftrace_event.h +@@ -58,6 +58,7 @@ struct trace_entry { + int pid; + unsigned short migrate_disable; + unsigned short padding; ++ unsigned char preempt_lazy_count; + }; + + #define FTRACE_MAX_EVENT \ +Index: linux-stable/include/linux/preempt.h +=================================================================== +--- linux-stable.orig/include/linux/preempt.h ++++ linux-stable/include/linux/preempt.h +@@ -23,15 +23,38 @@ + + #define preempt_count() (current_thread_info()->preempt_count) + ++#ifdef CONFIG_PREEMPT_LAZY ++#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0) ++#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0) ++#define inc_preempt_lazy_count() add_preempt_lazy_count(1) ++#define dec_preempt_lazy_count() sub_preempt_lazy_count(1) ++#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count) ++#else ++#define add_preempt_lazy_count(val) do { } while (0) ++#define sub_preempt_lazy_count(val) do { } while (0) ++#define inc_preempt_lazy_count() do { } while (0) ++#define dec_preempt_lazy_count() do { } while (0) ++#define preempt_lazy_count() (0) ++#endif ++ + #ifdef CONFIG_PREEMPT + + asmlinkage void preempt_schedule(void); + ++# ifdef CONFIG_PREEMPT_LAZY + #define preempt_check_resched() \ + do { \ +- if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \ ++ if (unlikely(test_thread_flag(TIF_NEED_RESCHED) || \ ++ test_thread_flag(TIF_NEED_RESCHED_LAZY))) \ + preempt_schedule(); \ + } while (0) ++# else ++#define preempt_check_resched() \ ++do { \ ++ if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \ ++ preempt_schedule(); \ ++} while (0) ++# endif + + #ifdef CONFIG_CONTEXT_TRACKING + +@@ -64,6 +87,12 @@ do { \ + barrier(); \ + } while (0) + ++#define preempt_lazy_disable() \ ++do { \ ++ inc_preempt_lazy_count(); \ ++ barrier(); \ ++} while (0) ++ + #define sched_preempt_enable_no_resched() \ + do { \ + barrier(); \ +@@ -84,6 +113,13 @@ do { \ + barrier(); \ + preempt_check_resched(); \ + } 
while (0) ++ ++#define preempt_lazy_enable() \ ++do { \ ++ dec_preempt_lazy_count(); \ ++ barrier(); \ ++ preempt_check_resched(); \ ++} while (0) + + /* For debugging and tracer internals only! */ + #define add_preempt_count_notrace(val) \ +Index: linux-stable/include/linux/sched.h +=================================================================== +--- linux-stable.orig/include/linux/sched.h ++++ linux-stable/include/linux/sched.h +@@ -2454,6 +2454,52 @@ static inline int test_tsk_need_resched( + return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); + } + ++#ifdef CONFIG_PREEMPT_LAZY ++static inline void set_tsk_need_resched_lazy(struct task_struct *tsk) ++{ ++ set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); ++} ++ ++static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) ++{ ++ clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); ++} ++ ++static inline int test_tsk_need_resched_lazy(struct task_struct *tsk) ++{ ++ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY)); ++} ++ ++static inline int need_resched_lazy(void) ++{ ++ return test_thread_flag(TIF_NEED_RESCHED_LAZY); ++} ++ ++static inline int need_resched_now(void) ++{ ++ return test_thread_flag(TIF_NEED_RESCHED); ++} ++ ++static inline int need_resched(void) ++{ ++ return test_thread_flag(TIF_NEED_RESCHED) || ++ test_thread_flag(TIF_NEED_RESCHED_LAZY); ++} ++#else ++static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { } ++static inline int need_resched_lazy(void) { return 0; } ++ ++static inline int need_resched_now(void) ++{ ++ return test_thread_flag(TIF_NEED_RESCHED); ++} ++ ++static inline int need_resched(void) ++{ ++ return test_thread_flag(TIF_NEED_RESCHED); ++} ++#endif ++ + static inline int restart_syscall(void) + { + set_tsk_thread_flag(current, TIF_SIGPENDING); +@@ -2485,11 +2531,6 @@ static inline int signal_pending_state(l + return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); + } + +-static inline int need_resched(void) +-{ +- return unlikely(test_thread_flag(TIF_NEED_RESCHED)); +-} +- + /* + * cond_resched() and cond_resched_lock(): latency reduction via + * explicit rescheduling in places that are safe. 
The return +Index: linux-stable/kernel/Kconfig.preempt +=================================================================== +--- linux-stable.orig/kernel/Kconfig.preempt ++++ linux-stable/kernel/Kconfig.preempt +@@ -6,6 +6,12 @@ config PREEMPT_RT_BASE + bool + select PREEMPT + ++config HAVE_PREEMPT_LAZY ++ bool ++ ++config PREEMPT_LAZY ++ def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL ++ + choice + prompt "Preemption Model" + default PREEMPT_NONE +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -538,6 +538,37 @@ void resched_task(struct task_struct *p) + smp_send_reschedule(cpu); + } + ++#ifdef CONFIG_PREEMPT_LAZY ++void resched_task_lazy(struct task_struct *p) ++{ ++ int cpu; ++ ++ if (!sched_feat(PREEMPT_LAZY)) { ++ resched_task(p); ++ return; ++ } ++ ++ assert_raw_spin_locked(&task_rq(p)->lock); ++ ++ if (test_tsk_need_resched(p)) ++ return; ++ ++ if (test_tsk_need_resched_lazy(p)) ++ return; ++ ++ set_tsk_need_resched_lazy(p); ++ ++ cpu = task_cpu(p); ++ if (cpu == smp_processor_id()) ++ return; ++ ++ /* NEED_RESCHED_LAZY must be visible before we test polling */ ++ smp_mb(); ++ if (!tsk_is_polling(p)) ++ smp_send_reschedule(cpu); ++} ++#endif ++ + void resched_cpu(int cpu) + { + struct rq *rq = cpu_rq(cpu); +@@ -702,6 +733,17 @@ void resched_task(struct task_struct *p) + assert_raw_spin_locked(&task_rq(p)->lock); + set_tsk_need_resched(p); + } ++#ifdef CONFIG_PREEMPT_LAZY ++void resched_task_lazy(struct task_struct *p) ++{ ++ if (!sched_feat(PREEMPT_LAZY)) { ++ resched_task(p); ++ return; ++ } ++ assert_raw_spin_locked(&task_rq(p)->lock); ++ set_tsk_need_resched_lazy(p); ++} ++#endif + #endif /* CONFIG_SMP */ + + #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \ +@@ -1776,6 +1818,9 @@ void sched_fork(struct task_struct *p) + /* Want to start with kernel preemption disabled. */ + task_thread_info(p)->preempt_count = 1; + #endif ++#ifdef CONFIG_HAVE_PREEMPT_LAZY ++ task_thread_info(p)->preempt_lazy_count = 0; ++#endif + #ifdef CONFIG_SMP + plist_node_init(&p->pushable_tasks, MAX_PRIO); + #endif +@@ -2993,6 +3038,7 @@ void migrate_disable(void) + return; + } + ++ preempt_lazy_disable(); + pin_current_cpu(); + p->migrate_disable = 1; + preempt_enable(); +@@ -3048,6 +3094,7 @@ void migrate_enable(void) + + unpin_current_cpu(); + preempt_enable(); ++ preempt_lazy_enable(); + } + EXPORT_SYMBOL(migrate_enable); + #else +@@ -3184,6 +3231,7 @@ need_resched: + put_prev_task(rq, prev); + next = pick_next_task(rq); + clear_tsk_need_resched(prev); ++ clear_tsk_need_resched_lazy(prev); + rq->skip_clock_update = 0; + + if (likely(prev != next)) { +@@ -3275,6 +3323,14 @@ asmlinkage void __sched notrace preempt_ + if (likely(ti->preempt_count || irqs_disabled())) + return; + ++#ifdef CONFIG_PREEMPT_LAZY ++ /* ++ * Check for lazy preemption ++ */ ++ if (ti->preempt_lazy_count && !test_thread_flag(TIF_NEED_RESCHED)) ++ return; ++#endif ++ + do { + add_preempt_count_notrace(PREEMPT_ACTIVE); + /* +@@ -4955,7 +5011,9 @@ void __cpuinit init_idle(struct task_str + + /* Set the preempt count _outside_ the spinlocks! 
*/ + task_thread_info(idle)->preempt_count = 0; +- ++#ifdef CONFIG_HAVE_PREEMPT_LAZY ++ task_thread_info(idle)->preempt_lazy_count = 0; ++#endif + /* + * The idle tasks have their own, simple scheduling class: + */ +Index: linux-stable/kernel/sched/fair.c +=================================================================== +--- linux-stable.orig/kernel/sched/fair.c ++++ linux-stable/kernel/sched/fair.c +@@ -1847,7 +1847,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq + ideal_runtime = sched_slice(cfs_rq, curr); + delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; + if (delta_exec > ideal_runtime) { +- resched_task(rq_of(cfs_rq)->curr); ++ resched_task_lazy(rq_of(cfs_rq)->curr); + /* + * The current task ran long enough, ensure it doesn't get + * re-elected due to buddy favours. +@@ -1871,7 +1871,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq + return; + + if (delta > ideal_runtime) +- resched_task(rq_of(cfs_rq)->curr); ++ resched_task_lazy(rq_of(cfs_rq)->curr); + } + + static void +@@ -1991,7 +1991,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc + * validating it and just reschedule. + */ + if (queued) { +- resched_task(rq_of(cfs_rq)->curr); ++ resched_task_lazy(rq_of(cfs_rq)->curr); + return; + } + /* +@@ -2180,7 +2180,7 @@ static void __account_cfs_rq_runtime(str + * hierarchy can be throttled + */ + if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) +- resched_task(rq_of(cfs_rq)->curr); ++ resched_task_lazy(rq_of(cfs_rq)->curr); + } + + static __always_inline +@@ -2765,7 +2765,7 @@ static void hrtick_start_fair(struct rq + + if (delta < 0) { + if (rq->curr == p) +- resched_task(p); ++ resched_task_lazy(p); + return; + } + +@@ -3590,7 +3590,7 @@ static void check_preempt_wakeup(struct + return; + + preempt: +- resched_task(curr); ++ resched_task_lazy(curr); + /* + * Only set the backward buddy when the current task is still + * on the rq. This can happen when a wakeup gets interleaved +@@ -5795,7 +5795,7 @@ static void task_fork_fair(struct task_s + * 'current' within the tree based on its new key value. 
+ */ + swap(curr->vruntime, se->vruntime); +- resched_task(rq->curr); ++ resched_task_lazy(rq->curr); + } + + se->vruntime -= cfs_rq->min_vruntime; +@@ -5820,7 +5820,7 @@ prio_changed_fair(struct rq *rq, struct + */ + if (rq->curr == p) { + if (p->prio > oldprio) +- resched_task(rq->curr); ++ resched_task_lazy(rq->curr); + } else + check_preempt_curr(rq, p, 0); + } +Index: linux-stable/kernel/sched/features.h +=================================================================== +--- linux-stable.orig/kernel/sched/features.h ++++ linux-stable/kernel/sched/features.h +@@ -58,6 +58,9 @@ SCHED_FEAT(NONTASK_POWER, true) + SCHED_FEAT(TTWU_QUEUE, true) + #else + SCHED_FEAT(TTWU_QUEUE, false) ++# ifdef CONFIG_PREEMPT_LAZY ++SCHED_FEAT(PREEMPT_LAZY, true) ++# endif + #endif + + SCHED_FEAT(FORCE_SD_OVERLAP, false) +Index: linux-stable/kernel/sched/sched.h +=================================================================== +--- linux-stable.orig/kernel/sched/sched.h ++++ linux-stable/kernel/sched/sched.h +@@ -1059,6 +1059,15 @@ extern void init_sched_fair_class(void); + extern void resched_task(struct task_struct *p); + extern void resched_cpu(int cpu); + ++#ifdef CONFIG_PREEMPT_LAZY ++extern void resched_task_lazy(struct task_struct *tsk); ++#else ++static inline void resched_task_lazy(struct task_struct *tsk) ++{ ++ resched_task(tsk); ++} ++#endif ++ + extern struct rt_bandwidth def_rt_bandwidth; + extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); + +Index: linux-stable/kernel/trace/trace.c +=================================================================== +--- linux-stable.orig/kernel/trace/trace.c ++++ linux-stable/kernel/trace/trace.c +@@ -1409,6 +1409,7 @@ tracing_generic_entry_update(struct trac + struct task_struct *tsk = current; + + entry->preempt_count = pc & 0xff; ++ entry->preempt_lazy_count = preempt_lazy_count(); + entry->pid = (tsk) ? tsk->pid : 0; + entry->flags = + #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT +@@ -1418,7 +1419,8 @@ tracing_generic_entry_update(struct trac + #endif + ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | + ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | +- (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); ++ (need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) | ++ (need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0); + + entry->migrate_disable = (tsk) ? 
__migrate_disabled(tsk) & 0xFF : 0; + } +@@ -2319,15 +2321,17 @@ get_total_entries(struct trace_buffer *b + + static void print_lat_help_header(struct seq_file *m) + { +- seq_puts(m, "# _------=> CPU# \n"); +- seq_puts(m, "# / _-----=> irqs-off \n"); +- seq_puts(m, "# | / _----=> need-resched \n"); +- seq_puts(m, "# || / _---=> hardirq/softirq \n"); +- seq_puts(m, "# ||| / _--=> preempt-depth \n"); +- seq_puts(m, "# |||| / _--=> migrate-disable\n"); +- seq_puts(m, "# ||||| / delay \n"); +- seq_puts(m, "# cmd pid |||||| time | caller \n"); +- seq_puts(m, "# \\ / ||||| \\ | / \n"); ++ seq_puts(m, "# _--------=> CPU# \n"); ++ seq_puts(m, "# / _-------=> irqs-off \n"); ++ seq_puts(m, "# | / _------=> need-resched \n"); ++ seq_puts(m, "# || / _-----=> need-resched_lazy \n"); ++ seq_puts(m, "# ||| / _----=> hardirq/softirq \n"); ++ seq_puts(m, "# |||| / _---=> preempt-depth \n"); ++ seq_puts(m, "# ||||| / _--=> preempt-lazy-depth\n"); ++ seq_puts(m, "# |||||| / _-=> migrate-disable \n"); ++ seq_puts(m, "# ||||||| / delay \n"); ++ seq_puts(m, "# cmd pid |||||||| time | caller \n"); ++ seq_puts(m, "# \\ / |||||||| \\ | / \n"); + } + + static void print_event_info(struct trace_buffer *buf, struct seq_file *m) +@@ -2351,13 +2355,16 @@ static void print_func_help_header(struc + static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m) + { + print_event_info(buf, m); +- seq_puts(m, "# _-----=> irqs-off\n"); +- seq_puts(m, "# / _----=> need-resched\n"); +- seq_puts(m, "# | / _---=> hardirq/softirq\n"); +- seq_puts(m, "# || / _--=> preempt-depth\n"); +- seq_puts(m, "# ||| / delay\n"); +- seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"); +- seq_puts(m, "# | | | |||| | |\n"); ++ seq_puts(m, "# _-------=> irqs-off \n"); ++ seq_puts(m, "# / _------=> need-resched \n"); ++ seq_puts(m, "# |/ _-----=> need-resched_lazy \n"); ++ seq_puts(m, "# ||/ _----=> hardirq/softirq \n"); ++ seq_puts(m, "# |||/ _---=> preempt-depth \n"); ++ seq_puts(m, "# ||||/ _--=> preempt-lazy-depth\n"); ++ seq_puts(m, "# ||||| / _-=> migrate-disable \n"); ++ seq_puts(m, "# |||||| / delay\n"); ++ seq_puts(m, "# TASK-PID CPU# |||||| TIMESTAMP FUNCTION\n"); ++ seq_puts(m, "# | | | |||||| | |\n"); + } + + void +Index: linux-stable/kernel/trace/trace.h +=================================================================== +--- linux-stable.orig/kernel/trace/trace.h ++++ linux-stable/kernel/trace/trace.h +@@ -117,6 +117,7 @@ struct kretprobe_trace_entry_head { + * NEED_RESCHED - reschedule is requested + * HARDIRQ - inside an interrupt handler + * SOFTIRQ - inside a softirq handler ++ * NEED_RESCHED_LAZY - lazy reschedule is requested + */ + enum trace_flag_type { + TRACE_FLAG_IRQS_OFF = 0x01, +@@ -124,6 +125,7 @@ enum trace_flag_type { + TRACE_FLAG_NEED_RESCHED = 0x04, + TRACE_FLAG_HARDIRQ = 0x08, + TRACE_FLAG_SOFTIRQ = 0x10, ++ TRACE_FLAG_NEED_RESCHED_LAZY = 0x20, + }; + + #define TRACE_BUF_SIZE 1024 +Index: linux-stable/kernel/trace/trace_output.c +=================================================================== +--- linux-stable.orig/kernel/trace/trace_output.c ++++ linux-stable/kernel/trace/trace_output.c +@@ -606,6 +606,7 @@ int trace_print_lat_fmt(struct trace_seq + { + char hardsoft_irq; + char need_resched; ++ char need_resched_lazy; + char irqs_off; + int hardirq; + int softirq; +@@ -620,14 +621,17 @@ int trace_print_lat_fmt(struct trace_seq + '.'; + need_resched = + (entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'; ++ need_resched_lazy = ++ (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 
'L' : '.'; + hardsoft_irq = + (hardirq && softirq) ? 'H' : + hardirq ? 'h' : + softirq ? 's' : + '.'; + +- if (!trace_seq_printf(s, "%c%c%c", +- irqs_off, need_resched, hardsoft_irq)) ++ if (!trace_seq_printf(s, "%c%c%c%c", ++ irqs_off, need_resched, need_resched_lazy, ++ hardsoft_irq)) + return 0; + + if (entry->preempt_count) +@@ -635,6 +639,11 @@ int trace_print_lat_fmt(struct trace_seq + else + ret = trace_seq_putc(s, '.'); + ++ if (entry->preempt_lazy_count) ++ ret = trace_seq_printf(s, "%x", entry->preempt_lazy_count); ++ else ++ ret = trace_seq_putc(s, '.'); ++ + if (entry->migrate_disable) + ret = trace_seq_printf(s, "%x", entry->migrate_disable); + else diff --git a/debian/patches/features/all/rt/preempt-nort-rt-variants.patch b/debian/patches/features/all/rt/preempt-nort-rt-variants.patch new file mode 100644 index 000000000..5f6690143 --- /dev/null +++ b/debian/patches/features/all/rt/preempt-nort-rt-variants.patch @@ -0,0 +1,55 @@ +From: Thomas Gleixner +Date: Fri, 24 Jul 2009 12:38:56 +0200 +Subject: preempt: Provide preempt_*_(no)rt variants +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +RT needs a few preempt_disable/enable points which are not necessary +otherwise. Implement variants to avoid #ifdeffery. + +Signed-off-by: Thomas Gleixner + +--- + include/linux/preempt.h | 20 ++++++++++++++++++-- + 1 file changed, 18 insertions(+), 2 deletions(-) + +Index: linux-stable/include/linux/preempt.h +=================================================================== +--- linux-stable.orig/include/linux/preempt.h ++++ linux-stable/include/linux/preempt.h +@@ -70,11 +70,15 @@ do { \ + dec_preempt_count(); \ + } while (0) + +-#define preempt_enable_no_resched() sched_preempt_enable_no_resched() ++#ifndef CONFIG_PREEMPT_RT_BASE ++# define preempt_enable_no_resched() sched_preempt_enable_no_resched() ++#else ++# define preempt_enable_no_resched() preempt_enable() ++#endif + + #define preempt_enable() \ + do { \ +- preempt_enable_no_resched(); \ ++ sched_preempt_enable_no_resched(); \ + barrier(); \ + preempt_check_resched(); \ + } while (0) +@@ -126,6 +130,18 @@ do { \ + + #endif /* CONFIG_PREEMPT_COUNT */ + ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define preempt_disable_rt() preempt_disable() ++# define preempt_enable_rt() preempt_enable() ++# define preempt_disable_nort() barrier() ++# define preempt_enable_nort() barrier() ++#else ++# define preempt_disable_rt() barrier() ++# define preempt_enable_rt() barrier() ++# define preempt_disable_nort() preempt_disable() ++# define preempt_enable_nort() preempt_enable() ++#endif ++ + #ifdef CONFIG_PREEMPT_NOTIFIERS + + struct preempt_notifier; diff --git a/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch b/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch new file mode 100644 index 000000000..1e528e4d8 --- /dev/null +++ b/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch @@ -0,0 +1,38 @@ +Subject: printk: %27force_early_printk%27 boot param to help with debugging +From: Peter Zijlstra +Date: Fri, 02 Sep 2011 14:41:29 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Subject: printk: 'force_early_printk' boot param to help with debugging +From: Peter Zijlstra +Date: Fri Sep 02 14:29:33 CEST 2011 + +Gives me an option to screw printk and actually see what the machine +says. 
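The hook added below is an ordinary early_param() handler, so the switch is flipped purely from the boot loader. Assuming an early console has also been set up, a kernel command line along the lines of

    earlyprintk=serial,ttyS0,115200 force_early_printk

sends everything printk() would normally log straight to that early console, bypassing the regular console path (the killswitch itself comes from the printk-kill patch further down).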
+ +Signed-off-by: Peter Zijlstra +Link: http://lkml.kernel.org/r/1314967289.1301.11.camel@twins +Signed-off-by: Thomas Gleixner +Link: http://lkml.kernel.org/n/tip-ykb97nsfmobq44xketrxs977@git.kernel.org +--- + kernel/printk.c | 7 +++++++ + 1 file changed, 7 insertions(+) + +Index: linux-stable/kernel/printk.c +=================================================================== +--- linux-stable.orig/kernel/printk.c ++++ linux-stable/kernel/printk.c +@@ -1523,6 +1523,13 @@ asmlinkage void early_printk(const char + */ + static bool __read_mostly printk_killswitch; + ++static int __init force_early_printk_setup(char *str) ++{ ++ printk_killswitch = true; ++ return 0; ++} ++early_param("force_early_printk", force_early_printk_setup); ++ + void printk_kill(void) + { + printk_killswitch = true; diff --git a/debian/patches/features/all/rt/printk-kill.patch b/debian/patches/features/all/rt/printk-kill.patch new file mode 100644 index 000000000..0d9cac91b --- /dev/null +++ b/debian/patches/features/all/rt/printk-kill.patch @@ -0,0 +1,175 @@ +Subject: printk-kill.patch +From: Ingo Molnar +Date: Fri, 22 Jul 2011 17:58:40 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/printk.h | 3 + + kernel/printk.c | 79 ++++++++++++++++++++++++++++++++++--------------- + kernel/watchdog.c | 15 ++++++++- + 3 files changed, 71 insertions(+), 26 deletions(-) + +Index: linux-stable/include/linux/printk.h +=================================================================== +--- linux-stable.orig/include/linux/printk.h ++++ linux-stable/include/linux/printk.h +@@ -101,9 +101,11 @@ int no_printk(const char *fmt, ...) + extern asmlinkage __printf(1, 2) + void early_printk(const char *fmt, ...); + void early_vprintk(const char *fmt, va_list ap); ++extern void printk_kill(void); + #else + static inline __printf(1, 2) __cold + void early_printk(const char *s, ...) { } ++static inline void printk_kill(void) { } + #endif + + #ifdef CONFIG_PRINTK +@@ -137,7 +139,6 @@ extern int __printk_ratelimit(const char + #define printk_ratelimit() __printk_ratelimit(__func__) + extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, + unsigned int interval_msec); +- + extern int printk_delay_msec; + extern int dmesg_restrict; + extern int kptr_restrict; +Index: linux-stable/kernel/printk.c +=================================================================== +--- linux-stable.orig/kernel/printk.c ++++ linux-stable/kernel/printk.c +@@ -1493,6 +1493,55 @@ static size_t cont_print_text(char *text + return textlen; + } + ++#ifdef CONFIG_EARLY_PRINTK ++struct console *early_console; ++ ++void early_vprintk(const char *fmt, va_list ap) ++{ ++ if (early_console) { ++ char buf[512]; ++ int n = vscnprintf(buf, sizeof(buf), fmt, ap); ++ ++ early_console->write(early_console, buf, n); ++ } ++} ++ ++asmlinkage void early_printk(const char *fmt, ...) ++{ ++ va_list ap; ++ ++ va_start(ap, fmt); ++ early_vprintk(fmt, ap); ++ va_end(ap); ++} ++ ++/* ++ * This is independent of any log levels - a global ++ * kill switch that turns off all of printk. ++ * ++ * Used by the NMI watchdog if early-printk is enabled. 
++ */ ++static bool __read_mostly printk_killswitch; ++ ++void printk_kill(void) ++{ ++ printk_killswitch = true; ++} ++ ++static int forced_early_printk(const char *fmt, va_list ap) ++{ ++ if (!printk_killswitch) ++ return 0; ++ early_vprintk(fmt, ap); ++ return 1; ++} ++#else ++static inline int forced_early_printk(const char *fmt, va_list ap) ++{ ++ return 0; ++} ++#endif ++ + asmlinkage int vprintk_emit(int facility, int level, + const char *dict, size_t dictlen, + const char *fmt, va_list args) +@@ -1506,6 +1555,13 @@ asmlinkage int vprintk_emit(int facility + int this_cpu; + int printed_len = 0; + ++ /* ++ * Fall back to early_printk if a debugging subsystem has ++ * killed printk output ++ */ ++ if (unlikely(forced_early_printk(fmt, args))) ++ return 1; ++ + boot_delay_msec(level); + printk_delay(); + +@@ -1728,29 +1784,6 @@ static size_t cont_print_text(char *text + + #endif /* CONFIG_PRINTK */ + +-#ifdef CONFIG_EARLY_PRINTK +-struct console *early_console; +- +-void early_vprintk(const char *fmt, va_list ap) +-{ +- if (early_console) { +- char buf[512]; +- int n = vscnprintf(buf, sizeof(buf), fmt, ap); +- +- early_console->write(early_console, buf, n); +- } +-} +- +-asmlinkage void early_printk(const char *fmt, ...) +-{ +- va_list ap; +- +- va_start(ap, fmt); +- early_vprintk(fmt, ap); +- va_end(ap); +-} +-#endif +- + static int __add_preferred_console(char *name, int idx, char *options, + char *brl_options) + { +Index: linux-stable/kernel/watchdog.c +=================================================================== +--- linux-stable.orig/kernel/watchdog.c ++++ linux-stable/kernel/watchdog.c +@@ -205,6 +205,8 @@ static int is_softlockup(unsigned long t + + #ifdef CONFIG_HARDLOCKUP_DETECTOR + ++static DEFINE_RAW_SPINLOCK(watchdog_output_lock); ++ + static struct perf_event_attr wd_hw_attr = { + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES, +@@ -239,10 +241,19 @@ static void watchdog_overflow_callback(s + if (__this_cpu_read(hard_watchdog_warn) == true) + return; + +- if (hardlockup_panic) ++ /* ++ * If early-printk is enabled then make sure we do not ++ * lock up in printk() and kill console logging: ++ */ ++ printk_kill(); ++ ++ if (hardlockup_panic) { + panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu); +- else ++ } else { ++ raw_spin_lock(&watchdog_output_lock); + WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu); ++ raw_spin_unlock(&watchdog_output_lock); ++ } + + __this_cpu_write(hard_watchdog_warn, true); + return; diff --git a/debian/patches/features/all/rt/printk-rt-aware.patch b/debian/patches/features/all/rt/printk-rt-aware.patch new file mode 100644 index 000000000..2157987e6 --- /dev/null +++ b/debian/patches/features/all/rt/printk-rt-aware.patch @@ -0,0 +1,104 @@ +Subject: printk-rt-aware.patch +From: Thomas Gleixner +Date: Wed, 19 Sep 2012 14:50:37 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/printk.c | 33 +++++++++++++++++++++++++++++---- + 1 file changed, 29 insertions(+), 4 deletions(-) + +Index: linux-stable/kernel/printk.c +=================================================================== +--- linux-stable.orig/kernel/printk.c ++++ linux-stable/kernel/printk.c +@@ -1277,6 +1277,7 @@ static void call_console_drivers(int lev + if (!console_drivers) + return; + ++ migrate_disable(); + for_each_console(con) { + if (exclusive_console && con != exclusive_console) + continue; +@@ -1289,6 +1290,7 @@ static void 
call_console_drivers(int lev + continue; + con->write(con, text, len); + } ++ migrate_enable(); + } + + /* +@@ -1348,12 +1350,18 @@ static inline int can_use_console(unsign + * interrupts disabled. It should return with 'lockbuf_lock' + * released but interrupts still disabled. + */ +-static int console_trylock_for_printk(unsigned int cpu) ++static int console_trylock_for_printk(unsigned int cpu, unsigned long flags) + __releases(&logbuf_lock) + { + int retval = 0, wake = 0; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ int lock = !early_boot_irqs_disabled && !irqs_disabled_flags(flags) && ++ (preempt_count() <= 1); ++#else ++ int lock = 1; ++#endif + +- if (console_trylock()) { ++ if (lock && console_trylock()) { + retval = 1; + + /* +@@ -1688,8 +1696,15 @@ asmlinkage int vprintk_emit(int facility + * The console_trylock_for_printk() function will release 'logbuf_lock' + * regardless of whether it actually gets the console semaphore or not. + */ +- if (console_trylock_for_printk(this_cpu)) ++ if (console_trylock_for_printk(this_cpu, flags)) { ++#ifndef CONFIG_PREEMPT_RT_FULL ++ console_unlock(); ++#else ++ raw_local_irq_restore(flags); + console_unlock(); ++ raw_local_irq_save(flags); ++#endif ++ } + + lockdep_on(); + out_restore_irqs: +@@ -2043,11 +2058,16 @@ static void console_cont_flush(char *tex + goto out; + + len = cont_print_text(text, size); ++#ifndef CONFIG_PREEMPT_RT_FULL + raw_spin_unlock(&logbuf_lock); + stop_critical_timings(); + call_console_drivers(cont.level, text, len); + start_critical_timings(); + local_irq_restore(flags); ++#else ++ raw_spin_unlock_irqrestore(&logbuf_lock, flags); ++ call_console_drivers(cont.level, text, len); ++#endif + return; + out: + raw_spin_unlock_irqrestore(&logbuf_lock, flags); +@@ -2130,12 +2150,17 @@ skip: + console_idx = log_next(console_idx); + console_seq++; + console_prev = msg->flags; +- raw_spin_unlock(&logbuf_lock); + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ raw_spin_unlock(&logbuf_lock); + stop_critical_timings(); /* don't trace print latency */ + call_console_drivers(level, text, len); + start_critical_timings(); + local_irq_restore(flags); ++#else ++ raw_spin_unlock_irqrestore(&logbuf_lock, flags); ++ call_console_drivers(level, text, len); ++#endif + } + console_locked = 0; + mutex_release(&console_lock_dep_map, 1, _RET_IP_); diff --git a/debian/patches/features/all/rt/radix-tree-rt-aware.patch b/debian/patches/features/all/rt/radix-tree-rt-aware.patch new file mode 100644 index 000000000..d920f87b9 --- /dev/null +++ b/debian/patches/features/all/rt/radix-tree-rt-aware.patch @@ -0,0 +1,73 @@ +Subject: radix-tree-rt-aware.patch +From: Thomas Gleixner +Date: Sun, 17 Jul 2011 21:33:18 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/radix-tree.h | 8 +++++++- + lib/radix-tree.c | 5 ++++- + 2 files changed, 11 insertions(+), 2 deletions(-) + +Index: linux-stable/include/linux/radix-tree.h +=================================================================== +--- linux-stable.orig/include/linux/radix-tree.h ++++ linux-stable/include/linux/radix-tree.h +@@ -230,7 +230,13 @@ unsigned long radix_tree_next_hole(struc + unsigned long index, unsigned long max_scan); + unsigned long radix_tree_prev_hole(struct radix_tree_root *root, + unsigned long index, unsigned long max_scan); ++ ++#ifndef CONFIG_PREEMPT_RT_FULL + int radix_tree_preload(gfp_t gfp_mask); ++#else ++static inline int radix_tree_preload(gfp_t gm) { return 0; } ++#endif ++ + void 
radix_tree_init(void); + void *radix_tree_tag_set(struct radix_tree_root *root, + unsigned long index, unsigned int tag); +@@ -255,7 +261,7 @@ unsigned long radix_tree_locate_item(str + + static inline void radix_tree_preload_end(void) + { +- preempt_enable(); ++ preempt_enable_nort(); + } + + /** +Index: linux-stable/lib/radix-tree.c +=================================================================== +--- linux-stable.orig/lib/radix-tree.c ++++ linux-stable/lib/radix-tree.c +@@ -215,12 +215,13 @@ radix_tree_node_alloc(struct radix_tree_ + * succeed in getting a node here (and never reach + * kmem_cache_alloc) + */ +- rtp = &__get_cpu_var(radix_tree_preloads); ++ rtp = &get_cpu_var(radix_tree_preloads); + if (rtp->nr) { + ret = rtp->nodes[rtp->nr - 1]; + rtp->nodes[rtp->nr - 1] = NULL; + rtp->nr--; + } ++ put_cpu_var(radix_tree_preloads); + } + if (ret == NULL) + ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); +@@ -255,6 +256,7 @@ radix_tree_node_free(struct radix_tree_n + call_rcu(&node->rcu_head, radix_tree_node_rcu_free); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Load up this CPU's radix_tree_node buffer with sufficient objects to + * ensure that the addition of a single element in the tree cannot fail. On +@@ -289,6 +291,7 @@ out: + return ret; + } + EXPORT_SYMBOL(radix_tree_preload); ++#endif + + /* + * Return the maximum key which can be store into a diff --git a/debian/patches/features/all/rt/random-make-it-work-on-rt.patch b/debian/patches/features/all/rt/random-make-it-work-on-rt.patch new file mode 100644 index 000000000..738407a0f --- /dev/null +++ b/debian/patches/features/all/rt/random-make-it-work-on-rt.patch @@ -0,0 +1,125 @@ +Subject: random: Make it work on rt +From: Thomas Gleixner +Date: Tue, 21 Aug 2012 20:38:50 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Delegate the random insertion to the forced threaded interrupt +handler. Store the return IP of the hard interrupt handler in the irq +descriptor and feed it into the random generator as a source of +entropy. + +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + drivers/char/random.c | 10 ++++++---- + include/linux/irqdesc.h | 1 + + include/linux/random.h | 2 +- + kernel/irq/handle.c | 8 +++++++- + kernel/irq/manage.c | 6 ++++++ + 5 files changed, 21 insertions(+), 6 deletions(-) + +Index: linux-stable/drivers/char/random.c +=================================================================== +--- linux-stable.orig/drivers/char/random.c ++++ linux-stable/drivers/char/random.c +@@ -742,18 +742,16 @@ EXPORT_SYMBOL_GPL(add_input_randomness); + + static DEFINE_PER_CPU(struct fast_pool, irq_randomness); + +-void add_interrupt_randomness(int irq, int irq_flags) ++void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) + { + struct entropy_store *r; + struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness); +- struct pt_regs *regs = get_irq_regs(); + unsigned long now = jiffies; + __u32 input[4], cycles = get_cycles(); + + input[0] = cycles ^ jiffies; + input[1] = irq; +- if (regs) { +- __u64 ip = instruction_pointer(regs); ++ if (ip) { + input[2] = ip; + input[3] = ip >> 32; + } +@@ -767,7 +765,11 @@ void add_interrupt_randomness(int irq, i + fast_pool->last = now; + + r = nonblocking_pool.initialized ? 
&input_pool : &nonblocking_pool; ++#ifndef CONFIG_PREEMPT_RT_FULL + __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL); ++#else ++ mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL); ++#endif + /* + * If we don't have a valid cycle counter, and we see + * back-to-back timer interrupts, then skip giving credit for +Index: linux-stable/include/linux/irqdesc.h +=================================================================== +--- linux-stable.orig/include/linux/irqdesc.h ++++ linux-stable/include/linux/irqdesc.h +@@ -52,6 +52,7 @@ struct irq_desc { + unsigned int irq_count; /* For detecting broken IRQs */ + unsigned long last_unhandled; /* Aging timer for unhandled count */ + unsigned int irqs_unhandled; ++ u64 random_ip; + raw_spinlock_t lock; + struct cpumask *percpu_enabled; + #ifdef CONFIG_SMP +Index: linux-stable/include/linux/random.h +=================================================================== +--- linux-stable.orig/include/linux/random.h ++++ linux-stable/include/linux/random.h +@@ -12,7 +12,7 @@ + extern void add_device_randomness(const void *, unsigned int); + extern void add_input_randomness(unsigned int type, unsigned int code, + unsigned int value); +-extern void add_interrupt_randomness(int irq, int irq_flags); ++extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip); + + extern void get_random_bytes(void *buf, int nbytes); + extern void get_random_bytes_arch(void *buf, int nbytes); +Index: linux-stable/kernel/irq/handle.c +=================================================================== +--- linux-stable.orig/kernel/irq/handle.c ++++ linux-stable/kernel/irq/handle.c +@@ -132,6 +132,8 @@ static void irq_wake_thread(struct irq_d + irqreturn_t + handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action) + { ++ struct pt_regs *regs = get_irq_regs(); ++ u64 ip = regs ? instruction_pointer(regs) : 0; + irqreturn_t retval = IRQ_NONE; + unsigned int flags = 0, irq = desc->irq_data.irq; + +@@ -172,7 +174,11 @@ handle_irq_event_percpu(struct irq_desc + action = action->next; + } while (action); + +- add_interrupt_randomness(irq, flags); ++#ifndef CONFIG_PREEMPT_RT_FULL ++ add_interrupt_randomness(irq, flags, ip); ++#else ++ desc->random_ip = ip; ++#endif + + if (!noirqdebug) + note_interrupt(irq, desc, retval); +Index: linux-stable/kernel/irq/manage.c +=================================================================== +--- linux-stable.orig/kernel/irq/manage.c ++++ linux-stable/kernel/irq/manage.c +@@ -880,6 +880,12 @@ static int irq_thread(void *data) + if (!noirqdebug) + note_interrupt(action->irq, desc, action_ret); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ migrate_disable(); ++ add_interrupt_randomness(action->irq, 0, ++ desc->random_ip ^ (unsigned long) action); ++ migrate_enable(); ++#endif + wake_threads_waitq(desc); + } + diff --git a/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch b/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch new file mode 100644 index 000000000..83f4be47b --- /dev/null +++ b/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch @@ -0,0 +1,27 @@ +Subject: rcu: Disable RCU_FAST_NO_HZ on RT +From: Thomas Gleixner +Date: Sun, 28 Oct 2012 13:26:09 +0000 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +This uses a timer_list timer from the irq disabled guts of the idle +code. Disable it for now to prevent wreckage. 
+ +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + init/Kconfig | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: linux-stable/init/Kconfig +=================================================================== +--- linux-stable.orig/init/Kconfig ++++ linux-stable/init/Kconfig +@@ -577,7 +577,7 @@ config RCU_FANOUT_EXACT + + config RCU_FAST_NO_HZ + bool "Accelerate last non-dyntick-idle CPU's grace periods" +- depends on NO_HZ_COMMON && SMP ++ depends on NO_HZ_COMMON && SMP && !PREEMPT_RT_FULL + default n + help + This option permits CPUs to enter dynticks-idle state even if diff --git a/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch b/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch new file mode 100644 index 000000000..c6f315b51 --- /dev/null +++ b/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch @@ -0,0 +1,267 @@ +Subject: rcu: Merge RCU-bh into RCU-preempt +Date: Wed, 5 Oct 2011 11:59:38 -0700 +From: Thomas Gleixner +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +The Linux kernel has long RCU-bh read-side critical sections that +intolerably increase scheduling latency under mainline's RCU-bh rules, +which include RCU-bh read-side critical sections being non-preemptible. +This patch therefore arranges for RCU-bh to be implemented in terms of +RCU-preempt for CONFIG_PREEMPT_RT_FULL=y. + +This has the downside of defeating the purpose of RCU-bh, namely, +handling the case where the system is subjected to a network-based +denial-of-service attack that keeps at least one CPU doing full-time +softirq processing. This issue will be fixed by a later commit. + +The current commit will need some work to make it appropriate for +mainline use, for example, it needs to be extended to cover Tiny RCU. + +[ paulmck: Added a useful changelog ] + +Signed-off-by: Thomas Gleixner +Signed-off-by: Paul E. McKenney +Link: http://lkml.kernel.org/r/20111005185938.GA20403@linux.vnet.ibm.com +Signed-off-by: Thomas Gleixner + +--- + include/linux/rcupdate.h | 25 +++++++++++++++++++++++++ + include/linux/rcutree.h | 18 ++++++++++++++++-- + kernel/rcupdate.c | 2 ++ + kernel/rcutree.c | 10 ++++++++++ + 4 files changed, 53 insertions(+), 2 deletions(-) + +Index: linux-stable/include/linux/rcupdate.h +=================================================================== +--- linux-stable.orig/include/linux/rcupdate.h ++++ linux-stable/include/linux/rcupdate.h +@@ -128,6 +128,9 @@ extern void call_rcu(struct rcu_head *he + + #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ + ++#ifdef CONFIG_PREEMPT_RT_FULL ++#define call_rcu_bh call_rcu ++#else + /** + * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. + * @head: structure to be used for queueing the RCU updates. +@@ -151,6 +154,7 @@ extern void call_rcu(struct rcu_head *he + */ + extern void call_rcu_bh(struct rcu_head *head, + void (*func)(struct rcu_head *head)); ++#endif + + /** + * call_rcu_sched() - Queue an RCU for invocation after sched grace period. 
+@@ -224,7 +228,13 @@ static inline int rcu_preempt_depth(void + + /* Internal to kernel */ + extern void rcu_sched_qs(int cpu); ++ ++#ifndef CONFIG_PREEMPT_RT_FULL + extern void rcu_bh_qs(int cpu); ++#else ++static inline void rcu_bh_qs(int cpu) { } ++#endif ++ + extern void rcu_check_callbacks(int cpu, int user); + struct notifier_block; + extern void rcu_idle_enter(void); +@@ -374,7 +384,14 @@ static inline int rcu_read_lock_held(voi + * rcu_read_lock_bh_held() is defined out of line to avoid #include-file + * hell. + */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++static inline int rcu_read_lock_bh_held(void) ++{ ++ return rcu_read_lock_held(); ++} ++#else + extern int rcu_read_lock_bh_held(void); ++#endif + + /** + * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section? +@@ -831,10 +848,14 @@ static inline void rcu_read_unlock(void) + static inline void rcu_read_lock_bh(void) + { + local_bh_disable(); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ rcu_read_lock(); ++#else + __acquire(RCU_BH); + rcu_lock_acquire(&rcu_bh_lock_map); + rcu_lockdep_assert(!rcu_is_cpu_idle(), + "rcu_read_lock_bh() used illegally while idle"); ++#endif + } + + /* +@@ -844,10 +865,14 @@ static inline void rcu_read_lock_bh(void + */ + static inline void rcu_read_unlock_bh(void) + { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ rcu_read_unlock(); ++#else + rcu_lockdep_assert(!rcu_is_cpu_idle(), + "rcu_read_unlock_bh() used illegally while idle"); + rcu_lock_release(&rcu_bh_lock_map); + __release(RCU_BH); ++#endif + local_bh_enable(); + } + +Index: linux-stable/include/linux/rcutree.h +=================================================================== +--- linux-stable.orig/include/linux/rcutree.h ++++ linux-stable/include/linux/rcutree.h +@@ -45,7 +45,11 @@ static inline void rcu_virt_note_context + rcu_note_context_switch(cpu); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define synchronize_rcu_bh synchronize_rcu ++#else + extern void synchronize_rcu_bh(void); ++#endif + extern void synchronize_sched_expedited(void); + extern void synchronize_rcu_expedited(void); + +@@ -73,20 +77,30 @@ static inline void synchronize_rcu_bh_ex + } + + extern void rcu_barrier(void); ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define rcu_barrier_bh rcu_barrier ++#else + extern void rcu_barrier_bh(void); ++#endif + extern void rcu_barrier_sched(void); + + extern unsigned long rcutorture_testseq; + extern unsigned long rcutorture_vernum; + extern long rcu_batches_completed(void); +-extern long rcu_batches_completed_bh(void); + extern long rcu_batches_completed_sched(void); + + extern void rcu_force_quiescent_state(void); +-extern void rcu_bh_force_quiescent_state(void); + extern void rcu_sched_force_quiescent_state(void); + + extern void rcu_scheduler_starting(void); + extern int rcu_scheduler_active __read_mostly; + ++#ifndef CONFIG_PREEMPT_RT_FULL ++extern void rcu_bh_force_quiescent_state(void); ++extern long rcu_batches_completed_bh(void); ++#else ++# define rcu_bh_force_quiescent_state rcu_force_quiescent_state ++# define rcu_batches_completed_bh rcu_batches_completed ++#endif ++ + #endif /* __LINUX_RCUTREE_H */ +Index: linux-stable/kernel/rcupdate.c +=================================================================== +--- linux-stable.orig/kernel/rcupdate.c ++++ linux-stable/kernel/rcupdate.c +@@ -156,6 +156,7 @@ int debug_lockdep_rcu_enabled(void) + } + EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /** + * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section? 
+ * +@@ -182,6 +183,7 @@ int rcu_read_lock_bh_held(void) + return in_softirq() || irqs_disabled(); + } + EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); ++#endif + + #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +Index: linux-stable/kernel/rcutree.c +=================================================================== +--- linux-stable.orig/kernel/rcutree.c ++++ linux-stable/kernel/rcutree.c +@@ -182,6 +182,7 @@ void rcu_sched_qs(int cpu) + rdp->passed_quiesce = 1; + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + void rcu_bh_qs(int cpu) + { + struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); +@@ -190,6 +191,7 @@ void rcu_bh_qs(int cpu) + trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs"); + rdp->passed_quiesce = 1; + } ++#endif + + /* + * Note a context switch. This is a quiescent state for RCU-sched, +@@ -239,6 +241,7 @@ long rcu_batches_completed_sched(void) + } + EXPORT_SYMBOL_GPL(rcu_batches_completed_sched); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Return the number of RCU BH batches processed thus far for debug & stats. + */ +@@ -256,6 +259,7 @@ void rcu_bh_force_quiescent_state(void) + force_quiescent_state(&rcu_bh_state); + } + EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); ++#endif + + /* + * Record the number of times rcutorture tests have been initiated and +@@ -2448,6 +2452,7 @@ void call_rcu_sched(struct rcu_head *hea + } + EXPORT_SYMBOL_GPL(call_rcu_sched); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Queue an RCU callback for invocation after a quicker grace period. + */ +@@ -2456,6 +2461,7 @@ void call_rcu_bh(struct rcu_head *head, + __call_rcu(head, func, &rcu_bh_state, -1, 0); + } + EXPORT_SYMBOL_GPL(call_rcu_bh); ++#endif + + /* + * Because a context switch is a grace period for RCU-sched and RCU-bh, +@@ -2533,6 +2539,7 @@ void synchronize_sched(void) + } + EXPORT_SYMBOL_GPL(synchronize_sched); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /** + * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. + * +@@ -2559,6 +2566,7 @@ void synchronize_rcu_bh(void) + wait_rcu_gp(call_rcu_bh); + } + EXPORT_SYMBOL_GPL(synchronize_rcu_bh); ++#endif + + static int synchronize_sched_expedited_cpu_stop(void *data) + { +@@ -2955,6 +2963,7 @@ static void _rcu_barrier(struct rcu_stat + mutex_unlock(&rsp->barrier_mutex); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + /** + * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. + */ +@@ -2963,6 +2972,7 @@ void rcu_barrier_bh(void) + _rcu_barrier(&rcu_bh_state); + } + EXPORT_SYMBOL_GPL(rcu_barrier_bh); ++#endif + + /** + * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. 
diff --git a/debian/patches/features/all/rt/rcu-more-swait-conversions.patch b/debian/patches/features/all/rt/rcu-more-swait-conversions.patch new file mode 100644 index 000000000..f3423893e --- /dev/null +++ b/debian/patches/features/all/rt/rcu-more-swait-conversions.patch @@ -0,0 +1,101 @@ +Subject: rcu-more-swait-conversions.patch +From: Thomas Gleixner +Date: Wed, 31 Jul 2013 19:00:35 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/rcutree.h | 5 +++-- + kernel/rcutree_plugin.h | 14 +++++++------- + 2 files changed, 10 insertions(+), 9 deletions(-) + +Index: linux-stable/kernel/rcutree.h +=================================================================== +--- linux-stable.orig/kernel/rcutree.h ++++ linux-stable/kernel/rcutree.h +@@ -28,6 +28,7 @@ + #include + #include + #include ++#include + + /* + * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and +@@ -190,7 +191,7 @@ struct rcu_node { + /* This can happen due to race conditions. */ + #endif /* #ifdef CONFIG_RCU_BOOST */ + #ifdef CONFIG_RCU_NOCB_CPU +- wait_queue_head_t nocb_gp_wq[2]; ++ struct swait_head nocb_gp_wq[2]; + /* Place for rcu_nocb_kthread() to wait GP. */ + #endif /* #ifdef CONFIG_RCU_NOCB_CPU */ + int need_future_gp[2]; +@@ -323,7 +324,7 @@ struct rcu_data { + atomic_long_t nocb_q_count_lazy; /* (approximate). */ + int nocb_p_count; /* # CBs being invoked by kthread */ + int nocb_p_count_lazy; /* (approximate). */ +- wait_queue_head_t nocb_wq; /* For nocb kthreads to sleep on. */ ++ struct swait_head nocb_wq; /* For nocb kthreads to sleep on. */ + struct task_struct *nocb_kthread; + #endif /* #ifdef CONFIG_RCU_NOCB_CPU */ + +Index: linux-stable/kernel/rcutree_plugin.h +=================================================================== +--- linux-stable.orig/kernel/rcutree_plugin.h ++++ linux-stable/kernel/rcutree_plugin.h +@@ -2036,7 +2036,7 @@ static int rcu_nocb_needs_gp(struct rcu_ + */ + static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) + { +- wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]); ++ swait_wake(&rnp->nocb_gp_wq[rnp->completed & 0x1]); + } + + /* +@@ -2054,8 +2054,8 @@ static void rcu_nocb_gp_set(struct rcu_n + + static void rcu_init_one_nocb(struct rcu_node *rnp) + { +- init_waitqueue_head(&rnp->nocb_gp_wq[0]); +- init_waitqueue_head(&rnp->nocb_gp_wq[1]); ++ init_swait_head(&rnp->nocb_gp_wq[0]); ++ init_swait_head(&rnp->nocb_gp_wq[1]); + } + + /* Is the specified CPU a no-CPUs CPU? */ +@@ -2095,7 +2095,7 @@ static void __call_rcu_nocb_enqueue(stru + return; + len = atomic_long_read(&rdp->nocb_q_count); + if (old_rhpp == &rdp->nocb_head) { +- wake_up(&rdp->nocb_wq); /* ... only if queue was empty ... */ ++ swait_wake(&rdp->nocb_wq); /* ... only if queue was empty ... */ + rdp->qlen_last_fqs_check = 0; + } else if (len > rdp->qlen_last_fqs_check + qhimark) { + wake_up_process(t); /* ... or if many callbacks queued. */ +@@ -2185,7 +2185,7 @@ static void rcu_nocb_wait_gp(struct rcu_ + */ + trace_rcu_future_gp(rnp, rdp, c, "StartWait"); + for (;;) { +- wait_event_interruptible( ++ swait_event_interruptible( + rnp->nocb_gp_wq[c & 0x1], + (d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c))); + if (likely(d)) +@@ -2213,7 +2213,7 @@ static int rcu_nocb_kthread(void *arg) + for (;;) { + /* If not polling, wait for next batch of callbacks. 
*/ + if (!rcu_nocb_poll) +- wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head); ++ swait_event_interruptible(rdp->nocb_wq, rdp->nocb_head); + list = ACCESS_ONCE(rdp->nocb_head); + if (!list) { + schedule_timeout_interruptible(1); +@@ -2263,7 +2263,7 @@ static int rcu_nocb_kthread(void *arg) + static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp) + { + rdp->nocb_tail = &rdp->nocb_head; +- init_waitqueue_head(&rdp->nocb_wq); ++ init_swait_head(&rdp->nocb_wq); + } + + /* Create a kthread for each RCU flavor for each no-CBs CPU. */ diff --git a/debian/patches/features/all/rt/rcu-tiny-merge-bh.patch b/debian/patches/features/all/rt/rcu-tiny-merge-bh.patch new file mode 100644 index 000000000..a6ba4eae6 --- /dev/null +++ b/debian/patches/features/all/rt/rcu-tiny-merge-bh.patch @@ -0,0 +1,27 @@ +Subject: rcu-more-fallout.patch +From: Thomas Gleixner +Date: Mon, 14 Nov 2011 10:57:54 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/rcutiny.c | 2 ++ + 1 file changed, 2 insertions(+) + +Index: linux-stable/kernel/rcutiny.c +=================================================================== +--- linux-stable.orig/kernel/rcutiny.c ++++ linux-stable/kernel/rcutiny.c +@@ -373,6 +373,7 @@ void call_rcu_sched(struct rcu_head *hea + } + EXPORT_SYMBOL_GPL(call_rcu_sched); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Post an RCU bottom-half callback to be invoked after any subsequent + * quiescent state. +@@ -382,3 +383,4 @@ void call_rcu_bh(struct rcu_head *head, + __call_rcu(head, func, &rcu_bh_ctrlblk); + } + EXPORT_SYMBOL_GPL(call_rcu_bh); ++#endif diff --git a/debian/patches/features/all/rt/rcu-tiny-solve-rt-mistery.patch b/debian/patches/features/all/rt/rcu-tiny-solve-rt-mistery.patch new file mode 100644 index 000000000..00a39e1bd --- /dev/null +++ b/debian/patches/features/all/rt/rcu-tiny-solve-rt-mistery.patch @@ -0,0 +1,45 @@ +Subject: rcu: rcutiny: Prevent RCU stall +From: Thomas Gleixner +Date: Tue, 16 Oct 2012 18:36:51 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +rcu_read_unlock_special() checks in_serving_softirq() and leaves early +when true. On RT this is obviously wrong as softirq processing context +can be preempted and therefor such a task can be on the gp_tasks +list. Leaving early here will leave the task on the list and therefor +block RCU processing forever. + +This cannot happen on mainline because softirq processing context +cannot be preempted and therefor this can never happen at all. + +In fact this check looks quite questionable in general. Neither irq +context nor softirq processing context in mainline can ever be +preempted in mainline so the special unlock case should not ever be +invoked in such context. Now the only explanation might be a +rcu_read_unlock() being interrupted and therefor leave the rcu nest +count at 0 before the special unlock bit has been cleared. That looks +fragile. At least it's missing a big fat comment. Paul ???? + +See mainline commits: ec433f0c5 and 8762705a for further enlightment. 
+ +Reported-by: Kristian Lehmann +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org + +--- + kernel/rcutiny_plugin.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: linux-stable/kernel/rcutiny_plugin.h +=================================================================== +--- linux-stable.orig/kernel/rcutiny_plugin.h ++++ linux-stable/kernel/rcutiny_plugin.h +@@ -609,7 +609,7 @@ void rcu_read_unlock_special(struct task + rcu_preempt_cpu_qs(); + + /* Hardware IRQ handlers cannot block. */ +- if (in_irq() || in_serving_softirq()) { ++ if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) { + local_irq_restore(flags); + return; + } diff --git a/debian/patches/features/all/rt/rcutiny-use-simple-waitqueue.patch b/debian/patches/features/all/rt/rcutiny-use-simple-waitqueue.patch new file mode 100644 index 000000000..7db20d85c --- /dev/null +++ b/debian/patches/features/all/rt/rcutiny-use-simple-waitqueue.patch @@ -0,0 +1,82 @@ +Subject: rcutiny: Use simple waitqueue +From: Thomas Gleixner +Date: Mon, 03 Dec 2012 16:25:21 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Simple waitqueues can be handled from interrupt disabled contexts. + +Signed-off-by: Thomas Gleixner +--- + kernel/rcutiny_plugin.h | 17 +++++++++-------- + 1 file changed, 9 insertions(+), 8 deletions(-) + +Index: linux-stable/kernel/rcutiny_plugin.h +=================================================================== +--- linux-stable.orig/kernel/rcutiny_plugin.h ++++ linux-stable/kernel/rcutiny_plugin.h +@@ -26,6 +26,7 @@ + #include + #include + #include ++#include + + /* Global control variables for rcupdate callback mechanism. */ + struct rcu_ctrlblk { +@@ -308,7 +309,7 @@ static void show_tiny_preempt_stats(stru + + /* Controls for rcu_kthread() kthread. */ + static struct task_struct *rcu_kthread_task; +-static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq); ++static DEFINE_SWAIT_HEAD(rcu_kthread_wq); + static unsigned long have_rcu_kthread_work; + + /* +@@ -762,7 +763,7 @@ void synchronize_rcu(void) + } + EXPORT_SYMBOL_GPL(synchronize_rcu); + +-static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq); ++static DEFINE_SWAIT_HEAD(sync_rcu_preempt_exp_wq); + static unsigned long sync_rcu_preempt_exp_count; + static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex); + +@@ -784,7 +785,7 @@ static int rcu_preempted_readers_exp(voi + */ + static void rcu_report_exp_done(void) + { +- wake_up(&sync_rcu_preempt_exp_wq); ++ swait_wake(&sync_rcu_preempt_exp_wq); + } + + /* +@@ -836,8 +837,8 @@ void synchronize_rcu_expedited(void) + } else { + rcu_initiate_boost(); + local_irq_restore(flags); +- wait_event(sync_rcu_preempt_exp_wq, +- !rcu_preempted_readers_exp()); ++ swait_event(sync_rcu_preempt_exp_wq, ++ !rcu_preempted_readers_exp()); + } + + /* Clean up and exit. 
*/ +@@ -907,7 +908,7 @@ static void invoke_rcu_callbacks(void) + { + have_rcu_kthread_work = 1; + if (rcu_kthread_task != NULL) +- wake_up(&rcu_kthread_wq); ++ swait_wake(&rcu_kthread_wq); + } + + #ifdef CONFIG_RCU_TRACE +@@ -937,8 +938,8 @@ static int rcu_kthread(void *arg) + unsigned long flags; + + for (;;) { +- wait_event_interruptible(rcu_kthread_wq, +- have_rcu_kthread_work != 0); ++ swait_event_interruptible(rcu_kthread_wq, ++ have_rcu_kthread_work != 0); + morework = rcu_boost(); + local_irq_save(flags); + work = have_rcu_kthread_work; diff --git a/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch b/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch new file mode 100644 index 000000000..a0b24156d --- /dev/null +++ b/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch @@ -0,0 +1,37 @@ +From: Yong Zhang +Date: Thu, 28 Jul 2011 11:16:00 +0800 +Subject: hotplug: Reread hotplug_pcp on pin_current_cpu() retry +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +When retry happens, it's likely that the task has been migrated to +another cpu (except unplug failed), but it still derefernces the +original hotplug_pcp per cpu data. + +Update the pointer to hotplug_pcp in the retry path, so it points to +the current cpu. + +Signed-off-by: Yong Zhang +Cc: Peter Zijlstra +Link: http://lkml.kernel.org/r/20110728031600.GA338@windriver.com +Signed-off-by: Thomas Gleixner +--- + kernel/cpu.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +Index: linux-stable/kernel/cpu.c +=================================================================== +--- linux-stable.orig/kernel/cpu.c ++++ linux-stable/kernel/cpu.c +@@ -81,9 +81,11 @@ static DEFINE_PER_CPU(struct hotplug_pcp + */ + void pin_current_cpu(void) + { +- struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp); ++ struct hotplug_pcp *hp; + + retry: ++ hp = &__get_cpu_var(hotplug_pcp); ++ + if (!hp->unplug || hp->refcount || preempt_count() > 1 || + hp->unplug == current) { + hp->refcount++; diff --git a/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch b/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch new file mode 100644 index 000000000..c22108788 --- /dev/null +++ b/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch @@ -0,0 +1,72 @@ +Subject: ARM: Initialize ptl->lock for vector page +From: Frank Rowand +Date: Sat, 1 Oct 2011 18:58:13 -0700 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Without this patch, ARM can not use SPLIT_PTLOCK_CPUS if +PREEMPT_RT_FULL=y because vectors_user_mapping() creates a +VM_ALWAYSDUMP mapping of the vector page (address 0xffff0000), but no +ptl->lock has been allocated for the page. An attempt to coredump +that page will result in a kernel NULL pointer dereference when +follow_page() attempts to lock the page. + +The call tree to the NULL pointer dereference is: + + do_notify_resume() + get_signal_to_deliver() + do_coredump() + elf_core_dump() + get_dump_page() + __get_user_pages() + follow_page() + pte_offset_map_lock() <----- a #define + ... + rt_spin_lock() + +The underlying problem is exposed by mm-shrink-the-page-frame-to-rt-size.patch. 
+ +Signed-off-by: Frank Rowand +Cc: Frank +Cc: Peter Zijlstra +Link: http://lkml.kernel.org/r/4E87C535.2030907@am.sony.com +Signed-off-by: Thomas Gleixner +--- + arch/arm/kernel/process.c | 25 +++++++++++++++++++++++++ + 1 file changed, 25 insertions(+) + +Index: linux-stable/arch/arm/kernel/process.c +=================================================================== +--- linux-stable.orig/arch/arm/kernel/process.c ++++ linux-stable/arch/arm/kernel/process.c +@@ -433,6 +433,31 @@ unsigned long arch_randomize_brk(struct + } + + #ifdef CONFIG_MMU ++ ++/* ++ * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not ++ * initialized by pgtable_page_ctor() then a coredump of the vector page will ++ * fail. ++ */ ++static int __init vectors_user_mapping_init_page(void) ++{ ++ struct page *page; ++ unsigned long addr = 0xffff0000; ++ pgd_t *pgd; ++ pud_t *pud; ++ pmd_t *pmd; ++ ++ pgd = pgd_offset_k(addr); ++ pud = pud_offset(pgd, addr); ++ pmd = pmd_offset(pud, addr); ++ page = pmd_page(*(pmd)); ++ ++ pgtable_page_ctor(page); ++ ++ return 0; ++} ++late_initcall(vectors_user_mapping_init_page); ++ + /* + * The vectors page is always readable from user space for the + * atomic helpers and the signal restart code. Insert it into the diff --git a/debian/patches/features/all/rt/relay-fix-timer-madness.patch b/debian/patches/features/all/rt/relay-fix-timer-madness.patch new file mode 100644 index 000000000..86cbb4b03 --- /dev/null +++ b/debian/patches/features/all/rt/relay-fix-timer-madness.patch @@ -0,0 +1,55 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:44:07 -0500 +Subject: relay: fix timer madness +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +remove timer calls (!!!) from deep within the tracing infrastructure. +This was totally bogus code that can cause lockups and worse. Poll +the buffer every 2 jiffies for now. + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + kernel/relay.c | 14 +++++--------- + 1 file changed, 5 insertions(+), 9 deletions(-) + +Index: linux-stable/kernel/relay.c +=================================================================== +--- linux-stable.orig/kernel/relay.c ++++ linux-stable/kernel/relay.c +@@ -339,6 +339,10 @@ static void wakeup_readers(unsigned long + { + struct rchan_buf *buf = (struct rchan_buf *)data; + wake_up_interruptible(&buf->read_wait); ++ /* ++ * Stupid polling for now: ++ */ ++ mod_timer(&buf->timer, jiffies + 1); + } + + /** +@@ -356,6 +360,7 @@ static void __relay_reset(struct rchan_b + init_waitqueue_head(&buf->read_wait); + kref_init(&buf->kref); + setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf); ++ mod_timer(&buf->timer, jiffies + 1); + } else + del_timer_sync(&buf->timer); + +@@ -739,15 +744,6 @@ size_t relay_switch_subbuf(struct rchan_ + else + buf->early_bytes += buf->chan->subbuf_size - + buf->padding[old_subbuf]; +- smp_mb(); +- if (waitqueue_active(&buf->read_wait)) +- /* +- * Calling wake_up_interruptible() from here +- * will deadlock if we happen to be logging +- * from the scheduler (trying to re-grab +- * rq->lock), so defer it. 
+- */ +- mod_timer(&buf->timer, jiffies + 1); + } + + old = buf->data; diff --git a/debian/patches/features/all/rt/resource-counters-use-localirq-nort.patch b/debian/patches/features/all/rt/resource-counters-use-localirq-nort.patch new file mode 100644 index 000000000..810608627 --- /dev/null +++ b/debian/patches/features/all/rt/resource-counters-use-localirq-nort.patch @@ -0,0 +1,89 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:44:33 -0500 +Subject: core: Do not disable interrupts on RT in res_counter.c +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Frederic Weisbecker reported this warning: + +[ 45.228562] BUG: sleeping function called from invalid context at kernel/rtmutex.c:683 +[ 45.228571] in_atomic(): 0, irqs_disabled(): 1, pid: 4290, name: ntpdate +[ 45.228576] INFO: lockdep is turned off. +[ 45.228580] irq event stamp: 0 +[ 45.228583] hardirqs last enabled at (0): [<(null)>] (null) +[ 45.228589] hardirqs last disabled at (0): [] copy_process+0x68d/0x1500 +[ 45.228602] softirqs last enabled at (0): [] copy_process+0x68d/0x1500 +[ 45.228609] softirqs last disabled at (0): [<(null)>] (null) +[ 45.228617] Pid: 4290, comm: ntpdate Tainted: G W 2.6.29-rc4-rt1-tip #1 +[ 45.228622] Call Trace: +[ 45.228632] [] ? print_irqtrace_events+0xd0/0xe0 +[ 45.228639] [] __might_sleep+0x113/0x130 +[ 45.228646] [] rt_spin_lock+0xa1/0xb0 +[ 45.228653] [] res_counter_charge+0x5d/0x130 +[ 45.228660] [] __mem_cgroup_try_charge+0x7f/0x180 +[ 45.228667] [] mem_cgroup_charge_common+0x57/0x90 +[ 45.228674] [] ? ftrace_call+0x5/0x2b +[ 45.228680] [] mem_cgroup_newpage_charge+0x5d/0x60 +[ 45.228688] [] __do_fault+0x29e/0x4c0 +[ 45.228694] [] ? rt_spin_unlock+0x23/0x80 +[ 45.228700] [] handle_mm_fault+0x205/0x890 +[ 45.228707] [] ? ftrace_call+0x5/0x2b +[ 45.228714] [] do_page_fault+0x11e/0x2a0 +[ 45.228720] [] page_fault+0x25/0x30 +[ 45.228727] [] ? __clear_user+0x3d/0x70 +[ 45.228733] [] ? __clear_user+0x21/0x70 + +The reason is the raw IRQ flag use of kernel/res_counter.c. + +The irq flags tricks there seem a bit pointless: it cannot protect the +c->parent linkage because local_irq_save() is only per CPU. + +So replace it with _nort(). This code needs a second look. 
+ +Reported-by: Frederic Weisbecker +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + kernel/res_counter.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +Index: linux-stable/kernel/res_counter.c +=================================================================== +--- linux-stable.orig/kernel/res_counter.c ++++ linux-stable/kernel/res_counter.c +@@ -49,7 +49,7 @@ static int __res_counter_charge(struct r + + r = ret = 0; + *limit_fail_at = NULL; +- local_irq_save(flags); ++ local_irq_save_nort(flags); + for (c = counter; c != NULL; c = c->parent) { + spin_lock(&c->lock); + r = res_counter_charge_locked(c, val, force); +@@ -69,7 +69,7 @@ static int __res_counter_charge(struct r + spin_unlock(&u->lock); + } + } +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + return ret; + } +@@ -103,7 +103,7 @@ u64 res_counter_uncharge_until(struct re + struct res_counter *c; + u64 ret = 0; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + for (c = counter; c != top; c = c->parent) { + u64 r; + spin_lock(&c->lock); +@@ -112,7 +112,7 @@ u64 res_counter_uncharge_until(struct re + ret = r; + spin_unlock(&c->lock); + } +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + return ret; + } + diff --git a/debian/patches/features/all/rt/revert-80d5c3689b886308247da295a228a54df49a44f6.patch b/debian/patches/features/all/rt/revert-80d5c3689b886308247da295a228a54df49a44f6.patch new file mode 100644 index 000000000..f87450108 --- /dev/null +++ b/debian/patches/features/all/rt/revert-80d5c3689b886308247da295a228a54df49a44f6.patch @@ -0,0 +1,31 @@ +From: Sebastian Andrzej Siewior +Subject: [PATCH] net/cpsw: revert 80d5c3689b886308247da295a228a54df49a44f6 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/net/ethernet/ti/cpsw.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +Index: linux-stable/drivers/net/ethernet/ti/cpsw.c +=================================================================== +--- linux-stable.orig/drivers/net/ethernet/ti/cpsw.c ++++ linux-stable/drivers/net/ethernet/ti/cpsw.c +@@ -1286,7 +1286,7 @@ clean_vid: + } + + static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, +- __be16 proto, u16 vid) ++ unsigned short vid) + { + struct cpsw_priv *priv = netdev_priv(ndev); + +@@ -1298,7 +1298,7 @@ static int cpsw_ndo_vlan_rx_add_vid(stru + } + + static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, +- __be16 proto, u16 vid) ++ unsigned short vid) + { + struct cpsw_priv *priv = netdev_priv(ndev); + int ret; diff --git a/debian/patches/features/all/rt/revert-f646968f8f7c624587de729115d802372b9063dd.patch b/debian/patches/features/all/rt/revert-f646968f8f7c624587de729115d802372b9063dd.patch new file mode 100644 index 000000000..30ca6eea9 --- /dev/null +++ b/debian/patches/features/all/rt/revert-f646968f8f7c624587de729115d802372b9063dd.patch @@ -0,0 +1,31 @@ +From: Sebastian Andrzej Siewior +Subject: [PATCH] net/cpsw: revert f646968f8f7c624587de729115d802372b9063dd +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/net/ethernet/ti/cpsw.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +Index: linux-stable/drivers/net/ethernet/ti/cpsw.c +=================================================================== +--- linux-stable.orig/drivers/net/ethernet/ti/cpsw.c ++++ 
linux-stable/drivers/net/ethernet/ti/cpsw.c +@@ -1633,7 +1633,7 @@ static int cpsw_probe_dual_emac(struct p + priv_sl2->irqs_table[i] = priv->irqs_table[i]; + priv_sl2->num_irqs = priv->num_irqs; + } +- ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; ++ ndev->features |= NETIF_F_HW_VLAN_FILTER; + + ndev->netdev_ops = &cpsw_netdev_ops; + SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops); +@@ -1872,7 +1872,7 @@ static int cpsw_probe(struct platform_de + k++; + } + +- ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; ++ ndev->features |= NETIF_F_HW_VLAN_FILTER; + + ndev->netdev_ops = &cpsw_netdev_ops; + SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops); diff --git a/debian/patches/features/all/rt/revert-f9a8f83b04e0c362a2fc660dbad980d24af209fc.patch b/debian/patches/features/all/rt/revert-f9a8f83b04e0c362a2fc660dbad980d24af209fc.patch new file mode 100644 index 000000000..c005e61da --- /dev/null +++ b/debian/patches/features/all/rt/revert-f9a8f83b04e0c362a2fc660dbad980d24af209fc.patch @@ -0,0 +1,52 @@ +From: Sebastian Andrzej Siewior +Subject: [PATCH] net/cpsw: revert f9a8f83b04e0c362a2fc660dbad980d24af209fc +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/net/ethernet/ti/cpmac.c | 4 ++-- + drivers/net/ethernet/ti/cpsw.c | 2 +- + drivers/net/ethernet/ti/davinci_emac.c | 2 +- + 3 files changed, 4 insertions(+), 4 deletions(-) + +Index: linux-stable/drivers/net/ethernet/ti/cpmac.c +=================================================================== +--- linux-stable.orig/drivers/net/ethernet/ti/cpmac.c ++++ linux-stable/drivers/net/ethernet/ti/cpmac.c +@@ -1172,8 +1172,8 @@ static int cpmac_probe(struct platform_d + snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, + mdio_bus_id, phy_id); + +- priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link, +- PHY_INTERFACE_MODE_MII); ++ priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link, 0, ++ PHY_INTERFACE_MODE_MII); + + if (IS_ERR(priv->phy)) { + if (netif_msg_drv(priv)) +Index: linux-stable/drivers/net/ethernet/ti/cpsw.c +=================================================================== +--- linux-stable.orig/drivers/net/ethernet/ti/cpsw.c ++++ linux-stable/drivers/net/ethernet/ti/cpsw.c +@@ -817,7 +817,7 @@ static void cpsw_slave_open(struct cpsw_ + 1 << slave_port, 0, 0, ALE_MCAST_FWD_2); + + slave->phy = phy_connect(priv->ndev, slave->data->phy_id, +- &cpsw_adjust_link, slave->data->phy_if); ++ &cpsw_adjust_link, 0, slave->data->phy_if); + if (IS_ERR(slave->phy)) { + dev_err(priv->dev, "phy %s not found on slave %d\n", + slave->data->phy_id, slave->slave_num); +Index: linux-stable/drivers/net/ethernet/ti/davinci_emac.c +=================================================================== +--- linux-stable.orig/drivers/net/ethernet/ti/davinci_emac.c ++++ linux-stable/drivers/net/ethernet/ti/davinci_emac.c +@@ -1600,7 +1600,7 @@ static int emac_dev_open(struct net_devi + + if (priv->phy_id && *priv->phy_id) { + priv->phydev = phy_connect(ndev, priv->phy_id, +- &emac_adjust_link, ++ &emac_adjust_link, 0, + PHY_INTERFACE_MODE_MII); + + if (IS_ERR(priv->phydev)) { diff --git a/debian/patches/features/all/rt/rfc-sched-rt-fix-wait_task_interactive-to-test-rt_spin_lock-state.patch b/debian/patches/features/all/rt/rfc-sched-rt-fix-wait_task_interactive-to-test-rt_spin_lock-state.patch new file mode 100644 index 000000000..94db7d9d9 --- /dev/null +++ b/debian/patches/features/all/rt/rfc-sched-rt-fix-wait_task_interactive-to-test-rt_spin_lock-state.patch 
@@ -0,0 +1,49 @@ +Subject: sched/rt: Fix wait_task_interactive() to test rt_spin_lock state +From: Steven Rostedt +Date: Thu, 01 Mar 2012 13:55:33 -0500 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +The wait_task_interactive() will have a task sleep waiting for another +task to have a certain state. But it ignores the rt_spin_locks state +and can return with an incorrect result if the task it is waiting +for is blocked on a rt_spin_lock() and is waking up. + +The rt_spin_locks save the tasks state in the saved_state field +and the wait_task_interactive() must also test that state. + +Signed-off-by: Steven Rostedt +Cc: Carsten Emde +Cc: John Kacur +Cc: Peter Zijlstra +Cc: Clark Williams +Cc: stable-rt@vger.kernel.org +Link: http://lkml.kernel.org/r/20120301190345.979435764@goodmis.org +Signed-off-by: Thomas Gleixner +--- + kernel/sched/core.c | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -1084,7 +1084,8 @@ unsigned long wait_task_inactive(struct + * is actually now running somewhere else! + */ + while (task_running(rq, p)) { +- if (match_state && unlikely(p->state != match_state)) ++ if (match_state && unlikely(p->state != match_state) ++ && unlikely(p->saved_state != match_state)) + return 0; + cpu_relax(); + } +@@ -1099,7 +1100,8 @@ unsigned long wait_task_inactive(struct + running = task_running(rq, p); + on_rq = p->on_rq; + ncsw = 0; +- if (!match_state || p->state == match_state) ++ if (!match_state || p->state == match_state ++ || p->saved_state == match_state) + ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ + task_rq_unlock(rq, p, &flags); + diff --git a/debian/patches/features/all/rt/rt-add-rt-locks.patch b/debian/patches/features/all/rt/rt-add-rt-locks.patch new file mode 100644 index 000000000..20cf5f10e --- /dev/null +++ b/debian/patches/features/all/rt/rt-add-rt-locks.patch @@ -0,0 +1,911 @@ +From: Thomas Gleixner +Date: Sun, 26 Jul 2009 19:39:56 +0200 +Subject: rt: Add the preempt-rt lock replacement APIs +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Map spinlocks, rwlocks, rw_semaphores and semaphores to the rt_mutex +based locking functions for preempt-rt. + +Signed-off-by: Thomas Gleixner + +--- + include/linux/rwlock_rt.h | 123 ++++++++++ + include/linux/spinlock.h | 12 - + include/linux/spinlock_api_smp.h | 4 + include/linux/spinlock_rt.h | 158 +++++++++++++ + kernel/Makefile | 9 + kernel/rt.c | 442 +++++++++++++++++++++++++++++++++++++++ + kernel/spinlock.c | 7 + lib/spinlock_debug.c | 5 + 8 files changed, 756 insertions(+), 4 deletions(-) + +Index: linux-stable/include/linux/rwlock_rt.h +=================================================================== +--- /dev/null ++++ linux-stable/include/linux/rwlock_rt.h +@@ -0,0 +1,123 @@ ++#ifndef __LINUX_RWLOCK_RT_H ++#define __LINUX_RWLOCK_RT_H ++ ++#ifndef __LINUX_SPINLOCK_H ++#error Do not include directly. 
Use spinlock.h ++#endif ++ ++#define rwlock_init(rwl) \ ++do { \ ++ static struct lock_class_key __key; \ ++ \ ++ rt_mutex_init(&(rwl)->lock); \ ++ __rt_rwlock_init(rwl, #rwl, &__key); \ ++} while (0) ++ ++extern void __lockfunc rt_write_lock(rwlock_t *rwlock); ++extern void __lockfunc rt_read_lock(rwlock_t *rwlock); ++extern int __lockfunc rt_write_trylock(rwlock_t *rwlock); ++extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags); ++extern int __lockfunc rt_read_trylock(rwlock_t *rwlock); ++extern void __lockfunc rt_write_unlock(rwlock_t *rwlock); ++extern void __lockfunc rt_read_unlock(rwlock_t *rwlock); ++extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock); ++extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock); ++extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key); ++ ++#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock)) ++#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock)) ++ ++#define write_trylock_irqsave(lock, flags) \ ++ __cond_lock(lock, rt_write_trylock_irqsave(lock, &flags)) ++ ++#define read_lock_irqsave(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ migrate_disable(); \ ++ flags = rt_read_lock_irqsave(lock); \ ++ } while (0) ++ ++#define write_lock_irqsave(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ migrate_disable(); \ ++ flags = rt_write_lock_irqsave(lock); \ ++ } while (0) ++ ++#define read_lock(lock) \ ++ do { \ ++ migrate_disable(); \ ++ rt_read_lock(lock); \ ++ } while (0) ++ ++#define read_lock_bh(lock) \ ++ do { \ ++ local_bh_disable(); \ ++ migrate_disable(); \ ++ rt_read_lock(lock); \ ++ } while (0) ++ ++#define read_lock_irq(lock) read_lock(lock) ++ ++#define write_lock(lock) \ ++ do { \ ++ migrate_disable(); \ ++ rt_write_lock(lock); \ ++ } while (0) ++ ++#define write_lock_bh(lock) \ ++ do { \ ++ local_bh_disable(); \ ++ migrate_disable(); \ ++ rt_write_lock(lock); \ ++ } while (0) ++ ++#define write_lock_irq(lock) write_lock(lock) ++ ++#define read_unlock(lock) \ ++ do { \ ++ rt_read_unlock(lock); \ ++ migrate_enable(); \ ++ } while (0) ++ ++#define read_unlock_bh(lock) \ ++ do { \ ++ rt_read_unlock(lock); \ ++ migrate_enable(); \ ++ local_bh_enable(); \ ++ } while (0) ++ ++#define read_unlock_irq(lock) read_unlock(lock) ++ ++#define write_unlock(lock) \ ++ do { \ ++ rt_write_unlock(lock); \ ++ migrate_enable(); \ ++ } while (0) ++ ++#define write_unlock_bh(lock) \ ++ do { \ ++ rt_write_unlock(lock); \ ++ migrate_enable(); \ ++ local_bh_enable(); \ ++ } while (0) ++ ++#define write_unlock_irq(lock) write_unlock(lock) ++ ++#define read_unlock_irqrestore(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ (void) flags; \ ++ rt_read_unlock(lock); \ ++ migrate_enable(); \ ++ } while (0) ++ ++#define write_unlock_irqrestore(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ (void) flags; \ ++ rt_write_unlock(lock); \ ++ migrate_enable(); \ ++ } while (0) ++ ++#endif +Index: linux-stable/include/linux/spinlock.h +=================================================================== +--- linux-stable.orig/include/linux/spinlock.h ++++ linux-stable/include/linux/spinlock.h +@@ -254,7 +254,11 @@ static inline void do_raw_spin_unlock(ra + #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock)) + + /* Include rwlock functions */ +-#include ++#ifdef CONFIG_PREEMPT_RT_FULL ++# include ++#else ++# include ++#endif + + /* + * Pull the _spin_*()/_read_*()/_write_*() 
functions/declarations: +@@ -265,6 +269,10 @@ static inline void do_raw_spin_unlock(ra + # include + #endif + ++#ifdef CONFIG_PREEMPT_RT_FULL ++# include ++#else /* PREEMPT_RT_FULL */ ++ + /* + * Map the spin_lock functions to the raw variants for PREEMPT_RT=n + */ +@@ -394,4 +402,6 @@ extern int _atomic_dec_and_lock(atomic_t + #define atomic_dec_and_lock(atomic, lock) \ + __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) + ++#endif /* !PREEMPT_RT_FULL */ ++ + #endif /* __LINUX_SPINLOCK_H */ +Index: linux-stable/include/linux/spinlock_api_smp.h +=================================================================== +--- linux-stable.orig/include/linux/spinlock_api_smp.h ++++ linux-stable/include/linux/spinlock_api_smp.h +@@ -191,6 +191,8 @@ static inline int __raw_spin_trylock_bh( + return 0; + } + +-#include ++#ifndef CONFIG_PREEMPT_RT_FULL ++# include ++#endif + + #endif /* __LINUX_SPINLOCK_API_SMP_H */ +Index: linux-stable/include/linux/spinlock_rt.h +=================================================================== +--- /dev/null ++++ linux-stable/include/linux/spinlock_rt.h +@@ -0,0 +1,158 @@ ++#ifndef __LINUX_SPINLOCK_RT_H ++#define __LINUX_SPINLOCK_RT_H ++ ++#ifndef __LINUX_SPINLOCK_H ++#error Do not include directly. Use spinlock.h ++#endif ++ ++#include ++ ++extern void ++__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key); ++ ++#define spin_lock_init(slock) \ ++do { \ ++ static struct lock_class_key __key; \ ++ \ ++ rt_mutex_init(&(slock)->lock); \ ++ __rt_spin_lock_init(slock, #slock, &__key); \ ++} while (0) ++ ++extern void __lockfunc rt_spin_lock(spinlock_t *lock); ++extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock); ++extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass); ++extern void __lockfunc rt_spin_unlock(spinlock_t *lock); ++extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock); ++extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags); ++extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock); ++extern int __lockfunc rt_spin_trylock(spinlock_t *lock); ++extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock); ++ ++/* ++ * lockdep-less calls, for derived types like rwlock: ++ * (for trylock they can use rt_mutex_trylock() directly. 
++ */ ++extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock); ++extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock); ++ ++#define spin_lock_local(lock) rt_spin_lock(lock) ++#define spin_unlock_local(lock) rt_spin_unlock(lock) ++ ++#define spin_lock(lock) \ ++ do { \ ++ migrate_disable(); \ ++ rt_spin_lock(lock); \ ++ } while (0) ++ ++#define spin_lock_bh(lock) \ ++ do { \ ++ local_bh_disable(); \ ++ migrate_disable(); \ ++ rt_spin_lock(lock); \ ++ } while (0) ++ ++#define spin_lock_irq(lock) spin_lock(lock) ++ ++#define spin_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock)) ++ ++#ifdef CONFIG_LOCKDEP ++# define spin_lock_nested(lock, subclass) \ ++ do { \ ++ migrate_disable(); \ ++ rt_spin_lock_nested(lock, subclass); \ ++ } while (0) ++ ++# define spin_lock_irqsave_nested(lock, flags, subclass) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ flags = 0; \ ++ migrate_disable(); \ ++ rt_spin_lock_nested(lock, subclass); \ ++ } while (0) ++#else ++# define spin_lock_nested(lock, subclass) spin_lock(lock) ++ ++# define spin_lock_irqsave_nested(lock, flags, subclass) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ flags = 0; \ ++ spin_lock(lock); \ ++ } while (0) ++#endif ++ ++#define spin_lock_irqsave(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ flags = 0; \ ++ spin_lock(lock); \ ++ } while (0) ++ ++static inline unsigned long spin_lock_trace_flags(spinlock_t *lock) ++{ ++ unsigned long flags = 0; ++#ifdef CONFIG_TRACE_IRQFLAGS ++ flags = rt_spin_lock_trace_flags(lock); ++#else ++ spin_lock(lock); /* lock_local */ ++#endif ++ return flags; ++} ++ ++/* FIXME: we need rt_spin_lock_nest_lock */ ++#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0) ++ ++#define spin_unlock(lock) \ ++ do { \ ++ rt_spin_unlock(lock); \ ++ migrate_enable(); \ ++ } while (0) ++ ++#define spin_unlock_bh(lock) \ ++ do { \ ++ rt_spin_unlock(lock); \ ++ migrate_enable(); \ ++ local_bh_enable(); \ ++ } while (0) ++ ++#define spin_unlock_irq(lock) spin_unlock(lock) ++ ++#define spin_unlock_irqrestore(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ (void) flags; \ ++ spin_unlock(lock); \ ++ } while (0) ++ ++#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock)) ++#define spin_trylock_irq(lock) spin_trylock(lock) ++ ++#define spin_trylock_irqsave(lock, flags) \ ++ rt_spin_trylock_irqsave(lock, &(flags)) ++ ++#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock) ++ ++#ifdef CONFIG_GENERIC_LOCKBREAK ++# define spin_is_contended(lock) ((lock)->break_lock) ++#else ++# define spin_is_contended(lock) (((void)(lock), 0)) ++#endif ++ ++static inline int spin_can_lock(spinlock_t *lock) ++{ ++ return !rt_mutex_is_locked(&lock->lock); ++} ++ ++static inline int spin_is_locked(spinlock_t *lock) ++{ ++ return rt_mutex_is_locked(&lock->lock); ++} ++ ++static inline void assert_spin_locked(spinlock_t *lock) ++{ ++ BUG_ON(!spin_is_locked(lock)); ++} ++ ++#define atomic_dec_and_lock(atomic, lock) \ ++ atomic_dec_and_spin_lock(atomic, lock) ++ ++#endif +Index: linux-stable/kernel/Makefile +=================================================================== +--- linux-stable.orig/kernel/Makefile ++++ linux-stable/kernel/Makefile +@@ -7,8 +7,8 @@ obj-y = fork.o exec_domain.o panic.o + sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \ + signal.o sys.o kmod.o workqueue.o pid.o task_work.o \ + rcupdate.o extable.o params.o posix-timers.o \ +- kthread.o wait.o sys_ni.o posix-cpu-timers.o mutex.o \ +- hrtimer.o rwsem.o 
nsproxy.o srcu.o semaphore.o \ ++ kthread.o wait.o sys_ni.o posix-cpu-timers.o \ ++ hrtimer.o nsproxy.o srcu.o semaphore.o \ + notifier.o ksysfs.o cred.o \ + async.o range.o groups.o lglock.o smpboot.o + +@@ -31,7 +31,11 @@ obj-$(CONFIG_FREEZER) += freezer.o + obj-$(CONFIG_PROFILING) += profile.o + obj-$(CONFIG_STACKTRACE) += stacktrace.o + obj-y += time/ ++ifneq ($(CONFIG_PREEMPT_RT_FULL),y) ++obj-y += mutex.o + obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o ++obj-y += rwsem.o ++endif + obj-$(CONFIG_LOCKDEP) += lockdep.o + ifeq ($(CONFIG_PROC_FS),y) + obj-$(CONFIG_LOCKDEP) += lockdep_proc.o +@@ -43,6 +47,7 @@ endif + obj-$(CONFIG_RT_MUTEXES) += rtmutex.o + obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o + obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o ++obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o + obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o + obj-$(CONFIG_SMP) += smp.o + ifneq ($(CONFIG_SMP),y) +Index: linux-stable/kernel/rt.c +=================================================================== +--- /dev/null ++++ linux-stable/kernel/rt.c +@@ -0,0 +1,442 @@ ++/* ++ * kernel/rt.c ++ * ++ * Real-Time Preemption Support ++ * ++ * started by Ingo Molnar: ++ * ++ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar ++ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner ++ * ++ * historic credit for proving that Linux spinlocks can be implemented via ++ * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow ++ * and others) who prototyped it on 2.4 and did lots of comparative ++ * research and analysis; TimeSys, for proving that you can implement a ++ * fully preemptible kernel via the use of IRQ threading and mutexes; ++ * Bill Huey for persuasively arguing on lkml that the mutex model is the ++ * right one; and to MontaVista, who ported pmutexes to 2.6. ++ * ++ * This code is a from-scratch implementation and is not based on pmutexes, ++ * but the idea of converting spinlocks to mutexes is used here too. ++ * ++ * lock debugging, locking tree, deadlock detection: ++ * ++ * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey ++ * Released under the General Public License (GPL). ++ * ++ * Includes portions of the generic R/W semaphore implementation from: ++ * ++ * Copyright (c) 2001 David Howells (dhowells@redhat.com). ++ * - Derived partially from idea by Andrea Arcangeli ++ * - Derived also from comments by Linus ++ * ++ * Pending ownership of locks and ownership stealing: ++ * ++ * Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt ++ * ++ * (also by Steven Rostedt) ++ * - Converted single pi_lock to individual task locks. ++ * ++ * By Esben Nielsen: ++ * Doing priority inheritance with help of the scheduler. ++ * ++ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner ++ * - major rework based on Esben Nielsens initial patch ++ * - replaced thread_info references by task_struct refs ++ * - removed task->pending_owner dependency ++ * - BKL drop/reacquire for semaphore style locks to avoid deadlocks ++ * in the scheduler return path as discussed with Steven Rostedt ++ * ++ * Copyright (C) 2006, Kihon Technologies Inc. ++ * Steven Rostedt ++ * - debugged and patched Thomas Gleixner's rework. ++ * - added back the cmpxchg to the rework. ++ * - turned atomic require back on for SMP. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "rtmutex_common.h" ++ ++/* ++ * struct mutex functions ++ */ ++void __mutex_do_init(struct mutex *mutex, const char *name, ++ struct lock_class_key *key) ++{ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ /* ++ * Make sure we are not reinitializing a held lock: ++ */ ++ debug_check_no_locks_freed((void *)mutex, sizeof(*mutex)); ++ lockdep_init_map(&mutex->dep_map, name, key, 0); ++#endif ++ mutex->lock.save_state = 0; ++} ++EXPORT_SYMBOL(__mutex_do_init); ++ ++void __lockfunc _mutex_lock(struct mutex *lock) ++{ ++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); ++ rt_mutex_lock(&lock->lock); ++} ++EXPORT_SYMBOL(_mutex_lock); ++ ++int __lockfunc _mutex_lock_interruptible(struct mutex *lock) ++{ ++ int ret; ++ ++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); ++ ret = rt_mutex_lock_interruptible(&lock->lock, 0); ++ if (ret) ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_lock_interruptible); ++ ++int __lockfunc _mutex_lock_killable(struct mutex *lock) ++{ ++ int ret; ++ ++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); ++ ret = rt_mutex_lock_killable(&lock->lock, 0); ++ if (ret) ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_lock_killable); ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass) ++{ ++ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); ++ rt_mutex_lock(&lock->lock); ++} ++EXPORT_SYMBOL(_mutex_lock_nested); ++ ++void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) ++{ ++ mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_); ++ rt_mutex_lock(&lock->lock); ++} ++EXPORT_SYMBOL(_mutex_lock_nest_lock); ++ ++int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass) ++{ ++ int ret; ++ ++ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); ++ ret = rt_mutex_lock_interruptible(&lock->lock, 0); ++ if (ret) ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_lock_interruptible_nested); ++ ++int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass) ++{ ++ int ret; ++ ++ mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); ++ ret = rt_mutex_lock_killable(&lock->lock, 0); ++ if (ret) ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_lock_killable_nested); ++#endif ++ ++int __lockfunc _mutex_trylock(struct mutex *lock) ++{ ++ int ret = rt_mutex_trylock(&lock->lock); ++ ++ if (ret) ++ mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); ++ ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_trylock); ++ ++void __lockfunc _mutex_unlock(struct mutex *lock) ++{ ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ rt_mutex_unlock(&lock->lock); ++} ++EXPORT_SYMBOL(_mutex_unlock); ++ ++/* ++ * rwlock_t functions ++ */ ++int __lockfunc rt_write_trylock(rwlock_t *rwlock) ++{ ++ int ret = rt_mutex_trylock(&rwlock->lock); ++ ++ migrate_disable(); ++ if (ret) ++ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); ++ else ++ migrate_enable(); ++ ++ return ret; ++} ++EXPORT_SYMBOL(rt_write_trylock); ++ ++int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags) ++{ ++ int ret; ++ ++ *flags = 0; ++ migrate_disable(); ++ ret = rt_write_trylock(rwlock); ++ if (!ret) ++ migrate_enable(); ++ return ret; ++} ++EXPORT_SYMBOL(rt_write_trylock_irqsave); ++ ++int 
__lockfunc rt_read_trylock(rwlock_t *rwlock) ++{ ++ struct rt_mutex *lock = &rwlock->lock; ++ int ret = 1; ++ ++ /* ++ * recursive read locks succeed when current owns the lock, ++ * but not when read_depth == 0 which means that the lock is ++ * write locked. ++ */ ++ migrate_disable(); ++ if (rt_mutex_owner(lock) != current) ++ ret = rt_mutex_trylock(lock); ++ else if (!rwlock->read_depth) ++ ret = 0; ++ ++ if (ret) { ++ rwlock->read_depth++; ++ rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_); ++ } else ++ migrate_enable(); ++ ++ return ret; ++} ++EXPORT_SYMBOL(rt_read_trylock); ++ ++void __lockfunc rt_write_lock(rwlock_t *rwlock) ++{ ++ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); ++ __rt_spin_lock(&rwlock->lock); ++} ++EXPORT_SYMBOL(rt_write_lock); ++ ++void __lockfunc rt_read_lock(rwlock_t *rwlock) ++{ ++ struct rt_mutex *lock = &rwlock->lock; ++ ++ rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_); ++ ++ /* ++ * recursive read locks succeed when current owns the lock ++ */ ++ if (rt_mutex_owner(lock) != current) ++ __rt_spin_lock(lock); ++ rwlock->read_depth++; ++} ++ ++EXPORT_SYMBOL(rt_read_lock); ++ ++void __lockfunc rt_write_unlock(rwlock_t *rwlock) ++{ ++ /* NOTE: we always pass in '1' for nested, for simplicity */ ++ rwlock_release(&rwlock->dep_map, 1, _RET_IP_); ++ __rt_spin_unlock(&rwlock->lock); ++} ++EXPORT_SYMBOL(rt_write_unlock); ++ ++void __lockfunc rt_read_unlock(rwlock_t *rwlock) ++{ ++ rwlock_release(&rwlock->dep_map, 1, _RET_IP_); ++ ++ /* Release the lock only when read_depth is down to 0 */ ++ if (--rwlock->read_depth == 0) ++ __rt_spin_unlock(&rwlock->lock); ++} ++EXPORT_SYMBOL(rt_read_unlock); ++ ++unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock) ++{ ++ rt_write_lock(rwlock); ++ ++ return 0; ++} ++EXPORT_SYMBOL(rt_write_lock_irqsave); ++ ++unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock) ++{ ++ rt_read_lock(rwlock); ++ ++ return 0; ++} ++EXPORT_SYMBOL(rt_read_lock_irqsave); ++ ++void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key) ++{ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ /* ++ * Make sure we are not reinitializing a held lock: ++ */ ++ debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock)); ++ lockdep_init_map(&rwlock->dep_map, name, key, 0); ++#endif ++ rwlock->lock.save_state = 1; ++ rwlock->read_depth = 0; ++} ++EXPORT_SYMBOL(__rt_rwlock_init); ++ ++/* ++ * rw_semaphores ++ */ ++ ++void rt_up_write(struct rw_semaphore *rwsem) ++{ ++ rwsem_release(&rwsem->dep_map, 1, _RET_IP_); ++ rt_mutex_unlock(&rwsem->lock); ++} ++EXPORT_SYMBOL(rt_up_write); ++ ++void rt_up_read(struct rw_semaphore *rwsem) ++{ ++ rwsem_release(&rwsem->dep_map, 1, _RET_IP_); ++ if (--rwsem->read_depth == 0) ++ rt_mutex_unlock(&rwsem->lock); ++} ++EXPORT_SYMBOL(rt_up_read); ++ ++/* ++ * downgrade a write lock into a read lock ++ * - just wake up any readers at the front of the queue ++ */ ++void rt_downgrade_write(struct rw_semaphore *rwsem) ++{ ++ BUG_ON(rt_mutex_owner(&rwsem->lock) != current); ++ rwsem->read_depth = 1; ++} ++EXPORT_SYMBOL(rt_downgrade_write); ++ ++int rt_down_write_trylock(struct rw_semaphore *rwsem) ++{ ++ int ret = rt_mutex_trylock(&rwsem->lock); ++ ++ if (ret) ++ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(rt_down_write_trylock); ++ ++void rt_down_write(struct rw_semaphore *rwsem) ++{ ++ rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_); ++ rt_mutex_lock(&rwsem->lock); ++} ++EXPORT_SYMBOL(rt_down_write); ++ ++void rt_down_write_nested(struct rw_semaphore *rwsem, 
int subclass) ++{ ++ rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_); ++ rt_mutex_lock(&rwsem->lock); ++} ++EXPORT_SYMBOL(rt_down_write_nested); ++ ++int rt_down_read_trylock(struct rw_semaphore *rwsem) ++{ ++ struct rt_mutex *lock = &rwsem->lock; ++ int ret = 1; ++ ++ /* ++ * recursive read locks succeed when current owns the rwsem, ++ * but not when read_depth == 0 which means that the rwsem is ++ * write locked. ++ */ ++ if (rt_mutex_owner(lock) != current) ++ ret = rt_mutex_trylock(&rwsem->lock); ++ else if (!rwsem->read_depth) ++ ret = 0; ++ ++ if (ret) { ++ rwsem->read_depth++; ++ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_); ++ } ++ return ret; ++} ++EXPORT_SYMBOL(rt_down_read_trylock); ++ ++static void __rt_down_read(struct rw_semaphore *rwsem, int subclass) ++{ ++ struct rt_mutex *lock = &rwsem->lock; ++ ++ rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_); ++ ++ if (rt_mutex_owner(lock) != current) ++ rt_mutex_lock(&rwsem->lock); ++ rwsem->read_depth++; ++} ++ ++void rt_down_read(struct rw_semaphore *rwsem) ++{ ++ __rt_down_read(rwsem, 0); ++} ++EXPORT_SYMBOL(rt_down_read); ++ ++void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass) ++{ ++ __rt_down_read(rwsem, subclass); ++} ++EXPORT_SYMBOL(rt_down_read_nested); ++ ++void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name, ++ struct lock_class_key *key) ++{ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ /* ++ * Make sure we are not reinitializing a held lock: ++ */ ++ debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem)); ++ lockdep_init_map(&rwsem->dep_map, name, key, 0); ++#endif ++ rwsem->read_depth = 0; ++ rwsem->lock.save_state = 0; ++} ++EXPORT_SYMBOL(__rt_rwsem_init); ++ ++/** ++ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0 ++ * @cnt: the atomic which we are to dec ++ * @lock: the mutex to return holding if we dec to 0 ++ * ++ * return true and hold lock if we dec to 0, return false otherwise ++ */ ++int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) ++{ ++ /* dec if we can't possibly hit 0 */ ++ if (atomic_add_unless(cnt, -1, 1)) ++ return 0; ++ /* we might hit 0, so take the lock */ ++ mutex_lock(lock); ++ if (!atomic_dec_and_test(cnt)) { ++ /* when we actually did the dec, we didn't hit 0 */ ++ mutex_unlock(lock); ++ return 0; ++ } ++ /* we hit 0, and we hold the lock */ ++ return 1; ++} ++EXPORT_SYMBOL(atomic_dec_and_mutex_lock); +Index: linux-stable/kernel/spinlock.c +=================================================================== +--- linux-stable.orig/kernel/spinlock.c ++++ linux-stable/kernel/spinlock.c +@@ -110,8 +110,11 @@ void __lockfunc __raw_##op##_lock_bh(loc + * __[spin|read|write]_lock_bh() + */ + BUILD_LOCK_OPS(spin, raw_spinlock); ++ ++#ifndef CONFIG_PREEMPT_RT_FULL + BUILD_LOCK_OPS(read, rwlock); + BUILD_LOCK_OPS(write, rwlock); ++#endif + + #endif + +@@ -195,6 +198,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_ + EXPORT_SYMBOL(_raw_spin_unlock_bh); + #endif + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + #ifndef CONFIG_INLINE_READ_TRYLOCK + int __lockfunc _raw_read_trylock(rwlock_t *lock) + { +@@ -339,6 +344,8 @@ void __lockfunc _raw_write_unlock_bh(rwl + EXPORT_SYMBOL(_raw_write_unlock_bh); + #endif + ++#endif /* !PREEMPT_RT_FULL */ ++ + #ifdef CONFIG_DEBUG_LOCK_ALLOC + + void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) +Index: linux-stable/lib/spinlock_debug.c +=================================================================== +--- linux-stable.orig/lib/spinlock_debug.c ++++ linux-stable/lib/spinlock_debug.c +@@ 
-31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t + + EXPORT_SYMBOL(__raw_spin_lock_init); + ++#ifndef CONFIG_PREEMPT_RT_FULL + void __rwlock_init(rwlock_t *lock, const char *name, + struct lock_class_key *key) + { +@@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const + } + + EXPORT_SYMBOL(__rwlock_init); ++#endif + + static void spin_dump(raw_spinlock_t *lock, const char *msg) + { +@@ -159,6 +161,7 @@ void do_raw_spin_unlock(raw_spinlock_t * + arch_spin_unlock(&lock->raw_lock); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + static void rwlock_bug(rwlock_t *lock, const char *msg) + { + if (!debug_locks_off()) +@@ -300,3 +303,5 @@ void do_raw_write_unlock(rwlock_t *lock) + debug_write_unlock(lock); + arch_write_unlock(&lock->raw_lock); + } ++ ++#endif diff --git a/debian/patches/features/all/rt/rt-add-rt-spinlock-to-headers.patch b/debian/patches/features/all/rt/rt-add-rt-spinlock-to-headers.patch new file mode 100644 index 000000000..e109d1b5b --- /dev/null +++ b/debian/patches/features/all/rt/rt-add-rt-spinlock-to-headers.patch @@ -0,0 +1,127 @@ +Subject: rt-add-rt-spinlocks.patch +From: Thomas Gleixner +Date: Wed, 29 Jun 2011 19:43:35 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/rwlock_types_rt.h | 33 ++++++++++++++++++++++++ + include/linux/spinlock_types.h | 11 +++++--- + include/linux/spinlock_types_rt.h | 51 ++++++++++++++++++++++++++++++++++++++ + 3 files changed, 92 insertions(+), 3 deletions(-) + +Index: linux-stable/include/linux/rwlock_types_rt.h +=================================================================== +--- /dev/null ++++ linux-stable/include/linux/rwlock_types_rt.h +@@ -0,0 +1,33 @@ ++#ifndef __LINUX_RWLOCK_TYPES_RT_H ++#define __LINUX_RWLOCK_TYPES_RT_H ++ ++#ifndef __LINUX_SPINLOCK_TYPES_H ++#error "Do not include directly. Include spinlock_types.h instead" ++#endif ++ ++/* ++ * rwlocks - rtmutex which allows single reader recursion ++ */ ++typedef struct { ++ struct rt_mutex lock; ++ int read_depth; ++ unsigned int break_lock; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++} rwlock_t; ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } ++#else ++# define RW_DEP_MAP_INIT(lockname) ++#endif ++ ++#define __RW_LOCK_UNLOCKED(name) \ ++ { .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock), \ ++ RW_DEP_MAP_INIT(name) } ++ ++#define DEFINE_RWLOCK(name) \ ++ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name) ++ ++#endif +Index: linux-stable/include/linux/spinlock_types.h +=================================================================== +--- linux-stable.orig/include/linux/spinlock_types.h ++++ linux-stable/include/linux/spinlock_types.h +@@ -11,8 +11,13 @@ + + #include + +-#include +- +-#include ++#ifndef CONFIG_PREEMPT_RT_FULL ++# include ++# include ++#else ++# include ++# include ++# include ++#endif + + #endif /* __LINUX_SPINLOCK_TYPES_H */ +Index: linux-stable/include/linux/spinlock_types_rt.h +=================================================================== +--- /dev/null ++++ linux-stable/include/linux/spinlock_types_rt.h +@@ -0,0 +1,51 @@ ++#ifndef __LINUX_SPINLOCK_TYPES_RT_H ++#define __LINUX_SPINLOCK_TYPES_RT_H ++ ++#ifndef __LINUX_SPINLOCK_TYPES_H ++#error "Do not include directly. 
Include spinlock_types.h instead" ++#endif ++ ++#include ++ ++/* ++ * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field: ++ */ ++typedef struct spinlock { ++ struct rt_mutex lock; ++ unsigned int break_lock; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++} spinlock_t; ++ ++#ifdef CONFIG_DEBUG_RT_MUTEXES ++# define __RT_SPIN_INITIALIZER(name) \ ++ { \ ++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ ++ .save_state = 1, \ ++ .file = __FILE__, \ ++ .line = __LINE__ , \ ++ } ++#else ++# define __RT_SPIN_INITIALIZER(name) \ ++ { \ ++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ ++ .save_state = 1, \ ++ } ++#endif ++ ++/* ++.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock) ++*/ ++ ++#define __SPIN_LOCK_UNLOCKED(name) \ ++ { .lock = __RT_SPIN_INITIALIZER(name.lock), \ ++ SPIN_DEP_MAP_INIT(name) } ++ ++#define __DEFINE_SPINLOCK(name) \ ++ spinlock_t name = __SPIN_LOCK_UNLOCKED(name) ++ ++#define DEFINE_SPINLOCK(name) \ ++ spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name) ++ ++#endif diff --git a/debian/patches/features/all/rt/rt-add-rt-to-mutex-headers.patch b/debian/patches/features/all/rt/rt-add-rt-to-mutex-headers.patch new file mode 100644 index 000000000..2afeda16f --- /dev/null +++ b/debian/patches/features/all/rt/rt-add-rt-to-mutex-headers.patch @@ -0,0 +1,146 @@ +Subject: rt-add-rt-to-mutex-headers.patch +From: Thomas Gleixner +Date: Wed, 29 Jun 2011 20:56:22 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/mutex.h | 21 +++++++---- + include/linux/mutex_rt.h | 84 +++++++++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 98 insertions(+), 7 deletions(-) + +Index: linux-stable/include/linux/mutex.h +=================================================================== +--- linux-stable.orig/include/linux/mutex.h ++++ linux-stable/include/linux/mutex.h +@@ -17,6 +17,17 @@ + + #include + ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ ++ , .dep_map = { .name = #lockname } ++#else ++# define __DEP_MAP_MUTEX_INITIALIZER(lockname) ++#endif ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++# include ++#else ++ + /* + * Simple, straightforward mutexes with strict semantics: + * +@@ -98,13 +109,6 @@ do { \ + static inline void mutex_destroy(struct mutex *lock) {} + #endif + +-#ifdef CONFIG_DEBUG_LOCK_ALLOC +-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ +- , .dep_map = { .name = #lockname } +-#else +-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) +-#endif +- + #define __MUTEX_INITIALIZER(lockname) \ + { .count = ATOMIC_INIT(1) \ + , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ +@@ -170,6 +174,9 @@ extern int __must_check mutex_lock_killa + */ + extern int mutex_trylock(struct mutex *lock); + extern void mutex_unlock(struct mutex *lock); ++ ++#endif /* !PREEMPT_RT_FULL */ ++ + extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); + + #ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX +Index: linux-stable/include/linux/mutex_rt.h +=================================================================== +--- /dev/null ++++ linux-stable/include/linux/mutex_rt.h +@@ -0,0 +1,84 @@ ++#ifndef __LINUX_MUTEX_RT_H ++#define __LINUX_MUTEX_RT_H ++ ++#ifndef __LINUX_MUTEX_H ++#error "Please include mutex.h" ++#endif ++ ++#include ++ ++/* FIXME: Just for __lockfunc */ ++#include ++ ++struct mutex { ++ struct rt_mutex lock; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC 
++ struct lockdep_map dep_map; ++#endif ++}; ++ ++#define __MUTEX_INITIALIZER(mutexname) \ ++ { \ ++ .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \ ++ __DEP_MAP_MUTEX_INITIALIZER(mutexname) \ ++ } ++ ++#define DEFINE_MUTEX(mutexname) \ ++ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) ++ ++extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key); ++extern void __lockfunc _mutex_lock(struct mutex *lock); ++extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock); ++extern int __lockfunc _mutex_lock_killable(struct mutex *lock); ++extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass); ++extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock); ++extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass); ++extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass); ++extern int __lockfunc _mutex_trylock(struct mutex *lock); ++extern void __lockfunc _mutex_unlock(struct mutex *lock); ++ ++#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock) ++#define mutex_lock(l) _mutex_lock(l) ++#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l) ++#define mutex_lock_killable(l) _mutex_lock_killable(l) ++#define mutex_trylock(l) _mutex_trylock(l) ++#define mutex_unlock(l) _mutex_unlock(l) ++#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock) ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s) ++# define mutex_lock_interruptible_nested(l, s) \ ++ _mutex_lock_interruptible_nested(l, s) ++# define mutex_lock_killable_nested(l, s) \ ++ _mutex_lock_killable_nested(l, s) ++ ++# define mutex_lock_nest_lock(lock, nest_lock) \ ++do { \ ++ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ ++ _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \ ++} while (0) ++ ++#else ++# define mutex_lock_nested(l, s) _mutex_lock(l) ++# define mutex_lock_interruptible_nested(l, s) \ ++ _mutex_lock_interruptible(l) ++# define mutex_lock_killable_nested(l, s) \ ++ _mutex_lock_killable(l) ++# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock) ++#endif ++ ++# define mutex_init(mutex) \ ++do { \ ++ static struct lock_class_key __key; \ ++ \ ++ rt_mutex_init(&(mutex)->lock); \ ++ __mutex_do_init((mutex), #mutex, &__key); \ ++} while (0) ++ ++# define __mutex_init(mutex, name, key) \ ++do { \ ++ rt_mutex_init(&(mutex)->lock); \ ++ __mutex_do_init((mutex), name, key); \ ++} while (0) ++ ++#endif diff --git a/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch b/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch new file mode 100644 index 000000000..cd399a5c4 --- /dev/null +++ b/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch @@ -0,0 +1,31 @@ +Subject: rt: Introduce cpu_chill() +From: Thomas Gleixner +Date: Wed, 07 Mar 2012 20:51:03 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Retry loops on RT might loop forever when the modifying side was +preempted. Add cpu_chill() to replace cpu_relax(). cpu_chill() +defaults to cpu_relax() for non RT. On RT it puts the looping task to +sleep for a tick so the preempted task can make progress. 
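As a rough, illustrative sketch (not part of the patch itself), the kind of retry loop this helps would simply swap cpu_relax() for cpu_chill(); the flag word and bit name below are hypothetical:

	/* Spin until another context clears the (hypothetical) BUSY bit. */
	while (test_bit(EXAMPLE_BUSY_BIT, &example_flags))
		cpu_chill();	/* msleep(1) on PREEMPT_RT_FULL, cpu_relax() otherwise */
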
+ +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + include/linux/delay.h | 6 ++++++ + 1 file changed, 6 insertions(+) + +Index: linux-stable/include/linux/delay.h +=================================================================== +--- linux-stable.orig/include/linux/delay.h ++++ linux-stable/include/linux/delay.h +@@ -52,4 +52,10 @@ static inline void ssleep(unsigned int s + msleep(seconds * 1000); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define cpu_chill() msleep(1) ++#else ++# define cpu_chill() cpu_relax() ++#endif ++ + #endif /* defined(_LINUX_DELAY_H) */ diff --git a/debian/patches/features/all/rt/rt-local-irq-lock.patch b/debian/patches/features/all/rt/rt-local-irq-lock.patch new file mode 100644 index 000000000..e19973229 --- /dev/null +++ b/debian/patches/features/all/rt/rt-local-irq-lock.patch @@ -0,0 +1,269 @@ +Subject: rt-local-irq-lock.patch +From: Thomas Gleixner +Date: Mon, 20 Jun 2011 09:03:47 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/locallock.h | 254 ++++++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 254 insertions(+) + +Index: linux-stable/include/linux/locallock.h +=================================================================== +--- /dev/null ++++ linux-stable/include/linux/locallock.h +@@ -0,0 +1,254 @@ ++#ifndef _LINUX_LOCALLOCK_H ++#define _LINUX_LOCALLOCK_H ++ ++#include ++#include ++ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ ++#ifdef CONFIG_DEBUG_SPINLOCK ++# define LL_WARN(cond) WARN_ON(cond) ++#else ++# define LL_WARN(cond) do { } while (0) ++#endif ++ ++/* ++ * per cpu lock based substitute for local_irq_*() ++ */ ++struct local_irq_lock { ++ spinlock_t lock; ++ struct task_struct *owner; ++ int nestcnt; ++ unsigned long flags; ++}; ++ ++#define DEFINE_LOCAL_IRQ_LOCK(lvar) \ ++ DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \ ++ .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) } ++ ++#define DECLARE_LOCAL_IRQ_LOCK(lvar) \ ++ DECLARE_PER_CPU(struct local_irq_lock, lvar) ++ ++#define local_irq_lock_init(lvar) \ ++ do { \ ++ int __cpu; \ ++ for_each_possible_cpu(__cpu) \ ++ spin_lock_init(&per_cpu(lvar, __cpu).lock); \ ++ } while (0) ++ ++static inline void __local_lock(struct local_irq_lock *lv) ++{ ++ if (lv->owner != current) { ++ spin_lock(&lv->lock); ++ LL_WARN(lv->owner); ++ LL_WARN(lv->nestcnt); ++ lv->owner = current; ++ } ++ lv->nestcnt++; ++} ++ ++#define local_lock(lvar) \ ++ do { __local_lock(&get_local_var(lvar)); } while (0) ++ ++static inline int __local_trylock(struct local_irq_lock *lv) ++{ ++ if (lv->owner != current && spin_trylock(&lv->lock)) { ++ LL_WARN(lv->owner); ++ LL_WARN(lv->nestcnt); ++ lv->owner = current; ++ lv->nestcnt = 1; ++ return 1; ++ } ++ return 0; ++} ++ ++#define local_trylock(lvar) \ ++ ({ \ ++ int __locked; \ ++ __locked = __local_trylock(&get_local_var(lvar)); \ ++ if (!__locked) \ ++ put_local_var(lvar); \ ++ __locked; \ ++ }) ++ ++static inline void __local_unlock(struct local_irq_lock *lv) ++{ ++ LL_WARN(lv->nestcnt == 0); ++ LL_WARN(lv->owner != current); ++ if (--lv->nestcnt) ++ return; ++ ++ lv->owner = NULL; ++ spin_unlock(&lv->lock); ++} ++ ++#define local_unlock(lvar) \ ++ do { \ ++ __local_unlock(&__get_cpu_var(lvar)); \ ++ put_local_var(lvar); \ ++ } while (0) ++ ++static inline void __local_lock_irq(struct local_irq_lock *lv) ++{ ++ spin_lock_irqsave(&lv->lock, lv->flags); ++ LL_WARN(lv->owner); ++ LL_WARN(lv->nestcnt); ++ lv->owner = current; ++ lv->nestcnt = 1; ++} ++ 
++#define local_lock_irq(lvar) \ ++ do { __local_lock_irq(&get_local_var(lvar)); } while (0) ++ ++#define local_lock_irq_on(lvar, cpu) \ ++ do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0) ++ ++static inline void __local_unlock_irq(struct local_irq_lock *lv) ++{ ++ LL_WARN(!lv->nestcnt); ++ LL_WARN(lv->owner != current); ++ lv->owner = NULL; ++ lv->nestcnt = 0; ++ spin_unlock_irq(&lv->lock); ++} ++ ++#define local_unlock_irq(lvar) \ ++ do { \ ++ __local_unlock_irq(&__get_cpu_var(lvar)); \ ++ put_local_var(lvar); \ ++ } while (0) ++ ++#define local_unlock_irq_on(lvar, cpu) \ ++ do { \ ++ __local_unlock_irq(&per_cpu(lvar, cpu)); \ ++ } while (0) ++ ++static inline int __local_lock_irqsave(struct local_irq_lock *lv) ++{ ++ if (lv->owner != current) { ++ __local_lock_irq(lv); ++ return 0; ++ } else { ++ lv->nestcnt++; ++ return 1; ++ } ++} ++ ++#define local_lock_irqsave(lvar, _flags) \ ++ do { \ ++ if (__local_lock_irqsave(&get_local_var(lvar))) \ ++ put_local_var(lvar); \ ++ _flags = __get_cpu_var(lvar).flags; \ ++ } while (0) ++ ++#define local_lock_irqsave_on(lvar, _flags, cpu) \ ++ do { \ ++ __local_lock_irqsave(&per_cpu(lvar, cpu)); \ ++ _flags = per_cpu(lvar, cpu).flags; \ ++ } while (0) ++ ++static inline int __local_unlock_irqrestore(struct local_irq_lock *lv, ++ unsigned long flags) ++{ ++ LL_WARN(!lv->nestcnt); ++ LL_WARN(lv->owner != current); ++ if (--lv->nestcnt) ++ return 0; ++ ++ lv->owner = NULL; ++ spin_unlock_irqrestore(&lv->lock, lv->flags); ++ return 1; ++} ++ ++#define local_unlock_irqrestore(lvar, flags) \ ++ do { \ ++ if (__local_unlock_irqrestore(&__get_cpu_var(lvar), flags)) \ ++ put_local_var(lvar); \ ++ } while (0) ++ ++#define local_unlock_irqrestore_on(lvar, flags, cpu) \ ++ do { \ ++ __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags); \ ++ } while (0) ++ ++#define local_spin_trylock_irq(lvar, lock) \ ++ ({ \ ++ int __locked; \ ++ local_lock_irq(lvar); \ ++ __locked = spin_trylock(lock); \ ++ if (!__locked) \ ++ local_unlock_irq(lvar); \ ++ __locked; \ ++ }) ++ ++#define local_spin_lock_irq(lvar, lock) \ ++ do { \ ++ local_lock_irq(lvar); \ ++ spin_lock(lock); \ ++ } while (0) ++ ++#define local_spin_unlock_irq(lvar, lock) \ ++ do { \ ++ spin_unlock(lock); \ ++ local_unlock_irq(lvar); \ ++ } while (0) ++ ++#define local_spin_lock_irqsave(lvar, lock, flags) \ ++ do { \ ++ local_lock_irqsave(lvar, flags); \ ++ spin_lock(lock); \ ++ } while (0) ++ ++#define local_spin_unlock_irqrestore(lvar, lock, flags) \ ++ do { \ ++ spin_unlock(lock); \ ++ local_unlock_irqrestore(lvar, flags); \ ++ } while (0) ++ ++#define get_locked_var(lvar, var) \ ++ (*({ \ ++ local_lock(lvar); \ ++ &__get_cpu_var(var); \ ++ })) ++ ++#define put_locked_var(lvar, var) local_unlock(lvar) ++ ++#define local_lock_cpu(lvar) \ ++ ({ \ ++ local_lock(lvar); \ ++ smp_processor_id(); \ ++ }) ++ ++#define local_unlock_cpu(lvar) local_unlock(lvar) ++ ++#else /* PREEMPT_RT_BASE */ ++ ++#define DEFINE_LOCAL_IRQ_LOCK(lvar) __typeof__(const int) lvar ++#define DECLARE_LOCAL_IRQ_LOCK(lvar) extern __typeof__(const int) lvar ++ ++static inline void local_irq_lock_init(int lvar) { } ++ ++#define local_lock(lvar) preempt_disable() ++#define local_unlock(lvar) preempt_enable() ++#define local_lock_irq(lvar) local_irq_disable() ++#define local_unlock_irq(lvar) local_irq_enable() ++#define local_lock_irqsave(lvar, flags) local_irq_save(flags) ++#define local_unlock_irqrestore(lvar, flags) local_irq_restore(flags) ++ ++#define local_spin_trylock_irq(lvar, lock) spin_trylock_irq(lock) ++#define 
local_spin_lock_irq(lvar, lock) spin_lock_irq(lock) ++#define local_spin_unlock_irq(lvar, lock) spin_unlock_irq(lock) ++#define local_spin_lock_irqsave(lvar, lock, flags) \ ++ spin_lock_irqsave(lock, flags) ++#define local_spin_unlock_irqrestore(lvar, lock, flags) \ ++ spin_unlock_irqrestore(lock, flags) ++ ++#define get_locked_var(lvar, var) get_cpu_var(var) ++#define put_locked_var(lvar, var) put_cpu_var(var) ++ ++#define local_lock_cpu(lvar) get_cpu() ++#define local_unlock_cpu(lvar) put_cpu() ++ ++#endif ++ ++#endif diff --git a/debian/patches/features/all/rt/rt-mutex-add-sleeping-spinlocks-support.patch b/debian/patches/features/all/rt/rt-mutex-add-sleeping-spinlocks-support.patch new file mode 100644 index 000000000..28c125194 --- /dev/null +++ b/debian/patches/features/all/rt/rt-mutex-add-sleeping-spinlocks-support.patch @@ -0,0 +1,628 @@ +Subject: rt-mutex-add-sleeping-spinlocks-support.patch +From: Thomas Gleixner +Date: Fri, 10 Jun 2011 11:21:25 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/rtmutex.h | 35 +++- + kernel/futex.c | 3 + kernel/rtmutex.c | 384 +++++++++++++++++++++++++++++++++++++++++++++--- + kernel/rtmutex_common.h | 9 + + 4 files changed, 404 insertions(+), 27 deletions(-) + +Index: linux-stable/include/linux/rtmutex.h +=================================================================== +--- linux-stable.orig/include/linux/rtmutex.h ++++ linux-stable/include/linux/rtmutex.h +@@ -29,9 +29,10 @@ struct rt_mutex { + raw_spinlock_t wait_lock; + struct plist_head wait_list; + struct task_struct *owner; +-#ifdef CONFIG_DEBUG_RT_MUTEXES + int save_state; +- const char *name, *file; ++#ifdef CONFIG_DEBUG_RT_MUTEXES ++ const char *file; ++ const char *name; + int line; + void *magic; + #endif +@@ -56,19 +57,39 @@ struct hrtimer_sleeper; + #ifdef CONFIG_DEBUG_RT_MUTEXES + # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ + , .name = #mutexname, .file = __FILE__, .line = __LINE__ +-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__) ++ ++# define rt_mutex_init(mutex) \ ++ do { \ ++ raw_spin_lock_init(&(mutex)->wait_lock); \ ++ __rt_mutex_init(mutex, #mutex); \ ++ } while (0) ++ + extern void rt_mutex_debug_task_free(struct task_struct *tsk); + #else + # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) +-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL) ++ ++# define rt_mutex_init(mutex) \ ++ do { \ ++ raw_spin_lock_init(&(mutex)->wait_lock); \ ++ __rt_mutex_init(mutex, #mutex); \ ++ } while (0) ++ + # define rt_mutex_debug_task_free(t) do { } while (0) + #endif + +-#define __RT_MUTEX_INITIALIZER(mutexname) \ +- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ ++#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ ++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ + , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list) \ + , .owner = NULL \ +- __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} ++ __DEBUG_RT_MUTEX_INITIALIZER(mutexname) ++ ++ ++#define __RT_MUTEX_INITIALIZER(mutexname) \ ++ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) } ++ ++#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \ ++ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ ++ , .save_state = 1 } + + #define DEFINE_RT_MUTEX(mutexname) \ + struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname) +Index: linux-stable/kernel/futex.c +=================================================================== +--- linux-stable.orig/kernel/futex.c ++++ linux-stable/kernel/futex.c +@@ 
-2325,8 +2325,7 @@ static int futex_wait_requeue_pi(u32 __u + * The waiter is allocated on our stack, manipulated by the requeue + * code while we sleep on uaddr. + */ +- debug_rt_mutex_init_waiter(&rt_waiter); +- rt_waiter.task = NULL; ++ rt_mutex_init_waiter(&rt_waiter, false); + + ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE); + if (unlikely(ret != 0)) +Index: linux-stable/kernel/rtmutex.c +=================================================================== +--- linux-stable.orig/kernel/rtmutex.c ++++ linux-stable/kernel/rtmutex.c +@@ -8,6 +8,12 @@ + * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt + * Copyright (C) 2006 Esben Nielsen + * ++ * Adaptive Spinlocks: ++ * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich, ++ * and Peter Morreale, ++ * Adaptive Spinlocks simplification: ++ * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt ++ * + * See Documentation/rt-mutex-design.txt for details. + */ + #include +@@ -97,6 +103,12 @@ static inline void mark_rt_mutex_waiters + } + #endif + ++static inline void init_lists(struct rt_mutex *lock) ++{ ++ if (unlikely(!lock->wait_list.node_list.prev)) ++ plist_head_init(&lock->wait_list); ++} ++ + /* + * Calculate task priority from the waiter list priority + * +@@ -143,6 +155,14 @@ static void rt_mutex_adjust_prio(struct + raw_spin_unlock_irqrestore(&task->pi_lock, flags); + } + ++static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter) ++{ ++ if (waiter->savestate) ++ wake_up_lock_sleeper(waiter->task); ++ else ++ wake_up_process(waiter->task); ++} ++ + /* + * Max number of times we'll walk the boosting chain: + */ +@@ -254,13 +274,15 @@ static int rt_mutex_adjust_prio_chain(st + /* Release the task */ + raw_spin_unlock_irqrestore(&task->pi_lock, flags); + if (!rt_mutex_owner(lock)) { ++ struct rt_mutex_waiter *lock_top_waiter; ++ + /* + * If the requeue above changed the top waiter, then we need + * to wake the new top waiter up to try to get the lock. + */ +- +- if (top_waiter != rt_mutex_top_waiter(lock)) +- wake_up_process(rt_mutex_top_waiter(lock)->task); ++ lock_top_waiter = rt_mutex_top_waiter(lock); ++ if (top_waiter != lock_top_waiter) ++ rt_mutex_wake_waiter(lock_top_waiter); + raw_spin_unlock(&lock->wait_lock); + goto out_put_task; + } +@@ -305,6 +327,25 @@ static int rt_mutex_adjust_prio_chain(st + return ret; + } + ++ ++#define STEAL_NORMAL 0 ++#define STEAL_LATERAL 1 ++ ++/* ++ * Note that RT tasks are excluded from lateral-steals to prevent the ++ * introduction of an unbounded latency ++ */ ++static inline int lock_is_stealable(struct task_struct *task, ++ struct task_struct *pendowner, int mode) ++{ ++ if (mode == STEAL_NORMAL || rt_task(task)) { ++ if (task->prio >= pendowner->prio) ++ return 0; ++ } else if (task->prio > pendowner->prio) ++ return 0; ++ return 1; ++} ++ + /* + * Try to take an rt-mutex + * +@@ -314,8 +355,9 @@ static int rt_mutex_adjust_prio_chain(st + * @task: the task which wants to acquire the lock + * @waiter: the waiter that is queued to the lock's wait list. 
(could be NULL) + */ +-static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, +- struct rt_mutex_waiter *waiter) ++static int ++__try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, ++ struct rt_mutex_waiter *waiter, int mode) + { + /* + * We have to be careful here if the atomic speedups are +@@ -348,12 +390,14 @@ static int try_to_take_rt_mutex(struct r + * 3) it is top waiter + */ + if (rt_mutex_has_waiters(lock)) { +- if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) { +- if (!waiter || waiter != rt_mutex_top_waiter(lock)) +- return 0; +- } ++ struct task_struct *pown = rt_mutex_top_waiter(lock)->task; ++ ++ if (task != pown && !lock_is_stealable(task, pown, mode)) ++ return 0; + } + ++ /* We got the lock. */ ++ + if (waiter || rt_mutex_has_waiters(lock)) { + unsigned long flags; + struct rt_mutex_waiter *top; +@@ -378,7 +422,6 @@ static int try_to_take_rt_mutex(struct r + raw_spin_unlock_irqrestore(&task->pi_lock, flags); + } + +- /* We got the lock. */ + debug_rt_mutex_lock(lock); + + rt_mutex_set_owner(lock, task); +@@ -388,6 +431,13 @@ static int try_to_take_rt_mutex(struct r + return 1; + } + ++static inline int ++try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, ++ struct rt_mutex_waiter *waiter) ++{ ++ return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL); ++} ++ + /* + * Task blocks on lock. + * +@@ -502,7 +552,7 @@ static void wakeup_next_waiter(struct rt + + raw_spin_unlock_irqrestore(¤t->pi_lock, flags); + +- wake_up_process(waiter->task); ++ rt_mutex_wake_waiter(waiter); + } + + /* +@@ -581,18 +631,315 @@ void rt_mutex_adjust_pi(struct task_stru + return; + } + +- raw_spin_unlock_irqrestore(&task->pi_lock, flags); +- + /* gets dropped in rt_mutex_adjust_prio_chain()! */ + get_task_struct(task); ++ raw_spin_unlock_irqrestore(&task->pi_lock, flags); + rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * preemptible spin_lock functions: ++ */ ++static inline void rt_spin_lock_fastlock(struct rt_mutex *lock, ++ void (*slowfn)(struct rt_mutex *lock)) ++{ ++ might_sleep(); ++ ++ if (likely(rt_mutex_cmpxchg(lock, NULL, current))) ++ rt_mutex_deadlock_account_lock(lock, current); ++ else ++ slowfn(lock); ++} ++ ++static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock, ++ void (*slowfn)(struct rt_mutex *lock)) ++{ ++ if (likely(rt_mutex_cmpxchg(lock, current, NULL))) ++ rt_mutex_deadlock_account_unlock(current); ++ else ++ slowfn(lock); ++} ++ ++#ifdef CONFIG_SMP ++/* ++ * Note that owner is a speculative pointer and dereferencing relies ++ * on rcu_read_lock() and the check against the lock owner. ++ */ ++static int adaptive_wait(struct rt_mutex *lock, ++ struct task_struct *owner) ++{ ++ int res = 0; ++ ++ rcu_read_lock(); ++ for (;;) { ++ if (owner != rt_mutex_owner(lock)) ++ break; ++ /* ++ * Ensure that owner->on_cpu is dereferenced _after_ ++ * checking the above to be valid. ++ */ ++ barrier(); ++ if (!owner->on_cpu) { ++ res = 1; ++ break; ++ } ++ cpu_relax(); ++ } ++ rcu_read_unlock(); ++ return res; ++} ++#else ++static int adaptive_wait(struct rt_mutex *lock, ++ struct task_struct *orig_owner) ++{ ++ return 1; ++} ++#endif ++ ++# define pi_lock(lock) raw_spin_lock_irq(lock) ++# define pi_unlock(lock) raw_spin_unlock_irq(lock) ++ ++/* ++ * Slow path lock function spin_lock style: this variant is very ++ * careful not to miss any non-lock wakeups. 
++ * ++ * We store the current state under p->pi_lock in p->saved_state and ++ * the try_to_wake_up() code handles this accordingly. ++ */ ++static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock) ++{ ++ struct task_struct *lock_owner, *self = current; ++ struct rt_mutex_waiter waiter, *top_waiter; ++ int ret; ++ ++ rt_mutex_init_waiter(&waiter, true); ++ ++ raw_spin_lock(&lock->wait_lock); ++ init_lists(lock); ++ ++ if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) { ++ raw_spin_unlock(&lock->wait_lock); ++ return; ++ } ++ ++ BUG_ON(rt_mutex_owner(lock) == self); ++ ++ /* ++ * We save whatever state the task is in and we'll restore it ++ * after acquiring the lock taking real wakeups into account ++ * as well. We are serialized via pi_lock against wakeups. See ++ * try_to_wake_up(). ++ */ ++ pi_lock(&self->pi_lock); ++ self->saved_state = self->state; ++ __set_current_state(TASK_UNINTERRUPTIBLE); ++ pi_unlock(&self->pi_lock); ++ ++ ret = task_blocks_on_rt_mutex(lock, &waiter, self, 0); ++ BUG_ON(ret); ++ ++ for (;;) { ++ /* Try to acquire the lock again. */ ++ if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL)) ++ break; ++ ++ top_waiter = rt_mutex_top_waiter(lock); ++ lock_owner = rt_mutex_owner(lock); ++ ++ raw_spin_unlock(&lock->wait_lock); ++ ++ debug_rt_mutex_print_deadlock(&waiter); ++ ++ if (top_waiter != &waiter || adaptive_wait(lock, lock_owner)) ++ schedule_rt_mutex(lock); ++ ++ raw_spin_lock(&lock->wait_lock); ++ ++ pi_lock(&self->pi_lock); ++ __set_current_state(TASK_UNINTERRUPTIBLE); ++ pi_unlock(&self->pi_lock); ++ } ++ ++ /* ++ * Restore the task state to current->saved_state. We set it ++ * to the original state above and the try_to_wake_up() code ++ * has possibly updated it when a real (non-rtmutex) wakeup ++ * happened while we were blocked. Clear saved_state so ++ * try_to_wakeup() does not get confused. ++ */ ++ pi_lock(&self->pi_lock); ++ __set_current_state(self->saved_state); ++ self->saved_state = TASK_RUNNING; ++ pi_unlock(&self->pi_lock); ++ ++ /* ++ * try_to_take_rt_mutex() sets the waiter bit ++ * unconditionally. 
We might have to fix that up: ++ */ ++ fixup_rt_mutex_waiters(lock); ++ ++ BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock)); ++ BUG_ON(!plist_node_empty(&waiter.list_entry)); ++ ++ raw_spin_unlock(&lock->wait_lock); ++ ++ debug_rt_mutex_free_waiter(&waiter); ++} ++ ++/* ++ * Slow path to release a rt_mutex spin_lock style ++ */ ++static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock) ++{ ++ raw_spin_lock(&lock->wait_lock); ++ ++ debug_rt_mutex_unlock(lock); ++ ++ rt_mutex_deadlock_account_unlock(current); ++ ++ if (!rt_mutex_has_waiters(lock)) { ++ lock->owner = NULL; ++ raw_spin_unlock(&lock->wait_lock); ++ return; ++ } ++ ++ wakeup_next_waiter(lock); ++ ++ raw_spin_unlock(&lock->wait_lock); ++ ++ /* Undo pi boosting.when necessary */ ++ rt_mutex_adjust_prio(current); ++} ++ ++void __lockfunc rt_spin_lock(spinlock_t *lock) ++{ ++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); ++ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); ++} ++EXPORT_SYMBOL(rt_spin_lock); ++ ++void __lockfunc __rt_spin_lock(struct rt_mutex *lock) ++{ ++ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock); ++} ++EXPORT_SYMBOL(__rt_spin_lock); ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass) ++{ ++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock); ++ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); ++} ++EXPORT_SYMBOL(rt_spin_lock_nested); ++#endif ++ ++void __lockfunc rt_spin_unlock(spinlock_t *lock) ++{ ++ /* NOTE: we always pass in '1' for nested, for simplicity */ ++ spin_release(&lock->dep_map, 1, _RET_IP_); ++ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock); ++} ++EXPORT_SYMBOL(rt_spin_unlock); ++ ++void __lockfunc __rt_spin_unlock(struct rt_mutex *lock) ++{ ++ rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock); ++} ++EXPORT_SYMBOL(__rt_spin_unlock); ++ ++/* ++ * Wait for the lock to get unlocked: instead of polling for an unlock ++ * (like raw spinlocks do), we lock and unlock, to force the kernel to ++ * schedule if there's contention: ++ */ ++void __lockfunc rt_spin_unlock_wait(spinlock_t *lock) ++{ ++ spin_lock(lock); ++ spin_unlock(lock); ++} ++EXPORT_SYMBOL(rt_spin_unlock_wait); ++ ++int __lockfunc rt_spin_trylock(spinlock_t *lock) ++{ ++ int ret; ++ ++ migrate_disable(); ++ ret = rt_mutex_trylock(&lock->lock); ++ if (ret) ++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); ++ else ++ migrate_enable(); ++ ++ return ret; ++} ++EXPORT_SYMBOL(rt_spin_trylock); ++ ++int __lockfunc rt_spin_trylock_bh(spinlock_t *lock) ++{ ++ int ret; ++ ++ local_bh_disable(); ++ ret = rt_mutex_trylock(&lock->lock); ++ if (ret) { ++ migrate_disable(); ++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); ++ } else ++ local_bh_enable(); ++ return ret; ++} ++EXPORT_SYMBOL(rt_spin_trylock_bh); ++ ++int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags) ++{ ++ int ret; ++ ++ *flags = 0; ++ migrate_disable(); ++ ret = rt_mutex_trylock(&lock->lock); ++ if (ret) ++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); ++ else ++ migrate_enable(); ++ return ret; ++} ++EXPORT_SYMBOL(rt_spin_trylock_irqsave); ++ ++int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock) ++{ ++ /* Subtract 1 from counter unless that drops it to 0 (ie. 
it was 1) */ ++ if (atomic_add_unless(atomic, -1, 1)) ++ return 0; ++ migrate_disable(); ++ rt_spin_lock(lock); ++ if (atomic_dec_and_test(atomic)) ++ return 1; ++ rt_spin_unlock(lock); ++ migrate_enable(); ++ return 0; ++} ++EXPORT_SYMBOL(atomic_dec_and_spin_lock); ++ ++void ++__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key) ++{ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ /* ++ * Make sure we are not reinitializing a held lock: ++ */ ++ debug_check_no_locks_freed((void *)lock, sizeof(*lock)); ++ lockdep_init_map(&lock->dep_map, name, key, 0); ++#endif ++} ++EXPORT_SYMBOL(__rt_spin_lock_init); ++ ++#endif /* PREEMPT_RT_FULL */ ++ + /** + * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop + * @lock: the rt_mutex to take + * @state: the state the task should block in (TASK_INTERRUPTIBLE +- * or TASK_UNINTERRUPTIBLE) ++ * or TASK_UNINTERRUPTIBLE) + * @timeout: the pre-initialized and started timer, or NULL for none + * @waiter: the pre-initialized rt_mutex_waiter + * +@@ -648,9 +995,10 @@ rt_mutex_slowlock(struct rt_mutex *lock, + struct rt_mutex_waiter waiter; + int ret = 0; + +- debug_rt_mutex_init_waiter(&waiter); ++ rt_mutex_init_waiter(&waiter, false); + + raw_spin_lock(&lock->wait_lock); ++ init_lists(lock); + + /* Try to acquire the lock again: */ + if (try_to_take_rt_mutex(lock, current, NULL)) { +@@ -703,6 +1051,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lo + int ret = 0; + + raw_spin_lock(&lock->wait_lock); ++ init_lists(lock); + + if (likely(rt_mutex_owner(lock) != current)) { + +@@ -935,12 +1284,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy); + void __rt_mutex_init(struct rt_mutex *lock, const char *name) + { + lock->owner = NULL; +- raw_spin_lock_init(&lock->wait_lock); + plist_head_init(&lock->wait_list); + + debug_rt_mutex_init(lock, name); + } +-EXPORT_SYMBOL_GPL(__rt_mutex_init); ++EXPORT_SYMBOL(__rt_mutex_init); + + /** + * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a +@@ -955,7 +1303,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init); + void rt_mutex_init_proxy_locked(struct rt_mutex *lock, + struct task_struct *proxy_owner) + { +- __rt_mutex_init(lock, NULL); ++ rt_mutex_init(lock); + debug_rt_mutex_proxy_lock(lock, proxy_owner); + rt_mutex_set_owner(lock, proxy_owner); + rt_mutex_deadlock_account_lock(lock, proxy_owner); +Index: linux-stable/kernel/rtmutex_common.h +=================================================================== +--- linux-stable.orig/kernel/rtmutex_common.h ++++ linux-stable/kernel/rtmutex_common.h +@@ -49,6 +49,7 @@ struct rt_mutex_waiter { + struct plist_node pi_list_entry; + struct task_struct *task; + struct rt_mutex *lock; ++ bool savestate; + #ifdef CONFIG_DEBUG_RT_MUTEXES + unsigned long ip; + struct pid *deadlock_task_pid; +@@ -126,4 +127,12 @@ extern int rt_mutex_finish_proxy_lock(st + # include "rtmutex.h" + #endif + ++static inline void ++rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate) ++{ ++ debug_rt_mutex_init_waiter(waiter); ++ waiter->task = NULL; ++ waiter->savestate = savestate; ++} ++ + #endif diff --git a/debian/patches/features/all/rt/rt-preempt-base-config.patch b/debian/patches/features/all/rt/rt-preempt-base-config.patch new file mode 100644 index 000000000..b45044125 --- /dev/null +++ b/debian/patches/features/all/rt/rt-preempt-base-config.patch @@ -0,0 +1,52 @@ +Subject: rt-preempt-base-config.patch +From: Thomas Gleixner +Date: Fri, 17 Jun 2011 12:39:57 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + 
+Signed-off-by: Thomas Gleixner +--- + kernel/Kconfig.preempt | 19 +++++++++++++++++-- + 1 file changed, 17 insertions(+), 2 deletions(-) + +Index: linux-stable/kernel/Kconfig.preempt +=================================================================== +--- linux-stable.orig/kernel/Kconfig.preempt ++++ linux-stable/kernel/Kconfig.preempt +@@ -1,3 +1,10 @@ ++config PREEMPT ++ bool ++ select PREEMPT_COUNT ++ ++config PREEMPT_RT_BASE ++ bool ++ select PREEMPT + + choice + prompt "Preemption Model" +@@ -33,9 +40,9 @@ config PREEMPT_VOLUNTARY + + Select this if you are building a kernel for a desktop system. + +-config PREEMPT ++config PREEMPT__LL + bool "Preemptible Kernel (Low-Latency Desktop)" +- select PREEMPT_COUNT ++ select PREEMPT + select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK + help + This option reduces the latency of the kernel by making +@@ -52,6 +59,14 @@ config PREEMPT + embedded system with latency requirements in the milliseconds + range. + ++config PREEMPT_RTB ++ bool "Preemptible Kernel (Basic RT)" ++ select PREEMPT_RT_BASE ++ help ++ This option is basically the same as (Low-Latency Desktop) but ++ enables changes which are preliminary for the full preemptiple ++ RT kernel. ++ + endchoice + + config PREEMPT_COUNT diff --git a/debian/patches/features/all/rt/rt-rw-lockdep-annotations.patch b/debian/patches/features/all/rt/rt-rw-lockdep-annotations.patch new file mode 100644 index 000000000..89eb9d23d --- /dev/null +++ b/debian/patches/features/all/rt/rt-rw-lockdep-annotations.patch @@ -0,0 +1,136 @@ +Subject: rt: rwsem/rwlock: lockdep annotations +From: Thomas Gleixner +Date: Fri, 28 Sep 2012 10:49:42 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +rwlocks and rwsems on RT do not allow multiple readers. Annotate the +lockdep acquire functions accordingly. + +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + kernel/rt.c | 53 ++++++++++++++++++++++++++++++++--------------------- + 1 file changed, 32 insertions(+), 21 deletions(-) + +Index: linux-stable/kernel/rt.c +=================================================================== +--- linux-stable.orig/kernel/rt.c ++++ linux-stable/kernel/rt.c +@@ -216,15 +216,17 @@ int __lockfunc rt_read_trylock(rwlock_t + * write locked. 
+ */ + migrate_disable(); +- if (rt_mutex_owner(lock) != current) ++ if (rt_mutex_owner(lock) != current) { + ret = rt_mutex_trylock(lock); +- else if (!rwlock->read_depth) ++ if (ret) ++ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); ++ } else if (!rwlock->read_depth) { + ret = 0; ++ } + +- if (ret) { ++ if (ret) + rwlock->read_depth++; +- rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_); +- } else ++ else + migrate_enable(); + + return ret; +@@ -242,13 +244,13 @@ void __lockfunc rt_read_lock(rwlock_t *r + { + struct rt_mutex *lock = &rwlock->lock; + +- rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_); +- + /* + * recursive read locks succeed when current owns the lock + */ +- if (rt_mutex_owner(lock) != current) ++ if (rt_mutex_owner(lock) != current) { ++ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); + __rt_spin_lock(lock); ++ } + rwlock->read_depth++; + } + +@@ -264,11 +266,11 @@ EXPORT_SYMBOL(rt_write_unlock); + + void __lockfunc rt_read_unlock(rwlock_t *rwlock) + { +- rwlock_release(&rwlock->dep_map, 1, _RET_IP_); +- + /* Release the lock only when read_depth is down to 0 */ +- if (--rwlock->read_depth == 0) ++ if (--rwlock->read_depth == 0) { ++ rwlock_release(&rwlock->dep_map, 1, _RET_IP_); + __rt_spin_unlock(&rwlock->lock); ++ } + } + EXPORT_SYMBOL(rt_read_unlock); + +@@ -315,9 +317,10 @@ EXPORT_SYMBOL(rt_up_write); + + void rt_up_read(struct rw_semaphore *rwsem) + { +- rwsem_release(&rwsem->dep_map, 1, _RET_IP_); +- if (--rwsem->read_depth == 0) ++ if (--rwsem->read_depth == 0) { ++ rwsem_release(&rwsem->dep_map, 1, _RET_IP_); + rt_mutex_unlock(&rwsem->lock); ++ } + } + EXPORT_SYMBOL(rt_up_read); + +@@ -356,6 +359,13 @@ void rt_down_write_nested(struct rw_sem + } + EXPORT_SYMBOL(rt_down_write_nested); + ++void rt_down_write_nested_lock(struct rw_semaphore *rwsem, ++ struct lockdep_map *nest) ++{ ++ rwsem_acquire_nest(&rwsem->dep_map, 0, 0, nest, _RET_IP_); ++ rt_mutex_lock(&rwsem->lock); ++} ++ + int rt_down_read_trylock(struct rw_semaphore *rwsem) + { + struct rt_mutex *lock = &rwsem->lock; +@@ -366,15 +376,16 @@ int rt_down_read_trylock(struct rw_sema + * but not when read_depth == 0 which means that the rwsem is + * write locked. 
+ */ +- if (rt_mutex_owner(lock) != current) ++ if (rt_mutex_owner(lock) != current) { + ret = rt_mutex_trylock(&rwsem->lock); +- else if (!rwsem->read_depth) ++ if (ret) ++ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_); ++ } else if (!rwsem->read_depth) { + ret = 0; ++ } + +- if (ret) { ++ if (ret) + rwsem->read_depth++; +- rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_); +- } + return ret; + } + EXPORT_SYMBOL(rt_down_read_trylock); +@@ -383,10 +394,10 @@ static void __rt_down_read(struct rw_sem + { + struct rt_mutex *lock = &rwsem->lock; + +- rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_); +- +- if (rt_mutex_owner(lock) != current) ++ if (rt_mutex_owner(lock) != current) { ++ rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_); + rt_mutex_lock(&rwsem->lock); ++ } + rwsem->read_depth++; + } + diff --git a/debian/patches/features/all/rt/rt-sched-do-not-compare-cpu-masks-in-scheduler.patch b/debian/patches/features/all/rt/rt-sched-do-not-compare-cpu-masks-in-scheduler.patch new file mode 100644 index 000000000..f09edd9e9 --- /dev/null +++ b/debian/patches/features/all/rt/rt-sched-do-not-compare-cpu-masks-in-scheduler.patch @@ -0,0 +1,41 @@ +Subject: sched: Do not compare cpu masks in scheduler +Date: Tue, 27 Sep 2011 08:40:24 -0400 +From: Peter Zijlstra +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Peter Zijlstra +Cc: Peter Zijlstra +Cc: Clark Williams +Link: http://lkml.kernel.org/r/20110927124423.128129033@goodmis.org +Signed-off-by: Thomas Gleixner + +--- + kernel/sched/core.c | 14 +++++--------- + 1 file changed, 5 insertions(+), 9 deletions(-) + +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -2943,16 +2943,12 @@ static inline void update_migrate_disabl + */ + mask = tsk_cpus_allowed(p); + +- WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); ++ if (p->sched_class->set_cpus_allowed) ++ p->sched_class->set_cpus_allowed(p, mask); ++ p->nr_cpus_allowed = cpumask_weight(mask); + +- if (!cpumask_equal(&p->cpus_allowed, mask)) { +- if (p->sched_class->set_cpus_allowed) +- p->sched_class->set_cpus_allowed(p, mask); +- p->nr_cpus_allowed = cpumask_weight(mask); +- +- /* Let migrate_enable know to fix things back up */ +- p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN; +- } ++ /* Let migrate_enable know to fix things back up */ ++ p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN; + } + + void migrate_disable(void) diff --git a/debian/patches/features/all/rt/rt-sched-have-migrate_disable-ignore-bounded-threads.patch b/debian/patches/features/all/rt/rt-sched-have-migrate_disable-ignore-bounded-threads.patch new file mode 100644 index 000000000..0eab2ee2e --- /dev/null +++ b/debian/patches/features/all/rt/rt-sched-have-migrate_disable-ignore-bounded-threads.patch @@ -0,0 +1,71 @@ +Subject: sched: Have migrate_disable ignore bounded threads +Date: Tue, 27 Sep 2011 08:40:25 -0400 +From: Peter Zijlstra +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Peter Zijlstra +Cc: Peter Zijlstra +Cc: Clark Williams +Link: http://lkml.kernel.org/r/20110927124423.567944215@goodmis.org +Signed-off-by: Thomas Gleixner + +--- + kernel/sched/core.c | 23 +++++++++-------------- + 1 file changed, 9 insertions(+), 14 deletions(-) + +Index: linux-stable/kernel/sched/core.c 
+=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -2955,7 +2955,7 @@ void migrate_disable(void) + { + struct task_struct *p = current; + +- if (in_atomic()) { ++ if (in_atomic() || p->flags & PF_NO_SETAFFINITY) { + #ifdef CONFIG_SCHED_DEBUG + p->migrate_disable_atomic++; + #endif +@@ -2986,7 +2986,7 @@ void migrate_enable(void) + unsigned long flags; + struct rq *rq; + +- if (in_atomic()) { ++ if (in_atomic() || p->flags & PF_NO_SETAFFINITY) { + #ifdef CONFIG_SCHED_DEBUG + p->migrate_disable_atomic--; + #endif +@@ -3007,26 +3007,21 @@ void migrate_enable(void) + + if (unlikely(migrate_disabled_updated(p))) { + /* +- * See comment in update_migrate_disable() about locking. ++ * Undo whatever update_migrate_disable() did, also see there ++ * about locking. + */ + rq = this_rq(); + raw_spin_lock_irqsave(&rq->lock, flags); +- mask = tsk_cpus_allowed(p); ++ + /* + * Clearing migrate_disable causes tsk_cpus_allowed to + * show the tasks original cpu affinity. + */ + p->migrate_disable = 0; +- +- WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); +- +- if (unlikely(!cpumask_equal(&p->cpus_allowed, mask))) { +- /* Get the mask now that migration is enabled */ +- mask = tsk_cpus_allowed(p); +- if (p->sched_class->set_cpus_allowed) +- p->sched_class->set_cpus_allowed(p, mask); +- p->nr_cpus_allowed = cpumask_weight(mask); +- } ++ mask = tsk_cpus_allowed(p); ++ if (p->sched_class->set_cpus_allowed) ++ p->sched_class->set_cpus_allowed(p, mask); ++ p->nr_cpus_allowed = cpumask_weight(mask); + raw_spin_unlock_irqrestore(&rq->lock, flags); + } else + p->migrate_disable = 0; diff --git a/debian/patches/features/all/rt/rt-sched-postpone-actual-migration-disalbe-to-schedule.patch b/debian/patches/features/all/rt/rt-sched-postpone-actual-migration-disalbe-to-schedule.patch new file mode 100644 index 000000000..696a9ad5d --- /dev/null +++ b/debian/patches/features/all/rt/rt-sched-postpone-actual-migration-disalbe-to-schedule.patch @@ -0,0 +1,308 @@ +Subject: sched: Postpone actual migration disalbe to schedule +From: Steven Rostedt +Date: Tue, 27 Sep 2011 08:40:23 -0400 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +The migrate_disable() can cause a bit of a overhead to the RT kernel, +as changing the affinity is expensive to do at every lock encountered. +As a running task can not migrate, the actual disabling of migration +does not need to occur until the task is about to schedule out. + +In most cases, a task that disables migration will enable it before +it schedules making this change improve performance tremendously. 
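A hedged sketch (not taken from the patch) of the calling pattern this optimises for: migrate_disable()/migrate_enable() nest via a per-task counter, and the expensive affinity fix-up is now deferred and only performed if the task actually scheduled while migration was disabled:

	migrate_disable();	/* pins the task to this CPU, per-task counter -> 1   */
	migrate_disable();	/* nested call: counter -> 2, nothing else to do      */
	/* ... code that must not migrate ... */
	migrate_enable();	/* counter -> 1                                        */
	migrate_enable();	/* counter -> 0; affinity is recomputed here only if  */
				/* the task scheduled while migration was disabled    */
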
+ +[ Frank Rowand: UP compile fix ] + +Signed-off-by: Steven Rostedt +Cc: Peter Zijlstra +Cc: Clark Williams +Link: http://lkml.kernel.org/r/20110927124422.779693167@goodmis.org +Signed-off-by: Thomas Gleixner + +--- + kernel/sched/core.c | 251 +++++++++++++++++++++++++++------------------------- + 1 file changed, 132 insertions(+), 119 deletions(-) + +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -2915,6 +2915,135 @@ static inline void schedule_debug(struct + schedstat_inc(this_rq(), sched_count); + } + ++#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP) ++#define MIGRATE_DISABLE_SET_AFFIN (1<<30) /* Can't make a negative */ ++#define migrate_disabled_updated(p) ((p)->migrate_disable & MIGRATE_DISABLE_SET_AFFIN) ++#define migrate_disable_count(p) ((p)->migrate_disable & ~MIGRATE_DISABLE_SET_AFFIN) ++ ++static inline void update_migrate_disable(struct task_struct *p) ++{ ++ const struct cpumask *mask; ++ ++ if (likely(!p->migrate_disable)) ++ return; ++ ++ /* Did we already update affinity? */ ++ if (unlikely(migrate_disabled_updated(p))) ++ return; ++ ++ /* ++ * Since this is always current we can get away with only locking ++ * rq->lock, the ->cpus_allowed value can normally only be changed ++ * while holding both p->pi_lock and rq->lock, but seeing that this ++ * is current, we cannot actually be waking up, so all code that ++ * relies on serialization against p->pi_lock is out of scope. ++ * ++ * Having rq->lock serializes us against things like ++ * set_cpus_allowed_ptr() that can still happen concurrently. ++ */ ++ mask = tsk_cpus_allowed(p); ++ ++ WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); ++ ++ if (!cpumask_equal(&p->cpus_allowed, mask)) { ++ if (p->sched_class->set_cpus_allowed) ++ p->sched_class->set_cpus_allowed(p, mask); ++ p->nr_cpus_allowed = cpumask_weight(mask); ++ ++ /* Let migrate_enable know to fix things back up */ ++ p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN; ++ } ++} ++ ++void migrate_disable(void) ++{ ++ struct task_struct *p = current; ++ ++ if (in_atomic()) { ++#ifdef CONFIG_SCHED_DEBUG ++ p->migrate_disable_atomic++; ++#endif ++ return; ++ } ++ ++#ifdef CONFIG_SCHED_DEBUG ++ WARN_ON_ONCE(p->migrate_disable_atomic); ++#endif ++ ++ preempt_disable(); ++ if (p->migrate_disable) { ++ p->migrate_disable++; ++ preempt_enable(); ++ return; ++ } ++ ++ pin_current_cpu(); ++ p->migrate_disable = 1; ++ preempt_enable(); ++} ++EXPORT_SYMBOL(migrate_disable); ++ ++void migrate_enable(void) ++{ ++ struct task_struct *p = current; ++ const struct cpumask *mask; ++ unsigned long flags; ++ struct rq *rq; ++ ++ if (in_atomic()) { ++#ifdef CONFIG_SCHED_DEBUG ++ p->migrate_disable_atomic--; ++#endif ++ return; ++ } ++ ++#ifdef CONFIG_SCHED_DEBUG ++ WARN_ON_ONCE(p->migrate_disable_atomic); ++#endif ++ WARN_ON_ONCE(p->migrate_disable <= 0); ++ ++ preempt_disable(); ++ if (migrate_disable_count(p) > 1) { ++ p->migrate_disable--; ++ preempt_enable(); ++ return; ++ } ++ ++ if (unlikely(migrate_disabled_updated(p))) { ++ /* ++ * See comment in update_migrate_disable() about locking. ++ */ ++ rq = this_rq(); ++ raw_spin_lock_irqsave(&rq->lock, flags); ++ mask = tsk_cpus_allowed(p); ++ /* ++ * Clearing migrate_disable causes tsk_cpus_allowed to ++ * show the tasks original cpu affinity. 
++ */ ++ p->migrate_disable = 0; ++ ++ WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); ++ ++ if (unlikely(!cpumask_equal(&p->cpus_allowed, mask))) { ++ /* Get the mask now that migration is enabled */ ++ mask = tsk_cpus_allowed(p); ++ if (p->sched_class->set_cpus_allowed) ++ p->sched_class->set_cpus_allowed(p, mask); ++ p->nr_cpus_allowed = cpumask_weight(mask); ++ } ++ raw_spin_unlock_irqrestore(&rq->lock, flags); ++ } else ++ p->migrate_disable = 0; ++ ++ unpin_current_cpu(); ++ preempt_enable(); ++} ++EXPORT_SYMBOL(migrate_enable); ++#else ++static inline void update_migrate_disable(struct task_struct *p) { } ++#define migrate_disabled_updated(p) 0 ++#endif ++ + static void put_prev_task(struct rq *rq, struct task_struct *prev) + { + if (prev->on_rq || rq->skip_clock_update < 0) +@@ -3008,6 +3137,8 @@ need_resched: + + raw_spin_lock_irq(&rq->lock); + ++ update_migrate_disable(prev); ++ + switch_count = &prev->nivcsw; + if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { + if (unlikely(signal_pending_state(prev->state, prev))) { +@@ -4828,7 +4959,7 @@ void __cpuinit init_idle(struct task_str + #ifdef CONFIG_SMP + void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) + { +- if (!__migrate_disabled(p)) { ++ if (!migrate_disabled_updated(p)) { + if (p->sched_class && p->sched_class->set_cpus_allowed) + p->sched_class->set_cpus_allowed(p, new_mask); + p->nr_cpus_allowed = cpumask_weight(new_mask); +@@ -4898,124 +5029,6 @@ out: + } + EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); + +-#ifdef CONFIG_PREEMPT_RT_FULL +-void migrate_disable(void) +-{ +- struct task_struct *p = current; +- const struct cpumask *mask; +- unsigned long flags; +- struct rq *rq; +- +- if (in_atomic()) { +-#ifdef CONFIG_SCHED_DEBUG +- p->migrate_disable_atomic++; +-#endif +- return; +- } +- +-#ifdef CONFIG_SCHED_DEBUG +- WARN_ON_ONCE(p->migrate_disable_atomic); +-#endif +- +- preempt_disable(); +- if (p->migrate_disable) { +- p->migrate_disable++; +- preempt_enable(); +- return; +- } +- +- pin_current_cpu(); +- if (unlikely(!scheduler_running)) { +- p->migrate_disable = 1; +- preempt_enable(); +- return; +- } +- +- /* +- * Since this is always current we can get away with only locking +- * rq->lock, the ->cpus_allowed value can normally only be changed +- * while holding both p->pi_lock and rq->lock, but seeing that this +- * it current, we cannot actually be waking up, so all code that +- * relies on serialization against p->pi_lock is out of scope. +- * +- * Taking rq->lock serializes us against things like +- * set_cpus_allowed_ptr() that can still happen concurrently. 
+- */ +- rq = this_rq(); +- raw_spin_lock_irqsave(&rq->lock, flags); +- p->migrate_disable = 1; +- mask = tsk_cpus_allowed(p); +- +- WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); +- +- if (!cpumask_equal(&p->cpus_allowed, mask)) { +- if (p->sched_class->set_cpus_allowed) +- p->sched_class->set_cpus_allowed(p, mask); +- p->nr_cpus_allowed = cpumask_weight(mask); +- } +- raw_spin_unlock_irqrestore(&rq->lock, flags); +- preempt_enable(); +-} +-EXPORT_SYMBOL(migrate_disable); +- +-void migrate_enable(void) +-{ +- struct task_struct *p = current; +- const struct cpumask *mask; +- unsigned long flags; +- struct rq *rq; +- +- if (in_atomic()) { +-#ifdef CONFIG_SCHED_DEBUG +- p->migrate_disable_atomic--; +-#endif +- return; +- } +- +-#ifdef CONFIG_SCHED_DEBUG +- WARN_ON_ONCE(p->migrate_disable_atomic); +-#endif +- WARN_ON_ONCE(p->migrate_disable <= 0); +- +- preempt_disable(); +- if (p->migrate_disable > 1) { +- p->migrate_disable--; +- preempt_enable(); +- return; +- } +- +- if (unlikely(!scheduler_running)) { +- p->migrate_disable = 0; +- unpin_current_cpu(); +- preempt_enable(); +- return; +- } +- +- /* +- * See comment in migrate_disable(). +- */ +- rq = this_rq(); +- raw_spin_lock_irqsave(&rq->lock, flags); +- mask = tsk_cpus_allowed(p); +- p->migrate_disable = 0; +- +- WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); +- +- if (!cpumask_equal(&p->cpus_allowed, mask)) { +- /* Get the mask now that migration is enabled */ +- mask = tsk_cpus_allowed(p); +- if (p->sched_class->set_cpus_allowed) +- p->sched_class->set_cpus_allowed(p, mask); +- p->nr_cpus_allowed = cpumask_weight(mask); +- } +- +- raw_spin_unlock_irqrestore(&rq->lock, flags); +- unpin_current_cpu(); +- preempt_enable(); +-} +-EXPORT_SYMBOL(migrate_enable); +-#endif /* CONFIG_PREEMPT_RT_FULL */ +- + /* + * Move (not current) task off this cpu, onto dest cpu. We're doing + * this because either it can't run here any more (set_cpus_allowed() diff --git a/debian/patches/features/all/rt/rt-serial-warn-fix.patch b/debian/patches/features/all/rt/rt-serial-warn-fix.patch new file mode 100644 index 000000000..9b239974e --- /dev/null +++ b/debian/patches/features/all/rt/rt-serial-warn-fix.patch @@ -0,0 +1,40 @@ +Subject: rt: Improve the serial console PASS_LIMIT +From: Ingo Molnar +Date: Wed Dec 14 13:05:54 CET 2011 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Beyond the warning: + + drivers/tty/serial/8250/8250.c:1613:6: warning: unused variable ‘pass_counter’ [-Wunused-variable] + +the solution of just looping infinitely was ugly - up it to 1 million to +give it a chance to continue in some really ugly situation. + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner +--- + drivers/tty/serial/8250/8250_core.c | 11 ++++++++++- + 1 file changed, 10 insertions(+), 1 deletion(-) + +Index: linux-stable/drivers/tty/serial/8250/8250_core.c +=================================================================== +--- linux-stable.orig/drivers/tty/serial/8250/8250_core.c ++++ linux-stable/drivers/tty/serial/8250/8250_core.c +@@ -80,7 +80,16 @@ static unsigned int skip_txen_test; /* f + #define DEBUG_INTR(fmt...) 
do { } while (0) + #endif + +-#define PASS_LIMIT 512 ++/* ++ * On -rt we can have a more delays, and legitimately ++ * so - so don't drop work spuriously and spam the ++ * syslog: ++ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define PASS_LIMIT 1000000 ++#else ++# define PASS_LIMIT 512 ++#endif + + #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) + diff --git a/debian/patches/features/all/rt/rt-tracing-show-padding-as-unsigned-short.patch b/debian/patches/features/all/rt/rt-tracing-show-padding-as-unsigned-short.patch new file mode 100644 index 000000000..cd9ff628d --- /dev/null +++ b/debian/patches/features/all/rt/rt-tracing-show-padding-as-unsigned-short.patch @@ -0,0 +1,48 @@ +Subject: tracing: Show padding as unsigned short +From: Steven Rostedt +Date: Wed, 16 Nov 2011 13:19:35 -0500 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +RT added two bytes to trace migrate disable counting to the trace events +and used two bytes of the padding to make the change. The structures and +all were updated correctly, but the display in the event formats was +not: + +cat /debug/tracing/events/sched/sched_switch/format + +name: sched_switch +ID: 51 +format: + field:unsigned short common_type; offset:0; size:2; signed:0; + field:unsigned char common_flags; offset:2; size:1; signed:0; + field:unsigned char common_preempt_count; offset:3; size:1; signed:0; + field:int common_pid; offset:4; size:4; signed:1; + field:unsigned short common_migrate_disable; offset:8; size:2; signed:0; + field:int common_padding; offset:10; size:2; signed:0; + + +The field for common_padding has the correct size and offset, but the +use of "int" might confuse some parsers (and people that are reading +it). This needs to be changed to "unsigned short". 
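[Editorial illustration, not part of the patch.] Read as a C structure, the common-field layout quoted in the format dump above looks like the sketch below (fixed-width types assumed for illustration; the real layout is produced by the __common_field() calls in the hunk that follows). Laid out this way it is obvious why a two-byte field should be reported as "unsigned short" rather than "int".

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Offsets and sizes match the format dump quoted in the description. */
struct rt_trace_common_fields {
        uint16_t common_type;            /* offset 0,  size 2 */
        uint8_t  common_flags;           /* offset 2,  size 1 */
        uint8_t  common_preempt_count;   /* offset 3,  size 1 */
        int32_t  common_pid;             /* offset 4,  size 4 */
        uint16_t common_migrate_disable; /* offset 8,  size 2 */
        uint16_t common_padding;         /* offset 10, size 2: "unsigned short", not "int" */
};

int main(void)
{
        printf("padding: offset %zu size %zu\n",
               offsetof(struct rt_trace_common_fields, common_padding),
               sizeof(((struct rt_trace_common_fields *)0)->common_padding));
        return 0;
}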
+ +Signed-off-by: Steven Rostedt +Link: http://lkml.kernel.org/r/1321467575.4181.36.camel@frodo +Cc: stable-rt@vger.kernel.org +Signed-off-by: Thomas Gleixner + +--- + kernel/trace/trace_events.c | 1 + + 1 file changed, 1 insertion(+) + +Index: linux-stable/kernel/trace/trace_events.c +=================================================================== +--- linux-stable.orig/kernel/trace/trace_events.c ++++ linux-stable/kernel/trace/trace_events.c +@@ -155,6 +155,7 @@ static int trace_define_common_fields(vo + __common_field(unsigned char, preempt_count); + __common_field(int, pid); + __common_field(unsigned short, migrate_disable); ++ __common_field(unsigned short, padding); + + return ret; + } diff --git a/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch b/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch new file mode 100644 index 000000000..08deb5bb3 --- /dev/null +++ b/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch @@ -0,0 +1,23 @@ +Subject: rtmutex-avoid-include-hell.patch +From: Thomas Gleixner +Date: Wed, 29 Jun 2011 20:06:39 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/rtmutex.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: linux-stable/include/linux/rtmutex.h +=================================================================== +--- linux-stable.orig/include/linux/rtmutex.h ++++ linux-stable/include/linux/rtmutex.h +@@ -14,7 +14,7 @@ + + #include + #include +-#include ++#include + + extern int max_lock_depth; /* for sysctl */ + diff --git a/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch b/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch new file mode 100644 index 000000000..658e1e2ec --- /dev/null +++ b/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch @@ -0,0 +1,222 @@ +Subject: rtmutex-futex-prepare-rt.patch +From: Thomas Gleixner +Date: Fri, 10 Jun 2011 11:04:15 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/futex.c | 77 ++++++++++++++++++++++++++++++++++++++---------- + kernel/rtmutex.c | 31 ++++++++++++++++--- + kernel/rtmutex_common.h | 2 + + 3 files changed, 91 insertions(+), 19 deletions(-) + +Index: linux-stable/kernel/futex.c +=================================================================== +--- linux-stable.orig/kernel/futex.c ++++ linux-stable/kernel/futex.c +@@ -1445,6 +1445,16 @@ retry_private: + requeue_pi_wake_futex(this, &key2, hb2); + drop_count++; + continue; ++ } else if (ret == -EAGAIN) { ++ /* ++ * Waiter was woken by timeout or ++ * signal and has set pi_blocked_on to ++ * PI_WAKEUP_INPROGRESS before we ++ * tried to enqueue it on the rtmutex. ++ */ ++ this->pi_state = NULL; ++ free_pi_state(pi_state); ++ continue; + } else if (ret) { + /* -EDEADLK */ + this->pi_state = NULL; +@@ -2288,7 +2298,7 @@ static int futex_wait_requeue_pi(u32 __u + struct hrtimer_sleeper timeout, *to = NULL; + struct rt_mutex_waiter rt_waiter; + struct rt_mutex *pi_mutex = NULL; +- struct futex_hash_bucket *hb; ++ struct futex_hash_bucket *hb, *hb2; + union futex_key key2 = FUTEX_KEY_INIT; + struct futex_q q = futex_q_init; + int res, ret; +@@ -2335,20 +2345,55 @@ static int futex_wait_requeue_pi(u32 __u + /* Queue the futex_q, drop the hb lock, wait for wakeup. 
*/ + futex_wait_queue_me(hb, &q, to); + +- spin_lock(&hb->lock); +- ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); +- spin_unlock(&hb->lock); +- if (ret) +- goto out_put_keys; ++ /* ++ * On RT we must avoid races with requeue and trying to block ++ * on two mutexes (hb->lock and uaddr2's rtmutex) by ++ * serializing access to pi_blocked_on with pi_lock. ++ */ ++ raw_spin_lock_irq(¤t->pi_lock); ++ if (current->pi_blocked_on) { ++ /* ++ * We have been requeued or are in the process of ++ * being requeued. ++ */ ++ raw_spin_unlock_irq(¤t->pi_lock); ++ } else { ++ /* ++ * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS ++ * prevents a concurrent requeue from moving us to the ++ * uaddr2 rtmutex. After that we can safely acquire ++ * (and possibly block on) hb->lock. ++ */ ++ current->pi_blocked_on = PI_WAKEUP_INPROGRESS; ++ raw_spin_unlock_irq(¤t->pi_lock); ++ ++ spin_lock(&hb->lock); ++ ++ /* ++ * Clean up pi_blocked_on. We might leak it otherwise ++ * when we succeeded with the hb->lock in the fast ++ * path. ++ */ ++ raw_spin_lock_irq(¤t->pi_lock); ++ current->pi_blocked_on = NULL; ++ raw_spin_unlock_irq(¤t->pi_lock); ++ ++ ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); ++ spin_unlock(&hb->lock); ++ if (ret) ++ goto out_put_keys; ++ } + + /* +- * In order for us to be here, we know our q.key == key2, and since +- * we took the hb->lock above, we also know that futex_requeue() has +- * completed and we no longer have to concern ourselves with a wakeup +- * race with the atomic proxy lock acquisition by the requeue code. The +- * futex_requeue dropped our key1 reference and incremented our key2 +- * reference count. ++ * In order to be here, we have either been requeued, are in ++ * the process of being requeued, or requeue successfully ++ * acquired uaddr2 on our behalf. If pi_blocked_on was ++ * non-null above, we may be racing with a requeue. Do not ++ * rely on q->lock_ptr to be hb2->lock until after blocking on ++ * hb->lock or hb2->lock. The futex_requeue dropped our key1 ++ * reference and incremented our key2 reference count. + */ ++ hb2 = hash_futex(&key2); + + /* Check if the requeue code acquired the second futex for us. */ + if (!q.rt_waiter) { +@@ -2357,9 +2402,10 @@ static int futex_wait_requeue_pi(u32 __u + * did a lock-steal - fix up the PI-state in that case. + */ + if (q.pi_state && (q.pi_state->owner != current)) { +- spin_lock(q.lock_ptr); ++ spin_lock(&hb2->lock); ++ BUG_ON(&hb2->lock != q.lock_ptr); + ret = fixup_pi_state_owner(uaddr2, &q, current); +- spin_unlock(q.lock_ptr); ++ spin_unlock(&hb2->lock); + } + } else { + /* +@@ -2372,7 +2418,8 @@ static int futex_wait_requeue_pi(u32 __u + ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1); + debug_rt_mutex_free_waiter(&rt_waiter); + +- spin_lock(q.lock_ptr); ++ spin_lock(&hb2->lock); ++ BUG_ON(&hb2->lock != q.lock_ptr); + /* + * Fixup the pi_state owner and possibly acquire the lock if we + * haven't already. 
+Index: linux-stable/kernel/rtmutex.c +=================================================================== +--- linux-stable.orig/kernel/rtmutex.c ++++ linux-stable/kernel/rtmutex.c +@@ -68,6 +68,11 @@ static void fixup_rt_mutex_waiters(struc + clear_rt_mutex_waiters(lock); + } + ++static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter) ++{ ++ return waiter && waiter != PI_WAKEUP_INPROGRESS; ++} ++ + /* + * We can speed up the acquire/release, if the architecture + * supports cmpxchg and if there's no debugging state to be set up +@@ -197,7 +202,7 @@ static int rt_mutex_adjust_prio_chain(st + * reached or the state of the chain has changed while we + * dropped the locks. + */ +- if (!waiter) ++ if (!rt_mutex_real_waiter(waiter)) + goto out_unlock_pi; + + /* +@@ -400,6 +405,23 @@ static int task_blocks_on_rt_mutex(struc + int chain_walk = 0, res; + + raw_spin_lock_irqsave(&task->pi_lock, flags); ++ ++ /* ++ * In the case of futex requeue PI, this will be a proxy ++ * lock. The task will wake unaware that it is enqueueed on ++ * this lock. Avoid blocking on two locks and corrupting ++ * pi_blocked_on via the PI_WAKEUP_INPROGRESS ++ * flag. futex_wait_requeue_pi() sets this when it wakes up ++ * before requeue (due to a signal or timeout). Do not enqueue ++ * the task if PI_WAKEUP_INPROGRESS is set. ++ */ ++ if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) { ++ raw_spin_unlock_irqrestore(&task->pi_lock, flags); ++ return -EAGAIN; ++ } ++ ++ BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)); ++ + __rt_mutex_adjust_prio(task); + waiter->task = task; + waiter->lock = lock; +@@ -424,7 +446,7 @@ static int task_blocks_on_rt_mutex(struc + plist_add(&waiter->pi_list_entry, &owner->pi_waiters); + + __rt_mutex_adjust_prio(owner); +- if (owner->pi_blocked_on) ++ if (rt_mutex_real_waiter(owner->pi_blocked_on)) + chain_walk = 1; + raw_spin_unlock_irqrestore(&owner->pi_lock, flags); + } +@@ -518,7 +540,7 @@ static void remove_waiter(struct rt_mute + } + __rt_mutex_adjust_prio(owner); + +- if (owner->pi_blocked_on) ++ if (rt_mutex_real_waiter(owner->pi_blocked_on)) + chain_walk = 1; + + raw_spin_unlock_irqrestore(&owner->pi_lock, flags); +@@ -552,7 +574,8 @@ void rt_mutex_adjust_pi(struct task_stru + raw_spin_lock_irqsave(&task->pi_lock, flags); + + waiter = task->pi_blocked_on; +- if (!waiter || waiter->list_entry.prio == task->prio) { ++ if (!rt_mutex_real_waiter(waiter) || ++ waiter->list_entry.prio == task->prio) { + raw_spin_unlock_irqrestore(&task->pi_lock, flags); + return; + } +Index: linux-stable/kernel/rtmutex_common.h +=================================================================== +--- linux-stable.orig/kernel/rtmutex_common.h ++++ linux-stable/kernel/rtmutex_common.h +@@ -103,6 +103,8 @@ static inline struct task_struct *rt_mut + /* + * PI-futex support (proxy locking functions, etc.): + */ ++#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1) ++ + extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); + extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, + struct task_struct *proxy_owner); diff --git a/debian/patches/features/all/rt/rtmutex-lock-killable.patch b/debian/patches/features/all/rt/rtmutex-lock-killable.patch new file mode 100644 index 000000000..20e10dd85 --- /dev/null +++ b/debian/patches/features/all/rt/rtmutex-lock-killable.patch @@ -0,0 +1,85 @@ +Subject: rtmutex-lock-killable.patch +From: Thomas Gleixner +Date: Thu, 09 Jun 2011 11:43:52 +0200 +Origin: 
https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/rtmutex.h | 1 + + kernel/rtmutex.c | 33 +++++++++++++++++++++++++++------ + 2 files changed, 28 insertions(+), 6 deletions(-) + +Index: linux-stable/include/linux/rtmutex.h +=================================================================== +--- linux-stable.orig/include/linux/rtmutex.h ++++ linux-stable/include/linux/rtmutex.h +@@ -90,6 +90,7 @@ extern void rt_mutex_destroy(struct rt_m + extern void rt_mutex_lock(struct rt_mutex *lock); + extern int rt_mutex_lock_interruptible(struct rt_mutex *lock, + int detect_deadlock); ++extern int rt_mutex_lock_killable(struct rt_mutex *lock, int detect_deadlock); + extern int rt_mutex_timed_lock(struct rt_mutex *lock, + struct hrtimer_sleeper *timeout, + int detect_deadlock); +Index: linux-stable/kernel/rtmutex.c +=================================================================== +--- linux-stable.orig/kernel/rtmutex.c ++++ linux-stable/kernel/rtmutex.c +@@ -816,12 +816,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock); + /** + * rt_mutex_lock_interruptible - lock a rt_mutex interruptible + * +- * @lock: the rt_mutex to be locked ++ * @lock: the rt_mutex to be locked + * @detect_deadlock: deadlock detection on/off + * + * Returns: +- * 0 on success +- * -EINTR when interrupted by a signal ++ * 0 on success ++ * -EINTR when interrupted by a signal + * -EDEADLK when the lock would deadlock (when deadlock detection is on) + */ + int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock, +@@ -835,17 +835,38 @@ int __sched rt_mutex_lock_interruptible( + EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); + + /** ++ * rt_mutex_lock_killable - lock a rt_mutex killable ++ * ++ * @lock: the rt_mutex to be locked ++ * @detect_deadlock: deadlock detection on/off ++ * ++ * Returns: ++ * 0 on success ++ * -EINTR when interrupted by a signal ++ * -EDEADLK when the lock would deadlock (when deadlock detection is on) ++ */ ++int __sched rt_mutex_lock_killable(struct rt_mutex *lock, ++ int detect_deadlock) ++{ ++ might_sleep(); ++ ++ return rt_mutex_fastlock(lock, TASK_KILLABLE, ++ detect_deadlock, rt_mutex_slowlock); ++} ++EXPORT_SYMBOL_GPL(rt_mutex_lock_killable); ++ ++/** + * rt_mutex_timed_lock - lock a rt_mutex interruptible + * the timeout structure is provided + * by the caller + * +- * @lock: the rt_mutex to be locked ++ * @lock: the rt_mutex to be locked + * @timeout: timeout structure or NULL (no timeout) + * @detect_deadlock: deadlock detection on/off + * + * Returns: +- * 0 on success +- * -EINTR when interrupted by a signal ++ * 0 on success ++ * -EINTR when interrupted by a signal + * -ETIMEDOUT when the timeout expired + * -EDEADLK when the lock would deadlock (when deadlock detection is on) + */ diff --git a/debian/patches/features/all/rt/rwsem-add-rt-variant.patch b/debian/patches/features/all/rt/rwsem-add-rt-variant.patch new file mode 100644 index 000000000..37a7c8788 --- /dev/null +++ b/debian/patches/features/all/rt/rwsem-add-rt-variant.patch @@ -0,0 +1,183 @@ +Subject: rwsem-add-rt-variant.patch +From: Thomas Gleixner +Date: Wed, 29 Jun 2011 21:02:53 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/rwsem.h | 6 ++ + include/linux/rwsem_rt.h | 128 +++++++++++++++++++++++++++++++++++++++++++++++ + lib/Makefile | 3 + + 3 files changed, 137 insertions(+) + +Index: linux-stable/include/linux/rwsem.h 
+=================================================================== +--- linux-stable.orig/include/linux/rwsem.h ++++ linux-stable/include/linux/rwsem.h +@@ -16,6 +16,10 @@ + + #include + ++#ifdef CONFIG_PREEMPT_RT_FULL ++#include ++#else /* PREEMPT_RT_FULL */ ++ + struct rw_semaphore; + + #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK +@@ -149,4 +153,6 @@ extern void up_read_non_owner(struct rw_ + # define up_read_non_owner(sem) up_read(sem) + #endif + ++#endif /* !PREEMPT_RT_FULL */ ++ + #endif /* _LINUX_RWSEM_H */ +Index: linux-stable/include/linux/rwsem_rt.h +=================================================================== +--- /dev/null ++++ linux-stable/include/linux/rwsem_rt.h +@@ -0,0 +1,128 @@ ++#ifndef _LINUX_RWSEM_RT_H ++#define _LINUX_RWSEM_RT_H ++ ++#ifndef _LINUX_RWSEM_H ++#error "Include rwsem.h" ++#endif ++ ++/* ++ * RW-semaphores are a spinlock plus a reader-depth count. ++ * ++ * Note that the semantics are different from the usual ++ * Linux rw-sems, in PREEMPT_RT mode we do not allow ++ * multiple readers to hold the lock at once, we only allow ++ * a read-lock owner to read-lock recursively. This is ++ * better for latency, makes the implementation inherently ++ * fair and makes it simpler as well. ++ */ ++ ++#include ++ ++struct rw_semaphore { ++ struct rt_mutex lock; ++ int read_depth; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++}; ++ ++#define __RWSEM_INITIALIZER(name) \ ++ { .lock = __RT_MUTEX_INITIALIZER(name.lock), \ ++ RW_DEP_MAP_INIT(name) } ++ ++#define DECLARE_RWSEM(lockname) \ ++ struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname) ++ ++extern void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name, ++ struct lock_class_key *key); ++ ++#define __rt_init_rwsem(sem, name, key) \ ++ do { \ ++ rt_mutex_init(&(sem)->lock); \ ++ __rt_rwsem_init((sem), (name), (key));\ ++ } while (0) ++ ++#define __init_rwsem(sem, name, key) __rt_init_rwsem(sem, name, key) ++ ++# define rt_init_rwsem(sem) \ ++do { \ ++ static struct lock_class_key __key; \ ++ \ ++ __rt_init_rwsem((sem), #sem, &__key); \ ++} while (0) ++ ++extern void rt_down_write(struct rw_semaphore *rwsem); ++extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass); ++extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass); ++extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem, ++ struct lockdep_map *nest); ++extern void rt_down_read(struct rw_semaphore *rwsem); ++extern int rt_down_write_trylock(struct rw_semaphore *rwsem); ++extern int rt_down_read_trylock(struct rw_semaphore *rwsem); ++extern void rt_up_read(struct rw_semaphore *rwsem); ++extern void rt_up_write(struct rw_semaphore *rwsem); ++extern void rt_downgrade_write(struct rw_semaphore *rwsem); ++ ++#define init_rwsem(sem) rt_init_rwsem(sem) ++#define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock) ++ ++static inline void down_read(struct rw_semaphore *sem) ++{ ++ rt_down_read(sem); ++} ++ ++static inline int down_read_trylock(struct rw_semaphore *sem) ++{ ++ return rt_down_read_trylock(sem); ++} ++ ++static inline void down_write(struct rw_semaphore *sem) ++{ ++ rt_down_write(sem); ++} ++ ++static inline int down_write_trylock(struct rw_semaphore *sem) ++{ ++ return rt_down_write_trylock(sem); ++} ++ ++static inline void up_read(struct rw_semaphore *sem) ++{ ++ rt_up_read(sem); ++} ++ ++static inline void up_write(struct rw_semaphore *sem) ++{ ++ rt_up_write(sem); ++} ++ ++static inline void downgrade_write(struct rw_semaphore *sem) ++{ ++ 
rt_downgrade_write(sem); ++} ++ ++static inline void down_read_nested(struct rw_semaphore *sem, int subclass) ++{ ++ return rt_down_read_nested(sem, subclass); ++} ++ ++static inline void down_write_nested(struct rw_semaphore *sem, int subclass) ++{ ++ rt_down_write_nested(sem, subclass); ++} ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++static inline void down_write_nest_lock(struct rw_semaphore *sem, ++ struct rw_semaphore *nest_lock) ++{ ++ rt_down_write_nested_lock(sem, &nest_lock->dep_map); ++} ++ ++#else ++ ++static inline void down_write_nest_lock(struct rw_semaphore *sem, ++ struct rw_semaphore *nest_lock) ++{ ++ rt_down_write_nested_lock(sem, NULL); ++} ++#endif ++#endif +Index: linux-stable/lib/Makefile +=================================================================== +--- linux-stable.orig/lib/Makefile ++++ linux-stable/lib/Makefile +@@ -41,8 +41,11 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o + obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o + obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o + obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o ++ ++ifneq ($(CONFIG_PREEMPT_RT_FULL),y) + lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o + lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o ++endif + lib-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o + + CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS)) diff --git a/debian/patches/features/all/rt/sched-adjust-reset-on-fork-always.patch b/debian/patches/features/all/rt/sched-adjust-reset-on-fork-always.patch new file mode 100644 index 000000000..df8eb6aac --- /dev/null +++ b/debian/patches/features/all/rt/sched-adjust-reset-on-fork-always.patch @@ -0,0 +1,34 @@ +Subject: sched: Adjust sched_reset_on_fork when nothing else changes +From: Thomas Gleixner +Date: Thu, 20 Dec 2012 14:58:00 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +If the policy and priority remain unchanged a possible modification of +sched_reset_on_fork gets lost in the early exit path. + +Signed-off-by: Thomas Gleixner +Cc: stable@vger.kernel.org +Cc: stable-rt@vger.kernel.org +--- + kernel/sched/core.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -4192,10 +4192,13 @@ recheck: + } + + /* +- * If not changing anything there's no need to proceed further: ++ * If not changing anything there's no need to proceed ++ * further, but store a possible modification of ++ * reset_on_fork. + */ + if (unlikely(policy == p->policy && (!rt_policy(policy) || + param->sched_priority == p->rt_priority))) { ++ p->sched_reset_on_fork = reset_on_fork; + task_rq_unlock(rq, p, &flags); + return 0; + } diff --git a/debian/patches/features/all/rt/sched-better-debug-output-for-might-sleep.patch b/debian/patches/features/all/rt/sched-better-debug-output-for-might-sleep.patch new file mode 100644 index 000000000..d13f08cca --- /dev/null +++ b/debian/patches/features/all/rt/sched-better-debug-output-for-might-sleep.patch @@ -0,0 +1,76 @@ +Subject: sched: Better debug output for might sleep +From: Thomas Gleixner +Date: Fri, 05 Oct 2012 08:56:15 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +might sleep can tell us where interrupts have been disabled, but we +have no idea what disabled preemption. Add some debug infrastructure. 
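[Editorial illustration, not part of the patch.] The idea is simply to record the instruction pointer the first time the preemption count leaves zero and to print it from the might-sleep and scheduling-while-atomic reports. The user-space sketch below models that pattern; the names are reused from the kernel for readability, and __builtin_return_address() stands in for the CALLER_ADDR/get_parent_ip() machinery the patch actually uses.

#include <stdio.h>

static int preempt_count;
static void *preempt_disable_ip;

static __attribute__((noinline)) void preempt_disable(void)
{
        /* remember who took the count from 0 to 1 */
        if (preempt_count++ == 0)
                preempt_disable_ip = __builtin_return_address(0);
}

static void might_sleep(void)
{
        if (preempt_count)
                printf("sleeping call while preemption was disabled at %p\n",
                       preempt_disable_ip);
}

static void buggy_path(void)
{
        preempt_disable();
        might_sleep();          /* reports the call site inside buggy_path() */
}

int main(void)
{
        buggy_path();
        return 0;
}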
+ +Signed-off-by: Thomas Gleixner +--- + include/linux/sched.h | 3 +++ + kernel/sched/core.c | 23 +++++++++++++++++++++-- + 2 files changed, 24 insertions(+), 2 deletions(-) + +Index: linux-stable/include/linux/sched.h +=================================================================== +--- linux-stable.orig/include/linux/sched.h ++++ linux-stable/include/linux/sched.h +@@ -1329,6 +1329,9 @@ struct task_struct { + struct mutex perf_event_mutex; + struct list_head perf_event_list; + #endif ++#ifdef CONFIG_DEBUG_PREEMPT ++ unsigned long preempt_disable_ip; ++#endif + #ifdef CONFIG_NUMA + struct mempolicy *mempolicy; /* Protected by alloc_lock */ + short il_next; +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -2808,8 +2808,13 @@ void __kprobes add_preempt_count(int val + DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= + PREEMPT_MASK - 10); + #endif +- if (preempt_count() == val) +- trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); ++ if (preempt_count() == val) { ++ unsigned long ip = get_parent_ip(CALLER_ADDR1); ++#ifdef CONFIG_DEBUG_PREEMPT ++ current->preempt_disable_ip = ip; ++#endif ++ trace_preempt_off(CALLER_ADDR0, ip); ++ } + } + EXPORT_SYMBOL(add_preempt_count); + +@@ -2852,6 +2857,13 @@ static noinline void __schedule_bug(stru + print_modules(); + if (irqs_disabled()) + print_irqtrace_events(prev); ++#ifdef CONFIG_DEBUG_PREEMPT ++ if (in_atomic_preempt_off()) { ++ pr_err("Preemption disabled at:"); ++ print_ip_sym(current->preempt_disable_ip); ++ pr_cont("\n"); ++ } ++#endif + dump_stack(); + add_taint(TAINT_WARN, LOCKDEP_STILL_OK); + } +@@ -7118,6 +7130,13 @@ void __might_sleep(const char *file, int + debug_show_held_locks(current); + if (irqs_disabled()) + print_irqtrace_events(current); ++#ifdef CONFIG_DEBUG_PREEMPT ++ if (!preempt_count_equals(preempt_offset)) { ++ pr_err("Preemption disabled at:"); ++ print_ip_sym(current->preempt_disable_ip); ++ pr_cont("\n"); ++ } ++#endif + dump_stack(); + } + EXPORT_SYMBOL(__might_sleep); diff --git a/debian/patches/features/all/rt/sched-clear-pf-thread-bound-on-fallback-rq.patch b/debian/patches/features/all/rt/sched-clear-pf-thread-bound-on-fallback-rq.patch new file mode 100644 index 000000000..a334841f1 --- /dev/null +++ b/debian/patches/features/all/rt/sched-clear-pf-thread-bound-on-fallback-rq.patch @@ -0,0 +1,27 @@ +Subject: sched-clear-pf-thread-bound-on-fallback-rq.patch +From: Thomas Gleixner +Date: Fri, 04 Nov 2011 20:48:36 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/sched/core.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -1245,6 +1245,12 @@ out: + } + } + ++ /* ++ * Clear PF_NO_SETAFFINITY, otherwise we wreckage ++ * migrate_disable/enable. See optimization for ++ * PF_NO_SETAFFINITY tasks there. 
++ */ ++ p->flags &= ~PF_NO_SETAFFINITY; + return dest_cpu; + } + diff --git a/debian/patches/features/all/rt/sched-cond-resched.patch b/debian/patches/features/all/rt/sched-cond-resched.patch new file mode 100644 index 000000000..65f8512ac --- /dev/null +++ b/debian/patches/features/all/rt/sched-cond-resched.patch @@ -0,0 +1,35 @@ +Subject: sched-cond-resched.patch +From: Thomas Gleixner +Date: Tue, 07 Jun 2011 11:25:03 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/sched/core.c | 14 +++++++++++--- + 1 file changed, 11 insertions(+), 3 deletions(-) + +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -4392,9 +4392,17 @@ static inline int should_resched(void) + + static void __cond_resched(void) + { +- add_preempt_count(PREEMPT_ACTIVE); +- __schedule(); +- sub_preempt_count(PREEMPT_ACTIVE); ++ do { ++ add_preempt_count(PREEMPT_ACTIVE); ++ __schedule(); ++ sub_preempt_count(PREEMPT_ACTIVE); ++ /* ++ * Check again in case we missed a preemption ++ * opportunity between schedule and now. ++ */ ++ barrier(); ++ ++ } while (need_resched()); + } + + int __sched _cond_resched(void) diff --git a/debian/patches/features/all/rt/sched-consider-pi-boosting-in-setscheduler.patch b/debian/patches/features/all/rt/sched-consider-pi-boosting-in-setscheduler.patch new file mode 100644 index 000000000..daa925a9d --- /dev/null +++ b/debian/patches/features/all/rt/sched-consider-pi-boosting-in-setscheduler.patch @@ -0,0 +1,168 @@ +Subject: sched: Consider pi boosting in setscheduler +From: Thomas Gleixner +Date: Thu, 20 Dec 2012 15:13:49 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +If a PI boosted task policy/priority is modified by a setscheduler() +call we unconditionally dequeue and requeue the task if it is on the +runqueue even if the new priority is lower than the current effective +boosted priority. This can result in undesired reordering of the +priority bucket list. + +If the new priority is less or equal than the current effective we +just store the new parameters in the task struct and leave the +scheduler class and the runqueue untouched. This is handled when the +task deboosts itself. Only if the new priority is higher than the +effective boosted priority we apply the change immediately. 
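[Editorial illustration, not part of the patch.] The check boils down to comparing the requested priority with the top PI waiter on the kernel's inverted numeric scale, where a lower number means a higher priority and MAX_RT_PRIO is 100 in this tree. The stand-alone sketch below mirrors the rt_mutex_check_prio() logic; the task structure and helper name are made up for illustration.

#include <stdbool.h>
#include <stdio.h>

#define MAX_RT_PRIO 100

struct task {
        bool has_pi_waiters;
        int top_waiter_prio;    /* internal scale: lower number = higher priority */
};

/* true means the existing boost overrules the requested change */
static bool boost_overrules(const struct task *p, int newprio)
{
        if (!p->has_pi_waiters)
                return false;
        return p->top_waiter_prio <= newprio;
}

int main(void)
{
        /* task is boosted by a waiter at user prio 90, caller asks for 80 */
        struct task p = { .has_pi_waiters = true,
                          .top_waiter_prio = MAX_RT_PRIO - 1 - 90 };
        int newprio = MAX_RT_PRIO - 1 - 80;

        if (boost_overrules(&p, newprio))
                printf("only store the new parameters, defer the requeue\n");
        else
                printf("apply the new priority immediately\n");
        return 0;
}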
+ +Signed-off-by: Thomas Gleixner +Cc: stable@vger.kernel.org +Cc: stable-rt@vger.kernel.org +--- + include/linux/sched/rt.h | 5 +++++ + kernel/rtmutex.c | 12 ++++++++++++ + kernel/sched/core.c | 40 +++++++++++++++++++++++++++++++--------- + 3 files changed, 48 insertions(+), 9 deletions(-) + +Index: linux-stable/include/linux/sched/rt.h +=================================================================== +--- linux-stable.orig/include/linux/sched/rt.h ++++ linux-stable/include/linux/sched/rt.h +@@ -35,6 +35,7 @@ static inline int rt_task(struct task_st + #ifdef CONFIG_RT_MUTEXES + extern int rt_mutex_getprio(struct task_struct *p); + extern void rt_mutex_setprio(struct task_struct *p, int prio); ++extern int rt_mutex_check_prio(struct task_struct *task, int newprio); + extern void rt_mutex_adjust_pi(struct task_struct *p); + static inline bool tsk_is_pi_blocked(struct task_struct *tsk) + { +@@ -45,6 +46,10 @@ static inline int rt_mutex_getprio(struc + { + return p->normal_prio; + } ++static inline int rt_mutex_check_prio(struct task_struct *task, int newprio) ++{ ++ return 0; ++} + # define rt_mutex_adjust_pi(p) do { } while (0) + static inline bool tsk_is_pi_blocked(struct task_struct *tsk) + { +Index: linux-stable/kernel/rtmutex.c +=================================================================== +--- linux-stable.orig/kernel/rtmutex.c ++++ linux-stable/kernel/rtmutex.c +@@ -125,6 +125,18 @@ int rt_mutex_getprio(struct task_struct + } + + /* ++ * Called by sched_setscheduler() to check whether the priority change ++ * is overruled by a possible priority boosting. ++ */ ++int rt_mutex_check_prio(struct task_struct *task, int newprio) ++{ ++ if (!task_has_pi_waiters(task)) ++ return 0; ++ ++ return task_top_pi_waiter(task)->pi_list_entry.prio <= newprio; ++} ++ ++/* + * Adjust the priority of a task, after its pi_waiters got modified. + * + * This can be both boosting and unboosting. task->pi_lock must be held. +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -3839,7 +3839,8 @@ EXPORT_SYMBOL(sleep_on_timeout); + * This function changes the 'effective' priority of a task. It does + * not touch ->normal_prio like __setscheduler(). + * +- * Used by the rt_mutex code to implement priority inheritance logic. ++ * Used by the rt_mutex code to implement priority inheritance ++ * logic. Call site only calls if the priority of the task changed. + */ + void rt_mutex_setprio(struct task_struct *p, int prio) + { +@@ -4062,20 +4063,25 @@ static struct task_struct *find_process_ + return pid ? find_task_by_vpid(pid) : current; + } + +-/* Actually do priority change: must hold rq lock. */ +-static void +-__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) ++static void __setscheduler_params(struct task_struct *p, int policy, int prio) + { + p->policy = policy; + p->rt_priority = prio; + p->normal_prio = normal_prio(p); ++ set_load_weight(p); ++} ++ ++/* Actually do priority change: must hold rq lock. 
*/ ++static void ++__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) ++{ ++ __setscheduler_params(p, policy, prio); + /* we are holding p->pi_lock already */ + p->prio = rt_mutex_getprio(p); + if (rt_prio(p->prio)) + p->sched_class = &rt_sched_class; + else + p->sched_class = &fair_sched_class; +- set_load_weight(p); + } + + /* +@@ -4097,6 +4103,7 @@ static bool check_same_owner(struct task + static int __sched_setscheduler(struct task_struct *p, int policy, + const struct sched_param *param, bool user) + { ++ int newprio = MAX_RT_PRIO - 1 - param->sched_priority; + int retval, oldprio, oldpolicy = -1, on_rq, running; + unsigned long flags; + const struct sched_class *prev_class; +@@ -4224,6 +4231,25 @@ recheck: + task_rq_unlock(rq, p, &flags); + goto recheck; + } ++ ++ p->sched_reset_on_fork = reset_on_fork; ++ oldprio = p->prio; ++ ++ /* ++ * Special case for priority boosted tasks. ++ * ++ * If the new priority is lower or equal (user space view) ++ * than the current (boosted) priority, we just store the new ++ * normal parameters and do not touch the scheduler class and ++ * the runqueue. This will be done when the task deboost ++ * itself. ++ */ ++ if (rt_mutex_check_prio(p, newprio)) { ++ __setscheduler_params(p, policy, param->sched_priority); ++ task_rq_unlock(rq, p, &flags); ++ return 0; ++ } ++ + on_rq = p->on_rq; + running = task_current(rq, p); + if (on_rq) +@@ -4231,9 +4257,6 @@ recheck: + if (running) + p->sched_class->put_prev_task(rq, p); + +- p->sched_reset_on_fork = reset_on_fork; +- +- oldprio = p->prio; + prev_class = p->sched_class; + __setscheduler(rq, p, policy, param->sched_priority); + +@@ -4246,7 +4269,6 @@ recheck: + */ + enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0); + } +- + check_class_changed(rq, p, prev_class, oldprio); + task_rq_unlock(rq, p, &flags); + diff --git a/debian/patches/features/all/rt/sched-delay-put-task.patch b/debian/patches/features/all/rt/sched-delay-put-task.patch new file mode 100644 index 000000000..16c2fd675 --- /dev/null +++ b/debian/patches/features/all/rt/sched-delay-put-task.patch @@ -0,0 +1,83 @@ +Subject: sched-delay-put-task.patch +From: Thomas Gleixner +Date: Tue, 31 May 2011 16:59:16 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/sched.h | 13 +++++++++++++ + kernel/fork.c | 15 ++++++++++++++- + 2 files changed, 27 insertions(+), 1 deletion(-) + +Index: linux-stable/include/linux/sched.h +=================================================================== +--- linux-stable.orig/include/linux/sched.h ++++ linux-stable/include/linux/sched.h +@@ -1435,6 +1435,9 @@ struct task_struct { + unsigned int sequential_io; + unsigned int sequential_io_avg; + #endif ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct rcu_head put_rcu; ++#endif + }; + + /* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ +@@ -1594,6 +1597,15 @@ extern struct pid *cad_pid; + extern void free_task(struct task_struct *tsk); + #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) + ++#ifdef CONFIG_PREEMPT_RT_BASE ++extern void __put_task_struct_cb(struct rcu_head *rhp); ++ ++static inline void put_task_struct(struct task_struct *t) ++{ ++ if (atomic_dec_and_test(&t->usage)) ++ call_rcu(&t->put_rcu, __put_task_struct_cb); ++} ++#else + extern void __put_task_struct(struct task_struct *t); + + static inline void put_task_struct(struct task_struct *t) +@@ -1601,6 +1613,7 @@ static inline void put_task_struct(struc + if (atomic_dec_and_test(&t->usage)) + __put_task_struct(t); + } ++#endif + + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN + extern void task_cputime(struct task_struct *t, +Index: linux-stable/kernel/fork.c +=================================================================== +--- linux-stable.orig/kernel/fork.c ++++ linux-stable/kernel/fork.c +@@ -230,7 +230,9 @@ static inline void put_signal_struct(str + if (atomic_dec_and_test(&sig->sigcnt)) + free_signal_struct(sig); + } +- ++#ifdef CONFIG_PREEMPT_RT_BASE ++static ++#endif + void __put_task_struct(struct task_struct *tsk) + { + WARN_ON(!tsk->exit_state); +@@ -245,7 +247,18 @@ void __put_task_struct(struct task_struc + if (!profile_handoff_task(tsk)) + free_task(tsk); + } ++#ifndef CONFIG_PREEMPT_RT_BASE + EXPORT_SYMBOL_GPL(__put_task_struct); ++#else ++void __put_task_struct_cb(struct rcu_head *rhp) ++{ ++ struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu); ++ ++ __put_task_struct(tsk); ++ ++} ++EXPORT_SYMBOL_GPL(__put_task_struct_cb); ++#endif + + void __init __weak arch_task_cache_init(void) { } + diff --git a/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch b/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch new file mode 100644 index 000000000..58e66ef9b --- /dev/null +++ b/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch @@ -0,0 +1,31 @@ +Subject: sched: Disable CONFIG_RT_GROUP_SCHED on RT +From: Thomas Gleixner +Date: Mon, 18 Jul 2011 17:03:52 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Carsten reported problems when running: + + taskset 01 chrt -f 1 sleep 1 + +from within rc.local on a F15 machine. The task stays running and +never gets on the run queue because some of the run queues have +rt_throttled=1 which does not go away. Works nice from a ssh login +shell. Disabling CONFIG_RT_GROUP_SCHED solves that as well. 
+ +Signed-off-by: Thomas Gleixner +--- + init/Kconfig | 1 + + 1 file changed, 1 insertion(+) + +Index: linux-stable/init/Kconfig +=================================================================== +--- linux-stable.orig/init/Kconfig ++++ linux-stable/init/Kconfig +@@ -985,6 +985,7 @@ config CFS_BANDWIDTH + config RT_GROUP_SCHED + bool "Group scheduling for SCHED_RR/FIFO" + depends on CGROUP_SCHED ++ depends on !PREEMPT_RT_FULL + default n + help + This feature lets you explicitly allocate real CPU bandwidth diff --git a/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch b/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch new file mode 100644 index 000000000..14a511937 --- /dev/null +++ b/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch @@ -0,0 +1,30 @@ +Subject: sched-disable-ttwu-queue.patch +From: Thomas Gleixner +Date: Tue, 13 Sep 2011 16:42:35 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/sched/features.h | 4 ++++ + 1 file changed, 4 insertions(+) + +Index: linux-stable/kernel/sched/features.h +=================================================================== +--- linux-stable.orig/kernel/sched/features.h ++++ linux-stable/kernel/sched/features.h +@@ -50,11 +50,15 @@ SCHED_FEAT(LB_BIAS, true) + */ + SCHED_FEAT(NONTASK_POWER, true) + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Queue remote wakeups on the target CPU and process them + * using the scheduler IPI. Reduces rq->lock contention/bounces. + */ + SCHED_FEAT(TTWU_QUEUE, true) ++#else ++SCHED_FEAT(TTWU_QUEUE, false) ++#endif + + SCHED_FEAT(FORCE_SD_OVERLAP, false) + SCHED_FEAT(RT_RUNTIME_SHARE, true) diff --git a/debian/patches/features/all/rt/sched-enqueue-to-head.patch b/debian/patches/features/all/rt/sched-enqueue-to-head.patch new file mode 100644 index 000000000..6f845a6c9 --- /dev/null +++ b/debian/patches/features/all/rt/sched-enqueue-to-head.patch @@ -0,0 +1,71 @@ +Subject: sched: Queue RT tasks to head when prio drops +From: Thomas Gleixner +Date: Tue, 04 Dec 2012 08:56:41 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +The following scenario does not work correctly: + +Runqueue of CPUx contains two runnable and pinned tasks: + T1: SCHED_FIFO, prio 80 + T2: SCHED_FIFO, prio 80 + +T1 is on the cpu and executes the following syscalls (classic priority +ceiling scenario): + + sys_sched_setscheduler(pid(T1), SCHED_FIFO, .prio = 90); + ... + sys_sched_setscheduler(pid(T1), SCHED_FIFO, .prio = 80); + ... + +Now T1 gets preempted by T3 (SCHED_FIFO, prio 95). After T3 goes back +to sleep the scheduler picks T2. Surprise! + +The same happens w/o actual preemption when T1 is forced into the +scheduler due to a sporadic NEED_RESCHED event. The scheduler invokes +pick_next_task() which returns T2. So T1 gets preempted and scheduled +out. + +This happens because sched_setscheduler() dequeues T1 from the prio 90 +list and then enqueues it on the tail of the prio 80 list behind T2. +This violates the POSIX spec and surprises user space which relies on +the guarantee that SCHED_FIFO tasks are not scheduled out unless they +give the CPU up voluntarily or are preempted by a higher priority +task. In the latter case the preempted task must get back on the CPU +after the preempting task schedules out again. + +We fixed a similar issue already in commit 60db48c (sched: Queue a +deboosted task to the head of the RT prio queue). 
The same treatment +is necessary for sched_setscheduler(). So enqueue to head of the prio +bucket list if the priority of the task is lowered. + +It might be possible that existing user space relies on the current +behaviour, but it can be considered highly unlikely due to the corner +case nature of the application scenario. + +Signed-off-by: Thomas Gleixner +Cc: stable@vger.kernel.org +Cc: stable-rt@vger.kernel.org +--- + kernel/sched/core.c | 9 +++++++-- + 1 file changed, 7 insertions(+), 2 deletions(-) + +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -4239,8 +4239,13 @@ recheck: + + if (running) + p->sched_class->set_curr_task(rq); +- if (on_rq) +- enqueue_task(rq, p, 0); ++ if (on_rq) { ++ /* ++ * We enqueue to tail when the priority of a task is ++ * increased (user space view). ++ */ ++ enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0); ++ } + + check_class_changed(rq, p, prev_class, oldprio); + task_rq_unlock(rq, p, &flags); diff --git a/debian/patches/features/all/rt/sched-limit-nr-migrate.patch b/debian/patches/features/all/rt/sched-limit-nr-migrate.patch new file mode 100644 index 000000000..d7f512680 --- /dev/null +++ b/debian/patches/features/all/rt/sched-limit-nr-migrate.patch @@ -0,0 +1,26 @@ +Subject: sched-limit-nr-migrate.patch +From: Thomas Gleixner +Date: Mon, 06 Jun 2011 12:12:51 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/sched/core.c | 4 ++++ + 1 file changed, 4 insertions(+) + +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -272,7 +272,11 @@ late_initcall(sched_init_debug); + * Number of tasks to iterate in a single balance run. + * Limited because this is done with IRQs disabled. + */ ++#ifndef CONFIG_PREEMPT_RT_FULL + const_debug unsigned int sysctl_sched_nr_migrate = 32; ++#else ++const_debug unsigned int sysctl_sched_nr_migrate = 8; ++#endif + + /* + * period over which we average the RT time consumption, measured diff --git a/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch b/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch new file mode 100644 index 000000000..d2368e9fc --- /dev/null +++ b/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch @@ -0,0 +1,50 @@ +Subject: sched-might-sleep-do-not-account-rcu-depth.patch +From: Thomas Gleixner +Date: Tue, 07 Jun 2011 09:19:06 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/rcupdate.h | 7 +++++++ + kernel/sched/core.c | 3 ++- + 2 files changed, 9 insertions(+), 1 deletion(-) + +Index: linux-stable/include/linux/rcupdate.h +=================================================================== +--- linux-stable.orig/include/linux/rcupdate.h ++++ linux-stable/include/linux/rcupdate.h +@@ -190,6 +190,11 @@ void synchronize_rcu(void); + * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. 
+ */ + #define rcu_preempt_depth() (current->rcu_read_lock_nesting) ++#ifndef CONFIG_PREEMPT_RT_FULL ++#define sched_rcu_preempt_depth() rcu_preempt_depth() ++#else ++static inline int sched_rcu_preempt_depth(void) { return 0; } ++#endif + + #else /* #ifdef CONFIG_PREEMPT_RCU */ + +@@ -213,6 +218,8 @@ static inline int rcu_preempt_depth(void + return 0; + } + ++#define sched_rcu_preempt_depth() rcu_preempt_depth() ++ + #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ + + /* Internal to kernel */ +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -7146,7 +7146,8 @@ void __init sched_init(void) + #ifdef CONFIG_DEBUG_ATOMIC_SLEEP + static inline int preempt_count_equals(int preempt_offset) + { +- int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth(); ++ int nested = (preempt_count() & ~PREEMPT_ACTIVE) + ++ sched_rcu_preempt_depth(); + + return (nested == preempt_offset); + } diff --git a/debian/patches/features/all/rt/sched-migrate-disable.patch b/debian/patches/features/all/rt/sched-migrate-disable.patch new file mode 100644 index 000000000..6799e1ff8 --- /dev/null +++ b/debian/patches/features/all/rt/sched-migrate-disable.patch @@ -0,0 +1,201 @@ +Subject: sched-migrate-disable.patch +From: Thomas Gleixner +Date: Thu, 16 Jun 2011 13:26:08 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/preempt.h | 8 ++++ + include/linux/sched.h | 13 +++++-- + kernel/sched/core.c | 88 +++++++++++++++++++++++++++++++++++++++++++++--- + lib/smp_processor_id.c | 6 +-- + 4 files changed, 104 insertions(+), 11 deletions(-) + +Index: linux-stable/include/linux/preempt.h +=================================================================== +--- linux-stable.orig/include/linux/preempt.h ++++ linux-stable/include/linux/preempt.h +@@ -130,6 +130,14 @@ do { \ + + #endif /* CONFIG_PREEMPT_COUNT */ + ++#ifdef CONFIG_SMP ++extern void migrate_disable(void); ++extern void migrate_enable(void); ++#else ++# define migrate_disable() barrier() ++# define migrate_enable() barrier() ++#endif ++ + #ifdef CONFIG_PREEMPT_RT_FULL + # define preempt_disable_rt() preempt_disable() + # define preempt_enable_rt() preempt_enable() +Index: linux-stable/include/linux/sched.h +=================================================================== +--- linux-stable.orig/include/linux/sched.h ++++ linux-stable/include/linux/sched.h +@@ -1074,6 +1074,7 @@ struct task_struct { + #endif + + unsigned int policy; ++ int migrate_disable; + int nr_cpus_allowed; + cpumask_t cpus_allowed; + +@@ -1441,9 +1442,6 @@ struct task_struct { + #endif + }; + +-/* Future-safe accessor for struct task_struct's cpus_allowed. */ +-#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) +- + #ifdef CONFIG_NUMA_BALANCING + extern void task_numa_fault(int node, int pages, bool migrated); + extern void set_numabalancing_state(bool enabled); +@@ -2616,6 +2614,15 @@ static inline void set_task_cpu(struct t + + #endif /* CONFIG_SMP */ + ++/* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ ++static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p) ++{ ++ if (p->migrate_disable) ++ return cpumask_of(task_cpu(p)); ++ ++ return &p->cpus_allowed; ++} ++ + extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); + extern long sched_getaffinity(pid_t pid, struct cpumask *mask); + +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -4828,11 +4828,12 @@ void __cpuinit init_idle(struct task_str + #ifdef CONFIG_SMP + void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) + { +- if (p->sched_class && p->sched_class->set_cpus_allowed) +- p->sched_class->set_cpus_allowed(p, new_mask); +- ++ if (!p->migrate_disable) { ++ if (p->sched_class && p->sched_class->set_cpus_allowed) ++ p->sched_class->set_cpus_allowed(p, new_mask); ++ p->nr_cpus_allowed = cpumask_weight(new_mask); ++ } + cpumask_copy(&p->cpus_allowed, new_mask); +- p->nr_cpus_allowed = cpumask_weight(new_mask); + } + + /* +@@ -4878,7 +4879,7 @@ int set_cpus_allowed_ptr(struct task_str + do_set_cpus_allowed(p, new_mask); + + /* Can the task run on the task's current CPU? If so, we're done */ +- if (cpumask_test_cpu(task_cpu(p), new_mask)) ++ if (cpumask_test_cpu(task_cpu(p), new_mask) || p->migrate_disable) + goto out; + + dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); +@@ -4897,6 +4898,83 @@ out: + } + EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); + ++void migrate_disable(void) ++{ ++ struct task_struct *p = current; ++ const struct cpumask *mask; ++ unsigned long flags; ++ struct rq *rq; ++ ++ preempt_disable(); ++ if (p->migrate_disable) { ++ p->migrate_disable++; ++ preempt_enable(); ++ return; ++ } ++ ++ pin_current_cpu(); ++ if (unlikely(!scheduler_running)) { ++ p->migrate_disable = 1; ++ preempt_enable(); ++ return; ++ } ++ rq = task_rq_lock(p, &flags); ++ p->migrate_disable = 1; ++ mask = tsk_cpus_allowed(p); ++ ++ WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); ++ ++ if (!cpumask_equal(&p->cpus_allowed, mask)) { ++ if (p->sched_class->set_cpus_allowed) ++ p->sched_class->set_cpus_allowed(p, mask); ++ p->nr_cpus_allowed = cpumask_weight(mask); ++ } ++ task_rq_unlock(rq, p, &flags); ++ preempt_enable(); ++} ++EXPORT_SYMBOL(migrate_disable); ++ ++void migrate_enable(void) ++{ ++ struct task_struct *p = current; ++ const struct cpumask *mask; ++ unsigned long flags; ++ struct rq *rq; ++ ++ WARN_ON_ONCE(p->migrate_disable <= 0); ++ ++ preempt_disable(); ++ if (p->migrate_disable > 1) { ++ p->migrate_disable--; ++ preempt_enable(); ++ return; ++ } ++ ++ if (unlikely(!scheduler_running)) { ++ p->migrate_disable = 0; ++ unpin_current_cpu(); ++ preempt_enable(); ++ return; ++ } ++ ++ rq = task_rq_lock(p, &flags); ++ p->migrate_disable = 0; ++ mask = tsk_cpus_allowed(p); ++ ++ WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); ++ ++ if (!cpumask_equal(&p->cpus_allowed, mask)) { ++ if (p->sched_class->set_cpus_allowed) ++ p->sched_class->set_cpus_allowed(p, mask); ++ p->nr_cpus_allowed = cpumask_weight(mask); ++ } ++ ++ task_rq_unlock(rq, p, &flags); ++ unpin_current_cpu(); ++ preempt_enable(); ++} ++EXPORT_SYMBOL(migrate_enable); ++ + /* + * Move (not current) task off this cpu, onto dest cpu. 
We're doing + * this because either it can't run here any more (set_cpus_allowed() +Index: linux-stable/lib/smp_processor_id.c +=================================================================== +--- linux-stable.orig/lib/smp_processor_id.c ++++ linux-stable/lib/smp_processor_id.c +@@ -39,9 +39,9 @@ notrace unsigned int debug_smp_processor + if (!printk_ratelimit()) + goto out_enable; + +- printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] " +- "code: %s/%d\n", +- preempt_count() - 1, current->comm, current->pid); ++ printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x %08x] " ++ "code: %s/%d\n", preempt_count() - 1, ++ current->migrate_disable, current->comm, current->pid); + print_symbol("caller is %s\n", (long)__builtin_return_address(0)); + dump_stack(); + diff --git a/debian/patches/features/all/rt/sched-mmdrop-delayed.patch b/debian/patches/features/all/rt/sched-mmdrop-delayed.patch new file mode 100644 index 000000000..aaf348589 --- /dev/null +++ b/debian/patches/features/all/rt/sched-mmdrop-delayed.patch @@ -0,0 +1,143 @@ +Subject: sched-mmdrop-delayed.patch +From: Thomas Gleixner +Date: Mon, 06 Jun 2011 12:20:33 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Needs thread context (pgd_lock) -> ifdeffed. workqueues wont work with +RT + +Signed-off-by: Thomas Gleixner +--- + include/linux/mm_types.h | 4 ++++ + include/linux/sched.h | 12 ++++++++++++ + kernel/fork.c | 13 +++++++++++++ + kernel/sched/core.c | 19 +++++++++++++++++-- + 4 files changed, 46 insertions(+), 2 deletions(-) + +Index: linux-stable/include/linux/mm_types.h +=================================================================== +--- linux-stable.orig/include/linux/mm_types.h ++++ linux-stable/include/linux/mm_types.h +@@ -11,6 +11,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -441,6 +442,9 @@ struct mm_struct { + int first_nid; + #endif + struct uprobes_state uprobes_state; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct rcu_head delayed_drop; ++#endif + }; + + /* first nid will either be a valid NID or one of these values */ +Index: linux-stable/include/linux/sched.h +=================================================================== +--- linux-stable.orig/include/linux/sched.h ++++ linux-stable/include/linux/sched.h +@@ -2129,12 +2129,24 @@ extern struct mm_struct * mm_alloc(void) + + /* mmdrop drops the mm and the page tables */ + extern void __mmdrop(struct mm_struct *); ++ + static inline void mmdrop(struct mm_struct * mm) + { + if (unlikely(atomic_dec_and_test(&mm->mm_count))) + __mmdrop(mm); + } + ++#ifdef CONFIG_PREEMPT_RT_BASE ++extern void __mmdrop_delayed(struct rcu_head *rhp); ++static inline void mmdrop_delayed(struct mm_struct *mm) ++{ ++ if (atomic_dec_and_test(&mm->mm_count)) ++ call_rcu(&mm->delayed_drop, __mmdrop_delayed); ++} ++#else ++# define mmdrop_delayed(mm) mmdrop(mm) ++#endif ++ + /* mmput gets rid of the mappings and all user-space */ + extern void mmput(struct mm_struct *); + /* Grab a reference to a task's mm, if it is not already going away */ +Index: linux-stable/kernel/fork.c +=================================================================== +--- linux-stable.orig/kernel/fork.c ++++ linux-stable/kernel/fork.c +@@ -617,6 +617,19 @@ void __mmdrop(struct mm_struct *mm) + } + EXPORT_SYMBOL_GPL(__mmdrop); + ++#ifdef CONFIG_PREEMPT_RT_BASE ++/* ++ * RCU callback for delayed mm drop. Not strictly rcu, but we don't ++ * want another facility to make this work. 
++ */ ++void __mmdrop_delayed(struct rcu_head *rhp) ++{ ++ struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop); ++ ++ __mmdrop(mm); ++} ++#endif ++ + /* + * Decrement the use count and release all resources for an mm. + */ +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -1901,8 +1901,12 @@ static void finish_task_switch(struct rq + finish_arch_post_lock_switch(); + + fire_sched_in_preempt_notifiers(current); ++ /* ++ * We use mmdrop_delayed() here so we don't have to do the ++ * full __mmdrop() when we are the last user. ++ */ + if (mm) +- mmdrop(mm); ++ mmdrop_delayed(mm); + if (unlikely(prev_state == TASK_DEAD)) { + /* + * Remove function-return probe instances associated with this +@@ -4920,6 +4924,8 @@ static int migration_cpu_stop(void *data + + #ifdef CONFIG_HOTPLUG_CPU + ++static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm); ++ + /* + * Ensures that the idle task is using init_mm right before its cpu goes + * offline. +@@ -4932,7 +4938,12 @@ void idle_task_exit(void) + + if (mm != &init_mm) + switch_mm(mm, &init_mm, current); +- mmdrop(mm); ++ ++ /* ++ * Defer the cleanup to an alive cpu. On RT we can neither ++ * call mmdrop() nor mmdrop_delayed() from here. ++ */ ++ per_cpu(idle_last_mm, smp_processor_id()) = mm; + } + + /* +@@ -5249,6 +5260,10 @@ migration_call(struct notifier_block *nf + + case CPU_DEAD: + calc_load_migrate(rq); ++ if (per_cpu(idle_last_mm, cpu)) { ++ mmdrop(per_cpu(idle_last_mm, cpu)); ++ per_cpu(idle_last_mm, cpu) = NULL; ++ } + break; + #endif + } diff --git a/debian/patches/features/all/rt/sched-rt-fix-migrate_enable-thinko.patch b/debian/patches/features/all/rt/sched-rt-fix-migrate_enable-thinko.patch new file mode 100644 index 000000000..925ebb7de --- /dev/null +++ b/debian/patches/features/all/rt/sched-rt-fix-migrate_enable-thinko.patch @@ -0,0 +1,68 @@ +Subject: sched, rt: Fix migrate_enable() thinko +From: Mike Galbraith +Date: Tue, 23 Aug 2011 16:12:43 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Assigning mask = tsk_cpus_allowed(p) after p->migrate_disable = 0 ensures +that we won't see a mask change.. no push/pull, we stack tasks on one CPU. + +Also add a couple fields to sched_debug for the next guy. + +[ Build fix from Stratos Psomadakis ] + +Signed-off-by: Mike Galbraith +Cc: Paul E. 
McKenney +Cc: Peter Zijlstra +Link: http://lkml.kernel.org/r/1314108763.6689.4.camel@marge.simson.net +Signed-off-by: Thomas Gleixner + +--- + kernel/sched/core.c | 4 +++- + kernel/sched/debug.c | 7 +++++++ + 2 files changed, 10 insertions(+), 1 deletion(-) + +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -4975,12 +4975,14 @@ void migrate_enable(void) + */ + rq = this_rq(); + raw_spin_lock_irqsave(&rq->lock, flags); +- p->migrate_disable = 0; + mask = tsk_cpus_allowed(p); ++ p->migrate_disable = 0; + + WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask)); + + if (!cpumask_equal(&p->cpus_allowed, mask)) { ++ /* Get the mask now that migration is enabled */ ++ mask = tsk_cpus_allowed(p); + if (p->sched_class->set_cpus_allowed) + p->sched_class->set_cpus_allowed(p, mask); + p->nr_cpus_allowed = cpumask_weight(mask); +Index: linux-stable/kernel/sched/debug.c +=================================================================== +--- linux-stable.orig/kernel/sched/debug.c ++++ linux-stable/kernel/sched/debug.c +@@ -246,6 +246,9 @@ void print_rt_rq(struct seq_file *m, int + P(rt_throttled); + PN(rt_time); + PN(rt_runtime); ++#ifdef CONFIG_SMP ++ P(rt_nr_migratory); ++#endif + + #undef PN + #undef P +@@ -568,6 +571,10 @@ void proc_sched_show_task(struct task_st + P(se.load.weight); + P(policy); + P(prio); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ P(migrate_disable); ++#endif ++ P(nr_cpus_allowed); + #undef PN + #undef __PN + #undef P diff --git a/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch b/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch new file mode 100644 index 000000000..9ff0c78f4 --- /dev/null +++ b/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch @@ -0,0 +1,96 @@ +Subject: sched-rt-mutex-wakeup.patch +From: Thomas Gleixner +Date: Sat, 25 Jun 2011 09:21:04 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/sched.h | 2 ++ + kernel/sched/core.c | 31 ++++++++++++++++++++++++++++++- + kernel/sched/sched.h | 3 ++- + 3 files changed, 34 insertions(+), 2 deletions(-) + +Index: linux-stable/include/linux/sched.h +=================================================================== +--- linux-stable.orig/include/linux/sched.h ++++ linux-stable/include/linux/sched.h +@@ -1034,6 +1034,7 @@ enum perf_event_task_context { + + struct task_struct { + volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ ++ volatile long saved_state; /* saved state for "spinlock sleepers" */ + void *stack; + atomic_t usage; + unsigned int flags; /* per process flags, defined below */ +@@ -2015,6 +2016,7 @@ extern void xtime_update(unsigned long t + + extern int wake_up_state(struct task_struct *tsk, unsigned int state); + extern int wake_up_process(struct task_struct *tsk); ++extern int wake_up_lock_sleeper(struct task_struct * tsk); + extern void wake_up_new_task(struct task_struct *tsk); + #ifdef CONFIG_SMP + extern void kick_process(struct task_struct *tsk); +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -1494,8 +1494,25 @@ try_to_wake_up(struct task_struct *p, un + + smp_wmb(); + raw_spin_lock_irqsave(&p->pi_lock, flags); +- if (!(p->state & state)) ++ if (!(p->state & state)) { ++ /* ++ * 
The task might be running due to a spinlock sleeper ++ * wakeup. Check the saved state and set it to running ++ * if the wakeup condition is true. ++ */ ++ if (!(wake_flags & WF_LOCK_SLEEPER)) { ++ if (p->saved_state & state) ++ p->saved_state = TASK_RUNNING; ++ } + goto out; ++ } ++ ++ /* ++ * If this is a regular wakeup, then we can unconditionally ++ * clear the saved state of a "lock sleeper". ++ */ ++ if (!(wake_flags & WF_LOCK_SLEEPER)) ++ p->saved_state = TASK_RUNNING; + + success = 1; /* we're going to change ->state */ + cpu = task_cpu(p); +@@ -1591,6 +1608,18 @@ int wake_up_process(struct task_struct * + } + EXPORT_SYMBOL(wake_up_process); + ++/** ++ * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock" ++ * @p: The process to be woken up. ++ * ++ * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate ++ * the nature of the wakeup. ++ */ ++int wake_up_lock_sleeper(struct task_struct *p) ++{ ++ return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER); ++} ++ + int wake_up_state(struct task_struct *p, unsigned int state) + { + return try_to_wake_up(p, state, 0); +Index: linux-stable/kernel/sched/sched.h +=================================================================== +--- linux-stable.orig/kernel/sched/sched.h ++++ linux-stable/kernel/sched/sched.h +@@ -882,7 +882,8 @@ static inline void finish_lock_switch(st + */ + #define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ + #define WF_FORK 0x02 /* child wakeup after fork */ +-#define WF_MIGRATED 0x4 /* internal use, task got migrated */ ++#define WF_MIGRATED 0x04 /* internal use, task got migrated */ ++#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */ + + static inline void update_load_add(struct load_weight *lw, unsigned long inc) + { diff --git a/debian/patches/features/all/rt/sched-teach-migrate_disable-about-atomic-contexts.patch b/debian/patches/features/all/rt/sched-teach-migrate_disable-about-atomic-contexts.patch new file mode 100644 index 000000000..0da6f916d --- /dev/null +++ b/debian/patches/features/all/rt/sched-teach-migrate_disable-about-atomic-contexts.patch @@ -0,0 +1,93 @@ +Subject: sched: Teach migrate_disable about atomic contexts +From: Peter Zijlstra +Date: Fri, 02 Sep 2011 14:41:37 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Subject: sched: teach migrate_disable about atomic contexts +From: Peter Zijlstra +Date: Fri Sep 02 14:29:27 CEST 2011 + + [] spin_bug+0x94/0xa8 + [] do_raw_spin_lock+0x43/0xea + [] _raw_spin_lock_irqsave+0x6b/0x85 + [] ? migrate_disable+0x75/0x12d + [] ? pin_current_cpu+0x36/0xb0 + [] migrate_disable+0x75/0x12d + [] pagefault_disable+0xe/0x1f + [] copy_from_user_nmi+0x74/0xe6 + [] perf_callchain_user+0xf3/0x135 + +Now clearly we can't go around taking locks from NMI context, cure +this by short-circuiting migrate_disable() when we're in an atomic +context already. 
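
As a rough illustration of that short-circuit and of the pairing rule the debug counter below enforces, here is a small userspace model (plain C, not kernel code; in_atomic_ctx merely stands in for the kernel's in_atomic() test, and the two counters model p->migrate_disable and p->migrate_disable_atomic):

#include <stdio.h>

#define WARN_ON(cond) \
    do { if (cond) fprintf(stderr, "WARN: %s\n", #cond); } while (0)

static int in_atomic_ctx;          /* stands in for in_atomic() */
static int migrate_disable_cnt;    /* models p->migrate_disable */
static int migrate_disable_atomic; /* models p->migrate_disable_atomic */

static void model_migrate_disable(void)
{
    if (in_atomic_ctx) {           /* short-circuit: no locks from here */
        migrate_disable_atomic++;
        return;
    }
    WARN_ON(migrate_disable_atomic);
    migrate_disable_cnt++;
}

static void model_migrate_enable(void)
{
    if (in_atomic_ctx) {
        migrate_disable_atomic--;
        return;
    }
    WARN_ON(migrate_disable_atomic);   /* disable side ran in atomic context */
    WARN_ON(migrate_disable_cnt == 0); /* enable without matching disable */
    if (migrate_disable_cnt)
        migrate_disable_cnt--;
}

int main(void)
{
    /* balanced use from sleepable context: no warning */
    model_migrate_disable();
    model_migrate_enable();

    /* the broken pattern quoted below: disable while atomic,
     * enable only after the atomic section was left */
    in_atomic_ctx = 1;
    model_migrate_disable();       /* only bumps the debug counter */
    in_atomic_ctx = 0;
    model_migrate_enable();        /* warnings fire: counters are unbalanced */
    return 0;
}
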
+ +Add some extra debugging to avoid things like: + + preempt_disable() + migrate_disable(); + + preempt_enable(); + migrate_enable(); + +Signed-off-by: Peter Zijlstra +Link: http://lkml.kernel.org/r/1314967297.1301.14.camel@twins +Signed-off-by: Thomas Gleixner +Link: http://lkml.kernel.org/n/tip-wbot4vsmwhi8vmbf83hsclk6@git.kernel.org +--- + include/linux/sched.h | 3 +++ + kernel/sched/core.c | 21 +++++++++++++++++++++ + 2 files changed, 24 insertions(+) + +Index: linux-stable/include/linux/sched.h +=================================================================== +--- linux-stable.orig/include/linux/sched.h ++++ linux-stable/include/linux/sched.h +@@ -1076,6 +1076,9 @@ struct task_struct { + unsigned int policy; + #ifdef CONFIG_PREEMPT_RT_FULL + int migrate_disable; ++# ifdef CONFIG_SCHED_DEBUG ++ int migrate_disable_atomic; ++# endif + #endif + int nr_cpus_allowed; + cpumask_t cpus_allowed; +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -4906,6 +4906,17 @@ void migrate_disable(void) + unsigned long flags; + struct rq *rq; + ++ if (in_atomic()) { ++#ifdef CONFIG_SCHED_DEBUG ++ p->migrate_disable_atomic++; ++#endif ++ return; ++ } ++ ++#ifdef CONFIG_SCHED_DEBUG ++ WARN_ON_ONCE(p->migrate_disable_atomic); ++#endif ++ + preempt_disable(); + if (p->migrate_disable) { + p->migrate_disable++; +@@ -4954,6 +4965,16 @@ void migrate_enable(void) + unsigned long flags; + struct rq *rq; + ++ if (in_atomic()) { ++#ifdef CONFIG_SCHED_DEBUG ++ p->migrate_disable_atomic--; ++#endif ++ return; ++ } ++ ++#ifdef CONFIG_SCHED_DEBUG ++ WARN_ON_ONCE(p->migrate_disable_atomic); ++#endif + WARN_ON_ONCE(p->migrate_disable <= 0); + + preempt_disable(); diff --git a/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch b/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch new file mode 100644 index 000000000..a25ee53b3 --- /dev/null +++ b/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch @@ -0,0 +1,37 @@ +Subject: sched: ttwu: Return success when only changing the saved_state value +From: Thomas Gleixner +Date: Tue, 13 Dec 2011 21:42:19 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +When a task blocks on a rt lock, it saves the current state in +p->saved_state, so a lock related wake up will not destroy the +original state. + +When a real wakeup happens, while the task is running due to a lock +wakeup already, we update p->saved_state to TASK_RUNNING, but we do +not return success, which might cause another wakeup in the waitqueue +code and the task remains in the waitqueue list. Return success in +that case as well. + +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + kernel/sched/core.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -1501,8 +1501,10 @@ try_to_wake_up(struct task_struct *p, un + * if the wakeup condition is true. 
+ */ + if (!(wake_flags & WF_LOCK_SLEEPER)) { +- if (p->saved_state & state) ++ if (p->saved_state & state) { + p->saved_state = TASK_RUNNING; ++ success = 1; ++ } + } + goto out; + } diff --git a/debian/patches/features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/debian/patches/features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch new file mode 100644 index 000000000..f080670e5 --- /dev/null +++ b/debian/patches/features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch @@ -0,0 +1,42 @@ +From b24ee416f22bd2a2325b8f6afa5a4065dd3560e9 Mon Sep 17 00:00:00 2001 +From: Steven Rostedt +Date: Mon, 18 Mar 2013 15:12:49 -0400 +Subject: [PATCH] sched/workqueue: Only wake up idle workers if not blocked on + sleeping spin lock +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +In -rt, most spin_locks() turn into mutexes. One of these spin_lock +conversions is performed on the workqueue gcwq->lock. When the idle +worker is worken, the first thing it will do is grab that same lock and +it too will block, possibly jumping into the same code, but because +nr_running would already be decremented it prevents an infinite loop. + +But this is still a waste of CPU cycles, and it doesn't follow the method +of mainline, as new workers should only be woken when a worker thread is +truly going to sleep, and not just blocked on a spin_lock(). + +Check the saved_state too before waking up new workers. + +Cc: stable-rt@vger.kernel.org +Signed-off-by: Steven Rostedt +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/sched/core.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -3020,8 +3020,10 @@ need_resched: + * If a worker went to sleep, notify and ask workqueue + * whether it wants to wake up a task to maintain + * concurrency. ++ * Only call wake up if prev isn't blocked on a sleeping ++ * spin lock. 
+ */ +- if (prev->flags & PF_WQ_WORKER) { ++ if (prev->flags & PF_WQ_WORKER && !prev->saved_state) { + struct task_struct *to_wakeup; + + to_wakeup = wq_worker_sleeping(prev, cpu); diff --git a/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch b/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch new file mode 100644 index 000000000..31ae87496 --- /dev/null +++ b/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch @@ -0,0 +1,118 @@ +Subject: scsi-fcoe-rt-aware.patch +From: Thomas Gleixner +Date: Sat, 12 Nov 2011 14:00:48 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + drivers/scsi/fcoe/fcoe.c | 18 +++++++++--------- + drivers/scsi/fcoe/fcoe_ctlr.c | 4 ++-- + drivers/scsi/libfc/fc_exch.c | 4 ++-- + 3 files changed, 13 insertions(+), 13 deletions(-) + +Index: linux-stable/drivers/scsi/fcoe/fcoe.c +=================================================================== +--- linux-stable.orig/drivers/scsi/fcoe/fcoe.c ++++ linux-stable/drivers/scsi/fcoe/fcoe.c +@@ -1281,7 +1281,7 @@ static void fcoe_percpu_thread_destroy(u + struct sk_buff *skb; + #ifdef CONFIG_SMP + struct fcoe_percpu_s *p0; +- unsigned targ_cpu = get_cpu(); ++ unsigned targ_cpu = get_cpu_light(); + #endif /* CONFIG_SMP */ + + FCOE_DBG("Destroying receive thread for CPU %d\n", cpu); +@@ -1337,7 +1337,7 @@ static void fcoe_percpu_thread_destroy(u + kfree_skb(skb); + spin_unlock_bh(&p->fcoe_rx_list.lock); + } +- put_cpu(); ++ put_cpu_light(); + #else + /* + * This a non-SMP scenario where the singular Rx thread is +@@ -1555,11 +1555,11 @@ err2: + static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen) + { + struct fcoe_percpu_s *fps; +- int rc; ++ int rc, cpu = get_cpu_light(); + +- fps = &get_cpu_var(fcoe_percpu); ++ fps = &per_cpu(fcoe_percpu, cpu); + rc = fcoe_get_paged_crc_eof(skb, tlen, fps); +- put_cpu_var(fcoe_percpu); ++ put_cpu_light(); + + return rc; + } +@@ -1757,11 +1757,11 @@ static inline int fcoe_filter_frames(str + return 0; + } + +- stats = per_cpu_ptr(lport->stats, get_cpu()); ++ stats = per_cpu_ptr(lport->stats, get_cpu_light()); + stats->InvalidCRCCount++; + if (stats->InvalidCRCCount < 5) + printk(KERN_WARNING "fcoe: dropping frame with CRC error\n"); +- put_cpu(); ++ put_cpu_light(); + return -EINVAL; + } + +@@ -1837,13 +1837,13 @@ static void fcoe_recv_frame(struct sk_bu + goto drop; + + if (!fcoe_filter_frames(lport, fp)) { +- put_cpu(); ++ put_cpu_light(); + fc_exch_recv(lport, fp); + return; + } + drop: + stats->ErrorFrames++; +- put_cpu(); ++ put_cpu_light(); + kfree_skb(skb); + } + +Index: linux-stable/drivers/scsi/fcoe/fcoe_ctlr.c +=================================================================== +--- linux-stable.orig/drivers/scsi/fcoe/fcoe_ctlr.c ++++ linux-stable/drivers/scsi/fcoe/fcoe_ctlr.c +@@ -792,7 +792,7 @@ static unsigned long fcoe_ctlr_age_fcfs( + + INIT_LIST_HEAD(&del_list); + +- stats = per_cpu_ptr(fip->lp->stats, get_cpu()); ++ stats = per_cpu_ptr(fip->lp->stats, get_cpu_light()); + + list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { + deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2; +@@ -828,7 +828,7 @@ static unsigned long fcoe_ctlr_age_fcfs( + sel_time = fcf->time; + } + } +- put_cpu(); ++ put_cpu_light(); + + list_for_each_entry_safe(fcf, next, &del_list, list) { + /* Removes fcf from current list */ +Index: linux-stable/drivers/scsi/libfc/fc_exch.c +=================================================================== +--- 
linux-stable.orig/drivers/scsi/libfc/fc_exch.c ++++ linux-stable/drivers/scsi/libfc/fc_exch.c +@@ -741,10 +741,10 @@ static struct fc_exch *fc_exch_em_alloc( + } + memset(ep, 0, sizeof(*ep)); + +- cpu = get_cpu(); ++ cpu = get_cpu_light(); + pool = per_cpu_ptr(mp->pool, cpu); + spin_lock_bh(&pool->lock); +- put_cpu(); ++ put_cpu_light(); + + /* peek cache of free slot */ + if (pool->left != FC_XID_UNKNOWN) { diff --git a/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch b/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch new file mode 100644 index 000000000..bf694ce39 --- /dev/null +++ b/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch @@ -0,0 +1,50 @@ +Subject: scsi: qla2xxx: Use local_irq_save_nort() in qla2x00_poll +From: John Kacur +Date: Fri, 27 Apr 2012 12:48:46 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +RT triggers the following: + +[ 11.307652] [] __might_sleep+0xe7/0x110 +[ 11.307663] [] rt_spin_lock+0x24/0x60 +[ 11.307670] [] ? rt_spin_lock_slowunlock+0x78/0x90 +[ 11.307703] [] qla24xx_intr_handler+0x63/0x2d0 [qla2xxx] +[ 11.307736] [] qla2x00_poll+0x67/0x90 [qla2xxx] + +Function qla2x00_poll does local_irq_save() before calling qla24xx_intr_handler +which has a spinlock. Since spinlocks are sleepable on rt, it is not allowed +to call them with interrupts disabled. Therefore we use local_irq_save_nort() +instead which saves flags without disabling interrupts. + +This fix needs to be applied to v3.0-rt, v3.2-rt and v3.4-rt + +Suggested-by: Thomas Gleixner +Signed-off-by: John Kacur +Cc: Steven Rostedt +Cc: David Sommerseth +Link: http://lkml.kernel.org/r/1335523726-10024-1-git-send-email-jkacur@redhat.com +Cc: stable-rt@vger.kernel.org +Signed-off-by: Thomas Gleixner +--- + drivers/scsi/qla2xxx/qla_inline.h | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +Index: linux-stable/drivers/scsi/qla2xxx/qla_inline.h +=================================================================== +--- linux-stable.orig/drivers/scsi/qla2xxx/qla_inline.h ++++ linux-stable/drivers/scsi/qla2xxx/qla_inline.h +@@ -58,12 +58,12 @@ qla2x00_poll(struct rsp_que *rsp) + { + unsigned long flags; + struct qla_hw_data *ha = rsp->hw; +- local_irq_save(flags); ++ local_irq_save_nort(flags); + if (IS_QLA82XX(ha)) + qla82xx_poll(0, rsp); + else + ha->isp_ops->intr_handler(0, rsp); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + + static inline uint8_t * diff --git a/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch b/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch new file mode 100644 index 000000000..3ffd34154 --- /dev/null +++ b/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch @@ -0,0 +1,192 @@ +Subject: seqlock: Prevent rt starvation +From: Thomas Gleixner +Date: Wed, 22 Feb 2012 12:03:30 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +If a low prio writer gets preempted while holding the seqlock write +locked, a high prio reader spins forever on RT. + +To prevent this let the reader grab the spinlock, so it blocks and +eventually boosts the writer. This way the writer can proceed and +endless spinning is prevented. + +For seqcount writers we disable preemption over the update code +path. Thaanks to Al Viro for distangling some VFS code to make that +possible. 
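
A rough userspace model of the RT read side introduced below, using a plain pthread mutex in place of the PI-boosting spinlock (illustration only, not the kernel seqlock implementation):

#include <pthread.h>
#include <stdio.h>

struct model_seqlock {
    unsigned seq;            /* odd while a write is in progress */
    pthread_mutex_t lock;    /* the writer's lock */
    int data;
};

static unsigned model_read_seqbegin(struct model_seqlock *sl)
{
    unsigned ret;

    for (;;) {
        ret = __atomic_load_n(&sl->seq, __ATOMIC_ACQUIRE);
        if (!(ret & 1))
            return ret;
        /* write in progress: block on the writer's lock instead of
         * spinning, so a PI lock would boost the preempted writer */
        pthread_mutex_lock(&sl->lock);
        pthread_mutex_unlock(&sl->lock);
    }
}

static int model_read_seqretry(struct model_seqlock *sl, unsigned start)
{
    return __atomic_load_n(&sl->seq, __ATOMIC_ACQUIRE) != start;
}

static void model_write(struct model_seqlock *sl, int val)
{
    pthread_mutex_lock(&sl->lock);
    __atomic_store_n(&sl->seq, sl->seq + 1, __ATOMIC_RELEASE); /* now odd */
    sl->data = val;
    __atomic_store_n(&sl->seq, sl->seq + 1, __ATOMIC_RELEASE); /* even again */
    pthread_mutex_unlock(&sl->lock);
}

int main(void)
{
    struct model_seqlock sl = { .lock = PTHREAD_MUTEX_INITIALIZER };
    unsigned seq;
    int val;

    model_write(&sl, 42);
    do {
        seq = model_read_seqbegin(&sl);
        val = sl.data;
    } while (model_read_seqretry(&sl, seq));
    printf("read %d at seq %u\n", val, seq);
    return 0;
}

The point is the fallback in model_read_seqbegin(): when the sequence is odd the reader blocks on the writer's lock rather than spinning, which on RT lets priority inheritance push the preempted writer forward.
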
+ +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org + +--- + include/linux/seqlock.h | 55 +++++++++++++++++++++++++++++++++++++++--------- + include/net/dst.h | 2 - + include/net/neighbour.h | 4 +-- + 3 files changed, 48 insertions(+), 13 deletions(-) + +Index: linux-stable/include/linux/seqlock.h +=================================================================== +--- linux-stable.orig/include/linux/seqlock.h ++++ linux-stable/include/linux/seqlock.h +@@ -146,18 +146,30 @@ static inline int read_seqcount_retry(co + * Sequence counter only version assumes that callers are using their + * own mutexing. + */ +-static inline void write_seqcount_begin(seqcount_t *s) ++static inline void __write_seqcount_begin(seqcount_t *s) + { + s->sequence++; + smp_wmb(); + } + +-static inline void write_seqcount_end(seqcount_t *s) ++static inline void write_seqcount_begin(seqcount_t *s) ++{ ++ preempt_disable_rt(); ++ __write_seqcount_begin(s); ++} ++ ++static inline void __write_seqcount_end(seqcount_t *s) + { + smp_wmb(); + s->sequence++; + } + ++static inline void write_seqcount_end(seqcount_t *s) ++{ ++ __write_seqcount_end(s); ++ preempt_enable_rt(); ++} ++ + /** + * write_seqcount_barrier - invalidate in-progress read-side seq operations + * @s: pointer to seqcount_t +@@ -198,10 +210,33 @@ typedef struct { + /* + * Read side functions for starting and finalizing a read side section. + */ ++#ifndef CONFIG_PREEMPT_RT_FULL + static inline unsigned read_seqbegin(const seqlock_t *sl) + { + return read_seqcount_begin(&sl->seqcount); + } ++#else ++/* ++ * Starvation safe read side for RT ++ */ ++static inline unsigned read_seqbegin(seqlock_t *sl) ++{ ++ unsigned ret; ++ ++repeat: ++ ret = ACCESS_ONCE(sl->seqcount.sequence); ++ if (unlikely(ret & 1)) { ++ /* ++ * Take the lock and let the writer proceed (i.e. evtl ++ * boost it), otherwise we could loop here forever. 
++ */ ++ spin_lock(&sl->lock); ++ spin_unlock(&sl->lock); ++ goto repeat; ++ } ++ return ret; ++} ++#endif + + static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) + { +@@ -216,36 +251,36 @@ static inline unsigned read_seqretry(con + static inline void write_seqlock(seqlock_t *sl) + { + spin_lock(&sl->lock); +- write_seqcount_begin(&sl->seqcount); ++ __write_seqcount_begin(&sl->seqcount); + } + + static inline void write_sequnlock(seqlock_t *sl) + { +- write_seqcount_end(&sl->seqcount); ++ __write_seqcount_end(&sl->seqcount); + spin_unlock(&sl->lock); + } + + static inline void write_seqlock_bh(seqlock_t *sl) + { + spin_lock_bh(&sl->lock); +- write_seqcount_begin(&sl->seqcount); ++ __write_seqcount_begin(&sl->seqcount); + } + + static inline void write_sequnlock_bh(seqlock_t *sl) + { +- write_seqcount_end(&sl->seqcount); ++ __write_seqcount_end(&sl->seqcount); + spin_unlock_bh(&sl->lock); + } + + static inline void write_seqlock_irq(seqlock_t *sl) + { + spin_lock_irq(&sl->lock); +- write_seqcount_begin(&sl->seqcount); ++ __write_seqcount_begin(&sl->seqcount); + } + + static inline void write_sequnlock_irq(seqlock_t *sl) + { +- write_seqcount_end(&sl->seqcount); ++ __write_seqcount_end(&sl->seqcount); + spin_unlock_irq(&sl->lock); + } + +@@ -254,7 +289,7 @@ static inline unsigned long __write_seql + unsigned long flags; + + spin_lock_irqsave(&sl->lock, flags); +- write_seqcount_begin(&sl->seqcount); ++ __write_seqcount_begin(&sl->seqcount); + return flags; + } + +@@ -264,7 +299,7 @@ static inline unsigned long __write_seql + static inline void + write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) + { +- write_seqcount_end(&sl->seqcount); ++ __write_seqcount_end(&sl->seqcount); + spin_unlock_irqrestore(&sl->lock, flags); + } + +Index: linux-stable/include/net/dst.h +=================================================================== +--- linux-stable.orig/include/net/dst.h ++++ linux-stable/include/net/dst.h +@@ -393,7 +393,7 @@ static inline void dst_confirm(struct ds + static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n, + struct sk_buff *skb) + { +- const struct hh_cache *hh; ++ struct hh_cache *hh; + + if (dst->pending_confirm) { + unsigned long now = jiffies; +Index: linux-stable/include/net/neighbour.h +=================================================================== +--- linux-stable.orig/include/net/neighbour.h ++++ linux-stable/include/net/neighbour.h +@@ -335,7 +335,7 @@ static inline int neigh_hh_bridge(struct + } + #endif + +-static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb) ++static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb) + { + unsigned int seq; + int hh_len; +@@ -390,7 +390,7 @@ struct neighbour_cb { + + #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb) + +-static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n, ++static inline void neigh_ha_snapshot(char *dst, struct neighbour *n, + const struct net_device *dev) + { + unsigned int seq; diff --git a/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch b/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch new file mode 100644 index 000000000..08db55efe --- /dev/null +++ b/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch @@ -0,0 +1,38 @@ +Subject: signal-fix-up-rcu-wreckage.patch +From: Thomas Gleixner +Date: Fri, 22 Jul 2011 08:07:08 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + 
+Signed-off-by: Thomas Gleixner +--- + kernel/signal.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +Index: linux-stable/kernel/signal.c +=================================================================== +--- linux-stable.orig/kernel/signal.c ++++ linux-stable/kernel/signal.c +@@ -1394,12 +1394,12 @@ struct sighand_struct *__lock_task_sigha + struct sighand_struct *sighand; + + for (;;) { +- local_irq_save(*flags); ++ local_irq_save_nort(*flags); + rcu_read_lock(); + sighand = rcu_dereference(tsk->sighand); + if (unlikely(sighand == NULL)) { + rcu_read_unlock(); +- local_irq_restore(*flags); ++ local_irq_restore_nort(*flags); + break; + } + +@@ -1410,7 +1410,7 @@ struct sighand_struct *__lock_task_sigha + } + spin_unlock(&sighand->siglock); + rcu_read_unlock(); +- local_irq_restore(*flags); ++ local_irq_restore_nort(*flags); + } + + return sighand; diff --git a/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch b/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch new file mode 100644 index 000000000..d640812f6 --- /dev/null +++ b/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch @@ -0,0 +1,30 @@ +Subject: signal-revert-ptrace-preempt-magic.patch +From: Thomas Gleixner +Date: Wed, 21 Sep 2011 19:57:12 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/signal.c | 8 -------- + 1 file changed, 8 deletions(-) + +Index: linux-stable/kernel/signal.c +=================================================================== +--- linux-stable.orig/kernel/signal.c ++++ linux-stable/kernel/signal.c +@@ -1908,15 +1908,7 @@ static void ptrace_stop(int exit_code, i + if (gstop_done && ptrace_reparented(current)) + do_notify_parent_cldstop(current, false, why); + +- /* +- * Don't want to allow preemption here, because +- * sys_ptrace() needs this task to be inactive. +- * +- * XXX: implement read_unlock_no_resched(). +- */ +- preempt_disable(); + read_unlock(&tasklist_lock); +- preempt_enable_no_resched(); + freezable_schedule(); + } else { + /* diff --git a/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch new file mode 100644 index 000000000..c467ab99f --- /dev/null +++ b/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch @@ -0,0 +1,269 @@ +From: Thomas Gleixner +Date: Fri, 3 Jul 2009 08:44:56 -0500 +Subject: signals: Allow rt tasks to cache one sigqueue struct +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +To avoid allocation allow rt tasks to cache one sigqueue struct in +task struct. 
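
The cache is a single per-task pointer slot, claimed and refilled with cmpxchg(). A rough userspace sketch of that one-slot scheme, with malloc()/free() standing in for the sigqueue slab and without the rlimit and rt_prio checks the real code keeps:

#include <stdlib.h>
#include <stdio.h>

struct sigqueue { int sig; };          /* stand-in type */

static struct sigqueue *cache_slot;    /* models tsk->sigqueue_cache */

static struct sigqueue *get_cached(void)
{
    struct sigqueue *q = __atomic_load_n(&cache_slot, __ATOMIC_RELAXED);

    /* claim the slot only if nobody raced us for it */
    if (q && __atomic_compare_exchange_n(&cache_slot, &q, NULL, 0,
                                         __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
        return q;
    return NULL;
}

static int put_cached(struct sigqueue *q)
{
    struct sigqueue *expected = NULL;

    /* refill the slot; report failure if it is already occupied */
    return __atomic_compare_exchange_n(&cache_slot, &expected, q, 0,
                                       __ATOMIC_RELEASE, __ATOMIC_RELAXED);
}

static struct sigqueue *alloc_sigqueue(void)
{
    struct sigqueue *q = get_cached();

    return q ? q : malloc(sizeof(*q)); /* kmem_cache_alloc() stand-in */
}

static void free_sigqueue(struct sigqueue *q)
{
    if (!put_cached(q))
        free(q);                       /* kmem_cache_free() stand-in */
}

int main(void)
{
    struct sigqueue *a = alloc_sigqueue(); /* cache empty: allocator path */

    free_sigqueue(a);                      /* parks it in the one-slot cache */
    printf("cache hit: %s\n", alloc_sigqueue() == a ? "yes" : "no");
    return 0;
}
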
+ +Signed-off-by: Thomas Gleixner + +--- + include/linux/sched.h | 1 + include/linux/signal.h | 1 + kernel/exit.c | 2 - + kernel/fork.c | 1 + kernel/signal.c | 94 ++++++++++++++++++++++++++++++++++++++++++++----- + 5 files changed, 89 insertions(+), 10 deletions(-) + +Index: linux-stable/include/linux/sched.h +=================================================================== +--- linux-stable.orig/include/linux/sched.h ++++ linux-stable/include/linux/sched.h +@@ -1215,6 +1215,7 @@ struct task_struct { + /* signal handlers */ + struct signal_struct *signal; + struct sighand_struct *sighand; ++ struct sigqueue *sigqueue_cache; + + sigset_t blocked, real_blocked; + sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ +Index: linux-stable/include/linux/signal.h +=================================================================== +--- linux-stable.orig/include/linux/signal.h ++++ linux-stable/include/linux/signal.h +@@ -226,6 +226,7 @@ static inline void init_sigpending(struc + } + + extern void flush_sigqueue(struct sigpending *queue); ++extern void flush_task_sigqueue(struct task_struct *tsk); + + /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */ + static inline int valid_signal(unsigned long sig) +Index: linux-stable/kernel/exit.c +=================================================================== +--- linux-stable.orig/kernel/exit.c ++++ linux-stable/kernel/exit.c +@@ -145,7 +145,7 @@ static void __exit_signal(struct task_st + * Do this under ->siglock, we can race with another thread + * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. + */ +- flush_sigqueue(&tsk->pending); ++ flush_task_sigqueue(tsk); + tsk->sighand = NULL; + spin_unlock(&sighand->siglock); + +Index: linux-stable/kernel/fork.c +=================================================================== +--- linux-stable.orig/kernel/fork.c ++++ linux-stable/kernel/fork.c +@@ -1231,6 +1231,7 @@ static struct task_struct *copy_process( + spin_lock_init(&p->alloc_lock); + + init_sigpending(&p->pending); ++ p->sigqueue_cache = NULL; + + p->utime = p->stime = p->gtime = 0; + p->utimescaled = p->stimescaled = 0; +Index: linux-stable/kernel/signal.c +=================================================================== +--- linux-stable.orig/kernel/signal.c ++++ linux-stable/kernel/signal.c +@@ -14,6 +14,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -349,13 +350,45 @@ static bool task_participate_group_stop( + return false; + } + ++#ifdef __HAVE_ARCH_CMPXCHG ++static inline struct sigqueue *get_task_cache(struct task_struct *t) ++{ ++ struct sigqueue *q = t->sigqueue_cache; ++ ++ if (cmpxchg(&t->sigqueue_cache, q, NULL) != q) ++ return NULL; ++ return q; ++} ++ ++static inline int put_task_cache(struct task_struct *t, struct sigqueue *q) ++{ ++ if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL) ++ return 0; ++ return 1; ++} ++ ++#else ++ ++static inline struct sigqueue *get_task_cache(struct task_struct *t) ++{ ++ return NULL; ++} ++ ++static inline int put_task_cache(struct task_struct *t, struct sigqueue *q) ++{ ++ return 1; ++} ++ ++#endif ++ + /* + * allocate a new signal queue record + * - this may be called without locks if and only if t == current, otherwise an + * appropriate lock must be held to stop the target task from exiting + */ + static struct sigqueue * +-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) ++__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags, ++ int 
override_rlimit, int fromslab) + { + struct sigqueue *q = NULL; + struct user_struct *user; +@@ -372,7 +405,10 @@ __sigqueue_alloc(int sig, struct task_st + if (override_rlimit || + atomic_read(&user->sigpending) <= + task_rlimit(t, RLIMIT_SIGPENDING)) { +- q = kmem_cache_alloc(sigqueue_cachep, flags); ++ if (!fromslab) ++ q = get_task_cache(t); ++ if (!q) ++ q = kmem_cache_alloc(sigqueue_cachep, flags); + } else { + print_dropped_signal(sig); + } +@@ -389,6 +425,13 @@ __sigqueue_alloc(int sig, struct task_st + return q; + } + ++static struct sigqueue * ++__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, ++ int override_rlimit) ++{ ++ return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0); ++} ++ + static void __sigqueue_free(struct sigqueue *q) + { + if (q->flags & SIGQUEUE_PREALLOC) +@@ -398,6 +441,21 @@ static void __sigqueue_free(struct sigqu + kmem_cache_free(sigqueue_cachep, q); + } + ++static void sigqueue_free_current(struct sigqueue *q) ++{ ++ struct user_struct *up; ++ ++ if (q->flags & SIGQUEUE_PREALLOC) ++ return; ++ ++ up = q->user; ++ if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) { ++ atomic_dec(&up->sigpending); ++ free_uid(up); ++ } else ++ __sigqueue_free(q); ++} ++ + void flush_sigqueue(struct sigpending *queue) + { + struct sigqueue *q; +@@ -411,6 +469,21 @@ void flush_sigqueue(struct sigpending *q + } + + /* ++ * Called from __exit_signal. Flush tsk->pending and ++ * tsk->sigqueue_cache ++ */ ++void flush_task_sigqueue(struct task_struct *tsk) ++{ ++ struct sigqueue *q; ++ ++ flush_sigqueue(&tsk->pending); ++ ++ q = get_task_cache(tsk); ++ if (q) ++ kmem_cache_free(sigqueue_cachep, q); ++} ++ ++/* + * Flush all pending signals for a task. + */ + void __flush_signals(struct task_struct *t) +@@ -562,7 +635,7 @@ static void collect_signal(int sig, stru + still_pending: + list_del_init(&first->list); + copy_siginfo(info, &first->info); +- __sigqueue_free(first); ++ sigqueue_free_current(first); + } else { + /* + * Ok, it wasn't in the queue. This must be +@@ -608,6 +681,8 @@ int dequeue_signal(struct task_struct *t + { + int signr; + ++ WARN_ON_ONCE(tsk != current); ++ + /* We only dequeue private signals from ourselves, we don't let + * signalfd steal them + */ +@@ -1547,7 +1622,8 @@ EXPORT_SYMBOL(kill_pid); + */ + struct sigqueue *sigqueue_alloc(void) + { +- struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); ++ /* Preallocated sigqueue objects always from the slabcache ! */ ++ struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1); + + if (q) + q->flags |= SIGQUEUE_PREALLOC; +@@ -2367,7 +2443,7 @@ relock: + } + + /** +- * signal_delivered - ++ * signal_delivered - + * @sig: number of signal being delivered + * @info: siginfo_t of signal being delivered + * @ka: sigaction setting that chose the handler +@@ -3126,7 +3202,7 @@ int do_sigaction(int sig, struct k_sigac + return 0; + } + +-static int ++static int + do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp) + { + stack_t oss; +@@ -3270,7 +3346,7 @@ int __compat_save_altstack(compat_stack_ + */ + SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set) + { +- return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t)); ++ return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t)); + } + + #endif +@@ -3395,7 +3471,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigaction, int + ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
&old_ka : NULL); + if (!ret && oact) { + sigset_to_compat(&mask, &old_ka.sa.sa_mask); +- ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), ++ ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), + &oact->sa_handler); + ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask)); + ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); +@@ -3571,7 +3647,7 @@ SYSCALL_DEFINE2(rt_sigsuspend, sigset_t + return -EFAULT; + return sigsuspend(&newset); + } +- ++ + #ifdef CONFIG_COMPAT + COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize) + { diff --git a/debian/patches/features/all/rt/skbufhead-raw-lock.patch b/debian/patches/features/all/rt/skbufhead-raw-lock.patch new file mode 100644 index 000000000..a79292d3e --- /dev/null +++ b/debian/patches/features/all/rt/skbufhead-raw-lock.patch @@ -0,0 +1,134 @@ +Subject: skbufhead-raw-lock.patch +From: Thomas Gleixner +Date: Tue, 12 Jul 2011 15:38:34 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/netdevice.h | 1 + + include/linux/skbuff.h | 7 +++++++ + net/core/dev.c | 26 ++++++++++++++++++++------ + 3 files changed, 28 insertions(+), 6 deletions(-) + +Index: linux-stable/include/linux/netdevice.h +=================================================================== +--- linux-stable.orig/include/linux/netdevice.h ++++ linux-stable/include/linux/netdevice.h +@@ -1808,6 +1808,7 @@ struct softnet_data { + unsigned int dropped; + struct sk_buff_head input_pkt_queue; + struct napi_struct backlog; ++ struct sk_buff_head tofree_queue; + }; + + static inline void input_queue_head_incr(struct softnet_data *sd) +Index: linux-stable/include/linux/skbuff.h +=================================================================== +--- linux-stable.orig/include/linux/skbuff.h ++++ linux-stable/include/linux/skbuff.h +@@ -133,6 +133,7 @@ struct sk_buff_head { + + __u32 qlen; + spinlock_t lock; ++ raw_spinlock_t raw_lock; + }; + + struct sk_buff; +@@ -1074,6 +1075,12 @@ static inline void skb_queue_head_init(s + __skb_queue_head_init(list); + } + ++static inline void skb_queue_head_init_raw(struct sk_buff_head *list) ++{ ++ raw_spin_lock_init(&list->raw_lock); ++ __skb_queue_head_init(list); ++} ++ + static inline void skb_queue_head_init_class(struct sk_buff_head *list, + struct lock_class_key *class) + { +Index: linux-stable/net/core/dev.c +=================================================================== +--- linux-stable.orig/net/core/dev.c ++++ linux-stable/net/core/dev.c +@@ -188,14 +188,14 @@ static inline struct hlist_head *dev_ind + static inline void rps_lock(struct softnet_data *sd) + { + #ifdef CONFIG_RPS +- spin_lock(&sd->input_pkt_queue.lock); ++ raw_spin_lock(&sd->input_pkt_queue.raw_lock); + #endif + } + + static inline void rps_unlock(struct softnet_data *sd) + { + #ifdef CONFIG_RPS +- spin_unlock(&sd->input_pkt_queue.lock); ++ raw_spin_unlock(&sd->input_pkt_queue.raw_lock); + #endif + } + +@@ -3633,7 +3633,7 @@ static void flush_backlog(void *arg) + skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { + if (skb->dev == dev) { + __skb_unlink(skb, &sd->input_pkt_queue); +- kfree_skb(skb); ++ __skb_queue_tail(&sd->tofree_queue, skb); + input_queue_head_incr(sd); + } + } +@@ -3642,10 +3642,13 @@ static void flush_backlog(void *arg) + skb_queue_walk_safe(&sd->process_queue, skb, tmp) { + if (skb->dev == dev) { + __skb_unlink(skb, &sd->process_queue); +- kfree_skb(skb); ++ __skb_queue_tail(&sd->tofree_queue, 
skb); + input_queue_head_incr(sd); + } + } ++ ++ if (!skb_queue_empty(&sd->tofree_queue)) ++ raise_softirq_irqoff(NET_RX_SOFTIRQ); + } + + static int napi_gro_complete(struct sk_buff *skb) +@@ -4151,10 +4154,17 @@ static void net_rx_action(struct softirq + struct softnet_data *sd = &__get_cpu_var(softnet_data); + unsigned long time_limit = jiffies + 2; + int budget = netdev_budget; ++ struct sk_buff *skb; + void *have; + + local_irq_disable(); + ++ while ((skb = __skb_dequeue(&sd->tofree_queue))) { ++ local_irq_enable(); ++ kfree_skb(skb); ++ local_irq_disable(); ++ } ++ + while (!list_empty(&sd->poll_list)) { + struct napi_struct *n; + int work, weight; +@@ -6015,6 +6025,9 @@ static int dev_cpu_callback(struct notif + netif_rx(skb); + input_queue_head_incr(oldsd); + } ++ while ((skb = __skb_dequeue(&oldsd->tofree_queue))) { ++ kfree_skb(skb); ++ } + + return NOTIFY_OK; + } +@@ -6287,8 +6300,9 @@ static int __init net_dev_init(void) + struct softnet_data *sd = &per_cpu(softnet_data, i); + + memset(sd, 0, sizeof(*sd)); +- skb_queue_head_init(&sd->input_pkt_queue); +- skb_queue_head_init(&sd->process_queue); ++ skb_queue_head_init_raw(&sd->input_pkt_queue); ++ skb_queue_head_init_raw(&sd->process_queue); ++ skb_queue_head_init_raw(&sd->tofree_queue); + sd->completion_queue = NULL; + INIT_LIST_HEAD(&sd->poll_list); + sd->output_queue = NULL; diff --git a/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch b/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch new file mode 100644 index 000000000..869acaaa2 --- /dev/null +++ b/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch @@ -0,0 +1,49 @@ +Subject: slub: Enable irqs for __GFP_WAIT +From: Thomas Gleixner +Date: Wed, 09 Jan 2013 12:08:15 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +SYSTEM_RUNNING might be too late for enabling interrupts. Allocations +with GFP_WAIT can happen before that. So use this as an indicator. 
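
Written out as a stand-alone predicate, the decision the hunk below latches into a local variable looks roughly like this (userspace sketch; the flag value and the preempt_rt_full switch are illustrative stand-ins for the real gfp bits and config option):

#include <stdbool.h>
#include <stdio.h>

#define __GFP_WAIT 0x10u            /* illustrative value, not the real flag */
enum system_states { SYSTEM_BOOTING, SYSTEM_RUNNING };

static enum system_states system_state = SYSTEM_BOOTING;
static const bool preempt_rt_full = true;   /* CONFIG_PREEMPT_RT_FULL */

static bool slab_may_enable_irqs(unsigned int gfp_flags)
{
    bool enableirqs = gfp_flags & __GFP_WAIT;

    /* on RT, SYSTEM_RUNNING alone came too late: a __GFP_WAIT allocation
     * may sleep, so the flag itself is a safe indicator even during boot */
    if (preempt_rt_full)
        enableirqs |= system_state == SYSTEM_RUNNING;
    return enableirqs;
}

int main(void)
{
    printf("sleeping alloc during boot: %d\n", slab_may_enable_irqs(__GFP_WAIT));
    printf("atomic alloc during boot:   %d\n", slab_may_enable_irqs(0));
    system_state = SYSTEM_RUNNING;
    printf("atomic alloc at runtime:    %d\n", slab_may_enable_irqs(0));
    return 0;
}

Latching the result in one local also keeps the local_irq_enable() and the later local_irq_disable() symmetric, instead of evaluating two separately #ifdef'ed conditions as before.
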
+ +Signed-off-by: Thomas Gleixner +--- + mm/slub.c | 13 +++++-------- + 1 file changed, 5 insertions(+), 8 deletions(-) + +Index: linux-stable/mm/slub.c +=================================================================== +--- linux-stable.orig/mm/slub.c ++++ linux-stable/mm/slub.c +@@ -1281,14 +1281,15 @@ static struct page *allocate_slab(struct + struct page *page; + struct kmem_cache_order_objects oo = s->oo; + gfp_t alloc_gfp; ++ bool enableirqs; + + flags &= gfp_allowed_mask; + ++ enableirqs = (flags & __GFP_WAIT) != 0; + #ifdef CONFIG_PREEMPT_RT_FULL +- if (system_state == SYSTEM_RUNNING) +-#else +- if (flags & __GFP_WAIT) ++ enableirqs |= system_state == SYSTEM_RUNNING; + #endif ++ if (enableirqs) + local_irq_enable(); + + flags |= s->allocflags; +@@ -1328,11 +1329,7 @@ static struct page *allocate_slab(struct + kmemcheck_mark_unallocated_pages(page, pages); + } + +-#ifdef CONFIG_PREEMPT_RT_FULL +- if (system_state == SYSTEM_RUNNING) +-#else +- if (flags & __GFP_WAIT) +-#endif ++ if (enableirqs) + local_irq_disable(); + if (!page) + return NULL; diff --git a/debian/patches/features/all/rt/slub_delay_ctor_on_rt.patch b/debian/patches/features/all/rt/slub_delay_ctor_on_rt.patch new file mode 100644 index 000000000..e18310250 --- /dev/null +++ b/debian/patches/features/all/rt/slub_delay_ctor_on_rt.patch @@ -0,0 +1,38 @@ +From: Sebastian Andrzej Siewior +Subject: slub: delay ctor until the object is requested +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +It seems that allocation of plenty objects causes latency on ARM since that +code can not be preempted + +Signed-off-by: Sebastian Andrzej Siewior +--- + mm/slub.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +Index: linux-stable/mm/slub.c +=================================================================== +--- linux-stable.orig/mm/slub.c ++++ linux-stable/mm/slub.c +@@ -1347,8 +1347,10 @@ static void setup_object(struct kmem_cac + void *object) + { + setup_object_debug(s, page, object); ++#ifndef CONFIG_PREEMPT_RT_FULL + if (unlikely(s->ctor)) + s->ctor(object); ++#endif + } + + static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) +@@ -2444,6 +2446,10 @@ redo: + + if (unlikely(gfpflags & __GFP_ZERO) && object) + memset(object, 0, s->object_size); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (unlikely(s->ctor) && object) ++ s->ctor(object); ++#endif + + slab_post_alloc_hook(s, gfpflags, object); + diff --git a/debian/patches/features/all/rt/softirq-adapt-nohz-pending-debug-code-to-new-scheme.patch b/debian/patches/features/all/rt/softirq-adapt-nohz-pending-debug-code-to-new-scheme.patch new file mode 100644 index 000000000..96c1be46b --- /dev/null +++ b/debian/patches/features/all/rt/softirq-adapt-nohz-pending-debug-code-to-new-scheme.patch @@ -0,0 +1,177 @@ +Subject: softirq: Adapt NOHZ softirq pending check to new RT scheme +From: Thomas Gleixner +Date: Sun, 28 Oct 2012 13:46:16 +0000 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +We can't rely on ksoftirqd anymore and we need to check the tasks +which run a particular softirq and if such a task is pi blocked ignore +the other pending bits of that task as well. 
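
Roughly, the idle-time check below becomes a walk over per-softirq runner slots that drops from the "suspicious pending" mask every bit owned by a runner that is merely lock-blocked (or already runnable again). A small userspace model of that masking, with a stand-in task structure:

#include <stdio.h>

#define NR_SOFTIRQS 10

struct task {
    int pi_blocked_on;             /* non-zero: blocked on a PI lock */
    int running;                   /* TASK_RUNNING stand-in */
    unsigned int softirqs_raised;  /* bits this task will still handle */
};

static struct task *softirq_runner[NR_SOFTIRQS]; /* models the per-cpu array */

static unsigned int check_pending_idle(unsigned int pending)
{
    unsigned int warnpending = pending;
    int i;

    for (i = 0; i < NR_SOFTIRQS; i++) {
        struct task *tsk = softirq_runner[i];

        if (tsk && (tsk->pi_blocked_on || tsk->running)) {
            /* this softirq has a runner that will get to it: not stuck */
            warnpending &= ~tsk->softirqs_raised;
            warnpending &= ~(1u << i);
        }
    }
    return warnpending;            /* non-zero would trigger the NOHZ warning */
}

int main(void)
{
    struct task timer_runner = { .pi_blocked_on = 1, .softirqs_raised = 1u << 1 };

    softirq_runner[1] = &timer_runner;    /* say index 1 is the timer softirq */

    /* bit 1 pending but its runner is only lock-blocked: no warning */
    printf("warnpending = %#x\n", check_pending_idle(1u << 1));
    /* bit 3 pending with nobody running it: would warn */
    printf("warnpending = %#x\n", check_pending_idle(1u << 3));
    return 0;
}
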
+ +Signed-off-by: Thomas Gleixner +--- + kernel/softirq.c | 83 ++++++++++++++++++++++++++++++++++++++----------------- + 1 file changed, 58 insertions(+), 25 deletions(-) + +Index: linux-stable/kernel/softirq.c +=================================================================== +--- linux-stable.orig/kernel/softirq.c ++++ linux-stable/kernel/softirq.c +@@ -66,46 +66,71 @@ char *softirq_to_name[NR_SOFTIRQS] = { + + #ifdef CONFIG_NO_HZ + # ifdef CONFIG_PREEMPT_RT_FULL ++ ++struct softirq_runner { ++ struct task_struct *runner[NR_SOFTIRQS]; ++}; ++ ++static DEFINE_PER_CPU(struct softirq_runner, softirq_runners); ++ ++static inline void softirq_set_runner(unsigned int sirq) ++{ ++ struct softirq_runner *sr = &__get_cpu_var(softirq_runners); ++ ++ sr->runner[sirq] = current; ++} ++ ++static inline void softirq_clr_runner(unsigned int sirq) ++{ ++ struct softirq_runner *sr = &__get_cpu_var(softirq_runners); ++ ++ sr->runner[sirq] = NULL; ++} ++ + /* +- * On preempt-rt a softirq might be blocked on a lock. There might be +- * no other runnable task on this CPU because the lock owner runs on +- * some other CPU. So we have to go into idle with the pending bit +- * set. Therefor we need to check this otherwise we warn about false +- * positives which confuses users and defeats the whole purpose of +- * this test. ++ * On preempt-rt a softirq running context might be blocked on a ++ * lock. There might be no other runnable task on this CPU because the ++ * lock owner runs on some other CPU. So we have to go into idle with ++ * the pending bit set. Therefor we need to check this otherwise we ++ * warn about false positives which confuses users and defeats the ++ * whole purpose of this test. + * + * This code is called with interrupts disabled. + */ + void softirq_check_pending_idle(void) + { + static int rate_limit; +- u32 warnpending = 0, pending; ++ struct softirq_runner *sr = &__get_cpu_var(softirq_runners); ++ u32 warnpending; ++ int i; + + if (rate_limit >= 10) + return; + +- pending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK; +- if (pending) { +- struct task_struct *tsk; ++ warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK; ++ for (i = 0; i < NR_SOFTIRQS; i++) { ++ struct task_struct *tsk = sr->runner[i]; + +- tsk = __get_cpu_var(ksoftirqd); + /* + * The wakeup code in rtmutex.c wakes up the task + * _before_ it sets pi_blocked_on to NULL under + * tsk->pi_lock. So we need to check for both: state + * and pi_blocked_on. 
+ */ +- raw_spin_lock(&tsk->pi_lock); +- +- if (!tsk->pi_blocked_on && !(tsk->state == TASK_RUNNING)) +- warnpending = 1; +- +- raw_spin_unlock(&tsk->pi_lock); ++ if (tsk) { ++ raw_spin_lock(&tsk->pi_lock); ++ if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) { ++ /* Clear all bits pending in that task */ ++ warnpending &= ~(tsk->softirqs_raised); ++ warnpending &= ~(1 << i); ++ } ++ raw_spin_unlock(&tsk->pi_lock); ++ } + } + + if (warnpending) { + printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", +- pending); ++ warnpending); + rate_limit++; + } + } +@@ -125,6 +150,10 @@ void softirq_check_pending_idle(void) + } + } + # endif ++ ++#else /* !NO_HZ */ ++static inline void softirq_set_runner(unsigned int sirq) { } ++static inline void softirq_clr_runner(unsigned int sirq) { } + #endif + + /* +@@ -486,6 +515,7 @@ static void do_current_softirqs(int need + */ + lock_softirq(i); + local_irq_disable(); ++ softirq_set_runner(i); + /* + * Check with the local_softirq_pending() bits, + * whether we need to process this still or if someone +@@ -496,6 +526,7 @@ static void do_current_softirqs(int need + set_softirq_pending(pending & ~mask); + do_single_softirq(i, need_rcu_bh_qs); + } ++ softirq_clr_runner(i); + unlock_softirq(i); + WARN_ON(current->softirq_nestcnt != 1); + } +@@ -566,7 +597,7 @@ void thread_do_softirq(void) + } + } + +-void __raise_softirq_irqoff(unsigned int nr) ++static void do_raise_softirq_irqoff(unsigned int nr) + { + trace_softirq_raise(nr); + or_softirq_pending(1UL << nr); +@@ -583,12 +614,19 @@ void __raise_softirq_irqoff(unsigned int + __this_cpu_read(ksoftirqd)->softirqs_raised |= (1U << nr); + } + ++void __raise_softirq_irqoff(unsigned int nr) ++{ ++ do_raise_softirq_irqoff(nr); ++ if (!in_irq() && !current->softirq_nestcnt) ++ wakeup_softirqd(); ++} ++ + /* + * This function must run with irqs disabled! 
+ */ + void raise_softirq_irqoff(unsigned int nr) + { +- __raise_softirq_irqoff(nr); ++ do_raise_softirq_irqoff(nr); + + /* + * If we're in an hard interrupt we let irq return code deal +@@ -610,11 +648,6 @@ void raise_softirq_irqoff(unsigned int n + wakeup_softirqd(); + } + +-void do_raise_softirq_irqoff(unsigned int nr) +-{ +- raise_softirq_irqoff(nr); +-} +- + static inline int ksoftirqd_softirq_pending(void) + { + return current->softirqs_raised; diff --git a/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch b/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch new file mode 100644 index 000000000..9e07715bd --- /dev/null +++ b/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch @@ -0,0 +1,192 @@ +Subject: softirq-disable-softirq-stacks-for-rt.patch +From: Thomas Gleixner +Date: Mon, 18 Jul 2011 13:59:17 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + arch/powerpc/kernel/irq.c | 2 ++ + arch/powerpc/kernel/misc_32.S | 2 ++ + arch/powerpc/kernel/misc_64.S | 2 ++ + arch/sh/kernel/irq.c | 2 ++ + arch/sparc/kernel/irq_64.c | 2 ++ + arch/x86/kernel/entry_64.S | 2 ++ + arch/x86/kernel/irq_32.c | 2 ++ + arch/x86/kernel/irq_64.c | 3 ++- + include/linux/interrupt.h | 3 +-- + 9 files changed, 17 insertions(+), 3 deletions(-) + +Index: linux-stable/arch/powerpc/kernel/irq.c +=================================================================== +--- linux-stable.orig/arch/powerpc/kernel/irq.c ++++ linux-stable/arch/powerpc/kernel/irq.c +@@ -603,6 +603,7 @@ void irq_ctx_init(void) + } + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + static inline void do_softirq_onstack(void) + { + struct thread_info *curtp, *irqtp; +@@ -639,6 +640,7 @@ void do_softirq(void) + + local_irq_restore(flags); + } ++#endif + + irq_hw_number_t virq_to_hw(unsigned int virq) + { +Index: linux-stable/arch/powerpc/kernel/misc_32.S +=================================================================== +--- linux-stable.orig/arch/powerpc/kernel/misc_32.S ++++ linux-stable/arch/powerpc/kernel/misc_32.S +@@ -36,6 +36,7 @@ + + .text + ++#ifndef CONFIG_PREEMPT_RT_FULL + _GLOBAL(call_do_softirq) + mflr r0 + stw r0,4(r1) +@@ -46,6 +47,7 @@ _GLOBAL(call_do_softirq) + lwz r0,4(r1) + mtlr r0 + blr ++#endif + + _GLOBAL(call_handle_irq) + mflr r0 +Index: linux-stable/arch/powerpc/kernel/misc_64.S +=================================================================== +--- linux-stable.orig/arch/powerpc/kernel/misc_64.S ++++ linux-stable/arch/powerpc/kernel/misc_64.S +@@ -29,6 +29,7 @@ + + .text + ++#ifndef CONFIG_PREEMPT_RT_FULL + _GLOBAL(call_do_softirq) + mflr r0 + std r0,16(r1) +@@ -39,6 +40,7 @@ _GLOBAL(call_do_softirq) + ld r0,16(r1) + mtlr r0 + blr ++#endif + + _GLOBAL(call_handle_irq) + ld r8,0(r6) +Index: linux-stable/arch/sh/kernel/irq.c +=================================================================== +--- linux-stable.orig/arch/sh/kernel/irq.c ++++ linux-stable/arch/sh/kernel/irq.c +@@ -149,6 +149,7 @@ void irq_ctx_exit(int cpu) + hardirq_ctx[cpu] = NULL; + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + asmlinkage void do_softirq(void) + { + unsigned long flags; +@@ -191,6 +192,7 @@ asmlinkage void do_softirq(void) + + local_irq_restore(flags); + } ++#endif + #else + static inline void handle_one_irq(unsigned int irq) + { +Index: linux-stable/arch/sparc/kernel/irq_64.c +=================================================================== +--- linux-stable.orig/arch/sparc/kernel/irq_64.c 
++++ linux-stable/arch/sparc/kernel/irq_64.c +@@ -698,6 +698,7 @@ void __irq_entry handler_irq(int pil, st + set_irq_regs(old_regs); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + void do_softirq(void) + { + unsigned long flags; +@@ -723,6 +724,7 @@ void do_softirq(void) + + local_irq_restore(flags); + } ++#endif + + #ifdef CONFIG_HOTPLUG_CPU + void fixup_irqs(void) +Index: linux-stable/arch/x86/kernel/entry_64.S +=================================================================== +--- linux-stable.orig/arch/x86/kernel/entry_64.S ++++ linux-stable/arch/x86/kernel/entry_64.S +@@ -1334,6 +1334,7 @@ bad_gs: + jmp 2b + .previous + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* Call softirq on interrupt stack. Interrupts are off. */ + ENTRY(call_softirq) + CFI_STARTPROC +@@ -1353,6 +1354,7 @@ ENTRY(call_softirq) + ret + CFI_ENDPROC + END(call_softirq) ++#endif + + #ifdef CONFIG_XEN + zeroentry xen_hypervisor_callback xen_do_hypervisor_callback +Index: linux-stable/arch/x86/kernel/irq_32.c +=================================================================== +--- linux-stable.orig/arch/x86/kernel/irq_32.c ++++ linux-stable/arch/x86/kernel/irq_32.c +@@ -149,6 +149,7 @@ void __cpuinit irq_ctx_init(int cpu) + cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu)); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + asmlinkage void do_softirq(void) + { + unsigned long flags; +@@ -179,6 +180,7 @@ asmlinkage void do_softirq(void) + + local_irq_restore(flags); + } ++#endif + + bool handle_irq(unsigned irq, struct pt_regs *regs) + { +Index: linux-stable/arch/x86/kernel/irq_64.c +=================================================================== +--- linux-stable.orig/arch/x86/kernel/irq_64.c ++++ linux-stable/arch/x86/kernel/irq_64.c +@@ -88,7 +88,7 @@ bool handle_irq(unsigned irq, struct pt_ + return true; + } + +- ++#ifndef CONFIG_PREEMPT_RT_FULL + extern void call_softirq(void); + + asmlinkage void do_softirq(void) +@@ -108,3 +108,4 @@ asmlinkage void do_softirq(void) + } + local_irq_restore(flags); + } ++#endif +Index: linux-stable/include/linux/interrupt.h +=================================================================== +--- linux-stable.orig/include/linux/interrupt.h ++++ linux-stable/include/linux/interrupt.h +@@ -445,10 +445,9 @@ struct softirq_action + void (*action)(struct softirq_action *); + }; + ++#ifndef CONFIG_PREEMPT_RT_FULL + asmlinkage void do_softirq(void); + asmlinkage void __do_softirq(void); +- +-#ifndef CONFIG_PREEMPT_RT_FULL + static inline void thread_do_softirq(void) { do_softirq(); } + #else + extern void thread_do_softirq(void); diff --git a/debian/patches/features/all/rt/softirq-export-in-serving-softirq.patch b/debian/patches/features/all/rt/softirq-export-in-serving-softirq.patch new file mode 100644 index 000000000..f5f523569 --- /dev/null +++ b/debian/patches/features/all/rt/softirq-export-in-serving-softirq.patch @@ -0,0 +1,31 @@ +Subject: softirq: Export in_serving_softirq() +From: John Kacur +Date: Mon, 14 Nov 2011 02:44:43 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +ERROR: "in_serving_softirq" [net/sched/cls_cgroup.ko] undefined! 
+ +The above can be fixed by exporting in_serving_softirq + +Signed-off-by: John Kacur +Cc: Paul McKenney +Cc: stable-rt@vger.kernel.org +Link: http://lkml.kernel.org/r/1321235083-21756-2-git-send-email-jkacur@redhat.com +Signed-off-by: Thomas Gleixner + +--- + kernel/softirq.c | 1 + + 1 file changed, 1 insertion(+) + +Index: linux-stable/kernel/softirq.c +=================================================================== +--- linux-stable.orig/kernel/softirq.c ++++ linux-stable/kernel/softirq.c +@@ -439,6 +439,7 @@ int in_serving_softirq(void) + preempt_enable(); + return res; + } ++EXPORT_SYMBOL(in_serving_softirq); + + /* + * Called with bh and local interrupts disabled. For full RT cpu must diff --git a/debian/patches/features/all/rt/softirq-init-softirq-local-lock-after-per-cpu-section-is-set-up.patch b/debian/patches/features/all/rt/softirq-init-softirq-local-lock-after-per-cpu-section-is-set-up.patch new file mode 100644 index 000000000..5055deb59 --- /dev/null +++ b/debian/patches/features/all/rt/softirq-init-softirq-local-lock-after-per-cpu-section-is-set-up.patch @@ -0,0 +1,136 @@ +Subject: softirq: Init softirq local lock after per cpu section is set up +From: Steven Rostedt +Date: Thu, 04 Oct 2012 11:02:04 -0400 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +I discovered this bug when booting 3.4-rt on my powerpc box. It crashed +with the following report: + +------------[ cut here ]------------ +kernel BUG at /work/rt/stable-rt.git/kernel/rtmutex_common.h:75! +Oops: Exception in kernel mode, sig: 5 [#1] +PREEMPT SMP NR_CPUS=64 NUMA PA Semi PWRficient +Modules linked in: +NIP: c0000000004aa03c LR: c0000000004aa01c CTR: c00000000009b2ac +REGS: c00000003e8d7950 TRAP: 0700 Not tainted (3.4.11-test-rt19) +MSR: 9000000000029032 CR: 24000082 XER: 20000000 +SOFTE: 0 +TASK = c00000003e8fdcd0[11] 'ksoftirqd/1' THREAD: c00000003e8d4000 CPU: 1 +GPR00: 0000000000000001 c00000003e8d7bd0 c000000000d6cbb0 0000000000000000 +GPR04: c00000003e8fdcd0 0000000000000000 0000000024004082 c000000000011454 +GPR08: 0000000000000000 0000000080000001 c00000003e8fdcd1 0000000000000000 +GPR12: 0000000024000084 c00000000fff0280 ffffffffffffffff 000000003ffffad8 +GPR16: ffffffffffffffff 000000000072c798 0000000000000060 0000000000000000 +GPR20: 0000000000642741 000000000072c858 000000003ffffaf0 0000000000000417 +GPR24: 000000000072dcd0 c00000003e7ff990 0000000000000000 0000000000000001 +GPR28: 0000000000000000 c000000000792340 c000000000ccec78 c000000001182338 +NIP [c0000000004aa03c] .wakeup_next_waiter+0x44/0xb8 +LR [c0000000004aa01c] .wakeup_next_waiter+0x24/0xb8 +Call Trace: +[c00000003e8d7bd0] [c0000000004aa01c] .wakeup_next_waiter+0x24/0xb8 (unreliable) +[c00000003e8d7c60] [c0000000004a0320] .rt_spin_lock_slowunlock+0x8c/0xe4 +[c00000003e8d7ce0] [c0000000004a07cc] .rt_spin_unlock+0x54/0x64 +[c00000003e8d7d60] [c0000000000636bc] .__thread_do_softirq+0x130/0x174 +[c00000003e8d7df0] [c00000000006379c] .run_ksoftirqd+0x9c/0x1a4 +[c00000003e8d7ea0] [c000000000080b68] .kthread+0xa8/0xb4 +[c00000003e8d7f90] [c00000000001c2f8] .kernel_thread+0x54/0x70 +Instruction dump: +60000000 e86d01c8 38630730 4bff7061 60000000 ebbf0008 7c7c1b78 e81d0040 +7fe00278 7c000074 7800d182 68000001 <0b000000> e88d01c8 387d0010 38840738 + +The rtmutex_common.h:75 is: + +rt_mutex_top_waiter(struct rt_mutex *lock) +{ + struct rt_mutex_waiter *w; + + w = plist_first_entry(&lock->wait_list, struct rt_mutex_waiter, + list_entry); + BUG_ON(w->lock != lock); + + return w; +} + +Where the 
waiter->lock is corrupted. I saw various other random bugs +that all had to with the softirq lock and plist. As plist needs to be +initialized before it is used I investigated how this lock is +initialized. It's initialized with: + +void __init softirq_early_init(void) +{ + local_irq_lock_init(local_softirq_lock); +} + +Where: + +#define local_irq_lock_init(lvar) \ + do { \ + int __cpu; \ + for_each_possible_cpu(__cpu) \ + spin_lock_init(&per_cpu(lvar, __cpu).lock); \ + } while (0) + +As the softirq lock is a local_irq_lock, which is a per_cpu lock, the +initialization is done to all per_cpu versions of the lock. But lets +look at where the softirq_early_init() is called from. + +In init/main.c: start_kernel() + +/* + * Interrupts are still disabled. Do necessary setups, then + * enable them + */ + softirq_early_init(); + tick_init(); + boot_cpu_init(); + page_address_init(); + printk(KERN_NOTICE "%s", linux_banner); + setup_arch(&command_line); + mm_init_owner(&init_mm, &init_task); + mm_init_cpumask(&init_mm); + setup_command_line(command_line); + setup_nr_cpu_ids(); + setup_per_cpu_areas(); + smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ + +One of the first things that is called is the initialization of the +softirq lock. But if you look further down, we see the per_cpu areas +have not been set up yet. Thus initializing a local_irq_lock() before +the per_cpu section is set up, may not work as it is initializing the +per cpu locks before the per cpu exists. + +By moving the softirq_early_init() right after setup_per_cpu_areas(), +the kernel boots fine. + +Signed-off-by: Steven Rostedt +Cc: Clark Williams +Cc: John Kacur +Cc: Carsten Emde +Cc: vomlehn@texas.net +Link: http://lkml.kernel.org/r/1349362924.6755.18.camel@gandalf.local.home +Signed-off-by: Thomas Gleixner + +--- + init/main.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: linux-stable/init/main.c +=================================================================== +--- linux-stable.orig/init/main.c ++++ linux-stable/init/main.c +@@ -496,7 +496,6 @@ asmlinkage void __init start_kernel(void + * Interrupts are still disabled. 
Do necessary setups, then + * enable them + */ +- softirq_early_init(); + boot_cpu_init(); + page_address_init(); + pr_notice("%s", linux_banner); +@@ -506,6 +505,7 @@ asmlinkage void __init start_kernel(void + setup_command_line(command_line); + setup_nr_cpu_ids(); + setup_per_cpu_areas(); ++ softirq_early_init(); + smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ + + build_all_zonelists(NULL, NULL); diff --git a/debian/patches/features/all/rt/softirq-local-lock.patch b/debian/patches/features/all/rt/softirq-local-lock.patch new file mode 100644 index 000000000..732b85f4e --- /dev/null +++ b/debian/patches/features/all/rt/softirq-local-lock.patch @@ -0,0 +1,323 @@ +Subject: softirq-local-lock.patch +From: Thomas Gleixner +Date: Tue, 28 Jun 2011 15:57:18 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/hardirq.h | 16 +++- + include/linux/interrupt.h | 12 +++ + include/linux/sched.h | 1 + init/main.c | 1 + kernel/softirq.c | 166 +++++++++++++++++++++++++++++++++++++++++++++- + 5 files changed, 191 insertions(+), 5 deletions(-) + +Index: linux-stable/include/linux/hardirq.h +=================================================================== +--- linux-stable.orig/include/linux/hardirq.h ++++ linux-stable/include/linux/hardirq.h +@@ -61,7 +61,11 @@ + #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) + #define NMI_OFFSET (1UL << NMI_SHIFT) + +-#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) ++#ifndef CONFIG_PREEMPT_RT_FULL ++# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) ++#else ++# define SOFTIRQ_DISABLE_OFFSET (0) ++#endif + + #ifndef PREEMPT_ACTIVE + #define PREEMPT_ACTIVE_BITS 1 +@@ -74,10 +78,17 @@ + #endif + + #define hardirq_count() (preempt_count() & HARDIRQ_MASK) +-#define softirq_count() (preempt_count() & SOFTIRQ_MASK) + #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ + | NMI_MASK)) + ++#ifndef CONFIG_PREEMPT_RT_FULL ++# define softirq_count() (preempt_count() & SOFTIRQ_MASK) ++# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) ++#else ++# define softirq_count() (0U) ++extern int in_serving_softirq(void); ++#endif ++ + /* + * Are we doing bottom half or hardware interrupt processing? + * Are we in a softirq context? Interrupt context? +@@ -87,7 +98,6 @@ + #define in_irq() (hardirq_count()) + #define in_softirq() (softirq_count()) + #define in_interrupt() (irq_count()) +-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) + + /* + * Are we in NMI context? 
+Index: linux-stable/include/linux/interrupt.h +=================================================================== +--- linux-stable.orig/include/linux/interrupt.h ++++ linux-stable/include/linux/interrupt.h +@@ -447,7 +447,13 @@ struct softirq_action + + asmlinkage void do_softirq(void); + asmlinkage void __do_softirq(void); ++ ++#ifndef CONFIG_PREEMPT_RT_FULL + static inline void thread_do_softirq(void) { do_softirq(); } ++#else ++extern void thread_do_softirq(void); ++#endif ++ + extern void open_softirq(int nr, void (*action)(struct softirq_action *)); + extern void softirq_init(void); + extern void __raise_softirq_irqoff(unsigned int nr); +@@ -634,6 +640,12 @@ void tasklet_hrtimer_cancel(struct taskl + tasklet_kill(&ttimer->tasklet); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++extern void softirq_early_init(void); ++#else ++static inline void softirq_early_init(void) { } ++#endif ++ + /* + * Autoprobing for irqs: + * +Index: linux-stable/include/linux/sched.h +=================================================================== +--- linux-stable.orig/include/linux/sched.h ++++ linux-stable/include/linux/sched.h +@@ -1444,6 +1444,7 @@ struct task_struct { + #endif + #ifdef CONFIG_PREEMPT_RT_BASE + struct rcu_head put_rcu; ++ int softirq_nestcnt; + #endif + }; + +Index: linux-stable/init/main.c +=================================================================== +--- linux-stable.orig/init/main.c ++++ linux-stable/init/main.c +@@ -496,6 +496,7 @@ asmlinkage void __init start_kernel(void + * Interrupts are still disabled. Do necessary setups, then + * enable them + */ ++ softirq_early_init(); + boot_cpu_init(); + page_address_init(); + pr_notice("%s", linux_banner); +Index: linux-stable/kernel/softirq.c +=================================================================== +--- linux-stable.orig/kernel/softirq.c ++++ linux-stable/kernel/softirq.c +@@ -25,6 +25,7 @@ + #include + #include + #include ++#include + + #define CREATE_TRACE_POINTS + #include +@@ -168,6 +169,7 @@ static void handle_pending_softirqs(u32 + local_irq_disable(); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * preempt_count and SOFTIRQ_OFFSET usage: + * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving +@@ -368,6 +370,162 @@ asmlinkage void do_softirq(void) + + #endif + ++static inline void local_bh_disable_nort(void) { local_bh_disable(); } ++static inline void _local_bh_enable_nort(void) { _local_bh_enable(); } ++ ++#else /* !PREEMPT_RT_FULL */ ++ ++/* ++ * On RT we serialize softirq execution with a cpu local lock ++ */ ++static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock); ++static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner); ++ ++static void __do_softirq(void); ++ ++void __init softirq_early_init(void) ++{ ++ local_irq_lock_init(local_softirq_lock); ++} ++ ++void local_bh_disable(void) ++{ ++ migrate_disable(); ++ current->softirq_nestcnt++; ++} ++EXPORT_SYMBOL(local_bh_disable); ++ ++void local_bh_enable(void) ++{ ++ if (WARN_ON(current->softirq_nestcnt == 0)) ++ return; ++ ++ if ((current->softirq_nestcnt == 1) && ++ local_softirq_pending() && ++ local_trylock(local_softirq_lock)) { ++ ++ local_irq_disable(); ++ if (local_softirq_pending()) ++ __do_softirq(); ++ local_irq_enable(); ++ local_unlock(local_softirq_lock); ++ WARN_ON(current->softirq_nestcnt != 1); ++ } ++ current->softirq_nestcnt--; ++ migrate_enable(); ++} ++EXPORT_SYMBOL(local_bh_enable); ++ ++void local_bh_enable_ip(unsigned long ip) ++{ ++ local_bh_enable(); ++} ++EXPORT_SYMBOL(local_bh_enable_ip); ++ ++/* For tracing */ 
++int notrace __in_softirq(void) ++{ ++ if (__get_cpu_var(local_softirq_lock).owner == current) ++ return __get_cpu_var(local_softirq_lock).nestcnt; ++ return 0; ++} ++ ++int in_serving_softirq(void) ++{ ++ int res; ++ ++ preempt_disable(); ++ res = __get_cpu_var(local_softirq_runner) == current; ++ preempt_enable(); ++ return res; ++} ++ ++/* ++ * Called with bh and local interrupts disabled. For full RT cpu must ++ * be pinned. ++ */ ++static void __do_softirq(void) ++{ ++ u32 pending = local_softirq_pending(); ++ int cpu = smp_processor_id(); ++ ++ current->softirq_nestcnt++; ++ ++ /* Reset the pending bitmask before enabling irqs */ ++ set_softirq_pending(0); ++ ++ __get_cpu_var(local_softirq_runner) = current; ++ ++ lockdep_softirq_enter(); ++ ++ handle_pending_softirqs(pending, cpu); ++ ++ pending = local_softirq_pending(); ++ if (pending) ++ wakeup_softirqd(); ++ ++ lockdep_softirq_exit(); ++ __get_cpu_var(local_softirq_runner) = NULL; ++ ++ current->softirq_nestcnt--; ++} ++ ++static int __thread_do_softirq(int cpu) ++{ ++ /* ++ * Prevent the current cpu from going offline. ++ * pin_current_cpu() can reenable preemption and block on the ++ * hotplug mutex. When it returns, the current cpu is ++ * pinned. It might be the wrong one, but the offline check ++ * below catches that. ++ */ ++ pin_current_cpu(); ++ /* ++ * If called from ksoftirqd (cpu >= 0) we need to check ++ * whether we are on the wrong cpu due to cpu offlining. If ++ * called via thread_do_softirq() no action required. ++ */ ++ if (cpu >= 0 && cpu_is_offline(cpu)) { ++ unpin_current_cpu(); ++ return -1; ++ } ++ preempt_enable(); ++ local_lock(local_softirq_lock); ++ local_irq_disable(); ++ /* ++ * We cannot switch stacks on RT as we want to be able to ++ * schedule! ++ */ ++ if (local_softirq_pending()) ++ __do_softirq(); ++ local_unlock(local_softirq_lock); ++ unpin_current_cpu(); ++ preempt_disable(); ++ local_irq_enable(); ++ return 0; ++} ++ ++/* ++ * Called from netif_rx_ni(). Preemption enabled. ++ */ ++void thread_do_softirq(void) ++{ ++ if (!in_serving_softirq()) { ++ preempt_disable(); ++ __thread_do_softirq(-1); ++ preempt_enable(); ++ } ++} ++ ++static int ksoftirqd_do_softirq(int cpu) ++{ ++ return __thread_do_softirq(cpu); ++} ++ ++static inline void local_bh_disable_nort(void) { } ++static inline void _local_bh_enable_nort(void) { } ++ ++#endif /* PREEMPT_RT_FULL */ + /* + * Enter an interrupt context. + */ +@@ -381,9 +539,9 @@ void irq_enter(void) + * Prevent raise_softirq from needlessly waking up ksoftirqd + * here, as softirq will be serviced on return from interrupt. 
+ */ +- local_bh_disable(); ++ local_bh_disable_nort(); + tick_check_idle(cpu); +- _local_bh_enable(); ++ _local_bh_enable_nort(); + } + + __irq_enter(); +@@ -391,10 +549,14 @@ void irq_enter(void) + + static inline void invoke_softirq(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + if (!force_irqthreads) + __do_softirq(); + else + wakeup_softirqd(); ++#else ++ wakeup_softirqd(); ++#endif + } + + static inline void tick_irq_exit(void) diff --git a/debian/patches/features/all/rt/softirq-make-fifo.patch b/debian/patches/features/all/rt/softirq-make-fifo.patch new file mode 100644 index 000000000..6046590b4 --- /dev/null +++ b/debian/patches/features/all/rt/softirq-make-fifo.patch @@ -0,0 +1,53 @@ +Subject: softirq-make-fifo.patch +From: Thomas Gleixner +Date: Thu, 21 Jul 2011 21:06:43 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/softirq.c | 18 ++++++++++++++++++ + 1 file changed, 18 insertions(+) + +Index: linux-stable/kernel/softirq.c +=================================================================== +--- linux-stable.orig/kernel/softirq.c ++++ linux-stable/kernel/softirq.c +@@ -372,6 +372,8 @@ asmlinkage void do_softirq(void) + + static inline void local_bh_disable_nort(void) { local_bh_disable(); } + static inline void _local_bh_enable_nort(void) { _local_bh_enable(); } ++static void ksoftirqd_set_sched_params(unsigned int cpu) { } ++static void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) { } + + #else /* !PREEMPT_RT_FULL */ + +@@ -526,6 +528,20 @@ static int ksoftirqd_do_softirq(int cpu) + static inline void local_bh_disable_nort(void) { } + static inline void _local_bh_enable_nort(void) { } + ++static inline void ksoftirqd_set_sched_params(unsigned int cpu) ++{ ++ struct sched_param param = { .sched_priority = 1 }; ++ ++ sched_setscheduler(current, SCHED_FIFO, ¶m); ++} ++ ++static inline void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) ++{ ++ struct sched_param param = { .sched_priority = 0 }; ++ ++ sched_setscheduler(current, SCHED_NORMAL, ¶m); ++} ++ + #endif /* PREEMPT_RT_FULL */ + /* + * Enter an interrupt context. +@@ -1077,6 +1093,8 @@ static struct notifier_block __cpuinitda + + static struct smp_hotplug_thread softirq_threads = { + .store = &ksoftirqd, ++ .setup = ksoftirqd_set_sched_params, ++ .cleanup = ksoftirqd_clr_sched_params, + .thread_should_run = ksoftirqd_should_run, + .thread_fn = run_ksoftirqd, + .thread_comm = "ksoftirqd/%u", diff --git a/debian/patches/features/all/rt/softirq-make-serving-softirqs-a-task-flag.patch b/debian/patches/features/all/rt/softirq-make-serving-softirqs-a-task-flag.patch new file mode 100644 index 000000000..f2e68328d --- /dev/null +++ b/debian/patches/features/all/rt/softirq-make-serving-softirqs-a-task-flag.patch @@ -0,0 +1,79 @@ +Subject: softirq: Make serving softirqs a task flag +From: Thomas Gleixner +Date: Thu, 04 Oct 2012 14:30:25 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Avoid the percpu softirq_runner pointer magic by using a task flag. 
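
With the flag, in_serving_softirq() reduces to a test on current, with no per-CPU lookup and no preempt_disable()/preempt_enable() pair around it. Below is a minimal, compilable userspace sketch of that idea; struct task, current_task, serve_softirq() and demo_action() are illustrative stand-ins, not the kernel's definitions.

    #include <stdio.h>

    #define PF_IN_SOFTIRQ 0x00000001

    struct task {
            unsigned int flags;
    };

    static __thread struct task current_task;   /* models the kernel's "current" */
    #define current (&current_task)

    static int in_serving_softirq(void)
    {
            /* no per-cpu lookup, no preempt_disable()/enable() pair: test the flag */
            return current->flags & PF_IN_SOFTIRQ;
    }

    static void serve_softirq(void (*action)(void))
    {
            current->flags |= PF_IN_SOFTIRQ;
            action();
            current->flags &= ~PF_IN_SOFTIRQ;
    }

    static void demo_action(void)
    {
            printf("inside handler: in_serving_softirq() = %d\n", in_serving_softirq());
    }

    int main(void)
    {
            printf("before: %d\n", in_serving_softirq());
            serve_softirq(demo_action);
            printf("after:  %d\n", in_serving_softirq());
            return 0;
    }
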
+ +Signed-off-by: Thomas Gleixner +--- + include/linux/sched.h | 1 + + kernel/softirq.c | 20 +++----------------- + 2 files changed, 4 insertions(+), 17 deletions(-) + +Index: linux-stable/include/linux/sched.h +=================================================================== +--- linux-stable.orig/include/linux/sched.h ++++ linux-stable/include/linux/sched.h +@@ -1667,6 +1667,7 @@ extern void thread_group_cputime_adjuste + /* + * Per process flags + */ ++#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */ + #define PF_EXITING 0x00000004 /* getting shut down */ + #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ + #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ +Index: linux-stable/kernel/softirq.c +=================================================================== +--- linux-stable.orig/kernel/softirq.c ++++ linux-stable/kernel/softirq.c +@@ -383,7 +383,6 @@ static void ksoftirqd_clr_sched_params(u + * On RT we serialize softirq execution with a cpu local lock + */ + static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock); +-static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner); + + static void __do_softirq_common(int need_rcu_bh_qs); + +@@ -438,22 +437,9 @@ void _local_bh_enable(void) + } + EXPORT_SYMBOL(_local_bh_enable); + +-/* For tracing */ +-int notrace __in_softirq(void) +-{ +- if (__get_cpu_var(local_softirq_lock).owner == current) +- return __get_cpu_var(local_softirq_lock).nestcnt; +- return 0; +-} +- + int in_serving_softirq(void) + { +- int res; +- +- preempt_disable(); +- res = __get_cpu_var(local_softirq_runner) == current; +- preempt_enable(); +- return res; ++ return current->flags & PF_IN_SOFTIRQ; + } + EXPORT_SYMBOL(in_serving_softirq); + +@@ -471,7 +457,7 @@ static void __do_softirq_common(int need + /* Reset the pending bitmask before enabling irqs */ + set_softirq_pending(0); + +- __get_cpu_var(local_softirq_runner) = current; ++ current->flags |= PF_IN_SOFTIRQ; + + lockdep_softirq_enter(); + +@@ -482,7 +468,7 @@ static void __do_softirq_common(int need + wakeup_softirqd(); + + lockdep_softirq_exit(); +- __get_cpu_var(local_softirq_runner) = NULL; ++ current->flags &= ~PF_IN_SOFTIRQ; + + current->softirq_nestcnt--; + } diff --git a/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch b/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch new file mode 100644 index 000000000..70fc377e2 --- /dev/null +++ b/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch @@ -0,0 +1,154 @@ +Subject: softirq: Check preemption after reenabling interrupts +From: Thomas Gleixner +Date: Sun, 13 Nov 2011 17:17:09 +0100 (CET) +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +raise_softirq_irqoff() disables interrupts and wakes the softirq +daemon, but after reenabling interrupts there is no preemption check, +so the execution of the softirq thread might be delayed arbitrarily. + +In principle we could add that check to local_irq_enable/restore, but +that's overkill as the rasie_softirq_irqoff() sections are the only +ones which show this behaviour. 
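
The call sites all follow one shape: raise the softirq with interrupts off, restore interrupts, then run the new check so a woken softirq thread can preempt right away. A compilable sketch of that shape follows; every function in it is a stub standing in for the kernel primitive of the same name, and the softirq number is only an example.

    #include <stdio.h>

    #define local_irq_save(flags)    ((flags) = 0UL)    /* stub */
    #define local_irq_restore(flags) ((void)(flags))    /* stub */

    static void raise_softirq_irqoff(unsigned int nr)
    {
            /* marks the softirq pending; may wake ksoftirqd */
            printf("softirq %u raised while interrupts are off\n", nr);
    }

    /*
     * In the patch this is preempt_check_resched() under
     * CONFIG_PREEMPT_RT_BASE and a plain barrier() otherwise,
     * so non-RT builds are unaffected.
     */
    static void preempt_check_resched_rt(void)
    {
            printf("reschedule check: a woken softirq thread can run now\n");
    }

    static void trigger_block_softirq(void)
    {
            unsigned long flags;

            local_irq_save(flags);
            raise_softirq_irqoff(4);            /* e.g. BLOCK_SOFTIRQ */
            local_irq_restore(flags);
            /*
             * Without this check the wakeup issued above may not take
             * effect until the next natural preemption point.
             */
            preempt_check_resched_rt();
    }

    int main(void)
    {
            trigger_block_softirq();
            return 0;
    }
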
+ +Reported-by: Carsten Emde +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + block/blk-iopoll.c | 3 +++ + block/blk-softirq.c | 3 +++ + include/linux/preempt.h | 3 +++ + net/core/dev.c | 6 ++++++ + 4 files changed, 15 insertions(+) + +Index: linux-stable/block/blk-iopoll.c +=================================================================== +--- linux-stable.orig/block/blk-iopoll.c ++++ linux-stable/block/blk-iopoll.c +@@ -38,6 +38,7 @@ void blk_iopoll_sched(struct blk_iopoll + list_add_tail(&iop->list, &__get_cpu_var(blk_cpu_iopoll)); + __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + EXPORT_SYMBOL(blk_iopoll_sched); + +@@ -135,6 +136,7 @@ static void blk_iopoll_softirq(struct so + __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); + + local_irq_enable(); ++ preempt_check_resched_rt(); + } + + /** +@@ -204,6 +206,7 @@ static int __cpuinit blk_iopoll_cpu_noti + &__get_cpu_var(blk_cpu_iopoll)); + __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); + local_irq_enable(); ++ preempt_check_resched_rt(); + } + + return NOTIFY_OK; +Index: linux-stable/block/blk-softirq.c +=================================================================== +--- linux-stable.orig/block/blk-softirq.c ++++ linux-stable/block/blk-softirq.c +@@ -51,6 +51,7 @@ static void trigger_softirq(void *data) + raise_softirq_irqoff(BLOCK_SOFTIRQ); + + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + + /* +@@ -93,6 +94,7 @@ static int __cpuinit blk_cpu_notify(stru + &__get_cpu_var(blk_cpu_done)); + raise_softirq_irqoff(BLOCK_SOFTIRQ); + local_irq_enable(); ++ preempt_check_resched_rt(); + } + + return NOTIFY_OK; +@@ -150,6 +152,7 @@ do_local: + goto do_local; + + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + + /** +Index: linux-stable/include/linux/preempt.h +=================================================================== +--- linux-stable.orig/include/linux/preempt.h ++++ linux-stable/include/linux/preempt.h +@@ -72,8 +72,10 @@ do { \ + + #ifndef CONFIG_PREEMPT_RT_BASE + # define preempt_enable_no_resched() sched_preempt_enable_no_resched() ++# define preempt_check_resched_rt() barrier() + #else + # define preempt_enable_no_resched() preempt_enable() ++# define preempt_check_resched_rt() preempt_check_resched() + #endif + + #define preempt_enable() \ +@@ -127,6 +129,7 @@ do { \ + #define preempt_disable_notrace() barrier() + #define preempt_enable_no_resched_notrace() barrier() + #define preempt_enable_notrace() barrier() ++#define preempt_check_resched_rt() barrier() + + #endif /* CONFIG_PREEMPT_COUNT */ + +Index: linux-stable/net/core/dev.c +=================================================================== +--- linux-stable.orig/net/core/dev.c ++++ linux-stable/net/core/dev.c +@@ -2108,6 +2108,7 @@ static inline void __netif_reschedule(st + sd->output_queue_tailp = &q->next_sched; + raise_softirq_irqoff(NET_TX_SOFTIRQ); + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + + void __netif_schedule(struct Qdisc *q) +@@ -2129,6 +2130,7 @@ void dev_kfree_skb_irq(struct sk_buff *s + sd->completion_queue = skb; + raise_softirq_irqoff(NET_TX_SOFTIRQ); + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + } + EXPORT_SYMBOL(dev_kfree_skb_irq); +@@ -3138,6 +3140,7 @@ enqueue: + rps_unlock(sd); + + local_irq_restore(flags); ++ preempt_check_resched_rt(); + + atomic_long_inc(&skb->dev->rx_dropped); + kfree_skb(skb); +@@ -4035,6 +4038,7 @@ static void net_rps_action_and_irq_enabl + } else + #endif + 
local_irq_enable(); ++ preempt_check_resched_rt(); + } + + static int process_backlog(struct napi_struct *napi, int quota) +@@ -4107,6 +4111,7 @@ void __napi_schedule(struct napi_struct + local_irq_save(flags); + ____napi_schedule(&__get_cpu_var(softnet_data), n); + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + EXPORT_SYMBOL(__napi_schedule); + +@@ -6045,6 +6050,7 @@ static int dev_cpu_callback(struct notif + + raise_softirq_irqoff(NET_TX_SOFTIRQ); + local_irq_enable(); ++ preempt_check_resched_rt(); + + /* Process offline CPU's input_pkt_queue */ + while ((skb = __skb_dequeue(&oldsd->process_queue))) { diff --git a/debian/patches/features/all/rt/softirq-sanitize-softirq-pending.patch b/debian/patches/features/all/rt/softirq-sanitize-softirq-pending.patch new file mode 100644 index 000000000..d9caa0bcd --- /dev/null +++ b/debian/patches/features/all/rt/softirq-sanitize-softirq-pending.patch @@ -0,0 +1,120 @@ +From: Thomas Gleixner +Date: Fri, 3 Jul 2009 13:16:38 -0500 +Subject: softirq: Sanitize softirq pending for NOHZ/RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner + +--- + include/linux/interrupt.h | 2 + + kernel/softirq.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++ + kernel/time/tick-sched.c | 9 ------ + 3 files changed, 66 insertions(+), 8 deletions(-) + +Index: linux-stable/include/linux/interrupt.h +=================================================================== +--- linux-stable.orig/include/linux/interrupt.h ++++ linux-stable/include/linux/interrupt.h +@@ -455,6 +455,8 @@ extern void __raise_softirq_irqoff(unsig + extern void raise_softirq_irqoff(unsigned int nr); + extern void raise_softirq(unsigned int nr); + ++extern void softirq_check_pending_idle(void); ++ + /* This is the worklist that queues up per-cpu softirq work. + * + * send_remote_sendirq() adds work to these lists, and +Index: linux-stable/kernel/softirq.c +=================================================================== +--- linux-stable.orig/kernel/softirq.c ++++ linux-stable/kernel/softirq.c +@@ -62,6 +62,69 @@ char *softirq_to_name[NR_SOFTIRQS] = { + "TASKLET", "SCHED", "HRTIMER", "RCU" + }; + ++#ifdef CONFIG_NO_HZ ++# ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * On preempt-rt a softirq might be blocked on a lock. There might be ++ * no other runnable task on this CPU because the lock owner runs on ++ * some other CPU. So we have to go into idle with the pending bit ++ * set. Therefor we need to check this otherwise we warn about false ++ * positives which confuses users and defeats the whole purpose of ++ * this test. ++ * ++ * This code is called with interrupts disabled. ++ */ ++void softirq_check_pending_idle(void) ++{ ++ static int rate_limit; ++ u32 warnpending = 0, pending; ++ ++ if (rate_limit >= 10) ++ return; ++ ++ pending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK; ++ if (pending) { ++ struct task_struct *tsk; ++ ++ tsk = __get_cpu_var(ksoftirqd); ++ /* ++ * The wakeup code in rtmutex.c wakes up the task ++ * _before_ it sets pi_blocked_on to NULL under ++ * tsk->pi_lock. So we need to check for both: state ++ * and pi_blocked_on. 
++ */ ++ raw_spin_lock(&tsk->pi_lock); ++ ++ if (!tsk->pi_blocked_on && !(tsk->state == TASK_RUNNING)) ++ warnpending = 1; ++ ++ raw_spin_unlock(&tsk->pi_lock); ++ } ++ ++ if (warnpending) { ++ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", ++ pending); ++ rate_limit++; ++ } ++} ++# else ++/* ++ * On !PREEMPT_RT we just printk rate limited: ++ */ ++void softirq_check_pending_idle(void) ++{ ++ static int rate_limit; ++ ++ if (rate_limit < 10 && ++ (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { ++ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", ++ local_softirq_pending()); ++ rate_limit++; ++ } ++} ++# endif ++#endif ++ + /* + * we cannot loop indefinitely here to avoid userspace starvation, + * but we also don't want to introduce a worst case 1/HZ latency +Index: linux-stable/kernel/time/tick-sched.c +=================================================================== +--- linux-stable.orig/kernel/time/tick-sched.c ++++ linux-stable/kernel/time/tick-sched.c +@@ -731,14 +731,7 @@ static bool can_stop_idle_tick(int cpu, + return false; + + if (unlikely(local_softirq_pending() && cpu_online(cpu))) { +- static int ratelimit; +- +- if (ratelimit < 10 && +- (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { +- pr_warn("NOHZ: local_softirq_pending %02x\n", +- (unsigned int) local_softirq_pending()); +- ratelimit++; +- } ++ softirq_check_pending_idle(); + return false; + } + diff --git a/debian/patches/features/all/rt/softirq-split-handling-function.patch b/debian/patches/features/all/rt/softirq-split-handling-function.patch new file mode 100644 index 000000000..c57db24ab --- /dev/null +++ b/debian/patches/features/all/rt/softirq-split-handling-function.patch @@ -0,0 +1,71 @@ +Subject: softirq: Split handling function +From: Thomas Gleixner +Date: Thu, 04 Oct 2012 15:33:53 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Split out the inner handling function, so RT can reuse it. 
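
Concretely, the per-vector body of handle_pending_softirqs() moves into handle_softirq(), and the caller becomes a plain walk over the pending bitmask, so an RT-specific path can run a single vector on its own. A small userspace model of the same refactoring follows; the action table and the demo handlers are illustrative, not the kernel's softirq_vec.

    #include <stdio.h>

    #define NR_SOFTIRQS 10

    static void (*softirq_vec[NR_SOFTIRQS])(void);

    static void handle_softirq(unsigned int vec_nr)
    {
            /* one vector on its own, reusable by other callers */
            if (softirq_vec[vec_nr])
                    softirq_vec[vec_nr]();
    }

    static void handle_pending_softirqs(unsigned int pending)
    {
            unsigned int vec_nr;

            /* the caller is reduced to a walk over the pending bits */
            for (vec_nr = 0; pending; vec_nr++, pending >>= 1) {
                    if (pending & 1)
                            handle_softirq(vec_nr);
            }
    }

    static void timer_action(void)  { printf("TIMER softirq ran\n"); }
    static void net_rx_action(void) { printf("NET_RX softirq ran\n"); }

    int main(void)
    {
            softirq_vec[1] = timer_action;      /* TIMER_SOFTIRQ */
            softirq_vec[3] = net_rx_action;     /* NET_RX_SOFTIRQ */

            handle_pending_softirqs((1u << 1) | (1u << 3));
            return 0;
    }
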
+ +Signed-off-by: Thomas Gleixner +--- + kernel/softirq.c | 43 +++++++++++++++++++++++-------------------- + 1 file changed, 23 insertions(+), 20 deletions(-) + +Index: linux-stable/kernel/softirq.c +=================================================================== +--- linux-stable.orig/kernel/softirq.c ++++ linux-stable/kernel/softirq.c +@@ -142,31 +142,34 @@ static void wakeup_softirqd(void) + wake_up_process(tsk); + } + +-static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs) ++static void handle_softirq(unsigned int vec_nr, int cpu, int need_rcu_bh_qs) + { +- struct softirq_action *h = softirq_vec; ++ struct softirq_action *h = softirq_vec + vec_nr; + unsigned int prev_count = preempt_count(); + +- local_irq_enable(); +- for ( ; pending; h++, pending >>= 1) { +- unsigned int vec_nr = h - softirq_vec; ++ kstat_incr_softirqs_this_cpu(vec_nr); ++ trace_softirq_entry(vec_nr); ++ h->action(h); ++ trace_softirq_exit(vec_nr); ++ ++ if (unlikely(prev_count != preempt_count())) { ++ pr_err("softirq %u %s %p preempt count leak: %08x -> %08x\n", ++ vec_nr, softirq_to_name[vec_nr], h->action, ++ prev_count, (unsigned int) preempt_count()); ++ preempt_count() = prev_count; ++ } ++ if (need_rcu_bh_qs) ++ rcu_bh_qs(cpu); ++} + +- if (!(pending & 1)) +- continue; ++static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs) ++{ ++ unsigned int vec_nr; + +- kstat_incr_softirqs_this_cpu(vec_nr); +- trace_softirq_entry(vec_nr); +- h->action(h); +- trace_softirq_exit(vec_nr); +- if (unlikely(prev_count != preempt_count())) { +- printk(KERN_ERR +- "huh, entered softirq %u %s %p with preempt_count %08x exited with %08x?\n", +- vec_nr, softirq_to_name[vec_nr], h->action, +- prev_count, (unsigned int) preempt_count()); +- preempt_count() = prev_count; +- } +- if (need_rcu_bh_qs) +- rcu_bh_qs(cpu); ++ local_irq_enable(); ++ for (vec_nr = 0; pending; vec_nr++, pending >>= 1) { ++ if (pending & 1) ++ handle_softirq(vec_nr, cpu, need_rcu_bh_qs); + } + local_irq_disable(); + } diff --git a/debian/patches/features/all/rt/softirq-split-locks.patch b/debian/patches/features/all/rt/softirq-split-locks.patch new file mode 100644 index 000000000..01d891c02 --- /dev/null +++ b/debian/patches/features/all/rt/softirq-split-locks.patch @@ -0,0 +1,456 @@ +Subject: softirq: Split softirq locks +From: Thomas Gleixner +Date: Thu, 04 Oct 2012 14:20:47 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +The 3.x RT series removed the split softirq implementation in favour +of pushing softirq processing into the context of the thread which +raised it. Though this prevents us from handling the various softirqs +at different priorities. Now instead of reintroducing the split +softirq threads we split the locks which serialize the softirq +processing. + +If a softirq is raised in context of a thread, then the softirq is +noted on a per thread field, if the thread is in a bh disabled +region. If the softirq is raised from hard interrupt context, then the +bit is set in the flag field of ksoftirqd and ksoftirqd is invoked. +When a thread leaves a bh disabled region, then it tries to execute +the softirqs which have been raised in its own context. It acquires +the per softirq / per cpu lock for the softirq and then checks, +whether the softirq is still pending in the per cpu +local_softirq_pending() field. If yes, it runs the softirq. If no, +then some other task executed it already. 
This allows for zero config +softirq elevation in the context of user space tasks or interrupt +threads. + +Signed-off-by: Thomas Gleixner +--- + include/linux/sched.h | 1 + kernel/softirq.c | 305 ++++++++++++++++++++++++++++++-------------------- + 2 files changed, 185 insertions(+), 121 deletions(-) + +Index: linux-stable/include/linux/sched.h +=================================================================== +--- linux-stable.orig/include/linux/sched.h ++++ linux-stable/include/linux/sched.h +@@ -1446,6 +1446,7 @@ struct task_struct { + #ifdef CONFIG_PREEMPT_RT_BASE + struct rcu_head put_rcu; + int softirq_nestcnt; ++ unsigned int softirqs_raised; + #endif + #ifdef CONFIG_PREEMPT_RT_FULL + # if defined CONFIG_HIGHMEM || defined CONFIG_X86_32 +Index: linux-stable/kernel/softirq.c +=================================================================== +--- linux-stable.orig/kernel/softirq.c ++++ linux-stable/kernel/softirq.c +@@ -162,6 +162,12 @@ static void handle_softirq(unsigned int + rcu_bh_qs(cpu); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL ++static inline int ksoftirqd_softirq_pending(void) ++{ ++ return local_softirq_pending(); ++} ++ + static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs) + { + unsigned int vec_nr; +@@ -174,7 +180,19 @@ static void handle_pending_softirqs(u32 + local_irq_disable(); + } + +-#ifndef CONFIG_PREEMPT_RT_FULL ++static void run_ksoftirqd(unsigned int cpu) ++{ ++ local_irq_disable(); ++ if (ksoftirqd_softirq_pending()) { ++ __do_softirq(); ++ rcu_note_context_switch(cpu); ++ local_irq_enable(); ++ cond_resched(); ++ return; ++ } ++ local_irq_enable(); ++} ++ + /* + * preempt_count and SOFTIRQ_OFFSET usage: + * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving +@@ -375,6 +393,32 @@ asmlinkage void do_softirq(void) + + #endif + ++/* ++ * This function must run with irqs disabled! ++ */ ++void raise_softirq_irqoff(unsigned int nr) ++{ ++ __raise_softirq_irqoff(nr); ++ ++ /* ++ * If we're in an interrupt or softirq, we're done ++ * (this also catches softirq-disabled code). We will ++ * actually run the softirq once we return from ++ * the irq or softirq. ++ * ++ * Otherwise we wake up ksoftirqd to make sure we ++ * schedule the softirq soon. 
++ */ ++ if (!in_interrupt()) ++ wakeup_softirqd(); ++} ++ ++void __raise_softirq_irqoff(unsigned int nr) ++{ ++ trace_softirq_raise(nr); ++ or_softirq_pending(1UL << nr); ++} ++ + static inline void local_bh_disable_nort(void) { local_bh_disable(); } + static inline void _local_bh_enable_nort(void) { _local_bh_enable(); } + static void ksoftirqd_set_sched_params(unsigned int cpu) { } +@@ -383,20 +427,78 @@ static void ksoftirqd_clr_sched_params(u + #else /* !PREEMPT_RT_FULL */ + + /* +- * On RT we serialize softirq execution with a cpu local lock ++ * On RT we serialize softirq execution with a cpu local lock per softirq + */ +-static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock); ++static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks); + +-static void __do_softirq_common(int need_rcu_bh_qs); ++void __init softirq_early_init(void) ++{ ++ int i; + +-void __do_softirq(void) ++ for (i = 0; i < NR_SOFTIRQS; i++) ++ local_irq_lock_init(local_softirq_locks[i]); ++} ++ ++static void lock_softirq(int which) + { +- __do_softirq_common(0); ++ __local_lock(&__get_cpu_var(local_softirq_locks[which])); + } + +-void __init softirq_early_init(void) ++static void unlock_softirq(int which) + { +- local_irq_lock_init(local_softirq_lock); ++ __local_unlock(&__get_cpu_var(local_softirq_locks[which])); ++} ++ ++static void do_single_softirq(int which, int need_rcu_bh_qs) ++{ ++ unsigned long old_flags = current->flags; ++ ++ current->flags &= ~PF_MEMALLOC; ++ vtime_account_irq_enter(current); ++ current->flags |= PF_IN_SOFTIRQ; ++ lockdep_softirq_enter(); ++ local_irq_enable(); ++ handle_softirq(which, smp_processor_id(), need_rcu_bh_qs); ++ local_irq_disable(); ++ lockdep_softirq_exit(); ++ current->flags &= ~PF_IN_SOFTIRQ; ++ vtime_account_irq_enter(current); ++ tsk_restore_flags(current, old_flags, PF_MEMALLOC); ++} ++ ++/* ++ * Called with interrupts disabled. Process softirqs which were raised ++ * in current context (or on behalf of ksoftirqd). ++ */ ++static void do_current_softirqs(int need_rcu_bh_qs) ++{ ++ while (current->softirqs_raised) { ++ int i = __ffs(current->softirqs_raised); ++ unsigned int pending, mask = (1U << i); ++ ++ current->softirqs_raised &= ~mask; ++ local_irq_enable(); ++ ++ /* ++ * If the lock is contended, we boost the owner to ++ * process the softirq or leave the critical section ++ * now. ++ */ ++ lock_softirq(i); ++ local_irq_disable(); ++ /* ++ * Check with the local_softirq_pending() bits, ++ * whether we need to process this still or if someone ++ * else took care of it. 
++ */ ++ pending = local_softirq_pending(); ++ if (pending & mask) { ++ set_softirq_pending(pending & ~mask); ++ do_single_softirq(i, need_rcu_bh_qs); ++ } ++ unlock_softirq(i); ++ WARN_ON(current->softirq_nestcnt != 1); ++ } + } + + void local_bh_disable(void) +@@ -411,17 +513,11 @@ void local_bh_enable(void) + if (WARN_ON(current->softirq_nestcnt == 0)) + return; + +- if ((current->softirq_nestcnt == 1) && +- local_softirq_pending() && +- local_trylock(local_softirq_lock)) { ++ local_irq_disable(); ++ if (current->softirq_nestcnt == 1 && current->softirqs_raised) ++ do_current_softirqs(1); ++ local_irq_enable(); + +- local_irq_disable(); +- if (local_softirq_pending()) +- __do_softirq(); +- local_irq_enable(); +- local_unlock(local_softirq_lock); +- WARN_ON(current->softirq_nestcnt != 1); +- } + current->softirq_nestcnt--; + migrate_enable(); + } +@@ -446,86 +542,82 @@ int in_serving_softirq(void) + } + EXPORT_SYMBOL(in_serving_softirq); + +-/* +- * Called with bh and local interrupts disabled. For full RT cpu must +- * be pinned. +- */ +-static void __do_softirq_common(int need_rcu_bh_qs) ++/* Called with preemption disabled */ ++static void run_ksoftirqd(unsigned int cpu) + { +- u32 pending = local_softirq_pending(); +- int cpu = smp_processor_id(); +- ++ local_irq_disable(); + current->softirq_nestcnt++; +- +- /* Reset the pending bitmask before enabling irqs */ +- set_softirq_pending(0); +- +- current->flags |= PF_IN_SOFTIRQ; +- +- lockdep_softirq_enter(); +- +- handle_pending_softirqs(pending, cpu, need_rcu_bh_qs); +- +- pending = local_softirq_pending(); +- if (pending) +- wakeup_softirqd(); +- +- lockdep_softirq_exit(); +- current->flags &= ~PF_IN_SOFTIRQ; +- ++ do_current_softirqs(1); + current->softirq_nestcnt--; ++ rcu_note_context_switch(cpu); ++ local_irq_enable(); + } + +-static int __thread_do_softirq(int cpu) ++/* ++ * Called from netif_rx_ni(). Preemption enabled, but migration ++ * disabled. So the cpu can't go away under us. ++ */ ++void thread_do_softirq(void) ++{ ++ if (!in_serving_softirq() && current->softirqs_raised) { ++ current->softirq_nestcnt++; ++ do_current_softirqs(0); ++ current->softirq_nestcnt--; ++ } ++} ++ ++void __raise_softirq_irqoff(unsigned int nr) + { ++ trace_softirq_raise(nr); ++ or_softirq_pending(1UL << nr); ++ + /* +- * Prevent the current cpu from going offline. +- * pin_current_cpu() can reenable preemption and block on the +- * hotplug mutex. When it returns, the current cpu is +- * pinned. It might be the wrong one, but the offline check +- * below catches that. ++ * If we are not in a hard interrupt and inside a bh disabled ++ * region, we simply raise the flag on current. local_bh_enable() ++ * will make sure that the softirq is executed. Otherwise we ++ * delegate it to ksoftirqd. + */ +- pin_current_cpu(); ++ if (!in_irq() && current->softirq_nestcnt) ++ current->softirqs_raised |= (1U << nr); ++ else if (__this_cpu_read(ksoftirqd)) ++ __this_cpu_read(ksoftirqd)->softirqs_raised |= (1U << nr); ++} ++ ++/* ++ * This function must run with irqs disabled! ++ */ ++void raise_softirq_irqoff(unsigned int nr) ++{ ++ __raise_softirq_irqoff(nr); ++ + /* +- * If called from ksoftirqd (cpu >= 0) we need to check +- * whether we are on the wrong cpu due to cpu offlining. If +- * called via thread_do_softirq() no action required. ++ * If we're in an hard interrupt we let irq return code deal ++ * with the wakeup of ksoftirqd. 
+ */ +- if (cpu >= 0 && cpu_is_offline(cpu)) { +- unpin_current_cpu(); +- return -1; +- } +- preempt_enable(); +- local_lock(local_softirq_lock); +- local_irq_disable(); ++ if (in_irq()) ++ return; ++ + /* +- * We cannot switch stacks on RT as we want to be able to +- * schedule! ++ * If we are in thread context but outside of a bh disabled ++ * region, we need to wake ksoftirqd as well. ++ * ++ * CHECKME: Some of the places which do that could be wrapped ++ * into local_bh_disable/enable pairs. Though it's unclear ++ * whether this is worth the effort. To find those places just ++ * raise a WARN() if the condition is met. + */ +- if (local_softirq_pending()) +- __do_softirq_common(cpu >= 0); +- local_unlock(local_softirq_lock); +- unpin_current_cpu(); +- preempt_disable(); +- local_irq_enable(); +- return 0; ++ if (!current->softirq_nestcnt) ++ wakeup_softirqd(); + } + +-/* +- * Called from netif_rx_ni(). Preemption enabled. +- */ +-void thread_do_softirq(void) ++void do_raise_softirq_irqoff(unsigned int nr) + { +- if (!in_serving_softirq()) { +- preempt_disable(); +- __thread_do_softirq(-1); +- preempt_enable(); +- } ++ raise_softirq_irqoff(nr); + } + +-static int ksoftirqd_do_softirq(int cpu) ++static inline int ksoftirqd_softirq_pending(void) + { +- return __thread_do_softirq(cpu); ++ return current->softirqs_raised; + } + + static inline void local_bh_disable_nort(void) { } +@@ -536,6 +628,10 @@ static inline void ksoftirqd_set_sched_p + struct sched_param param = { .sched_priority = 1 }; + + sched_setscheduler(current, SCHED_FIFO, ¶m); ++ /* Take over all pending softirqs when starting */ ++ local_irq_disable(); ++ current->softirqs_raised = local_softirq_pending(); ++ local_irq_enable(); + } + + static inline void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) +@@ -574,8 +670,14 @@ static inline void invoke_softirq(void) + __do_softirq(); + else + wakeup_softirqd(); +-#else ++#else /* PREEMPT_RT_FULL */ ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ if (__this_cpu_read(ksoftirqd) && ++ __this_cpu_read(ksoftirqd)->softirqs_raised) + wakeup_softirqd(); ++ local_irq_restore(flags); + #endif + } + +@@ -613,26 +715,6 @@ void irq_exit(void) + rcu_irq_exit(); + } + +-/* +- * This function must run with irqs disabled! +- */ +-inline void raise_softirq_irqoff(unsigned int nr) +-{ +- __raise_softirq_irqoff(nr); +- +- /* +- * If we're in an interrupt or softirq, we're done +- * (this also catches softirq-disabled code). We will +- * actually run the softirq once we return from +- * the irq or softirq. +- * +- * Otherwise we wake up ksoftirqd to make sure we +- * schedule the softirq soon. 
+- */ +- if (!in_interrupt()) +- wakeup_softirqd(); +-} +- + void raise_softirq(unsigned int nr) + { + unsigned long flags; +@@ -642,12 +724,6 @@ void raise_softirq(unsigned int nr) + local_irq_restore(flags); + } + +-void __raise_softirq_irqoff(unsigned int nr) +-{ +- trace_softirq_raise(nr); +- or_softirq_pending(1UL << nr); +-} +- + void open_softirq(int nr, void (*action)(struct softirq_action *)) + { + softirq_vec[nr].action = action; +@@ -1091,20 +1167,7 @@ EXPORT_SYMBOL(tasklet_unlock_wait); + + static int ksoftirqd_should_run(unsigned int cpu) + { +- return local_softirq_pending(); +-} +- +-static void run_ksoftirqd(unsigned int cpu) +-{ +- local_irq_disable(); +- if (local_softirq_pending()) { +- __do_softirq(); +- rcu_note_context_switch(cpu); +- local_irq_enable(); +- cond_resched(); +- return; +- } +- local_irq_enable(); ++ return ksoftirqd_softirq_pending(); + } + + #ifdef CONFIG_HOTPLUG_CPU diff --git a/debian/patches/features/all/rt/softirq-split-out-code.patch b/debian/patches/features/all/rt/softirq-split-out-code.patch new file mode 100644 index 000000000..b1e2615ae --- /dev/null +++ b/debian/patches/features/all/rt/softirq-split-out-code.patch @@ -0,0 +1,104 @@ +Subject: softirq-split-out-code.patch +From: Thomas Gleixner +Date: Tue, 28 Jun 2011 15:46:49 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/softirq.c | 62 ++++++++++++++++++++++++++----------------------------- + 1 file changed, 30 insertions(+), 32 deletions(-) + +Index: linux-stable/kernel/softirq.c +=================================================================== +--- linux-stable.orig/kernel/softirq.c ++++ linux-stable/kernel/softirq.c +@@ -77,6 +77,34 @@ static void wakeup_softirqd(void) + wake_up_process(tsk); + } + ++static void handle_pending_softirqs(u32 pending, int cpu) ++{ ++ struct softirq_action *h = softirq_vec; ++ unsigned int prev_count = preempt_count(); ++ ++ local_irq_enable(); ++ for ( ; pending; h++, pending >>= 1) { ++ unsigned int vec_nr = h - softirq_vec; ++ ++ if (!(pending & 1)) ++ continue; ++ ++ kstat_incr_softirqs_this_cpu(vec_nr); ++ trace_softirq_entry(vec_nr); ++ h->action(h); ++ trace_softirq_exit(vec_nr); ++ if (unlikely(prev_count != preempt_count())) { ++ printk(KERN_ERR ++ "huh, entered softirq %u %s %p with preempt_count %08x exited with %08x?\n", ++ vec_nr, softirq_to_name[vec_nr], h->action, ++ prev_count, (unsigned int) preempt_count()); ++ preempt_count() = prev_count; ++ } ++ rcu_bh_qs(cpu); ++ } ++ local_irq_disable(); ++} ++ + /* + * preempt_count and SOFTIRQ_OFFSET usage: + * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving +@@ -212,7 +240,6 @@ EXPORT_SYMBOL(local_bh_enable_ip); + + asmlinkage void __do_softirq(void) + { +- struct softirq_action *h; + __u32 pending; + unsigned long end = jiffies + MAX_SOFTIRQ_TIME; + int cpu; +@@ -230,7 +257,7 @@ asmlinkage void __do_softirq(void) + account_irq_enter_time(current); + + __local_bh_disable((unsigned long)__builtin_return_address(0), +- SOFTIRQ_OFFSET); ++ SOFTIRQ_OFFSET); + lockdep_softirq_enter(); + + cpu = smp_processor_id(); +@@ -238,36 +265,7 @@ restart: + /* Reset the pending bitmask before enabling irqs */ + set_softirq_pending(0); + +- local_irq_enable(); +- +- h = softirq_vec; +- +- do { +- if (pending & 1) { +- unsigned int vec_nr = h - softirq_vec; +- int prev_count = preempt_count(); +- +- kstat_incr_softirqs_this_cpu(vec_nr); +- +- trace_softirq_entry(vec_nr); +- h->action(h); +- 
trace_softirq_exit(vec_nr); +- if (unlikely(prev_count != preempt_count())) { +- printk(KERN_ERR "huh, entered softirq %u %s %p" +- "with preempt_count %08x," +- " exited with %08x?\n", vec_nr, +- softirq_to_name[vec_nr], h->action, +- prev_count, preempt_count()); +- preempt_count() = prev_count; +- } +- +- rcu_bh_qs(cpu); +- } +- h++; +- pending >>= 1; +- } while (pending); +- +- local_irq_disable(); ++ handle_pending_softirqs(pending, cpu); + + pending = local_softirq_pending(); + if (pending) { diff --git a/debian/patches/features/all/rt/softirq-thread-do-softirq.patch b/debian/patches/features/all/rt/softirq-thread-do-softirq.patch new file mode 100644 index 000000000..10820122e --- /dev/null +++ b/debian/patches/features/all/rt/softirq-thread-do-softirq.patch @@ -0,0 +1,36 @@ +Subject: softirq-thread-do-softirq.patch +From: Thomas Gleixner +Date: Tue, 28 Jun 2011 15:44:15 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/interrupt.h | 1 + + net/core/dev.c | 2 +- + 2 files changed, 2 insertions(+), 1 deletion(-) + +Index: linux-stable/include/linux/interrupt.h +=================================================================== +--- linux-stable.orig/include/linux/interrupt.h ++++ linux-stable/include/linux/interrupt.h +@@ -443,6 +443,7 @@ struct softirq_action + + asmlinkage void do_softirq(void); + asmlinkage void __do_softirq(void); ++static inline void thread_do_softirq(void) { do_softirq(); } + extern void open_softirq(int nr, void (*action)(struct softirq_action *)); + extern void softirq_init(void); + extern void __raise_softirq_irqoff(unsigned int nr); +Index: linux-stable/net/core/dev.c +=================================================================== +--- linux-stable.orig/net/core/dev.c ++++ linux-stable/net/core/dev.c +@@ -3204,7 +3204,7 @@ int netif_rx_ni(struct sk_buff *skb) + preempt_disable(); + err = netif_rx(skb); + if (local_softirq_pending()) +- do_softirq(); ++ thread_do_softirq(); + preempt_enable(); + + return err; diff --git a/debian/patches/features/all/rt/spinlock-types-separate-raw.patch b/debian/patches/features/all/rt/spinlock-types-separate-raw.patch new file mode 100644 index 000000000..317e02dd1 --- /dev/null +++ b/debian/patches/features/all/rt/spinlock-types-separate-raw.patch @@ -0,0 +1,213 @@ +Subject: spinlock-types-separate-raw.patch +From: Thomas Gleixner +Date: Wed, 29 Jun 2011 19:34:01 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/rwlock_types.h | 4 + + include/linux/spinlock_types.h | 74 ------------------------------------ + include/linux/spinlock_types_nort.h | 33 ++++++++++++++++ + include/linux/spinlock_types_raw.h | 56 +++++++++++++++++++++++++++ + 4 files changed, 95 insertions(+), 72 deletions(-) + +Index: linux-stable/include/linux/rwlock_types.h +=================================================================== +--- linux-stable.orig/include/linux/rwlock_types.h ++++ linux-stable/include/linux/rwlock_types.h +@@ -1,6 +1,10 @@ + #ifndef __LINUX_RWLOCK_TYPES_H + #define __LINUX_RWLOCK_TYPES_H + ++#if !defined(__LINUX_SPINLOCK_TYPES_H) ++# error "Do not include directly, include spinlock_types.h" ++#endif ++ + /* + * include/linux/rwlock_types.h - generic rwlock type definitions + * and initializers +Index: linux-stable/include/linux/spinlock_types.h +=================================================================== +--- 
linux-stable.orig/include/linux/spinlock_types.h ++++ linux-stable/include/linux/spinlock_types.h +@@ -9,79 +9,9 @@ + * Released under the General Public License (GPL). + */ + +-#if defined(CONFIG_SMP) +-# include +-#else +-# include +-#endif ++#include + +-#include +- +-typedef struct raw_spinlock { +- arch_spinlock_t raw_lock; +-#ifdef CONFIG_GENERIC_LOCKBREAK +- unsigned int break_lock; +-#endif +-#ifdef CONFIG_DEBUG_SPINLOCK +- unsigned int magic, owner_cpu; +- void *owner; +-#endif +-#ifdef CONFIG_DEBUG_LOCK_ALLOC +- struct lockdep_map dep_map; +-#endif +-} raw_spinlock_t; +- +-#define SPINLOCK_MAGIC 0xdead4ead +- +-#define SPINLOCK_OWNER_INIT ((void *)-1L) +- +-#ifdef CONFIG_DEBUG_LOCK_ALLOC +-# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +-#else +-# define SPIN_DEP_MAP_INIT(lockname) +-#endif +- +-#ifdef CONFIG_DEBUG_SPINLOCK +-# define SPIN_DEBUG_INIT(lockname) \ +- .magic = SPINLOCK_MAGIC, \ +- .owner_cpu = -1, \ +- .owner = SPINLOCK_OWNER_INIT, +-#else +-# define SPIN_DEBUG_INIT(lockname) +-#endif +- +-#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ +- { \ +- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ +- SPIN_DEBUG_INIT(lockname) \ +- SPIN_DEP_MAP_INIT(lockname) } +- +-#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ +- (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) +- +-#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) +- +-typedef struct spinlock { +- union { +- struct raw_spinlock rlock; +- +-#ifdef CONFIG_DEBUG_LOCK_ALLOC +-# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) +- struct { +- u8 __padding[LOCK_PADSIZE]; +- struct lockdep_map dep_map; +- }; +-#endif +- }; +-} spinlock_t; +- +-#define __SPIN_LOCK_INITIALIZER(lockname) \ +- { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } +- +-#define __SPIN_LOCK_UNLOCKED(lockname) \ +- (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) +- +-#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) ++#include + + #include + +Index: linux-stable/include/linux/spinlock_types_nort.h +=================================================================== +--- /dev/null ++++ linux-stable/include/linux/spinlock_types_nort.h +@@ -0,0 +1,33 @@ ++#ifndef __LINUX_SPINLOCK_TYPES_NORT_H ++#define __LINUX_SPINLOCK_TYPES_NORT_H ++ ++#ifndef __LINUX_SPINLOCK_TYPES_H ++#error "Do not include directly. 
Include spinlock_types.h instead" ++#endif ++ ++/* ++ * The non RT version maps spinlocks to raw_spinlocks ++ */ ++typedef struct spinlock { ++ union { ++ struct raw_spinlock rlock; ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) ++ struct { ++ u8 __padding[LOCK_PADSIZE]; ++ struct lockdep_map dep_map; ++ }; ++#endif ++ }; ++} spinlock_t; ++ ++#define __SPIN_LOCK_INITIALIZER(lockname) \ ++ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } ++ ++#define __SPIN_LOCK_UNLOCKED(lockname) \ ++ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) ++ ++#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) ++ ++#endif +Index: linux-stable/include/linux/spinlock_types_raw.h +=================================================================== +--- /dev/null ++++ linux-stable/include/linux/spinlock_types_raw.h +@@ -0,0 +1,56 @@ ++#ifndef __LINUX_SPINLOCK_TYPES_RAW_H ++#define __LINUX_SPINLOCK_TYPES_RAW_H ++ ++#if defined(CONFIG_SMP) ++# include ++#else ++# include ++#endif ++ ++#include ++ ++typedef struct raw_spinlock { ++ arch_spinlock_t raw_lock; ++#ifdef CONFIG_GENERIC_LOCKBREAK ++ unsigned int break_lock; ++#endif ++#ifdef CONFIG_DEBUG_SPINLOCK ++ unsigned int magic, owner_cpu; ++ void *owner; ++#endif ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++} raw_spinlock_t; ++ ++#define SPINLOCK_MAGIC 0xdead4ead ++ ++#define SPINLOCK_OWNER_INIT ((void *)-1L) ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } ++#else ++# define SPIN_DEP_MAP_INIT(lockname) ++#endif ++ ++#ifdef CONFIG_DEBUG_SPINLOCK ++# define SPIN_DEBUG_INIT(lockname) \ ++ .magic = SPINLOCK_MAGIC, \ ++ .owner_cpu = -1, \ ++ .owner = SPINLOCK_OWNER_INIT, ++#else ++# define SPIN_DEBUG_INIT(lockname) ++#endif ++ ++#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ ++ { \ ++ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ ++ SPIN_DEBUG_INIT(lockname) \ ++ SPIN_DEP_MAP_INIT(lockname) } ++ ++#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ ++ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) ++ ++#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) ++ ++#endif diff --git a/debian/patches/features/all/rt/stomp-machine-deal-clever-with-stopper-lock.patch b/debian/patches/features/all/rt/stomp-machine-deal-clever-with-stopper-lock.patch new file mode 100644 index 000000000..0feb7f154 --- /dev/null +++ b/debian/patches/features/all/rt/stomp-machine-deal-clever-with-stopper-lock.patch @@ -0,0 +1,61 @@ +Subject: stomp_machine: Use mutex_trylock when called from inactive cpu +From: Thomas Gleixner +Date: Wed, 03 Oct 2012 17:21:53 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +If the stop machinery is called from inactive CPU we cannot use +mutex_lock, because some other stomp machine invokation might be in +progress and the mutex can be contended. We cannot schedule from this +context, so trylock and loop. 
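
A compilable userspace sketch of that acquisition strategy follows; the pthread mutex stands in for the kernel mutex, sched_yield() for cpu_relax(), and lock_stopper() is an invented helper, not part of the patch.

    #include <pthread.h>
    #include <sched.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t stopper_lock = PTHREAD_MUTEX_INITIALIZER;

    static void lock_stopper(bool can_sleep)
    {
            if (can_sleep) {
                    pthread_mutex_lock(&stopper_lock);      /* normal, blocking path */
            } else {
                    /* inactive-CPU path: never block, poll until the lock is free */
                    while (pthread_mutex_trylock(&stopper_lock) != 0)
                            sched_yield();                  /* stands in for cpu_relax() */
            }
    }

    int main(void)
    {
            lock_stopper(false);
            printf("lock taken without sleeping\n");
            pthread_mutex_unlock(&stopper_lock);

            lock_stopper(true);
            printf("lock taken via blocking lock\n");
            pthread_mutex_unlock(&stopper_lock);
            return 0;
    }

The busy wait is tolerable here because the caller cannot schedule anyway and only has to outlast a concurrent invocation that is still queueing its work.
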
+ +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + kernel/stop_machine.c | 13 +++++++++---- + 1 file changed, 9 insertions(+), 4 deletions(-) + +Index: linux-stable/kernel/stop_machine.c +=================================================================== +--- linux-stable.orig/kernel/stop_machine.c ++++ linux-stable/kernel/stop_machine.c +@@ -160,7 +160,7 @@ static DEFINE_PER_CPU(struct cpu_stop_wo + + static void queue_stop_cpus_work(const struct cpumask *cpumask, + cpu_stop_fn_t fn, void *arg, +- struct cpu_stop_done *done) ++ struct cpu_stop_done *done, bool inactive) + { + struct cpu_stop_work *work; + unsigned int cpu; +@@ -177,7 +177,12 @@ static void queue_stop_cpus_work(const s + * Make sure that all work is queued on all cpus before we + * any of the cpus can execute it. + */ +- mutex_lock(&stopper_lock); ++ if (!inactive) { ++ mutex_lock(&stopper_lock); ++ } else { ++ while (!mutex_trylock(&stopper_lock)) ++ cpu_relax(); ++ } + for_each_cpu(cpu, cpumask) + cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu)); + mutex_unlock(&stopper_lock); +@@ -189,7 +194,7 @@ static int __stop_cpus(const struct cpum + struct cpu_stop_done done; + + cpu_stop_init_done(&done, cpumask_weight(cpumask)); +- queue_stop_cpus_work(cpumask, fn, arg, &done); ++ queue_stop_cpus_work(cpumask, fn, arg, &done, false); + wait_for_stop_done(&done); + return done.executed ? done.ret : -ENOENT; + } +@@ -564,7 +569,7 @@ int stop_machine_from_inactive_cpu(int ( + set_state(&smdata, STOPMACHINE_PREPARE); + cpu_stop_init_done(&done, num_active_cpus()); + queue_stop_cpus_work(cpu_active_mask, stop_machine_cpu_stop, &smdata, +- &done); ++ &done, true); + ret = stop_machine_cpu_stop(&smdata); + + /* Busy wait for completion. */ diff --git a/debian/patches/features/all/rt/stomp-machine-raw-lock.patch b/debian/patches/features/all/rt/stomp-machine-raw-lock.patch new file mode 100644 index 000000000..4dd84dceb --- /dev/null +++ b/debian/patches/features/all/rt/stomp-machine-raw-lock.patch @@ -0,0 +1,188 @@ +Subject: stomp-machine-raw-lock.patch +From: Thomas Gleixner +Date: Wed, 29 Jun 2011 11:01:51 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/stop_machine.c | 62 ++++++++++++++++++++++++++++++++++---------------- + 1 file changed, 43 insertions(+), 19 deletions(-) + +Index: linux-stable/kernel/stop_machine.c +=================================================================== +--- linux-stable.orig/kernel/stop_machine.c ++++ linux-stable/kernel/stop_machine.c +@@ -29,12 +29,12 @@ struct cpu_stop_done { + atomic_t nr_todo; /* nr left to execute */ + bool executed; /* actually executed? */ + int ret; /* collected return value */ +- struct completion completion; /* fired if nr_todo reaches 0 */ ++ struct task_struct *waiter; /* woken when nr_todo reaches 0 */ + }; + + /* the actual stopper, one per every possible cpu, enabled on online cpus */ + struct cpu_stopper { +- spinlock_t lock; ++ raw_spinlock_t lock; + bool enabled; /* is this stopper enabled? 
*/ + struct list_head works; /* list of pending works */ + }; +@@ -47,7 +47,7 @@ static void cpu_stop_init_done(struct cp + { + memset(done, 0, sizeof(*done)); + atomic_set(&done->nr_todo, nr_todo); +- init_completion(&done->completion); ++ done->waiter = current; + } + + /* signal completion unless @done is NULL */ +@@ -56,8 +56,10 @@ static void cpu_stop_signal_done(struct + if (done) { + if (executed) + done->executed = true; +- if (atomic_dec_and_test(&done->nr_todo)) +- complete(&done->completion); ++ if (atomic_dec_and_test(&done->nr_todo)) { ++ wake_up_process(done->waiter); ++ done->waiter = NULL; ++ } + } + } + +@@ -69,7 +71,7 @@ static void cpu_stop_queue_work(unsigned + + unsigned long flags; + +- spin_lock_irqsave(&stopper->lock, flags); ++ raw_spin_lock_irqsave(&stopper->lock, flags); + + if (stopper->enabled) { + list_add_tail(&work->list, &stopper->works); +@@ -77,7 +79,23 @@ static void cpu_stop_queue_work(unsigned + } else + cpu_stop_signal_done(work->done, false); + +- spin_unlock_irqrestore(&stopper->lock, flags); ++ raw_spin_unlock_irqrestore(&stopper->lock, flags); ++} ++ ++static void wait_for_stop_done(struct cpu_stop_done *done) ++{ ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ while (atomic_read(&done->nr_todo)) { ++ schedule(); ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ } ++ /* ++ * We need to wait until cpu_stop_signal_done() has cleared ++ * done->waiter. ++ */ ++ while (done->waiter) ++ cpu_relax(); ++ set_current_state(TASK_RUNNING); + } + + /** +@@ -111,7 +129,7 @@ int stop_one_cpu(unsigned int cpu, cpu_s + + cpu_stop_init_done(&done, 1); + cpu_stop_queue_work(cpu, &work); +- wait_for_completion(&done.completion); ++ wait_for_stop_done(&done); + return done.executed ? done.ret : -ENOENT; + } + +@@ -172,7 +190,7 @@ static int __stop_cpus(const struct cpum + + cpu_stop_init_done(&done, cpumask_weight(cpumask)); + queue_stop_cpus_work(cpumask, fn, arg, &done); +- wait_for_completion(&done.completion); ++ wait_for_stop_done(&done); + return done.executed ? done.ret : -ENOENT; + } + +@@ -251,9 +269,9 @@ static int cpu_stop_should_run(unsigned + unsigned long flags; + int run; + +- spin_lock_irqsave(&stopper->lock, flags); ++ raw_spin_lock_irqsave(&stopper->lock, flags); + run = !list_empty(&stopper->works); +- spin_unlock_irqrestore(&stopper->lock, flags); ++ raw_spin_unlock_irqrestore(&stopper->lock, flags); + return run; + } + +@@ -265,13 +283,13 @@ static void cpu_stopper_thread(unsigned + + repeat: + work = NULL; +- spin_lock_irq(&stopper->lock); ++ raw_spin_lock_irq(&stopper->lock); + if (!list_empty(&stopper->works)) { + work = list_first_entry(&stopper->works, + struct cpu_stop_work, list); + list_del_init(&work->list); + } +- spin_unlock_irq(&stopper->lock); ++ raw_spin_unlock_irq(&stopper->lock); + + if (work) { + cpu_stop_fn_t fn = work->fn; +@@ -303,7 +321,13 @@ repeat: + kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL, + ksym_buf), arg); + ++ /* ++ * Make sure that the wakeup and setting done->waiter ++ * to NULL is atomic. 
++ */ ++ local_irq_disable(); + cpu_stop_signal_done(done, true); ++ local_irq_enable(); + goto repeat; + } + } +@@ -322,20 +346,20 @@ static void cpu_stop_park(unsigned int c + unsigned long flags; + + /* drain remaining works */ +- spin_lock_irqsave(&stopper->lock, flags); ++ raw_spin_lock_irqsave(&stopper->lock, flags); + list_for_each_entry(work, &stopper->works, list) + cpu_stop_signal_done(work->done, false); + stopper->enabled = false; +- spin_unlock_irqrestore(&stopper->lock, flags); ++ raw_spin_unlock_irqrestore(&stopper->lock, flags); + } + + static void cpu_stop_unpark(unsigned int cpu) + { + struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); + +- spin_lock_irq(&stopper->lock); ++ raw_spin_lock_irq(&stopper->lock); + stopper->enabled = true; +- spin_unlock_irq(&stopper->lock); ++ raw_spin_unlock_irq(&stopper->lock); + } + + static struct smp_hotplug_thread cpu_stop_threads = { +@@ -357,7 +381,7 @@ static int __init cpu_stop_init(void) + for_each_possible_cpu(cpu) { + struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); + +- spin_lock_init(&stopper->lock); ++ raw_spin_lock_init(&stopper->lock); + INIT_LIST_HEAD(&stopper->works); + } + +@@ -544,7 +568,7 @@ int stop_machine_from_inactive_cpu(int ( + ret = stop_machine_cpu_stop(&smdata); + + /* Busy wait for completion. */ +- while (!completion_done(&done.completion)) ++ while (atomic_read(&done.nr_todo)) + cpu_relax(); + + mutex_unlock(&stop_cpus_mutex); diff --git a/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch b/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch new file mode 100644 index 000000000..116aee89b --- /dev/null +++ b/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch @@ -0,0 +1,64 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:30:27 -0500 +Subject: stop_machine: convert stop_machine_run() to PREEMPT_RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Instead of playing with non-preemption, introduce explicit +startup serialization. This is more robust and cleaner as +well. + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + kernel/stop_machine.c | 20 +++++++++++++++----- + 1 file changed, 15 insertions(+), 5 deletions(-) + +Index: linux-stable/kernel/stop_machine.c +=================================================================== +--- linux-stable.orig/kernel/stop_machine.c ++++ linux-stable/kernel/stop_machine.c +@@ -137,6 +137,7 @@ void stop_one_cpu_nowait(unsigned int cp + + /* static data for stop_cpus */ + static DEFINE_MUTEX(stop_cpus_mutex); ++static DEFINE_MUTEX(stopper_lock); + static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work); + + static void queue_stop_cpus_work(const struct cpumask *cpumask, +@@ -155,14 +156,13 @@ static void queue_stop_cpus_work(const s + } + + /* +- * Disable preemption while queueing to avoid getting +- * preempted by a stopper which might wait for other stoppers +- * to enter @fn which can lead to deadlock. ++ * Make sure that all work is queued on all cpus before we ++ * any of the cpus can execute it. 
+ */ +- preempt_disable(); ++ mutex_lock(&stopper_lock); + for_each_cpu(cpu, cpumask) + cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu)); +- preempt_enable(); ++ mutex_unlock(&stopper_lock); + } + + static int __stop_cpus(const struct cpumask *cpumask, +@@ -279,6 +279,16 @@ repeat: + struct cpu_stop_done *done = work->done; + char ksym_buf[KSYM_NAME_LEN] __maybe_unused; + ++ /* ++ * Wait until the stopper finished scheduling on all ++ * cpus ++ */ ++ mutex_lock(&stopper_lock); ++ /* ++ * Let other cpu threads continue as well ++ */ ++ mutex_unlock(&stopper_lock); ++ + /* cpu stop callbacks are not allowed to sleep */ + preempt_disable(); + diff --git a/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch b/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch new file mode 100644 index 000000000..4d8b0da05 --- /dev/null +++ b/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch @@ -0,0 +1,113 @@ +From: Thomas Gleixner +Date: Thu, 15 Jul 2010 10:29:00 +0200 +Subject: suspend: Prevent might sleep splats +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +timekeeping suspend/resume calls read_persistant_clock() which takes +rtc_lock. That results in might sleep warnings because at that point +we run with interrupts disabled. + +We cannot convert rtc_lock to a raw spinlock as that would trigger +other might sleep warnings. + +As a temporary workaround we disable the might sleep warnings by +setting system_state to SYSTEM_SUSPEND before calling sysdev_suspend() +and restoring it to SYSTEM_RUNNING afer sysdev_resume(). + +Needs to be revisited. + +Signed-off-by: Thomas Gleixner + +--- + include/linux/kernel.h | 1 + + kernel/power/hibernate.c | 7 +++++++ + kernel/power/suspend.c | 4 ++++ + 3 files changed, 12 insertions(+) + +Index: linux-stable/include/linux/kernel.h +=================================================================== +--- linux-stable.orig/include/linux/kernel.h ++++ linux-stable/include/linux/kernel.h +@@ -415,6 +415,7 @@ extern enum system_states { + SYSTEM_HALT, + SYSTEM_POWER_OFF, + SYSTEM_RESTART, ++ SYSTEM_SUSPEND, + } system_state; + + #define TAINT_PROPRIETARY_MODULE 0 +Index: linux-stable/kernel/power/hibernate.c +=================================================================== +--- linux-stable.orig/kernel/power/hibernate.c ++++ linux-stable/kernel/power/hibernate.c +@@ -275,6 +275,8 @@ static int create_image(int platform_mod + + local_irq_disable(); + ++ system_state = SYSTEM_SUSPEND; ++ + error = syscore_suspend(); + if (error) { + printk(KERN_ERR "PM: Some system devices failed to power down, " +@@ -302,6 +304,7 @@ static int create_image(int platform_mod + syscore_resume(); + + Enable_irqs: ++ system_state = SYSTEM_RUNNING; + local_irq_enable(); + + Enable_cpus: +@@ -427,6 +430,7 @@ static int resume_target_kernel(bool pla + goto Enable_cpus; + + local_irq_disable(); ++ system_state = SYSTEM_SUSPEND; + + error = syscore_suspend(); + if (error) +@@ -460,6 +464,7 @@ static int resume_target_kernel(bool pla + syscore_resume(); + + Enable_irqs: ++ system_state = SYSTEM_RUNNING; + local_irq_enable(); + + Enable_cpus: +@@ -542,6 +547,7 @@ int hibernation_platform_enter(void) + goto Platform_finish; + + local_irq_disable(); ++ system_state = SYSTEM_SUSPEND; + syscore_suspend(); + if (pm_wakeup_pending()) { + error = -EAGAIN; +@@ -554,6 +560,7 @@ int hibernation_platform_enter(void) + + Power_up: + syscore_resume(); ++ system_state = SYSTEM_RUNNING; + 
local_irq_enable(); + enable_nonboot_cpus(); + +Index: linux-stable/kernel/power/suspend.c +=================================================================== +--- linux-stable.orig/kernel/power/suspend.c ++++ linux-stable/kernel/power/suspend.c +@@ -217,6 +217,8 @@ static int suspend_enter(suspend_state_t + arch_suspend_disable_irqs(); + BUG_ON(!irqs_disabled()); + ++ system_state = SYSTEM_SUSPEND; ++ + error = syscore_suspend(); + if (!error) { + *wakeup = pm_wakeup_pending(); +@@ -227,6 +229,8 @@ static int suspend_enter(suspend_state_t + syscore_resume(); + } + ++ system_state = SYSTEM_RUNNING; ++ + arch_suspend_enable_irqs(); + BUG_ON(irqs_disabled()); + diff --git a/debian/patches/features/all/rt/sysctl-include-atomic-h.patch b/debian/patches/features/all/rt/sysctl-include-atomic-h.patch new file mode 100644 index 000000000..cb3acf9bb --- /dev/null +++ b/debian/patches/features/all/rt/sysctl-include-atomic-h.patch @@ -0,0 +1,22 @@ +Subject: sysctl-include-fix.patch +From: Thomas Gleixner +Date: Mon, 14 Nov 2011 10:52:34 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/sysctl.h | 1 + + 1 file changed, 1 insertion(+) + +Index: linux-stable/include/linux/sysctl.h +=================================================================== +--- linux-stable.orig/include/linux/sysctl.h ++++ linux-stable/include/linux/sysctl.h +@@ -25,6 +25,7 @@ + #include + #include + #include ++#include + #include + + /* For the /proc/sys support */ diff --git a/debian/patches/features/all/rt/sysfs-realtime-entry.patch b/debian/patches/features/all/rt/sysfs-realtime-entry.patch new file mode 100644 index 000000000..3cf28b5ee --- /dev/null +++ b/debian/patches/features/all/rt/sysfs-realtime-entry.patch @@ -0,0 +1,50 @@ +Subject: add /sys/kernel/realtime entry +From: Clark Williams +Date: Sat Jul 30 21:55:53 2011 -0500 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Add a /sys/kernel entry to indicate that the kernel is a +realtime kernel. + +Clark says that he needs this for udev rules, udev needs to evaluate +if its a PREEMPT_RT kernel a few thousand times and parsing uname +output is too slow or so. + +Are there better solutions? Should it exist and return 0 on !-rt? 
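As a rough illustration of the consumer side (not part of the patch itself): a userspace program could read the new attribute and treat a missing file as "not an -rt kernel". The helper below is a hypothetical sketch, not code from this series.

#include <stdio.h>

/* Hypothetical check: returns 1 on a PREEMPT_RT_FULL kernel, 0 otherwise
 * (covers both a missing file and a file containing "0"). */
static int kernel_is_realtime(void)
{
	int rt = 0;
	FILE *f = fopen("/sys/kernel/realtime", "r");

	if (f) {
		if (fscanf(f, "%d", &rt) != 1)
			rt = 0;
		fclose(f);
	}
	return rt;
}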
+ +Signed-off-by: Clark Williams +Signed-off-by: Peter Zijlstra +--- + kernel/ksysfs.c | 12 ++++++++++++ + 1 file changed, 12 insertions(+) + +Index: linux-stable/kernel/ksysfs.c +=================================================================== +--- linux-stable.orig/kernel/ksysfs.c ++++ linux-stable/kernel/ksysfs.c +@@ -132,6 +132,15 @@ KERNEL_ATTR_RO(vmcoreinfo); + + #endif /* CONFIG_KEXEC */ + ++#if defined(CONFIG_PREEMPT_RT_FULL) ++static ssize_t realtime_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "%d\n", 1); ++} ++KERNEL_ATTR_RO(realtime); ++#endif ++ + /* whether file capabilities are enabled */ + static ssize_t fscaps_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +@@ -197,6 +206,9 @@ static struct attribute * kernel_attrs[] + &vmcoreinfo_attr.attr, + #endif + &rcu_expedited_attr.attr, ++#ifdef CONFIG_PREEMPT_RT_FULL ++ &realtime_attr.attr, ++#endif + NULL + }; + diff --git a/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch b/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch new file mode 100644 index 000000000..046757111 --- /dev/null +++ b/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch @@ -0,0 +1,407 @@ +Subject: tasklet: Prevent tasklets from going into infinite spin in RT +From: Ingo Molnar +Date: Tue Nov 29 20:18:22 2011 -0500 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +When CONFIG_PREEMPT_RT_FULL is enabled, tasklets run as threads, +and spinlocks turn are mutexes. But this can cause issues with +tasks disabling tasklets. A tasklet runs under ksoftirqd, and +if a tasklets are disabled with tasklet_disable(), the tasklet +count is increased. When a tasklet runs, it checks this counter +and if it is set, it adds itself back on the softirq queue and +returns. + +The problem arises in RT because ksoftirq will see that a softirq +is ready to run (the tasklet softirq just re-armed itself), and will +not sleep, but instead run the softirqs again. The tasklet softirq +will still see that the count is non-zero and will not execute +the tasklet and requeue itself on the softirq again, which will +cause ksoftirqd to run it again and again and again. + +It gets worse because ksoftirqd runs as a real-time thread. +If it preempted the task that disabled tasklets, and that task +has migration disabled, or can't run for other reasons, the tasklet +softirq will never run because the count will never be zero, and +ksoftirqd will go into an infinite loop. As an RT task, it this +becomes a big problem. + +This is a hack solution to have tasklet_disable stop tasklets, and +when a tasklet runs, instead of requeueing the tasklet softirqd +it delays it. When tasklet_enable() is called, and tasklets are +waiting, then the tasklet_enable() will kick the tasklets to continue. +This prevents the lock up from ksoftirq going into an infinite loop. 
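To make the scenario above concrete, the driver-side code that triggers it is the ordinary tasklet_disable()/tasklet_enable() pairing. A minimal sketch follows; my_tasklet, my_func and my_teardown_path are illustrative names, not taken from this series.

#include <linux/interrupt.h>

static void my_func(unsigned long data);
static DECLARE_TASKLET(my_tasklet, my_func, 0);

static void my_func(unsigned long data)
{
	/* Runs from the tasklet softirq -- a ksoftirqd thread on RT. */
}

static void my_teardown_path(void)
{
	/*
	 * Raises my_tasklet.count.  A tasklet that is already scheduled
	 * now refuses to run; without the change described above the
	 * tasklet softirq keeps re-raising itself, and ksoftirqd (an RT
	 * thread) can spin while the task that called tasklet_disable()
	 * never gets the CPU back to reach tasklet_enable().
	 */
	tasklet_disable(&my_tasklet);

	/* ... update state the callback must not observe half-written ... */

	tasklet_enable(&my_tasklet);	/* with this patch: re-kicks a PENDING tasklet */
}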
+ +[ rostedt@goodmis.org: ported to 3.0-rt ] + +Signed-off-by: Ingo Molnar +Signed-off-by: Steven Rostedt +Signed-off-by: Thomas Gleixner + +--- + include/linux/interrupt.h | 39 ++++---- + kernel/softirq.c | 208 +++++++++++++++++++++++++++++++++------------- + 2 files changed, 170 insertions(+), 77 deletions(-) + +Index: linux-stable/include/linux/interrupt.h +=================================================================== +--- linux-stable.orig/include/linux/interrupt.h ++++ linux-stable/include/linux/interrupt.h +@@ -502,8 +502,9 @@ extern void __send_remote_softirq(struct + to be executed on some cpu at least once after this. + * If the tasklet is already scheduled, but its execution is still not + started, it will be executed only once. +- * If this tasklet is already running on another CPU (or schedule is called +- from tasklet itself), it is rescheduled for later. ++ * If this tasklet is already running on another CPU, it is rescheduled ++ for later. ++ * Schedule must not be called from the tasklet itself (a lockup occurs) + * Tasklet is strictly serialized wrt itself, but not + wrt another tasklets. If client needs some intertask synchronization, + he makes it with spinlocks. +@@ -528,27 +529,36 @@ struct tasklet_struct name = { NULL, 0, + enum + { + TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ +- TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ ++ TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */ ++ TASKLET_STATE_PENDING /* Tasklet is pending */ + }; + +-#ifdef CONFIG_SMP ++#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED) ++#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN) ++#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING) ++ ++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) + static inline int tasklet_trylock(struct tasklet_struct *t) + { + return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); + } + ++static inline int tasklet_tryunlock(struct tasklet_struct *t) ++{ ++ return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN; ++} ++ + static inline void tasklet_unlock(struct tasklet_struct *t) + { + smp_mb__before_clear_bit(); + clear_bit(TASKLET_STATE_RUN, &(t)->state); + } + +-static inline void tasklet_unlock_wait(struct tasklet_struct *t) +-{ +- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } +-} ++extern void tasklet_unlock_wait(struct tasklet_struct *t); ++ + #else + #define tasklet_trylock(t) 1 ++#define tasklet_tryunlock(t) 1 + #define tasklet_unlock_wait(t) do { } while (0) + #define tasklet_unlock(t) do { } while (0) + #endif +@@ -597,17 +607,8 @@ static inline void tasklet_disable(struc + smp_mb(); + } + +-static inline void tasklet_enable(struct tasklet_struct *t) +-{ +- smp_mb__before_atomic_dec(); +- atomic_dec(&t->count); +-} +- +-static inline void tasklet_hi_enable(struct tasklet_struct *t) +-{ +- smp_mb__before_atomic_dec(); +- atomic_dec(&t->count); +-} ++extern void tasklet_enable(struct tasklet_struct *t); ++extern void tasklet_hi_enable(struct tasklet_struct *t); + + extern void tasklet_kill(struct tasklet_struct *t); + extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); +Index: linux-stable/kernel/softirq.c +=================================================================== +--- linux-stable.orig/kernel/softirq.c ++++ linux-stable/kernel/softirq.c +@@ -21,6 +21,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -662,15 +663,45 @@ struct tasklet_head + static DEFINE_PER_CPU(struct tasklet_head, 
tasklet_vec); + static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec); + ++static void inline ++__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr) ++{ ++ if (tasklet_trylock(t)) { ++again: ++ /* We may have been preempted before tasklet_trylock ++ * and __tasklet_action may have already run. ++ * So double check the sched bit while the takslet ++ * is locked before adding it to the list. ++ */ ++ if (test_bit(TASKLET_STATE_SCHED, &t->state)) { ++ t->next = NULL; ++ *head->tail = t; ++ head->tail = &(t->next); ++ raise_softirq_irqoff(nr); ++ tasklet_unlock(t); ++ } else { ++ /* This is subtle. If we hit the corner case above ++ * It is possible that we get preempted right here, ++ * and another task has successfully called ++ * tasklet_schedule(), then this function, and ++ * failed on the trylock. Thus we must be sure ++ * before releasing the tasklet lock, that the ++ * SCHED_BIT is clear. Otherwise the tasklet ++ * may get its SCHED_BIT set, but not added to the ++ * list ++ */ ++ if (!tasklet_tryunlock(t)) ++ goto again; ++ } ++ } ++} ++ + void __tasklet_schedule(struct tasklet_struct *t) + { + unsigned long flags; + + local_irq_save(flags); +- t->next = NULL; +- *__this_cpu_read(tasklet_vec.tail) = t; +- __this_cpu_write(tasklet_vec.tail, &(t->next)); +- raise_softirq_irqoff(TASKLET_SOFTIRQ); ++ __tasklet_common_schedule(t, &__get_cpu_var(tasklet_vec), TASKLET_SOFTIRQ); + local_irq_restore(flags); + } + +@@ -681,10 +712,7 @@ void __tasklet_hi_schedule(struct taskle + unsigned long flags; + + local_irq_save(flags); +- t->next = NULL; +- *__this_cpu_read(tasklet_hi_vec.tail) = t; +- __this_cpu_write(tasklet_hi_vec.tail, &(t->next)); +- raise_softirq_irqoff(HI_SOFTIRQ); ++ __tasklet_common_schedule(t, &__get_cpu_var(tasklet_hi_vec), HI_SOFTIRQ); + local_irq_restore(flags); + } + +@@ -692,50 +720,119 @@ EXPORT_SYMBOL(__tasklet_hi_schedule); + + void __tasklet_hi_schedule_first(struct tasklet_struct *t) + { +- BUG_ON(!irqs_disabled()); +- +- t->next = __this_cpu_read(tasklet_hi_vec.head); +- __this_cpu_write(tasklet_hi_vec.head, t); +- __raise_softirq_irqoff(HI_SOFTIRQ); ++ __tasklet_hi_schedule(t); + } + + EXPORT_SYMBOL(__tasklet_hi_schedule_first); + +-static void tasklet_action(struct softirq_action *a) ++void tasklet_enable(struct tasklet_struct *t) + { +- struct tasklet_struct *list; ++ if (!atomic_dec_and_test(&t->count)) ++ return; ++ if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state)) ++ tasklet_schedule(t); ++} + +- local_irq_disable(); +- list = __this_cpu_read(tasklet_vec.head); +- __this_cpu_write(tasklet_vec.head, NULL); +- __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head); +- local_irq_enable(); ++EXPORT_SYMBOL(tasklet_enable); ++ ++void tasklet_hi_enable(struct tasklet_struct *t) ++{ ++ if (!atomic_dec_and_test(&t->count)) ++ return; ++ if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state)) ++ tasklet_hi_schedule(t); ++} ++ ++EXPORT_SYMBOL(tasklet_hi_enable); ++ ++static void ++__tasklet_action(struct softirq_action *a, struct tasklet_struct *list) ++{ ++ int loops = 1000000; + + while (list) { + struct tasklet_struct *t = list; + + list = list->next; + +- if (tasklet_trylock(t)) { +- if (!atomic_read(&t->count)) { +- if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) +- BUG(); +- t->func(t->data); +- tasklet_unlock(t); +- continue; +- } +- tasklet_unlock(t); ++ /* ++ * Should always succeed - after a tasklist got on the ++ * list (after getting the SCHED bit set from 0 to 1), ++ * nothing but 
the tasklet softirq it got queued to can ++ * lock it: ++ */ ++ if (!tasklet_trylock(t)) { ++ WARN_ON(1); ++ continue; + } + +- local_irq_disable(); + t->next = NULL; +- *__this_cpu_read(tasklet_vec.tail) = t; +- __this_cpu_write(tasklet_vec.tail, &(t->next)); +- __raise_softirq_irqoff(TASKLET_SOFTIRQ); +- local_irq_enable(); ++ ++ /* ++ * If we cannot handle the tasklet because it's disabled, ++ * mark it as pending. tasklet_enable() will later ++ * re-schedule the tasklet. ++ */ ++ if (unlikely(atomic_read(&t->count))) { ++out_disabled: ++ /* implicit unlock: */ ++ wmb(); ++ t->state = TASKLET_STATEF_PENDING; ++ continue; ++ } ++ ++ /* ++ * After this point on the tasklet might be rescheduled ++ * on another CPU, but it can only be added to another ++ * CPU's tasklet list if we unlock the tasklet (which we ++ * dont do yet). ++ */ ++ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) ++ WARN_ON(1); ++ ++again: ++ t->func(t->data); ++ ++ /* ++ * Try to unlock the tasklet. We must use cmpxchg, because ++ * another CPU might have scheduled or disabled the tasklet. ++ * We only allow the STATE_RUN -> 0 transition here. ++ */ ++ while (!tasklet_tryunlock(t)) { ++ /* ++ * If it got disabled meanwhile, bail out: ++ */ ++ if (atomic_read(&t->count)) ++ goto out_disabled; ++ /* ++ * If it got scheduled meanwhile, re-execute ++ * the tasklet function: ++ */ ++ if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) ++ goto again; ++ if (!--loops) { ++ printk("hm, tasklet state: %08lx\n", t->state); ++ WARN_ON(1); ++ tasklet_unlock(t); ++ break; ++ } ++ } + } + } + ++static void tasklet_action(struct softirq_action *a) ++{ ++ struct tasklet_struct *list; ++ ++ local_irq_disable(); ++ list = __get_cpu_var(tasklet_vec).head; ++ __get_cpu_var(tasklet_vec).head = NULL; ++ __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head; ++ local_irq_enable(); ++ ++ __tasklet_action(a, list); ++} ++ + static void tasklet_hi_action(struct softirq_action *a) + { + struct tasklet_struct *list; +@@ -746,29 +843,7 @@ static void tasklet_hi_action(struct sof + __this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head); + local_irq_enable(); + +- while (list) { +- struct tasklet_struct *t = list; +- +- list = list->next; +- +- if (tasklet_trylock(t)) { +- if (!atomic_read(&t->count)) { +- if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) +- BUG(); +- t->func(t->data); +- tasklet_unlock(t); +- continue; +- } +- tasklet_unlock(t); +- } +- +- local_irq_disable(); +- t->next = NULL; +- *__this_cpu_read(tasklet_hi_vec.tail) = t; +- __this_cpu_write(tasklet_hi_vec.tail, &(t->next)); +- __raise_softirq_irqoff(HI_SOFTIRQ); +- local_irq_enable(); +- } ++ __tasklet_action(a, list); + } + + +@@ -791,7 +866,7 @@ void tasklet_kill(struct tasklet_struct + + while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { + do { +- yield(); ++ msleep(1); + } while (test_bit(TASKLET_STATE_SCHED, &t->state)); + } + tasklet_unlock_wait(t); +@@ -995,6 +1070,23 @@ void __init softirq_init(void) + open_softirq(HI_SOFTIRQ, tasklet_hi_action); + } + ++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) ++void tasklet_unlock_wait(struct tasklet_struct *t) ++{ ++ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { ++ /* ++ * Hack for now to avoid this busy-loop: ++ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ msleep(1); ++#else ++ barrier(); ++#endif ++ } ++} ++EXPORT_SYMBOL(tasklet_unlock_wait); ++#endif ++ + static int ksoftirqd_should_run(unsigned int cpu) + { + return local_softirq_pending(); diff --git 
a/debian/patches/features/all/rt/tasklist-lock-fix-section-conflict.patch b/debian/patches/features/all/rt/tasklist-lock-fix-section-conflict.patch new file mode 100644 index 000000000..d1a99f1f9 --- /dev/null +++ b/debian/patches/features/all/rt/tasklist-lock-fix-section-conflict.patch @@ -0,0 +1,71 @@ +Subject: rwlocks: Fix section mismatch +From: John Kacur +Date: Mon, 19 Sep 2011 11:09:27 +0200 (CEST) +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +This fixes the following build error for the preempt-rt kernel. + +make kernel/fork.o + CC kernel/fork.o +kernel/fork.c:90: error: section of ¡tasklist_lock¢ conflicts with previous declaration +make[2]: *** [kernel/fork.o] Error 1 +make[1]: *** [kernel/fork.o] Error 2 + +The rt kernel cache aligns the RWLOCK in DEFINE_RWLOCK by default. +The non-rt kernels explicitly cache align only the tasklist_lock in +kernel/fork.c +That can create a build conflict. This fixes the build problem by making the +non-rt kernels cache align RWLOCKs by default. The side effect is that +the other RWLOCKs are also cache aligned for non-rt. + +This is a short term solution for rt only. +The longer term solution would be to push the cache aligned DEFINE_RWLOCK +to mainline. If there are objections, then we could create a +DEFINE_RWLOCK_CACHE_ALIGNED or something of that nature. + +Comments? Objections? + +Signed-off-by: John Kacur +Cc: Peter Zijlstra +Link: http://lkml.kernel.org/r/alpine.LFD.2.00.1109191104010.23118@localhost6.localdomain6 +Signed-off-by: Thomas Gleixner +--- + include/linux/rwlock_types.h | 3 ++- + kernel/fork.c | 4 ++-- + 2 files changed, 4 insertions(+), 3 deletions(-) + +Index: linux-stable/include/linux/rwlock_types.h +=================================================================== +--- linux-stable.orig/include/linux/rwlock_types.h ++++ linux-stable/include/linux/rwlock_types.h +@@ -47,6 +47,7 @@ typedef struct { + RW_DEP_MAP_INIT(lockname) } + #endif + +-#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) ++#define DEFINE_RWLOCK(name) \ ++ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name) + + #endif /* __LINUX_RWLOCK_TYPES_H */ +Index: linux-stable/kernel/fork.c +=================================================================== +--- linux-stable.orig/kernel/fork.c ++++ linux-stable/kernel/fork.c +@@ -94,7 +94,7 @@ int max_threads; /* tunable limit on nr + + DEFINE_PER_CPU(unsigned long, process_counts) = 0; + +-__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */ ++DEFINE_RWLOCK(tasklist_lock); /* outer */ + + #ifdef CONFIG_PROVE_RCU + int lockdep_tasklist_lock_is_held(void) +@@ -1693,7 +1693,7 @@ SYSCALL_DEFINE0(fork) + #ifdef __ARCH_WANT_SYS_VFORK + SYSCALL_DEFINE0(vfork) + { +- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0, ++ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0, + 0, NULL, NULL); + } + #endif diff --git a/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch b/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch new file mode 100644 index 000000000..98d1fed97 --- /dev/null +++ b/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch @@ -0,0 +1,159 @@ +Subject: timekeeping-split-jiffies-lock.patch +From: Thomas Gleixner +Date: Thu, 14 Feb 2013 22:36:59 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/time/jiffies.c | 7 ++++--- + kernel/time/tick-common.c | 10 ++++++---- + 
kernel/time/tick-internal.h | 3 ++- + kernel/time/tick-sched.c | 16 ++++++++++------ + kernel/time/timekeeping.c | 6 ++++-- + 5 files changed, 26 insertions(+), 16 deletions(-) + +Index: linux-stable/kernel/time/jiffies.c +=================================================================== +--- linux-stable.orig/kernel/time/jiffies.c ++++ linux-stable/kernel/time/jiffies.c +@@ -67,7 +67,8 @@ static struct clocksource clocksource_ji + .shift = JIFFIES_SHIFT, + }; + +-__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock); ++__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock); ++__cacheline_aligned_in_smp seqcount_t jiffies_seq; + + #if (BITS_PER_LONG < 64) + u64 get_jiffies_64(void) +@@ -76,9 +77,9 @@ u64 get_jiffies_64(void) + u64 ret; + + do { +- seq = read_seqbegin(&jiffies_lock); ++ seq = read_seqcount_begin(&jiffies_seq); + ret = jiffies_64; +- } while (read_seqretry(&jiffies_lock, seq)); ++ } while (read_seqcount_retry(&jiffies_seq, seq)); + return ret; + } + EXPORT_SYMBOL(get_jiffies_64); +Index: linux-stable/kernel/time/tick-common.c +=================================================================== +--- linux-stable.orig/kernel/time/tick-common.c ++++ linux-stable/kernel/time/tick-common.c +@@ -63,13 +63,15 @@ int tick_is_oneshot_available(void) + static void tick_periodic(int cpu) + { + if (tick_do_timer_cpu == cpu) { +- write_seqlock(&jiffies_lock); ++ raw_spin_lock(&jiffies_lock); ++ write_seqcount_begin(&jiffies_seq); + + /* Keep track of the next tick event */ + tick_next_period = ktime_add(tick_next_period, tick_period); + + do_timer(1); +- write_sequnlock(&jiffies_lock); ++ write_seqcount_end(&jiffies_seq); ++ raw_spin_unlock(&jiffies_lock); + } + + update_process_times(user_mode(get_irq_regs())); +@@ -130,9 +132,9 @@ void tick_setup_periodic(struct clock_ev + ktime_t next; + + do { +- seq = read_seqbegin(&jiffies_lock); ++ seq = read_seqcount_begin(&jiffies_seq); + next = tick_next_period; +- } while (read_seqretry(&jiffies_lock, seq)); ++ } while (read_seqcount_retry(&jiffies_seq, seq)); + + clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); + +Index: linux-stable/kernel/time/tick-internal.h +=================================================================== +--- linux-stable.orig/kernel/time/tick-internal.h ++++ linux-stable/kernel/time/tick-internal.h +@@ -4,7 +4,8 @@ + #include + #include + +-extern seqlock_t jiffies_lock; ++extern raw_spinlock_t jiffies_lock; ++extern seqcount_t jiffies_seq; + + #ifdef CONFIG_GENERIC_CLOCKEVENTS_BUILD + +Index: linux-stable/kernel/time/tick-sched.c +=================================================================== +--- linux-stable.orig/kernel/time/tick-sched.c ++++ linux-stable/kernel/time/tick-sched.c +@@ -61,7 +61,8 @@ static void tick_do_update_jiffies64(kti + return; + + /* Reevalute with jiffies_lock held */ +- write_seqlock(&jiffies_lock); ++ raw_spin_lock(&jiffies_lock); ++ write_seqcount_begin(&jiffies_seq); + + delta = ktime_sub(now, last_jiffies_update); + if (delta.tv64 >= tick_period.tv64) { +@@ -84,7 +85,8 @@ static void tick_do_update_jiffies64(kti + /* Keep the tick_next_period variable up to date */ + tick_next_period = ktime_add(last_jiffies_update, tick_period); + } +- write_sequnlock(&jiffies_lock); ++ write_seqcount_end(&jiffies_seq); ++ raw_spin_unlock(&jiffies_lock); + } + + /* +@@ -94,12 +96,14 @@ static ktime_t tick_init_jiffy_update(vo + { + ktime_t period; + +- write_seqlock(&jiffies_lock); ++ raw_spin_lock(&jiffies_lock); ++ write_seqcount_begin(&jiffies_seq); + /* Did we start the jiffies 
update yet ? */ + if (last_jiffies_update.tv64 == 0) + last_jiffies_update = tick_next_period; + period = last_jiffies_update; +- write_sequnlock(&jiffies_lock); ++ write_seqcount_end(&jiffies_seq); ++ raw_spin_unlock(&jiffies_lock); + return period; + } + +@@ -547,11 +551,11 @@ static ktime_t tick_nohz_stop_sched_tick + + /* Read jiffies and the time when jiffies were updated last */ + do { +- seq = read_seqbegin(&jiffies_lock); ++ seq = read_seqcount_begin(&jiffies_seq); + last_update = last_jiffies_update; + last_jiffies = jiffies; + time_delta = timekeeping_max_deferment(); +- } while (read_seqretry(&jiffies_lock, seq)); ++ } while (read_seqcount_retry(&jiffies_seq, seq)); + + if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || + arch_needs_cpu(cpu) || irq_work_needs_cpu()) { +Index: linux-stable/kernel/time/timekeeping.c +=================================================================== +--- linux-stable.orig/kernel/time/timekeeping.c ++++ linux-stable/kernel/time/timekeeping.c +@@ -1712,7 +1712,9 @@ EXPORT_SYMBOL(hardpps); + */ + void xtime_update(unsigned long ticks) + { +- write_seqlock(&jiffies_lock); ++ raw_spin_lock(&jiffies_lock); ++ write_seqcount_begin(&jiffies_seq); + do_timer(ticks); +- write_sequnlock(&jiffies_lock); ++ write_seqcount_end(&jiffies_seq); ++ raw_spin_unlock(&jiffies_lock); + } diff --git a/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch b/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch new file mode 100644 index 000000000..96487b88c --- /dev/null +++ b/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch @@ -0,0 +1,78 @@ +From: Peter Zijlstra +Date: Fri, 21 Aug 2009 11:56:45 +0200 +Subject: timer: delay waking softirqs from the jiffy tick +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +People were complaining about broken balancing with the recent -rt +series. + +A look at /proc/sched_debug yielded: + +cpu#0, 2393.874 MHz + .nr_running : 0 + .load : 0 + .cpu_load[0] : 177522 + .cpu_load[1] : 177522 + .cpu_load[2] : 177522 + .cpu_load[3] : 177522 + .cpu_load[4] : 177522 +cpu#1, 2393.874 MHz + .nr_running : 4 + .load : 4096 + .cpu_load[0] : 181618 + .cpu_load[1] : 180850 + .cpu_load[2] : 180274 + .cpu_load[3] : 179938 + .cpu_load[4] : 179758 + +Which indicated the cpu_load computation was hosed, the 177522 value +indicates that there is one RT task runnable. Initially I thought the +old problem of calculating the cpu_load from a softirq had re-surfaced, +however looking at the code shows its being done from scheduler_tick(). + +[ we really should fix this RT/cfs interaction some day... ] + +A few trace_printk()s later: + + sirq-timer/1-19 [001] 174.289744: 19: 50:S ==> [001] 0:140:R + -0 [001] 174.290724: enqueue_task_rt: adding task: 19/sirq-timer/1 with load: 177522 + -0 [001] 174.290725: 0:140:R + [001] 19: 50:S sirq-timer/1 + -0 [001] 174.290730: scheduler_tick: current load: 177522 + -0 [001] 174.290732: scheduler_tick: current: 0/swapper + -0 [001] 174.290736: 0:140:R ==> [001] 19: 50:R sirq-timer/1 + sirq-timer/1-19 [001] 174.290741: dequeue_task_rt: removing task: 19/sirq-timer/1 with load: 177522 + sirq-timer/1-19 [001] 174.290743: 19: 50:S ==> [001] 0:140:R + +We see that we always raise the timer softirq before doing the load +calculation. Avoid this by re-ordering the scheduler_tick() call in +update_process_times() to occur before we deal with timers. 
+ +This lowers the load back to sanity and restores regular load-balancing +behaviour. + +Signed-off-by: Peter Zijlstra +Signed-off-by: Thomas Gleixner + +--- + kernel/timer.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: linux-stable/kernel/timer.c +=================================================================== +--- linux-stable.orig/kernel/timer.c ++++ linux-stable/kernel/timer.c +@@ -1393,13 +1393,13 @@ void update_process_times(int user_tick) + + /* Note: this timer irq context must be accounted for as well. */ + account_process_tick(p, user_tick); ++ scheduler_tick(); + run_local_timers(); + rcu_check_callbacks(cpu, user_tick); + #ifdef CONFIG_IRQ_WORK + if (in_irq()) + irq_work_run(); + #endif +- scheduler_tick(); + run_posix_cpu_timers(p); + } + diff --git a/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch b/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch new file mode 100644 index 000000000..e40ba8c58 --- /dev/null +++ b/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch @@ -0,0 +1,30 @@ +Subject: timer-fd: Prevent live lock +From: Thomas Gleixner +Date: Wed, 25 Jan 2012 11:08:40 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +If hrtimer_try_to_cancel() requires a retry, then depending on the +priority setting te retry loop might prevent timer callback completion +on RT. Prevent that by waiting for completion on RT, no change for a +non RT kernel. + +Reported-by: Sankara Muthukrishnan +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + fs/timerfd.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: linux-stable/fs/timerfd.c +=================================================================== +--- linux-stable.orig/fs/timerfd.c ++++ linux-stable/fs/timerfd.c +@@ -308,7 +308,7 @@ static int do_timerfd_settime(int ufd, i + if (hrtimer_try_to_cancel(&ctx->tmr) >= 0) + break; + spin_unlock_irq(&ctx->wqh.lock); +- cpu_relax(); ++ hrtimer_wait_for_timer(&ctx->tmr); + } + + /* diff --git a/debian/patches/features/all/rt/timer-handle-idle-trylock-in-get-next-timer-irq.patch b/debian/patches/features/all/rt/timer-handle-idle-trylock-in-get-next-timer-irq.patch new file mode 100644 index 000000000..d8735fc2a --- /dev/null +++ b/debian/patches/features/all/rt/timer-handle-idle-trylock-in-get-next-timer-irq.patch @@ -0,0 +1,85 @@ +Subject: timer-handle-idle-trylock-in-get-next-timer-irq.patch +From: Thomas Gleixner +Date: Sun, 17 Jul 2011 22:08:38 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/spinlock_rt.h | 12 +++++++++++- + kernel/rtmutex.c | 7 +------ + kernel/timer.c | 9 +++++++-- + 3 files changed, 19 insertions(+), 9 deletions(-) + +Index: linux-stable/include/linux/spinlock_rt.h +=================================================================== +--- linux-stable.orig/include/linux/spinlock_rt.h ++++ linux-stable/include/linux/spinlock_rt.h +@@ -53,7 +53,17 @@ extern void __lockfunc __rt_spin_unlock( + + #define spin_lock_irq(lock) spin_lock(lock) + +-#define spin_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock)) ++#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock)) ++ ++#define spin_trylock(lock) \ ++({ \ ++ int __locked; \ ++ migrate_disable(); \ ++ __locked = spin_do_trylock(lock); \ ++ if (!__locked) \ ++ migrate_enable(); \ ++ __locked; \ ++}) + + #ifdef CONFIG_LOCKDEP + # define spin_lock_nested(lock, 
subclass) \ +Index: linux-stable/kernel/rtmutex.c +=================================================================== +--- linux-stable.orig/kernel/rtmutex.c ++++ linux-stable/kernel/rtmutex.c +@@ -862,15 +862,10 @@ EXPORT_SYMBOL(rt_spin_unlock_wait); + + int __lockfunc rt_spin_trylock(spinlock_t *lock) + { +- int ret; ++ int ret = rt_mutex_trylock(&lock->lock); + +- migrate_disable(); +- ret = rt_mutex_trylock(&lock->lock); + if (ret) + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); +- else +- migrate_enable(); +- + return ret; + } + EXPORT_SYMBOL(rt_spin_trylock); +Index: linux-stable/kernel/timer.c +=================================================================== +--- linux-stable.orig/kernel/timer.c ++++ linux-stable/kernel/timer.c +@@ -1386,9 +1386,10 @@ unsigned long get_next_timer_interrupt(u + /* + * On PREEMPT_RT we cannot sleep here. If the trylock does not + * succeed then we return the worst-case 'expires in 1 tick' +- * value: ++ * value. We use the rt functions here directly to avoid a ++ * migrate_disable() call. + */ +- if (!spin_trylock(&base->lock)) ++ if (!spin_do_trylock(&base->lock)) + return now + 1; + #else + spin_lock(&base->lock); +@@ -1398,7 +1399,11 @@ unsigned long get_next_timer_interrupt(u + base->next_timer = __next_timer_interrupt(base); + expires = base->next_timer; + } ++#ifdef CONFIG_PREEMPT_RT_FULL ++ rt_spin_unlock(&base->lock); ++#else + spin_unlock(&base->lock); ++#endif + + if (time_before_eq(expires, now)) + return now; diff --git a/debian/patches/features/all/rt/timers-avoid-the-base-null-otptimization-on-rt.patch b/debian/patches/features/all/rt/timers-avoid-the-base-null-otptimization-on-rt.patch new file mode 100644 index 000000000..2b884ed8c --- /dev/null +++ b/debian/patches/features/all/rt/timers-avoid-the-base-null-otptimization-on-rt.patch @@ -0,0 +1,71 @@ +Subject: timers: Avoid the switch timers base set to NULL trick on RT +From: Thomas Gleixner +Date: Thu, 21 Jul 2011 15:23:39 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +On RT that code is preemptible, so we cannot assign NULL to timers +base as a preempter would spin forever in lock_timer_base(). + +Signed-off-by: Thomas Gleixner +--- + kernel/timer.c | 40 ++++++++++++++++++++++++++++++++-------- + 1 file changed, 32 insertions(+), 8 deletions(-) + +Index: linux-stable/kernel/timer.c +=================================================================== +--- linux-stable.orig/kernel/timer.c ++++ linux-stable/kernel/timer.c +@@ -723,6 +723,36 @@ static struct tvec_base *lock_timer_base + } + } + ++#ifndef CONFIG_PREEMPT_RT_FULL ++static inline struct tvec_base *switch_timer_base(struct timer_list *timer, ++ struct tvec_base *old, ++ struct tvec_base *new) ++{ ++ /* See the comment in lock_timer_base() */ ++ timer_set_base(timer, NULL); ++ spin_unlock(&old->lock); ++ spin_lock(&new->lock); ++ timer_set_base(timer, new); ++ return new; ++} ++#else ++static inline struct tvec_base *switch_timer_base(struct timer_list *timer, ++ struct tvec_base *old, ++ struct tvec_base *new) ++{ ++ /* ++ * We cannot do the above because we might be preempted and ++ * then the preempter would see NULL and loop forever. 
++ */ ++ if (spin_trylock(&new->lock)) { ++ timer_set_base(timer, new); ++ spin_unlock(&old->lock); ++ return new; ++ } ++ return old; ++} ++#endif ++ + static inline int + __mod_timer(struct timer_list *timer, unsigned long expires, + bool pending_only, int pinned) +@@ -761,14 +791,8 @@ __mod_timer(struct timer_list *timer, un + * handler yet has not finished. This also guarantees that + * the timer is serialized wrt itself. + */ +- if (likely(base->running_timer != timer)) { +- /* See the comment in lock_timer_base() */ +- timer_set_base(timer, NULL); +- spin_unlock(&base->lock); +- base = new_base; +- spin_lock(&base->lock); +- timer_set_base(timer, base); +- } ++ if (likely(base->running_timer != timer)) ++ base = switch_timer_base(timer, base, new_base); + } + + timer->expires = expires; diff --git a/debian/patches/features/all/rt/timers-preempt-rt-support.patch b/debian/patches/features/all/rt/timers-preempt-rt-support.patch new file mode 100644 index 000000000..e7ab260d0 --- /dev/null +++ b/debian/patches/features/all/rt/timers-preempt-rt-support.patch @@ -0,0 +1,59 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:30:20 -0500 +Subject: timers: preempt-rt support +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner +--- + kernel/timer.c | 15 ++++++++++++--- + 1 file changed, 12 insertions(+), 3 deletions(-) + +Index: linux-stable/kernel/timer.c +=================================================================== +--- linux-stable.orig/kernel/timer.c ++++ linux-stable/kernel/timer.c +@@ -1358,7 +1358,17 @@ unsigned long get_next_timer_interrupt(u + if (cpu_is_offline(smp_processor_id())) + return expires; + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* ++ * On PREEMPT_RT we cannot sleep here. If the trylock does not ++ * succeed then we return the worst-case 'expires in 1 tick' ++ * value: ++ */ ++ if (!spin_trylock(&base->lock)) ++ return now + 1; ++#else + spin_lock(&base->lock); ++#endif + if (base->active_timers) { + if (time_before_eq(base->next_timer, base->timer_jiffies)) + base->next_timer = __next_timer_interrupt(base); +@@ -1368,7 +1378,6 @@ unsigned long get_next_timer_interrupt(u + + if (time_before_eq(expires, now)) + return now; +- + return cmp_next_hrtimer_event(now, expires); + } + #endif +@@ -1619,7 +1628,7 @@ static void __cpuinit migrate_timers(int + + BUG_ON(cpu_online(cpu)); + old_base = per_cpu(tvec_bases, cpu); +- new_base = get_cpu_var(tvec_bases); ++ new_base = get_local_var(tvec_bases); + /* + * The caller is globally serialized and nobody else + * takes two locks at once, deadlock is not possible. 
+@@ -1640,7 +1649,7 @@ static void __cpuinit migrate_timers(int + + spin_unlock(&old_base->lock); + spin_unlock_irq(&new_base->lock); +- put_cpu_var(tvec_bases); ++ put_local_var(tvec_bases); + } + #endif /* CONFIG_HOTPLUG_CPU */ + diff --git a/debian/patches/features/all/rt/timers-prepare-for-full-preemption-improve.patch b/debian/patches/features/all/rt/timers-prepare-for-full-preemption-improve.patch new file mode 100644 index 000000000..11bc1dab6 --- /dev/null +++ b/debian/patches/features/all/rt/timers-prepare-for-full-preemption-improve.patch @@ -0,0 +1,59 @@ +From a57194f115acfc967aa0907bc130e95b68723121 Mon Sep 17 00:00:00 2001 +From: Zhao Hongjiang +Date: Wed, 17 Apr 2013 17:44:16 +0800 +Subject: [PATCH] timers: prepare for full preemption improve +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +wake_up should do nothing on the nort, so we should use wakeup_timer_waiters, +also fix a spell mistake. + +Cc: stable-rt@vger.kernel.org +Signed-off-by: Zhao Hongjiang +[bigeasy: s/CONFIG_PREEMPT_RT_BASE/CONFIG_PREEMPT_RT_FULL/] +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/timer.c | 8 ++++++-- + 1 file changed, 6 insertions(+), 2 deletions(-) + +Index: linux-stable/kernel/timer.c +=================================================================== +--- linux-stable.orig/kernel/timer.c ++++ linux-stable/kernel/timer.c +@@ -78,7 +78,9 @@ struct tvec_root { + struct tvec_base { + spinlock_t lock; + struct timer_list *running_timer; ++#ifdef CONFIG_PREEMPT_RT_FULL + wait_queue_head_t wait_for_running_timer; ++#endif + unsigned long timer_jiffies; + unsigned long next_timer; + unsigned long active_timers; +@@ -962,7 +964,7 @@ static void wait_for_running_timer(struc + base->running_timer != timer); + } + +-# define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_tunning_timer) ++# define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_running_timer) + #else + static inline void wait_for_running_timer(struct timer_list *timer) + { +@@ -1216,7 +1218,7 @@ static inline void __run_timers(struct t + } + } + } +- wake_up(&base->wait_for_running_timer); ++ wakeup_timer_waiters(base); + spin_unlock_irq(&base->lock); + } + +@@ -1576,7 +1578,9 @@ static int __cpuinit init_timers_cpu(int + base = per_cpu(tvec_bases, cpu); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL + init_waitqueue_head(&base->wait_for_running_timer); ++#endif + + for (j = 0; j < TVN_SIZE; j++) { + INIT_LIST_HEAD(base->tv5.vec + j); diff --git a/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch b/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch new file mode 100644 index 000000000..c24b1ae6f --- /dev/null +++ b/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch @@ -0,0 +1,133 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:29:34 -0500 +Subject: timers: prepare for full preemption +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +When softirqs can be preempted we need to make sure that cancelling +the timer from the active thread can not deadlock vs. a running timer +callback. Add a waitqueue to resolve that. 
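The waitqueue used for this is the standard wait_event()/wake_up() pairing; a self-contained sketch of the idea follows (running_wq, running, cancel_side and callback_side are illustrative names, not the patch's own).

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(running_wq);
static void *running;			/* callback currently executing, if any */

static void cancel_side(void *timer)
{
	/*
	 * Instead of spinning with cpu_relax() -- which on RT can prevent a
	 * lower-priority softirq thread from ever finishing the callback --
	 * sleep until the callback is done.
	 */
	wait_event(running_wq, running != timer);
}

static void callback_side(void *timer)
{
	running = timer;
	/* ... run the timer function ... */
	running = NULL;
	wake_up(&running_wq);		/* release any waiting canceller */
}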
+ +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + include/linux/timer.h | 2 +- + kernel/timer.c | 36 +++++++++++++++++++++++++++++++++--- + 2 files changed, 34 insertions(+), 4 deletions(-) + +Index: linux-stable/include/linux/timer.h +=================================================================== +--- linux-stable.orig/include/linux/timer.h ++++ linux-stable/include/linux/timer.h +@@ -241,7 +241,7 @@ extern void add_timer(struct timer_list + + extern int try_to_del_timer_sync(struct timer_list *timer); + +-#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) + extern int del_timer_sync(struct timer_list *timer); + #else + # define del_timer_sync(t) del_timer(t) +Index: linux-stable/kernel/timer.c +=================================================================== +--- linux-stable.orig/kernel/timer.c ++++ linux-stable/kernel/timer.c +@@ -78,6 +78,7 @@ struct tvec_root { + struct tvec_base { + spinlock_t lock; + struct timer_list *running_timer; ++ wait_queue_head_t wait_for_running_timer; + unsigned long timer_jiffies; + unsigned long next_timer; + unsigned long active_timers; +@@ -739,12 +740,15 @@ __mod_timer(struct timer_list *timer, un + + debug_activate(timer, expires); + ++ preempt_disable_rt(); + cpu = smp_processor_id(); + + #if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP) + if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) + cpu = get_nohz_timer_target(); + #endif ++ preempt_enable_rt(); ++ + new_base = per_cpu(tvec_bases, cpu); + + if (base != new_base) { +@@ -945,6 +949,29 @@ void add_timer_on(struct timer_list *tim + } + EXPORT_SYMBOL_GPL(add_timer_on); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * Wait for a running timer ++ */ ++static void wait_for_running_timer(struct timer_list *timer) ++{ ++ struct tvec_base *base = timer->base; ++ ++ if (base->running_timer == timer) ++ wait_event(base->wait_for_running_timer, ++ base->running_timer != timer); ++} ++ ++# define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_tunning_timer) ++#else ++static inline void wait_for_running_timer(struct timer_list *timer) ++{ ++ cpu_relax(); ++} ++ ++# define wakeup_timer_waiters(b) do { } while (0) ++#endif ++ + /** + * del_timer - deactive a timer. + * @timer: the timer to be deactivated +@@ -1002,7 +1029,7 @@ int try_to_del_timer_sync(struct timer_l + } + EXPORT_SYMBOL(try_to_del_timer_sync); + +-#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) + /** + * del_timer_sync - deactivate a timer and wait for the handler to finish. 
+ * @timer: the timer to be deactivated +@@ -1062,7 +1089,7 @@ int del_timer_sync(struct timer_list *ti + int ret = try_to_del_timer_sync(timer); + if (ret >= 0) + return ret; +- cpu_relax(); ++ wait_for_running_timer(timer); + } + } + EXPORT_SYMBOL(del_timer_sync); +@@ -1179,15 +1206,17 @@ static inline void __run_timers(struct t + if (irqsafe) { + spin_unlock(&base->lock); + call_timer_fn(timer, fn, data); ++ base->running_timer = NULL; + spin_lock(&base->lock); + } else { + spin_unlock_irq(&base->lock); + call_timer_fn(timer, fn, data); ++ base->running_timer = NULL; + spin_lock_irq(&base->lock); + } + } + } +- base->running_timer = NULL; ++ wake_up(&base->wait_for_running_timer); + spin_unlock_irq(&base->lock); + } + +@@ -1547,6 +1576,7 @@ static int __cpuinit init_timers_cpu(int + base = per_cpu(tvec_bases, cpu); + } + ++ init_waitqueue_head(&base->wait_for_running_timer); + + for (j = 0; j < TVN_SIZE; j++) { + INIT_LIST_HEAD(base->tv5.vec + j); diff --git a/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch b/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch new file mode 100644 index 000000000..aa2db1372 --- /dev/null +++ b/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch @@ -0,0 +1,49 @@ +From: Steven Rostedt +Date: Thu, 29 Sep 2011 12:24:30 -0500 +Subject: tracing: Account for preempt off in preempt_schedule() +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +The preempt_schedule() uses the preempt_disable_notrace() version +because it can cause infinite recursion by the function tracer as +the function tracer uses preempt_enable_notrace() which may call +back into the preempt_schedule() code as the NEED_RESCHED is still +set and the PREEMPT_ACTIVE has not been set yet. + +See commit: d1f74e20b5b064a130cd0743a256c2d3cfe84010 that made this +change. + +The preemptoff and preemptirqsoff latency tracers require the first +and last preempt count modifiers to enable tracing. But this skips +the checks. Since we can not convert them back to the non notrace +version, we can use the idle() hooks for the latency tracers here. +That is, the start/stop_critical_timings() works well to manually +start and stop the latency tracer for preempt off timings. + +Signed-off-by: Steven Rostedt +Signed-off-by: Clark Williams +Signed-off-by: Thomas Gleixner +--- + kernel/sched/core.c | 9 +++++++++ + 1 file changed, 9 insertions(+) + +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -3093,7 +3093,16 @@ asmlinkage void __sched notrace preempt_ + + do { + add_preempt_count_notrace(PREEMPT_ACTIVE); ++ /* ++ * The add/subtract must not be traced by the function ++ * tracer. But we still want to account for the ++ * preempt off latency tracer. Since the _notrace versions ++ * of add/subtract skip the accounting for latency tracer ++ * we must force it manually. 
++ */ ++ start_critical_timings(); + __schedule(); ++ stop_critical_timings(); + sub_preempt_count_notrace(PREEMPT_ACTIVE); + + /* diff --git a/debian/patches/features/all/rt/treercu-use-simple-waitqueue.patch b/debian/patches/features/all/rt/treercu-use-simple-waitqueue.patch new file mode 100644 index 000000000..273ebdab3 --- /dev/null +++ b/debian/patches/features/all/rt/treercu-use-simple-waitqueue.patch @@ -0,0 +1,84 @@ +From db7ae440c333156392bc56badc610469a4d522ae Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Mon, 8 Apr 2013 16:09:57 +0200 +Subject: [PATCH] kernel/treercu: use a simple waitqueue +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/rcutree.c | 13 +++++++------ + kernel/rcutree.h | 2 +- + 2 files changed, 8 insertions(+), 7 deletions(-) + +Index: linux-stable/kernel/rcutree.c +=================================================================== +--- linux-stable.orig/kernel/rcutree.c ++++ linux-stable/kernel/rcutree.c +@@ -1572,7 +1572,7 @@ static int __noreturn rcu_gp_kthread(voi + + /* Handle grace-period start. */ + for (;;) { +- wait_event_interruptible(rsp->gp_wq, ++ swait_event_interruptible(rsp->gp_wq, + rsp->gp_flags & + RCU_GP_FLAG_INIT); + if ((rsp->gp_flags & RCU_GP_FLAG_INIT) && +@@ -1591,7 +1591,7 @@ static int __noreturn rcu_gp_kthread(voi + } + for (;;) { + rsp->jiffies_force_qs = jiffies + j; +- ret = wait_event_interruptible_timeout(rsp->gp_wq, ++ ret = swait_event_interruptible_timeout(rsp->gp_wq, + (rsp->gp_flags & RCU_GP_FLAG_FQS) || + (!ACCESS_ONCE(rnp->qsmask) && + !rcu_preempt_blocked_readers_cgp(rnp)), +@@ -1629,7 +1629,7 @@ static void rsp_wakeup(struct irq_work * + struct rcu_state *rsp = container_of(work, struct rcu_state, wakeup_work); + + /* Wake up rcu_gp_kthread() to start the grace period. */ +- wake_up(&rsp->gp_wq); ++ swait_wake(&rsp->gp_wq); + } + + /* +@@ -1701,7 +1701,7 @@ static void rcu_report_qs_rsp(struct rcu + { + WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); + raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags); +- wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */ ++ swait_wake(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */ + } + + /* +@@ -2266,7 +2266,8 @@ static void force_quiescent_state(struct + } + rsp->gp_flags |= RCU_GP_FLAG_FQS; + raw_spin_unlock_irqrestore(&rnp_old->lock, flags); +- wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */ ++ /* Memory barrier implied by wake_up() path. */ ++ swait_wake(&rsp->gp_wq); + } + + /* +@@ -3263,7 +3264,7 @@ static void __init rcu_init_one(struct r + } + + rsp->rda = rda; +- init_waitqueue_head(&rsp->gp_wq); ++ init_swait_head(&rsp->gp_wq); + init_irq_work(&rsp->wakeup_work, rsp_wakeup); + rnp = rsp->level[rcu_num_lvls - 1]; + for_each_possible_cpu(i) { +Index: linux-stable/kernel/rcutree.h +=================================================================== +--- linux-stable.orig/kernel/rcutree.h ++++ linux-stable/kernel/rcutree.h +@@ -388,7 +388,7 @@ struct rcu_state { + unsigned long gpnum; /* Current gp number. */ + unsigned long completed; /* # of last completed gp. */ + struct task_struct *gp_kthread; /* Task for grace periods. */ +- wait_queue_head_t gp_wq; /* Where GP task waits. */ ++ struct swait_head gp_wq; /* Where GP task waits. */ + int gp_flags; /* Commands for GP task. */ + + /* End of fields guarded by root rcu_node's lock. 
*/ diff --git a/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch new file mode 100644 index 000000000..91050f5bb --- /dev/null +++ b/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch @@ -0,0 +1,68 @@ +Subject: [UPSTREAM]net,RT:REmove preemption disabling in netif_rx() +From: Priyanka Jain +Date: Thu, 17 May 2012 09:35:11 +0530 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +1)enqueue_to_backlog() (called from netif_rx) should be + bind to a particluar CPU. This can be achieved by + disabling migration. No need to disable preemption + +2)Fixes crash "BUG: scheduling while atomic: ksoftirqd" + in case of RT. + If preemption is disabled, enqueue_to_backog() is called + in atomic context. And if backlog exceeds its count, + kfree_skb() is called. But in RT, kfree_skb() might + gets scheduled out, so it expects non atomic context. + +3)When CONFIG_PREEMPT_RT_FULL is not defined, + migrate_enable(), migrate_disable() maps to + preempt_enable() and preempt_disable(), so no + change in functionality in case of non-RT. + +-Replace preempt_enable(), preempt_disable() with + migrate_enable(), migrate_disable() respectively +-Replace get_cpu(), put_cpu() with get_cpu_light(), + put_cpu_light() respectively + +Signed-off-by: Priyanka Jain +Acked-by: Rajan Srivastava +Cc: +Link: http://lkml.kernel.org/r/1337227511-2271-1-git-send-email-Priyanka.Jain@freescale.com +Cc: stable-rt@vger.kernel.org +Signed-off-by: Thomas Gleixner +--- + Testing: Tested successfully on p4080ds(8-core SMP system) + + net/core/dev.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +Index: linux-stable/net/core/dev.c +=================================================================== +--- linux-stable.orig/net/core/dev.c ++++ linux-stable/net/core/dev.c +@@ -3178,7 +3178,7 @@ int netif_rx(struct sk_buff *skb) + struct rps_dev_flow voidflow, *rflow = &voidflow; + int cpu; + +- preempt_disable(); ++ migrate_disable(); + rcu_read_lock(); + + cpu = get_rps_cpu(skb->dev, skb, &rflow); +@@ -3188,13 +3188,13 @@ int netif_rx(struct sk_buff *skb) + ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); + + rcu_read_unlock(); +- preempt_enable(); ++ migrate_enable(); + } else + #endif + { + unsigned int qtail; +- ret = enqueue_to_backlog(skb, get_cpu(), &qtail); +- put_cpu(); ++ ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail); ++ put_cpu_light(); + } + return ret; + } diff --git a/debian/patches/features/all/rt/usb-fix-mouse-problem-copying-large-data.patch b/debian/patches/features/all/rt/usb-fix-mouse-problem-copying-large-data.patch new file mode 100644 index 000000000..f3c81ddd8 --- /dev/null +++ b/debian/patches/features/all/rt/usb-fix-mouse-problem-copying-large-data.patch @@ -0,0 +1,39 @@ +From: Wu Zhangjin +Date: Mon, 4 Jan 2010 11:33:02 +0800 +Subject: USB: Fix the mouse problem when copying large amounts of data +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +When copying large amounts of data between the USB storage devices and +the hard disk, the USB mouse will not work, this patch fixes it. 
+ +[NOTE: This problem have been found in the Loongson family machines, not +sure whether it is producible on other platforms] + +Signed-off-by: Hu Hongbing +Signed-off-by: Wu Zhangjin + +--- + drivers/usb/host/ohci-hcd.c | 10 +++++++--- + 1 file changed, 7 insertions(+), 3 deletions(-) + +Index: linux-stable/drivers/usb/host/ohci-hcd.c +=================================================================== +--- linux-stable.orig/drivers/usb/host/ohci-hcd.c ++++ linux-stable/drivers/usb/host/ohci-hcd.c +@@ -861,9 +861,13 @@ static irqreturn_t ohci_irq (struct usb_ + } + + if (ints & OHCI_INTR_WDH) { +- spin_lock (&ohci->lock); +- dl_done_list (ohci); +- spin_unlock (&ohci->lock); ++ if (ohci->hcca->done_head == 0) { ++ ints &= ~OHCI_INTR_WDH; ++ } else { ++ spin_lock (&ohci->lock); ++ dl_done_list (ohci); ++ spin_unlock (&ohci->lock); ++ } + } + + if (quirk_zfmicro(ohci) && (ints & OHCI_INTR_SF)) { diff --git a/debian/patches/features/all/rt/usb-hcd-use-local-irq-nort.patch b/debian/patches/features/all/rt/usb-hcd-use-local-irq-nort.patch new file mode 100644 index 000000000..4f393ff58 --- /dev/null +++ b/debian/patches/features/all/rt/usb-hcd-use-local-irq-nort.patch @@ -0,0 +1,37 @@ +From: Steven Rostedt +Date: Fri, 3 Jul 2009 08:44:26 -0500 +Subject: usb: Use local_irq_*_nort() variants +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +[ tglx: Now that irqf_disabled is dead we should kill that ] + +Signed-off-by: Steven Rostedt +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + drivers/usb/core/hcd.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +Index: linux-stable/drivers/usb/core/hcd.c +=================================================================== +--- linux-stable.orig/drivers/usb/core/hcd.c ++++ linux-stable/drivers/usb/core/hcd.c +@@ -2221,7 +2221,7 @@ irqreturn_t usb_hcd_irq (int irq, void * + * when the first handler doesn't use it. So let's just + * assume it's never used. + */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + + if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd))) + rc = IRQ_NONE; +@@ -2230,7 +2230,7 @@ irqreturn_t usb_hcd_irq (int irq, void * + else + rc = IRQ_HANDLED; + +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + return rc; + } + EXPORT_SYMBOL_GPL(usb_hcd_irq); diff --git a/debian/patches/features/all/rt/user-use-local-irq-nort.patch b/debian/patches/features/all/rt/user-use-local-irq-nort.patch new file mode 100644 index 000000000..9e7360952 --- /dev/null +++ b/debian/patches/features/all/rt/user-use-local-irq-nort.patch @@ -0,0 +1,32 @@ +From: Thomas Gleixner +Date: Tue, 21 Jul 2009 23:06:05 +0200 +Subject: core: Do not disable interrupts on RT in kernel/users.c +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Use the local_irq_*_nort variants to reduce latencies in RT. The code +is serialized by the locks. No need to disable interrupts. 
+ +Signed-off-by: Thomas Gleixner + +--- + kernel/user.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +Index: linux-stable/kernel/user.c +=================================================================== +--- linux-stable.orig/kernel/user.c ++++ linux-stable/kernel/user.c +@@ -156,11 +156,11 @@ void free_uid(struct user_struct *up) + if (!up) + return; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) + free_user(up, flags); + else +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + + struct user_struct *alloc_uid(kuid_t uid) diff --git a/debian/patches/features/all/rt/vtime-split-lock-and-seqcount.patch b/debian/patches/features/all/rt/vtime-split-lock-and-seqcount.patch new file mode 100644 index 000000000..a9cce6672 --- /dev/null +++ b/debian/patches/features/all/rt/vtime-split-lock-and-seqcount.patch @@ -0,0 +1,214 @@ +Subject: vtime-split-lock-and-seqcount.patch +From: Thomas Gleixner +Date: Tue, 23 Jul 2013 15:45:51 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/init_task.h | 3 +- + include/linux/sched.h | 3 +- + kernel/fork.c | 3 +- + kernel/sched/cputime.c | 62 +++++++++++++++++++++++++++++----------------- + 4 files changed, 46 insertions(+), 25 deletions(-) + +Index: linux-stable/include/linux/init_task.h +=================================================================== +--- linux-stable.orig/include/linux/init_task.h ++++ linux-stable/include/linux/init_task.h +@@ -145,7 +145,8 @@ extern struct task_group root_task_group + + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN + # define INIT_VTIME(tsk) \ +- .vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock), \ ++ .vtime_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.vtime_lock), \ ++ .vtime_seq = SEQCNT_ZERO, \ + .vtime_snap = 0, \ + .vtime_snap_whence = VTIME_SYS, + #else +Index: linux-stable/include/linux/sched.h +=================================================================== +--- linux-stable.orig/include/linux/sched.h ++++ linux-stable/include/linux/sched.h +@@ -1167,7 +1167,8 @@ struct task_struct { + struct cputime prev_cputime; + #endif + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +- seqlock_t vtime_seqlock; ++ raw_spinlock_t vtime_lock; ++ seqcount_t vtime_seq; + unsigned long long vtime_snap; + enum { + VTIME_SLEEPING = 0, +Index: linux-stable/kernel/fork.c +=================================================================== +--- linux-stable.orig/kernel/fork.c ++++ linux-stable/kernel/fork.c +@@ -1238,7 +1238,8 @@ static struct task_struct *copy_process( + p->prev_cputime.utime = p->prev_cputime.stime = 0; + #endif + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +- seqlock_init(&p->vtime_seqlock); ++ raw_spin_lock_init(&p->vtime_lock); ++ seqcount_init(&p->vtime_seq); + p->vtime_snap = 0; + p->vtime_snap_whence = VTIME_SLEEPING; + #endif +Index: linux-stable/kernel/sched/cputime.c +=================================================================== +--- linux-stable.orig/kernel/sched/cputime.c ++++ linux-stable/kernel/sched/cputime.c +@@ -668,9 +668,11 @@ void vtime_account_system(struct task_st + if (!vtime_accounting_enabled()) + return; + +- write_seqlock(&tsk->vtime_seqlock); ++ raw_spin_lock(&tsk->vtime_lock); ++ write_seqcount_begin(&tsk->vtime_seq); + __vtime_account_system(tsk); +- write_sequnlock(&tsk->vtime_seqlock); ++ write_seqcount_end(&tsk->vtime_seq); ++ raw_spin_unlock(&tsk->vtime_lock); + } + + void 
vtime_account_irq_exit(struct task_struct *tsk) +@@ -678,11 +680,13 @@ void vtime_account_irq_exit(struct task_ + if (!vtime_accounting_enabled()) + return; + +- write_seqlock(&tsk->vtime_seqlock); ++ raw_spin_lock(&tsk->vtime_lock); ++ write_seqcount_begin(&tsk->vtime_seq); + if (context_tracking_in_user()) + tsk->vtime_snap_whence = VTIME_USER; + __vtime_account_system(tsk); +- write_sequnlock(&tsk->vtime_seqlock); ++ write_seqcount_end(&tsk->vtime_seq); ++ raw_spin_unlock(&tsk->vtime_lock); + } + + void vtime_account_user(struct task_struct *tsk) +@@ -694,10 +698,12 @@ void vtime_account_user(struct task_stru + + delta_cpu = get_vtime_delta(tsk); + +- write_seqlock(&tsk->vtime_seqlock); ++ raw_spin_lock(&tsk->vtime_lock); ++ write_seqcount_begin(&tsk->vtime_seq); + tsk->vtime_snap_whence = VTIME_SYS; + account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu)); +- write_sequnlock(&tsk->vtime_seqlock); ++ write_seqcount_end(&tsk->vtime_seq); ++ raw_spin_unlock(&tsk->vtime_lock); + } + + void vtime_user_enter(struct task_struct *tsk) +@@ -705,26 +711,32 @@ void vtime_user_enter(struct task_struct + if (!vtime_accounting_enabled()) + return; + +- write_seqlock(&tsk->vtime_seqlock); ++ raw_spin_lock(&tsk->vtime_lock); ++ write_seqcount_begin(&tsk->vtime_seq); + tsk->vtime_snap_whence = VTIME_USER; + __vtime_account_system(tsk); +- write_sequnlock(&tsk->vtime_seqlock); ++ write_seqcount_end(&tsk->vtime_seq); ++ raw_spin_unlock(&tsk->vtime_lock); + } + + void vtime_guest_enter(struct task_struct *tsk) + { +- write_seqlock(&tsk->vtime_seqlock); ++ raw_spin_lock(&tsk->vtime_lock); ++ write_seqcount_begin(&tsk->vtime_seq); + __vtime_account_system(tsk); + current->flags |= PF_VCPU; +- write_sequnlock(&tsk->vtime_seqlock); ++ write_seqcount_end(&tsk->vtime_seq); ++ raw_spin_unlock(&tsk->vtime_lock); + } + + void vtime_guest_exit(struct task_struct *tsk) + { +- write_seqlock(&tsk->vtime_seqlock); ++ raw_spin_lock(&tsk->vtime_lock); ++ write_seqcount_begin(&tsk->vtime_seq); + __vtime_account_system(tsk); + current->flags &= ~PF_VCPU; +- write_sequnlock(&tsk->vtime_seqlock); ++ write_seqcount_end(&tsk->vtime_seq); ++ raw_spin_unlock(&tsk->vtime_lock); + } + + void vtime_account_idle(struct task_struct *tsk) +@@ -741,24 +753,30 @@ bool vtime_accounting_enabled(void) + + void arch_vtime_task_switch(struct task_struct *prev) + { +- write_seqlock(&prev->vtime_seqlock); ++ raw_spin_lock(&prev->vtime_lock); ++ write_seqcount_begin(&prev->vtime_seq); + prev->vtime_snap_whence = VTIME_SLEEPING; +- write_sequnlock(&prev->vtime_seqlock); ++ write_seqcount_end(&prev->vtime_seq); ++ raw_spin_unlock(&prev->vtime_lock); + +- write_seqlock(¤t->vtime_seqlock); ++ raw_spin_lock(¤t->vtime_lock); ++ write_seqcount_begin(¤t->vtime_seq); + current->vtime_snap_whence = VTIME_SYS; + current->vtime_snap = sched_clock_cpu(smp_processor_id()); +- write_sequnlock(¤t->vtime_seqlock); ++ write_seqcount_end(¤t->vtime_seq); ++ raw_spin_unlock(¤t->vtime_lock); + } + + void vtime_init_idle(struct task_struct *t, int cpu) + { + unsigned long flags; + +- write_seqlock_irqsave(&t->vtime_seqlock, flags); ++ raw_spin_lock_irqsave(&t->vtime_lock, flags); ++ write_seqcount_begin(&t->vtime_seq); + t->vtime_snap_whence = VTIME_SYS; + t->vtime_snap = sched_clock_cpu(cpu); +- write_sequnlock_irqrestore(&t->vtime_seqlock, flags); ++ write_seqcount_end(&t->vtime_seq); ++ raw_spin_unlock_irqrestore(&t->vtime_lock, flags); + } + + cputime_t task_gtime(struct task_struct *t) +@@ -767,13 +785,13 @@ cputime_t task_gtime(struct task_struct + 
cputime_t gtime; + + do { +- seq = read_seqbegin(&t->vtime_seqlock); ++ seq = read_seqcount_begin(&t->vtime_seq); + + gtime = t->gtime; + if (t->flags & PF_VCPU) + gtime += vtime_delta(t); + +- } while (read_seqretry(&t->vtime_seqlock, seq)); ++ } while (read_seqcount_retry(&t->vtime_seq, seq)); + + return gtime; + } +@@ -796,7 +814,7 @@ fetch_task_cputime(struct task_struct *t + *udelta = 0; + *sdelta = 0; + +- seq = read_seqbegin(&t->vtime_seqlock); ++ seq = read_seqcount_begin(&t->vtime_seq); + + if (u_dst) + *u_dst = *u_src; +@@ -820,7 +838,7 @@ fetch_task_cputime(struct task_struct *t + if (t->vtime_snap_whence == VTIME_SYS) + *sdelta = delta; + } +- } while (read_seqretry(&t->vtime_seqlock, seq)); ++ } while (read_seqcount_retry(&t->vtime_seq, seq)); + } + + diff --git a/debian/patches/features/all/rt/wait-simple-implementation.patch b/debian/patches/features/all/rt/wait-simple-implementation.patch new file mode 100644 index 000000000..ac2a280d8 --- /dev/null +++ b/debian/patches/features/all/rt/wait-simple-implementation.patch @@ -0,0 +1,344 @@ +From: Thomas Gleixner +Date: Mon Dec 12 12:29:04 2011 +0100 +Subject: wait-simple: Simple waitqueue implementation + +wait_queue is a swiss army knife and in most of the cases the +complexity is not needed. For RT waitqueues are a constant source of +trouble as we can't convert the head lock to a raw spinlock due to +fancy and long lasting callbacks. + +Provide a slim version, which allows RT to replace wait queues. This +should go mainline as well, as it lowers memory consumption and +runtime overhead. + +Signed-off-by: Thomas Gleixner +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +--- + include/linux/wait-simple.h | 231 ++++++++++++++++++++++++++++++++++++++++++++ + kernel/Makefile | 2 + kernel/wait-simple.c | 68 ++++++++++++ + 3 files changed, 300 insertions(+), 1 deletion(-) + +Index: linux-stable/include/linux/wait-simple.h +=================================================================== +--- /dev/null ++++ linux-stable/include/linux/wait-simple.h +@@ -0,0 +1,231 @@ ++#ifndef _LINUX_WAIT_SIMPLE_H ++#define _LINUX_WAIT_SIMPLE_H ++ ++#include ++#include ++ ++#include ++ ++struct swaiter { ++ struct task_struct *task; ++ struct list_head node; ++}; ++ ++#define DEFINE_SWAITER(name) \ ++ struct swaiter name = { \ ++ .task = current, \ ++ .node = LIST_HEAD_INIT((name).node), \ ++ } ++ ++struct swait_head { ++ raw_spinlock_t lock; ++ struct list_head list; ++}; ++ ++#define DEFINE_SWAIT_HEAD(name) \ ++ struct swait_head name = { \ ++ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ ++ .list = LIST_HEAD_INIT((name).list), \ ++ } ++ ++extern void __init_swait_head(struct swait_head *h, struct lock_class_key *key); ++ ++#define init_swait_head(swh) \ ++ do { \ ++ static struct lock_class_key __key; \ ++ \ ++ __init_swait_head((swh), &__key); \ ++ } while (0) ++ ++/* ++ * Waiter functions ++ */ ++static inline bool swaiter_enqueued(struct swaiter *w) ++{ ++ return w->task != NULL; ++} ++ ++extern void swait_prepare(struct swait_head *head, struct swaiter *w, int state); ++extern void swait_finish(struct swait_head *head, struct swaiter *w); ++ ++/* ++ * Adds w to head->list. Must be called with head->lock locked. ++ */ ++static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w) ++{ ++ list_add(&w->node, &head->list); ++} ++ ++/* ++ * Removes w from head->list. Must be called with head->lock locked. 
++ */ ++static inline void __swait_dequeue(struct swaiter *w) ++{ ++ list_del_init(&w->node); ++} ++ ++/* ++ * Check whether a head has waiters enqueued ++ */ ++static inline bool swait_head_has_waiters(struct swait_head *h) ++{ ++ return !list_empty(&h->list); ++} ++ ++/* ++ * Wakeup functions ++ */ ++extern int __swait_wake(struct swait_head *head, unsigned int state); ++ ++static inline int swait_wake(struct swait_head *head) ++{ ++ return swait_head_has_waiters(head) ? ++ __swait_wake(head, TASK_NORMAL) : 0; ++} ++ ++static inline int swait_wake_interruptible(struct swait_head *head) ++{ ++ return swait_head_has_waiters(head) ? ++ __swait_wake(head, TASK_INTERRUPTIBLE) : 0; ++} ++ ++/* ++ * Event API ++ */ ++ ++#define __swait_event(wq, condition) \ ++do { \ ++ DEFINE_SWAITER(__wait); \ ++ \ ++ for (;;) { \ ++ swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \ ++ if (condition) \ ++ break; \ ++ schedule(); \ ++ } \ ++ swait_finish(&wq, &__wait); \ ++} while (0) ++ ++/** ++ * swait_event - sleep until a condition gets true ++ * @wq: the waitqueue to wait on ++ * @condition: a C expression for the event to wait for ++ * ++ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the ++ * @condition evaluates to true. The @condition is checked each time ++ * the waitqueue @wq is woken up. ++ * ++ * wake_up() has to be called after changing any variable that could ++ * change the result of the wait condition. ++ */ ++#define swait_event(wq, condition) \ ++do { \ ++ if (condition) \ ++ break; \ ++ __swait_event(wq, condition); \ ++} while (0) ++ ++#define __swait_event_interruptible(wq, condition, ret) \ ++do { \ ++ DEFINE_SWAITER(__wait); \ ++ \ ++ for (;;) { \ ++ swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \ ++ if (condition) \ ++ break; \ ++ if (signal_pending(current)) { \ ++ ret = -ERESTARTSYS; \ ++ break; \ ++ } \ ++ schedule(); \ ++ } \ ++ swait_finish(&wq, &__wait); \ ++} while (0) ++ ++#define __swait_event_interruptible_timeout(wq, condition, ret) \ ++do { \ ++ DEFINE_SWAITER(__wait); \ ++ \ ++ for (;;) { \ ++ swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \ ++ if (condition) \ ++ break; \ ++ if (signal_pending(current)) { \ ++ ret = -ERESTARTSYS; \ ++ break; \ ++ } \ ++ ret = schedule_timeout(ret); \ ++ if (!ret) \ ++ break; \ ++ } \ ++ swait_finish(&wq, &__wait); \ ++} while (0) ++ ++/** ++ * swait_event_interruptible - sleep until a condition gets true ++ * @wq: the waitqueue to wait on ++ * @condition: a C expression for the event to wait for ++ * ++ * The process is put to sleep (TASK_INTERRUPTIBLE) until the ++ * @condition evaluates to true. The @condition is checked each time ++ * the waitqueue @wq is woken up. ++ * ++ * wake_up() has to be called after changing any variable that could ++ * change the result of the wait condition. 
++ */ ++#define swait_event_interruptible(wq, condition) \ ++({ \ ++ int __ret = 0; \ ++ if (!(condition)) \ ++ __swait_event_interruptible(wq, condition, __ret); \ ++ __ret; \ ++}) ++ ++#define swait_event_interruptible_timeout(wq, condition, timeout) \ ++({ \ ++ int __ret = timeout; \ ++ if (!(condition)) \ ++ __swait_event_interruptible_timeout(wq, condition, __ret); \ ++ __ret; \ ++}) ++ ++#define __swait_event_timeout(wq, condition, ret) \ ++do { \ ++ DEFINE_SWAITER(__wait); \ ++ \ ++ for (;;) { \ ++ swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \ ++ if (condition) \ ++ break; \ ++ ret = schedule_timeout(ret); \ ++ if (!ret) \ ++ break; \ ++ } \ ++ swait_finish(&wq, &__wait); \ ++} while (0) ++ ++/** ++ * swait_event_timeout - sleep until a condition gets true or a timeout elapses ++ * @wq: the waitqueue to wait on ++ * @condition: a C expression for the event to wait for ++ * @timeout: timeout, in jiffies ++ * ++ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the ++ * @condition evaluates to true. The @condition is checked each time ++ * the waitqueue @wq is woken up. ++ * ++ * wake_up() has to be called after changing any variable that could ++ * change the result of the wait condition. ++ * ++ * The function returns 0 if the @timeout elapsed, and the remaining ++ * jiffies if the condition evaluated to true before the timeout elapsed. ++ */ ++#define swait_event_timeout(wq, condition, timeout) \ ++({ \ ++ long __ret = timeout; \ ++ if (!(condition)) \ ++ __swait_event_timeout(wq, condition, __ret); \ ++ __ret; \ ++}) ++ ++#endif +Index: linux-stable/kernel/Makefile +=================================================================== +--- linux-stable.orig/kernel/Makefile ++++ linux-stable/kernel/Makefile +@@ -10,7 +10,7 @@ obj-y = fork.o exec_domain.o panic.o + kthread.o wait.o sys_ni.o posix-cpu-timers.o \ + hrtimer.o nsproxy.o srcu.o semaphore.o \ + notifier.o ksysfs.o cred.o \ +- async.o range.o groups.o lglock.o smpboot.o ++ async.o range.o groups.o lglock.o smpboot.o wait-simple.o + + ifdef CONFIG_FUNCTION_TRACER + # Do not trace debug files and internal ftrace files +Index: linux-stable/kernel/wait-simple.c +=================================================================== +--- /dev/null ++++ linux-stable/kernel/wait-simple.c +@@ -0,0 +1,68 @@ ++/* ++ * Simple waitqueues without fancy flags and callbacks ++ * ++ * (C) 2011 Thomas Gleixner ++ * ++ * Based on kernel/wait.c ++ * ++ * For licencing details see kernel-base/COPYING ++ */ ++#include ++#include ++#include ++#include ++ ++void __init_swait_head(struct swait_head *head, struct lock_class_key *key) ++{ ++ raw_spin_lock_init(&head->lock); ++ lockdep_set_class(&head->lock, key); ++ INIT_LIST_HEAD(&head->list); ++} ++EXPORT_SYMBOL(__init_swait_head); ++ ++void swait_prepare(struct swait_head *head, struct swaiter *w, int state) ++{ ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&head->lock, flags); ++ w->task = current; ++ if (list_empty(&w->node)) ++ __swait_enqueue(head, w); ++ set_current_state(state); ++ raw_spin_unlock_irqrestore(&head->lock, flags); ++} ++EXPORT_SYMBOL(swait_prepare); ++ ++void swait_finish(struct swait_head *head, struct swaiter *w) ++{ ++ unsigned long flags; ++ ++ __set_current_state(TASK_RUNNING); ++ if (w->task) { ++ raw_spin_lock_irqsave(&head->lock, flags); ++ __swait_dequeue(w); ++ raw_spin_unlock_irqrestore(&head->lock, flags); ++ } ++} ++EXPORT_SYMBOL(swait_finish); ++ ++int __swait_wake(struct swait_head *head, unsigned int state) ++{ ++ struct swaiter *curr, *next; 
++ unsigned long flags; ++ int woken = 0; ++ ++ raw_spin_lock_irqsave(&head->lock, flags); ++ ++ list_for_each_entry_safe(curr, next, &head->list, node) { ++ if (wake_up_state(curr->task, state)) { ++ __swait_dequeue(curr); ++ curr->task = NULL; ++ woken++; ++ } ++ } ++ ++ raw_spin_unlock_irqrestore(&head->lock, flags); ++ return woken; ++} ++EXPORT_SYMBOL(__swait_wake); diff --git a/debian/patches/features/all/rt/wait-simple-rework-for-completions.patch b/debian/patches/features/all/rt/wait-simple-rework-for-completions.patch new file mode 100644 index 000000000..83626b170 --- /dev/null +++ b/debian/patches/features/all/rt/wait-simple-rework-for-completions.patch @@ -0,0 +1,214 @@ +Subject: wait-simple: Rework for use with completions +From: Thomas Gleixner +Date: Thu, 10 Jan 2013 11:47:35 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + include/linux/wait-simple.h | 56 +++++++---------------------------- + kernel/wait-simple.c | 69 ++++++++++++++++++++++++++++++++++++++------ + 2 files changed, 72 insertions(+), 53 deletions(-) + +Index: linux-stable/include/linux/wait-simple.h +=================================================================== +--- linux-stable.orig/include/linux/wait-simple.h ++++ linux-stable/include/linux/wait-simple.h +@@ -22,12 +22,14 @@ struct swait_head { + struct list_head list; + }; + +-#define DEFINE_SWAIT_HEAD(name) \ +- struct swait_head name = { \ ++#define SWAIT_HEAD_INITIALIZER(name) { \ + .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ + .list = LIST_HEAD_INIT((name).list), \ + } + ++#define DEFINE_SWAIT_HEAD(name) \ ++ struct swait_head name = SWAIT_HEAD_INITIALIZER(name) ++ + extern void __init_swait_head(struct swait_head *h, struct lock_class_key *key); + + #define init_swait_head(swh) \ +@@ -40,59 +42,25 @@ extern void __init_swait_head(struct swa + /* + * Waiter functions + */ +-static inline bool swaiter_enqueued(struct swaiter *w) +-{ +- return w->task != NULL; +-} +- ++extern void swait_prepare_locked(struct swait_head *head, struct swaiter *w); + extern void swait_prepare(struct swait_head *head, struct swaiter *w, int state); ++extern void swait_finish_locked(struct swait_head *head, struct swaiter *w); + extern void swait_finish(struct swait_head *head, struct swaiter *w); + + /* +- * Adds w to head->list. Must be called with head->lock locked. +- */ +-static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w) +-{ +- list_add(&w->node, &head->list); +-} +- +-/* +- * Removes w from head->list. Must be called with head->lock locked. +- */ +-static inline void __swait_dequeue(struct swaiter *w) +-{ +- list_del_init(&w->node); +-} +- +-/* +- * Check whether a head has waiters enqueued +- */ +-static inline bool swait_head_has_waiters(struct swait_head *h) +-{ +- return !list_empty(&h->list); +-} +- +-/* + * Wakeup functions + */ +-extern int __swait_wake(struct swait_head *head, unsigned int state); ++extern unsigned int __swait_wake(struct swait_head *head, unsigned int state, unsigned int num); ++extern unsigned int __swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num); + +-static inline int swait_wake(struct swait_head *head) +-{ +- return swait_head_has_waiters(head) ? +- __swait_wake(head, TASK_NORMAL) : 0; +-} +- +-static inline int swait_wake_interruptible(struct swait_head *head) +-{ +- return swait_head_has_waiters(head) ? 
+- __swait_wake(head, TASK_INTERRUPTIBLE) : 0; +-} ++#define swait_wake(head) __swait_wake(head, TASK_NORMAL, 1) ++#define swait_wake_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 1) ++#define swait_wake_all(head) __swait_wake(head, TASK_NORMAL, 0) ++#define swait_wake_all_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 0) + + /* + * Event API + */ +- + #define __swait_event(wq, condition) \ + do { \ + DEFINE_SWAITER(__wait); \ +Index: linux-stable/kernel/wait-simple.c +=================================================================== +--- linux-stable.orig/kernel/wait-simple.c ++++ linux-stable/kernel/wait-simple.c +@@ -12,6 +12,24 @@ + #include + #include + ++/* Adds w to head->list. Must be called with head->lock locked. */ ++static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w) ++{ ++ list_add(&w->node, &head->list); ++} ++ ++/* Removes w from head->list. Must be called with head->lock locked. */ ++static inline void __swait_dequeue(struct swaiter *w) ++{ ++ list_del_init(&w->node); ++} ++ ++/* Check whether a head has waiters enqueued */ ++static inline bool swait_head_has_waiters(struct swait_head *h) ++{ ++ return !list_empty(&h->list); ++} ++ + void __init_swait_head(struct swait_head *head, struct lock_class_key *key) + { + raw_spin_lock_init(&head->lock); +@@ -20,19 +38,31 @@ void __init_swait_head(struct swait_head + } + EXPORT_SYMBOL(__init_swait_head); + ++void swait_prepare_locked(struct swait_head *head, struct swaiter *w) ++{ ++ w->task = current; ++ if (list_empty(&w->node)) ++ __swait_enqueue(head, w); ++} ++ + void swait_prepare(struct swait_head *head, struct swaiter *w, int state) + { + unsigned long flags; + + raw_spin_lock_irqsave(&head->lock, flags); +- w->task = current; +- if (list_empty(&w->node)) +- __swait_enqueue(head, w); +- set_current_state(state); ++ swait_prepare_locked(head, w); ++ __set_current_state(state); + raw_spin_unlock_irqrestore(&head->lock, flags); + } + EXPORT_SYMBOL(swait_prepare); + ++void swait_finish_locked(struct swait_head *head, struct swaiter *w) ++{ ++ __set_current_state(TASK_RUNNING); ++ if (w->task) ++ __swait_dequeue(w); ++} ++ + void swait_finish(struct swait_head *head, struct swaiter *w) + { + unsigned long flags; +@@ -46,22 +76,43 @@ void swait_finish(struct swait_head *hea + } + EXPORT_SYMBOL(swait_finish); + +-int __swait_wake(struct swait_head *head, unsigned int state) ++unsigned int ++__swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num) + { + struct swaiter *curr, *next; +- unsigned long flags; + int woken = 0; + +- raw_spin_lock_irqsave(&head->lock, flags); +- + list_for_each_entry_safe(curr, next, &head->list, node) { + if (wake_up_state(curr->task, state)) { + __swait_dequeue(curr); ++ /* ++ * The waiting task can free the waiter as ++ * soon as curr->task = NULL is written, ++ * without taking any locks. A memory barrier ++ * is required here to prevent the following ++ * store to curr->task from getting ahead of ++ * the dequeue operation. 
++ */ ++ smp_wmb(); + curr->task = NULL; +- woken++; ++ if (++woken == num) ++ break; + } + } ++ return woken; ++} ++ ++unsigned int ++__swait_wake(struct swait_head *head, unsigned int state, unsigned int num) ++{ ++ unsigned long flags; ++ int woken; + ++ if (!swait_head_has_waiters(head)) ++ return 0; ++ ++ raw_spin_lock_irqsave(&head->lock, flags); ++ woken = __swait_wake_locked(head, state, num); + raw_spin_unlock_irqrestore(&head->lock, flags); + return woken; + } diff --git a/debian/patches/features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch b/debian/patches/features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch new file mode 100644 index 000000000..bb7ee2121 --- /dev/null +++ b/debian/patches/features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch @@ -0,0 +1,31 @@ +Subject: hack.patch +From: Thomas Gleixner +Date: Mon, 01 Jul 2013 11:02:42 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/workqueue.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +Index: linux-stable/kernel/workqueue.c +=================================================================== +--- linux-stable.orig/kernel/workqueue.c ++++ linux-stable/kernel/workqueue.c +@@ -48,6 +48,7 @@ + #include + #include + #include ++#include + + #include "workqueue_internal.h" + +@@ -1235,7 +1236,7 @@ fail: + local_unlock_irqrestore(pendingb_lock, *flags); + if (work_is_canceling(work)) + return -ENOENT; +- cpu_relax(); ++ cpu_chill(); + return -EAGAIN; + } + diff --git a/debian/patches/features/all/rt/workqueue-distangle-from-rq-lock.patch b/debian/patches/features/all/rt/workqueue-distangle-from-rq-lock.patch new file mode 100644 index 000000000..707041458 --- /dev/null +++ b/debian/patches/features/all/rt/workqueue-distangle-from-rq-lock.patch @@ -0,0 +1,266 @@ +From: Thomas Gleixner +Date: Wed Jun 22 19:47:03 2011 +0200 +Subject: sched: Distangle worker accounting from rqlock + +The worker accounting for cpu bound workers is plugged into the core +scheduler code and the wakeup code. This is not a hard requirement and +can be avoided by keeping track of the state in the workqueue code +itself. + +Keep track of the sleeping state in the worker itself and call the +notifier before entering the core scheduler. There might be false +positives when the task is woken between that call and actually +scheduling, but that's not really different from scheduling and being +woken immediately after switching away. There is also no harm from +updating nr_running when the task returns from scheduling instead of +accounting it in the wakeup code. 
+ +Signed-off-by: Thomas Gleixner +Cc: Peter Zijlstra +Cc: Tejun Heo +Cc: Jens Axboe +Cc: Linus Torvalds +Link: http://lkml.kernel.org/r/20110622174919.135236139@linutronix.de +Signed-off-by: Thomas Gleixner +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +--- + kernel/sched/core.c | 70 +++++++++----------------------------------- + kernel/workqueue.c | 55 ++++++++++++++-------------------- + kernel/workqueue_internal.h | 5 +-- + 3 files changed, 41 insertions(+), 89 deletions(-) + +Index: linux-stable/kernel/sched/core.c +=================================================================== +--- linux-stable.orig/kernel/sched/core.c ++++ linux-stable/kernel/sched/core.c +@@ -1330,10 +1330,6 @@ static void ttwu_activate(struct rq *rq, + { + activate_task(rq, p, en_flags); + p->on_rq = 1; +- +- /* if a worker is waking up, notify workqueue */ +- if (p->flags & PF_WQ_WORKER) +- wq_worker_waking_up(p, cpu_of(rq)); + } + + /* +@@ -1563,42 +1559,6 @@ out: + } + + /** +- * try_to_wake_up_local - try to wake up a local task with rq lock held +- * @p: the thread to be awakened +- * +- * Put @p on the run-queue if it's not already there. The caller must +- * ensure that this_rq() is locked, @p is bound to this_rq() and not +- * the current task. +- */ +-static void try_to_wake_up_local(struct task_struct *p) +-{ +- struct rq *rq = task_rq(p); +- +- if (WARN_ON_ONCE(rq != this_rq()) || +- WARN_ON_ONCE(p == current)) +- return; +- +- lockdep_assert_held(&rq->lock); +- +- if (!raw_spin_trylock(&p->pi_lock)) { +- raw_spin_unlock(&rq->lock); +- raw_spin_lock(&p->pi_lock); +- raw_spin_lock(&rq->lock); +- } +- +- if (!(p->state & TASK_NORMAL)) +- goto out; +- +- if (!p->on_rq) +- ttwu_activate(rq, p, ENQUEUE_WAKEUP); +- +- ttwu_do_wakeup(rq, p, 0); +- ttwu_stat(p, smp_processor_id(), 0); +-out: +- raw_spin_unlock(&p->pi_lock); +-} +- +-/** + * wake_up_process - Wake up a specific process + * @p: The process to be woken up. + * +@@ -3155,21 +3115,6 @@ need_resched: + } else { + deactivate_task(rq, prev, DEQUEUE_SLEEP); + prev->on_rq = 0; +- +- /* +- * If a worker went to sleep, notify and ask workqueue +- * whether it wants to wake up a task to maintain +- * concurrency. +- * Only call wake up if prev isn't blocked on a sleeping +- * spin lock. +- */ +- if (prev->flags & PF_WQ_WORKER && !prev->saved_state) { +- struct task_struct *to_wakeup; +- +- to_wakeup = wq_worker_sleeping(prev, cpu); +- if (to_wakeup) +- try_to_wake_up_local(to_wakeup); +- } + } + switch_count = &prev->nvcsw; + } +@@ -3212,6 +3157,14 @@ static inline void sched_submit_work(str + { + if (!tsk->state || tsk_is_pi_blocked(tsk)) + return; ++ ++ /* ++ * If a worker went to sleep, notify and ask workqueue whether ++ * it wants to wake up a task to maintain concurrency. ++ */ ++ if (tsk->flags & PF_WQ_WORKER) ++ wq_worker_sleeping(tsk); ++ + /* + * If we are going to sleep and we have plugged IO queued, + * make sure to submit it to avoid deadlocks. 
+@@ -3220,12 +3173,19 @@ static inline void sched_submit_work(str + blk_schedule_flush_plug(tsk); + } + ++static inline void sched_update_worker(struct task_struct *tsk) ++{ ++ if (tsk->flags & PF_WQ_WORKER) ++ wq_worker_running(tsk); ++} ++ + asmlinkage void __sched schedule(void) + { + struct task_struct *tsk = current; + + sched_submit_work(tsk); + __schedule(); ++ sched_update_worker(tsk); + } + EXPORT_SYMBOL(schedule); + +Index: linux-stable/kernel/workqueue.c +=================================================================== +--- linux-stable.orig/kernel/workqueue.c ++++ linux-stable/kernel/workqueue.c +@@ -789,44 +789,31 @@ static void wake_up_worker(struct worker + } + + /** +- * wq_worker_waking_up - a worker is waking up +- * @task: task waking up +- * @cpu: CPU @task is waking up to ++ * wq_worker_running - a worker is running again ++ * @task: task returning from sleep + * +- * This function is called during try_to_wake_up() when a worker is +- * being awoken. +- * +- * CONTEXT: +- * spin_lock_irq(rq->lock) ++ * This function is called when a worker returns from schedule() + */ +-void wq_worker_waking_up(struct task_struct *task, int cpu) ++void wq_worker_running(struct task_struct *task) + { + struct worker *worker = kthread_data(task); + +- if (!(worker->flags & WORKER_NOT_RUNNING)) { +- WARN_ON_ONCE(worker->pool->cpu != cpu); ++ if (!worker->sleeping) ++ return; ++ if (!(worker->flags & WORKER_NOT_RUNNING)) + atomic_inc(&worker->pool->nr_running); +- } ++ worker->sleeping = 0; + } + + /** + * wq_worker_sleeping - a worker is going to sleep + * @task: task going to sleep +- * @cpu: CPU in question, must be the current CPU number +- * +- * This function is called during schedule() when a busy worker is +- * going to sleep. Worker on the same cpu can be woken up by +- * returning pointer to its task. +- * +- * CONTEXT: +- * spin_lock_irq(rq->lock) +- * +- * RETURNS: +- * Worker task on @cpu to wake up, %NULL if none. ++ * This function is called from schedule() when a busy worker is ++ * going to sleep. + */ +-struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu) ++void wq_worker_sleeping(struct task_struct *task) + { +- struct worker *worker = kthread_data(task), *to_wakeup = NULL; ++ struct worker *next, *worker = kthread_data(task); + struct worker_pool *pool; + + /* +@@ -835,14 +822,15 @@ struct task_struct *wq_worker_sleeping(s + * checking NOT_RUNNING. + */ + if (worker->flags & WORKER_NOT_RUNNING) +- return NULL; ++ return; + + pool = worker->pool; + +- /* this can only happen on the local cpu */ +- if (WARN_ON_ONCE(cpu != raw_smp_processor_id())) +- return NULL; ++ if (WARN_ON_ONCE(worker->sleeping)) ++ return; + ++ worker->sleeping = 1; ++ spin_lock_irq(&pool->lock); + /* + * The counterpart of the following dec_and_test, implied mb, + * worklist not empty test sequence is in insert_work(). +@@ -855,9 +843,12 @@ struct task_struct *wq_worker_sleeping(s + * lock is safe. + */ + if (atomic_dec_and_test(&pool->nr_running) && +- !list_empty(&pool->worklist)) +- to_wakeup = first_worker(pool); +- return to_wakeup ? 
to_wakeup->task : NULL; ++ !list_empty(&pool->worklist)) { ++ next = first_worker(pool); ++ if (next) ++ wake_up_process(next->task); ++ } ++ spin_unlock_irq(&pool->lock); + } + + /** +Index: linux-stable/kernel/workqueue_internal.h +=================================================================== +--- linux-stable.orig/kernel/workqueue_internal.h ++++ linux-stable/kernel/workqueue_internal.h +@@ -41,6 +41,7 @@ struct worker { + unsigned long last_active; /* L: last active timestamp */ + unsigned int flags; /* X: flags */ + int id; /* I: worker id */ ++ int sleeping; /* None */ + + /* + * Opaque string set with work_set_desc(). Printed out with task +@@ -66,7 +67,7 @@ static inline struct worker *current_wq_ + * Scheduler hooks for concurrency managed workqueue. Only to be used from + * sched.c and workqueue.c. + */ +-void wq_worker_waking_up(struct task_struct *task, int cpu); +-struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu); ++void wq_worker_running(struct task_struct *task); ++void wq_worker_sleeping(struct task_struct *task); + + #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */ diff --git a/debian/patches/features/all/rt/workqueue-use-locallock.patch b/debian/patches/features/all/rt/workqueue-use-locallock.patch new file mode 100644 index 000000000..56158831c --- /dev/null +++ b/debian/patches/features/all/rt/workqueue-use-locallock.patch @@ -0,0 +1,153 @@ +Subject: Use local irq lock instead of irq disable regions +From: Thomas Gleixner +Date: Sun, 17 Jul 2011 21:42:26 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + kernel/workqueue.c | 33 ++++++++++++++++++--------------- + 1 file changed, 18 insertions(+), 15 deletions(-) + +Index: linux-stable/kernel/workqueue.c +=================================================================== +--- linux-stable.orig/kernel/workqueue.c ++++ linux-stable/kernel/workqueue.c +@@ -47,6 +47,7 @@ + #include + #include + #include ++#include + + #include "workqueue_internal.h" + +@@ -306,6 +307,8 @@ EXPORT_SYMBOL_GPL(system_unbound_wq); + struct workqueue_struct *system_freezable_wq __read_mostly; + EXPORT_SYMBOL_GPL(system_freezable_wq); + ++static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock); ++ + static int worker_thread(void *__worker); + static void copy_workqueue_attrs(struct workqueue_attrs *to, + const struct workqueue_attrs *from); +@@ -1063,9 +1066,9 @@ static void put_pwq_unlocked(struct pool + * As both pwqs and pools are RCU protected, the + * following lock operations are safe. + */ +- spin_lock_irq(&pwq->pool->lock); ++ local_spin_lock_irq(pendingb_lock, &pwq->pool->lock); + put_pwq(pwq); +- spin_unlock_irq(&pwq->pool->lock); ++ local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock); + } + } + +@@ -1165,7 +1168,7 @@ static int try_to_grab_pending(struct wo + struct worker_pool *pool; + struct pool_workqueue *pwq; + +- local_irq_save(*flags); ++ local_lock_irqsave(pendingb_lock, *flags); + + /* try to steal the timer if it exists */ + if (is_dwork) { +@@ -1229,7 +1232,7 @@ static int try_to_grab_pending(struct wo + spin_unlock(&pool->lock); + fail: + rcu_read_unlock(); +- local_irq_restore(*flags); ++ local_unlock_irqrestore(pendingb_lock, *flags); + if (work_is_canceling(work)) + return -ENOENT; + cpu_relax(); +@@ -1301,7 +1304,7 @@ static void __queue_work(int cpu, struct + * queued or lose PENDING. Grabbing PENDING and queueing should + * happen with IRQ disabled. 
+ */ +- WARN_ON_ONCE(!irqs_disabled()); ++ WARN_ON_ONCE_NONRT(!irqs_disabled()); + + debug_work_activate(work); + +@@ -1406,14 +1409,14 @@ bool queue_work_on(int cpu, struct workq + bool ret = false; + unsigned long flags; + +- local_irq_save(flags); ++ local_lock_irqsave(pendingb_lock,flags); + + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { + __queue_work(cpu, wq, work); + ret = true; + } + +- local_irq_restore(flags); ++ local_unlock_irqrestore(pendingb_lock, flags); + return ret; + } + EXPORT_SYMBOL(queue_work_on); +@@ -1480,14 +1483,14 @@ bool queue_delayed_work_on(int cpu, stru + unsigned long flags; + + /* read the comment in __queue_work() */ +- local_irq_save(flags); ++ local_lock_irqsave(pendingb_lock, flags); + + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { + __queue_delayed_work(cpu, wq, dwork, delay); + ret = true; + } + +- local_irq_restore(flags); ++ local_unlock_irqrestore(pendingb_lock, flags); + return ret; + } + EXPORT_SYMBOL(queue_delayed_work_on); +@@ -1522,7 +1525,7 @@ bool mod_delayed_work_on(int cpu, struct + + if (likely(ret >= 0)) { + __queue_delayed_work(cpu, wq, dwork, delay); +- local_irq_restore(flags); ++ local_unlock_irqrestore(pendingb_lock, flags); + } + + /* -ENOENT from try_to_grab_pending() becomes %true */ +@@ -2855,7 +2858,7 @@ static bool __cancel_work_timer(struct w + + /* tell other tasks trying to grab @work to back off */ + mark_work_canceling(work); +- local_irq_restore(flags); ++ local_unlock_irqrestore(pendingb_lock, flags); + + flush_work(work); + clear_work_data(work); +@@ -2900,10 +2903,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync); + */ + bool flush_delayed_work(struct delayed_work *dwork) + { +- local_irq_disable(); ++ local_lock_irq(pendingb_lock); + if (del_timer_sync(&dwork->timer)) + __queue_work(dwork->cpu, dwork->wq, &dwork->work); +- local_irq_enable(); ++ local_unlock_irq(pendingb_lock); + return flush_work(&dwork->work); + } + EXPORT_SYMBOL(flush_delayed_work); +@@ -2934,7 +2937,7 @@ bool cancel_delayed_work(struct delayed_ + + set_work_pool_and_clear_pending(&dwork->work, + get_work_pool_id(&dwork->work)); +- local_irq_restore(flags); ++ local_unlock_irqrestore(pendingb_lock, flags); + return ret; + } + EXPORT_SYMBOL(cancel_delayed_work); +@@ -4373,7 +4376,7 @@ unsigned int work_busy(struct work_struc + if (work_pending(work)) + ret |= WORK_BUSY_PENDING; + +- rcu_read_lock() ++ rcu_read_lock(); + pool = get_work_pool(work); + if (pool) { + spin_lock_irqsave(&pool->lock, flags); diff --git a/debian/patches/features/all/rt/workqueue-use-rcu.patch b/debian/patches/features/all/rt/workqueue-use-rcu.patch new file mode 100644 index 000000000..49a9fabd6 --- /dev/null +++ b/debian/patches/features/all/rt/workqueue-use-rcu.patch @@ -0,0 +1,311 @@ +Subject: workqueue: Use normal rcu +From: Thomas Gleixner +Date: Wed, 24 Jul 2013 15:26:54 +0200 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +There is no need for sched_rcu. The undocumented reason why sched_rcu +is used is to avoid a few explicit rcu_read_lock()/unlock() pairs by +abusing the fact that sched_rcu reader side critical sections are also +protected by preempt or irq disabled regions. 
+ +Signed-off-by: Thomas Gleixner +--- + kernel/workqueue.c | 85 +++++++++++++++++++++++++++++------------------------ + 1 file changed, 47 insertions(+), 38 deletions(-) + +Index: linux-stable/kernel/workqueue.c +=================================================================== +--- linux-stable.orig/kernel/workqueue.c ++++ linux-stable/kernel/workqueue.c +@@ -128,11 +128,11 @@ enum { + * + * PL: wq_pool_mutex protected. + * +- * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads. ++ * PR: wq_pool_mutex protected for writes. RCU protected for reads. + * + * WQ: wq->mutex protected. + * +- * WR: wq->mutex protected for writes. Sched-RCU protected for reads. ++ * WR: wq->mutex protected for writes. RCU protected for reads. + * + * MD: wq_mayday_lock protected. + */ +@@ -177,7 +177,7 @@ struct worker_pool { + atomic_t nr_running ____cacheline_aligned_in_smp; + + /* +- * Destruction of pool is sched-RCU protected to allow dereferences ++ * Destruction of pool is RCU protected to allow dereferences + * from get_work_pool(). + */ + struct rcu_head rcu; +@@ -206,7 +206,7 @@ struct pool_workqueue { + /* + * Release of unbound pwq is punted to system_wq. See put_pwq() + * and pwq_unbound_release_workfn() for details. pool_workqueue +- * itself is also sched-RCU protected so that the first pwq can be ++ * itself is also RCU protected so that the first pwq can be + * determined without grabbing wq->mutex. + */ + struct work_struct unbound_release_work; +@@ -314,14 +314,14 @@ static void copy_workqueue_attrs(struct + #include + + #define assert_rcu_or_pool_mutex() \ +- rcu_lockdep_assert(rcu_read_lock_sched_held() || \ ++ rcu_lockdep_assert(rcu_read_lock_held() || \ + lockdep_is_held(&wq_pool_mutex), \ +- "sched RCU or wq_pool_mutex should be held") ++ "RCU or wq_pool_mutex should be held") + + #define assert_rcu_or_wq_mutex(wq) \ +- rcu_lockdep_assert(rcu_read_lock_sched_held() || \ ++ rcu_lockdep_assert(rcu_read_lock_held() || \ + lockdep_is_held(&wq->mutex), \ +- "sched RCU or wq->mutex should be held") ++ "RCU or wq->mutex should be held") + + #ifdef CONFIG_LOCKDEP + #define assert_manager_or_pool_lock(pool) \ +@@ -343,7 +343,7 @@ static void copy_workqueue_attrs(struct + * @pool: iteration cursor + * @pi: integer used for iteration + * +- * This must be called either with wq_pool_mutex held or sched RCU read ++ * This must be called either with wq_pool_mutex held or RCU read + * locked. If the pool needs to be used beyond the locking in effect, the + * caller is responsible for guaranteeing that the pool stays online. + * +@@ -376,7 +376,7 @@ static void copy_workqueue_attrs(struct + * @pwq: iteration cursor + * @wq: the target workqueue + * +- * This must be called either with wq->mutex held or sched RCU read locked. ++ * This must be called either with wq->mutex held or RCU read locked. + * If the pwq needs to be used beyond the locking in effect, the caller is + * responsible for guaranteeing that the pwq stays online. + * +@@ -524,7 +524,7 @@ static int worker_pool_assign_id(struct + * @wq: the target workqueue + * @node: the node ID + * +- * This must be called either with pwq_lock held or sched RCU read locked. ++ * This must be called either with pwq_lock held or RCU read locked. + * If the pwq needs to be used beyond the locking in effect, the caller is + * responsible for guaranteeing that the pwq stays online. + */ +@@ -628,8 +628,8 @@ static struct pool_workqueue *get_work_p + * Return the worker_pool @work was last associated with. %NULL if none. 
+ * + * Pools are created and destroyed under wq_pool_mutex, and allows read +- * access under sched-RCU read lock. As such, this function should be +- * called under wq_pool_mutex or with preemption disabled. ++ * access under RCU read lock. As such, this function should be ++ * called under wq_pool_mutex or inside of a rcu_read_lock() region. + * + * All fields of the returned pool are accessible as long as the above + * mentioned locking is in effect. If the returned pool needs to be used +@@ -1060,7 +1060,7 @@ static void put_pwq_unlocked(struct pool + { + if (pwq) { + /* +- * As both pwqs and pools are sched-RCU protected, the ++ * As both pwqs and pools are RCU protected, the + * following lock operations are safe. + */ + spin_lock_irq(&pwq->pool->lock); +@@ -1184,6 +1184,7 @@ static int try_to_grab_pending(struct wo + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) + return 0; + ++ rcu_read_lock(); + /* + * The queueing is in progress, or it is already queued. Try to + * steal it from ->worklist without clearing WORK_STRUCT_PENDING. +@@ -1222,10 +1223,12 @@ static int try_to_grab_pending(struct wo + set_work_pool_and_keep_pending(work, pool->id); + + spin_unlock(&pool->lock); ++ rcu_read_unlock(); + return 1; + } + spin_unlock(&pool->lock); + fail: ++ rcu_read_unlock(); + local_irq_restore(*flags); + if (work_is_canceling(work)) + return -ENOENT; +@@ -1306,6 +1309,8 @@ static void __queue_work(int cpu, struct + if (unlikely(wq->flags & __WQ_DRAINING) && + WARN_ON_ONCE(!is_chained_work(wq))) + return; ++ ++ rcu_read_lock(); + retry: + if (req_cpu == WORK_CPU_UNBOUND) + cpu = raw_smp_processor_id(); +@@ -1362,10 +1367,8 @@ retry: + /* pwq determined, queue */ + trace_workqueue_queue_work(req_cpu, pwq, work); + +- if (WARN_ON(!list_empty(&work->entry))) { +- spin_unlock(&pwq->pool->lock); +- return; +- } ++ if (WARN_ON(!list_empty(&work->entry))) ++ goto out; + + pwq->nr_in_flight[pwq->work_color]++; + work_flags = work_color_to_flags(pwq->work_color); +@@ -1381,7 +1384,9 @@ retry: + + insert_work(pwq, work, worklist, work_flags); + ++out: + spin_unlock(&pwq->pool->lock); ++ rcu_read_unlock(); + } + + /** +@@ -2763,14 +2768,14 @@ static bool start_flush_work(struct work + + might_sleep(); + +- local_irq_disable(); ++ rcu_read_lock(); + pool = get_work_pool(work); + if (!pool) { +- local_irq_enable(); ++ rcu_read_unlock(); + return false; + } + +- spin_lock(&pool->lock); ++ spin_lock_irq(&pool->lock); + /* see the comment in try_to_grab_pending() with the same code */ + pwq = get_work_pwq(work); + if (pwq) { +@@ -2797,10 +2802,11 @@ static bool start_flush_work(struct work + else + lock_map_acquire_read(&pwq->wq->lockdep_map); + lock_map_release(&pwq->wq->lockdep_map); +- ++ rcu_read_unlock(); + return true; + already_gone: + spin_unlock_irq(&pool->lock); ++ rcu_read_unlock(); + return false; + } + +@@ -3111,7 +3117,8 @@ static ssize_t wq_pool_ids_show(struct d + const char *delim = ""; + int node, written = 0; + +- rcu_read_lock_sched(); ++ get_online_cpus(); ++ rcu_read_lock(); + for_each_node(node) { + written += scnprintf(buf + written, PAGE_SIZE - written, + "%s%d:%d", delim, node, +@@ -3119,7 +3126,8 @@ static ssize_t wq_pool_ids_show(struct d + delim = " "; + } + written += scnprintf(buf + written, PAGE_SIZE - written, "\n"); +- rcu_read_unlock_sched(); ++ rcu_read_unlock(); ++ put_online_cpus(); + + return written; + } +@@ -3476,7 +3484,7 @@ static void rcu_free_pool(struct rcu_hea + * put_unbound_pool - put a worker_pool + * @pool: worker_pool to put + * 
+- * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU ++ * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU + * safe manner. get_unbound_pool() calls this function on its failure path + * and this function should be able to release pools which went through, + * successfully or not, init_worker_pool(). +@@ -3523,8 +3531,8 @@ static void put_unbound_pool(struct work + del_timer_sync(&pool->idle_timer); + del_timer_sync(&pool->mayday_timer); + +- /* sched-RCU protected to allow dereferences from get_work_pool() */ +- call_rcu_sched(&pool->rcu, rcu_free_pool); ++ /* RCU protected to allow dereferences from get_work_pool() */ ++ call_rcu(&pool->rcu, rcu_free_pool); + } + + /** +@@ -3628,7 +3636,7 @@ static void pwq_unbound_release_workfn(s + put_unbound_pool(pool); + mutex_unlock(&wq_pool_mutex); + +- call_rcu_sched(&pwq->rcu, rcu_free_pwq); ++ call_rcu(&pwq->rcu, rcu_free_pwq); + + /* + * If we're the last pwq going away, @wq is already dead and no one +@@ -4326,7 +4334,8 @@ bool workqueue_congested(int cpu, struct + struct pool_workqueue *pwq; + bool ret; + +- rcu_read_lock_sched(); ++ rcu_read_lock(); ++ preempt_disable(); + + if (cpu == WORK_CPU_UNBOUND) + cpu = smp_processor_id(); +@@ -4337,7 +4346,8 @@ bool workqueue_congested(int cpu, struct + pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); + + ret = !list_empty(&pwq->delayed_works); +- rcu_read_unlock_sched(); ++ preempt_enable(); ++ rcu_read_unlock(); + + return ret; + } +@@ -4363,16 +4373,15 @@ unsigned int work_busy(struct work_struc + if (work_pending(work)) + ret |= WORK_BUSY_PENDING; + +- local_irq_save(flags); ++ rcu_read_lock() + pool = get_work_pool(work); + if (pool) { +- spin_lock(&pool->lock); ++ spin_lock_irqsave(&pool->lock, flags); + if (find_worker_executing_work(pool, work)) + ret |= WORK_BUSY_RUNNING; +- spin_unlock(&pool->lock); ++ spin_unlock_irqrestore(&pool->lock, flags); + } +- local_irq_restore(flags); +- ++ rcu_read_unlock(); + return ret; + } + EXPORT_SYMBOL_GPL(work_busy); +@@ -4817,16 +4826,16 @@ bool freeze_workqueues_busy(void) + * nr_active is monotonically decreasing. It's safe + * to peek without lock. + */ +- rcu_read_lock_sched(); ++ rcu_read_lock(); + for_each_pwq(pwq, wq) { + WARN_ON_ONCE(pwq->nr_active < 0); + if (pwq->nr_active) { + busy = true; +- rcu_read_unlock_sched(); ++ rcu_read_unlock(); + goto out_unlock; + } + } +- rcu_read_unlock_sched(); ++ rcu_read_unlock(); + } + out_unlock: + mutex_unlock(&wq_pool_mutex); diff --git a/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch b/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch new file mode 100644 index 000000000..0b1439dff --- /dev/null +++ b/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch @@ -0,0 +1,115 @@ +Subject: x86: crypto: Reduce preempt disabled regions +From: Peter Zijlstra +Date: Mon, 14 Nov 2011 18:19:27 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Restrict the preempt disabled regions to the actual floating point +operations and enable preemption for the administrative actions. + +This is necessary on RT to avoid that kfree and other operations are +called with preemption disabled. 
+ +Reported-and-tested-by: Carsten Emde +Signed-off-by: Peter Zijlstra +Cc: stable-rt@vger.kernel.org +Signed-off-by: Thomas Gleixner +--- + arch/x86/crypto/aesni-intel_glue.c | 24 +++++++++++++----------- + 1 file changed, 13 insertions(+), 11 deletions(-) + +Index: linux-stable/arch/x86/crypto/aesni-intel_glue.c +=================================================================== +--- linux-stable.orig/arch/x86/crypto/aesni-intel_glue.c ++++ linux-stable/arch/x86/crypto/aesni-intel_glue.c +@@ -252,14 +252,14 @@ static int ecb_encrypt(struct blkcipher_ + err = blkcipher_walk_virt(desc, &walk); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { ++ kernel_fpu_begin(); + aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, +- nbytes & AES_BLOCK_MASK); ++ nbytes & AES_BLOCK_MASK); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } +- kernel_fpu_end(); + + return err; + } +@@ -276,14 +276,14 @@ static int ecb_decrypt(struct blkcipher_ + err = blkcipher_walk_virt(desc, &walk); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { ++ kernel_fpu_begin(); + aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } +- kernel_fpu_end(); + + return err; + } +@@ -300,14 +300,14 @@ static int cbc_encrypt(struct blkcipher_ + err = blkcipher_walk_virt(desc, &walk); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { ++ kernel_fpu_begin(); + aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK, walk.iv); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } +- kernel_fpu_end(); + + return err; + } +@@ -324,14 +324,14 @@ static int cbc_decrypt(struct blkcipher_ + err = blkcipher_walk_virt(desc, &walk); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { ++ kernel_fpu_begin(); + aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK, walk.iv); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } +- kernel_fpu_end(); + + return err; + } +@@ -364,18 +364,20 @@ static int ctr_crypt(struct blkcipher_de + err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { ++ kernel_fpu_begin(); + aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK, walk.iv); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + if (walk.nbytes) { ++ kernel_fpu_begin(); + ctr_crypt_final(ctx, &walk); ++ kernel_fpu_end(); + err = blkcipher_walk_done(desc, &walk, 0); + } +- kernel_fpu_end(); + + return err; + } diff --git a/debian/patches/features/all/rt/x86-disable-debug-stack.patch b/debian/patches/features/all/rt/x86-disable-debug-stack.patch new file mode 100644 index 000000000..a9b2ac5c2 --- /dev/null +++ b/debian/patches/features/all/rt/x86-disable-debug-stack.patch @@ -0,0 +1,109 @@ +From: Andi Kleen +Date: Fri, 3 Jul 2009 08:44:10 -0500 +Subject: x86: Disable IST stacks for debug/int 3/stack fault for PREEMPT_RT +Origin: 
https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Normally the x86-64 trap handlers for debug/int 3/stack fault run +on a special interrupt stack to make them more robust +when dealing with kernel code. + +The PREEMPT_RT kernel can sleep in locks even while allocating +GFP_ATOMIC memory. When one of these trap handlers needs to send +real time signals for ptrace it allocates memory and could then +try to to schedule. But it is not allowed to schedule on a +IST stack. This can cause warnings and hangs. + +This patch disables the IST stacks for these handlers for PREEMPT_RT +kernel. Instead let them run on the normal process stack. + +The kernel only really needs the ISTs here to make kernel debuggers more +robust in case someone sets a break point somewhere where the stack is +invalid. But there are no kernel debuggers in the standard kernel +that do this. + +It also means kprobes cannot be set in situations with invalid stack; +but that sounds like a reasonable restriction. + +The stack fault change could minimally impact oops quality, but not very +much because stack faults are fairly rare. + +A better solution would be to use similar logic as the NMI "paranoid" +path: check if signal is for user space, if yes go back to entry.S, switch stack, +call sync_regs, then do the signal sending etc. + +But this patch is much simpler and should work too with minimal impact. + +Signed-off-by: Andi Kleen +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner +--- + arch/x86/include/asm/page_64_types.h | 21 +++++++++++++++------ + arch/x86/kernel/cpu/common.c | 2 ++ + arch/x86/kernel/dumpstack_64.c | 4 ++++ + 3 files changed, 21 insertions(+), 6 deletions(-) + +Index: linux-stable/arch/x86/include/asm/page_64_types.h +=================================================================== +--- linux-stable.orig/arch/x86/include/asm/page_64_types.h ++++ linux-stable/arch/x86/include/asm/page_64_types.h +@@ -14,12 +14,21 @@ + #define IRQ_STACK_ORDER 2 + #define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER) + +-#define STACKFAULT_STACK 1 +-#define DOUBLEFAULT_STACK 2 +-#define NMI_STACK 3 +-#define DEBUG_STACK 4 +-#define MCE_STACK 5 +-#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define STACKFAULT_STACK 0 ++# define DOUBLEFAULT_STACK 1 ++# define NMI_STACK 2 ++# define DEBUG_STACK 0 ++# define MCE_STACK 3 ++# define N_EXCEPTION_STACKS 3 /* hw limit: 7 */ ++#else ++# define STACKFAULT_STACK 1 ++# define DOUBLEFAULT_STACK 2 ++# define NMI_STACK 3 ++# define DEBUG_STACK 4 ++# define MCE_STACK 5 ++# define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ ++#endif + + #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) + #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) +Index: linux-stable/arch/x86/kernel/cpu/common.c +=================================================================== +--- linux-stable.orig/arch/x86/kernel/cpu/common.c ++++ linux-stable/arch/x86/kernel/cpu/common.c +@@ -1104,7 +1104,9 @@ DEFINE_PER_CPU(struct task_struct *, fpu + */ + static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = { + [0 ... 
N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ, ++#if DEBUG_STACK > 0 + [DEBUG_STACK - 1] = DEBUG_STKSZ ++#endif + }; + + static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks +Index: linux-stable/arch/x86/kernel/dumpstack_64.c +=================================================================== +--- linux-stable.orig/arch/x86/kernel/dumpstack_64.c ++++ linux-stable/arch/x86/kernel/dumpstack_64.c +@@ -21,10 +21,14 @@ + (N_EXCEPTION_STACKS + DEBUG_STKSZ/EXCEPTION_STKSZ - 2) + + static char x86_stack_ids[][8] = { ++#if DEBUG_STACK > 0 + [ DEBUG_STACK-1 ] = "#DB", ++#endif + [ NMI_STACK-1 ] = "NMI", + [ DOUBLEFAULT_STACK-1 ] = "#DF", ++#if STACKFAULT_STACK > 0 + [ STACKFAULT_STACK-1 ] = "#SS", ++#endif + [ MCE_STACK-1 ] = "#MC", + #if DEBUG_STKSZ > EXCEPTION_STKSZ + [ N_EXCEPTION_STACKS ... diff --git a/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch b/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch new file mode 100644 index 000000000..f9f7f258f --- /dev/null +++ b/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch @@ -0,0 +1,29 @@ +From: Ingo Molnar +Date: Fri, 3 Jul 2009 08:29:27 -0500 +Subject: x86: Do not unmask io_apic when interrupt is in progress +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +With threaded interrupts we might see an interrupt in progress on +migration. Do not unmask it when this is the case. + +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner + +--- + arch/x86/kernel/apic/io_apic.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +Index: linux-stable/arch/x86/kernel/apic/io_apic.c +=================================================================== +--- linux-stable.orig/arch/x86/kernel/apic/io_apic.c ++++ linux-stable/arch/x86/kernel/apic/io_apic.c +@@ -2391,7 +2391,8 @@ static bool io_apic_level_ack_pending(st + static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg) + { + /* If we are moving the irq we need to mask it */ +- if (unlikely(irqd_is_setaffinity_pending(data))) { ++ if (unlikely(irqd_is_setaffinity_pending(data) && ++ !irqd_irq_inprogress(data))) { + mask_ioapic(cfg); + return true; + } diff --git a/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch b/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch new file mode 100644 index 000000000..968c83e6f --- /dev/null +++ b/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch @@ -0,0 +1,28 @@ +Subject: x86-kvm-require-const-tsc-for-rt.patch +From: Thomas Gleixner +Date: Sun, 06 Nov 2011 12:26:18 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + arch/x86/kvm/x86.c | 7 +++++++ + 1 file changed, 7 insertions(+) + +Index: linux-stable/arch/x86/kvm/x86.c +=================================================================== +--- linux-stable.orig/arch/x86/kvm/x86.c ++++ linux-stable/arch/x86/kvm/x86.c +@@ -5326,6 +5326,13 @@ int kvm_arch_init(void *opaque) + goto out; + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { ++ printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n"); ++ return -EOPNOTSUPP; ++ } ++#endif ++ + r = kvm_mmu_module_init(); + if (r) + goto out_free_percpu; diff --git a/debian/patches/features/all/rt/x86-mce-Defer-mce-wakeups-to-threads-for-PREEMPT_RT.patch b/debian/patches/features/all/rt/x86-mce-Defer-mce-wakeups-to-threads-for-PREEMPT_RT.patch new file mode 100644 index 
000000000..36e25a159 --- /dev/null +++ b/debian/patches/features/all/rt/x86-mce-Defer-mce-wakeups-to-threads-for-PREEMPT_RT.patch @@ -0,0 +1,167 @@ +From df12896518bc6db6a717de580116a07cdd19fbd9 Mon Sep 17 00:00:00 2001 +From: Steven Rostedt +Date: Thu, 11 Apr 2013 14:33:34 -0400 +Subject: [PATCH 4/5] x86/mce: Defer mce wakeups to threads for PREEMPT_RT +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +We had a customer report a lockup on a 3.0-rt kernel that had the +following backtrace: + +[ffff88107fca3e80] rt_spin_lock_slowlock at ffffffff81499113 +[ffff88107fca3f40] rt_spin_lock at ffffffff81499a56 +[ffff88107fca3f50] __wake_up at ffffffff81043379 +[ffff88107fca3f80] mce_notify_irq at ffffffff81017328 +[ffff88107fca3f90] intel_threshold_interrupt at ffffffff81019508 +[ffff88107fca3fa0] smp_threshold_interrupt at ffffffff81019fc1 +[ffff88107fca3fb0] threshold_interrupt at ffffffff814a1853 + +It actually bugged because the lock was taken by the same owner that +already had that lock. What happened was the thread that was setting +itself on a wait queue had the lock when an MCE triggered. The MCE +interrupt does a wake up on its wait list and grabs the same lock. + +NOTE: THIS IS NOT A BUG ON MAINLINE + +Sorry for yelling, but as I Cc'd mainline maintainers I want them to +know that this is an PREEMPT_RT bug only. I only Cc'd them for advice. + +On PREEMPT_RT the wait queue locks are converted from normal +"spin_locks" into an rt_mutex (see the rt_spin_lock_slowlock above). +These are not to be taken by hard interrupt context. This usually isn't +a problem as most all interrupts in PREEMPT_RT are converted into +schedulable threads. Unfortunately that's not the case with the MCE irq. + +As wait queue locks are notorious for long hold times, we can not +convert them to raw_spin_locks without causing issues with -rt. But +Thomas has created a "simple-wait" structure that uses raw spin locks +which may have been a good fit. + +Unfortunately, wait queues are not the only issue, as the mce_notify_irq +also does a schedule_work(), which grabs the workqueue spin locks that +have the exact same issue. + +Thus, this patch I'm proposing is to move the actual work of the MCE +interrupt into a helper thread that gets woken up on the MCE interrupt +and does the work in a schedulable context. + +NOTE: THIS PATCH ONLY CHANGES THE BEHAVIOR WHEN PREEMPT_RT IS SET + +Oops, sorry for yelling again, but I want to stress that I keep the same +behavior of mainline when PREEMPT_RT is not set. Thus, this only changes +the MCE behavior when PREEMPT_RT is configured. + +Signed-off-by: Steven Rostedt +[bigeasy@linutronix: make mce_notify_work() a proper prototype, use + kthread_run()] +Signed-off-by: Sebastian Andrzej Siewior +--- + arch/x86/kernel/cpu/mcheck/mce.c | 75 ++++++++++++++++++++++++++++++++------- + 1 file changed, 62 insertions(+), 13 deletions(-) + +Index: linux-stable/arch/x86/kernel/cpu/mcheck/mce.c +=================================================================== +--- linux-stable.orig/arch/x86/kernel/cpu/mcheck/mce.c ++++ linux-stable/arch/x86/kernel/cpu/mcheck/mce.c +@@ -18,6 +18,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -1343,26 +1344,72 @@ static void mce_do_trigger(struct work_s + + static DECLARE_WORK(mce_trigger_work, mce_do_trigger); + +-/* +- * Notify the user(s) about new machine check events. +- * Can be called from interrupt context, but not from machine check/NMI +- * context. 
+- */ +-int mce_notify_irq(void) ++static void __mce_notify_work(void) + { + /* Not more than two messages every minute */ + static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); + +- if (test_and_clear_bit(0, &mce_need_notify)) { +- /* wake processes polling /dev/mcelog */ +- wake_up_interruptible(&mce_chrdev_wait); ++ /* wake processes polling /dev/mcelog */ ++ wake_up_interruptible(&mce_chrdev_wait); ++ ++ /* ++ * There is no risk of missing notifications because ++ * work_pending is always cleared before the function is ++ * executed. ++ */ ++ if (mce_helper[0] && !work_pending(&mce_trigger_work)) ++ schedule_work(&mce_trigger_work); ++ ++ if (__ratelimit(&ratelimit)) ++ pr_info(HW_ERR "Machine check events logged\n"); ++} ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++struct task_struct *mce_notify_helper; ++ ++static int mce_notify_helper_thread(void *unused) ++{ ++ while (1) { ++ set_current_state(TASK_INTERRUPTIBLE); ++ schedule(); ++ if (kthread_should_stop()) ++ break; ++ __mce_notify_work(); ++ } ++ return 0; ++} ++ ++static int mce_notify_work_init(void) ++{ ++ mce_notify_helper = kthread_run(mce_notify_helper_thread, NULL, ++ "mce-notify"); ++ if (!mce_notify_helper) ++ return -ENOMEM; + +- if (mce_helper[0]) +- schedule_work(&mce_trigger_work); ++ return 0; ++} + +- if (__ratelimit(&ratelimit)) +- pr_info(HW_ERR "Machine check events logged\n"); ++static void mce_notify_work(void) ++{ ++ wake_up_process(mce_notify_helper); ++} ++#else ++static void mce_notify_work(void) ++{ ++ __mce_notify_work(); ++} ++static inline int mce_notify_work_init(void) { return 0; } ++#endif + ++/* ++ * Notify the user(s) about new machine check events. ++ * Can be called from interrupt context, but not from machine check/NMI ++ * context. ++ */ ++int mce_notify_irq(void) ++{ ++ if (test_and_clear_bit(0, &mce_need_notify)) { ++ mce_notify_work(); + return 1; + } + return 0; +@@ -2424,6 +2471,8 @@ static __init int mcheck_init_device(voi + /* register character device /dev/mcelog */ + misc_register(&mce_chrdev_device); + ++ err = mce_notify_work_init(); ++ + return err; + } + device_initcall_sync(mcheck_init_device); diff --git a/debian/patches/features/all/rt/x86-mce-fix-mce-timer-interval.patch b/debian/patches/features/all/rt/x86-mce-fix-mce-timer-interval.patch new file mode 100644 index 000000000..adb537efb --- /dev/null +++ b/debian/patches/features/all/rt/x86-mce-fix-mce-timer-interval.patch @@ -0,0 +1,51 @@ +From b69721d1af317f491ae7090d338d82528745612a Mon Sep 17 00:00:00 2001 +From: Mike Galbraith +Date: Wed, 29 May 2013 13:52:13 +0200 +Subject: [PATCH] x86/mce: fix mce timer interval +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Seems mce timer fire at the wrong frequency in -rt kernels since roughly +forever due to 32 bit overflow. 3.8-rt is also missing a multiplier. + +Add missing us -> ns conversion and 32 bit overflow prevention. 
+ +Cc: stable-rt@vger.kernel.org +Signed-off-by: Mike Galbraith +[bigeasy: use ULL instead of u64 cast] +Signed-off-by: Sebastian Andrzej Siewior +--- + arch/x86/kernel/cpu/mcheck/mce.c | 7 ++++--- + 1 file changed, 4 insertions(+), 3 deletions(-) + +Index: linux-stable/arch/x86/kernel/cpu/mcheck/mce.c +=================================================================== +--- linux-stable.orig/arch/x86/kernel/cpu/mcheck/mce.c ++++ linux-stable/arch/x86/kernel/cpu/mcheck/mce.c +@@ -1291,7 +1291,8 @@ static enum hrtimer_restart mce_timer_fn + __this_cpu_write(mce_next_interval, iv); + /* Might have become 0 after CMCI storm subsided */ + if (iv) { +- hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_usecs(iv))); ++ hrtimer_forward_now(timer, ns_to_ktime( ++ jiffies_to_usecs(iv) * 1000ULL)); + return HRTIMER_RESTART; + } + return HRTIMER_NORESTART; +@@ -1319,7 +1320,7 @@ void mce_timer_kick(unsigned long interv + } + } else { + hrtimer_start_range_ns(t, +- ns_to_ktime(jiffies_to_usecs(interval) * 1000), ++ ns_to_ktime(jiffies_to_usecs(interval) * 1000ULL), + 0, HRTIMER_MODE_REL_PINNED); + } + if (interval < iv) +@@ -1641,7 +1642,7 @@ static void mce_start_timer(unsigned int + if (mca_cfg.ignore_ce || !iv) + return; + +- hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000), ++ hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL), + 0, HRTIMER_MODE_REL_PINNED); + } + diff --git a/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch b/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch new file mode 100644 index 000000000..4b9737258 --- /dev/null +++ b/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch @@ -0,0 +1,179 @@ +From: Thomas Gleixner +Date: Mon, 13 Dec 2010 16:33:39 +0100 +Subject: x86: Convert mce timer to hrtimer +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +mce_timer is started in atomic contexts of cpu bringup. This results +in might_sleep() warnings on RT. Convert mce_timer to a hrtimer to +avoid this. 
+ +Signed-off-by: Thomas Gleixner + +--- + arch/x86/kernel/cpu/mcheck/mce.c | 57 ++++++++++++++++++++++----------------- + 1 file changed, 33 insertions(+), 24 deletions(-) + +Index: linux-stable/arch/x86/kernel/cpu/mcheck/mce.c +=================================================================== +--- linux-stable.orig/arch/x86/kernel/cpu/mcheck/mce.c ++++ linux-stable/arch/x86/kernel/cpu/mcheck/mce.c +@@ -41,6 +41,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -1256,7 +1257,7 @@ void mce_log_therm_throt_event(__u64 sta + static unsigned long check_interval = 5 * 60; /* 5 minutes */ + + static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */ +-static DEFINE_PER_CPU(struct timer_list, mce_timer); ++static DEFINE_PER_CPU(struct hrtimer, mce_timer); + + static unsigned long mce_adjust_timer_default(unsigned long interval) + { +@@ -1266,13 +1267,10 @@ static unsigned long mce_adjust_timer_de + static unsigned long (*mce_adjust_timer)(unsigned long interval) = + mce_adjust_timer_default; + +-static void mce_timer_fn(unsigned long data) ++static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer) + { +- struct timer_list *t = &__get_cpu_var(mce_timer); + unsigned long iv; + +- WARN_ON(smp_processor_id() != data); +- + if (mce_available(__this_cpu_ptr(&cpu_info))) { + machine_check_poll(MCP_TIMESTAMP, + &__get_cpu_var(mce_poll_banks)); +@@ -1293,9 +1291,10 @@ static void mce_timer_fn(unsigned long d + __this_cpu_write(mce_next_interval, iv); + /* Might have become 0 after CMCI storm subsided */ + if (iv) { +- t->expires = jiffies + iv; +- add_timer_on(t, smp_processor_id()); ++ hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_usecs(iv))); ++ return HRTIMER_RESTART; + } ++ return HRTIMER_NORESTART; + } + + /* +@@ -1303,28 +1302,37 @@ static void mce_timer_fn(unsigned long d + */ + void mce_timer_kick(unsigned long interval) + { +- struct timer_list *t = &__get_cpu_var(mce_timer); +- unsigned long when = jiffies + interval; ++ struct hrtimer *t = &__get_cpu_var(mce_timer); + unsigned long iv = __this_cpu_read(mce_next_interval); + +- if (timer_pending(t)) { +- if (time_before(when, t->expires)) +- mod_timer_pinned(t, when); ++ if (hrtimer_active(t)) { ++ s64 exp; ++ s64 intv_us; ++ ++ intv_us = jiffies_to_usecs(interval); ++ exp = ktime_to_us(hrtimer_expires_remaining(t)); ++ if (intv_us < exp) { ++ hrtimer_cancel(t); ++ hrtimer_start_range_ns(t, ++ ns_to_ktime(intv_us * 1000), ++ 0, HRTIMER_MODE_REL_PINNED); ++ } + } else { +- t->expires = round_jiffies(when); +- add_timer_on(t, smp_processor_id()); ++ hrtimer_start_range_ns(t, ++ ns_to_ktime(jiffies_to_usecs(interval) * 1000), ++ 0, HRTIMER_MODE_REL_PINNED); + } + if (interval < iv) + __this_cpu_write(mce_next_interval, interval); + } + +-/* Must not be called in IRQ context where del_timer_sync() can deadlock */ ++/* Must not be called in IRQ context where hrtimer_cancel() can deadlock */ + static void mce_timer_delete_all(void) + { + int cpu; + + for_each_online_cpu(cpu) +- del_timer_sync(&per_cpu(mce_timer, cpu)); ++ hrtimer_cancel(&per_cpu(mce_timer, cpu)); + } + + static void mce_do_trigger(struct work_struct *work) +@@ -1624,7 +1632,7 @@ static void __mcheck_cpu_init_vendor(str + } + } + +-static void mce_start_timer(unsigned int cpu, struct timer_list *t) ++static void mce_start_timer(unsigned int cpu, struct hrtimer *t) + { + unsigned long iv = mce_adjust_timer(check_interval * HZ); + +@@ -1633,16 +1641,17 @@ static void mce_start_timer(unsigned int + if (mca_cfg.ignore_ce || !iv) + 
return; + +- t->expires = round_jiffies(jiffies + iv); +- add_timer_on(t, smp_processor_id()); ++ hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000), ++ 0, HRTIMER_MODE_REL_PINNED); + } + + static void __mcheck_cpu_init_timer(void) + { +- struct timer_list *t = &__get_cpu_var(mce_timer); ++ struct hrtimer *t = &__get_cpu_var(mce_timer); + unsigned int cpu = smp_processor_id(); + +- setup_timer(t, mce_timer_fn, cpu); ++ hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ t->function = mce_timer_fn; + mce_start_timer(cpu, t); + } + +@@ -2299,6 +2308,8 @@ static void __cpuinit mce_disable_cpu(vo + if (!mce_available(__this_cpu_ptr(&cpu_info))) + return; + ++ hrtimer_cancel(&__get_cpu_var(mce_timer)); ++ + if (!(action & CPU_TASKS_FROZEN)) + cmci_clear(); + for (i = 0; i < mca_cfg.banks; i++) { +@@ -2325,6 +2336,7 @@ static void __cpuinit mce_reenable_cpu(v + if (b->init) + wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl); + } ++ __mcheck_cpu_init_timer(); + } + + /* Get notified when a cpu comes on/off. Be hotplug friendly. */ +@@ -2332,7 +2344,6 @@ static int __cpuinit + mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) + { + unsigned int cpu = (unsigned long)hcpu; +- struct timer_list *t = &per_cpu(mce_timer, cpu); + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_ONLINE: +@@ -2348,11 +2359,9 @@ mce_cpu_callback(struct notifier_block * + break; + case CPU_DOWN_PREPARE: + smp_call_function_single(cpu, mce_disable_cpu, &action, 1); +- del_timer_sync(t); + break; + case CPU_DOWN_FAILED: + smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); +- mce_start_timer(cpu, t); + break; + } + diff --git a/debian/patches/features/all/rt/x86-preempt-lazy.patch b/debian/patches/features/all/rt/x86-preempt-lazy.patch new file mode 100644 index 000000000..805c5fa16 --- /dev/null +++ b/debian/patches/features/all/rt/x86-preempt-lazy.patch @@ -0,0 +1,188 @@ +Subject: x86-preempt-lazy.patch +From: Thomas Gleixner +Date: Thu, 01 Nov 2012 11:03:47 +0100 +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Signed-off-by: Thomas Gleixner +--- + arch/x86/Kconfig | 1 + + arch/x86/include/asm/thread_info.h | 6 ++++++ + arch/x86/kernel/asm-offsets.c | 1 + + arch/x86/kernel/entry_32.S | 18 +++++++++++++----- + arch/x86/kernel/entry_64.S | 24 +++++++++++++++--------- + 5 files changed, 36 insertions(+), 14 deletions(-) + +Index: linux-stable/arch/x86/Kconfig +=================================================================== +--- linux-stable.orig/arch/x86/Kconfig ++++ linux-stable/arch/x86/Kconfig +@@ -121,6 +121,7 @@ config X86 + select OLD_SIGACTION if X86_32 + select COMPAT_OLD_SIGACTION if IA32_EMULATION + select RTC_LIB ++ select HAVE_PREEMPT_LAZY + + config INSTRUCTION_DECODER + def_bool y +Index: linux-stable/arch/x86/include/asm/thread_info.h +=================================================================== +--- linux-stable.orig/arch/x86/include/asm/thread_info.h ++++ linux-stable/arch/x86/include/asm/thread_info.h +@@ -30,6 +30,8 @@ struct thread_info { + __u32 cpu; /* current CPU */ + int preempt_count; /* 0 => preemptable, + <0 => BUG */ ++ int preempt_lazy_count; /* 0 => lazy preemptable, ++ <0 => BUG */ + mm_segment_t addr_limit; + struct restart_block restart_block; + void __user *sysenter_return; +@@ -81,6 +83,7 @@ struct thread_info { + #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ + #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ + #define TIF_SECCOMP 8 /* secure computing */ ++#define 
TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */ + #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ + #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ + #define TIF_UPROBE 12 /* breakpointed or singlestepping */ +@@ -106,6 +109,7 @@ struct thread_info { + #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) + #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) + #define _TIF_SECCOMP (1 << TIF_SECCOMP) ++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) + #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY) + #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY) + #define _TIF_UPROBE (1 << TIF_UPROBE) +@@ -156,6 +160,8 @@ struct thread_info { + #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) + #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG) + ++#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) ++ + #define PREEMPT_ACTIVE 0x10000000 + + #ifdef CONFIG_X86_32 +Index: linux-stable/arch/x86/kernel/asm-offsets.c +=================================================================== +--- linux-stable.orig/arch/x86/kernel/asm-offsets.c ++++ linux-stable/arch/x86/kernel/asm-offsets.c +@@ -33,6 +33,7 @@ void common(void) { + OFFSET(TI_status, thread_info, status); + OFFSET(TI_addr_limit, thread_info, addr_limit); + OFFSET(TI_preempt_count, thread_info, preempt_count); ++ OFFSET(TI_preempt_lazy_count, thread_info, preempt_lazy_count); + + BLANK(); + OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); +Index: linux-stable/arch/x86/kernel/entry_32.S +=================================================================== +--- linux-stable.orig/arch/x86/kernel/entry_32.S ++++ linux-stable/arch/x86/kernel/entry_32.S +@@ -364,14 +364,22 @@ ENTRY(resume_kernel) + DISABLE_INTERRUPTS(CLBR_ANY) + cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ? + jnz restore_all +-need_resched: + movl TI_flags(%ebp), %ecx # need_resched set ? + testb $_TIF_NEED_RESCHED, %cl ++ jnz 1f ++ ++ cmpl $0,TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ? ++ jnz restore_all ++ testl $_TIF_NEED_RESCHED_LAZY, %ecx + jz restore_all +- testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ? ++ ++1: testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ? + jz restore_all + call preempt_schedule_irq +- jmp need_resched ++ movl TI_flags(%ebp), %ecx # need_resched set ? ++ testl $_TIF_NEED_RESCHED_MASK, %ecx ++ jnz 1b ++ jmp restore_all + END(resume_kernel) + #endif + CFI_ENDPROC +@@ -607,7 +615,7 @@ ENDPROC(system_call) + ALIGN + RING0_PTREGS_FRAME # can't unwind into user space anyway + work_pending: +- testb $_TIF_NEED_RESCHED, %cl ++ testl $_TIF_NEED_RESCHED_MASK, %ecx + jz work_notifysig + work_resched: + call schedule +@@ -620,7 +628,7 @@ work_resched: + andl $_TIF_WORK_MASK, %ecx # is there any work to be done other + # than syscall tracing? 
+ jz restore_all +- testb $_TIF_NEED_RESCHED, %cl ++ testl $_TIF_NEED_RESCHED_MASK, %ecx + jnz work_resched + + work_notifysig: # deal with pending signals and +Index: linux-stable/arch/x86/kernel/entry_64.S +=================================================================== +--- linux-stable.orig/arch/x86/kernel/entry_64.S ++++ linux-stable/arch/x86/kernel/entry_64.S +@@ -673,8 +673,8 @@ sysret_check: + /* Handle reschedules */ + /* edx: work, edi: workmask */ + sysret_careful: +- bt $TIF_NEED_RESCHED,%edx +- jnc sysret_signal ++ testl $_TIF_NEED_RESCHED_MASK,%edx ++ jz sysret_signal + TRACE_IRQS_ON + ENABLE_INTERRUPTS(CLBR_NONE) + pushq_cfi %rdi +@@ -786,8 +786,8 @@ GLOBAL(int_with_check) + /* First do a reschedule test. */ + /* edx: work, edi: workmask */ + int_careful: +- bt $TIF_NEED_RESCHED,%edx +- jnc int_very_careful ++ testl $_TIF_NEED_RESCHED_MASK,%edx ++ jz int_very_careful + TRACE_IRQS_ON + ENABLE_INTERRUPTS(CLBR_NONE) + pushq_cfi %rdi +@@ -1086,8 +1086,8 @@ bad_iret: + /* edi: workmask, edx: work */ + retint_careful: + CFI_RESTORE_STATE +- bt $TIF_NEED_RESCHED,%edx +- jnc retint_signal ++ testl $_TIF_NEED_RESCHED_MASK,%edx ++ jz retint_signal + TRACE_IRQS_ON + ENABLE_INTERRUPTS(CLBR_NONE) + pushq_cfi %rdi +@@ -1120,9 +1120,15 @@ retint_signal: + ENTRY(retint_kernel) + cmpl $0,TI_preempt_count(%rcx) + jnz retint_restore_args +- bt $TIF_NEED_RESCHED,TI_flags(%rcx) ++ bt $TIF_NEED_RESCHED,TI_flags(%rcx) ++ jc 1f ++ ++ cmpl $0,TI_preempt_lazy_count(%rcx) ++ jnz retint_restore_args ++ bt $TIF_NEED_RESCHED_LAZY,TI_flags(%rcx) + jnc retint_restore_args +- bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */ ++ ++1: bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */ + jnc retint_restore_args + call preempt_schedule_irq + jmp exit_intr +@@ -1524,7 +1530,7 @@ paranoid_userspace: + movq %rsp,%rdi /* &pt_regs */ + call sync_regs + movq %rax,%rsp /* switch stack for scheduling */ +- testl $_TIF_NEED_RESCHED,%ebx ++ testl $_TIF_NEED_RESCHED_MASK,%ebx + jnz paranoid_schedule + movl %ebx,%edx /* arg3: thread flags */ + TRACE_IRQS_ON diff --git a/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch b/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch new file mode 100644 index 000000000..18978db85 --- /dev/null +++ b/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch @@ -0,0 +1,50 @@ +From: Thomas Gleixner +Date: Thu, 16 Dec 2010 14:25:18 +0100 +Subject: x86: stackprotector: Avoid random pool on rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +CPU bringup calls into the random pool to initialize the stack +canary. During boot that works nicely even on RT as the might sleep +checks are disabled. During CPU hotplug the might sleep checks +trigger. Making the locks in random raw is a major PITA, so avoid the +call on RT is the only sensible solution. This is basically the same +randomness which we get during boot where the random pool has no +entropy and we rely on the TSC randomnness. 
+ +Reported-by: Carsten Emde +Signed-off-by: Thomas Gleixner + +--- + arch/x86/include/asm/stackprotector.h | 10 +++++++++- + 1 file changed, 9 insertions(+), 1 deletion(-) + +Index: linux-stable/arch/x86/include/asm/stackprotector.h +=================================================================== +--- linux-stable.orig/arch/x86/include/asm/stackprotector.h ++++ linux-stable/arch/x86/include/asm/stackprotector.h +@@ -57,7 +57,7 @@ + */ + static __always_inline void boot_init_stack_canary(void) + { +- u64 canary; ++ u64 uninitialized_var(canary); + u64 tsc; + + #ifdef CONFIG_X86_64 +@@ -68,8 +68,16 @@ static __always_inline void boot_init_st + * of randomness. The TSC only matters for very early init, + * there it already has some randomness on most systems. Later + * on during the bootup the random pool has true entropy too. ++ * ++ * For preempt-rt we need to weaken the randomness a bit, as ++ * we can't call into the random generator from atomic context ++ * due to locking constraints. We just leave canary ++ * uninitialized and use the TSC based randomness on top of ++ * it. + */ ++#ifndef CONFIG_PREEMPT_RT_FULL + get_random_bytes(&canary, sizeof(canary)); ++#endif + tsc = __native_read_tsc(); + canary += tsc + (tsc << 32UL); + diff --git a/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch b/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch new file mode 100644 index 000000000..ad8cfcd2a --- /dev/null +++ b/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch @@ -0,0 +1,40 @@ +From: Thomas Gleixner +Date: Sun, 26 Jul 2009 02:21:32 +0200 +Subject: x86: Use generic rwsem_spinlocks on -rt +Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.4-rt1.tar.xz + +Simplifies the separation of anon_rw_semaphores and rw_semaphores for +-rt. + +Signed-off-by: Thomas Gleixner + +--- + arch/x86/Kconfig | 7 +++++-- + 1 file changed, 5 insertions(+), 2 deletions(-) + +Index: linux-stable/arch/x86/Kconfig +=================================================================== +--- linux-stable.orig/arch/x86/Kconfig ++++ linux-stable/arch/x86/Kconfig +@@ -177,8 +177,11 @@ config ARCH_MAY_HAVE_PC_FDC + def_bool y + depends on ISA_DMA_API + ++config RWSEM_GENERIC_SPINLOCK ++ def_bool PREEMPT_RT_FULL ++ + config RWSEM_XCHGADD_ALGORITHM +- def_bool y ++ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL + + config GENERIC_CALIBRATE_DELAY + def_bool y +@@ -460,7 +463,7 @@ config X86_MDFLD + select MFD_INTEL_MSIC + ---help--- + Medfield is Intel's Low Power Intel Architecture (LPIA) based Moblin +- Internet Device(MID) platform. ++ Internet Device(MID) platform. + Unlike standard x86 PCs, Medfield does not have many legacy devices + nor standard legacy replacement devices/features. e.g. Medfield does + not contain i8259, i8254, HPET, legacy BIOS, most of the io ports. 
diff --git a/debian/patches/series-rt b/debian/patches/series-rt new file mode 100644 index 000000000..a27cffa46 --- /dev/null +++ b/debian/patches/series-rt @@ -0,0 +1,623 @@ +########################################################### +# DELTA against a known Linus release +########################################################### + +############################################################ +# UPSTREAM changes queued +############################################################ + +############################################################ +# UPSTREAM FIXES, patches pending +############################################################ + +############################################################ +# Stuff broken upstream, patches submitted +############################################################ + +############################################################ +# Stuff which needs addressing upstream, but requires more +# information +############################################################ +#x86-hpet-disable-msi-on-lenovo-w510.patch + +############################################################ +# Stuff broken upstream, need to be sent +############################################################ + +############################################################ +# Submitted on LKML +############################################################ + +# SPARC part of erly printk consolidation +features/all/rt/early-printk-consolidate.patch + +# SRCU +features/all/rt/0002-kernel-SRCU-provide-a-static-initializer.patch + +############################################################ +# Submitted to mips ML +############################################################ + +############################################################ +# Submitted to ARM ML +############################################################ + +############################################################ +# Submitted to PPC ML +############################################################ + +############################################################ +# Submitted on LKML +############################################################ + +############################################################ +# Submitted to net-dev +############################################################ + +features/all/rt/net-dev-make-devnet-rename-seq-static.patch + +# Check again +# Revert upstream changes to cpsw +features/all/rt/revert-f9a8f83b04e0c362a2fc660dbad980d24af209fc.patch +features/all/rt/revert-f646968f8f7c624587de729115d802372b9063dd.patch +features/all/rt/revert-80d5c3689b886308247da295a228a54df49a44f6.patch +# Fix it really +features/all/rt/cpsw-net-cpsw-use-a-lock-around-source-testing.patch +features/all/rt/cpsw-net-cpsw-Use-fallback-for-active_slave.patch + +############################################################ +# Pending in tip +############################################################ + +############################################################ +# Stuff which should go upstream ASAP +############################################################ + +# SCHED +features/all/rt/idle-state.patch +features/all/rt/might-sleep-check-for-idle.patch +features/all/rt/sched-better-debug-output-for-might-sleep.patch +features/all/rt/sched-adjust-reset-on-fork-always.patch +features/all/rt/sched-enqueue-to-head.patch +features/all/rt/sched-consider-pi-boosting-in-setscheduler.patch + +# SCHED BLOCK/WQ +features/all/rt/block-shorten-interrupt-disabled-regions.patch + +# Timekeeping split jiffies lock. 
Needs a good argument :) +features/all/rt/timekeeping-split-jiffies-lock.patch + +# CHECKME: Should local_irq_enable() generally do a preemption check ? +features/all/rt/vtime-split-lock-and-seqcount.patch +features/all/rt/mips-enable-interrupts-in-signal.patch + +# Tracing +features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch + +# PTRACE/SIGNAL crap +features/all/rt/signal-revert-ptrace-preempt-magic.patch + +# ARM lock annotation +features/all/rt/arm-convert-boot-lock-to-raw.patch + +# PREEMPT_ENABLE_NO_RESCHED + +# SIGNALS / POSIXTIMERS +features/all/rt/posix-timers-no-broadcast.patch +features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch +features/all/rt/oleg-signal-rt-fix.patch + +# SCHED + +# GENERIC CMPXCHG + +# SHORTEN PREEMPT DISABLED +features/all/rt/drivers-random-reduce-preempt-disabled-region.patch + +# CLOCKSOURCE +features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch +features/all/rt/clocksource-tclib-allow-higher-clockrates.patch + +# DRIVERS NET +features/all/rt/drivers-net-tulip-add-missing-pci-disable.patch +features/all/rt/drivers-net-8139-disable-irq-nosync.patch + +# PREEMPT + +# PAGEFAULT DISABLE +features/all/rt/mm-prepare-pf-disable-discoupling.patch +features/all/rt/arch-use-pagefault-disabled.patch +features/all/rt/peter_zijlstra-frob-pagefault_disable.patch +features/all/rt/peterz-raw_pagefault_disable.patch +features/all/rt/filemap-fix-up.patch +features/all/rt/mm-remove-preempt-count-from-pf.patch + +# PM +features/all/rt/suspend-prevernt-might-sleep-splats.patch + +# MM/LISTS +features/all/rt/list-add-list-last-entry.patch +features/all/rt/mm-page-alloc-use-list-last-entry.patch +features/all/rt/mm-slab-move-debug-out.patch + +# INCLUDE MESS +features/all/rt/pid-h-include-atomic-h.patch +features/all/rt/sysctl-include-atomic-h.patch + +# NETWORKING +features/all/rt/net-flip-lock-dep-thingy.patch + +# SOFTIRQ +features/all/rt/softirq-thread-do-softirq.patch +features/all/rt/softirq-split-out-code.patch + +# X86 +features/all/rt/x86-io-apic-migra-no-unmask.patch +features/all/rt/fix-rt-int3-x86_32-3.2-rt.patch + +# RCU + +# LOCKING INIT FIXES + +# PCI +features/all/rt/pci-access-use-__wake_up_all_locked.patch + +# WORKQUEUE + + +##################################################### +# Stuff which should go mainline, but wants some care +##################################################### + +# SEQLOCK + +# ANON RW SEMAPHORES + +# TRACING +features/all/rt/latency-hist.patch + +# HW LATENCY DETECTOR - this really wants a rewrite +features/all/rt/hwlatdetect.patch + +################################################## +# REAL RT STUFF starts here +################################################## + +# Add RT to version +features/all/rt/localversion.patch + +# PRINTK +features/all/rt/printk-kill.patch +features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch + +# BASE RT CONFIG +features/all/rt/rt-preempt-base-config.patch + +# WARN/BUG_ON_RT +features/all/rt/bug-rt-dependend-variants.patch + +# LOCAL_IRQ_RT/NON_RT +features/all/rt/local-irq-rt-depending-variants.patch + +# PREEMPT NORT +features/all/rt/preempt-nort-rt-variants.patch + +# ANNOTATE local_irq_disable sites +features/all/rt/ata-disable-interrupts-if-non-rt.patch +features/all/rt/ide-use-nort-local-irq-variants.patch +features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch +features/all/rt/inpt-gameport-use-local-irq-nort.patch +#acpi-use-local-irq-nort.patch +features/all/rt/user-use-local-irq-nort.patch 
+features/all/rt/resource-counters-use-localirq-nort.patch +features/all/rt/usb-hcd-use-local-irq-nort.patch +features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch + +# Sigh +features/all/rt/signal-fix-up-rcu-wreckage.patch + +# ANNOTATE BUG/WARNON +features/all/rt/net-wireless-warn-nort.patch + +# BIT SPINLOCKS - SIGH +features/all/rt/mm-cgroup-page-bit-spinlock.patch +features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch +features/all/rt/fs-jbd-replace-bh_state-lock.patch + +# GENIRQ +features/all/rt/list_bl.h-make-list-head-locking-RT-safe.patch +features/all/rt/genirq-nodebug-shirq.patch +features/all/rt/genirq-disable-irqpoll-on-rt.patch +features/all/rt/genirq-force-threading.patch + +# DRIVERS NET +features/all/rt/drivers-net-fix-livelock-issues.patch +features/all/rt/drivers-net-vortex-fix-locking-issues.patch +features/all/rt/drivers-net-gianfar-make-rt-aware.patch + +# DRIVERS USB +# Revisit. Looks weird +features/all/rt/usb-fix-mouse-problem-copying-large-data.patch + +# LOCAL_IRQ_LOCKS +features/all/rt/local-var.patch +features/all/rt/rt-local-irq-lock.patch +features/all/rt/cpu-rt-variants.patch + +# MM SLAB +features/all/rt/mm-slab-wrap-functions.patch +features/all/rt/mm-slab-more-lock-breaks.patch + +# MM PAGE_ALLOC +features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch +features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch +features/all/rt/mm-page-alloc-fix.patch + +# MM SWAP +features/all/rt/mm-convert-swap-to-percpu-locked.patch + +# MM vmstat +features/all/rt/mm-make-vmstat-rt-aware.patch + +# MM memory +#mm-memory-rt.patch - ZAP... is unused +features/all/rt/mm-shrink-the-page-frame-to-rt-size.patch +features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch + +# MM bounce +features/all/rt/mm-bounce-local-irq-save-nort.patch + +# MM SLAB only +features/all/rt/mm-allow-slab-rt.patch + +# MM SLUB +features/all/rt/mm-enable-slub.patch +features/all/rt/slub-enable-irqs-for-no-wait.patch +features/all/rt/slub_delay_ctor_on_rt.patch + +# MM disable slab +features/all/rt/mm-disable-slab-on-rt.patch + +# Revisit for avr/frv/ia64/mn10300/sh/sparc ... 
+#mm-quicklists-percpu-locked.patch + +# MM +features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch + +# RADIX TREE +features/all/rt/radix-tree-rt-aware.patch + +# PANIC +features/all/rt/panic-disable-random-on-rt.patch + +# IPC +features/all/rt/ipc-make-rt-aware.patch +features/all/rt/ipc-mqueue-add-a-critical-section-to-avoid-a-deadlock.patch + +# RELAY +features/all/rt/relay-fix-timer-madness.patch + +# NETWORKING + +# WORKQUEUE SIGH + +# TIMERS +features/all/rt/timers-prepare-for-full-preemption.patch +features/all/rt/timers-prepare-for-full-preemption-improve.patch +features/all/rt/timers-preempt-rt-support.patch +#timers-mov-printk_tick-to-soft-interrupt.patch +features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch +features/all/rt/timers-avoid-the-base-null-otptimization-on-rt.patch + +# More PRINTK +#rfc-printk-don-27t-call-printk_tick-in-printk_needs_cpu.patch + +# HRTIMERS +features/all/rt/hrtimers-prepare-full-preemption.patch +features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch +features/all/rt/timer-fd-avoid-live-lock.patch +features/all/rt/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch + +# POSIX-CPU-TIMERS +features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch +features/all/rt/posix-timers-shorten-cpu-timers-thread.patch +features/all/rt/posix-timers-avoid-wakeups-when-no-timers-are-active.patch + +# SCHEDULER +features/all/rt/sched-delay-put-task.patch +features/all/rt/sched-limit-nr-migrate.patch +features/all/rt/sched-mmdrop-delayed.patch +features/all/rt/sched-rt-mutex-wakeup.patch +features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch +# CHECKME sched-load-balance-break-on-rq-contention.patch +features/all/rt/sched-cond-resched.patch +features/all/rt/cond-resched-softirq-rt.patch +features/all/rt/cond-resched-lock-rt-tweak.patch +features/all/rt/sched-disable-ttwu-queue.patch +features/all/rt/sched-disable-rt-group-sched-on-rt.patch +features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch +features/all/rt/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch + +# STOP MACHINE +features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch +#stomp-machine-mark-stomper-thread.patch +features/all/rt/stomp-machine-raw-lock.patch +features/all/rt/stomp-machine-deal-clever-with-stopper-lock.patch + +# MIGRATE DISABLE AND PER CPU +features/all/rt/hotplug-light-get-online-cpus.patch +features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch +features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch +features/all/rt/sched-migrate-disable.patch +features/all/rt/hotplug-use-migrate-disable.patch +features/all/rt/hotplug-call-cpu_unplug_begin-a-little-early.patch + +features/all/rt/ftrace-migrate-disable-tracing.patch +features/all/rt/rt-tracing-show-padding-as-unsigned-short.patch + +features/all/rt/migrate-disable-rt-variant.patch +features/all/rt/peter_zijlstra-frob-migrate_disable.patch +features/all/rt/peter_zijlstra-frob-migrate_disable-2.patch +features/all/rt/sched-rt-fix-migrate_enable-thinko.patch +features/all/rt/sched-teach-migrate_disable-about-atomic-contexts.patch +features/all/rt/rt-sched-postpone-actual-migration-disalbe-to-schedule.patch +features/all/rt/rt-sched-do-not-compare-cpu-masks-in-scheduler.patch +features/all/rt/rt-sched-have-migrate_disable-ignore-bounded-threads.patch +features/all/rt/sched-clear-pf-thread-bound-on-fallback-rq.patch + +# FTRACE +# XXX checkme ftrace-crap.patch +# CHECKME 
rt-ring-buffer-convert-reader_lock-from-raw_spin_lock-into-spin_lock.patch +# CHECKME rfc-ring-buffer-rt-check-for-irqs-disabled-before-grabbing-reader-lock.patch + +# NETWORKING +features/all/rt/net-netif_rx_ni-migrate-disable.patch + +# NOHZ +features/all/rt/softirq-sanitize-softirq-pending.patch + +# LOCKDEP +features/all/rt/lockdep-no-softirq-accounting-on-rt.patch + +# SOFTIRQ local lock +features/all/rt/mutex-no-spin-on-rt.patch +features/all/rt/softirq-local-lock.patch +features/all/rt/softirq-export-in-serving-softirq.patch +features/all/rt/harirq-h-define-softirq_count-as-oul-to-kill-build-warning.patch +# XXX checkme softirq-fix-unplug-deadlock.patch +features/all/rt/softirq-disable-softirq-stacks-for-rt.patch +features/all/rt/softirq-make-fifo.patch +features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch +features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch + +# LOCAL VARS and GETCPU STUFF +features/all/rt/local-vars-migrate-disable.patch + +# RAID5 +features/all/rt/md-raid5-percpu-handling-rt-aware.patch + +# FUTEX/RTMUTEX +features/all/rt/rtmutex-futex-prepare-rt.patch +features/all/rt/futex-requeue-pi-fix.patch +features/all/rt/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch + +# RTMUTEX +features/all/rt/rtmutex-lock-killable.patch +features/all/rt/rt-mutex-add-sleeping-spinlocks-support.patch +features/all/rt/spinlock-types-separate-raw.patch +features/all/rt/rtmutex-avoid-include-hell.patch +features/all/rt/rt-add-rt-spinlock-to-headers.patch +features/all/rt/rt-add-rt-to-mutex-headers.patch +features/all/rt/rwsem-add-rt-variant.patch +features/all/rt/rt-add-rt-locks.patch +features/all/rt/percpu-rwsem-compilefix.patch + +# RTMUTEX Fallout +features/all/rt/tasklist-lock-fix-section-conflict.patch + +# NOHZ/RTMUTEX +features/all/rt/timer-handle-idle-trylock-in-get-next-timer-irq.patch + +# RCU +features/all/rt/peter_zijlstra-frob-rcu.patch +features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch +features/all/rt/rcu-tiny-merge-bh.patch +features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch + +# LGLOCKS - lovely +features/all/rt/lglocks-rt.patch + +# DRIVERS SERIAL +features/all/rt/drivers-serial-cleanup-locking-for-rt.patch +features/all/rt/drivers-serial-call-flush_to_ldisc-when-the-irq-is-t.patch +features/all/rt/drivers-tty-fix-omap-lock-crap.patch +features/all/rt/drivers-tty-pl011-irq-disable-madness.patch +features/all/rt/rt-serial-warn-fix.patch + +# FS +features/all/rt/fs-namespace-preemption-fix.patch +features/all/rt/mm-protect-activate-switch-mm.patch +features/all/rt/fs-block-rt-support.patch +features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch +features/all/rt/fs-jbd-pull-plug-when-waiting-for-space.patch +features/all/rt/fs-fscache-remove-spin_lock-from-the-condition-in-wh.patch + +# X86 +features/all/rt/x86-mce-timer-hrtimer.patch +features/all/rt/x86-mce-fix-mce-timer-interval.patch +features/all/rt/x86-mce-Defer-mce-wakeups-to-threads-for-PREEMPT_RT.patch +features/all/rt/x86-stackprot-no-random-on-rt.patch +features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch +features/all/rt/x86-disable-debug-stack.patch + +# CPU get light +features/all/rt/epoll-use-get-cpu-light.patch +features/all/rt/mm-vmalloc-use-get-cpu-light.patch + +# CPU CHILL +features/all/rt/rt-introduce-cpu-chill.patch + +# BLOCK LIVELOCK PREVENTION +features/all/rt/block-use-cpu-chill.patch + +# FS LIVELOCK PREVENTION +features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch 
+features/all/rt/net-use-cpu-chill.patch + +# WORKQUEUE more fixes +features/all/rt/workqueue-use-rcu.patch +features/all/rt/workqueue-use-locallock.patch +features/all/rt/work-queue-work-around-irqsafe-timer-optimization.patch +features/all/rt/workqueue-distangle-from-rq-lock.patch + +# IDR +features/all/rt/idr-use-local-lock-for-protection.patch + +# DEBUGOBJECTS +features/all/rt/debugobjects-rt.patch + +# JUMPLABEL +features/all/rt/jump-label-rt.patch + +# NET +features/all/rt/skbufhead-raw-lock.patch + +# PERF +features/all/rt/perf-move-irq-work-to-softirq-in-rt.patch + +# CONSOLE. NEEDS more thought !!! +features/all/rt/printk-rt-aware.patch +features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch + +# POWERC +features/all/rt/power-use-generic-rwsem-on-rt.patch +features/all/rt/power-disable-highmem-on-rt.patch + +# ARM +features/all/rt/arm-disable-highmem-on-rt.patch +features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch + +# MIPS +features/all/rt/mips-disable-highmem-on-rt.patch + +# NETWORK livelock fix +features/all/rt/net-tx-action-avoid-livelock-on-rt.patch + +# NETWORK DEBUGGING AID +features/all/rt/ping-sysrq.patch + +# KGDB +features/all/rt/kgb-serial-hackaround.patch + +# SYSFS - RT indicator +features/all/rt/sysfs-realtime-entry.patch + +# KMAP/HIGHMEM +features/all/rt/mm-rt-kmap-atomic-scheduling.patch +features/all/rt/0002-x86-highmem-add-a-already-used-pte-check.patch +features/all/rt/0003-arm-highmem-flush-tlb-on-unmap.patch +features/all/rt/arm-enable-highmem-for-rt.patch + +# IPC +features/all/rt/ipc-sem-rework-semaphore-wakeups.patch + +# SYSRQ + +# KVM require constant freq TSC (smp function call -> cpufreq) +features/all/rt/x86-kvm-require-const-tsc-for-rt.patch + +# SCSI/FCOE +features/all/rt/scsi-fcoe-rt-aware.patch + +# X86 crypto +features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch + +# Device mapper +features/all/rt/dm-make-rt-aware.patch + +# ACPI +features/all/rt/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch + +# CPUMASK OFFSTACK +features/all/rt/cpumask-disable-offstack-on-rt.patch + +# RANDOM +features/all/rt/random-make-it-work-on-rt.patch + +# SEQLOCKS +features/all/rt/seqlock-prevent-rt-starvation.patch + +# SCHEDULER +features/all/rt/rfc-sched-rt-fix-wait_task_interactive-to-test-rt_spin_lock-state.patch + +# HOTPLUG +features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch +features/all/rt/cpu-rt-rework-cpu-down.patch +features/all/rt/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch +features/all/rt/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch + +# SCSCI QLA2xxx +features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch + +# NET +features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch +features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch +features/all/rt/net-use-cpu-light-in-ip-send-unicast-reply.patch +features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch +features/all/rt/net-netif-rx-ni-use-local-bh-disable.patch +features/all/rt/net-make-devnet_rename_seq-a-mutex.patch + +# CRYPTO +features/all/rt/peterz-srcu-crypto-chain.patch + +# LOCKDEP +features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch +features/all/rt/rt-rw-lockdep-annotations.patch + +# PERF +features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch + +# SOFTIRQ +features/all/rt/softirq-preempt-fix-3-re.patch 
+features/all/rt/softirq-init-softirq-local-lock-after-per-cpu-section-is-set-up.patch +features/all/rt/softirq-make-serving-softirqs-a-task-flag.patch +features/all/rt/softirq-split-handling-function.patch +features/all/rt/softirq-split-locks.patch +features/all/rt/softirq-adapt-nohz-pending-debug-code-to-new-scheme.patch + +# RCU +features/all/rt/rcu-tiny-solve-rt-mistery.patch +features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch + +# PREEMPT LAZY +features/all/rt/preempt-lazy-support.patch +features/all/rt/x86-preempt-lazy.patch +features/all/rt/arm-preempt-lazy-support.patch +features/all/rt/powerpc-preempt-lazy-support.patch + +# DRIVERS +features/all/rt/i2c-omap-drop-the-lock-hard-irq-context.patch +features/all/rt/mmci-remove-bogus-irq-save.patch + +# I915 +features/all/rt/i915_compile_fix.patch +features/all/rt/gpu-i915-allow-the-user-not-to-do-the-wbinvd.patch +features/all/rt/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch + +# SIMPLE WAITQUEUE +features/all/rt/wait-simple-implementation.patch +features/all/rt/wait-simple-rework-for-completions.patch + +features/all/rt/rcutiny-use-simple-waitqueue.patch +features/all/rt/treercu-use-simple-waitqueue.patch +features/all/rt/rcu-more-swait-conversions.patch + +features/all/rt/completion-use-simple-wait-queues.patch + +# New stuff +# Revisit: We need this in other places as well +features/all/rt/move_sched_delayed_work_to_helper.patch + +# Enable full RT +features/all/rt/kconfig-disable-a-few-options-rt.patch +features/all/rt/kconfig-preempt-rt-full.patch
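
The #if DEBUG_STACK > 0 guards added by x86-disable-debug-stack.patch above exist because CONFIG_PREEMPT_RT_FULL redefines DEBUG_STACK (and STACKFAULT_STACK) to 0, which would turn the designated initializer [DEBUG_STACK - 1] and the matching x86_stack_ids entries into negative array indices. The stand-alone sketch below illustrates only that point; it is not kernel code, the stack-size values and the CONFIG_PREEMPT_RT_FULL toggle are placeholders, and the [0 ... N] range designator is the same GCC extension the kernel uses.

/*
 * Stand-alone illustration (not kernel code): why the initializer for
 * exception_stack_sizes needs an #if guard once DEBUG_STACK can be 0.
 * The size values and the config toggle below are placeholders.
 */
#include <stdio.h>

#define CONFIG_PREEMPT_RT_FULL 1	/* comment out to see the !RT layout */

#ifdef CONFIG_PREEMPT_RT_FULL
# define DEBUG_STACK 0
# define N_EXCEPTION_STACKS 3
#else
# define DEBUG_STACK 4
# define N_EXCEPTION_STACKS 5
#endif

#define EXCEPTION_STKSZ 4096	/* placeholder sizes */
#define DEBUG_STKSZ     8192

static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
	[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
#if DEBUG_STACK > 0
	/* With DEBUG_STACK == 0 this designator would be [-1] and the
	 * array would not compile, hence the guard in the patch. */
	[DEBUG_STACK - 1] = DEBUG_STKSZ
#endif
};

int main(void)
{
	for (int i = 0; i < N_EXCEPTION_STACKS; i++)
		printf("exception stack %d: %u bytes\n", i, exception_stack_sizes[i]);
	return 0;
}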
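
x86-mce-fix-mce-timer-interval.patch above fixes two related problems: mce_timer_fn forwarded the hrtimer by a microseconds value that was never scaled to nanoseconds, and the other call sites multiplied the 32-bit result of jiffies_to_usecs() by a plain int 1000, so the product wrapped before being widened. A minimal user-space sketch of the second problem follows; the 5-minute check interval (300,000,000 us) is assumed only for illustration.

/*
 * Stand-alone illustration (not kernel code) of the 32-bit overflow fixed
 * above by using 1000ULL in the us -> ns conversion.  The interval value
 * is a hypothetical example (check_interval = 5 min).
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int usecs = 300000000U;	/* 300 s expressed in microseconds */

	/* Both operands are 32 bit, so the product wraps before it is
	 * widened to 64 bit for ns_to_ktime(). */
	uint64_t ns_wrapped = usecs * 1000;

	/* The ULL constant forces a 64-bit multiplication, as in
	 * jiffies_to_usecs(iv) * 1000ULL in the patch. */
	uint64_t ns_correct = usecs * 1000ULL;

	printf("wrapped: %llu ns (~%llu s)\n",
	       (unsigned long long)ns_wrapped,
	       (unsigned long long)(ns_wrapped / 1000000000ULL));
	printf("correct: %llu ns (~%llu s)\n",
	       (unsigned long long)ns_correct,
	       (unsigned long long)(ns_correct / 1000000000ULL));
	return 0;
}

With these numbers the wrapped product comes out to roughly 3 seconds instead of 300, which matches the "wrong frequency" symptom described in the commit message.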