diff --git a/debian/changelog b/debian/changelog index f476167bf..1583e002c 100644 --- a/debian/changelog +++ b/debian/changelog @@ -10,6 +10,7 @@ linux (3.8.5-1~experimental.1) UNRELEASED; urgency=low * udeb: Add hid-generic, hid-holtek-kbd, hid-lenovo-tpkbd, hid-roccat-isku, hid-roccat-lua, hid-roccat-savu to input-modules * cdc_ncm,cdc_mbim: Use NCM by default + * [rt] Update to 3.8.4-rt2 and reenable -- Ben Hutchings Wed, 20 Mar 2013 23:32:20 +0000 diff --git a/debian/config/defines b/debian/config/defines index f8db79725..1bba971fd 100644 --- a/debian/config/defines +++ b/debian/config/defines @@ -28,7 +28,7 @@ featuresets: rt [featureset-rt_base] -enabled: false +enabled: true [description] part-long-up: This kernel is not suitable for SMP (multi-processor, diff --git a/debian/patches/features/all/rt/0001-kernel-srcu-merge-common-code-into-a-macro.patch b/debian/patches/features/all/rt/0001-kernel-srcu-merge-common-code-into-a-macro.patch new file mode 100644 index 000000000..cb44c1a06 --- /dev/null +++ b/debian/patches/features/all/rt/0001-kernel-srcu-merge-common-code-into-a-macro.patch @@ -0,0 +1,36 @@ +From db28051c97688cfceaa9a2cea0202af74bb64fdc Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Tue, 19 Mar 2013 14:41:04 +0100 +Subject: [PATCH 1/2] kernel/srcu: merge common code into a macro + +DEFINE_SRCU() and DEFINE_STATIC_SRCU() does the same thing except for +the "static" attribute. This patch moves the common pieces into +_DEFINE_SRCU() which is used by the the former macros either adding the +static attribute or not. + +Signed-off-by: Sebastian Andrzej Siewior +--- + include/linux/srcu.h | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +--- a/include/linux/srcu.h ++++ b/include/linux/srcu.h +@@ -102,13 +102,13 @@ void process_srcu(struct work_struct *wo + * define and init a srcu struct at build time. + * dont't call init_srcu_struct() nor cleanup_srcu_struct() on it. 
+ */ +-#define DEFINE_SRCU(name) \ ++#define _DEFINE_SRCU(name, mod) \ + static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ +- struct srcu_struct name = __SRCU_STRUCT_INIT(name); ++ mod struct srcu_struct name = \ ++ __SRCU_STRUCT_INIT(name); + +-#define DEFINE_STATIC_SRCU(name) \ +- static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ +- static struct srcu_struct name = __SRCU_STRUCT_INIT(name); ++#define DEFINE_SRCU(name) _DEFINE_SRCU(name, ) ++#define DEFINE_STATIC_SRCU(name) _DEFINE_SRCU(name, static) + + /** + * call_srcu() - Queue a callback for invocation after an SRCU grace period diff --git a/debian/patches/features/all/rt/0001-of-fix-recursive-locking-in-of_get_next_available_ch.patch b/debian/patches/features/all/rt/0001-of-fix-recursive-locking-in-of_get_next_available_ch.patch new file mode 100644 index 000000000..4a1597d84 --- /dev/null +++ b/debian/patches/features/all/rt/0001-of-fix-recursive-locking-in-of_get_next_available_ch.patch @@ -0,0 +1,83 @@ +From c31a0c052205e3ec24efc3fe18ef70c3e913f2d4 Mon Sep 17 00:00:00 2001 +From: Stephen Warren +Date: Mon, 11 Feb 2013 14:15:32 -0700 +Subject: [PATCH] of: fix recursive locking in of_get_next_available_child() + +of_get_next_available_child() acquires devtree_lock, then calls +of_device_is_available() which calls of_get_property() which calls +of_find_property() which tries to re-acquire devtree_lock, thus causing +deadlock. + +To avoid this, create a new __of_device_is_available() which calls +__of_get_property() instead, which calls __of_find_property(), which +does not take the lock,. Update of_get_next_available_child() to call +the new __of_device_is_available() since it already owns the lock. 
+ +Signed-off-by: Stephen Warren +Signed-off-by: Grant Likely +--- + drivers/of/base.c | 30 +++++++++++++++++++++++++----- + 1 file changed, 25 insertions(+), 5 deletions(-) + +--- a/drivers/of/base.c ++++ b/drivers/of/base.c +@@ -307,19 +307,19 @@ int of_machine_is_compatible(const char + EXPORT_SYMBOL(of_machine_is_compatible); + + /** +- * of_device_is_available - check if a device is available for use ++ * __of_device_is_available - check if a device is available for use + * +- * @device: Node to check for availability ++ * @device: Node to check for availability, with locks already held + * + * Returns 1 if the status property is absent or set to "okay" or "ok", + * 0 otherwise + */ +-int of_device_is_available(const struct device_node *device) ++static int __of_device_is_available(const struct device_node *device) + { + const char *status; + int statlen; + +- status = of_get_property(device, "status", &statlen); ++ status = __of_get_property(device, "status", &statlen); + if (status == NULL) + return 1; + +@@ -330,6 +330,26 @@ int of_device_is_available(const struct + + return 0; + } ++ ++/** ++ * of_device_is_available - check if a device is available for use ++ * ++ * @device: Node to check for availability ++ * ++ * Returns 1 if the status property is absent or set to "okay" or "ok", ++ * 0 otherwise ++ */ ++int of_device_is_available(const struct device_node *device) ++{ ++ unsigned long flags; ++ int res; ++ ++ raw_spin_lock_irqsave(&devtree_lock, flags); ++ res = __of_device_is_available(device); ++ raw_spin_unlock_irqrestore(&devtree_lock, flags); ++ return res; ++ ++} + EXPORT_SYMBOL(of_device_is_available); + + /** +@@ -421,7 +441,7 @@ struct device_node *of_get_next_availabl + raw_spin_lock(&devtree_lock); + next = prev ? 
prev->sibling : node->child; + for (; next; next = next->sibling) { +- if (!of_device_is_available(next)) ++ if (!__of_device_is_available(next)) + continue; + if (of_node_get(next)) + break; diff --git a/debian/patches/features/all/rt/0002-kernel-SRCU-provide-a-static-initializer.patch b/debian/patches/features/all/rt/0002-kernel-SRCU-provide-a-static-initializer.patch new file mode 100644 index 000000000..391499168 --- /dev/null +++ b/debian/patches/features/all/rt/0002-kernel-SRCU-provide-a-static-initializer.patch @@ -0,0 +1,100 @@ +From 3f09905a6a65ed4fcf8e664abf044c91b2ce7b27 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Tue, 19 Mar 2013 14:44:30 +0100 +Subject: [PATCH 2/2] kernel/SRCU: provide a static initializer + +There are macros for static initializer for the three out of four +possible notifier types, that are: + ATOMIC_NOTIFIER_HEAD() + BLOCKING_NOTIFIER_HEAD() + RAW_NOTIFIER_HEAD() + +This patch provides a static initilizer for the forth type to make it +complete. + +Signed-off-by: Sebastian Andrzej Siewior +--- + include/linux/notifier.h | 26 +++++++++++++++++++++----- + include/linux/srcu.h | 6 +++--- + 2 files changed, 24 insertions(+), 8 deletions(-) + +--- a/include/linux/notifier.h ++++ b/include/linux/notifier.h +@@ -42,9 +42,7 @@ + * in srcu_notifier_call_chain(): no cache bounces and no memory barriers. + * As compensation, srcu_notifier_chain_unregister() is rather expensive. + * SRCU notifier chains should be used when the chain will be called very +- * often but notifier_blocks will seldom be removed. Also, SRCU notifier +- * chains are slightly more difficult to use because they require special +- * runtime initialization. ++ * often but notifier_blocks will seldom be removed. 
+ */ + + struct notifier_block { +@@ -85,7 +83,7 @@ struct srcu_notifier_head { + (name)->head = NULL; \ + } while (0) + +-/* srcu_notifier_heads must be initialized and cleaned up dynamically */ ++/* srcu_notifier_heads must be cleaned up dynamically */ + extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); + #define srcu_cleanup_notifier_head(name) \ + cleanup_srcu_struct(&(name)->srcu); +@@ -98,7 +96,13 @@ extern void srcu_init_notifier_head(stru + .head = NULL } + #define RAW_NOTIFIER_INIT(name) { \ + .head = NULL } +-/* srcu_notifier_heads cannot be initialized statically */ ++ ++#define SRCU_NOTIFIER_INIT(name, pcpu) \ ++ { \ ++ .mutex = __MUTEX_INITIALIZER(name.mutex), \ ++ .head = NULL, \ ++ .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \ ++ } + + #define ATOMIC_NOTIFIER_HEAD(name) \ + struct atomic_notifier_head name = \ +@@ -110,6 +114,18 @@ extern void srcu_init_notifier_head(stru + struct raw_notifier_head name = \ + RAW_NOTIFIER_INIT(name) + ++#define _SRCU_NOTIFIER_HEAD(name, mod) \ ++ static DEFINE_PER_CPU(struct srcu_struct_array, \ ++ name##_head_srcu_array); \ ++ mod struct srcu_notifier_head name = \ ++ SRCU_NOTIFIER_INIT(name, name##_head_srcu_array) ++ ++#define SRCU_NOTIFIER_HEAD(name) \ ++ _SRCU_NOTIFIER_HEAD(name, ) ++ ++#define SRCU_NOTIFIER_HEAD_STATIC(name) \ ++ _SRCU_NOTIFIER_HEAD(name, static) ++ + #ifdef __KERNEL__ + + extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh, +--- a/include/linux/srcu.h ++++ b/include/linux/srcu.h +@@ -84,10 +84,10 @@ int init_srcu_struct(struct srcu_struct + + void process_srcu(struct work_struct *work); + +-#define __SRCU_STRUCT_INIT(name) \ ++#define __SRCU_STRUCT_INIT(name, pcpu_name) \ + { \ + .completed = -300, \ +- .per_cpu_ref = &name##_srcu_array, \ ++ .per_cpu_ref = &pcpu_name, \ + .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \ + .running = false, \ + .batch_queue = RCU_BATCH_INIT(name.batch_queue), \ +@@ -105,7 +105,7 @@ void process_srcu(struct 
work_struct *wo + #define _DEFINE_SRCU(name, mod) \ + static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ + mod struct srcu_struct name = \ +- __SRCU_STRUCT_INIT(name); ++ __SRCU_STRUCT_INIT(name, name##_srcu_array); + + #define DEFINE_SRCU(name) _DEFINE_SRCU(name, ) + #define DEFINE_STATIC_SRCU(name) _DEFINE_SRCU(name, static) diff --git a/debian/patches/features/all/rt/0002-x86-highmem-add-a-already-used-pte-check.patch b/debian/patches/features/all/rt/0002-x86-highmem-add-a-already-used-pte-check.patch new file mode 100644 index 000000000..c6d8735ba --- /dev/null +++ b/debian/patches/features/all/rt/0002-x86-highmem-add-a-already-used-pte-check.patch @@ -0,0 +1,23 @@ +From 65513f34449eedb6b84c24a3583266534c1627e4 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Mon, 11 Mar 2013 17:09:55 +0100 +Subject: [PATCH 2/6] x86/highmem: add a "already used pte" check + +This is a copy from kmap_atomic_prot(). + +Signed-off-by: Sebastian Andrzej Siewior +--- + arch/x86/mm/iomap_32.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/arch/x86/mm/iomap_32.c ++++ b/arch/x86/mm/iomap_32.c +@@ -65,6 +65,8 @@ void *kmap_atomic_prot_pfn(unsigned long + type = kmap_atomic_idx_push(); + idx = type + KM_TYPE_NR * smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); ++ WARN_ON(!pte_none(*(kmap_pte - idx))); ++ + #ifdef CONFIG_PREEMPT_RT_FULL + current->kmap_pte[type] = pte; + #endif diff --git a/debian/patches/features/all/rt/0003-arm-highmem-flush-tlb-on-unmap.patch b/debian/patches/features/all/rt/0003-arm-highmem-flush-tlb-on-unmap.patch new file mode 100644 index 000000000..71a8b20c4 --- /dev/null +++ b/debian/patches/features/all/rt/0003-arm-highmem-flush-tlb-on-unmap.patch @@ -0,0 +1,28 @@ +From e2ca4d092d9c6e6b07b465b4d81da207bbcc7437 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Mon, 11 Mar 2013 21:37:27 +0100 +Subject: [PATCH 3/6] arm/highmem: flush tlb on unmap + +The tlb should be flushed on unmap and thus 
make the mapping entry +invalid. This is only done in the non-debug case which does not look +right. + +Signed-off-by: Sebastian Andrzej Siewior +--- + arch/arm/mm/highmem.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/arch/arm/mm/highmem.c ++++ b/arch/arm/mm/highmem.c +@@ -95,10 +95,10 @@ void __kunmap_atomic(void *kvaddr) + __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); + #ifdef CONFIG_DEBUG_HIGHMEM + BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); +- set_top_pte(vaddr, __pte(0)); + #else + (void) idx; /* to kill a warning */ + #endif ++ set_top_pte(vaddr, __pte(0)); + kmap_atomic_idx_pop(); + } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) { + /* this address was obtained through kmap_high_get() */ diff --git a/debian/patches/features/all/rt/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch b/debian/patches/features/all/rt/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch new file mode 100644 index 000000000..8f6323f55 --- /dev/null +++ b/debian/patches/features/all/rt/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch @@ -0,0 +1,44 @@ +From eef09918aff670a6162d2ae5fe87b393698ef57d Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner +Date: Fri, 1 Mar 2013 11:17:42 +0100 +Subject: [PATCH 5/6] futex: Ensure lock/unlock symetry versus pi_lock and + hash bucket lock + +In exit_pi_state_list() we have the following locking construct: + + spin_lock(&hb->lock); + raw_spin_lock_irq(&curr->pi_lock); + + ... + spin_unlock(&hb->lock); + +In !RT this works, but on RT the migrate_enable() function which is +called from spin_unlock() sees atomic context due to the held pi_lock +and just decrements the migrate_disable_atomic counter of the +task. Now the next call to migrate_disable() sees the counter being +negative and issues a warning. That check should be in +migrate_enable() already. + +Fix this by dropping pi_lock before unlocking hb->lock and reaquire +pi_lock after that again. 
This is safe as the loop code reevaluates +head again under the pi_lock. + +Reported-by: Yong Zhang +Signed-off-by: Thomas Gleixner +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/futex.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -568,7 +568,9 @@ void exit_pi_state_list(struct task_stru + * task still owns the PI-state: + */ + if (head->next != next) { ++ raw_spin_unlock_irq(&curr->pi_lock); + spin_unlock(&hb->lock); ++ raw_spin_lock_irq(&curr->pi_lock); + continue; + } + diff --git a/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch b/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch new file mode 100644 index 000000000..38da04189 --- /dev/null +++ b/debian/patches/features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch @@ -0,0 +1,77 @@ +From b72b514282ffad0d665ea94932b968f388304079 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Thu, 21 Mar 2013 19:01:05 +0100 +Subject: [PATCH] HACK: printk: drop the logbuf_lock more often + +The lock is hold with irgs off. The latency drops 500us+ on my arm bugs +with a "full" buffer after executing "dmesg" on the shell. 
+ +Signed-off-by: Sebastian Andrzej Siewior +--- + kernel/printk.c | 27 ++++++++++++++++++++++++++- + 1 file changed, 26 insertions(+), 1 deletion(-) + +--- a/kernel/printk.c ++++ b/kernel/printk.c +@@ -1072,6 +1072,7 @@ static int syslog_print_all(char __user + { + char *text; + int len = 0; ++ int attempts = 0; + + text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL); + if (!text) +@@ -1083,7 +1084,14 @@ static int syslog_print_all(char __user + u64 seq; + u32 idx; + enum log_flags prev; +- ++ int num_msg; ++try_again: ++ attempts++; ++ if (attempts > 10) { ++ len = -EBUSY; ++ goto out; ++ } ++ num_msg = 0; + if (clear_seq < log_first_seq) { + /* messages are gone, move to first available one */ + clear_seq = log_first_seq; +@@ -1104,6 +1112,14 @@ static int syslog_print_all(char __user + prev = msg->flags; + idx = log_next(idx); + seq++; ++ num_msg++; ++ if (num_msg > 5) { ++ num_msg = 0; ++ raw_spin_unlock_irq(&logbuf_lock); ++ raw_spin_lock_irq(&logbuf_lock); ++ if (clear_seq < log_first_seq) ++ goto try_again; ++ } + } + + /* move first record forward until length fits into the buffer */ +@@ -1117,6 +1133,14 @@ static int syslog_print_all(char __user + prev = msg->flags; + idx = log_next(idx); + seq++; ++ num_msg++; ++ if (num_msg > 5) { ++ num_msg = 0; ++ raw_spin_unlock_irq(&logbuf_lock); ++ raw_spin_lock_irq(&logbuf_lock); ++ if (clear_seq < log_first_seq) ++ goto try_again; ++ } + } + + /* last message fitting into this dump */ +@@ -1158,6 +1182,7 @@ static int syslog_print_all(char __user + clear_seq = log_next_seq; + clear_idx = log_next_idx; + } ++out: + raw_spin_unlock_irq(&logbuf_lock); + + kfree(text); diff --git a/debian/patches/features/all/rt/acpi-use-local-irq-nort.patch b/debian/patches/features/all/rt/acpi-use-local-irq-nort.patch index 45818366b..f1dcb8cd3 100644 --- a/debian/patches/features/all/rt/acpi-use-local-irq-nort.patch +++ b/debian/patches/features/all/rt/acpi-use-local-irq-nort.patch @@ -10,10 +10,8 @@ Signed-off-by: Thomas 
Gleixner arch/x86/include/asm/acpi.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) -Index: linux-stable/arch/x86/include/asm/acpi.h -=================================================================== ---- linux-stable.orig/arch/x86/include/asm/acpi.h -+++ linux-stable/arch/x86/include/asm/acpi.h +--- a/arch/x86/include/asm/acpi.h ++++ b/arch/x86/include/asm/acpi.h @@ -51,8 +51,8 @@ #define ACPI_ASM_MACROS diff --git a/debian/patches/features/all/rt/arch-use-pagefault-disabled.patch b/debian/patches/features/all/rt/arch-use-pagefault-disabled.patch index 537c4acf5..961cc0c37 100644 --- a/debian/patches/features/all/rt/arch-use-pagefault-disabled.patch +++ b/debian/patches/features/all/rt/arch-use-pagefault-disabled.patch @@ -30,10 +30,8 @@ Signed-off-by: Thomas Gleixner arch/xtensa/mm/fault.c | 2 +- 22 files changed, 26 insertions(+), 23 deletions(-) -Index: linux-stable/arch/alpha/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/alpha/mm/fault.c -+++ linux-stable/arch/alpha/mm/fault.c +--- a/arch/alpha/mm/fault.c ++++ b/arch/alpha/mm/fault.c @@ -108,7 +108,7 @@ do_page_fault(unsigned long address, uns /* If we're in an interrupt context, or have no user context, @@ -43,10 +41,8 @@ Index: linux-stable/arch/alpha/mm/fault.c goto no_context; #ifdef CONFIG_ALPHA_LARGE_VMALLOC -Index: linux-stable/arch/arm/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/arm/mm/fault.c -+++ linux-stable/arch/arm/mm/fault.c +--- a/arch/arm/mm/fault.c ++++ b/arch/arm/mm/fault.c @@ -279,7 +279,7 @@ do_page_fault(unsigned long addr, unsign * If we're in an interrupt or have no user * context, we must not take the fault.. 
@@ -56,10 +52,8 @@ Index: linux-stable/arch/arm/mm/fault.c goto no_context; /* -Index: linux-stable/arch/avr32/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/avr32/mm/fault.c -+++ linux-stable/arch/avr32/mm/fault.c +--- a/arch/avr32/mm/fault.c ++++ b/arch/avr32/mm/fault.c @@ -81,7 +81,8 @@ asmlinkage void do_page_fault(unsigned l * If we're in an interrupt or have no user context, we must * not take the fault... @@ -70,10 +64,8 @@ Index: linux-stable/arch/avr32/mm/fault.c goto no_context; local_irq_enable(); -Index: linux-stable/arch/cris/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/cris/mm/fault.c -+++ linux-stable/arch/cris/mm/fault.c +--- a/arch/cris/mm/fault.c ++++ b/arch/cris/mm/fault.c @@ -114,7 +114,7 @@ do_page_fault(unsigned long address, str * user context, we must not take the fault. */ @@ -83,10 +75,8 @@ Index: linux-stable/arch/cris/mm/fault.c goto no_context; retry: -Index: linux-stable/arch/frv/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/frv/mm/fault.c -+++ linux-stable/arch/frv/mm/fault.c +--- a/arch/frv/mm/fault.c ++++ b/arch/frv/mm/fault.c @@ -78,7 +78,7 @@ asmlinkage void do_page_fault(int datamm * If we're in an interrupt or have no user * context, we must not take the fault.. @@ -96,10 +86,8 @@ Index: linux-stable/arch/frv/mm/fault.c goto no_context; down_read(&mm->mmap_sem); -Index: linux-stable/arch/ia64/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/ia64/mm/fault.c -+++ linux-stable/arch/ia64/mm/fault.c +--- a/arch/ia64/mm/fault.c ++++ b/arch/ia64/mm/fault.c @@ -98,7 +98,7 @@ ia64_do_page_fault (unsigned long addres /* * If we're in an interrupt or have no user context, we must not take the fault.. 
@@ -109,10 +97,8 @@ Index: linux-stable/arch/ia64/mm/fault.c goto no_context; #ifdef CONFIG_VIRTUAL_MEM_MAP -Index: linux-stable/arch/m32r/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/m32r/mm/fault.c -+++ linux-stable/arch/m32r/mm/fault.c +--- a/arch/m32r/mm/fault.c ++++ b/arch/m32r/mm/fault.c @@ -114,7 +114,7 @@ asmlinkage void do_page_fault(struct pt_ * If we're in an interrupt or have no user context or are running in an * atomic region then we must not take the fault.. @@ -122,11 +108,9 @@ Index: linux-stable/arch/m32r/mm/fault.c goto bad_area_nosemaphore; /* When running in the kernel we expect faults to occur only to -Index: linux-stable/arch/m68k/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/m68k/mm/fault.c -+++ linux-stable/arch/m68k/mm/fault.c -@@ -85,7 +85,7 @@ int do_page_fault(struct pt_regs *regs, +--- a/arch/m68k/mm/fault.c ++++ b/arch/m68k/mm/fault.c +@@ -85,7 +85,7 @@ int do_page_fault(struct pt_regs *regs, * If we're in an interrupt or have no user * context, we must not take the fault.. 
*/ @@ -135,10 +119,8 @@ Index: linux-stable/arch/m68k/mm/fault.c goto no_context; retry: -Index: linux-stable/arch/microblaze/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/microblaze/mm/fault.c -+++ linux-stable/arch/microblaze/mm/fault.c +--- a/arch/microblaze/mm/fault.c ++++ b/arch/microblaze/mm/fault.c @@ -108,7 +108,7 @@ void do_page_fault(struct pt_regs *regs, if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11) is_write = 0; @@ -148,10 +130,8 @@ Index: linux-stable/arch/microblaze/mm/fault.c if (kernel_mode(regs)) goto bad_area_nosemaphore; -Index: linux-stable/arch/mips/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/mips/mm/fault.c -+++ linux-stable/arch/mips/mm/fault.c +--- a/arch/mips/mm/fault.c ++++ b/arch/mips/mm/fault.c @@ -89,7 +89,7 @@ asmlinkage void __kprobes do_page_fault( * If we're in an interrupt or have no user * context, we must not take the fault.. @@ -161,11 +141,9 @@ Index: linux-stable/arch/mips/mm/fault.c goto bad_area_nosemaphore; retry: -Index: linux-stable/arch/mn10300/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/mn10300/mm/fault.c -+++ linux-stable/arch/mn10300/mm/fault.c -@@ -167,7 +167,7 @@ asmlinkage void do_page_fault(struct pt_ +--- a/arch/mn10300/mm/fault.c ++++ b/arch/mn10300/mm/fault.c +@@ -168,7 +168,7 @@ asmlinkage void do_page_fault(struct pt_ * If we're in an interrupt or have no user * context, we must not take the fault.. 
*/ @@ -173,11 +151,9 @@ Index: linux-stable/arch/mn10300/mm/fault.c + if (in_atomic() || !mm || current->pagefault_disabled) goto no_context; - down_read(&mm->mmap_sem); -Index: linux-stable/arch/parisc/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/parisc/mm/fault.c -+++ linux-stable/arch/parisc/mm/fault.c + retry: +--- a/arch/parisc/mm/fault.c ++++ b/arch/parisc/mm/fault.c @@ -176,7 +176,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long acc_type; int fault; @@ -187,11 +163,9 @@ Index: linux-stable/arch/parisc/mm/fault.c goto no_context; down_read(&mm->mmap_sem); -Index: linux-stable/arch/powerpc/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/powerpc/mm/fault.c -+++ linux-stable/arch/powerpc/mm/fault.c -@@ -261,7 +261,7 @@ int __kprobes do_page_fault(struct pt_re +--- a/arch/powerpc/mm/fault.c ++++ b/arch/powerpc/mm/fault.c +@@ -259,7 +259,7 @@ int __kprobes do_page_fault(struct pt_re if (!arch_irq_disabled_regs(regs)) local_irq_enable(); @@ -200,11 +174,9 @@ Index: linux-stable/arch/powerpc/mm/fault.c if (!user_mode(regs)) return SIGSEGV; /* in_atomic() in user mode is really bad, -Index: linux-stable/arch/s390/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/s390/mm/fault.c -+++ linux-stable/arch/s390/mm/fault.c -@@ -286,7 +286,8 @@ static inline int do_exception(struct pt +--- a/arch/s390/mm/fault.c ++++ b/arch/s390/mm/fault.c +@@ -296,7 +296,8 @@ static inline int do_exception(struct pt * user context. 
 */ fault = VM_FAULT_BADCONTEXT; - if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) + if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm || + current->pagefault_disabled)) goto out; address = trans_exc_code & __FAIL_ADDR_MASK; -@@ -423,7 +424,8 @@ void __kprobes do_asce_exception(struct - unsigned long trans_exc_code; +@@ -435,7 +436,8 @@ void __kprobes do_asce_exception(struct + clear_tsk_thread_flag(current, TIF_PER_TRAP); trans_exc_code = regs->int_parm_long; - if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) + if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm || -+ current->pagefault_disabled)) ++ current->pagefault_disabled)) goto no_context; down_read(&mm->mmap_sem); -Index: linux-stable/arch/score/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/score/mm/fault.c -+++ linux-stable/arch/score/mm/fault.c +--- a/arch/score/mm/fault.c ++++ b/arch/score/mm/fault.c @@ -72,7 +72,7 @@ asmlinkage void do_page_fault(struct pt_ * If we're in an interrupt or have no user * context, we must not take the fault.. 
@@ -237,11 +207,9 @@ Index: linux-stable/arch/score/mm/fault.c goto bad_area_nosemaphore; down_read(&mm->mmap_sem); -Index: linux-stable/arch/sh/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/sh/mm/fault.c -+++ linux-stable/arch/sh/mm/fault.c -@@ -445,7 +445,7 @@ asmlinkage void __kprobes do_page_fault( +--- a/arch/sh/mm/fault.c ++++ b/arch/sh/mm/fault.c +@@ -440,7 +440,7 @@ asmlinkage void __kprobes do_page_fault( * If we're in an interrupt, have no user context or are running * in an atomic region then we must not take the fault: */ @@ -250,10 +218,8 @@ Index: linux-stable/arch/sh/mm/fault.c bad_area_nosemaphore(regs, error_code, address); return; } -Index: linux-stable/arch/sparc/mm/fault_32.c -=================================================================== ---- linux-stable.orig/arch/sparc/mm/fault_32.c -+++ linux-stable/arch/sparc/mm/fault_32.c +--- a/arch/sparc/mm/fault_32.c ++++ b/arch/sparc/mm/fault_32.c @@ -200,7 +200,7 @@ asmlinkage void do_sparc_fault(struct pt * If we're in an interrupt or have no user * context, we must not take the fault.. @@ -263,11 +229,9 @@ Index: linux-stable/arch/sparc/mm/fault_32.c goto no_context; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); -Index: linux-stable/arch/sparc/mm/fault_64.c -=================================================================== ---- linux-stable.orig/arch/sparc/mm/fault_64.c -+++ linux-stable/arch/sparc/mm/fault_64.c -@@ -323,7 +323,7 @@ asmlinkage void __kprobes do_sparc64_fau +--- a/arch/sparc/mm/fault_64.c ++++ b/arch/sparc/mm/fault_64.c +@@ -321,7 +321,7 @@ asmlinkage void __kprobes do_sparc64_fau * If we're in an interrupt or have no user * context, we must not take the fault.. 
*/ @@ -276,11 +240,9 @@ Index: linux-stable/arch/sparc/mm/fault_64.c goto intr_or_no_mm; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); -Index: linux-stable/arch/tile/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/tile/mm/fault.c -+++ linux-stable/arch/tile/mm/fault.c -@@ -359,7 +359,7 @@ static int handle_page_fault(struct pt_r +--- a/arch/tile/mm/fault.c ++++ b/arch/tile/mm/fault.c +@@ -360,7 +360,7 @@ static int handle_page_fault(struct pt_r * If we're in an interrupt, have no user context or are running in an * atomic region then we must not take the fault. */ @@ -289,10 +251,8 @@ Index: linux-stable/arch/tile/mm/fault.c vma = NULL; /* happy compiler */ goto bad_area_nosemaphore; } -Index: linux-stable/arch/um/kernel/trap.c -=================================================================== ---- linux-stable.orig/arch/um/kernel/trap.c -+++ linux-stable/arch/um/kernel/trap.c +--- a/arch/um/kernel/trap.c ++++ b/arch/um/kernel/trap.c @@ -39,7 +39,7 @@ int handle_page_fault(unsigned long addr * If the fault was during atomic operation, don't take the fault, just * fail. 
@@ -302,11 +262,9 @@ Index: linux-stable/arch/um/kernel/trap.c goto out_nosemaphore; retry: -Index: linux-stable/arch/x86/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/x86/mm/fault.c -+++ linux-stable/arch/x86/mm/fault.c -@@ -1094,7 +1094,7 @@ do_page_fault(struct pt_regs *regs, unsi +--- a/arch/x86/mm/fault.c ++++ b/arch/x86/mm/fault.c +@@ -1108,7 +1108,7 @@ __do_page_fault(struct pt_regs *regs, un * If we're in an interrupt, have no user context or are running * in an atomic region then we must not take the fault: */ @@ -315,10 +273,8 @@ Index: linux-stable/arch/x86/mm/fault.c bad_area_nosemaphore(regs, error_code, address); return; } -Index: linux-stable/arch/xtensa/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/xtensa/mm/fault.c -+++ linux-stable/arch/xtensa/mm/fault.c +--- a/arch/xtensa/mm/fault.c ++++ b/arch/xtensa/mm/fault.c @@ -57,7 +57,7 @@ void do_page_fault(struct pt_regs *regs) /* If we're in an interrupt or have no user * context, we must not take the fault.. 
diff --git a/debian/patches/features/all/rt/arm-allow-irq-threading.patch b/debian/patches/features/all/rt/arm-allow-irq-threading.patch index 57573a0bc..ad9fa3112 100644 --- a/debian/patches/features/all/rt/arm-allow-irq-threading.patch +++ b/debian/patches/features/all/rt/arm-allow-irq-threading.patch @@ -10,15 +10,13 @@ Signed-off-by: Thomas Gleixner arch/arm/Kconfig | 1 + 1 file changed, 1 insertion(+) -Index: linux-stable/arch/arm/Kconfig -=================================================================== ---- linux-stable.orig/arch/arm/Kconfig -+++ linux-stable/arch/arm/Kconfig -@@ -40,6 +40,7 @@ config ARM - select GENERIC_IRQ_SHOW - select ARCH_WANT_IPC_PARSE_VERSION +--- a/arch/arm/Kconfig ++++ b/arch/arm/Kconfig +@@ -17,6 +17,7 @@ config ARM + select GENERIC_STRNCPY_FROM_USER + select GENERIC_STRNLEN_USER select HARDIRQS_SW_RESEND + select IRQ_FORCED_THREADING - select CPU_PM if (SUSPEND || CPU_IDLE) - select GENERIC_PCI_IOMAP - select HAVE_BPF_JIT + select HAVE_AOUT + select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL + select HAVE_ARCH_KGDB diff --git a/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch b/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch index ac3700b4c..38f059eda 100644 --- a/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch +++ b/debian/patches/features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch @@ -14,11 +14,9 @@ Signed-off-by: Thomas Gleixner arch/arm/mach-at91/at91sam926x_time.c | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) -Index: linux-stable/arch/arm/mach-at91/at91rm9200_time.c -=================================================================== ---- linux-stable.orig/arch/arm/mach-at91/at91rm9200_time.c -+++ linux-stable/arch/arm/mach-at91/at91rm9200_time.c -@@ -130,6 +130,7 @@ clkevt32k_mode(enum clock_event_mode mod +--- a/arch/arm/mach-at91/at91rm9200_time.c ++++ 
b/arch/arm/mach-at91/at91rm9200_time.c +@@ -134,6 +134,7 @@ clkevt32k_mode(enum clock_event_mode mod break; case CLOCK_EVT_MODE_SHUTDOWN: case CLOCK_EVT_MODE_UNUSED: @@ -26,11 +24,9 @@ Index: linux-stable/arch/arm/mach-at91/at91rm9200_time.c case CLOCK_EVT_MODE_RESUME: irqmask = 0; break; -Index: linux-stable/arch/arm/mach-at91/at91sam926x_time.c -=================================================================== ---- linux-stable.orig/arch/arm/mach-at91/at91sam926x_time.c -+++ linux-stable/arch/arm/mach-at91/at91sam926x_time.c -@@ -67,7 +67,7 @@ static struct clocksource pit_clk = { +--- a/arch/arm/mach-at91/at91sam926x_time.c ++++ b/arch/arm/mach-at91/at91sam926x_time.c +@@ -77,7 +77,7 @@ static struct clocksource pit_clk = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; @@ -39,7 +35,7 @@ Index: linux-stable/arch/arm/mach-at91/at91sam926x_time.c /* * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16) */ -@@ -76,6 +76,8 @@ pit_clkevt_mode(enum clock_event_mode mo +@@ -86,6 +86,8 @@ pit_clkevt_mode(enum clock_event_mode mo { switch (mode) { case CLOCK_EVT_MODE_PERIODIC: @@ -48,7 +44,7 @@ Index: linux-stable/arch/arm/mach-at91/at91sam926x_time.c /* update clocksource counter */ pit_cnt += pit_cycle * PIT_PICNT(pit_read(AT91_PIT_PIVR)); pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN -@@ -88,6 +90,7 @@ pit_clkevt_mode(enum clock_event_mode mo +@@ -98,6 +100,7 @@ pit_clkevt_mode(enum clock_event_mode mo case CLOCK_EVT_MODE_UNUSED: /* disable irq, leaving the clocksource active */ pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN); diff --git a/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch b/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch index 2b7a40909..b649cd405 100644 --- a/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch +++ b/debian/patches/features/all/rt/arm-at91-tclib-default-to-tclib-timer-for-rt.patch @@ -11,11 +11,9 @@ 
Signed-off-by: Thomas Gleixner drivers/misc/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) -Index: linux-stable/drivers/misc/Kconfig -=================================================================== ---- linux-stable.orig/drivers/misc/Kconfig -+++ linux-stable/drivers/misc/Kconfig -@@ -73,6 +73,7 @@ config AB8500_PWM +--- a/drivers/misc/Kconfig ++++ b/drivers/misc/Kconfig +@@ -63,6 +63,7 @@ config ATMEL_PWM config ATMEL_TCLIB bool "Atmel AT32/AT91 Timer/Counter Library" depends on (AVR32 || ARCH_AT91) @@ -23,7 +21,7 @@ Index: linux-stable/drivers/misc/Kconfig help Select this if you want a library to allocate the Timer/Counter blocks found on many Atmel processors. This facilitates using -@@ -105,7 +106,7 @@ config ATMEL_TCB_CLKSRC_BLOCK +@@ -95,7 +96,7 @@ config ATMEL_TCB_CLKSRC_BLOCK config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK bool "TC Block use 32 KiHz clock" depends on ATMEL_TCB_CLKSRC diff --git a/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch b/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch index 53109428e..91be8d97f 100644 --- a/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch +++ b/debian/patches/features/all/rt/arm-convert-boot-lock-to-raw.patch @@ -23,24 +23,23 @@ Signed-off-by: Thomas Gleixner arch/arm/mach-exynos/platsmp.c | 12 ++++++------ arch/arm/mach-msm/platsmp.c | 10 +++++----- arch/arm/mach-omap2/omap-smp.c | 10 +++++----- + arch/arm/mach-spear13xx/platsmp.c | 10 +++++----- arch/arm/mach-ux500/platsmp.c | 10 +++++----- arch/arm/plat-versatile/platsmp.c | 10 +++++----- - 5 files changed, 26 insertions(+), 26 deletions(-) + 6 files changed, 31 insertions(+), 31 deletions(-) -Index: linux-stable/arch/arm/mach-exynos/platsmp.c -=================================================================== ---- linux-stable.orig/arch/arm/mach-exynos/platsmp.c -+++ linux-stable/arch/arm/mach-exynos/platsmp.c -@@ -62,7 +62,7 @@ static void __iomem *scu_base_addr(void) +--- 
a/arch/arm/mach-exynos/platsmp.c ++++ b/arch/arm/mach-exynos/platsmp.c +@@ -71,7 +71,7 @@ static void __iomem *scu_base_addr(void) return (void __iomem *)(S5P_VA_SCU); } -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); - void __cpuinit platform_secondary_init(unsigned int cpu) + static void __cpuinit exynos_secondary_init(unsigned int cpu) { -@@ -82,8 +82,8 @@ void __cpuinit platform_secondary_init(u +@@ -91,8 +91,8 @@ static void __cpuinit exynos_secondary_i /* * Synchronise with the boot thread. */ @@ -50,8 +49,8 @@ Index: linux-stable/arch/arm/mach-exynos/platsmp.c + raw_spin_unlock(&boot_lock); } - int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) -@@ -94,7 +94,7 @@ int __cpuinit boot_secondary(unsigned in + static int __cpuinit exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -104,7 +104,7 @@ static int __cpuinit exynos_boot_seconda * Set synchronisation state between this boot processor * and the secondary one */ @@ -60,7 +59,7 @@ Index: linux-stable/arch/arm/mach-exynos/platsmp.c /* * The secondary processor is waiting to be released from -@@ -123,7 +123,7 @@ int __cpuinit boot_secondary(unsigned in +@@ -133,7 +133,7 @@ static int __cpuinit exynos_boot_seconda if (timeout == 0) { printk(KERN_ERR "cpu1 power enable failed"); @@ -69,7 +68,7 @@ Index: linux-stable/arch/arm/mach-exynos/platsmp.c return -ETIMEDOUT; } } -@@ -151,7 +151,7 @@ int __cpuinit boot_secondary(unsigned in +@@ -161,7 +161,7 @@ static int __cpuinit exynos_boot_seconda * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ @@ -78,20 +77,18 @@ Index: linux-stable/arch/arm/mach-exynos/platsmp.c return pen_release != -1 ? 
-ENOSYS : 0; } -Index: linux-stable/arch/arm/mach-msm/platsmp.c -=================================================================== ---- linux-stable.orig/arch/arm/mach-msm/platsmp.c -+++ linux-stable/arch/arm/mach-msm/platsmp.c -@@ -40,7 +40,7 @@ extern void msm_secondary_startup(void); - */ - volatile int pen_release = -1; +--- a/arch/arm/mach-msm/platsmp.c ++++ b/arch/arm/mach-msm/platsmp.c +@@ -31,7 +31,7 @@ + + extern void msm_secondary_startup(void); -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); static inline int get_core_count(void) { -@@ -70,8 +70,8 @@ void __cpuinit platform_secondary_init(u +@@ -58,8 +58,8 @@ static void __cpuinit msm_secondary_init /* * Synchronise with the boot thread. */ @@ -102,7 +99,7 @@ Index: linux-stable/arch/arm/mach-msm/platsmp.c } static __cpuinit void prepare_cold_cpu(unsigned int cpu) -@@ -108,7 +108,7 @@ int __cpuinit boot_secondary(unsigned in +@@ -96,7 +96,7 @@ static int __cpuinit msm_boot_secondary( * set synchronisation state between this boot processor * and the secondary one */ @@ -111,7 +108,7 @@ Index: linux-stable/arch/arm/mach-msm/platsmp.c /* * The secondary processor is waiting to be released from -@@ -142,7 +142,7 @@ int __cpuinit boot_secondary(unsigned in +@@ -130,7 +130,7 @@ static int __cpuinit msm_boot_secondary( * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ @@ -120,11 +117,9 @@ Index: linux-stable/arch/arm/mach-msm/platsmp.c return pen_release != -1 ? 
-ENOSYS : 0; } -Index: linux-stable/arch/arm/mach-omap2/omap-smp.c -=================================================================== ---- linux-stable.orig/arch/arm/mach-omap2/omap-smp.c -+++ linux-stable/arch/arm/mach-omap2/omap-smp.c -@@ -42,7 +42,7 @@ +--- a/arch/arm/mach-omap2/omap-smp.c ++++ b/arch/arm/mach-omap2/omap-smp.c +@@ -45,7 +45,7 @@ u16 pm44xx_errata; /* SCU base address */ static void __iomem *scu_base; @@ -133,7 +128,7 @@ Index: linux-stable/arch/arm/mach-omap2/omap-smp.c void __iomem *omap4_get_scu_base(void) { -@@ -73,8 +73,8 @@ void __cpuinit platform_secondary_init(u +@@ -76,8 +76,8 @@ static void __cpuinit omap4_secondary_in /* * Synchronise with the boot thread. */ @@ -143,8 +138,8 @@ Index: linux-stable/arch/arm/mach-omap2/omap-smp.c + raw_spin_unlock(&boot_lock); } - int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) -@@ -87,7 +87,7 @@ int __cpuinit boot_secondary(unsigned in + static int __cpuinit omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -90,7 +90,7 @@ static int __cpuinit omap4_boot_secondar * Set synchronisation state between this boot processor * and the secondary one */ @@ -153,7 +148,7 @@ Index: linux-stable/arch/arm/mach-omap2/omap-smp.c /* * Update the AuxCoreBoot0 with boot state for secondary core. 
-@@ -131,7 +131,7 @@ int __cpuinit boot_secondary(unsigned in +@@ -163,7 +163,7 @@ static int __cpuinit omap4_boot_secondar * Now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ @@ -162,20 +157,18 @@ Index: linux-stable/arch/arm/mach-omap2/omap-smp.c return 0; } -Index: linux-stable/arch/arm/mach-ux500/platsmp.c -=================================================================== ---- linux-stable.orig/arch/arm/mach-ux500/platsmp.c -+++ linux-stable/arch/arm/mach-ux500/platsmp.c -@@ -56,7 +56,7 @@ static void __iomem *scu_base_addr(void) - return NULL; - } +--- a/arch/arm/mach-spear13xx/platsmp.c ++++ b/arch/arm/mach-spear13xx/platsmp.c +@@ -21,7 +21,7 @@ + #include + #include -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); - void __cpuinit platform_secondary_init(unsigned int cpu) - { -@@ -76,8 +76,8 @@ void __cpuinit platform_secondary_init(u + static void __iomem *scu_base = IOMEM(VA_SCU_BASE); + +@@ -44,8 +44,8 @@ static void __cpuinit spear13xx_secondar /* * Synchronise with the boot thread. 
*/ @@ -185,8 +178,8 @@ Index: linux-stable/arch/arm/mach-ux500/platsmp.c + raw_spin_unlock(&boot_lock); } - int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) -@@ -88,7 +88,7 @@ int __cpuinit boot_secondary(unsigned in + static int __cpuinit spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -56,7 +56,7 @@ static int __cpuinit spear13xx_boot_seco * set synchronisation state between this boot processor * and the secondary one */ @@ -195,7 +188,7 @@ Index: linux-stable/arch/arm/mach-ux500/platsmp.c /* * The secondary processor is waiting to be released from -@@ -109,7 +109,7 @@ int __cpuinit boot_secondary(unsigned in +@@ -83,7 +83,7 @@ static int __cpuinit spear13xx_boot_seco * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ @@ -204,20 +197,18 @@ Index: linux-stable/arch/arm/mach-ux500/platsmp.c return pen_release != -1 ? -ENOSYS : 0; } -Index: linux-stable/arch/arm/plat-versatile/platsmp.c -=================================================================== ---- linux-stable.orig/arch/arm/plat-versatile/platsmp.c -+++ linux-stable/arch/arm/plat-versatile/platsmp.c -@@ -38,7 +38,7 @@ static void __cpuinit write_pen_release( - outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1)); +--- a/arch/arm/mach-ux500/platsmp.c ++++ b/arch/arm/mach-ux500/platsmp.c +@@ -50,7 +50,7 @@ static void __iomem *scu_base_addr(void) + return NULL; } -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); - void __cpuinit platform_secondary_init(unsigned int cpu) + static void __cpuinit ux500_secondary_init(unsigned int cpu) { -@@ -58,8 +58,8 @@ void __cpuinit platform_secondary_init(u +@@ -70,8 +70,8 @@ static void __cpuinit ux500_secondary_in /* * Synchronise with the boot thread. 
*/ @@ -227,17 +218,57 @@ Index: linux-stable/arch/arm/plat-versatile/platsmp.c + raw_spin_unlock(&boot_lock); } - int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) -@@ -70,7 +70,7 @@ int __cpuinit boot_secondary(unsigned in - * Set synchronisation state between this boot processor + static int __cpuinit ux500_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -82,7 +82,7 @@ static int __cpuinit ux500_boot_secondar + * set synchronisation state between this boot processor * and the secondary one */ - spin_lock(&boot_lock); + raw_spin_lock(&boot_lock); /* - * This is really belt and braces; we hold unintended secondary -@@ -100,7 +100,7 @@ int __cpuinit boot_secondary(unsigned in + * The secondary processor is waiting to be released from +@@ -103,7 +103,7 @@ static int __cpuinit ux500_boot_secondar + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? -ENOSYS : 0; + } +--- a/arch/arm/plat-versatile/platsmp.c ++++ b/arch/arm/plat-versatile/platsmp.c +@@ -32,7 +32,7 @@ static void __cpuinit write_pen_release( + outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1)); + } + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + void __cpuinit versatile_secondary_init(unsigned int cpu) + { +@@ -52,8 +52,8 @@ void __cpuinit versatile_secondary_init( + /* + * Synchronise with the boot thread. 
+ */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + int __cpuinit versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -64,7 +64,7 @@ int __cpuinit versatile_boot_secondary(u + * Set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * This is really belt and braces; we hold unintended secondary +@@ -94,7 +94,7 @@ int __cpuinit versatile_boot_secondary(u * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ diff --git a/debian/patches/features/all/rt/arm-disable-highmem-on-rt.patch b/debian/patches/features/all/rt/arm-disable-highmem-on-rt.patch index 93e46b88f..344b38102 100644 --- a/debian/patches/features/all/rt/arm-disable-highmem-on-rt.patch +++ b/debian/patches/features/all/rt/arm-disable-highmem-on-rt.patch @@ -7,11 +7,9 @@ Signed-off-by: Thomas Gleixner arch/arm/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -Index: linux-stable/arch/arm/Kconfig -=================================================================== ---- linux-stable.orig/arch/arm/Kconfig -+++ linux-stable/arch/arm/Kconfig -@@ -1747,7 +1747,7 @@ config HAVE_ARCH_PFN_VALID +--- a/arch/arm/Kconfig ++++ b/arch/arm/Kconfig +@@ -1752,7 +1752,7 @@ config HAVE_ARCH_PFN_VALID config HIGHMEM bool "High Memory Support" diff --git a/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch b/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch new file mode 100644 index 000000000..e803c178a --- /dev/null +++ b/debian/patches/features/all/rt/arm-enable-highmem-for-rt.patch @@ -0,0 +1,140 @@ +Subject: arm-enable-highmem-for-rt.patch +From: Thomas Gleixner +Date: Wed, 13 Feb 2013 11:03:11 +0100 + +Signed-off-by: Thomas Gleixner +--- + arch/arm/Kconfig | 2 - + arch/arm/include/asm/switch_to.h | 9 ++++++++ + arch/arm/mm/highmem.c | 41 
+++++++++++++++++++++++++++++++++++++-- + include/linux/highmem.h | 1 + 4 files changed, 50 insertions(+), 3 deletions(-) + +--- a/arch/arm/Kconfig ++++ b/arch/arm/Kconfig +@@ -1752,7 +1752,7 @@ config HAVE_ARCH_PFN_VALID + + config HIGHMEM + bool "High Memory Support" +- depends on MMU && !PREEMPT_RT_FULL ++ depends on MMU + help + The address space of ARM processors is only 4 Gigabytes large + and it has to accommodate user address space, kernel address +--- a/arch/arm/include/asm/switch_to.h ++++ b/arch/arm/include/asm/switch_to.h +@@ -3,6 +3,14 @@ + + #include + ++#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM ++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p); ++#else ++static inline void ++switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } ++#endif ++ ++ + /* + * switch_to(prev, next) should switch from task `prev' to `next' + * `prev' will never be the same as `next'. schedule() itself +@@ -12,6 +20,7 @@ extern struct task_struct *__switch_to(s + + #define switch_to(prev,next,last) \ + do { \ ++ switch_kmaps(prev, next); \ + last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ + } while (0) + +--- a/arch/arm/mm/highmem.c ++++ b/arch/arm/mm/highmem.c +@@ -38,6 +38,7 @@ EXPORT_SYMBOL(kunmap); + + void *kmap_atomic(struct page *page) + { ++ pte_t pte = mk_pte(page, kmap_prot); + unsigned int idx; + unsigned long vaddr; + void *kmap; +@@ -76,7 +77,10 @@ void *kmap_atomic(struct page *page) + * in place, so the contained TLB flush ensures the TLB is updated + * with the new mapping. 
+ */ +- set_top_pte(vaddr, mk_pte(page, kmap_prot)); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = pte; ++#endif ++ set_top_pte(vaddr, pte); + + return (void *)vaddr; + } +@@ -93,6 +97,9 @@ void __kunmap_atomic(void *kvaddr) + + if (cache_is_vivt()) + __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = __pte(0); ++#endif + #ifdef CONFIG_DEBUG_HIGHMEM + BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); + #else +@@ -110,6 +117,7 @@ EXPORT_SYMBOL(__kunmap_atomic); + + void *kmap_atomic_pfn(unsigned long pfn) + { ++ pte_t pte = pfn_pte(pfn, kmap_prot); + unsigned long vaddr; + int idx, type; + +@@ -121,7 +129,10 @@ void *kmap_atomic_pfn(unsigned long pfn) + #ifdef CONFIG_DEBUG_HIGHMEM + BUG_ON(!pte_none(get_top_pte(vaddr))); + #endif +- set_top_pte(vaddr, pfn_pte(pfn, kmap_prot)); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = pte; ++#endif ++ set_top_pte(vaddr, pte); + + return (void *)vaddr; + } +@@ -135,3 +146,29 @@ struct page *kmap_atomic_to_page(const v + + return pte_page(get_top_pte(vaddr)); + } ++ ++#if defined CONFIG_PREEMPT_RT_FULL ++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) ++{ ++ int i; ++ ++ /* ++ * Clear @prev's kmap_atomic mappings ++ */ ++ for (i = 0; i < prev_p->kmap_idx; i++) { ++ int idx = i + KM_TYPE_NR * smp_processor_id(); ++ ++ set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx), __pte(0)); ++ } ++ /* ++ * Restore @next_p's kmap_atomic mappings ++ */ ++ for (i = 0; i < next_p->kmap_idx; i++) { ++ int idx = i + KM_TYPE_NR * smp_processor_id(); ++ ++ if (!pte_none(next_p->kmap_pte[i])) ++ set_top_pte(__fix_to_virt(FIX_KMAP_BEGIN + idx), ++ next_p->kmap_pte[i]); ++ } ++} ++#endif +--- a/include/linux/highmem.h ++++ b/include/linux/highmem.h +@@ -7,6 +7,7 @@ + #include + #include + #include ++#include + + #include + diff --git a/debian/patches/features/all/rt/arm-mark-pmu-interupt-no-thread.patch 
b/debian/patches/features/all/rt/arm-mark-pmu-interupt-no-thread.patch index 65a9900f0..6f97ee0f8 100644 --- a/debian/patches/features/all/rt/arm-mark-pmu-interupt-no-thread.patch +++ b/debian/patches/features/all/rt/arm-mark-pmu-interupt-no-thread.patch @@ -2,24 +2,22 @@ Subject: arm: Mark pmu interupt IRQF_NO_THREAD From: Thomas Gleixner Date: Wed, 16 Mar 2011 14:45:31 +0100 -PMU interrupt must not be threaded. Remove IRQF_DISABLED while at it -as we run all handlers with interrupts disabled anyway. +PMU interrupts must not be threaded. Signed-off-by: Thomas Gleixner --- - arch/arm/kernel/perf_event.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) + arch/arm/kernel/perf_event_cpu.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) -Index: linux-stable/arch/arm/kernel/perf_event.c -=================================================================== ---- linux-stable.orig/arch/arm/kernel/perf_event.c -+++ linux-stable/arch/arm/kernel/perf_event.c -@@ -430,7 +430,7 @@ armpmu_reserve_hardware(struct arm_pmu * +--- a/arch/arm/kernel/perf_event_cpu.c ++++ b/arch/arm/kernel/perf_event_cpu.c +@@ -118,7 +118,8 @@ static int cpu_pmu_request_irq(struct ar + continue; } - err = request_irq(irq, handle_irq, -- IRQF_DISABLED | IRQF_NOBALANCING, -+ IRQF_NOBALANCING | IRQF_NO_THREAD, - "arm-pmu", armpmu); +- err = request_irq(irq, handler, IRQF_NOBALANCING, "arm-pmu", ++ err = request_irq(irq, handler, ++ IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu", + cpu_pmu); if (err) { pr_err("unable to request IRQ%d for ARM PMU counters\n", diff --git a/debian/patches/features/all/rt/arm-omap-make-wakeupgen_lock-raw.patch b/debian/patches/features/all/rt/arm-omap-make-wakeupgen_lock-raw.patch index 7faf47b22..74a258ae4 100644 --- a/debian/patches/features/all/rt/arm-omap-make-wakeupgen_lock-raw.patch +++ b/debian/patches/features/all/rt/arm-omap-make-wakeupgen_lock-raw.patch @@ -7,11 +7,9 @@ Signed-off-by: Thomas Gleixner arch/arm/mach-omap2/omap-wakeupgen.c | 14 
+++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) -Index: linux-stable/arch/arm/mach-omap2/omap-wakeupgen.c -=================================================================== ---- linux-stable.orig/arch/arm/mach-omap2/omap-wakeupgen.c -+++ linux-stable/arch/arm/mach-omap2/omap-wakeupgen.c -@@ -45,7 +45,7 @@ +--- a/arch/arm/mach-omap2/omap-wakeupgen.c ++++ b/arch/arm/mach-omap2/omap-wakeupgen.c +@@ -46,7 +46,7 @@ static void __iomem *wakeupgen_base; static void __iomem *sar_base; @@ -20,7 +18,7 @@ Index: linux-stable/arch/arm/mach-omap2/omap-wakeupgen.c static unsigned int irq_target_cpu[MAX_IRQS]; static unsigned int irq_banks = MAX_NR_REG_BANKS; static unsigned int max_irqs = MAX_IRQS; -@@ -133,9 +133,9 @@ static void wakeupgen_mask(struct irq_da +@@ -134,9 +134,9 @@ static void wakeupgen_mask(struct irq_da { unsigned long flags; @@ -32,7 +30,7 @@ Index: linux-stable/arch/arm/mach-omap2/omap-wakeupgen.c } /* -@@ -145,9 +145,9 @@ static void wakeupgen_unmask(struct irq_ +@@ -146,9 +146,9 @@ static void wakeupgen_unmask(struct irq_ { unsigned long flags; @@ -44,7 +42,7 @@ Index: linux-stable/arch/arm/mach-omap2/omap-wakeupgen.c } #ifdef CONFIG_HOTPLUG_CPU -@@ -188,7 +188,7 @@ static void wakeupgen_irqmask_all(unsign +@@ -189,7 +189,7 @@ static void wakeupgen_irqmask_all(unsign { unsigned long flags; @@ -53,7 +51,7 @@ Index: linux-stable/arch/arm/mach-omap2/omap-wakeupgen.c if (set) { _wakeupgen_save_masks(cpu); _wakeupgen_set_all(cpu, WKG_MASK_ALL); -@@ -196,7 +196,7 @@ static void wakeupgen_irqmask_all(unsign +@@ -197,7 +197,7 @@ static void wakeupgen_irqmask_all(unsign _wakeupgen_set_all(cpu, WKG_UNMASK_ALL); _wakeupgen_restore_masks(cpu); } diff --git a/debian/patches/features/all/rt/arm-preempt-lazy-support.patch b/debian/patches/features/all/rt/arm-preempt-lazy-support.patch index 669ea6c18..d1e990bd2 100644 --- a/debian/patches/features/all/rt/arm-preempt-lazy-support.patch +++ b/debian/patches/features/all/rt/arm-preempt-lazy-support.patch @@ 
-7,26 +7,22 @@ Signed-off-by: Thomas Gleixner arch/arm/Kconfig | 1 + arch/arm/include/asm/thread_info.h | 3 +++ arch/arm/kernel/asm-offsets.c | 1 + - arch/arm/kernel/entry-armv.S | 8 ++++++++ + arch/arm/kernel/entry-armv.S | 13 +++++++++++-- arch/arm/kernel/signal.c | 3 ++- - 5 files changed, 15 insertions(+), 1 deletion(-) + 5 files changed, 18 insertions(+), 3 deletions(-) -Index: linux-stable/arch/arm/Kconfig -=================================================================== ---- linux-stable.orig/arch/arm/Kconfig -+++ linux-stable/arch/arm/Kconfig -@@ -50,6 +50,7 @@ config ARM - select GENERIC_STRNCPY_FROM_USER - select GENERIC_STRNLEN_USER - select DCACHE_WORD_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && !CPU_BIG_ENDIAN +--- a/arch/arm/Kconfig ++++ b/arch/arm/Kconfig +@@ -47,6 +47,7 @@ config ARM + select HAVE_MEMBLOCK + select HAVE_OPROFILE if (HAVE_PERF_EVENTS) + select HAVE_PERF_EVENTS + select HAVE_PREEMPT_LAZY - help - The ARM series is a line of low-power-consumption RISC chip designs - licensed by ARM Ltd and targeted at embedded applications and -Index: linux-stable/arch/arm/include/asm/thread_info.h -=================================================================== ---- linux-stable.orig/arch/arm/include/asm/thread_info.h -+++ linux-stable/arch/arm/include/asm/thread_info.h + select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_SYSCALL_TRACEPOINTS + select HAVE_UID16 +--- a/arch/arm/include/asm/thread_info.h ++++ b/arch/arm/include/asm/thread_info.h @@ -50,6 +50,7 @@ struct cpu_context_save { struct thread_info { unsigned long flags; /* low level flags */ @@ -35,26 +31,24 @@ Index: linux-stable/arch/arm/include/asm/thread_info.h mm_segment_t addr_limit; /* address limit */ struct task_struct *task; /* main task structure */ struct exec_domain *exec_domain; /* execution domain */ -@@ -146,6 +147,7 @@ extern int vfp_restore_user_hwstate(stru +@@ -148,6 +149,7 @@ extern int vfp_restore_user_hwstate(stru #define TIF_SIGPENDING 0 #define TIF_NEED_RESCHED 
1 #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ +#define TIF_NEED_RESCHED_LAZY 3 #define TIF_SYSCALL_TRACE 8 #define TIF_SYSCALL_AUDIT 9 - #define TIF_POLLING_NRFLAG 16 -@@ -158,6 +160,7 @@ extern int vfp_restore_user_hwstate(stru + #define TIF_SYSCALL_TRACEPOINT 10 +@@ -160,6 +162,7 @@ extern int vfp_restore_user_hwstate(stru #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) - #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) -Index: linux-stable/arch/arm/kernel/asm-offsets.c -=================================================================== ---- linux-stable.orig/arch/arm/kernel/asm-offsets.c -+++ linux-stable/arch/arm/kernel/asm-offsets.c + #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) +--- a/arch/arm/kernel/asm-offsets.c ++++ b/arch/arm/kernel/asm-offsets.c @@ -50,6 +50,7 @@ int main(void) BLANK(); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); @@ -63,24 +57,30 @@ Index: linux-stable/arch/arm/kernel/asm-offsets.c DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); DEFINE(TI_TASK, offsetof(struct thread_info, task)); DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain)); -Index: linux-stable/arch/arm/kernel/entry-armv.S -=================================================================== ---- linux-stable.orig/arch/arm/kernel/entry-armv.S -+++ linux-stable/arch/arm/kernel/entry-armv.S -@@ -221,6 +221,12 @@ __irq_svc: - movne r0, #0 @ force flags to 0 - tst r0, #_TIF_NEED_RESCHED - blne svc_preempt -+ ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count +--- a/arch/arm/kernel/entry-armv.S ++++ b/arch/arm/kernel/entry-armv.S +@@ -216,11 +216,18 @@ __irq_svc: + #ifdef CONFIG_PREEMPT + get_thread_info tsk + ldr r8, [tsk, 
#TI_PREEMPT] @ get preempt count +- ldr r0, [tsk, #TI_FLAGS] @ get flags + teq r8, #0 @ if preempt count != 0 ++ bne 1f @ return from exeption + ldr r0, [tsk, #TI_FLAGS] @ get flags ++ tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set ++ blne svc_preempt @ preempt! ++ ++ ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count + teq r8, #0 @ if preempt lazy count != 0 -+ movne r0, #0 @ force flags to 0 + movne r0, #0 @ force flags to 0 +- tst r0, #_TIF_NEED_RESCHED + tst r0, #_TIF_NEED_RESCHED_LAZY -+ blne svc_preempt + blne svc_preempt ++1: #endif #ifdef CONFIG_TRACE_IRQFLAGS -@@ -240,6 +246,8 @@ svc_preempt: +@@ -240,6 +247,8 @@ svc_preempt: 1: bl preempt_schedule_irq @ irq en/disable is done inside ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS tst r0, #_TIF_NEED_RESCHED @@ -89,11 +89,9 @@ Index: linux-stable/arch/arm/kernel/entry-armv.S moveq pc, r8 @ go again b 1b #endif -Index: linux-stable/arch/arm/kernel/signal.c -=================================================================== ---- linux-stable.orig/arch/arm/kernel/signal.c -+++ linux-stable/arch/arm/kernel/signal.c -@@ -639,7 +639,8 @@ asmlinkage int +--- a/arch/arm/kernel/signal.c ++++ b/arch/arm/kernel/signal.c +@@ -638,7 +638,8 @@ asmlinkage int do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) { do { diff --git a/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch b/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch index 6436e2c39..6088cd83e 100644 --- a/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch +++ b/debian/patches/features/all/rt/ata-disable-interrupts-if-non-rt.patch @@ -12,10 +12,8 @@ Signed-off-by: Thomas Gleixner drivers/ata/libata-sff.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) -Index: linux-stable/drivers/ata/libata-sff.c -=================================================================== ---- linux-stable.orig/drivers/ata/libata-sff.c -+++ 
linux-stable/drivers/ata/libata-sff.c +--- a/drivers/ata/libata-sff.c ++++ b/drivers/ata/libata-sff.c @@ -678,9 +678,9 @@ unsigned int ata_sff_data_xfer_noirq(str unsigned long flags; unsigned int consumed; diff --git a/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch b/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch index 082a86de2..910d5ac8d 100644 --- a/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch +++ b/debian/patches/features/all/rt/block-shorten-interrupt-disabled-regions.patch @@ -43,41 +43,21 @@ Cc: Linus Torvalds Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de Signed-off-by: Thomas Gleixner --- - block/blk-core.c | 20 ++++++++------------ - 1 file changed, 8 insertions(+), 12 deletions(-) + block/blk-core.c | 12 ++---------- + 1 file changed, 2 insertions(+), 10 deletions(-) -Index: linux-stable/block/blk-core.c -=================================================================== ---- linux-stable.orig/block/blk-core.c -+++ linux-stable/block/blk-core.c -@@ -304,7 +304,11 @@ void __blk_run_queue(struct request_queu - { - if (unlikely(blk_queue_stopped(q))) - return; -- -+ /* -+ * q->request_fn() can drop q->queue_lock and reenable -+ * interrupts, but must return with q->queue_lock held and -+ * interrupts disabled. -+ */ - q->request_fn(q); - } - EXPORT_SYMBOL(__blk_run_queue); -@@ -2902,11 +2906,11 @@ static void queue_unplugged(struct reque - * this lock). 
- */ - if (from_schedule) { -- spin_unlock(q->queue_lock); -+ spin_unlock_irq(q->queue_lock); +--- a/block/blk-core.c ++++ b/block/blk-core.c +@@ -2929,7 +2929,7 @@ static void queue_unplugged(struct reque blk_run_queue_async(q); - } else { + else __blk_run_queue(q); -- spin_unlock(q->queue_lock); -+ spin_unlock_irq(q->queue_lock); - } - +- spin_unlock(q->queue_lock); ++ spin_unlock_irq(q->queue_lock); } -@@ -2956,7 +2960,6 @@ EXPORT_SYMBOL(blk_check_plugged); + + static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) +@@ -2977,7 +2977,6 @@ EXPORT_SYMBOL(blk_check_plugged); void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) { struct request_queue *q; @@ -85,7 +65,7 @@ Index: linux-stable/block/blk-core.c struct request *rq; LIST_HEAD(list); unsigned int depth; -@@ -2977,11 +2980,6 @@ void blk_flush_plug_list(struct blk_plug +@@ -2998,11 +2997,6 @@ void blk_flush_plug_list(struct blk_plug q = NULL; depth = 0; @@ -97,7 +77,7 @@ Index: linux-stable/block/blk-core.c while (!list_empty(&list)) { rq = list_entry_rq(list.next); list_del_init(&rq->queuelist); -@@ -2994,7 +2992,7 @@ void blk_flush_plug_list(struct blk_plug +@@ -3015,7 +3009,7 @@ void blk_flush_plug_list(struct blk_plug queue_unplugged(q, depth, from_schedule); q = rq->q; depth = 0; @@ -106,7 +86,7 @@ Index: linux-stable/block/blk-core.c } /* -@@ -3021,8 +3019,6 @@ void blk_flush_plug_list(struct blk_plug +@@ -3042,8 +3036,6 @@ void blk_flush_plug_list(struct blk_plug */ if (q) queue_unplugged(q, depth, from_schedule); diff --git a/debian/patches/features/all/rt/block-use-cpu-chill.patch b/debian/patches/features/all/rt/block-use-cpu-chill.patch new file mode 100644 index 000000000..9237f690c --- /dev/null +++ b/debian/patches/features/all/rt/block-use-cpu-chill.patch @@ -0,0 +1,45 @@ +Subject: block: Use cpu_chill() for retry loops +From: Thomas Gleixner +Date: Thu, 20 Dec 2012 18:28:26 +0100 + +Retry loops on RT might loop forever when the modifying side was 
+preempted. Steven also observed a live lock when there was a +concurrent priority boosting going on. + +Use cpu_chill() instead of cpu_relax() to let the system +make progress. + +Signed-off-by: Thomas Gleixner +Cc: stable-rt@vger.kernel.org +--- + block/blk-ioc.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +--- a/block/blk-ioc.c ++++ b/block/blk-ioc.c +@@ -8,6 +8,7 @@ + #include + #include /* for max_pfn/max_low_pfn */ + #include ++#include + + #include "blk.h" + +@@ -110,7 +111,7 @@ static void ioc_release_fn(struct work_s + spin_unlock(q->queue_lock); + } else { + spin_unlock_irqrestore(&ioc->lock, flags); +- cpu_relax(); ++ cpu_chill(); + spin_lock_irqsave_nested(&ioc->lock, flags, 1); + } + } +@@ -188,7 +189,7 @@ retry: + spin_unlock(icq->q->queue_lock); + } else { + spin_unlock_irqrestore(&ioc->lock, flags); +- cpu_relax(); ++ cpu_chill(); + goto retry; + } + } diff --git a/debian/patches/features/all/rt/bug-rt-dependend-variants.patch b/debian/patches/features/all/rt/bug-rt-dependend-variants.patch index 35a80e1ce..5f7a1e350 100644 --- a/debian/patches/features/all/rt/bug-rt-dependend-variants.patch +++ b/debian/patches/features/all/rt/bug-rt-dependend-variants.patch @@ -9,10 +9,8 @@ Signed-off-by: Thomas Gleixner include/asm-generic/bug.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) -Index: linux-stable/include/asm-generic/bug.h -=================================================================== ---- linux-stable.orig/include/asm-generic/bug.h -+++ linux-stable/include/asm-generic/bug.h +--- a/include/asm-generic/bug.h ++++ b/include/asm-generic/bug.h @@ -202,6 +202,20 @@ extern void warn_slowpath_null(const cha # define WARN_ON_SMP(x) ({0;}) #endif diff --git a/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch b/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch index 98561a41b..a4502c70e 100644 --- 
a/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch +++ b/debian/patches/features/all/rt/clocksource-tclib-allow-higher-clockrates.patch @@ -13,10 +13,8 @@ Signed-off-by: Thomas Gleixner drivers/misc/Kconfig | 11 +++++++-- 2 files changed, 35 insertions(+), 20 deletions(-) -Index: linux-stable/drivers/clocksource/tcb_clksrc.c -=================================================================== ---- linux-stable.orig/drivers/clocksource/tcb_clksrc.c -+++ linux-stable/drivers/clocksource/tcb_clksrc.c +--- a/drivers/clocksource/tcb_clksrc.c ++++ b/drivers/clocksource/tcb_clksrc.c @@ -23,8 +23,7 @@ * this 32 bit free-running counter. the second channel is not used. * @@ -73,7 +71,7 @@ Index: linux-stable/drivers/clocksource/tcb_clksrc.c __raw_writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR)); -@@ -158,8 +152,12 @@ static struct tc_clkevt_device clkevt = +@@ -158,8 +152,12 @@ static struct tc_clkevt_device clkevt = .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .shift = 32, @@ -132,11 +130,9 @@ Index: linux-stable/drivers/clocksource/tcb_clksrc.c return 0; } arch_initcall(tcb_clksrc_init); -Index: linux-stable/drivers/misc/Kconfig -=================================================================== ---- linux-stable.orig/drivers/misc/Kconfig -+++ linux-stable/drivers/misc/Kconfig -@@ -88,8 +88,7 @@ config ATMEL_TCB_CLKSRC +--- a/drivers/misc/Kconfig ++++ b/drivers/misc/Kconfig +@@ -78,8 +78,7 @@ config ATMEL_TCB_CLKSRC are combined to make a single 32-bit timer. When GENERIC_CLOCKEVENTS is defined, the third timer channel @@ -146,7 +142,7 @@ Index: linux-stable/drivers/misc/Kconfig config ATMEL_TCB_CLKSRC_BLOCK int -@@ -103,6 +102,14 @@ config ATMEL_TCB_CLKSRC_BLOCK +@@ -93,6 +92,14 @@ config ATMEL_TCB_CLKSRC_BLOCK TC can be used for other purposes, such as PWM generation and interval timing. 
@@ -160,4 +156,4 @@ Index: linux-stable/drivers/misc/Kconfig + config IBM_ASM tristate "Device driver for IBM RSA service processor" - depends on X86 && PCI && INPUT && EXPERIMENTAL + depends on X86 && PCI && INPUT diff --git a/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch b/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch new file mode 100644 index 000000000..f7d9d0135 --- /dev/null +++ b/debian/patches/features/all/rt/completion-use-simple-wait-queues.patch @@ -0,0 +1,155 @@ +Subject: completion: Use simple wait queues +From: Thomas Gleixner +Date: Fri, 11 Jan 2013 11:23:51 +0100 + +Completions have no long lasting callbacks and therefor do not need +the complex waitqueue variant. Use simple waitqueues which reduces the +contention on the waitqueue lock. + +Signed-off-by: Thomas Gleixner +--- + include/linux/completion.h | 8 ++++---- + include/linux/uprobes.h | 1 + + kernel/sched/core.c | 34 +++++++++++++++++----------------- + 3 files changed, 22 insertions(+), 21 deletions(-) + +--- a/include/linux/completion.h ++++ b/include/linux/completion.h +@@ -8,7 +8,7 @@ + * See kernel/sched.c for details. 
+ */ + +-#include ++#include + + /* + * struct completion - structure used to maintain state for a "completion" +@@ -24,11 +24,11 @@ + */ + struct completion { + unsigned int done; +- wait_queue_head_t wait; ++ struct swait_head wait; + }; + + #define COMPLETION_INITIALIZER(work) \ +- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } ++ { 0, SWAIT_HEAD_INITIALIZER((work).wait) } + + #define COMPLETION_INITIALIZER_ONSTACK(work) \ + ({ init_completion(&work); work; }) +@@ -73,7 +73,7 @@ struct completion { + static inline void init_completion(struct completion *x) + { + x->done = 0; +- init_waitqueue_head(&x->wait); ++ init_swait_head(&x->wait); + } + + extern void wait_for_completion(struct completion *); +--- a/include/linux/uprobes.h ++++ b/include/linux/uprobes.h +@@ -26,6 +26,7 @@ + + #include + #include ++#include + + struct vm_area_struct; + struct mm_struct; +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -3487,10 +3487,10 @@ void complete(struct completion *x) + { + unsigned long flags; + +- spin_lock_irqsave(&x->wait.lock, flags); ++ raw_spin_lock_irqsave(&x->wait.lock, flags); + x->done++; +- __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL); +- spin_unlock_irqrestore(&x->wait.lock, flags); ++ __swait_wake_locked(&x->wait, TASK_NORMAL, 1); ++ raw_spin_unlock_irqrestore(&x->wait.lock, flags); + } + EXPORT_SYMBOL(complete); + +@@ -3507,10 +3507,10 @@ void complete_all(struct completion *x) + { + unsigned long flags; + +- spin_lock_irqsave(&x->wait.lock, flags); ++ raw_spin_lock_irqsave(&x->wait.lock, flags); + x->done += UINT_MAX/2; +- __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL); +- spin_unlock_irqrestore(&x->wait.lock, flags); ++ __swait_wake_locked(&x->wait, TASK_NORMAL, 0); ++ raw_spin_unlock_irqrestore(&x->wait.lock, flags); + } + EXPORT_SYMBOL(complete_all); + +@@ -3518,20 +3518,20 @@ static inline long __sched + do_wait_for_common(struct completion *x, long timeout, int state) + { + if (!x->done) { +- DECLARE_WAITQUEUE(wait, 
current); ++ DEFINE_SWAITER(wait); + +- __add_wait_queue_tail_exclusive(&x->wait, &wait); ++ swait_prepare_locked(&x->wait, &wait); + do { + if (signal_pending_state(state, current)) { + timeout = -ERESTARTSYS; + break; + } + __set_current_state(state); +- spin_unlock_irq(&x->wait.lock); ++ raw_spin_unlock_irq(&x->wait.lock); + timeout = schedule_timeout(timeout); +- spin_lock_irq(&x->wait.lock); ++ raw_spin_lock_irq(&x->wait.lock); + } while (!x->done && timeout); +- __remove_wait_queue(&x->wait, &wait); ++ swait_finish_locked(&x->wait, &wait); + if (!x->done) + return timeout; + } +@@ -3544,9 +3544,9 @@ wait_for_common(struct completion *x, lo + { + might_sleep(); + +- spin_lock_irq(&x->wait.lock); ++ raw_spin_lock_irq(&x->wait.lock); + timeout = do_wait_for_common(x, timeout, state); +- spin_unlock_irq(&x->wait.lock); ++ raw_spin_unlock_irq(&x->wait.lock); + return timeout; + } + +@@ -3677,12 +3677,12 @@ bool try_wait_for_completion(struct comp + unsigned long flags; + int ret = 1; + +- spin_lock_irqsave(&x->wait.lock, flags); ++ raw_spin_lock_irqsave(&x->wait.lock, flags); + if (!x->done) + ret = 0; + else + x->done--; +- spin_unlock_irqrestore(&x->wait.lock, flags); ++ raw_spin_unlock_irqrestore(&x->wait.lock, flags); + return ret; + } + EXPORT_SYMBOL(try_wait_for_completion); +@@ -3700,10 +3700,10 @@ bool completion_done(struct completion * + unsigned long flags; + int ret = 1; + +- spin_lock_irqsave(&x->wait.lock, flags); ++ raw_spin_lock_irqsave(&x->wait.lock, flags); + if (!x->done) + ret = 0; +- spin_unlock_irqrestore(&x->wait.lock, flags); ++ raw_spin_unlock_irqrestore(&x->wait.lock, flags); + return ret; + } + EXPORT_SYMBOL(completion_done); diff --git a/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch b/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch index e99b6bad1..2dff484c8 100644 --- a/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch +++ b/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch 
@@ -7,11 +7,9 @@ Signed-off-by: Thomas Gleixner include/linux/sched.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -Index: linux-stable/include/linux/sched.h -=================================================================== ---- linux-stable.orig/include/linux/sched.h -+++ linux-stable/include/linux/sched.h -@@ -2689,7 +2689,7 @@ extern int _cond_resched(void); +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -2719,7 +2719,7 @@ extern int _cond_resched(void); extern int __cond_resched_lock(spinlock_t *lock); diff --git a/debian/patches/features/all/rt/cond-resched-softirq-rt.patch b/debian/patches/features/all/rt/cond-resched-softirq-rt.patch index f3905a2dd..dd146ee91 100644 --- a/debian/patches/features/all/rt/cond-resched-softirq-rt.patch +++ b/debian/patches/features/all/rt/cond-resched-softirq-rt.patch @@ -8,11 +8,9 @@ Signed-off-by: Thomas Gleixner kernel/sched/core.c | 2 ++ 2 files changed, 6 insertions(+) -Index: linux-stable/include/linux/sched.h -=================================================================== ---- linux-stable.orig/include/linux/sched.h -+++ linux-stable/include/linux/sched.h -@@ -2700,12 +2700,16 @@ extern int __cond_resched_lock(spinlock_ +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -2730,12 +2730,16 @@ extern int __cond_resched_lock(spinlock_ __cond_resched_lock(lock); \ }) @@ -29,11 +27,9 @@ Index: linux-stable/include/linux/sched.h /* * Does a critical section need to be broken due to another -Index: linux-stable/kernel/sched/core.c -=================================================================== ---- linux-stable.orig/kernel/sched/core.c -+++ linux-stable/kernel/sched/core.c -@@ -4832,6 +4832,7 @@ int __cond_resched_lock(spinlock_t *lock +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -4366,6 +4366,7 @@ int __cond_resched_lock(spinlock_t *lock } EXPORT_SYMBOL(__cond_resched_lock); @@ -41,7 +37,7 @@ Index: linux-stable/kernel/sched/core.c int __sched 
__cond_resched_softirq(void) { BUG_ON(!in_softirq()); -@@ -4845,6 +4846,7 @@ int __sched __cond_resched_softirq(void) +@@ -4379,6 +4380,7 @@ int __sched __cond_resched_softirq(void) return 0; } EXPORT_SYMBOL(__cond_resched_softirq); diff --git a/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch b/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch index 297791e6f..80ed6929c 100644 --- a/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch +++ b/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch @@ -22,10 +22,8 @@ Signed-off-by: Thomas Gleixner kernel/cpu.c | 35 ++++++++++++++++++++++++++--------- 1 file changed, 26 insertions(+), 9 deletions(-) -Index: linux-stable/kernel/cpu.c -=================================================================== ---- linux-stable.orig/kernel/cpu.c -+++ linux-stable/kernel/cpu.c +--- a/kernel/cpu.c ++++ b/kernel/cpu.c @@ -51,7 +51,12 @@ static int cpu_hotplug_disabled; static struct { @@ -85,12 +83,16 @@ Index: linux-stable/kernel/cpu.c } EXPORT_SYMBOL_GPL(get_online_cpus); -@@ -177,10 +194,10 @@ void put_online_cpus(void) +@@ -177,14 +194,14 @@ void put_online_cpus(void) { if (cpu_hotplug.active_writer == current) return; - mutex_lock(&cpu_hotplug.lock); + + hotplug_lock(); + if (WARN_ON(!cpu_hotplug.refcount)) + cpu_hotplug.refcount++; /* try to fix things up */ + if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer)) wake_up_process(cpu_hotplug.active_writer); - mutex_unlock(&cpu_hotplug.lock); @@ -98,7 +100,7 @@ Index: linux-stable/kernel/cpu.c } EXPORT_SYMBOL_GPL(put_online_cpus); -@@ -212,11 +229,11 @@ static void cpu_hotplug_begin(void) +@@ -216,11 +233,11 @@ static void cpu_hotplug_begin(void) cpu_hotplug.active_writer = current; for (;;) { @@ -112,7 +114,7 @@ Index: linux-stable/kernel/cpu.c schedule(); } } -@@ -224,7 +241,7 @@ static void cpu_hotplug_begin(void) +@@ 
-228,7 +245,7 @@ static void cpu_hotplug_begin(void) static void cpu_hotplug_done(void) { cpu_hotplug.active_writer = NULL; diff --git a/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch b/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch index de4f8a516..7a6496a4a 100644 --- a/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch +++ b/debian/patches/features/all/rt/cpu-rt-rework-cpu-down.patch @@ -50,15 +50,13 @@ Signed-off-by: Thomas Gleixner --- include/linux/sched.h | 7 + - kernel/cpu.c | 240 +++++++++++++++++++++++++++++++++++++++++--------- + kernel/cpu.c | 241 +++++++++++++++++++++++++++++++++++++++++--------- kernel/sched/core.c | 82 ++++++++++++++++- - 3 files changed, 285 insertions(+), 44 deletions(-) + 3 files changed, 285 insertions(+), 45 deletions(-) -Index: linux-stable/include/linux/sched.h -=================================================================== ---- linux-stable.orig/include/linux/sched.h -+++ linux-stable/include/linux/sched.h -@@ -1952,6 +1952,10 @@ extern void do_set_cpus_allowed(struct t +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1977,6 +1977,10 @@ extern void do_set_cpus_allowed(struct t extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); @@ -69,7 +67,7 @@ Index: linux-stable/include/linux/sched.h #else static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) -@@ -1964,6 +1968,9 @@ static inline int set_cpus_allowed_ptr(s +@@ -1989,6 +1993,9 @@ static inline int set_cpus_allowed_ptr(s return -EINVAL; return 0; } @@ -79,10 +77,8 @@ Index: linux-stable/include/linux/sched.h #endif #ifdef CONFIG_NO_HZ -Index: linux-stable/kernel/cpu.c -=================================================================== ---- linux-stable.orig/kernel/cpu.c -+++ linux-stable/kernel/cpu.c +--- a/kernel/cpu.c ++++ b/kernel/cpu.c @@ -51,12 +51,7 @@ static int cpu_hotplug_disabled; static struct { @@ -395,20 +391,24 @@ Index: 
linux-stable/kernel/cpu.c } EXPORT_SYMBOL_GPL(get_online_cpus); -@@ -194,10 +347,10 @@ void put_online_cpus(void) - { +@@ -195,14 +348,13 @@ void put_online_cpus(void) if (cpu_hotplug.active_writer == current) return; + - hotplug_lock(); + mutex_lock(&cpu_hotplug.lock); + if (WARN_ON(!cpu_hotplug.refcount)) + cpu_hotplug.refcount++; /* try to fix things up */ + if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer)) wake_up_process(cpu_hotplug.active_writer); - hotplug_unlock(); +- + mutex_unlock(&cpu_hotplug.lock); - } EXPORT_SYMBOL_GPL(put_online_cpus); -@@ -229,11 +382,11 @@ static void cpu_hotplug_begin(void) + +@@ -233,11 +385,11 @@ static void cpu_hotplug_begin(void) cpu_hotplug.active_writer = current; for (;;) { @@ -422,7 +422,7 @@ Index: linux-stable/kernel/cpu.c schedule(); } } -@@ -241,7 +394,7 @@ static void cpu_hotplug_begin(void) +@@ -245,7 +397,7 @@ static void cpu_hotplug_begin(void) static void cpu_hotplug_done(void) { cpu_hotplug.active_writer = NULL; @@ -431,9 +431,9 @@ Index: linux-stable/kernel/cpu.c } #else /* #if CONFIG_HOTPLUG_CPU */ -@@ -416,6 +569,9 @@ static int __ref _cpu_down(unsigned int - goto out_release; +@@ -421,6 +573,9 @@ static int __ref _cpu_down(unsigned int } + smpboot_park_threads(cpu); + /* Notifiers are done. Don't let any more tasks pin this CPU. */ + cpu_unplug_sync(cpu); @@ -441,11 +441,9 @@ Index: linux-stable/kernel/cpu.c err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); if (err) { /* CPU didn't die: tell everyone. Can't complain. 
*/ -Index: linux-stable/kernel/sched/core.c -=================================================================== ---- linux-stable.orig/kernel/sched/core.c -+++ linux-stable/kernel/sched/core.c -@@ -3418,7 +3418,7 @@ void migrate_disable(void) +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -2896,7 +2896,7 @@ void migrate_disable(void) { struct task_struct *p = current; @@ -454,7 +452,7 @@ Index: linux-stable/kernel/sched/core.c #ifdef CONFIG_SCHED_DEBUG p->migrate_disable_atomic++; #endif -@@ -3449,7 +3449,7 @@ void migrate_enable(void) +@@ -2927,7 +2927,7 @@ void migrate_enable(void) unsigned long flags; struct rq *rq; @@ -463,7 +461,7 @@ Index: linux-stable/kernel/sched/core.c #ifdef CONFIG_SCHED_DEBUG p->migrate_disable_atomic--; #endif -@@ -5341,6 +5341,84 @@ void do_set_cpus_allowed(struct task_str +@@ -4872,6 +4872,84 @@ void do_set_cpus_allowed(struct task_str cpumask_copy(&p->cpus_allowed, new_mask); } diff --git a/debian/patches/features/all/rt/cpu-rt-variants.patch b/debian/patches/features/all/rt/cpu-rt-variants.patch index e6eb3b888..a344edacd 100644 --- a/debian/patches/features/all/rt/cpu-rt-variants.patch +++ b/debian/patches/features/all/rt/cpu-rt-variants.patch @@ -7,10 +7,8 @@ Signed-off-by: Thomas Gleixner include/linux/smp.h | 8 ++++++++ 1 file changed, 8 insertions(+) -Index: linux-stable/include/linux/smp.h -=================================================================== ---- linux-stable.orig/include/linux/smp.h -+++ linux-stable/include/linux/smp.h +--- a/include/linux/smp.h ++++ b/include/linux/smp.h @@ -218,6 +218,14 @@ static inline void kick_all_cpus_sync(vo #define get_cpu() ({ preempt_disable(); smp_processor_id(); }) #define put_cpu() preempt_enable() diff --git a/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch b/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch index 9c819c533..c8728a67a 100644 --- a/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch +++ 
b/debian/patches/features/all/rt/cpumask-disable-offstack-on-rt.patch @@ -11,11 +11,9 @@ Signed-off-by: Thomas Gleixner lib/Kconfig | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) -Index: linux-stable/arch/x86/Kconfig -=================================================================== ---- linux-stable.orig/arch/x86/Kconfig -+++ linux-stable/arch/x86/Kconfig -@@ -757,7 +757,7 @@ config IOMMU_HELPER +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -775,7 +775,7 @@ config IOMMU_HELPER config MAXSMP bool "Enable Maximum number of SMP Processors and NUMA Nodes" depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL @@ -24,11 +22,9 @@ Index: linux-stable/arch/x86/Kconfig ---help--- Enable maximum number of CPUS and NUMA Nodes for this architecture. If unsure, say N. -Index: linux-stable/lib/Kconfig -=================================================================== ---- linux-stable.orig/lib/Kconfig -+++ linux-stable/lib/Kconfig -@@ -312,6 +312,7 @@ config CHECK_SIGNATURE +--- a/lib/Kconfig ++++ b/lib/Kconfig +@@ -315,6 +315,7 @@ config CHECK_SIGNATURE config CPUMASK_OFFSTACK bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS diff --git a/debian/patches/features/all/rt/crypto-make-core-static-and-init-scru-early.patch b/debian/patches/features/all/rt/crypto-make-core-static-and-init-scru-early.patch deleted file mode 100644 index d54a4a11b..000000000 --- a/debian/patches/features/all/rt/crypto-make-core-static-and-init-scru-early.patch +++ /dev/null @@ -1,51 +0,0 @@ -Subject: crypto: Make core builtin and init srcu early -From: Thomas Gleixner -Date: Fri, 12 Oct 2012 11:09:19 +0100 - -When the scru notifier is not initialized before the first user we -crash. - -[ 0.281119] BUG: unable to handle kernel NULL pointer dereference at (null) -[ 0.281124] IP: [] __srcu_read_lock+0x2f/0x79 - -Make the core code built-in for now and enfore early init. - -FIXME: Create a static initializer for this. 
- -Signed-off-by: Thomas Gleixner ---- - crypto/Kconfig | 2 +- - crypto/api.c | 7 +++++++ - 2 files changed, 8 insertions(+), 1 deletion(-) - -Index: linux-stable/crypto/Kconfig -=================================================================== ---- linux-stable.orig/crypto/Kconfig -+++ linux-stable/crypto/Kconfig -@@ -13,7 +13,7 @@ source "crypto/async_tx/Kconfig" - # Cryptographic API Configuration - # - menuconfig CRYPTO -- tristate "Cryptographic API" -+ bool "Cryptographic API" - help - This option provides the core Cryptographic API. - -Index: linux-stable/crypto/api.c -=================================================================== ---- linux-stable.orig/crypto/api.c -+++ linux-stable/crypto/api.c -@@ -34,6 +34,13 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem); - struct srcu_notifier_head crypto_chain; - EXPORT_SYMBOL_GPL(crypto_chain); - -+static int __init crypto_api_init(void) -+{ -+ srcu_init_notifier_head(&crypto_chain); -+ return 0; -+} -+core_initcall(crypto_api_init); -+ - static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg) - { - atomic_inc(&alg->cra_refcnt); diff --git a/debian/patches/features/all/rt/debugobjects-rt.patch b/debian/patches/features/all/rt/debugobjects-rt.patch index bc3cba66c..3e58f6f56 100644 --- a/debian/patches/features/all/rt/debugobjects-rt.patch +++ b/debian/patches/features/all/rt/debugobjects-rt.patch @@ -7,10 +7,8 @@ Signed-off-by: Thomas Gleixner lib/debugobjects.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) -Index: linux-stable/lib/debugobjects.c -=================================================================== ---- linux-stable.orig/lib/debugobjects.c -+++ linux-stable/lib/debugobjects.c +--- a/lib/debugobjects.c ++++ b/lib/debugobjects.c @@ -309,7 +309,10 @@ __debug_object_init(void *addr, struct d struct debug_obj *obj; unsigned long flags; diff --git a/debian/patches/features/all/rt/dm-make-rt-aware.patch b/debian/patches/features/all/rt/dm-make-rt-aware.patch index 2e11eb5e9..e71d4851c 
100644 --- a/debian/patches/features/all/rt/dm-make-rt-aware.patch +++ b/debian/patches/features/all/rt/dm-make-rt-aware.patch @@ -13,11 +13,9 @@ Signed-off-by: Thomas Gleixner drivers/md/dm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) -Index: linux-stable/drivers/md/dm.c -=================================================================== ---- linux-stable.orig/drivers/md/dm.c -+++ linux-stable/drivers/md/dm.c -@@ -1692,14 +1692,14 @@ static void dm_request_fn(struct request +--- a/drivers/md/dm.c ++++ b/drivers/md/dm.c +@@ -1677,14 +1677,14 @@ static void dm_request_fn(struct request if (map_request(ti, clone, md)) goto requeued; diff --git a/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch b/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch index e17452f98..32dc2b1a3 100644 --- a/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch +++ b/debian/patches/features/all/rt/drivers-net-8139-disable-irq-nosync.patch @@ -12,10 +12,8 @@ Signed-off-by: Thomas Gleixner drivers/net/ethernet/realtek/8139too.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -Index: linux-stable/drivers/net/ethernet/realtek/8139too.c -=================================================================== ---- linux-stable.orig/drivers/net/ethernet/realtek/8139too.c -+++ linux-stable/drivers/net/ethernet/realtek/8139too.c +--- a/drivers/net/ethernet/realtek/8139too.c ++++ b/drivers/net/ethernet/realtek/8139too.c @@ -2216,7 +2216,7 @@ static void rtl8139_poll_controller(stru struct rtl8139_private *tp = netdev_priv(dev); const int irq = tp->pci_dev->irq; diff --git a/debian/patches/features/all/rt/drivers-net-at91-make-mdio-protection-rt-safe.patch b/debian/patches/features/all/rt/drivers-net-at91-make-mdio-protection-rt-safe.patch deleted file mode 100644 index 33e850efd..000000000 --- a/debian/patches/features/all/rt/drivers-net-at91-make-mdio-protection-rt-safe.patch +++ /dev/null @@ -1,54 +0,0 @@ -From: Thomas 
Gleixner -Date: Tue, 17 Nov 2009 12:02:43 +0100 -Subject: drivers: net: at91_ether: Make mdio protection -rt safe - -Neither the phy interrupt nor the timer callback which updates the -link status in absense of a phy interrupt are taking lp->lock which -serializes the MDIO access. This works on mainline as at91 is an UP -machine. On preempt-rt the timer callback can run even in the -spin_lock_irq(&lp->lock) protected code pathes because spin_lock_irq -is neither disabling interrupts nor disabling preemption. - -Fix this by adding proper locking to at91ether_phy_interrupt() and -at91_check_ether() which serializes the access on -rt. - -Signed-off-by: Thomas Gleixner - ---- - drivers/net/ethernet/cadence/at91_ether.c | 5 +++++ - 1 file changed, 5 insertions(+) - -Index: linux-stable/drivers/net/ethernet/cadence/at91_ether.c -=================================================================== ---- linux-stable.orig/drivers/net/ethernet/cadence/at91_ether.c -+++ linux-stable/drivers/net/ethernet/cadence/at91_ether.c -@@ -199,7 +199,9 @@ static irqreturn_t at91ether_phy_interru - struct net_device *dev = (struct net_device *) dev_id; - struct at91_private *lp = netdev_priv(dev); - unsigned int phy; -+ unsigned long flags; - -+ spin_lock_irqsave(&lp->lock, flags); - /* - * This hander is triggered on both edges, but the PHY chips expect - * level-triggering. 
We therefore have to check if the PHY actually has -@@ -241,6 +243,7 @@ static irqreturn_t at91ether_phy_interru - - done: - disable_mdi(lp); -+ spin_unlock_irqrestore(&lp->lock, flags); - - return IRQ_HANDLED; - } -@@ -397,9 +400,11 @@ static void at91ether_check_link(unsigne - struct net_device *dev = (struct net_device *) dev_id; - struct at91_private *lp = netdev_priv(dev); - -+ spin_lock_irq(&lp->lock); - enable_mdi(lp); - update_linkspeed(dev, 1); - disable_mdi(lp); -+ spin_unlock_irq(&lp->lock); - - mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL); - } diff --git a/debian/patches/features/all/rt/drivers-net-ehea-mark-rx-irq-no-thread.patch b/debian/patches/features/all/rt/drivers-net-ehea-mark-rx-irq-no-thread.patch deleted file mode 100644 index 942a767c1..000000000 --- a/debian/patches/features/all/rt/drivers-net-ehea-mark-rx-irq-no-thread.patch +++ /dev/null @@ -1,53 +0,0 @@ -From: Darren Hart -Date: Tue, 18 May 2010 14:33:07 -0700 -Subject: drivers: net: ehea: Make rx irq handler non-threaded (IRQF_NO_THREAD) - -The underlying hardware is edge triggered but presented by XICS as level -triggered. The edge triggered interrupts are not reissued after masking. This -is not a problem in mainline which does not mask the interrupt (relying on the -EOI mechanism instead). The threaded interrupts in PREEMPT_RT do mask the -interrupt, and can lose interrupts that occurred while masked, resulting in a -hung ethernet interface. - -The receive handler simply calls napi_schedule(), as such, there is no -significant additional overhead in making this non-threaded, since we either -wakeup the threaded irq handler to call napi_schedule(), or just call -napi_schedule() directly to wakeup the softirqs. As the receive handler is -lockless, there is no need to convert any of the ehea spinlock_t's to -raw_spinlock_t's. - -Without this patch, a simple scp file copy loop would fail quickly (usually -seconds). 
We have over two hours of sustained scp activity with the patch -applied. - -Credit goes to Will Schmidt for lots of instrumentation and tracing which -clarified the scenario and to Thomas Gleixner for the incredibly simple -solution. - -Signed-off-by: Darren Hart -Acked-by: Will Schmidt -Cc: Jan-Bernd Themann -Cc: Nivedita Singhvi -Cc: Brian King -Cc: Michael Ellerman -Cc: Doug Maxey -LKML-Reference: <4BF30793.5070300@us.ibm.com> -Signed-off-by: Thomas Gleixner - ---- - drivers/net/ethernet/ibm/ehea/ehea_main.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -Index: linux-stable/drivers/net/ethernet/ibm/ehea/ehea_main.c -=================================================================== ---- linux-stable.orig/drivers/net/ethernet/ibm/ehea/ehea_main.c -+++ linux-stable/drivers/net/ethernet/ibm/ehea/ehea_main.c -@@ -1308,7 +1308,7 @@ static int ehea_reg_interrupts(struct ne - "%s-queue%d", dev->name, i); - ret = ibmebus_request_irq(pr->eq->attr.ist1, - ehea_recv_irq_handler, -- IRQF_DISABLED, pr->int_send_name, -+ IRQF_NO_THREAD, pr->int_send_name, - pr); - if (ret) { - netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n", diff --git a/debian/patches/features/all/rt/drivers-net-fix-livelock-issues.patch b/debian/patches/features/all/rt/drivers-net-fix-livelock-issues.patch index e4bfc617b..0d9fc5cf2 100644 --- a/debian/patches/features/all/rt/drivers-net-fix-livelock-issues.patch +++ b/debian/patches/features/all/rt/drivers-net-fix-livelock-issues.patch @@ -21,11 +21,9 @@ Signed-off-by: Thomas Gleixner drivers/net/rionet.c | 6 +----- 7 files changed, 9 insertions(+), 31 deletions(-) -Index: linux-stable/drivers/net/ethernet/atheros/atl1c/atl1c_main.c -=================================================================== ---- linux-stable.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c -+++ linux-stable/drivers/net/ethernet/atheros/atl1c/atl1c_main.c -@@ -2122,11 +2122,7 @@ static netdev_tx_t atl1c_xmit_frame(stru +--- 
a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c ++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +@@ -2171,11 +2171,7 @@ static netdev_tx_t atl1c_xmit_frame(stru } tpd_req = atl1c_cal_tpd_req(skb); @@ -38,10 +36,8 @@ Index: linux-stable/drivers/net/ethernet/atheros/atl1c/atl1c_main.c if (atl1c_tpd_avail(adapter, type) < tpd_req) { /* no enough descriptor, just stop queue */ -Index: linux-stable/drivers/net/ethernet/atheros/atl1e/atl1e_main.c -=================================================================== ---- linux-stable.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c -+++ linux-stable/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c ++++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -1803,8 +1803,7 @@ static netdev_tx_t atl1e_xmit_frame(stru return NETDEV_TX_OK; } @@ -52,11 +48,9 @@ Index: linux-stable/drivers/net/ethernet/atheros/atl1e/atl1e_main.c if (atl1e_tpd_avail(adapter) < tpd_req) { /* no enough descriptor, just stop queue */ -Index: linux-stable/drivers/net/ethernet/chelsio/cxgb/sge.c -=================================================================== ---- linux-stable.orig/drivers/net/ethernet/chelsio/cxgb/sge.c -+++ linux-stable/drivers/net/ethernet/chelsio/cxgb/sge.c -@@ -1678,8 +1678,7 @@ static int t1_sge_tx(struct sk_buff *skb +--- a/drivers/net/ethernet/chelsio/cxgb/sge.c ++++ b/drivers/net/ethernet/chelsio/cxgb/sge.c +@@ -1666,8 +1666,7 @@ static int t1_sge_tx(struct sk_buff *skb struct cmdQ *q = &sge->cmdQ[qid]; unsigned int credits, pidx, genbit, count, use_sched_skb = 0; @@ -66,10 +60,8 @@ Index: linux-stable/drivers/net/ethernet/chelsio/cxgb/sge.c reclaim_completed_tx(sge, q); -Index: linux-stable/drivers/net/ethernet/neterion/s2io.c -=================================================================== ---- linux-stable.orig/drivers/net/ethernet/neterion/s2io.c -+++ linux-stable/drivers/net/ethernet/neterion/s2io.c +--- a/drivers/net/ethernet/neterion/s2io.c ++++ 
b/drivers/net/ethernet/neterion/s2io.c @@ -4088,12 +4088,7 @@ static netdev_tx_t s2io_xmit(struct sk_b [skb->priority & (MAX_TX_FIFOS - 1)]; fifo = &mac_control->fifos[queue]; @@ -84,11 +76,9 @@ Index: linux-stable/drivers/net/ethernet/neterion/s2io.c if (sp->config.multiq) { if (__netif_subqueue_stopped(dev, fifo->fifo_no)) { -Index: linux-stable/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c -=================================================================== ---- linux-stable.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c -+++ linux-stable/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c -@@ -2159,10 +2159,8 @@ static int pch_gbe_xmit_frame(struct sk_ +--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c ++++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +@@ -2114,10 +2114,8 @@ static int pch_gbe_xmit_frame(struct sk_ struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring; unsigned long flags; @@ -101,10 +91,8 @@ Index: linux-stable/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) { netif_stop_queue(netdev); spin_unlock_irqrestore(&tx_ring->tx_lock, flags); -Index: linux-stable/drivers/net/ethernet/tehuti/tehuti.c -=================================================================== ---- linux-stable.orig/drivers/net/ethernet/tehuti/tehuti.c -+++ linux-stable/drivers/net/ethernet/tehuti/tehuti.c +--- a/drivers/net/ethernet/tehuti/tehuti.c ++++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -1630,13 +1630,8 @@ static netdev_tx_t bdx_tx_transmit(struc unsigned long flags; @@ -121,11 +109,9 @@ Index: linux-stable/drivers/net/ethernet/tehuti/tehuti.c /* build tx descriptor */ BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */ -Index: linux-stable/drivers/net/rionet.c -=================================================================== ---- linux-stable.orig/drivers/net/rionet.c -+++ linux-stable/drivers/net/rionet.c -@@ -178,11 +178,7 @@ static int rionet_start_xmit(struct 
sk_b +--- a/drivers/net/rionet.c ++++ b/drivers/net/rionet.c +@@ -174,11 +174,7 @@ static int rionet_start_xmit(struct sk_b unsigned long flags; int add_num = 1; @@ -137,4 +123,4 @@ Index: linux-stable/drivers/net/rionet.c + spin_lock_irqsave(&rnet->tx_lock, flags); if (is_multicast_ether_addr(eth->h_dest)) - add_num = nact; + add_num = nets[rnet->mport->id].nact; diff --git a/debian/patches/features/all/rt/drivers-net-gianfar-make-rt-aware.patch b/debian/patches/features/all/rt/drivers-net-gianfar-make-rt-aware.patch index cdb4a5adc..53030894f 100644 --- a/debian/patches/features/all/rt/drivers-net-gianfar-make-rt-aware.patch +++ b/debian/patches/features/all/rt/drivers-net-gianfar-make-rt-aware.patch @@ -15,11 +15,9 @@ Tested-by: Xianghua Xiao drivers/net/ethernet/freescale/gianfar.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) -Index: linux-stable/drivers/net/ethernet/freescale/gianfar.c -=================================================================== ---- linux-stable.orig/drivers/net/ethernet/freescale/gianfar.c -+++ linux-stable/drivers/net/ethernet/freescale/gianfar.c -@@ -1652,7 +1652,7 @@ void stop_gfar(struct net_device *dev) +--- a/drivers/net/ethernet/freescale/gianfar.c ++++ b/drivers/net/ethernet/freescale/gianfar.c +@@ -1663,7 +1663,7 @@ void stop_gfar(struct net_device *dev) /* Lock it down */ @@ -28,7 +26,7 @@ Index: linux-stable/drivers/net/ethernet/freescale/gianfar.c lock_tx_qs(priv); lock_rx_qs(priv); -@@ -1660,7 +1660,7 @@ void stop_gfar(struct net_device *dev) +@@ -1671,7 +1671,7 @@ void stop_gfar(struct net_device *dev) unlock_rx_qs(priv); unlock_tx_qs(priv); @@ -37,7 +35,7 @@ Index: linux-stable/drivers/net/ethernet/freescale/gianfar.c /* Free the IRQs */ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { -@@ -2938,7 +2938,7 @@ static void adjust_link(struct net_devic +@@ -2951,7 +2951,7 @@ static void adjust_link(struct net_devic struct phy_device *phydev = priv->phydev; int new_state = 0; @@ -46,7 +44,7 @@ 
Index: linux-stable/drivers/net/ethernet/freescale/gianfar.c lock_tx_qs(priv); if (phydev->link) { -@@ -3007,7 +3007,7 @@ static void adjust_link(struct net_devic +@@ -3020,7 +3020,7 @@ static void adjust_link(struct net_devic if (new_state && netif_msg_link(priv)) phy_print_status(phydev); unlock_tx_qs(priv); diff --git a/debian/patches/features/all/rt/drivers-net-tulip-add-missing-pci-disable.patch b/debian/patches/features/all/rt/drivers-net-tulip-add-missing-pci-disable.patch index b83d282b7..78beb2b0c 100644 --- a/debian/patches/features/all/rt/drivers-net-tulip-add-missing-pci-disable.patch +++ b/debian/patches/features/all/rt/drivers-net-tulip-add-missing-pci-disable.patch @@ -11,11 +11,9 @@ Signed-off-by: Thomas Gleixner drivers/net/ethernet/dec/tulip/tulip_core.c | 1 + 1 file changed, 1 insertion(+) -Index: linux-stable/drivers/net/ethernet/dec/tulip/tulip_core.c -=================================================================== ---- linux-stable.orig/drivers/net/ethernet/dec/tulip/tulip_core.c -+++ linux-stable/drivers/net/ethernet/dec/tulip/tulip_core.c -@@ -1948,6 +1948,7 @@ static void __devexit tulip_remove_one ( +--- a/drivers/net/ethernet/dec/tulip/tulip_core.c ++++ b/drivers/net/ethernet/dec/tulip/tulip_core.c +@@ -1943,6 +1943,7 @@ static void tulip_remove_one(struct pci_ pci_iounmap(pdev, tp->base_addr); free_netdev (dev); pci_release_regions (pdev); diff --git a/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch b/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch index 6f8b3e472..4dd389811 100644 --- a/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch +++ b/debian/patches/features/all/rt/drivers-net-vortex-fix-locking-issues.patch @@ -17,10 +17,8 @@ Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar -Index: linux-stable/drivers/net/ethernet/3com/3c59x.c -=================================================================== ---- 
linux-stable.orig/drivers/net/ethernet/3com/3c59x.c -+++ linux-stable/drivers/net/ethernet/3com/3c59x.c +--- a/drivers/net/ethernet/3com/3c59x.c ++++ b/drivers/net/ethernet/3com/3c59x.c @@ -843,9 +843,9 @@ static void poll_vortex(struct net_devic { struct vortex_private *vp = netdev_priv(dev); @@ -33,7 +31,7 @@ Index: linux-stable/drivers/net/ethernet/3com/3c59x.c } #endif -@@ -1920,12 +1920,12 @@ static void vortex_tx_timeout(struct net +@@ -1919,12 +1919,12 @@ static void vortex_tx_timeout(struct net * Block interrupts because vortex_interrupt does a bare spin_lock() */ unsigned long flags; diff --git a/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch b/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch index e76754c08..7b85cecb5 100644 --- a/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch +++ b/debian/patches/features/all/rt/drivers-random-reduce-preempt-disabled-region.patch @@ -11,11 +11,9 @@ Signed-off-by: Thomas Gleixner drivers/char/random.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) -Index: linux-stable/drivers/char/random.c -=================================================================== ---- linux-stable.orig/drivers/char/random.c -+++ linux-stable/drivers/char/random.c -@@ -679,9 +679,12 @@ static void add_timer_randomness(struct +--- a/drivers/char/random.c ++++ b/drivers/char/random.c +@@ -676,9 +676,12 @@ static void add_timer_randomness(struct preempt_disable(); /* if over the trickle threshold, use only 1 in 4096 samples */ if (input_pool.entropy_count > trickle_thresh && @@ -30,7 +28,7 @@ Index: linux-stable/drivers/char/random.c sample.jiffies = jiffies; sample.cycles = get_cycles(); sample.num = num; -@@ -722,8 +725,6 @@ static void add_timer_randomness(struct +@@ -719,8 +722,6 @@ static void add_timer_randomness(struct credit_entropy_bits(&input_pool, min_t(int, fls(delta>>1), 11)); } diff --git 
a/debian/patches/features/all/rt/drivers-serial-call-flush_to_ldisc-when-the-irq-is-t.patch b/debian/patches/features/all/rt/drivers-serial-call-flush_to_ldisc-when-the-irq-is-t.patch index a6ddc990a..b8c297eb7 100644 --- a/debian/patches/features/all/rt/drivers-serial-call-flush_to_ldisc-when-the-irq-is-t.patch +++ b/debian/patches/features/all/rt/drivers-serial-call-flush_to_ldisc-when-the-irq-is-t.patch @@ -5,45 +5,24 @@ Subject: serial: 8250: Call flush_to_ldisc when the irq is threaded Signed-off-by: Ingo Molnar --- - drivers/tty/serial/8250/8250.c | 2 ++ - drivers/tty/tty_buffer.c | 4 ++++ - 2 files changed, 6 insertions(+) + drivers/tty/tty_buffer.c | 5 +++++ + 1 file changed, 5 insertions(+) -Index: linux-stable/drivers/tty/serial/8250/8250.c -=================================================================== ---- linux-stable.orig/drivers/tty/serial/8250/8250.c -+++ linux-stable/drivers/tty/serial/8250/8250.c -@@ -1549,12 +1549,14 @@ static irqreturn_t serial8250_interrupt( - - l = l->next; - -+#ifndef CONFIG_PREEMPT_RT_FULL - if (l == i->head && pass_counter++ > PASS_LIMIT) { - /* If we hit this, we're dead. 
*/ - printk_ratelimited(KERN_ERR - "serial8250: too much work for irq%d\n", irq); - break; - } -+#endif - } while (l != end); - - spin_unlock(&i->lock); -Index: linux-stable/drivers/tty/tty_buffer.c -=================================================================== ---- linux-stable.orig/drivers/tty/tty_buffer.c -+++ linux-stable/drivers/tty/tty_buffer.c -@@ -538,10 +538,14 @@ void tty_flip_buffer_push(struct tty_str - tty->buf.tail->commit = tty->buf.tail->used; - spin_unlock_irqrestore(&tty->buf.lock, flags); +--- a/drivers/tty/tty_buffer.c ++++ b/drivers/tty/tty_buffer.c +@@ -566,10 +566,15 @@ void tty_flip_buffer_push(struct tty_str + buf->tail->commit = buf->tail->used; + spin_unlock_irqrestore(&buf->lock, flags); +#ifndef CONFIG_PREEMPT_RT_FULL if (tty->low_latency) - flush_to_ldisc(&tty->buf.work); + flush_to_ldisc(&buf->work); else - schedule_work(&tty->buf.work); + schedule_work(&buf->work); +#else -+ flush_to_ldisc(&tty->buf.work); ++ flush_to_ldisc(&buf->work); +#endif ++ } EXPORT_SYMBOL(tty_flip_buffer_push); diff --git a/debian/patches/features/all/rt/drivers-serial-cleanup-locking-for-rt.patch b/debian/patches/features/all/rt/drivers-serial-cleanup-locking-for-rt.patch index 3a6363324..0ad518ea3 100644 --- a/debian/patches/features/all/rt/drivers-serial-cleanup-locking-for-rt.patch +++ b/debian/patches/features/all/rt/drivers-serial-cleanup-locking-for-rt.patch @@ -9,11 +9,9 @@ Signed-off-by: Thomas Gleixner drivers/tty/serial/8250/8250.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) -Index: linux-stable/drivers/tty/serial/8250/8250.c -=================================================================== ---- linux-stable.orig/drivers/tty/serial/8250/8250.c -+++ linux-stable/drivers/tty/serial/8250/8250.c -@@ -2773,14 +2773,10 @@ serial8250_console_write(struct console +--- a/drivers/tty/serial/8250/8250.c ++++ b/drivers/tty/serial/8250/8250.c +@@ -2900,14 +2900,10 @@ serial8250_console_write(struct console 
touch_nmi_watchdog(); @@ -32,7 +30,7 @@ Index: linux-stable/drivers/tty/serial/8250/8250.c /* * First save the IER then disable the interrupts -@@ -2812,8 +2808,7 @@ serial8250_console_write(struct console +@@ -2939,8 +2935,7 @@ serial8250_console_write(struct console serial8250_modem_status(up); if (locked) diff --git a/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch b/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch index 382de0a9d..03b070dd7 100644 --- a/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch +++ b/debian/patches/features/all/rt/drivers-tty-fix-omap-lock-crap.patch @@ -7,13 +7,11 @@ Signed-off-by: Thomas Gleixner drivers/tty/serial/omap-serial.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) -Index: linux-stable/drivers/tty/serial/omap-serial.c -=================================================================== ---- linux-stable.orig/drivers/tty/serial/omap-serial.c -+++ linux-stable/drivers/tty/serial/omap-serial.c -@@ -1082,13 +1082,10 @@ serial_omap_console_write(struct console +--- a/drivers/tty/serial/omap-serial.c ++++ b/drivers/tty/serial/omap-serial.c +@@ -1166,13 +1166,10 @@ serial_omap_console_write(struct console - pm_runtime_get_sync(&up->pdev->dev); + pm_runtime_get_sync(up->dev); - local_irq_save(flags); - if (up->port.sysrq) @@ -28,9 +26,9 @@ Index: linux-stable/drivers/tty/serial/omap-serial.c /* * First save the IER then disable the interrupts -@@ -1117,8 +1114,7 @@ serial_omap_console_write(struct console - pm_runtime_mark_last_busy(&up->pdev->dev); - pm_runtime_put_autosuspend(&up->pdev->dev); +@@ -1201,8 +1198,7 @@ serial_omap_console_write(struct console + pm_runtime_mark_last_busy(up->dev); + pm_runtime_put_autosuspend(up->dev); if (locked) - spin_unlock(&up->port.lock); - local_irq_restore(flags); diff --git a/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch 
b/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch new file mode 100644 index 000000000..0e648b0f1 --- /dev/null +++ b/debian/patches/features/all/rt/drivers-tty-pl011-irq-disable-madness.patch @@ -0,0 +1,44 @@ +Subject: drivers-tty-pl011-irq-disable-madness.patch +From: Thomas Gleixner +Date: Tue, 08 Jan 2013 21:36:51 +0100 + +Signed-off-by: Thomas Gleixner +--- + drivers/tty/serial/amba-pl011.c | 15 ++++++++++----- + 1 file changed, 10 insertions(+), 5 deletions(-) + +--- a/drivers/tty/serial/amba-pl011.c ++++ b/drivers/tty/serial/amba-pl011.c +@@ -1779,13 +1779,19 @@ pl011_console_write(struct console *co, + + clk_enable(uap->clk); + +- local_irq_save(flags); ++ /* ++ * local_irq_save(flags); ++ * ++ * This local_irq_save() is nonsense. If we come in via sysrq ++ * handling then interrupts are already disabled. Aside of ++ * that the port.sysrq check is racy on SMP regardless. ++ */ + if (uap->port.sysrq) + locked = 0; + else if (oops_in_progress) +- locked = spin_trylock(&uap->port.lock); ++ locked = spin_trylock_irqsave(&uap->port.lock, flags); + else +- spin_lock(&uap->port.lock); ++ spin_lock_irqsave(&uap->port.lock, flags); + + /* + * First save the CR then disable the interrupts +@@ -1807,8 +1813,7 @@ pl011_console_write(struct console *co, + writew(old_cr, uap->port.membase + UART011_CR); + + if (locked) +- spin_unlock(&uap->port.lock); +- local_irq_restore(flags); ++ spin_unlock_irqrestore(&uap->port.lock, flags); + + clk_disable(uap->clk); + } diff --git a/debian/patches/features/all/rt/early-printk-consolidate.patch b/debian/patches/features/all/rt/early-printk-consolidate.patch index 54015e02a..29d5d9e68 100644 --- a/debian/patches/features/all/rt/early-printk-consolidate.patch +++ b/debian/patches/features/all/rt/early-printk-consolidate.patch @@ -21,10 +21,8 @@ Signed-off-by: Thomas Gleixner kernel/printk.c | 30 +++++++++++++++++++++++------- 15 files changed, 68 insertions(+), 110 deletions(-) -Index: 
linux-stable/arch/arm/kernel/early_printk.c -=================================================================== ---- linux-stable.orig/arch/arm/kernel/early_printk.c -+++ linux-stable/arch/arm/kernel/early_printk.c +--- a/arch/arm/kernel/early_printk.c ++++ b/arch/arm/kernel/early_printk.c @@ -29,28 +29,17 @@ static void early_console_write(struct c early_write(s, n); } @@ -57,10 +55,8 @@ Index: linux-stable/arch/arm/kernel/early_printk.c return 0; } -Index: linux-stable/arch/blackfin/kernel/early_printk.c -=================================================================== ---- linux-stable.orig/arch/blackfin/kernel/early_printk.c -+++ linux-stable/arch/blackfin/kernel/early_printk.c +--- a/arch/blackfin/kernel/early_printk.c ++++ b/arch/blackfin/kernel/early_printk.c @@ -25,8 +25,6 @@ extern struct console *bfin_earlyserial_ extern struct console *bfin_jc_early_init(void); #endif @@ -70,10 +66,8 @@ Index: linux-stable/arch/blackfin/kernel/early_printk.c /* Default console */ #define DEFAULT_PORT 0 #define DEFAULT_CFLAG CS8|B57600 -Index: linux-stable/arch/microblaze/kernel/early_printk.c -=================================================================== ---- linux-stable.orig/arch/microblaze/kernel/early_printk.c -+++ linux-stable/arch/microblaze/kernel/early_printk.c +--- a/arch/microblaze/kernel/early_printk.c ++++ b/arch/microblaze/kernel/early_printk.c @@ -21,7 +21,6 @@ #include #include @@ -140,10 +134,8 @@ Index: linux-stable/arch/microblaze/kernel/early_printk.c - early_console_initialized = 0; + early_console = NULL; } -Index: linux-stable/arch/mips/kernel/early_printk.c -=================================================================== ---- linux-stable.orig/arch/mips/kernel/early_printk.c -+++ linux-stable/arch/mips/kernel/early_printk.c +--- a/arch/mips/kernel/early_printk.c ++++ b/arch/mips/kernel/early_printk.c @@ -8,6 +8,7 @@ * written by Ralf Baechle (ralf@linux-mips.org) */ @@ -177,11 +169,9 @@ Index: 
linux-stable/arch/mips/kernel/early_printk.c - register_console(&early_console); + register_console(&early_console_prom); } -Index: linux-stable/arch/powerpc/kernel/udbg.c -=================================================================== ---- linux-stable.orig/arch/powerpc/kernel/udbg.c -+++ linux-stable/arch/powerpc/kernel/udbg.c -@@ -179,15 +179,13 @@ static struct console udbg_console = { +--- a/arch/powerpc/kernel/udbg.c ++++ b/arch/powerpc/kernel/udbg.c +@@ -156,15 +156,13 @@ static struct console udbg_console = { .index = 0, }; @@ -198,7 +188,7 @@ Index: linux-stable/arch/powerpc/kernel/udbg.c return; if (!udbg_putc) -@@ -197,7 +195,7 @@ void __init register_early_udbg_console( +@@ -174,7 +172,7 @@ void __init register_early_udbg_console( printk(KERN_INFO "early console immortal !\n"); udbg_console.flags &= ~CON_BOOT; } @@ -207,10 +197,8 @@ Index: linux-stable/arch/powerpc/kernel/udbg.c register_console(&udbg_console); } -Index: linux-stable/arch/sh/kernel/sh_bios.c -=================================================================== ---- linux-stable.orig/arch/sh/kernel/sh_bios.c -+++ linux-stable/arch/sh/kernel/sh_bios.c +--- a/arch/sh/kernel/sh_bios.c ++++ b/arch/sh/kernel/sh_bios.c @@ -144,8 +144,6 @@ static struct console bios_console = { .index = -1, }; @@ -220,10 +208,8 @@ Index: linux-stable/arch/sh/kernel/sh_bios.c static int __init setup_early_printk(char *buf) { int keep_early = 0; -Index: linux-stable/arch/sparc/kernel/setup_32.c -=================================================================== ---- linux-stable.orig/arch/sparc/kernel/setup_32.c -+++ linux-stable/arch/sparc/kernel/setup_32.c +--- a/arch/sparc/kernel/setup_32.c ++++ b/arch/sparc/kernel/setup_32.c @@ -309,6 +309,7 @@ void __init setup_arch(char **cmdline_p) boot_flags_init(*cmdline_p); @@ -232,12 +218,10 @@ Index: linux-stable/arch/sparc/kernel/setup_32.c register_console(&prom_early_console); printk("ARCH: "); -Index: linux-stable/arch/sparc/kernel/setup_64.c 
-=================================================================== ---- linux-stable.orig/arch/sparc/kernel/setup_64.c -+++ linux-stable/arch/sparc/kernel/setup_64.c -@@ -487,6 +487,12 @@ static void __init init_sparc64_elf_hwca - popc_patch(); +--- a/arch/sparc/kernel/setup_64.c ++++ b/arch/sparc/kernel/setup_64.c +@@ -551,6 +551,12 @@ static void __init init_sparc64_elf_hwca + pause_patch(); } +static inline void register_prom_console(void) @@ -249,7 +233,7 @@ Index: linux-stable/arch/sparc/kernel/setup_64.c void __init setup_arch(char **cmdline_p) { /* Initialize PROM console and command line. */ -@@ -498,7 +504,7 @@ void __init setup_arch(char **cmdline_p) +@@ -562,7 +568,7 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_EARLYFB if (btext_find_display()) #endif @@ -258,10 +242,8 @@ Index: linux-stable/arch/sparc/kernel/setup_64.c if (tlb_type == hypervisor) printk("ARCH: SUN4V\n"); -Index: linux-stable/arch/tile/kernel/early_printk.c -=================================================================== ---- linux-stable.orig/arch/tile/kernel/early_printk.c -+++ linux-stable/arch/tile/kernel/early_printk.c +--- a/arch/tile/kernel/early_printk.c ++++ b/arch/tile/kernel/early_printk.c @@ -17,6 +17,7 @@ #include #include @@ -336,10 +318,8 @@ Index: linux-stable/arch/tile/kernel/early_printk.c return; early_printk("\ Machine shutting down before console output is fully initialized.\n\ -Index: linux-stable/arch/um/kernel/early_printk.c -=================================================================== ---- linux-stable.orig/arch/um/kernel/early_printk.c -+++ linux-stable/arch/um/kernel/early_printk.c +--- a/arch/um/kernel/early_printk.c ++++ b/arch/um/kernel/early_printk.c @@ -16,7 +16,7 @@ static void early_console_write(struct c um_early_printk(s, n); } @@ -362,11 +342,9 @@ Index: linux-stable/arch/um/kernel/early_printk.c return 0; } -Index: linux-stable/arch/unicore32/kernel/early_printk.c 
-=================================================================== ---- linux-stable.orig/arch/unicore32/kernel/early_printk.c -+++ linux-stable/arch/unicore32/kernel/early_printk.c -@@ -33,21 +33,17 @@ static struct console early_ocd_console +--- a/arch/unicore32/kernel/early_printk.c ++++ b/arch/unicore32/kernel/early_printk.c +@@ -33,21 +33,17 @@ static struct console early_ocd_console .index = -1, }; @@ -392,10 +370,8 @@ Index: linux-stable/arch/unicore32/kernel/early_printk.c if (keep_early) early_console->flags &= ~CON_BOOT; -Index: linux-stable/arch/x86/kernel/early_printk.c -=================================================================== ---- linux-stable.orig/arch/x86/kernel/early_printk.c -+++ linux-stable/arch/x86/kernel/early_printk.c +--- a/arch/x86/kernel/early_printk.c ++++ b/arch/x86/kernel/early_printk.c @@ -169,25 +169,9 @@ static struct console early_serial_conso .index = -1, }; @@ -434,11 +410,9 @@ Index: linux-stable/arch/x86/kernel/early_printk.c keep = (strstr(buf, "keep") != NULL); -Index: linux-stable/include/linux/console.h -=================================================================== ---- linux-stable.orig/include/linux/console.h -+++ linux-stable/include/linux/console.h -@@ -133,6 +133,7 @@ struct console { +--- a/include/linux/console.h ++++ b/include/linux/console.h +@@ -141,6 +141,7 @@ struct console { for (con = console_drivers; con != NULL; con = con->next) extern int console_set_on_cmdline; @@ -446,10 +420,8 @@ Index: linux-stable/include/linux/console.h extern int add_preferred_console(char *name, int idx, char *options); extern int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options); -Index: linux-stable/include/linux/printk.h -=================================================================== ---- linux-stable.orig/include/linux/printk.h -+++ linux-stable/include/linux/printk.h +--- a/include/linux/printk.h ++++ b/include/linux/printk.h @@ -95,8 +95,14 @@ int no_printk(const char 
*fmt, ...) return 0; } @@ -465,10 +437,8 @@ Index: linux-stable/include/linux/printk.h extern int printk_needs_cpu(int cpu); extern void printk_tick(void); -Index: linux-stable/kernel/printk.c -=================================================================== ---- linux-stable.orig/kernel/printk.c -+++ linux-stable/kernel/printk.c +--- a/kernel/printk.c ++++ b/kernel/printk.c @@ -48,13 +48,6 @@ #define CREATE_TRACE_POINTS #include @@ -483,9 +453,9 @@ Index: linux-stable/kernel/printk.c /* printk's without a loglevel use this.. */ #define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL -@@ -1232,6 +1225,29 @@ SYSCALL_DEFINE3(syslog, int, type, char - return do_syslog(type, buf, len, SYSLOG_FROM_CALL); - } +@@ -756,6 +749,29 @@ module_param(ignore_loglevel, bool, S_IR + MODULE_PARM_DESC(ignore_loglevel, "ignore loglevel setting, to" + "print all kernel messages to the console."); +#ifdef CONFIG_EARLY_PRINTK +struct console *early_console; @@ -510,6 +480,6 @@ Index: linux-stable/kernel/printk.c +} +#endif + - static bool __read_mostly ignore_loglevel; + #ifdef CONFIG_BOOT_PRINTK_DELAY - static int __init ignore_loglevel_setup(char *str) + static int boot_delay; /* msecs delay after each printk during bootup */ diff --git a/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch b/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch index 269a907d3..e419acc51 100644 --- a/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch +++ b/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch @@ -7,11 +7,9 @@ Signed-off-by: Thomas Gleixner fs/eventpoll.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) -Index: linux-stable/fs/eventpoll.c -=================================================================== ---- linux-stable.orig/fs/eventpoll.c -+++ linux-stable/fs/eventpoll.c -@@ -495,12 +495,12 @@ static int ep_poll_wakeup_proc(void *pri +--- a/fs/eventpoll.c ++++ b/fs/eventpoll.c +@@ -497,12 +497,12 @@ static int 
ep_poll_wakeup_proc(void *pri */ static void ep_poll_safewake(wait_queue_head_t *wq) { diff --git a/debian/patches/features/all/rt/filemap-fix-up.patch b/debian/patches/features/all/rt/filemap-fix-up.patch index 0f6897c7b..26f78d7c7 100644 --- a/debian/patches/features/all/rt/filemap-fix-up.patch +++ b/debian/patches/features/all/rt/filemap-fix-up.patch @@ -9,10 +9,8 @@ Link: http://lkml.kernel.org/n/tip-m6yuzd6ul717hlnl2gj6p3ou@git.kernel.org mm/filemap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -Index: linux-stable/mm/filemap.c -=================================================================== ---- linux-stable.orig/mm/filemap.c -+++ linux-stable/mm/filemap.c +--- a/mm/filemap.c ++++ b/mm/filemap.c @@ -1955,7 +1955,7 @@ size_t iov_iter_copy_from_user_atomic(st char *kaddr; size_t copied; diff --git a/debian/patches/features/all/rt/fix-1-2-slub-do-not-dereference-null-pointer-in-node_match.patch b/debian/patches/features/all/rt/fix-1-2-slub-do-not-dereference-null-pointer-in-node_match.patch new file mode 100644 index 000000000..f4c300389 --- /dev/null +++ b/debian/patches/features/all/rt/fix-1-2-slub-do-not-dereference-null-pointer-in-node_match.patch @@ -0,0 +1,34 @@ +Subject: FIX [1/2] slub: Do not dereference NULL pointer in node_match +From: Christoph Lameter +Date: Wed, 23 Jan 2013 21:45:47 +0000 + +The variables accessed in slab_alloc are volatile and therefore +the page pointer passed to node_match can be NULL. The processing +of data in slab_alloc is tentative until either the cmpxhchg +succeeds or the __slab_alloc slowpath is invoked. Both are +able to perform the same allocation from the freelist. + +Check for the NULL pointer in node_match. + +A false positive will lead to a retry of the loop in __slab_alloc. 
+ +Signed-off-by: Christoph Lameter +Cc: Steven Rostedt +Cc: Pekka Enberg +Signed-off-by: Thomas Gleixner + +--- + mm/slub.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -2041,7 +2041,7 @@ static void flush_all(struct kmem_cache + static inline int node_match(struct page *page, int node) + { + #ifdef CONFIG_NUMA +- if (node != NUMA_NO_NODE && page_to_nid(page) != node) ++ if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node)) + return 0; + #endif + return 1; diff --git a/debian/patches/features/all/rt/fix-2-2-slub-tid-must-be-retrieved-from-the-percpu-area-of-the-current-processor.patch b/debian/patches/features/all/rt/fix-2-2-slub-tid-must-be-retrieved-from-the-percpu-area-of-the-current-processor.patch new file mode 100644 index 000000000..0915b2afa --- /dev/null +++ b/debian/patches/features/all/rt/fix-2-2-slub-tid-must-be-retrieved-from-the-percpu-area-of-the-current-processor.patch @@ -0,0 +1,65 @@ +Subject: FIX [2/2] slub: Tid must be retrieved from the percpu area of the current processor +From: Christoph Lameter +Date: Wed, 23 Jan 2013 21:45:48 +0000 + +As Steven Rostedt has pointer out: Rescheduling could occur on a differnet processor +after the determination of the per cpu pointer and before the tid is retrieved. +This could result in allocation from the wrong node in slab_alloc. + +The effect is much more severe in slab_free() where we could free to the freelist +of the wrong page. + +The window for something like that occurring is pretty small but it is possible. + +Signed-off-by: Christoph Lameter +Cc: Steven Rostedt +Cc: Pekka Enberg +Signed-off-by: Thomas Gleixner + +--- + mm/slub.c | 15 ++++++++------- + 1 file changed, 8 insertions(+), 7 deletions(-) + +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -2331,13 +2331,13 @@ static __always_inline void *slab_alloc_ + + s = memcg_kmem_get_cache(s, gfpflags); + redo: +- + /* +- * Must read kmem_cache cpu data via this cpu ptr. Preemption is +- * enabled. 
We may switch back and forth between cpus while +- * reading from one cpu area. That does not matter as long +- * as we end up on the original cpu again when doing the cmpxchg. ++ * Preemption is disabled for the retrieval of the tid because that ++ * must occur from the current processor. We cannot allow rescheduling ++ * on a different processor between the determination of the pointer ++ * and the retrieval of the tid. + */ ++ preempt_disable(); + c = __this_cpu_ptr(s->cpu_slab); + + /* +@@ -2347,7 +2347,7 @@ redo: + * linked list in between. + */ + tid = c->tid; +- barrier(); ++ preempt_enable(); + + object = c->freelist; + page = c->page; +@@ -2594,10 +2594,11 @@ redo: + * data is retrieved via this pointer. If we are on the same cpu + * during the cmpxchg then the free will succedd. + */ ++ preempt_disable(); + c = __this_cpu_ptr(s->cpu_slab); + + tid = c->tid; +- barrier(); ++ preempt_enable(); + + if (likely(page == c->page)) { + set_freepointer(s, object, c->freelist); diff --git a/debian/patches/features/all/rt/fix-crypto-api-init-for-3-6-4-rt10.patch b/debian/patches/features/all/rt/fix-crypto-api-init-for-3-6-4-rt10.patch deleted file mode 100644 index 07c4ee483..000000000 --- a/debian/patches/features/all/rt/fix-crypto-api-init-for-3-6-4-rt10.patch +++ /dev/null @@ -1,38 +0,0 @@ -Subject: crypto: Remove duplicate srcu init -From: Milan Broz -Date: Tue, 30 Oct 2012 16:27:18 +0100 - -In peterz-srcu-crypto-chain.patch the blocking notifier is changed to -srcu notifier and added initialization to module init fucntion. -Later, in crypto-make-core-static-and-init-scru-early.patch, is that -initialization added also to core_initcall(), but not removed from -Peter's patch. So the initializer is called twice which can wipe out -already registered notifiers. This cause a failure in initialization -of larval algorithms, like e.g. cbc(aes). - -Remove the old one. 
- -Signed-off-by: Milan Broz -Cc: Tvrtko Ursulin -Cc: dm-crypt@saout.de -Cc: okozina@redhat.com -Cc: u.kleine-koenig@pengutronix.de -Link: http://lkml.kernel.org/r/508FF1D6.3030900@redhat.com -Signed-off-by: Thomas Gleixner - ---- - crypto/algapi.c | 1 - - 1 file changed, 1 deletion(-) - -Index: linux-stable/crypto/algapi.c -=================================================================== ---- linux-stable.orig/crypto/algapi.c -+++ linux-stable/crypto/algapi.c -@@ -956,7 +956,6 @@ EXPORT_SYMBOL_GPL(crypto_xor); - - static int __init crypto_algapi_init(void) - { -- srcu_init_notifier_head(&crypto_chain); - crypto_init_proc(); - return 0; - } diff --git a/debian/patches/features/all/rt/fix-random-fallout.patch b/debian/patches/features/all/rt/fix-random-fallout.patch deleted file mode 100644 index 11051d9af..000000000 --- a/debian/patches/features/all/rt/fix-random-fallout.patch +++ /dev/null @@ -1,27 +0,0 @@ -Subject: genirq: Fix 32bit random changes fallout -From: Thomas Gleixner -Date: Wed, 31 Oct 2012 17:06:19 +0100 - -On 32bit sytems pointers are surprisingly 32bit wide. So gcc complains -correctly about a cast to a different size. Use an cast to unsigned -long instead which handles this correctly for bioth 32 and 64 bit. 
- -Signed-off-by: Thomas Gleixner -Cc: stable-rt@vger.kernel.org ---- - kernel/irq/manage.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -Index: linux-stable/kernel/irq/manage.c -=================================================================== ---- linux-stable.orig/kernel/irq/manage.c -+++ linux-stable/kernel/irq/manage.c -@@ -855,7 +855,7 @@ static int irq_thread(void *data) - #ifdef CONFIG_PREEMPT_RT_FULL - migrate_disable(); - add_interrupt_randomness(action->irq, 0, -- desc->random_ip ^ (u64) action); -+ desc->random_ip ^ (unsigned long) action); - migrate_enable(); - #endif - wake_threads_waitq(desc); diff --git a/debian/patches/features/all/rt/fix-rq-3elock-vs-logbuf_lock-unlock-race.patch b/debian/patches/features/all/rt/fix-rq-3elock-vs-logbuf_lock-unlock-race.patch new file mode 100644 index 000000000..1ed928e63 --- /dev/null +++ b/debian/patches/features/all/rt/fix-rq-3elock-vs-logbuf_lock-unlock-race.patch @@ -0,0 +1,34 @@ +Subject: printk: Fix rq->lock vs logbuf_lock unlock lock inversion +From: "Bu, Yitian" +Date: Mon, 18 Feb 2013 12:53:37 +0000 + +commit 07354eb1a74d1 ("locking printk: Annotate logbuf_lock as raw") +reintroduced a lock inversion problem which was fixed in commit +0b5e1c5255 ("printk: Release console_sem after logbuf_lock"). This +happened probably when fixing up patch rejects. + +Restore the ordering and unlock logbuf_lock before releasing +console_sem. 
+ +Signed-off-by: ybu +Cc: Peter Zijlstra +Cc: stable@vger.kernel.org +Link: http://lkml.kernel.org/r/E807E903FE6CBE4D95E420FBFCC273B827413C@nasanexd01h.na.qualcomm.com +Signed-off-by: Thomas Gleixner +--- + kernel/printk.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/kernel/printk.c ++++ b/kernel/printk.c +@@ -1358,9 +1358,9 @@ static int console_trylock_for_printk(un + } + } + logbuf_cpu = UINT_MAX; ++ raw_spin_unlock(&logbuf_lock); + if (wake) + up(&console_sem); +- raw_spin_unlock(&logbuf_lock); + return retval; + } + diff --git a/debian/patches/features/all/rt/fix-rt-int3-x86_32-3.2-rt.patch b/debian/patches/features/all/rt/fix-rt-int3-x86_32-3.2-rt.patch index c1ecdf8b2..8f450e968 100644 --- a/debian/patches/features/all/rt/fix-rt-int3-x86_32-3.2-rt.patch +++ b/debian/patches/features/all/rt/fix-rt-int3-x86_32-3.2-rt.patch @@ -22,11 +22,9 @@ Signed-off-by: Thomas Gleixner arch/x86/kernel/traps.c | 32 +++++++++++++++++++++++--------- 1 file changed, 23 insertions(+), 9 deletions(-) -Index: linux-stable/arch/x86/kernel/traps.c -=================================================================== ---- linux-stable.orig/arch/x86/kernel/traps.c -+++ linux-stable/arch/x86/kernel/traps.c -@@ -87,9 +87,21 @@ static inline void conditional_sti(struc +--- a/arch/x86/kernel/traps.c ++++ b/arch/x86/kernel/traps.c +@@ -85,9 +85,21 @@ static inline void conditional_sti(struc local_irq_enable(); } @@ -49,7 +47,7 @@ Index: linux-stable/arch/x86/kernel/traps.c if (regs->flags & X86_EFLAGS_IF) local_irq_enable(); } -@@ -100,11 +112,13 @@ static inline void conditional_cli(struc +@@ -98,11 +110,13 @@ static inline void conditional_cli(struc local_irq_disable(); } @@ -63,20 +61,20 @@ Index: linux-stable/arch/x86/kernel/traps.c +#endif } - static void __kprobes -@@ -225,9 +239,9 @@ dotraplinkage void do_stack_segment(stru + static int __kprobes +@@ -229,9 +243,9 @@ dotraplinkage void do_stack_segment(stru + exception_enter(regs); if (notify_die(DIE_TRAP, "stack 
segment", regs, error_code, - X86_TRAP_SS, SIGBUS) == NOTIFY_STOP) - return; -- preempt_conditional_sti(regs); -+ conditional_sti_ist(regs); - do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL); -- preempt_conditional_cli(regs); -+ conditional_cli_ist(regs); + X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) { +- preempt_conditional_sti(regs); ++ conditional_sti_ist(regs); + do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL); +- preempt_conditional_cli(regs); ++ conditional_cli_ist(regs); + } + exception_exit(regs); } - - dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) -@@ -327,9 +341,9 @@ dotraplinkage void __kprobes notrace do_ +@@ -331,9 +345,9 @@ dotraplinkage void __kprobes notrace do_ * as we may switch to the interrupt stack. */ debug_stack_usage_inc(); @@ -86,9 +84,9 @@ Index: linux-stable/arch/x86/kernel/traps.c - preempt_conditional_cli(regs); + conditional_cli_ist(regs); debug_stack_usage_dec(); - } - -@@ -430,12 +444,12 @@ dotraplinkage void __kprobes do_debug(st + exit: + exception_exit(regs); +@@ -438,12 +452,12 @@ dotraplinkage void __kprobes do_debug(st debug_stack_usage_inc(); /* It's safe to allow irq's after DR6 has been saved */ @@ -101,9 +99,9 @@ Index: linux-stable/arch/x86/kernel/traps.c - preempt_conditional_cli(regs); + conditional_cli_ist(regs); debug_stack_usage_dec(); - return; + goto exit; } -@@ -455,7 +469,7 @@ dotraplinkage void __kprobes do_debug(st +@@ -463,7 +477,7 @@ dotraplinkage void __kprobes do_debug(st si_code = get_si_code(tsk->thread.debugreg6); if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp) send_sigtrap(tsk, regs, error_code, si_code); @@ -111,4 +109,4 @@ Index: linux-stable/arch/x86/kernel/traps.c + conditional_cli_ist(regs); debug_stack_usage_dec(); - return; + exit: diff --git a/debian/patches/features/all/rt/fs-block-rt-support.patch b/debian/patches/features/all/rt/fs-block-rt-support.patch index a047eb43f..9246db6a0 100644 --- 
a/debian/patches/features/all/rt/fs-block-rt-support.patch +++ b/debian/patches/features/all/rt/fs-block-rt-support.patch @@ -8,11 +8,9 @@ Signed-off-by: Thomas Gleixner fs/file.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) -Index: linux-stable/block/blk-core.c -=================================================================== ---- linux-stable.orig/block/blk-core.c -+++ linux-stable/block/blk-core.c -@@ -239,7 +239,7 @@ EXPORT_SYMBOL(blk_delay_queue); +--- a/block/blk-core.c ++++ b/block/blk-core.c +@@ -241,7 +241,7 @@ EXPORT_SYMBOL(blk_delay_queue); **/ void blk_start_queue(struct request_queue *q) { @@ -21,11 +19,9 @@ Index: linux-stable/block/blk-core.c queue_flag_clear(QUEUE_FLAG_STOPPED, q); __blk_run_queue(q); -Index: linux-stable/fs/file.c -=================================================================== ---- linux-stable.orig/fs/file.c -+++ linux-stable/fs/file.c -@@ -105,14 +105,14 @@ void free_fdtable_rcu(struct rcu_head *r +--- a/fs/file.c ++++ b/fs/file.c +@@ -98,14 +98,14 @@ static void free_fdtable_rcu(struct rcu_ kfree(fdt->open_fds); kfree(fdt); } else { diff --git a/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch b/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch index 2665a2446..d6228dca8 100644 --- a/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch +++ b/debian/patches/features/all/rt/fs-dcache-use-cpu-chill-in-trylock-loops.patch @@ -15,10 +15,8 @@ Cc: stable-rt@vger.kernel.org fs/namespace.c | 3 ++- 4 files changed, 8 insertions(+), 5 deletions(-) -Index: linux-stable/fs/autofs4/autofs_i.h -=================================================================== ---- linux-stable.orig/fs/autofs4/autofs_i.h -+++ linux-stable/fs/autofs4/autofs_i.h +--- a/fs/autofs4/autofs_i.h ++++ b/fs/autofs4/autofs_i.h @@ -34,6 +34,7 @@ #include #include @@ -27,10 +25,8 @@ Index: linux-stable/fs/autofs4/autofs_i.h #include #include -Index: 
linux-stable/fs/autofs4/expire.c -=================================================================== ---- linux-stable.orig/fs/autofs4/expire.c -+++ linux-stable/fs/autofs4/expire.c +--- a/fs/autofs4/expire.c ++++ b/fs/autofs4/expire.c @@ -166,7 +166,7 @@ again: parent = p->d_parent; if (!spin_trylock(&parent->d_lock)) { @@ -40,10 +36,8 @@ Index: linux-stable/fs/autofs4/expire.c goto relock; } spin_unlock(&p->d_lock); -Index: linux-stable/fs/dcache.c -=================================================================== ---- linux-stable.orig/fs/dcache.c -+++ linux-stable/fs/dcache.c +--- a/fs/dcache.c ++++ b/fs/dcache.c @@ -37,6 +37,7 @@ #include #include @@ -52,7 +46,7 @@ Index: linux-stable/fs/dcache.c #include "internal.h" #include "mount.h" -@@ -488,7 +489,7 @@ static inline struct dentry *dentry_kill +@@ -470,7 +471,7 @@ static inline struct dentry *dentry_kill if (inode && !spin_trylock(&inode->i_lock)) { relock: spin_unlock(&dentry->d_lock); @@ -61,7 +55,7 @@ Index: linux-stable/fs/dcache.c return dentry; /* try again with same dentry */ } if (IS_ROOT(dentry)) -@@ -876,7 +877,7 @@ relock: +@@ -852,7 +853,7 @@ relock: if (!spin_trylock(&dentry->d_lock)) { spin_unlock(&dcache_lru_lock); @@ -70,30 +64,28 @@ Index: linux-stable/fs/dcache.c goto relock; } -@@ -2115,7 +2116,7 @@ again: +@@ -2084,7 +2085,7 @@ again: if (dentry->d_count == 1) { - if (inode && !spin_trylock(&inode->i_lock)) { + if (!spin_trylock(&inode->i_lock)) { spin_unlock(&dentry->d_lock); - cpu_relax(); + cpu_chill(); goto again; } dentry->d_flags &= ~DCACHE_CANT_MOUNT; -Index: linux-stable/fs/namespace.c -=================================================================== ---- linux-stable.orig/fs/namespace.c -+++ linux-stable/fs/namespace.c -@@ -20,6 +20,7 @@ - #include /* get_fs_root et.al. 
*/ +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@ -22,6 +22,7 @@ #include /* fsnotify_vfsmount_delete */ #include + #include +#include #include "pnode.h" #include "internal.h" -@@ -313,7 +314,7 @@ int __mnt_want_write(struct vfsmount *m) +@@ -315,7 +316,7 @@ int __mnt_want_write(struct vfsmount *m) smp_mb(); - while (mnt->mnt.mnt_flags & MNT_WRITE_HOLD) { + while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) { preempt_enable(); - cpu_relax(); + cpu_chill(); diff --git a/debian/patches/features/all/rt/fs-jbd-pull-plug-when-waiting-for-space.patch b/debian/patches/features/all/rt/fs-jbd-pull-plug-when-waiting-for-space.patch index cbd3076e0..0dd4c95d3 100644 --- a/debian/patches/features/all/rt/fs-jbd-pull-plug-when-waiting-for-space.patch +++ b/debian/patches/features/all/rt/fs-jbd-pull-plug-when-waiting-for-space.patch @@ -16,10 +16,8 @@ Signed-off-by: Thomas Gleixner fs/jbd/checkpoint.c | 2 ++ 1 file changed, 2 insertions(+) -Index: linux-stable/fs/jbd/checkpoint.c -=================================================================== ---- linux-stable.orig/fs/jbd/checkpoint.c -+++ linux-stable/fs/jbd/checkpoint.c +--- a/fs/jbd/checkpoint.c ++++ b/fs/jbd/checkpoint.c @@ -129,6 +129,8 @@ void __log_wait_for_space(journal_t *jou if (journal->j_flags & JFS_ABORT) return; diff --git a/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch b/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch index a981eefc2..b7b48cee4 100644 --- a/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch +++ b/debian/patches/features/all/rt/fs-jbd-replace-bh_state-lock.patch @@ -13,10 +13,8 @@ Signed-off-by: Thomas Gleixner include/linux/jbd_common.h | 24 ++++++++++++++++++++++++ 2 files changed, 34 insertions(+) -Index: linux-stable/include/linux/buffer_head.h -=================================================================== ---- linux-stable.orig/include/linux/buffer_head.h -+++ linux-stable/include/linux/buffer_head.h +--- 
a/include/linux/buffer_head.h ++++ b/include/linux/buffer_head.h @@ -74,6 +74,11 @@ struct buffer_head { atomic_t b_count; /* users using this buffer_head */ #ifdef CONFIG_PREEMPT_RT_BASE @@ -41,10 +39,8 @@ Index: linux-stable/include/linux/buffer_head.h #endif } -Index: linux-stable/include/linux/jbd_common.h -=================================================================== ---- linux-stable.orig/include/linux/jbd_common.h -+++ linux-stable/include/linux/jbd_common.h +--- a/include/linux/jbd_common.h ++++ b/include/linux/jbd_common.h @@ -39,32 +39,56 @@ static inline struct journal_head *bh2jh static inline void jbd_lock_bh_state(struct buffer_head *bh) diff --git a/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch b/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch index 8b4a69ea1..04805b8f1 100644 --- a/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch +++ b/debian/patches/features/all/rt/fs-namespace-preemption-fix.patch @@ -13,16 +13,14 @@ Signed-off-by: Thomas Gleixner fs/namespace.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) -Index: linux-stable/fs/namespace.c -=================================================================== ---- linux-stable.orig/fs/namespace.c -+++ linux-stable/fs/namespace.c -@@ -311,8 +311,11 @@ int __mnt_want_write(struct vfsmount *m) +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@ -313,8 +313,11 @@ int __mnt_want_write(struct vfsmount *m) * incremented count after it has set MNT_WRITE_HOLD. 
*/ smp_mb(); -- while (mnt->mnt.mnt_flags & MNT_WRITE_HOLD) -+ while (mnt->mnt.mnt_flags & MNT_WRITE_HOLD) { +- while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) ++ while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) { + preempt_enable(); cpu_relax(); + preempt_disable(); diff --git a/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch b/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch index f30579a4e..f06ad2972 100644 --- a/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch +++ b/debian/patches/features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch @@ -39,10 +39,8 @@ Signed-off-by: Thomas Gleixner fs/ntfs/aops.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) -Index: linux-stable/fs/ntfs/aops.c -=================================================================== ---- linux-stable.orig/fs/ntfs/aops.c -+++ linux-stable/fs/ntfs/aops.c +--- a/fs/ntfs/aops.c ++++ b/fs/ntfs/aops.c @@ -144,13 +144,13 @@ static void ntfs_end_buffer_async_read(s recs = PAGE_CACHE_SIZE / rec_size; /* Should have been verified before we got here... 
*/ diff --git a/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch b/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch index fffcf6a29..466a0a3bf 100644 --- a/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch +++ b/debian/patches/features/all/rt/fs-replace-bh_uptodate_lock-for-rt.patch @@ -12,11 +12,9 @@ Signed-off-by: Thomas Gleixner include/linux/buffer_head.h | 34 ++++++++++++++++++++++++++++++++++ 3 files changed, 44 insertions(+), 21 deletions(-) -Index: linux-stable/fs/buffer.c -=================================================================== ---- linux-stable.orig/fs/buffer.c -+++ linux-stable/fs/buffer.c -@@ -281,8 +281,7 @@ static void end_buffer_async_read(struct +--- a/fs/buffer.c ++++ b/fs/buffer.c +@@ -280,8 +280,7 @@ static void end_buffer_async_read(struct * decide that the page is now completely done. */ first = page_buffers(page); @@ -26,7 +24,7 @@ Index: linux-stable/fs/buffer.c clear_buffer_async_read(bh); unlock_buffer(bh); tmp = bh; -@@ -295,8 +294,7 @@ static void end_buffer_async_read(struct +@@ -294,8 +293,7 @@ static void end_buffer_async_read(struct } tmp = tmp->b_this_page; } while (tmp != bh); @@ -36,7 +34,7 @@ Index: linux-stable/fs/buffer.c /* * If none of the buffers had errors and they are all -@@ -308,9 +306,7 @@ static void end_buffer_async_read(struct +@@ -307,9 +305,7 @@ static void end_buffer_async_read(struct return; still_busy: @@ -47,7 +45,7 @@ Index: linux-stable/fs/buffer.c } /* -@@ -344,8 +340,7 @@ void end_buffer_async_write(struct buffe +@@ -343,8 +339,7 @@ void end_buffer_async_write(struct buffe } first = page_buffers(page); @@ -57,7 +55,7 @@ Index: linux-stable/fs/buffer.c clear_buffer_async_write(bh); unlock_buffer(bh); -@@ -357,15 +352,12 @@ void end_buffer_async_write(struct buffe +@@ -356,15 +351,12 @@ void end_buffer_async_write(struct buffe } tmp = tmp->b_this_page; } @@ -75,7 +73,7 @@ Index: linux-stable/fs/buffer.c } 
EXPORT_SYMBOL(end_buffer_async_write); -@@ -3178,6 +3170,7 @@ struct buffer_head *alloc_buffer_head(gf +@@ -3256,6 +3248,7 @@ struct buffer_head *alloc_buffer_head(gf struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); if (ret) { INIT_LIST_HEAD(&ret->b_assoc_buffers); @@ -83,10 +81,8 @@ Index: linux-stable/fs/buffer.c preempt_disable(); __this_cpu_inc(bh_accounting.nr); recalc_bh_state(); -Index: linux-stable/fs/ntfs/aops.c -=================================================================== ---- linux-stable.orig/fs/ntfs/aops.c -+++ linux-stable/fs/ntfs/aops.c +--- a/fs/ntfs/aops.c ++++ b/fs/ntfs/aops.c @@ -108,8 +108,7 @@ static void ntfs_end_buffer_async_read(s "0x%llx.", (unsigned long long)bh->b_blocknr); } @@ -118,10 +114,8 @@ Index: linux-stable/fs/ntfs/aops.c } /** -Index: linux-stable/include/linux/buffer_head.h -=================================================================== ---- linux-stable.orig/include/linux/buffer_head.h -+++ linux-stable/include/linux/buffer_head.h +--- a/include/linux/buffer_head.h ++++ b/include/linux/buffer_head.h @@ -72,8 +72,42 @@ struct buffer_head { struct address_space *b_assoc_map; /* mapping this buffer is associated with */ diff --git a/debian/patches/features/all/rt/fscache_compile_fix.patch b/debian/patches/features/all/rt/fscache_compile_fix.patch new file mode 100644 index 000000000..7cf833c8e --- /dev/null +++ b/debian/patches/features/all/rt/fscache_compile_fix.patch @@ -0,0 +1,34 @@ +From: Sebastian Andrzej Siewior +Subject: fs/fscache: done merge spin_lock() in while() + +Signed-off-by: Sebastian Andrzej Siewior +--- a/fs/fscache/page.c ++++ b/fs/fscache/page.c +@@ -796,11 +796,13 @@ void fscache_invalidate_writes(struct fs + + _enter(""); + +- while (spin_lock(&cookie->stores_lock), +- n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, +- ARRAY_SIZE(results), +- FSCACHE_COOKIE_PENDING_TAG), +- n > 0) { ++ do { ++ spin_lock(&cookie->stores_lock); ++ n = 
radix_tree_gang_lookup_tag(&cookie->stores, results, 0, ++ ARRAY_SIZE(results), ++ FSCACHE_COOKIE_PENDING_TAG); ++ if (n == 0) ++ break; + for (i = n - 1; i >= 0; i--) { + page = results[i]; + radix_tree_delete(&cookie->stores, page->index); +@@ -810,7 +812,7 @@ void fscache_invalidate_writes(struct fs + + for (i = n - 1; i >= 0; i--) + page_cache_release(results[i]); +- } ++ } while (1); + + spin_unlock(&cookie->stores_lock); + _leave(""); diff --git a/debian/patches/features/all/rt/ftrace-crap.patch b/debian/patches/features/all/rt/ftrace-crap.patch deleted file mode 100644 index 70a9227f2..000000000 --- a/debian/patches/features/all/rt/ftrace-crap.patch +++ /dev/null @@ -1,92 +0,0 @@ -Subject: ftrace-crap.patch -From: Thomas Gleixner -Date: Fri, 09 Sep 2011 16:55:53 +0200 - -Signed-off-by: Thomas Gleixner ---- - kernel/trace/trace.c | 26 ++++++++++++++++++++++++-- - kernel/trace/trace.h | 1 - - 2 files changed, 24 insertions(+), 3 deletions(-) - -Index: linux-stable/kernel/trace/trace.c -=================================================================== ---- linux-stable.orig/kernel/trace/trace.c -+++ linux-stable/kernel/trace/trace.c -@@ -402,11 +402,13 @@ EXPORT_SYMBOL_GPL(tracing_is_on); - */ - void trace_wake_up(void) - { -+#ifndef CONFIG_PREEMPT_RT_FULL - const unsigned long delay = msecs_to_jiffies(2); - - if (trace_flags & TRACE_ITER_BLOCK) - return; - schedule_delayed_work(&wakeup_work, delay); -+#endif - } - - static int __init set_buf_size(char *str) -@@ -756,6 +758,12 @@ update_max_tr_single(struct trace_array - } - #endif /* CONFIG_TRACER_MAX_TRACE */ - -+#ifndef CONFIG_PREEMPT_RT_FULL -+static void default_wait_pipe(struct trace_iterator *iter); -+#else -+#define default_wait_pipe poll_wait_pipe -+#endif -+ - /** - * register_tracer - register a tracer with the ftrace system. 
- * @type - the plugin for the tracer -@@ -3365,6 +3373,7 @@ static int tracing_release_pipe(struct i - return 0; - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - static unsigned int - tracing_poll_pipe(struct file *filp, poll_table *poll_table) - { -@@ -3386,8 +3395,7 @@ tracing_poll_pipe(struct file *filp, pol - } - } - -- --void default_wait_pipe(struct trace_iterator *iter) -+static void default_wait_pipe(struct trace_iterator *iter) - { - DEFINE_WAIT(wait); - -@@ -3398,6 +3406,20 @@ void default_wait_pipe(struct trace_iter - - finish_wait(&trace_wait, &wait); - } -+#else -+static unsigned int -+tracing_poll_pipe(struct file *filp, poll_table *poll_table) -+{ -+ struct trace_iterator *iter = filp->private_data; -+ -+ if ((trace_flags & TRACE_ITER_BLOCK) || !trace_empty(iter)) -+ return POLLIN | POLLRDNORM; -+ poll_wait_pipe(iter); -+ if (!trace_empty(iter)) -+ return POLLIN | POLLRDNORM; -+ return 0; -+} -+#endif - - /* - * This is a make-shift waitqueue. -Index: linux-stable/kernel/trace/trace.h -=================================================================== ---- linux-stable.orig/kernel/trace/trace.h -+++ linux-stable/kernel/trace/trace.h -@@ -367,7 +367,6 @@ void trace_init_global_iter(struct trace - - void tracing_iter_reset(struct trace_iterator *iter, int cpu); - --void default_wait_pipe(struct trace_iterator *iter); - void poll_wait_pipe(struct trace_iterator *iter); - - void ftrace(struct trace_array *tr, diff --git a/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch b/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch index cc073a93a..a85247d34 100644 --- a/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch +++ b/debian/patches/features/all/rt/ftrace-migrate-disable-tracing.patch @@ -10,10 +10,8 @@ Signed-off-by: Thomas Gleixner kernel/trace/trace_output.c | 5 +++++ 4 files changed, 14 insertions(+), 4 deletions(-) -Index: linux-stable/include/linux/ftrace_event.h 
-=================================================================== ---- linux-stable.orig/include/linux/ftrace_event.h -+++ linux-stable/include/linux/ftrace_event.h +--- a/include/linux/ftrace_event.h ++++ b/include/linux/ftrace_event.h @@ -49,7 +49,8 @@ struct trace_entry { unsigned char flags; unsigned char preempt_count; @@ -24,11 +22,9 @@ Index: linux-stable/include/linux/ftrace_event.h }; #define FTRACE_MAX_EVENT \ -Index: linux-stable/kernel/trace/trace.c -=================================================================== ---- linux-stable.orig/kernel/trace/trace.c -+++ linux-stable/kernel/trace/trace.c -@@ -1155,6 +1155,8 @@ tracing_generic_entry_update(struct trac +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -1177,6 +1177,8 @@ tracing_generic_entry_update(struct trac ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); @@ -37,7 +33,7 @@ Index: linux-stable/kernel/trace/trace.c } EXPORT_SYMBOL_GPL(tracing_generic_entry_update); -@@ -1980,9 +1982,10 @@ static void print_lat_help_header(struct +@@ -2034,9 +2036,10 @@ static void print_lat_help_header(struct seq_puts(m, "# | / _----=> need-resched \n"); seq_puts(m, "# || / _---=> hardirq/softirq \n"); seq_puts(m, "# ||| / _--=> preempt-depth \n"); @@ -51,10 +47,8 @@ Index: linux-stable/kernel/trace/trace.c } static void print_event_info(struct trace_array *tr, struct seq_file *m) -Index: linux-stable/kernel/trace/trace_events.c -=================================================================== ---- linux-stable.orig/kernel/trace/trace_events.c -+++ linux-stable/kernel/trace/trace_events.c +--- a/kernel/trace/trace_events.c ++++ b/kernel/trace/trace_events.c @@ -116,6 +116,7 @@ static int trace_define_common_fields(vo __common_field(unsigned char, flags); __common_field(unsigned char, preempt_count); @@ -63,10 +57,8 @@ Index: linux-stable/kernel/trace/trace_events.c __common_field(int, padding); 
return ret; -Index: linux-stable/kernel/trace/trace_output.c -=================================================================== ---- linux-stable.orig/kernel/trace/trace_output.c -+++ linux-stable/kernel/trace/trace_output.c +--- a/kernel/trace/trace_output.c ++++ b/kernel/trace/trace_output.c @@ -593,6 +593,11 @@ int trace_print_lat_fmt(struct trace_seq else ret = trace_seq_putc(s, '.'); diff --git a/debian/patches/features/all/rt/futex-requeue-pi-fix.patch b/debian/patches/features/all/rt/futex-requeue-pi-fix.patch index 75b4003b1..9e8e808ed 100644 --- a/debian/patches/features/all/rt/futex-requeue-pi-fix.patch +++ b/debian/patches/features/all/rt/futex-requeue-pi-fix.patch @@ -54,10 +54,8 @@ Signed-off-by: Thomas Gleixner kernel/rtmutex_common.h | 1 + 2 files changed, 32 insertions(+), 1 deletion(-) -Index: linux-stable/kernel/rtmutex.c -=================================================================== ---- linux-stable.orig/kernel/rtmutex.c -+++ linux-stable/kernel/rtmutex.c +--- a/kernel/rtmutex.c ++++ b/kernel/rtmutex.c @@ -69,7 +69,8 @@ static void fixup_rt_mutex_waiters(struc static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter) @@ -68,7 +66,7 @@ Index: linux-stable/kernel/rtmutex.c } /* -@@ -1002,6 +1003,35 @@ int rt_mutex_start_proxy_lock(struct rt_ +@@ -981,6 +982,35 @@ int rt_mutex_start_proxy_lock(struct rt_ return 1; } @@ -104,10 +102,8 @@ Index: linux-stable/kernel/rtmutex.c ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock); if (ret && !rt_mutex_owner(lock)) { -Index: linux-stable/kernel/rtmutex_common.h -=================================================================== ---- linux-stable.orig/kernel/rtmutex_common.h -+++ linux-stable/kernel/rtmutex_common.h +--- a/kernel/rtmutex_common.h ++++ b/kernel/rtmutex_common.h @@ -104,6 +104,7 @@ static inline struct task_struct *rt_mut * PI-futex support (proxy locking functions, etc.): */ diff --git a/debian/patches/features/all/rt/generic-cmpxchg-use-raw-local-irq.patch 
b/debian/patches/features/all/rt/generic-cmpxchg-use-raw-local-irq.patch index 3750ddad7..12d82a25d 100644 --- a/debian/patches/features/all/rt/generic-cmpxchg-use-raw-local-irq.patch +++ b/debian/patches/features/all/rt/generic-cmpxchg-use-raw-local-irq.patch @@ -11,10 +11,8 @@ Signed-off-by: Thomas Gleixner include/asm-generic/cmpxchg-local.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) -Index: linux-stable/include/asm-generic/cmpxchg-local.h -=================================================================== ---- linux-stable.orig/include/asm-generic/cmpxchg-local.h -+++ linux-stable/include/asm-generic/cmpxchg-local.h +--- a/include/asm-generic/cmpxchg-local.h ++++ b/include/asm-generic/cmpxchg-local.h @@ -21,7 +21,7 @@ static inline unsigned long __cmpxchg_lo if (size == 8 && sizeof(unsigned long) != 8) wrong_size_cmpxchg(ptr); diff --git a/debian/patches/features/all/rt/genirq-add-default-mask-cmdline-option.patch b/debian/patches/features/all/rt/genirq-add-default-mask-cmdline-option.patch index a6104c2da..d452bf25a 100644 --- a/debian/patches/features/all/rt/genirq-add-default-mask-cmdline-option.patch +++ b/debian/patches/features/all/rt/genirq-add-default-mask-cmdline-option.patch @@ -14,11 +14,9 @@ Signed-off-by: Thomas Gleixner kernel/irq/irqdesc.c | 21 +++++++++++++++++++-- 2 files changed, 28 insertions(+), 2 deletions(-) -Index: linux-stable/Documentation/kernel-parameters.txt -=================================================================== ---- linux-stable.orig/Documentation/kernel-parameters.txt -+++ linux-stable/Documentation/kernel-parameters.txt -@@ -1157,6 +1157,15 @@ bytes respectively. Such letter suffixes +--- a/Documentation/kernel-parameters.txt ++++ b/Documentation/kernel-parameters.txt +@@ -1182,6 +1182,15 @@ bytes respectively. Such letter suffixes See comment before ip2_setup() in drivers/char/ip2/ip2base.c. 
@@ -34,10 +32,8 @@ Index: linux-stable/Documentation/kernel-parameters.txt irqfixup [HW] When an interrupt is not handled search all handlers for it. Intended to get systems with badly broken -Index: linux-stable/kernel/irq/irqdesc.c -=================================================================== ---- linux-stable.orig/kernel/irq/irqdesc.c -+++ linux-stable/kernel/irq/irqdesc.c +--- a/kernel/irq/irqdesc.c ++++ b/kernel/irq/irqdesc.c @@ -23,10 +23,27 @@ static struct lock_class_key irq_desc_lock_class; diff --git a/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch b/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch index 453192393..b9072eafd 100644 --- a/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch +++ b/debian/patches/features/all/rt/genirq-disable-irqpoll-on-rt.patch @@ -11,29 +11,27 @@ Signed-off-by: Thomas Gleixner kernel/irq/spurious.c | 10 ++++++++++ 1 file changed, 10 insertions(+) -Index: linux-stable/kernel/irq/spurious.c -=================================================================== ---- linux-stable.orig/kernel/irq/spurious.c -+++ linux-stable/kernel/irq/spurious.c -@@ -341,6 +341,11 @@ MODULE_PARM_DESC(noirqdebug, "Disable ir +--- a/kernel/irq/spurious.c ++++ b/kernel/irq/spurious.c +@@ -340,6 +340,11 @@ MODULE_PARM_DESC(noirqdebug, "Disable ir static int __init irqfixup_setup(char *str) { +#ifdef CONFIG_PREEMPT_RT_BASE + printk(KERN_WARNING "irqfixup boot option not supported " -+ "w/ CONFIG_PREEMPT_RT\n"); ++ "w/ CONFIG_PREEMPT_RT_BASE\n"); + return 1; +#endif irqfixup = 1; printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n"); printk(KERN_WARNING "This may impact system performance.\n"); -@@ -353,6 +358,11 @@ module_param(irqfixup, int, 0644); +@@ -352,6 +357,11 @@ module_param(irqfixup, int, 0644); static int __init irqpoll_setup(char *str) { +#ifdef CONFIG_PREEMPT_RT_BASE + printk(KERN_WARNING "irqpoll boot option not supported " -+ "w/ CONFIG_PREEMPT_RT\n"); ++ "w/ 
CONFIG_PREEMPT_RT_BASE\n"); + return 1; +#endif irqfixup = 2; diff --git a/debian/patches/features/all/rt/genirq-force-threading.patch b/debian/patches/features/all/rt/genirq-force-threading.patch index 0c609612c..72fbf9846 100644 --- a/debian/patches/features/all/rt/genirq-force-threading.patch +++ b/debian/patches/features/all/rt/genirq-force-threading.patch @@ -8,11 +8,9 @@ Signed-off-by: Thomas Gleixner kernel/irq/manage.c | 2 ++ 2 files changed, 8 insertions(+), 2 deletions(-) -Index: linux-stable/include/linux/interrupt.h -=================================================================== ---- linux-stable.orig/include/linux/interrupt.h -+++ linux-stable/include/linux/interrupt.h -@@ -388,9 +388,13 @@ static inline int disable_irq_wake(unsig +--- a/include/linux/interrupt.h ++++ b/include/linux/interrupt.h +@@ -383,9 +383,13 @@ static inline int disable_irq_wake(unsig #ifdef CONFIG_IRQ_FORCED_THREADING @@ -28,10 +26,8 @@ Index: linux-stable/include/linux/interrupt.h #endif #ifndef __ARCH_SET_SOFTIRQ_PENDING -Index: linux-stable/kernel/irq/manage.c -=================================================================== ---- linux-stable.orig/kernel/irq/manage.c -+++ linux-stable/kernel/irq/manage.c +--- a/kernel/irq/manage.c ++++ b/kernel/irq/manage.c @@ -21,6 +21,7 @@ #include "internals.h" diff --git a/debian/patches/features/all/rt/genirq-nodebug-shirq.patch b/debian/patches/features/all/rt/genirq-nodebug-shirq.patch index b2647e1f2..4885fb429 100644 --- a/debian/patches/features/all/rt/genirq-nodebug-shirq.patch +++ b/debian/patches/features/all/rt/genirq-nodebug-shirq.patch @@ -7,10 +7,8 @@ Signed-off-by: Thomas Gleixner lib/Kconfig.debug | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -Index: linux-stable/lib/Kconfig.debug -=================================================================== ---- linux-stable.orig/lib/Kconfig.debug -+++ linux-stable/lib/Kconfig.debug +--- a/lib/Kconfig.debug ++++ b/lib/Kconfig.debug @@ -164,7 +164,7 @@ config 
DEBUG_KERNEL config DEBUG_SHIRQ diff --git a/debian/patches/features/all/rt/harirq-h-define-softirq_count-as-oul-to-kill-build-warning.patch b/debian/patches/features/all/rt/harirq-h-define-softirq_count-as-oul-to-kill-build-warning.patch index 0090a7730..8e94d1515 100644 --- a/debian/patches/features/all/rt/harirq-h-define-softirq_count-as-oul-to-kill-build-warning.patch +++ b/debian/patches/features/all/rt/harirq-h-define-softirq_count-as-oul-to-kill-build-warning.patch @@ -22,11 +22,9 @@ Signed-off-by: Thomas Gleixner include/linux/hardirq.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -Index: linux-stable/include/linux/hardirq.h -=================================================================== ---- linux-stable.orig/include/linux/hardirq.h -+++ linux-stable/include/linux/hardirq.h -@@ -84,7 +84,7 @@ +--- a/include/linux/hardirq.h ++++ b/include/linux/hardirq.h +@@ -85,7 +85,7 @@ # define softirq_count() (preempt_count() & SOFTIRQ_MASK) # define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) #else diff --git a/debian/patches/features/all/rt/hotplug-call-cpu_unplug_begin-a-little-early.patch b/debian/patches/features/all/rt/hotplug-call-cpu_unplug_begin-a-little-early.patch index d9be58406..27f2ef353 100644 --- a/debian/patches/features/all/rt/hotplug-call-cpu_unplug_begin-a-little-early.patch +++ b/debian/patches/features/all/rt/hotplug-call-cpu_unplug_begin-a-little-early.patch @@ -25,11 +25,9 @@ Signed-off-by: Thomas Gleixner kernel/cpu.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) -Index: linux-stable/kernel/cpu.c -=================================================================== ---- linux-stable.orig/kernel/cpu.c -+++ linux-stable/kernel/cpu.c -@@ -383,22 +383,20 @@ static int __ref _cpu_down(unsigned int +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -387,22 +387,20 @@ static int __ref _cpu_down(unsigned int return -EBUSY; } @@ -57,5 +55,5 @@ Index: linux-stable/kernel/cpu.c + __func__, cpu); + goto out_release; 
} + smpboot_park_threads(cpu); - err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); diff --git a/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch b/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch index 9f0b4dc3e..5b051394d 100644 --- a/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch +++ b/debian/patches/features/all/rt/hotplug-light-get-online-cpus.patch @@ -16,10 +16,8 @@ Signed-off-by: Thomas Gleixner kernel/cpu.c | 127 ++++++++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 128 insertions(+), 3 deletions(-) -Index: linux-stable/include/linux/cpu.h -=================================================================== ---- linux-stable.orig/include/linux/cpu.h -+++ linux-stable/include/linux/cpu.h +--- a/include/linux/cpu.h ++++ b/include/linux/cpu.h @@ -175,6 +175,8 @@ extern struct bus_type cpu_subsys; extern void get_online_cpus(void); @@ -38,10 +36,8 @@ Index: linux-stable/include/linux/cpu.h #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) /* These aren't inline functions due to a GCC bug. 
*/ #define register_hotcpu_notifier(nb) ({ (void)(nb); 0; }) -Index: linux-stable/kernel/cpu.c -=================================================================== ---- linux-stable.orig/kernel/cpu.c -+++ linux-stable/kernel/cpu.c +--- a/kernel/cpu.c ++++ b/kernel/cpu.c @@ -63,6 +63,102 @@ static struct { .refcount = 0, }; @@ -145,7 +141,7 @@ Index: linux-stable/kernel/cpu.c void get_online_cpus(void) { might_sleep(); -@@ -256,13 +352,14 @@ static int __ref take_cpu_down(void *_pa +@@ -260,13 +356,14 @@ static int __ref take_cpu_down(void *_pa /* Requires cpu_add_remove_lock to be held */ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) { @@ -161,7 +157,7 @@ Index: linux-stable/kernel/cpu.c if (num_online_cpus() == 1) return -EBUSY; -@@ -270,7 +367,20 @@ static int __ref _cpu_down(unsigned int +@@ -274,7 +371,20 @@ static int __ref _cpu_down(unsigned int if (!cpu_online(cpu)) return -EINVAL; @@ -183,7 +179,7 @@ Index: linux-stable/kernel/cpu.c err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); if (err) { -@@ -278,7 +388,16 @@ static int __ref _cpu_down(unsigned int +@@ -282,7 +392,16 @@ static int __ref _cpu_down(unsigned int __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL); printk("%s: attempt to take down CPU %u failed\n", __func__, cpu); @@ -199,9 +195,9 @@ Index: linux-stable/kernel/cpu.c + printk("cpu_unplug_begin(%d) failed\n", cpu); + goto out_cancel; } + smpboot_park_threads(cpu); - err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); -@@ -309,6 +428,8 @@ static int __ref _cpu_down(unsigned int +@@ -314,6 +433,8 @@ static int __ref _cpu_down(unsigned int check_for_tasks(cpu); out_release: diff --git a/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch b/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch index 73e14f185..c224c7c83 100644 --- a/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch +++ 
b/debian/patches/features/all/rt/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch @@ -11,10 +11,8 @@ Signed-off-by: Thomas Gleixner kernel/cpu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -Index: linux-stable/kernel/cpu.c -=================================================================== ---- linux-stable.orig/kernel/cpu.c -+++ linux-stable/kernel/cpu.c +--- a/kernel/cpu.c ++++ b/kernel/cpu.c @@ -143,7 +143,7 @@ static int cpu_unplug_begin(unsigned int struct task_struct *tsk; diff --git a/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch b/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch index cef74186f..876d12396 100644 --- a/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch +++ b/debian/patches/features/all/rt/hotplug-use-migrate-disable.patch @@ -7,11 +7,9 @@ Signed-off-by: Thomas Gleixner kernel/cpu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) -Index: linux-stable/kernel/cpu.c -=================================================================== ---- linux-stable.orig/kernel/cpu.c -+++ linux-stable/kernel/cpu.c -@@ -375,14 +375,13 @@ static int __ref _cpu_down(unsigned int +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -379,14 +379,13 @@ static int __ref _cpu_down(unsigned int cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu)); set_cpus_allowed_ptr(current, cpumask); free_cpumask_var(cpumask); @@ -28,7 +26,7 @@ Index: linux-stable/kernel/cpu.c err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); if (err) { -@@ -432,6 +431,7 @@ static int __ref _cpu_down(unsigned int +@@ -437,6 +436,7 @@ static int __ref _cpu_down(unsigned int out_release: cpu_unplug_done(cpu); out_cancel: diff --git a/debian/patches/features/all/rt/hrtimer-add-missing-debug_activate-aid.patch b/debian/patches/features/all/rt/hrtimer-add-missing-debug_activate-aid.patch deleted file mode 100644 index f4e13a560..000000000 --- a/debian/patches/features/all/rt/hrtimer-add-missing-debug_activate-aid.patch 
+++ /dev/null @@ -1,49 +0,0 @@ -From: Yong Zhang -Subject: hrtimer: Add missing debug_activate() aid -Date: Thu, 13 Oct 2011 15:52:30 +0800 - -It will fix below warning, which is also reported by Fernando: - -[ 7.616090] ------------[ cut here ]------------ -[ 7.616093] WARNING: at kernel/hrtimer.c:391 hrtimer_fixup_activate+0x27/0x50() -[ 7.616094] Hardware name: OptiPlex 755 -[ 7.616096] Modules linked in: -[ 7.616099] Pid: 0, comm: kworker/0:0 Tainted: G W 3.0.6-rt17-00284-g9d73a61 #15 -[ 7.616100] Call Trace: -[ 7.616103] [] warn_slowpath_common+0x72/0xa0 -[ 7.616106] [] ? hrtimer_fixup_activate+0x27/0x50 -[ 7.616109] [] ? hrtimer_fixup_activate+0x27/0x50 -[ 7.616112] [] warn_slowpath_null+0x22/0x30 -[ 7.616115] [] hrtimer_fixup_activate+0x27/0x50 -[ 7.616118] [] debug_object_activate+0x100/0x130 -[ 7.616121] [] ? hrtimer_start_range_ns+0x26/0x30 -[ 7.616123] [] enqueue_hrtimer+0x19/0x100 -[ 7.616126] [] ? hrtimer_start_range_ns+0x26/0x30 -[ 7.616129] [] __hrtimer_start_range_ns+0x144/0x540 -[ 7.616132] [] ? _raw_spin_unlock_irqrestore+0x3a/0x80 -[ 7.616136] [] hrtimer_start_range_ns+0x26/0x30 -[ 7.616139] [] tick_nohz_restart_sched_tick+0x185/0x1b0 -[ 7.616142] [] cpu_idle+0x98/0xc0 -[ 7.616146] [] start_secondary+0x1d3/0x1da -[ 7.616148] ---[ end trace 0000000000000003 ]--- - -Reported-by: Fernando Lopez-Lezcano -Signed-off-by: Yong Zhang -Link: http://lkml.kernel.org/r/20111013075230.GA2740@zhy -Signed-off-by: Thomas Gleixner ---- - kernel/hrtimer.c | 1 + - 1 file changed, 1 insertion(+) - -Index: linux-stable/kernel/hrtimer.c -=================================================================== ---- linux-stable.orig/kernel/hrtimer.c -+++ linux-stable/kernel/hrtimer.c -@@ -1063,6 +1063,7 @@ int __hrtimer_start_range_ns(struct hrti - * remove it again and report a failure. This avoids - * stale base->first entries. 
- */ -+ debug_deactivate(timer); - __remove_hrtimer(timer, new_base, - timer->state & HRTIMER_STATE_CALLBACK, 0); - } diff --git a/debian/patches/features/all/rt/hrtimer-fix-reprogram-madness.patch b/debian/patches/features/all/rt/hrtimer-fix-reprogram-madness.patch deleted file mode 100644 index ec68a0438..000000000 --- a/debian/patches/features/all/rt/hrtimer-fix-reprogram-madness.patch +++ /dev/null @@ -1,42 +0,0 @@ -Subject: hrtimer-fix-reprogram-madness.patch -From: Thomas Gleixner -Date: Wed, 14 Sep 2011 14:48:43 +0200 - -Signed-off-by: Thomas Gleixner ---- - kernel/hrtimer.c | 8 +++++++- - 1 file changed, 7 insertions(+), 1 deletion(-) - -Index: linux-stable/kernel/hrtimer.c -=================================================================== ---- linux-stable.orig/kernel/hrtimer.c -+++ linux-stable/kernel/hrtimer.c -@@ -1338,7 +1338,11 @@ static void hrtimer_rt_reprogram(int res - if (!enqueue_hrtimer(timer, base)) - return; - -- if (hrtimer_reprogram(timer, base)) -+#ifndef CONFIG_HIGH_RES_TIMERS -+ } -+#else -+ if (base->cpu_base->hres_active && -+ hrtimer_reprogram(timer, base)) - goto requeue; - - } else if (hrtimer_active(timer)) { -@@ -1347,6 +1351,7 @@ static void hrtimer_rt_reprogram(int res - * the event device. 
- */ - if (&timer->node == base->active.next && -+ base->cpu_base->hres_active && - hrtimer_reprogram(timer, base)) - goto requeue; - } -@@ -1359,6 +1364,7 @@ requeue: - */ - __remove_hrtimer(timer, base, timer->state, 0); - list_add_tail(&timer->cb_entry, &base->expired); -+#endif - } - - /* diff --git a/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch b/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch index ffd05f871..930553a7f 100644 --- a/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch +++ b/debian/patches/features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch @@ -13,17 +13,15 @@ Signed-off-by: Ingo Molnar --- include/linux/hrtimer.h | 3 - kernel/hrtimer.c | 196 ++++++++++++++++++++++++++++++++++++++++++----- + kernel/hrtimer.c | 220 ++++++++++++++++++++++++++++++++++++++++------- kernel/sched/core.c | 1 kernel/sched/rt.c | 1 kernel/time/tick-sched.c | 1 kernel/watchdog.c | 1 - 6 files changed, 183 insertions(+), 20 deletions(-) + 6 files changed, 198 insertions(+), 29 deletions(-) -Index: linux-stable/include/linux/hrtimer.h -=================================================================== ---- linux-stable.orig/include/linux/hrtimer.h -+++ linux-stable/include/linux/hrtimer.h +--- a/include/linux/hrtimer.h ++++ b/include/linux/hrtimer.h @@ -111,6 +111,8 @@ struct hrtimer { enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; @@ -41,10 +39,8 @@ Index: linux-stable/include/linux/hrtimer.h ktime_t resolution; ktime_t (*get_time)(void); ktime_t softirq_time; -Index: linux-stable/kernel/hrtimer.c -=================================================================== ---- linux-stable.orig/kernel/hrtimer.c -+++ linux-stable/kernel/hrtimer.c +--- a/kernel/hrtimer.c ++++ b/kernel/hrtimer.c @@ -589,8 +589,7 @@ static int hrtimer_reprogram(struct hrti * When the callback is running, we do not 
reprogram the clock event * device. The timer callback is either running on a different CPU or @@ -65,37 +61,36 @@ Index: linux-stable/kernel/hrtimer.c /* * Initialize the high resolution related parts of cpu_base */ -@@ -644,7 +646,29 @@ static inline int hrtimer_enqueue_reprog - struct hrtimer_clock_base *base, - int wakeup) +@@ -641,9 +643,18 @@ static inline void hrtimer_init_hres(str + * and expiry check is done in the hrtimer_interrupt or in the softirq. + */ + static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, +- struct hrtimer_clock_base *base) ++ struct hrtimer_clock_base *base, ++ int wakeup) { +- return base->cpu_base->hres_active && hrtimer_reprogram(timer, base); ++ if (!(base->cpu_base->hres_active && hrtimer_reprogram(timer, base))) ++ return 0; ++ if (!wakeup) ++ return -ETIME; +#ifdef CONFIG_PREEMPT_RT_BASE -+again: - if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) { -+ /* -+ * Move softirq based timers away from the rbtree in -+ * case it expired already. Otherwise we would have a -+ * stale base->first entry until the softirq runs. -+ */ -+ if (!hrtimer_rt_defer(timer)) { -+ ktime_t now = ktime_get(); -+ -+ __run_hrtimer(timer, &now); -+ /* -+ * __run_hrtimer might have requeued timer and -+ * it could be base->first again. 
-+ */ -+ if (&timer->node == base->active.next) -+ goto again; -+ return 1; -+ } -+#else -+ if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) { ++ if (!hrtimer_rt_defer(timer)) ++ return -ETIME; +#endif - if (wakeup) { - raw_spin_unlock(&base->cpu_base->lock); - raise_softirq_irqoff(HRTIMER_SOFTIRQ); -@@ -743,6 +767,11 @@ static inline int hrtimer_enqueue_reprog ++ return 1; + } + + static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base) +@@ -724,12 +735,18 @@ static inline int hrtimer_switch_to_hres + static inline void + hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { } + static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, +- struct hrtimer_clock_base *base) ++ struct hrtimer_clock_base *base, ++ int wakeup) + { + return 0; } static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { } static inline void retrigger_next_event(void *arg) { } @@ -107,7 +102,7 @@ Index: linux-stable/kernel/hrtimer.c #endif /* CONFIG_HIGH_RES_TIMERS */ -@@ -874,9 +903,9 @@ void hrtimer_wait_for_timer(const struct +@@ -861,9 +878,9 @@ void hrtimer_wait_for_timer(const struct { struct hrtimer_clock_base *base = timer->base; @@ -119,7 +114,7 @@ Index: linux-stable/kernel/hrtimer.c } #else -@@ -926,6 +955,11 @@ static void __remove_hrtimer(struct hrti +@@ -913,6 +930,11 @@ static void __remove_hrtimer(struct hrti if (!(timer->state & HRTIMER_STATE_ENQUEUED)) goto out; @@ -131,7 +126,41 @@ Index: linux-stable/kernel/hrtimer.c next_timer = timerqueue_getnext(&base->active); timerqueue_del(&base->active, &timer->node); if (&timer->node == next_timer) { -@@ -1199,6 +1233,7 @@ static void __hrtimer_init(struct hrtime +@@ -1020,9 +1042,19 @@ int __hrtimer_start_range_ns(struct hrti + * + * XXX send_remote_softirq() ? 
+ */ +- if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases) +- && hrtimer_enqueue_reprogram(timer, new_base)) { +- if (wakeup) { ++ if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)) { ++ ret = hrtimer_enqueue_reprogram(timer, new_base, wakeup); ++ if (ret < 0) { ++ /* ++ * In case we failed to reprogram the timer (mostly ++ * because out current timer is already elapsed), ++ * remove it again and report a failure. This avoids ++ * stale base->first entries. ++ */ ++ debug_deactivate(timer); ++ __remove_hrtimer(timer, new_base, ++ timer->state & HRTIMER_STATE_CALLBACK, 0); ++ } else if (ret > 0) { + /* + * We need to drop cpu_base->lock to avoid a + * lock ordering issue vs. rq->lock. +@@ -1030,9 +1062,7 @@ int __hrtimer_start_range_ns(struct hrti + raw_spin_unlock(&new_base->cpu_base->lock); + raise_softirq_irqoff(HRTIMER_SOFTIRQ); + local_irq_restore(flags); +- return ret; +- } else { +- __raise_softirq_irqoff(HRTIMER_SOFTIRQ); ++ return 0; + } + } + +@@ -1199,6 +1229,7 @@ static void __hrtimer_init(struct hrtime base = hrtimer_clockid_to_base(clock_id); timer->base = &cpu_base->clock_base[base]; @@ -139,7 +168,7 @@ Index: linux-stable/kernel/hrtimer.c timerqueue_init(&timer->node); #ifdef CONFIG_TIMER_STATS -@@ -1282,10 +1317,118 @@ static void __run_hrtimer(struct hrtimer +@@ -1282,10 +1313,128 @@ static void __run_hrtimer(struct hrtimer timer->state &= ~HRTIMER_STATE_CALLBACK; } @@ -167,7 +196,11 @@ Index: linux-stable/kernel/hrtimer.c + if (!enqueue_hrtimer(timer, base)) + return; + -+ if (hrtimer_reprogram(timer, base)) ++#ifndef CONFIG_HIGH_RES_TIMERS ++ } ++#else ++ if (base->cpu_base->hres_active && ++ hrtimer_reprogram(timer, base)) + goto requeue; + + } else if (hrtimer_active(timer)) { @@ -176,6 +209,7 @@ Index: linux-stable/kernel/hrtimer.c + * the event device. 
+ */ + if (&timer->node == base->active.next && ++ base->cpu_base->hres_active && + hrtimer_reprogram(timer, base)) + goto requeue; + } @@ -188,6 +222,7 @@ Index: linux-stable/kernel/hrtimer.c + */ + __remove_hrtimer(timer, base, timer->state, 0); + list_add_tail(&timer->cb_entry, &base->expired); ++#endif +} + +/* @@ -250,7 +285,11 @@ Index: linux-stable/kernel/hrtimer.c + +#else + -+static inline void hrtimer_rt_run_pending(void) { } ++static inline void hrtimer_rt_run_pending(void) ++{ ++ hrtimer_peek_ahead_timers(); ++} ++ +static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; } + +#endif @@ -260,7 +299,7 @@ Index: linux-stable/kernel/hrtimer.c /* * High resolution timer interrupt * Called with interrupts disabled -@@ -1294,7 +1437,7 @@ void hrtimer_interrupt(struct clock_even +@@ -1294,7 +1443,7 @@ void hrtimer_interrupt(struct clock_even { struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); ktime_t expires_next, now, entry_time, delta; @@ -269,7 +308,7 @@ Index: linux-stable/kernel/hrtimer.c BUG_ON(!cpu_base->hres_active); cpu_base->nr_events++; -@@ -1361,7 +1504,10 @@ retry: +@@ -1361,7 +1510,10 @@ retry: break; } @@ -281,7 +320,7 @@ Index: linux-stable/kernel/hrtimer.c } } -@@ -1376,6 +1522,10 @@ retry: +@@ -1376,6 +1528,10 @@ retry: if (expires_next.tv64 == KTIME_MAX || !tick_program_event(expires_next, 0)) { cpu_base->hang_detected = 0; @@ -292,7 +331,7 @@ Index: linux-stable/kernel/hrtimer.c return; } -@@ -1456,24 +1606,26 @@ void hrtimer_peek_ahead_timers(void) +@@ -1456,24 +1612,26 @@ void hrtimer_peek_ahead_timers(void) local_irq_restore(flags); } @@ -326,7 +365,7 @@ Index: linux-stable/kernel/hrtimer.c /* * Called from timer softirq every jiffy, expire hrtimers: * -@@ -1506,7 +1658,7 @@ void hrtimer_run_queues(void) +@@ -1506,7 +1664,7 @@ void hrtimer_run_queues(void) struct timerqueue_node *node; struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); struct hrtimer_clock_base *base; @@ -335,7 +374,7 @@ 
Index: linux-stable/kernel/hrtimer.c if (hrtimer_hres_active()) return; -@@ -1531,12 +1683,16 @@ void hrtimer_run_queues(void) +@@ -1531,12 +1689,16 @@ void hrtimer_run_queues(void) hrtimer_get_expires_tv64(timer)) break; @@ -354,7 +393,7 @@ Index: linux-stable/kernel/hrtimer.c } /* -@@ -1558,6 +1714,7 @@ static enum hrtimer_restart hrtimer_wake +@@ -1558,6 +1720,7 @@ static enum hrtimer_restart hrtimer_wake void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) { sl->timer.function = hrtimer_wakeup; @@ -362,7 +401,7 @@ Index: linux-stable/kernel/hrtimer.c sl->task = task; } EXPORT_SYMBOL_GPL(hrtimer_init_sleeper); -@@ -1696,6 +1853,7 @@ static void __cpuinit init_hrtimers_cpu( +@@ -1696,6 +1859,7 @@ static void __cpuinit init_hrtimers_cpu( for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { cpu_base->clock_base[i].cpu_base = cpu_base; timerqueue_init_head(&cpu_base->clock_base[i].active); @@ -370,7 +409,7 @@ Index: linux-stable/kernel/hrtimer.c } hrtimer_init_hres(cpu_base); -@@ -1814,9 +1972,7 @@ void __init hrtimers_init(void) +@@ -1814,9 +1978,7 @@ void __init hrtimers_init(void) hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE, (void *)(long)smp_processor_id()); register_cpu_notifier(&hrtimers_nb); @@ -380,11 +419,9 @@ Index: linux-stable/kernel/hrtimer.c } /** -Index: linux-stable/kernel/sched/core.c -=================================================================== ---- linux-stable.orig/kernel/sched/core.c -+++ linux-stable/kernel/sched/core.c -@@ -480,6 +480,7 @@ static void init_rq_hrtick(struct rq *rq +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -489,6 +489,7 @@ static void init_rq_hrtick(struct rq *rq hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); rq->hrtick_timer.function = hrtick; @@ -392,10 +429,8 @@ Index: linux-stable/kernel/sched/core.c } #else /* CONFIG_SCHED_HRTICK */ static inline void hrtick_clear(struct rq *rq) -Index: linux-stable/kernel/sched/rt.c 
-=================================================================== ---- linux-stable.orig/kernel/sched/rt.c -+++ linux-stable/kernel/sched/rt.c +--- a/kernel/sched/rt.c ++++ b/kernel/sched/rt.c @@ -41,6 +41,7 @@ void init_rt_bandwidth(struct rt_bandwid hrtimer_init(&rt_b->rt_period_timer, @@ -404,11 +439,9 @@ Index: linux-stable/kernel/sched/rt.c rt_b->rt_period_timer.function = sched_rt_period_timer; } -Index: linux-stable/kernel/time/tick-sched.c -=================================================================== ---- linux-stable.orig/kernel/time/tick-sched.c -+++ linux-stable/kernel/time/tick-sched.c -@@ -873,6 +873,7 @@ void tick_setup_sched_timer(void) +--- a/kernel/time/tick-sched.c ++++ b/kernel/time/tick-sched.c +@@ -867,6 +867,7 @@ void tick_setup_sched_timer(void) * Emulate tick processing via per-CPU hrtimers: */ hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); @@ -416,15 +449,13 @@ Index: linux-stable/kernel/time/tick-sched.c ts->sched_timer.function = tick_sched_timer; /* Get the next period (per cpu) */ -Index: linux-stable/kernel/watchdog.c -=================================================================== ---- linux-stable.orig/kernel/watchdog.c -+++ linux-stable/kernel/watchdog.c -@@ -470,6 +470,7 @@ static void watchdog_prepare_cpu(int cpu - WARN_ON(per_cpu(softlockup_watchdog, cpu)); +--- a/kernel/watchdog.c ++++ b/kernel/watchdog.c +@@ -358,6 +358,7 @@ static void watchdog_enable(unsigned int + /* kick off the timer for the hardlockup detector */ hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hrtimer->function = watchdog_timer_fn; + hrtimer->irqsafe = 1; - } - static int watchdog_enable(int cpu) + if (!watchdog_enabled) { + kthread_park(current); diff --git a/debian/patches/features/all/rt/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch b/debian/patches/features/all/rt/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch index a5f0a7b81..5c8bce5ab 100644 --- 
a/debian/patches/features/all/rt/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch +++ b/debian/patches/features/all/rt/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch @@ -10,11 +10,9 @@ Cc: stable-rt@vger.kernel.org kernel/hrtimer.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) -Index: linux-stable/kernel/hrtimer.c -=================================================================== ---- linux-stable.orig/kernel/hrtimer.c -+++ linux-stable/kernel/hrtimer.c -@@ -1527,11 +1527,7 @@ retry: +--- a/kernel/hrtimer.c ++++ b/kernel/hrtimer.c +@@ -1528,11 +1528,7 @@ retry: if (expires_next.tv64 == KTIME_MAX || !tick_program_event(expires_next, 0)) { cpu_base->hang_detected = 0; @@ -27,7 +25,7 @@ Index: linux-stable/kernel/hrtimer.c } /* -@@ -1575,6 +1571,9 @@ retry: +@@ -1576,6 +1572,9 @@ retry: tick_program_event(expires_next, 1); printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta)); diff --git a/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch b/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch index 467eb03d1..be6ba1d2b 100644 --- a/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch +++ b/debian/patches/features/all/rt/hrtimers-prepare-full-preemption.patch @@ -15,10 +15,8 @@ Signed-off-by: Thomas Gleixner kernel/posix-timers.c | 33 +++++++++++++++++++++++++++++++++ 4 files changed, 76 insertions(+), 1 deletion(-) -Index: linux-stable/include/linux/hrtimer.h -=================================================================== ---- linux-stable.orig/include/linux/hrtimer.h -+++ linux-stable/include/linux/hrtimer.h +--- a/include/linux/hrtimer.h ++++ b/include/linux/hrtimer.h @@ -192,6 +192,9 @@ struct hrtimer_cpu_base { unsigned long nr_hangs; ktime_t max_hang_time; @@ -43,11 +41,9 @@ Index: linux-stable/include/linux/hrtimer.h /* Query timers: */ extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer); extern int hrtimer_get_res(const clockid_t 
which_clock, struct timespec *tp); -Index: linux-stable/kernel/hrtimer.c -=================================================================== ---- linux-stable.orig/kernel/hrtimer.c -+++ linux-stable/kernel/hrtimer.c -@@ -857,6 +857,32 @@ u64 hrtimer_forward(struct hrtimer *time +--- a/kernel/hrtimer.c ++++ b/kernel/hrtimer.c +@@ -844,6 +844,32 @@ u64 hrtimer_forward(struct hrtimer *time } EXPORT_SYMBOL_GPL(hrtimer_forward); @@ -108,10 +104,8 @@ Index: linux-stable/kernel/hrtimer.c } #ifdef CONFIG_HOTPLUG_CPU -Index: linux-stable/kernel/itimer.c -=================================================================== ---- linux-stable.orig/kernel/itimer.c -+++ linux-stable/kernel/itimer.c +--- a/kernel/itimer.c ++++ b/kernel/itimer.c @@ -213,6 +213,7 @@ again: /* We are sharing ->siglock with it_real_fn() */ if (hrtimer_try_to_cancel(timer) < 0) { @@ -120,11 +114,9 @@ Index: linux-stable/kernel/itimer.c goto again; } expires = timeval_to_ktime(value->it_value); -Index: linux-stable/kernel/posix-timers.c -=================================================================== ---- linux-stable.orig/kernel/posix-timers.c -+++ linux-stable/kernel/posix-timers.c -@@ -766,6 +766,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_ +--- a/kernel/posix-timers.c ++++ b/kernel/posix-timers.c +@@ -773,6 +773,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_ return overrun; } @@ -145,7 +137,7 @@ Index: linux-stable/kernel/posix-timers.c /* Set a POSIX.1b interval timer. */ /* timr->it_lock is taken. 
*/ static int -@@ -843,6 +857,7 @@ retry: +@@ -850,6 +864,7 @@ retry: if (!timr) return -EINVAL; @@ -153,7 +145,7 @@ Index: linux-stable/kernel/posix-timers.c kc = clockid_to_kclock(timr->it_clock); if (WARN_ON_ONCE(!kc || !kc->timer_set)) error = -EINVAL; -@@ -851,9 +866,12 @@ retry: +@@ -858,9 +873,12 @@ retry: unlock_timer(timr, flag); if (error == TIMER_RETRY) { @@ -166,7 +158,7 @@ Index: linux-stable/kernel/posix-timers.c if (old_setting && !error && copy_to_user(old_setting, &old_spec, sizeof (old_spec))) -@@ -891,10 +909,15 @@ retry_delete: +@@ -898,10 +916,15 @@ retry_delete: if (!timer) return -EINVAL; @@ -182,7 +174,7 @@ Index: linux-stable/kernel/posix-timers.c spin_lock(¤t->sighand->siglock); list_del(&timer->list); -@@ -920,8 +943,18 @@ static void itimer_delete(struct k_itime +@@ -927,8 +950,18 @@ static void itimer_delete(struct k_itime retry_delete: spin_lock_irqsave(&timer->it_lock, flags); diff --git a/debian/patches/features/all/rt/hwlatdetect.patch b/debian/patches/features/all/rt/hwlatdetect.patch index c64e5db5a..08045b393 100644 --- a/debian/patches/features/all/rt/hwlatdetect.patch +++ b/debian/patches/features/all/rt/hwlatdetect.patch @@ -15,10 +15,8 @@ Signed-off-by: Carsten Emde drivers/misc/hwlat_detector.c | 1212 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 1306 insertions(+) -Index: linux-stable/Documentation/hwlat_detector.txt -=================================================================== --- /dev/null -+++ linux-stable/Documentation/hwlat_detector.txt ++++ b/Documentation/hwlat_detector.txt @@ -0,0 +1,64 @@ +Introduction: +------------- @@ -84,11 +82,9 @@ Index: linux-stable/Documentation/hwlat_detector.txt +observe any latencies that exceed the threshold (initially 100 usecs), +then we write to a global sample ring buffer of 8K samples, which is +consumed by reading from the "sample" (pipe) debugfs file interface. 
-Index: linux-stable/drivers/misc/Kconfig -=================================================================== ---- linux-stable.orig/drivers/misc/Kconfig -+++ linux-stable/drivers/misc/Kconfig -@@ -131,6 +131,35 @@ config IBM_ASM +--- a/drivers/misc/Kconfig ++++ b/drivers/misc/Kconfig +@@ -121,6 +121,35 @@ config IBM_ASM for information on the specific driver level and support statement for your IBM server. @@ -124,19 +120,15 @@ Index: linux-stable/drivers/misc/Kconfig config PHANTOM tristate "Sensable PHANToM (PCI)" depends on PCI -Index: linux-stable/drivers/misc/Makefile -=================================================================== ---- linux-stable.orig/drivers/misc/Makefile -+++ linux-stable/drivers/misc/Makefile -@@ -50,3 +50,4 @@ obj-y += carma/ +--- a/drivers/misc/Makefile ++++ b/drivers/misc/Makefile +@@ -49,3 +49,4 @@ obj-y += carma/ obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/ obj-$(CONFIG_INTEL_MEI) += mei/ +obj-$(CONFIG_HWLAT_DETECTOR) += hwlat_detector.o -Index: linux-stable/drivers/misc/hwlat_detector.c -=================================================================== --- /dev/null -+++ linux-stable/drivers/misc/hwlat_detector.c ++++ b/drivers/misc/hwlat_detector.c @@ -0,0 +1,1212 @@ +/* + * hwlat_detector.c - A simple Hardware Latency detector. diff --git a/debian/patches/features/all/rt/i2c-omap-drop-the-lock-hard-irq-context.patch b/debian/patches/features/all/rt/i2c-omap-drop-the-lock-hard-irq-context.patch new file mode 100644 index 000000000..fe35c0aab --- /dev/null +++ b/debian/patches/features/all/rt/i2c-omap-drop-the-lock-hard-irq-context.patch @@ -0,0 +1,34 @@ +From 5145351047b216cca13aaca99f939a9a594c6c4d Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Thu, 21 Mar 2013 11:35:49 +0100 +Subject: [PATCH 2/3] i2c/omap: drop the lock hard irq context + +The lock is taken while reading two registers. 
On RT the first lock is +taken in hard irq where it might sleep and in the threaded irq. +The threaded irq runs in oneshot mode so the hard irq does not run until +the thread the completes so there is no reason to grab the lock. + +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/i2c/busses/i2c-omap.c | 5 +---- + 1 file changed, 1 insertion(+), 4 deletions(-) + +--- a/drivers/i2c/busses/i2c-omap.c ++++ b/drivers/i2c/busses/i2c-omap.c +@@ -881,15 +881,12 @@ omap_i2c_isr(int irq, void *dev_id) + u16 mask; + u16 stat; + +- spin_lock(&dev->lock); +- mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG); + stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG); ++ mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG); + + if (stat & mask) + ret = IRQ_WAKE_THREAD; + +- spin_unlock(&dev->lock); +- + return ret; + } + diff --git a/debian/patches/features/all/rt/i915_compile_fix.patch b/debian/patches/features/all/rt/i915_compile_fix.patch new file mode 100644 index 000000000..0b2007906 --- /dev/null +++ b/debian/patches/features/all/rt/i915_compile_fix.patch @@ -0,0 +1,48 @@ +From: Sebastian Andrzej Siewior +Subject: gpu/i915: don't open code these things + +Signed-off-by: Sebastian Andrzej Siewior +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -91,7 +91,6 @@ i915_gem_wait_for_error(struct drm_devic + { + struct drm_i915_private *dev_priv = dev->dev_private; + struct completion *x = &dev_priv->error_completion; +- unsigned long flags; + int ret; + + if (!atomic_read(&dev_priv->mm.wedged)) +@@ -116,9 +115,7 @@ i915_gem_wait_for_error(struct drm_devic + * end up waiting upon a subsequent completion event that + * will never happen. 
+ */ +- spin_lock_irqsave(&x->wait.lock, flags); +- x->done++; +- spin_unlock_irqrestore(&x->wait.lock, flags); ++ complete(x); + } + return 0; + } +@@ -946,12 +943,9 @@ i915_gem_check_wedge(struct drm_i915_pri + if (atomic_read(&dev_priv->mm.wedged)) { + struct completion *x = &dev_priv->error_completion; + bool recovery_complete; +- unsigned long flags; + + /* Give the error handler a chance to run. */ +- spin_lock_irqsave(&x->wait.lock, flags); +- recovery_complete = x->done > 0; +- spin_unlock_irqrestore(&x->wait.lock, flags); ++ recovery_complete = completion_done(x); + + /* Non-interruptible callers can't handle -EAGAIN, hence return + * -EIO unconditionally for these. */ +@@ -4366,7 +4360,7 @@ static bool mutex_is_locked_by(struct mu + if (!mutex_is_locked(mutex)) + return false; + +-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) ++#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)) && !defined(CONFIG_PREEMPT_RT_BASE) + return mutex->owner == task; + #else + /* Since UP may be pre-empted, we cannot assume that we own the lock */ diff --git a/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch b/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch index 1a363e674..21d4299a0 100644 --- a/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch +++ b/debian/patches/features/all/rt/ide-use-nort-local-irq-variants.patch @@ -17,10 +17,8 @@ Signed-off-by: Thomas Gleixner drivers/ide/ide-taskfile.c | 6 +++--- 7 files changed, 16 insertions(+), 16 deletions(-) -Index: linux-stable/drivers/ide/alim15x3.c -=================================================================== ---- linux-stable.orig/drivers/ide/alim15x3.c -+++ linux-stable/drivers/ide/alim15x3.c +--- a/drivers/ide/alim15x3.c ++++ b/drivers/ide/alim15x3.c @@ -234,7 +234,7 @@ static int init_chipset_ali15x3(struct p isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); @@ -39,11 +37,9 @@ Index: 
linux-stable/drivers/ide/alim15x3.c return 0; } -Index: linux-stable/drivers/ide/hpt366.c -=================================================================== ---- linux-stable.orig/drivers/ide/hpt366.c -+++ linux-stable/drivers/ide/hpt366.c -@@ -1241,7 +1241,7 @@ static int __devinit init_dma_hpt366(ide +--- a/drivers/ide/hpt366.c ++++ b/drivers/ide/hpt366.c +@@ -1241,7 +1241,7 @@ static int init_dma_hpt366(ide_hwif_t *h dma_old = inb(base + 2); @@ -52,7 +48,7 @@ Index: linux-stable/drivers/ide/hpt366.c dma_new = dma_old; pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma); -@@ -1252,7 +1252,7 @@ static int __devinit init_dma_hpt366(ide +@@ -1252,7 +1252,7 @@ static int init_dma_hpt366(ide_hwif_t *h if (dma_new != dma_old) outb(dma_new, base + 2); @@ -61,11 +57,9 @@ Index: linux-stable/drivers/ide/hpt366.c printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n", hwif->name, base, base + 7); -Index: linux-stable/drivers/ide/ide-io-std.c -=================================================================== ---- linux-stable.orig/drivers/ide/ide-io-std.c -+++ linux-stable/drivers/ide/ide-io-std.c -@@ -175,7 +175,7 @@ void ide_input_data(ide_drive_t *drive, +--- a/drivers/ide/ide-io-std.c ++++ b/drivers/ide/ide-io-std.c +@@ -175,7 +175,7 @@ void ide_input_data(ide_drive_t *drive, unsigned long uninitialized_var(flags); if ((io_32bit & 2) && !mmio) { @@ -74,7 +68,7 @@ Index: linux-stable/drivers/ide/ide-io-std.c ata_vlb_sync(io_ports->nsect_addr); } -@@ -186,7 +186,7 @@ void ide_input_data(ide_drive_t *drive, +@@ -186,7 +186,7 @@ void ide_input_data(ide_drive_t *drive, insl(data_addr, buf, words); if ((io_32bit & 2) && !mmio) @@ -101,10 +95,8 @@ Index: linux-stable/drivers/ide/ide-io-std.c if (((len + 1) & 3) < 2) return; -Index: linux-stable/drivers/ide/ide-io.c -=================================================================== ---- linux-stable.orig/drivers/ide/ide-io.c -+++ linux-stable/drivers/ide/ide-io.c +--- a/drivers/ide/ide-io.c ++++ 
b/drivers/ide/ide-io.c @@ -659,7 +659,7 @@ void ide_timer_expiry (unsigned long dat /* disable_irq_nosync ?? */ disable_irq(hwif->irq); @@ -114,11 +106,9 @@ Index: linux-stable/drivers/ide/ide-io.c if (hwif->polling) { startstop = handler(drive); } else if (drive_is_ready(drive)) { -Index: linux-stable/drivers/ide/ide-iops.c -=================================================================== ---- linux-stable.orig/drivers/ide/ide-iops.c -+++ linux-stable/drivers/ide/ide-iops.c -@@ -129,12 +129,12 @@ int __ide_wait_stat(ide_drive_t *drive, +--- a/drivers/ide/ide-iops.c ++++ b/drivers/ide/ide-iops.c +@@ -129,12 +129,12 @@ int __ide_wait_stat(ide_drive_t *drive, if ((stat & ATA_BUSY) == 0) break; @@ -133,10 +123,8 @@ Index: linux-stable/drivers/ide/ide-iops.c } /* * Allow status to settle, then read it again. -Index: linux-stable/drivers/ide/ide-probe.c -=================================================================== ---- linux-stable.orig/drivers/ide/ide-probe.c -+++ linux-stable/drivers/ide/ide-probe.c +--- a/drivers/ide/ide-probe.c ++++ b/drivers/ide/ide-probe.c @@ -196,10 +196,10 @@ static void do_identify(ide_drive_t *dri int bswap = 1; @@ -150,10 +138,8 @@ Index: linux-stable/drivers/ide/ide-probe.c drive->dev_flags |= IDE_DFLAG_ID_READ; #ifdef DEBUG -Index: linux-stable/drivers/ide/ide-taskfile.c -=================================================================== ---- linux-stable.orig/drivers/ide/ide-taskfile.c -+++ linux-stable/drivers/ide/ide-taskfile.c +--- a/drivers/ide/ide-taskfile.c ++++ b/drivers/ide/ide-taskfile.c @@ -251,7 +251,7 @@ void ide_pio_bytes(ide_drive_t *drive, s page_is_high = PageHighMem(page); diff --git a/debian/patches/features/all/rt/idle-state.patch b/debian/patches/features/all/rt/idle-state.patch new file mode 100644 index 000000000..4f1255c43 --- /dev/null +++ b/debian/patches/features/all/rt/idle-state.patch @@ -0,0 +1,19 @@ +Subject: sched: Init idle->on_rq in init_idle() +From: Thomas Gleixner +Date: Wed, 09 Jan 2013 
23:03:29 +0100 + +Signed-off-by: Thomas Gleixner +--- + kernel/sched/core.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -4941,6 +4941,7 @@ void __cpuinit init_idle(struct task_str + rcu_read_unlock(); + + rq->curr = rq->idle = idle; ++ idle->on_rq = 1; + #if defined(CONFIG_SMP) + idle->on_cpu = 1; + #endif diff --git a/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch b/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch index 683982714..8b2c7fd18 100644 --- a/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch +++ b/debian/patches/features/all/rt/infiniband-mellanox-ib-use-nort-irq.patch @@ -18,10 +18,8 @@ Signed-off-by: Thomas Gleixner drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) -Index: linux-stable/drivers/infiniband/ulp/ipoib/ipoib_multicast.c -=================================================================== ---- linux-stable.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c -+++ linux-stable/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c ++++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -783,7 +783,7 @@ void ipoib_mcast_restart_task(struct wor ipoib_mcast_stop_thread(dev, 0); diff --git a/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch b/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch index 861a98dfe..fc11de2f5 100644 --- a/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch +++ b/debian/patches/features/all/rt/inpt-gameport-use-local-irq-nort.patch @@ -11,10 +11,8 @@ Signed-off-by: Thomas Gleixner drivers/input/gameport/gameport.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) -Index: linux-stable/drivers/input/gameport/gameport.c -=================================================================== ---- 
linux-stable.orig/drivers/input/gameport/gameport.c -+++ linux-stable/drivers/input/gameport/gameport.c +--- a/drivers/input/gameport/gameport.c ++++ b/drivers/input/gameport/gameport.c @@ -87,12 +87,12 @@ static int gameport_measure_speed(struct tx = 1 << 30; diff --git a/debian/patches/features/all/rt/intel_idle-convert-i7300_idle_lock-to-raw-spinlock.patch b/debian/patches/features/all/rt/intel_idle-convert-i7300_idle_lock-to-raw-spinlock.patch index 60291d02a..2d46f5df5 100644 --- a/debian/patches/features/all/rt/intel_idle-convert-i7300_idle_lock-to-raw-spinlock.patch +++ b/debian/patches/features/all/rt/intel_idle-convert-i7300_idle_lock-to-raw-spinlock.patch @@ -27,10 +27,8 @@ Signed-off-by: Thomas Gleixner drivers/idle/i7300_idle.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) -Index: linux-stable/drivers/idle/i7300_idle.c -=================================================================== ---- linux-stable.orig/drivers/idle/i7300_idle.c -+++ linux-stable/drivers/idle/i7300_idle.c +--- a/drivers/idle/i7300_idle.c ++++ b/drivers/idle/i7300_idle.c @@ -75,7 +75,7 @@ static unsigned long past_skip; static struct pci_dev *fbd_dev; diff --git a/debian/patches/features/all/rt/ipc-make-rt-aware.patch b/debian/patches/features/all/rt/ipc-make-rt-aware.patch index a960f1c78..8b081cff5 100644 --- a/debian/patches/features/all/rt/ipc-make-rt-aware.patch +++ b/debian/patches/features/all/rt/ipc-make-rt-aware.patch @@ -15,10 +15,8 @@ Signed-off-by: Thomas Gleixner ipc/msg.c | 16 ++++++++++++++++ 2 files changed, 21 insertions(+) -Index: linux-stable/ipc/mqueue.c -=================================================================== ---- linux-stable.orig/ipc/mqueue.c -+++ linux-stable/ipc/mqueue.c +--- a/ipc/mqueue.c ++++ b/ipc/mqueue.c @@ -912,12 +912,17 @@ static inline void pipelined_send(struct struct msg_msg *message, struct ext_wait_queue *receiver) @@ -37,10 +35,8 @@ Index: linux-stable/ipc/mqueue.c } /* pipelined_receive() - if there is task waiting 
in sys_mq_timedsend() -Index: linux-stable/ipc/msg.c -=================================================================== ---- linux-stable.orig/ipc/msg.c -+++ linux-stable/ipc/msg.c +--- a/ipc/msg.c ++++ b/ipc/msg.c @@ -259,12 +259,20 @@ static void expunge_all(struct msg_queue while (tmp != &msq->q_receivers) { struct msg_receiver *msr; @@ -62,7 +58,7 @@ Index: linux-stable/ipc/msg.c } } -@@ -611,6 +619,12 @@ static inline int pipelined_send(struct +@@ -614,6 +622,12 @@ static inline int pipelined_send(struct !security_msg_queue_msgrcv(msq, msg, msr->r_tsk, msr->r_msgtype, msr->r_mode)) { @@ -75,7 +71,7 @@ Index: linux-stable/ipc/msg.c list_del(&msr->r_list); if (msr->r_maxsize < msg->m_ts) { msr->r_msg = NULL; -@@ -624,9 +638,11 @@ static inline int pipelined_send(struct +@@ -627,9 +641,11 @@ static inline int pipelined_send(struct wake_up_process(msr->r_tsk); smp_mb(); msr->r_msg = msg; diff --git a/debian/patches/features/all/rt/ipc-mqueue-add-a-critical-section-to-avoid-a-deadlock.patch b/debian/patches/features/all/rt/ipc-mqueue-add-a-critical-section-to-avoid-a-deadlock.patch index cbc2b1a99..4d65d5b33 100644 --- a/debian/patches/features/all/rt/ipc-mqueue-add-a-critical-section-to-avoid-a-deadlock.patch +++ b/debian/patches/features/all/rt/ipc-mqueue-add-a-critical-section-to-avoid-a-deadlock.patch @@ -34,10 +34,8 @@ Signed-off-by: Thomas Gleixner ipc/mqueue.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) -Index: linux-stable/ipc/mqueue.c -=================================================================== ---- linux-stable.orig/ipc/mqueue.c -+++ linux-stable/ipc/mqueue.c +--- a/ipc/mqueue.c ++++ b/ipc/mqueue.c @@ -936,13 +936,18 @@ static inline void pipelined_receive(str wake_up_interruptible(&info->wait_q); return; diff --git a/debian/patches/features/all/rt/ipc-sem-rework-semaphore-wakeups.patch b/debian/patches/features/all/rt/ipc-sem-rework-semaphore-wakeups.patch index 0c6abe61d..a51262af2 100644 --- 
a/debian/patches/features/all/rt/ipc-sem-rework-semaphore-wakeups.patch +++ b/debian/patches/features/all/rt/ipc-sem-rework-semaphore-wakeups.patch @@ -31,10 +31,8 @@ Signed-off-by: Thomas Gleixner ipc/sem.c | 10 ++++++++++ 1 file changed, 10 insertions(+) -Index: linux-stable/ipc/sem.c -=================================================================== ---- linux-stable.orig/ipc/sem.c -+++ linux-stable/ipc/sem.c +--- a/ipc/sem.c ++++ b/ipc/sem.c @@ -461,6 +461,13 @@ undo: static void wake_up_sem_queue_prepare(struct list_head *pt, struct sem_queue *q, int error) @@ -65,7 +63,7 @@ Index: linux-stable/ipc/sem.c struct sem_queue *q, *t; int did_something; -@@ -497,6 +506,7 @@ static void wake_up_sem_queue_do(struct +@@ -497,6 +506,7 @@ static void wake_up_sem_queue_do(struct } if (did_something) preempt_enable(); diff --git a/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch b/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch index ba7fb0a28..be854970d 100644 --- a/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch +++ b/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch @@ -18,10 +18,8 @@ Cc: stable-rt@vger.kernel.org kernel/softirq.c | 7 +++++++ 5 files changed, 37 insertions(+), 2 deletions(-) -Index: linux-stable/include/linux/interrupt.h -=================================================================== ---- linux-stable.orig/include/linux/interrupt.h -+++ linux-stable/include/linux/interrupt.h +--- a/include/linux/interrupt.h ++++ b/include/linux/interrupt.h @@ -58,6 +58,7 @@ * IRQF_NO_THREAD - Interrupt cannot be threaded * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device @@ -38,11 +36,9 @@ Index: linux-stable/include/linux/interrupt.h #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) -Index: 
linux-stable/include/linux/irq.h -=================================================================== ---- linux-stable.orig/include/linux/irq.h -+++ linux-stable/include/linux/irq.h -@@ -73,6 +73,7 @@ typedef void (*irq_preflow_handler_t)(st +--- a/include/linux/irq.h ++++ b/include/linux/irq.h +@@ -70,6 +70,7 @@ typedef void (*irq_preflow_handler_t)(st * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context * IRQ_NESTED_TRHEAD - Interrupt nests into another thread * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable @@ -50,7 +46,7 @@ Index: linux-stable/include/linux/irq.h */ enum { IRQ_TYPE_NONE = 0x00000000, -@@ -97,12 +98,14 @@ enum { +@@ -94,12 +95,14 @@ enum { IRQ_NESTED_THREAD = (1 << 15), IRQ_NOTHREAD = (1 << 16), IRQ_PER_CPU_DEVID = (1 << 17), @@ -66,11 +62,9 @@ Index: linux-stable/include/linux/irq.h #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) -Index: linux-stable/kernel/irq/manage.c -=================================================================== ---- linux-stable.orig/kernel/irq/manage.c -+++ linux-stable/kernel/irq/manage.c -@@ -757,7 +757,15 @@ irq_forced_thread_fn(struct irq_desc *de +--- a/kernel/irq/manage.c ++++ b/kernel/irq/manage.c +@@ -782,7 +782,15 @@ irq_forced_thread_fn(struct irq_desc *de local_bh_disable(); ret = action->thread_fn(action->irq, action->dev_id); irq_finalize_oneshot(desc, action); @@ -87,7 +81,7 @@ Index: linux-stable/kernel/irq/manage.c return ret; } -@@ -1090,6 +1098,9 @@ __setup_irq(unsigned int irq, struct irq +@@ -1127,6 +1135,9 @@ __setup_irq(unsigned int irq, struct irq irqd_set(&desc->irq_data, IRQD_NO_BALANCING); } @@ -97,10 +91,8 @@ Index: linux-stable/kernel/irq/manage.c /* Set default affinity mask once everything is setup */ setup_affinity(irq, desc, mask); -Index: linux-stable/kernel/irq/settings.h -=================================================================== ---- linux-stable.orig/kernel/irq/settings.h -+++ linux-stable/kernel/irq/settings.h +--- a/kernel/irq/settings.h 
++++ b/kernel/irq/settings.h @@ -14,6 +14,7 @@ enum { _IRQ_NO_BALANCING = IRQ_NO_BALANCING, _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD, @@ -134,11 +126,9 @@ Index: linux-stable/kernel/irq/settings.h static inline bool irq_settings_is_per_cpu(struct irq_desc *desc) { return desc->status_use_accessors & _IRQ_PER_CPU; -Index: linux-stable/kernel/softirq.c -=================================================================== ---- linux-stable.orig/kernel/softirq.c -+++ linux-stable/kernel/softirq.c -@@ -434,6 +434,13 @@ void local_bh_enable_ip(unsigned long ip +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -417,6 +417,13 @@ void local_bh_enable_ip(unsigned long ip } EXPORT_SYMBOL(local_bh_enable_ip); diff --git a/debian/patches/features/all/rt/jump-label-rt.patch b/debian/patches/features/all/rt/jump-label-rt.patch index a1c64388c..b3c1ead5c 100644 --- a/debian/patches/features/all/rt/jump-label-rt.patch +++ b/debian/patches/features/all/rt/jump-label-rt.patch @@ -7,10 +7,8 @@ Signed-off-by: Thomas Gleixner include/linux/jump_label.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) -Index: linux-stable/include/linux/jump_label.h -=================================================================== ---- linux-stable.orig/include/linux/jump_label.h -+++ linux-stable/include/linux/jump_label.h +--- a/include/linux/jump_label.h ++++ b/include/linux/jump_label.h @@ -50,7 +50,8 @@ #include #include diff --git a/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch b/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch index db590c2fe..17aee871c 100644 --- a/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch +++ b/debian/patches/features/all/rt/kconfig-disable-a-few-options-rt.patch @@ -11,10 +11,8 @@ Signed-off-by: Thomas Gleixner mm/Kconfig | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) -Index: linux-stable/arch/Kconfig -=================================================================== ---- 
linux-stable.orig/arch/Kconfig -+++ linux-stable/arch/Kconfig +--- a/arch/Kconfig ++++ b/arch/Kconfig @@ -6,6 +6,7 @@ config OPROFILE tristate "OProfile system profiling" depends on PROFILING @@ -23,11 +21,9 @@ Index: linux-stable/arch/Kconfig select RING_BUFFER select RING_BUFFER_ALLOW_SWAP help -Index: linux-stable/drivers/net/Kconfig -=================================================================== ---- linux-stable.orig/drivers/net/Kconfig -+++ linux-stable/drivers/net/Kconfig -@@ -153,6 +153,7 @@ config MACVTAP +--- a/drivers/net/Kconfig ++++ b/drivers/net/Kconfig +@@ -164,6 +164,7 @@ config VXLAN config NETCONSOLE tristate "Network console logging support" @@ -35,16 +31,14 @@ Index: linux-stable/drivers/net/Kconfig ---help--- If you want to log kernel messages over the network, enable this. See for details. -Index: linux-stable/mm/Kconfig -=================================================================== ---- linux-stable.orig/mm/Kconfig -+++ linux-stable/mm/Kconfig -@@ -318,7 +318,7 @@ config NOMMU_INITIAL_TRIM_EXCESS +--- a/mm/Kconfig ++++ b/mm/Kconfig +@@ -353,7 +353,7 @@ config NOMMU_INITIAL_TRIM_EXCESS config TRANSPARENT_HUGEPAGE bool "Transparent Hugepage Support" -- depends on X86 && MMU -+ depends on X86 && MMU && !PREEMPT_RT_FULL +- depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE ++ depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL select COMPACTION help Transparent Hugepages allows the kernel to use huge pages and diff --git a/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch b/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch index ca7261d5e..62da6d75e 100644 --- a/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch +++ b/debian/patches/features/all/rt/kconfig-preempt-rt-full.patch @@ -5,25 +5,21 @@ Date: Wed, 29 Jun 2011 14:58:57 +0200 Signed-off-by: Thomas Gleixner --- init/Makefile | 2 +- - kernel/Kconfig.preempt | 7 +++++++ + kernel/Kconfig.preempt | 8 ++++++++ scripts/mkcompile_h | 4 +++- - 3 files 
changed, 11 insertions(+), 2 deletions(-) + 3 files changed, 12 insertions(+), 2 deletions(-) -Index: linux-stable/init/Makefile -=================================================================== ---- linux-stable.orig/init/Makefile -+++ linux-stable/init/Makefile +--- a/init/Makefile ++++ b/init/Makefile @@ -33,4 +33,4 @@ silent_chk_compile.h = : include/generated/compile.h: FORCE @$($(quiet)chk_compile.h) $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \ - "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)" + "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)" -Index: linux-stable/kernel/Kconfig.preempt -=================================================================== ---- linux-stable.orig/kernel/Kconfig.preempt -+++ linux-stable/kernel/Kconfig.preempt -@@ -73,6 +73,13 @@ config PREEMPT_RTB +--- a/kernel/Kconfig.preempt ++++ b/kernel/Kconfig.preempt +@@ -73,6 +73,14 @@ config PREEMPT_RTB enables changes which are preliminary for the full preemptiple RT kernel. @@ -31,16 +27,15 @@ Index: linux-stable/kernel/Kconfig.preempt + bool "Fully Preemptible Kernel (RT)" + depends on IRQ_FORCED_THREADING + select PREEMPT_RT_BASE ++ select PREEMPT_RCU + help + All and everything + endchoice config PREEMPT_COUNT -Index: linux-stable/scripts/mkcompile_h -=================================================================== ---- linux-stable.orig/scripts/mkcompile_h -+++ linux-stable/scripts/mkcompile_h +--- a/scripts/mkcompile_h ++++ b/scripts/mkcompile_h @@ -4,7 +4,8 @@ TARGET=$1 ARCH=$2 SMP=$3 diff --git a/debian/patches/features/all/rt/kgb-serial-hackaround.patch b/debian/patches/features/all/rt/kgb-serial-hackaround.patch index 2793aa1fb..1a13d29f8 100644 --- a/debian/patches/features/all/rt/kgb-serial-hackaround.patch +++ b/debian/patches/features/all/rt/kgb-serial-hackaround.patch @@ -19,14 +19,12 @@ Jason. 
--- drivers/tty/serial/8250/8250.c | 3 ++- - include/linux/kdb.h | 2 ++ + include/linux/kdb.h | 3 ++- kernel/debug/kdb/kdb_io.c | 6 ++---- - 3 files changed, 6 insertions(+), 5 deletions(-) + 3 files changed, 6 insertions(+), 6 deletions(-) -Index: linux-stable/drivers/tty/serial/8250/8250.c -=================================================================== ---- linux-stable.orig/drivers/tty/serial/8250/8250.c -+++ linux-stable/drivers/tty/serial/8250/8250.c +--- a/drivers/tty/serial/8250/8250.c ++++ b/drivers/tty/serial/8250/8250.c @@ -38,6 +38,7 @@ #include #include @@ -35,7 +33,7 @@ Index: linux-stable/drivers/tty/serial/8250/8250.c #ifdef CONFIG_SPARC #include #endif -@@ -2782,7 +2783,7 @@ serial8250_console_write(struct console +@@ -2909,7 +2910,7 @@ serial8250_console_write(struct console touch_nmi_watchdog(); @@ -44,29 +42,27 @@ Index: linux-stable/drivers/tty/serial/8250/8250.c locked = spin_trylock_irqsave(&port->lock, flags); else spin_lock_irqsave(&port->lock, flags); -Index: linux-stable/include/linux/kdb.h -=================================================================== ---- linux-stable.orig/include/linux/kdb.h -+++ linux-stable/include/linux/kdb.h -@@ -148,12 +148,14 @@ extern int kdb_register(char *, kdb_func - extern int kdb_register_repeat(char *, kdb_func_t, char *, char *, - short, kdb_repeat_t); - extern int kdb_unregister(char *); +--- a/include/linux/kdb.h ++++ b/include/linux/kdb.h +@@ -115,7 +115,7 @@ extern int kdb_trap_printk; + extern __printf(1, 0) int vkdb_printf(const char *fmt, va_list args); + extern __printf(1, 2) int kdb_printf(const char *, ...); + typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...); +- +#define in_kdb_printk() (kdb_trap_printk) + extern void kdb_init(int level); + + /* Access to kdb specific polling devices */ +@@ -150,6 +150,7 @@ extern int kdb_register_repeat(char *, k + extern int kdb_unregister(char *); #else /* ! CONFIG_KGDB_KDB */ - #define kdb_printf(...) 
- #define kdb_init(x) - #define kdb_register(...) - #define kdb_register_repeat(...) - #define kdb_uregister(x) + static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; } +#define in_kdb_printk() (0) - #endif /* CONFIG_KGDB_KDB */ - enum { - KDB_NOT_INITIALIZED, -Index: linux-stable/kernel/debug/kdb/kdb_io.c -=================================================================== ---- linux-stable.orig/kernel/debug/kdb/kdb_io.c -+++ linux-stable/kernel/debug/kdb/kdb_io.c + static inline void kdb_init(int level) {} + static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, + char *help, short minlen) { return 0; } +--- a/kernel/debug/kdb/kdb_io.c ++++ b/kernel/debug/kdb/kdb_io.c @@ -554,7 +554,6 @@ int vkdb_printf(const char *fmt, va_list int linecount; int colcount; diff --git a/debian/patches/features/all/rt/latency-hist.patch b/debian/patches/features/all/rt/latency-hist.patch index 1a3feeb7e..6e646b31d 100644 --- a/debian/patches/features/all/rt/latency-hist.patch +++ b/debian/patches/features/all/rt/latency-hist.patch @@ -25,10 +25,8 @@ Signed-off-by: Thomas Gleixner kernel/trace/trace_irqsoff.c | 11 10 files changed, 1611 insertions(+) -Index: linux-stable/Documentation/trace/histograms.txt -=================================================================== --- /dev/null -+++ linux-stable/Documentation/trace/histograms.txt ++++ b/Documentation/trace/histograms.txt @@ -0,0 +1,186 @@ + Using the Linux Kernel Latency Histograms + @@ -216,10 +214,8 @@ Index: linux-stable/Documentation/trace/histograms.txt +is provided. + +These data are also reset when the wakeup histogram is reset. 
-Index: linux-stable/include/linux/hrtimer.h -=================================================================== ---- linux-stable.orig/include/linux/hrtimer.h -+++ linux-stable/include/linux/hrtimer.h +--- a/include/linux/hrtimer.h ++++ b/include/linux/hrtimer.h @@ -111,6 +111,9 @@ struct hrtimer { enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; @@ -230,11 +226,9 @@ Index: linux-stable/include/linux/hrtimer.h #ifdef CONFIG_TIMER_STATS int start_pid; void *start_site; -Index: linux-stable/include/linux/sched.h -=================================================================== ---- linux-stable.orig/include/linux/sched.h -+++ linux-stable/include/linux/sched.h -@@ -1583,6 +1583,12 @@ struct task_struct { +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1598,6 +1598,12 @@ struct task_struct { unsigned long trace; /* bitmask and counter of trace recursion */ unsigned long trace_recursion; @@ -247,10 +241,8 @@ Index: linux-stable/include/linux/sched.h #endif /* CONFIG_TRACING */ #ifdef CONFIG_MEMCG /* memcg uses this to do batch job */ struct memcg_batch_info { -Index: linux-stable/include/trace/events/hist.h -=================================================================== --- /dev/null -+++ linux-stable/include/trace/events/hist.h ++++ b/include/trace/events/hist.h @@ -0,0 +1,69 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM hist @@ -321,10 +313,8 @@ Index: linux-stable/include/trace/events/hist.h + +/* This part must be outside protection */ +#include -Index: linux-stable/include/trace/events/latency_hist.h -=================================================================== --- /dev/null -+++ linux-stable/include/trace/events/latency_hist.h ++++ b/include/trace/events/latency_hist.h @@ -0,0 +1,29 @@ +#ifndef _LATENCY_HIST_H +#define _LATENCY_HIST_H @@ -355,10 +345,8 @@ Index: linux-stable/include/trace/events/latency_hist.h +} + +#endif /* _LATENCY_HIST_H */ -Index: linux-stable/kernel/hrtimer.c 
-=================================================================== ---- linux-stable.orig/kernel/hrtimer.c -+++ linux-stable/kernel/hrtimer.c +--- a/kernel/hrtimer.c ++++ b/kernel/hrtimer.c @@ -49,6 +49,7 @@ #include @@ -367,7 +355,7 @@ Index: linux-stable/kernel/hrtimer.c /* * The timer bases: -@@ -983,6 +984,17 @@ int __hrtimer_start_range_ns(struct hrti +@@ -970,6 +971,17 @@ int __hrtimer_start_range_ns(struct hrti #endif } @@ -410,11 +398,9 @@ Index: linux-stable/kernel/hrtimer.c /* * The immediate goal for using the softexpires is * minimizing wakeups, not running timers at the -Index: linux-stable/kernel/trace/Kconfig -=================================================================== ---- linux-stable.orig/kernel/trace/Kconfig -+++ linux-stable/kernel/trace/Kconfig -@@ -191,6 +191,24 @@ config IRQSOFF_TRACER +--- a/kernel/trace/Kconfig ++++ b/kernel/trace/Kconfig +@@ -202,6 +202,24 @@ config IRQSOFF_TRACER enabled. This option and the preempt-off timing option can be used together or separately.) @@ -439,7 +425,7 @@ Index: linux-stable/kernel/trace/Kconfig config PREEMPT_TRACER bool "Preemption-off Latency Tracer" default n -@@ -213,6 +231,24 @@ config PREEMPT_TRACER +@@ -224,6 +242,24 @@ config PREEMPT_TRACER enabled. This option and the irqs-off timing option can be used together or separately.) @@ -464,7 +450,7 @@ Index: linux-stable/kernel/trace/Kconfig config SCHED_TRACER bool "Scheduling Latency Tracer" select GENERIC_TRACER -@@ -222,6 +258,74 @@ config SCHED_TRACER +@@ -233,6 +269,74 @@ config SCHED_TRACER This tracer tracks the latency of the highest priority task to be scheduled in, starting from the point it has woken up. 
@@ -539,11 +525,9 @@ Index: linux-stable/kernel/trace/Kconfig config ENABLE_DEFAULT_TRACERS bool "Trace process context switches and events" depends on !GENERIC_TRACER -Index: linux-stable/kernel/trace/Makefile -=================================================================== ---- linux-stable.orig/kernel/trace/Makefile -+++ linux-stable/kernel/trace/Makefile -@@ -36,6 +36,10 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace_f +--- a/kernel/trace/Makefile ++++ b/kernel/trace/Makefile +@@ -34,6 +34,10 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace_f obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o @@ -554,10 +538,8 @@ Index: linux-stable/kernel/trace/Makefile obj-$(CONFIG_NOP_TRACER) += trace_nop.o obj-$(CONFIG_STACK_TRACER) += trace_stack.o obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o -Index: linux-stable/kernel/trace/latency_hist.c -=================================================================== --- /dev/null -+++ linux-stable/kernel/trace/latency_hist.c ++++ b/kernel/trace/latency_hist.c @@ -0,0 +1,1176 @@ +/* + * kernel/trace/latency_hist.c @@ -1735,10 +1717,8 @@ Index: linux-stable/kernel/trace/latency_hist.c +} + +__initcall(latency_hist_init); -Index: linux-stable/kernel/trace/trace_irqsoff.c -=================================================================== ---- linux-stable.orig/kernel/trace/trace_irqsoff.c -+++ linux-stable/kernel/trace/trace_irqsoff.c +--- a/kernel/trace/trace_irqsoff.c ++++ b/kernel/trace/trace_irqsoff.c @@ -17,6 +17,7 @@ #include @@ -1747,7 +1727,7 @@ Index: linux-stable/kernel/trace/trace_irqsoff.c static struct trace_array *irqsoff_trace __read_mostly; static int tracer_enabled __read_mostly; -@@ -437,11 +438,13 @@ void start_critical_timings(void) +@@ -438,11 +439,13 @@ void start_critical_timings(void) { if (preempt_trace() || irq_trace()) start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); @@ -1761,7 +1741,7 @@ Index: 
linux-stable/kernel/trace/trace_irqsoff.c if (preempt_trace() || irq_trace()) stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } -@@ -451,6 +454,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings) +@@ -452,6 +455,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings) #ifdef CONFIG_PROVE_LOCKING void time_hardirqs_on(unsigned long a0, unsigned long a1) { @@ -1769,7 +1749,7 @@ Index: linux-stable/kernel/trace/trace_irqsoff.c if (!preempt_trace() && irq_trace()) stop_critical_timing(a0, a1); } -@@ -459,6 +463,7 @@ void time_hardirqs_off(unsigned long a0, +@@ -460,6 +464,7 @@ void time_hardirqs_off(unsigned long a0, { if (!preempt_trace() && irq_trace()) start_critical_timing(a0, a1); @@ -1777,7 +1757,7 @@ Index: linux-stable/kernel/trace/trace_irqsoff.c } #else /* !CONFIG_PROVE_LOCKING */ -@@ -484,6 +489,7 @@ inline void print_irqtrace_events(struct +@@ -485,6 +490,7 @@ inline void print_irqtrace_events(struct */ void trace_hardirqs_on(void) { @@ -1785,7 +1765,7 @@ Index: linux-stable/kernel/trace/trace_irqsoff.c if (!preempt_trace() && irq_trace()) stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } -@@ -493,11 +499,13 @@ void trace_hardirqs_off(void) +@@ -494,11 +500,13 @@ void trace_hardirqs_off(void) { if (!preempt_trace() && irq_trace()) start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); @@ -1799,7 +1779,7 @@ Index: linux-stable/kernel/trace/trace_irqsoff.c if (!preempt_trace() && irq_trace()) stop_critical_timing(CALLER_ADDR0, caller_addr); } -@@ -507,6 +515,7 @@ void trace_hardirqs_off_caller(unsigned +@@ -508,6 +516,7 @@ void trace_hardirqs_off_caller(unsigned { if (!preempt_trace() && irq_trace()) start_critical_timing(CALLER_ADDR0, caller_addr); @@ -1807,7 +1787,7 @@ Index: linux-stable/kernel/trace/trace_irqsoff.c } EXPORT_SYMBOL(trace_hardirqs_off_caller); -@@ -516,12 +525,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller) +@@ -517,12 +526,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller) #ifdef CONFIG_PREEMPT_TRACER void trace_preempt_on(unsigned long a0, unsigned long a1) { 
diff --git a/debian/patches/features/all/rt/lglocks-rt.patch b/debian/patches/features/all/rt/lglocks-rt.patch index 9f1969642..f870ffc83 100644 --- a/debian/patches/features/all/rt/lglocks-rt.patch +++ b/debian/patches/features/all/rt/lglocks-rt.patch @@ -4,15 +4,13 @@ Date: Wed, 15 Jun 2011 11:02:21 +0200 Signed-off-by: Thomas Gleixner --- - include/linux/lglock.h | 13 ++++++++++- + include/linux/lglock.h | 19 +++++++++++++++-- kernel/lglock.c | 54 ++++++++++++++++++++++++++++++++----------------- - 2 files changed, 48 insertions(+), 19 deletions(-) + 2 files changed, 53 insertions(+), 20 deletions(-) -Index: linux-stable/include/linux/lglock.h -=================================================================== ---- linux-stable.orig/include/linux/lglock.h -+++ linux-stable/include/linux/lglock.h -@@ -49,18 +49,29 @@ +--- a/include/linux/lglock.h ++++ b/include/linux/lglock.h +@@ -42,22 +42,37 @@ #endif struct lglock { @@ -30,23 +28,30 @@ Index: linux-stable/include/linux/lglock.h -#define DEFINE_LGLOCK(name) \ +#ifndef CONFIG_PREEMPT_RT_FULL +# define DEFINE_LGLOCK(name) \ - DEFINE_LGLOCK_LOCKDEP(name); \ - DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \ + static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \ = __ARCH_SPIN_LOCK_UNLOCKED; \ struct lglock name = { .lock = &name ## _lock } + +-#define DEFINE_STATIC_LGLOCK(name) \ ++# define DEFINE_STATIC_LGLOCK(name) \ + static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \ + = __ARCH_SPIN_LOCK_UNLOCKED; \ + static struct lglock name = { .lock = &name ## _lock } +#else ++ +# define DEFINE_LGLOCK(name) \ -+ DEFINE_LGLOCK_LOCKDEP(name); \ -+ DEFINE_PER_CPU(struct rt_mutex, name ## _lock); \ ++ static DEFINE_PER_CPU(struct rt_mutex, name ## _lock); \ + struct lglock name = { .lock = &name ## _lock } ++ ++# define DEFINE_STATIC_LGLOCK(name) \ ++ static DEFINE_PER_CPU(struct rt_mutex, name ## _lock); \ ++ static struct lglock name = { .lock = &name ## _lock } +#endif void lg_lock_init(struct lglock *lg, char 
*name); void lg_local_lock(struct lglock *lg); -Index: linux-stable/kernel/lglock.c -=================================================================== ---- linux-stable.orig/kernel/lglock.c -+++ linux-stable/kernel/lglock.c +--- a/kernel/lglock.c ++++ b/kernel/lglock.c @@ -4,6 +4,15 @@ #include #include diff --git a/debian/patches/features/all/rt/list-add-list-last-entry.patch b/debian/patches/features/all/rt/list-add-list-last-entry.patch index 173928146..60a4798b5 100644 --- a/debian/patches/features/all/rt/list-add-list-last-entry.patch +++ b/debian/patches/features/all/rt/list-add-list-last-entry.patch @@ -7,10 +7,8 @@ Signed-off-by: Thomas Gleixner include/linux/list.h | 11 +++++++++++ 1 file changed, 11 insertions(+) -Index: linux-stable/include/linux/list.h -=================================================================== ---- linux-stable.orig/include/linux/list.h -+++ linux-stable/include/linux/list.h +--- a/include/linux/list.h ++++ b/include/linux/list.h @@ -362,6 +362,17 @@ static inline void list_splice_tail_init list_entry((ptr)->next, type, member) diff --git a/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch b/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch index 406559fcf..e53e0b428 100644 --- a/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch +++ b/debian/patches/features/all/rt/local-irq-rt-depending-variants.patch @@ -13,11 +13,9 @@ Signed-off-by: Thomas Gleixner include/linux/irqflags.h | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) -Index: linux-stable/include/linux/interrupt.h -=================================================================== ---- linux-stable.orig/include/linux/interrupt.h -+++ linux-stable/include/linux/interrupt.h -@@ -211,7 +211,7 @@ extern void devm_free_irq(struct device +--- a/include/linux/interrupt.h ++++ b/include/linux/interrupt.h +@@ -211,7 +211,7 @@ extern void devm_free_irq(struct device #ifdef CONFIG_LOCKDEP # define 
local_irq_enable_in_hardirq() do { } while (0) #else @@ -26,10 +24,8 @@ Index: linux-stable/include/linux/interrupt.h #endif extern void disable_irq_nosync(unsigned int irq); -Index: linux-stable/include/linux/irqflags.h -=================================================================== ---- linux-stable.orig/include/linux/irqflags.h -+++ linux-stable/include/linux/irqflags.h +--- a/include/linux/irqflags.h ++++ b/include/linux/irqflags.h @@ -147,4 +147,23 @@ #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ diff --git a/debian/patches/features/all/rt/local-var.patch b/debian/patches/features/all/rt/local-var.patch index f4496db27..e39a3127d 100644 --- a/debian/patches/features/all/rt/local-var.patch +++ b/debian/patches/features/all/rt/local-var.patch @@ -7,10 +7,8 @@ Signed-off-by: Thomas Gleixner include/linux/percpu.h | 5 +++++ 1 file changed, 5 insertions(+) -Index: linux-stable/include/linux/percpu.h -=================================================================== ---- linux-stable.orig/include/linux/percpu.h -+++ linux-stable/include/linux/percpu.h +--- a/include/linux/percpu.h ++++ b/include/linux/percpu.h @@ -48,6 +48,11 @@ preempt_enable(); \ } while (0) diff --git a/debian/patches/features/all/rt/local-vars-migrate-disable.patch b/debian/patches/features/all/rt/local-vars-migrate-disable.patch index 99f6827a3..ebd655742 100644 --- a/debian/patches/features/all/rt/local-vars-migrate-disable.patch +++ b/debian/patches/features/all/rt/local-vars-migrate-disable.patch @@ -7,10 +7,8 @@ Signed-off-by: Thomas Gleixner include/linux/percpu.h | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) -Index: linux-stable/include/linux/percpu.h -=================================================================== ---- linux-stable.orig/include/linux/percpu.h -+++ linux-stable/include/linux/percpu.h +--- a/include/linux/percpu.h ++++ b/include/linux/percpu.h @@ -48,10 +48,30 @@ preempt_enable(); \ } while (0) diff --git 
a/debian/patches/features/all/rt/localversion.patch b/debian/patches/features/all/rt/localversion.patch index a473a1cad..2f31c4f46 100644 --- a/debian/patches/features/all/rt/localversion.patch +++ b/debian/patches/features/all/rt/localversion.patch @@ -9,9 +9,7 @@ Link: http://lkml.kernel.org/n/tip-8vdw4bfcsds27cvox6rpb334@git.kernel.org localversion-rt | 1 + 1 file changed, 1 insertion(+) -Index: linux-stable/localversion-rt -=================================================================== --- /dev/null -+++ linux-stable/localversion-rt ++++ b/localversion-rt @@ -0,0 +1 @@ -+-rt17 ++-rt2 diff --git a/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch b/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch index 7ae3423a9..dd739dbef 100644 --- a/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch +++ b/debian/patches/features/all/rt/lockdep-no-softirq-accounting-on-rt.patch @@ -8,10 +8,8 @@ Signed-off-by: Thomas Gleixner kernel/lockdep.c | 2 ++ 2 files changed, 9 insertions(+), 3 deletions(-) -Index: linux-stable/include/linux/irqflags.h -=================================================================== ---- linux-stable.orig/include/linux/irqflags.h -+++ linux-stable/include/linux/irqflags.h +--- a/include/linux/irqflags.h ++++ b/include/linux/irqflags.h @@ -25,8 +25,6 @@ # define trace_softirqs_enabled(p) ((p)->softirqs_enabled) # define trace_hardirq_enter() do { current->hardirq_context++; } while (0) @@ -38,11 +36,9 @@ Index: linux-stable/include/linux/irqflags.h #endif #if defined(CONFIG_IRQSOFF_TRACER) || \ -Index: linux-stable/kernel/lockdep.c -=================================================================== ---- linux-stable.orig/kernel/lockdep.c -+++ linux-stable/kernel/lockdep.c -@@ -3495,6 +3495,7 @@ static void check_flags(unsigned long fl +--- a/kernel/lockdep.c ++++ b/kernel/lockdep.c +@@ -3534,6 +3534,7 @@ static void check_flags(unsigned long fl } } @@ -50,7 +46,7 @@ Index: 
linux-stable/kernel/lockdep.c /* * We dont accurately track softirq state in e.g. * hardirq contexts (such as on 4KSTACKS), so only -@@ -3509,6 +3510,7 @@ static void check_flags(unsigned long fl +@@ -3548,6 +3549,7 @@ static void check_flags(unsigned long fl DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); } } diff --git a/debian/patches/features/all/rt/lockdep-selftest-convert-spinlock-to-raw-spinlock.patch b/debian/patches/features/all/rt/lockdep-selftest-convert-spinlock-to-raw-spinlock.patch index 734a9da83..8edb4d81f 100644 --- a/debian/patches/features/all/rt/lockdep-selftest-convert-spinlock-to-raw-spinlock.patch +++ b/debian/patches/features/all/rt/lockdep-selftest-convert-spinlock-to-raw-spinlock.patch @@ -15,10 +15,8 @@ Signed-off-by: Thomas Gleixner lib/locking-selftest.c | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) -Index: linux-stable/lib/locking-selftest.c -=================================================================== ---- linux-stable.orig/lib/locking-selftest.c -+++ linux-stable/lib/locking-selftest.c +--- a/lib/locking-selftest.c ++++ b/lib/locking-selftest.c @@ -47,10 +47,10 @@ __setup("debug_locks_verbose=", setup_de * Normal standalone locks, for the circular and irq-context * dependency tests: diff --git a/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch b/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch index fece3d4f6..f476b5672 100644 --- a/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch +++ b/debian/patches/features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch @@ -15,10 +15,8 @@ Signed-off-by: Thomas Gleixner lib/locking-selftest.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) -Index: linux-stable/lib/locking-selftest.c -=================================================================== ---- 
linux-stable.orig/lib/locking-selftest.c -+++ linux-stable/lib/locking-selftest.c +--- a/lib/locking-selftest.c ++++ b/lib/locking-selftest.c @@ -1175,6 +1175,7 @@ void locking_selftest(void) printk(" --------------------------------------------------------------------------\n"); diff --git a/debian/patches/features/all/rt/locking-various-init-fixes.patch b/debian/patches/features/all/rt/locking-various-init-fixes.patch index f18974d50..9ccea4d6a 100644 --- a/debian/patches/features/all/rt/locking-various-init-fixes.patch +++ b/debian/patches/features/all/rt/locking-various-init-fixes.patch @@ -8,14 +8,11 @@ Signed-off-by: Thomas Gleixner drivers/usb/chipidea/debug.c | 2 +- fs/file.c | 2 +- include/linux/idr.h | 2 +- - kernel/cred.c | 2 +- - 5 files changed, 7 insertions(+), 7 deletions(-) + 4 files changed, 6 insertions(+), 6 deletions(-) -Index: linux-stable/drivers/char/random.c -=================================================================== ---- linux-stable.orig/drivers/char/random.c -+++ linux-stable/drivers/char/random.c -@@ -448,7 +448,7 @@ static struct entropy_store input_pool = +--- a/drivers/char/random.c ++++ b/drivers/char/random.c +@@ -445,7 +445,7 @@ static struct entropy_store input_pool = .poolinfo = &poolinfo_table[0], .name = "input", .limit = 1, @@ -24,7 +21,7 @@ Index: linux-stable/drivers/char/random.c .pool = input_pool_data }; -@@ -457,7 +457,7 @@ static struct entropy_store blocking_poo +@@ -454,7 +454,7 @@ static struct entropy_store blocking_poo .name = "blocking", .limit = 1, .pull = &input_pool, @@ -33,7 +30,7 @@ Index: linux-stable/drivers/char/random.c .pool = blocking_pool_data }; -@@ -465,7 +465,7 @@ static struct entropy_store nonblocking_ +@@ -462,7 +462,7 @@ static struct entropy_store nonblocking_ .poolinfo = &poolinfo_table[1], .name = "nonblocking", .pull = &input_pool, @@ -42,11 +39,9 @@ Index: linux-stable/drivers/char/random.c .pool = nonblocking_pool_data }; -Index: linux-stable/drivers/usb/chipidea/debug.c 
-=================================================================== ---- linux-stable.orig/drivers/usb/chipidea/debug.c -+++ linux-stable/drivers/usb/chipidea/debug.c -@@ -225,7 +225,7 @@ static struct { +--- a/drivers/usb/chipidea/debug.c ++++ b/drivers/usb/chipidea/debug.c +@@ -222,7 +222,7 @@ static struct { } dbg_data = { .idx = 0, .tty = 0, @@ -55,11 +50,9 @@ Index: linux-stable/drivers/usb/chipidea/debug.c }; /** -Index: linux-stable/fs/file.c -=================================================================== ---- linux-stable.orig/fs/file.c -+++ linux-stable/fs/file.c -@@ -421,7 +421,7 @@ struct files_struct init_files = { +--- a/fs/file.c ++++ b/fs/file.c +@@ -516,7 +516,7 @@ struct files_struct init_files = { .close_on_exec = init_files.close_on_exec_init, .open_fds = init_files.open_fds_init, }, @@ -68,10 +61,8 @@ Index: linux-stable/fs/file.c }; /* -Index: linux-stable/include/linux/idr.h -=================================================================== ---- linux-stable.orig/include/linux/idr.h -+++ linux-stable/include/linux/idr.h +--- a/include/linux/idr.h ++++ b/include/linux/idr.h @@ -136,7 +136,7 @@ struct ida { struct ida_bitmap *free_bitmap; }; @@ -81,16 +72,3 @@ Index: linux-stable/include/linux/idr.h #define DEFINE_IDA(name) struct ida name = IDA_INIT(name) int ida_pre_get(struct ida *ida, gfp_t gfp_mask); -Index: linux-stable/kernel/cred.c -=================================================================== ---- linux-stable.orig/kernel/cred.c -+++ linux-stable/kernel/cred.c -@@ -36,7 +36,7 @@ static struct kmem_cache *cred_jar; - static struct thread_group_cred init_tgcred = { - .usage = ATOMIC_INIT(2), - .tgid = 0, -- .lock = __SPIN_LOCK_UNLOCKED(init_cred.tgcred.lock), -+ .lock = __SPIN_LOCK_UNLOCKED(init_tgcred.lock), - }; - #endif - diff --git a/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch b/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch index e354e7025..c4e56e50d 100644 --- 
a/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch +++ b/debian/patches/features/all/rt/md-raid5-percpu-handling-rt-aware.patch @@ -18,11 +18,9 @@ Tested-by: Udo van den Heuvel drivers/md/raid5.h | 1 + 2 files changed, 6 insertions(+), 2 deletions(-) -Index: linux-stable/drivers/md/raid5.c -=================================================================== ---- linux-stable.orig/drivers/md/raid5.c -+++ linux-stable/drivers/md/raid5.c -@@ -1373,8 +1373,9 @@ static void __raid_run_ops(struct stripe +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -1415,8 +1415,9 @@ static void __raid_run_ops(struct stripe struct raid5_percpu *percpu; unsigned long cpu; @@ -33,7 +31,7 @@ Index: linux-stable/drivers/md/raid5.c if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { ops_run_biofill(sh); overlap_clear++; -@@ -1426,7 +1427,8 @@ static void __raid_run_ops(struct stripe +@@ -1468,7 +1469,8 @@ static void __raid_run_ops(struct stripe if (test_and_clear_bit(R5_Overlap, &dev->flags)) wake_up(&sh->raid_conf->wait_for_overlap); } @@ -43,7 +41,7 @@ Index: linux-stable/drivers/md/raid5.c } #ifdef CONFIG_MULTICORE_RAID456 -@@ -4925,6 +4927,7 @@ static int raid5_alloc_percpu(struct r5c +@@ -5093,6 +5095,7 @@ static int raid5_alloc_percpu(struct r5c break; } per_cpu_ptr(conf->percpu, cpu)->scribble = scribble; @@ -51,11 +49,9 @@ Index: linux-stable/drivers/md/raid5.c } #ifdef CONFIG_HOTPLUG_CPU conf->cpu_notify.notifier_call = raid456_cpu_notify; -Index: linux-stable/drivers/md/raid5.h -=================================================================== ---- linux-stable.orig/drivers/md/raid5.h -+++ linux-stable/drivers/md/raid5.h -@@ -427,6 +427,7 @@ struct r5conf { +--- a/drivers/md/raid5.h ++++ b/drivers/md/raid5.h +@@ -428,6 +428,7 @@ struct r5conf { int recovery_disabled; /* per cpu variables */ struct raid5_percpu { diff --git a/debian/patches/features/all/rt/might-sleep-check-for-idle.patch 
b/debian/patches/features/all/rt/might-sleep-check-for-idle.patch new file mode 100644 index 000000000..be17e41eb --- /dev/null +++ b/debian/patches/features/all/rt/might-sleep-check-for-idle.patch @@ -0,0 +1,23 @@ +Subject: sched: Check for idle task in might_sleep() +From: Thomas Gleixner +Date: Wed, 09 Jan 2013 23:34:08 +0100 + +Idle is not allowed to call sleeping functions ever! + +Signed-off-by: Thomas Gleixner +--- + kernel/sched/core.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -7391,7 +7391,8 @@ void __might_sleep(const char *file, int + static unsigned long prev_jiffy; /* ratelimiting */ + + rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */ +- if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) || ++ if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && ++ !is_idle_task(current)) || + system_state != SYSTEM_RUNNING || oops_in_progress) + return; + if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) diff --git a/debian/patches/features/all/rt/migrate-disable-rt-variant.patch b/debian/patches/features/all/rt/migrate-disable-rt-variant.patch index 0c73a6fb1..68958ded8 100644 --- a/debian/patches/features/all/rt/migrate-disable-rt-variant.patch +++ b/debian/patches/features/all/rt/migrate-disable-rt-variant.patch @@ -7,10 +7,8 @@ Signed-off-by: Thomas Gleixner include/linux/preempt.h | 4 ++++ 1 file changed, 4 insertions(+) -Index: linux-stable/include/linux/preempt.h -=================================================================== ---- linux-stable.orig/include/linux/preempt.h -+++ linux-stable/include/linux/preempt.h +--- a/include/linux/preempt.h ++++ b/include/linux/preempt.h @@ -121,11 +121,15 @@ extern void migrate_enable(void); # define preempt_enable_rt() preempt_enable() # define preempt_disable_nort() do { } while (0) diff --git a/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch 
b/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch index 177a7a031..eb1312bc8 100644 --- a/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch +++ b/debian/patches/features/all/rt/mips-disable-highmem-on-rt.patch @@ -7,11 +7,9 @@ Signed-off-by: Thomas Gleixner arch/mips/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -Index: linux-stable/arch/mips/Kconfig -=================================================================== ---- linux-stable.orig/arch/mips/Kconfig -+++ linux-stable/arch/mips/Kconfig -@@ -2119,7 +2119,7 @@ config CPU_R4400_WORKAROUNDS +--- a/arch/mips/Kconfig ++++ b/arch/mips/Kconfig +@@ -2102,7 +2102,7 @@ config CPU_R4400_WORKAROUNDS # config HIGHMEM bool "High Memory Support" diff --git a/debian/patches/features/all/rt/mips-enable-interrupts-in-signal.patch b/debian/patches/features/all/rt/mips-enable-interrupts-in-signal.patch index 21d0b984e..571c43afa 100644 --- a/debian/patches/features/all/rt/mips-enable-interrupts-in-signal.patch +++ b/debian/patches/features/all/rt/mips-enable-interrupts-in-signal.patch @@ -7,11 +7,9 @@ Signed-off-by: Thomas Gleixner arch/mips/kernel/signal.c | 1 + 1 file changed, 1 insertion(+) -Index: linux-stable/arch/mips/kernel/signal.c -=================================================================== ---- linux-stable.orig/arch/mips/kernel/signal.c -+++ linux-stable/arch/mips/kernel/signal.c -@@ -606,6 +606,7 @@ asmlinkage void do_notify_resume(struct +--- a/arch/mips/kernel/signal.c ++++ b/arch/mips/kernel/signal.c +@@ -601,6 +601,7 @@ asmlinkage void do_notify_resume(struct __u32 thread_info_flags) { local_irq_enable(); diff --git a/debian/patches/features/all/rt/mm-allow-slab-rt.patch b/debian/patches/features/all/rt/mm-allow-slab-rt.patch index fd8366d61..864280ad5 100644 --- a/debian/patches/features/all/rt/mm-allow-slab-rt.patch +++ b/debian/patches/features/all/rt/mm-allow-slab-rt.patch @@ -9,11 +9,9 @@ Signed-off-by: Thomas Gleixner init/Kconfig | 2 ++ 1 file changed, 
2 insertions(+) -Index: linux-stable/init/Kconfig -=================================================================== ---- linux-stable.orig/init/Kconfig -+++ linux-stable/init/Kconfig -@@ -1441,6 +1441,7 @@ config SLAB +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -1493,6 +1493,7 @@ config SLAB config SLUB bool "SLUB (Unqueued Allocator)" @@ -21,7 +19,7 @@ Index: linux-stable/init/Kconfig help SLUB is a slab allocator that minimizes cache line usage instead of managing queues of cached objects (SLAB approach). -@@ -1452,6 +1453,7 @@ config SLUB +@@ -1504,6 +1505,7 @@ config SLUB config SLOB depends on EXPERT bool "SLOB (Simple Allocator)" diff --git a/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch b/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch new file mode 100644 index 000000000..7a0219d36 --- /dev/null +++ b/debian/patches/features/all/rt/mm-bounce-local-irq-save-nort.patch @@ -0,0 +1,27 @@ +Subject: mm: bounce: Use local_irq_save_nort +From: Thomas Gleixner +Date: Wed, 09 Jan 2013 10:33:09 +0100 + +kmap_atomic() is preemptible on RT. 
+ +Signed-off-by: Thomas Gleixner +--- + mm/bounce.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/mm/bounce.c ++++ b/mm/bounce.c +@@ -51,11 +51,11 @@ static void bounce_copy_vec(struct bio_v + unsigned long flags; + unsigned char *vto; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + vto = kmap_atomic(to->bv_page); + memcpy(vto + to->bv_offset, vfrom, to->bv_len); + kunmap_atomic(vto); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + + #else /* CONFIG_HIGHMEM */ diff --git a/debian/patches/features/all/rt/mm-cgroup-page-bit-spinlock.patch b/debian/patches/features/all/rt/mm-cgroup-page-bit-spinlock.patch index e814d8482..3b3b3a719 100644 --- a/debian/patches/features/all/rt/mm-cgroup-page-bit-spinlock.patch +++ b/debian/patches/features/all/rt/mm-cgroup-page-bit-spinlock.patch @@ -11,10 +11,8 @@ Signed-off-by: Thomas Gleixner mm/page_cgroup.c | 11 +++++++++++ 2 files changed, 26 insertions(+) -Index: linux-stable/include/linux/page_cgroup.h -=================================================================== ---- linux-stable.orig/include/linux/page_cgroup.h -+++ linux-stable/include/linux/page_cgroup.h +--- a/include/linux/page_cgroup.h ++++ b/include/linux/page_cgroup.h @@ -24,6 +24,9 @@ enum { */ struct page_cgroup { @@ -57,10 +55,8 @@ Index: linux-stable/include/linux/page_cgroup.h #endif /* CONFIG_MEMCG */ #include -Index: linux-stable/mm/page_cgroup.c -=================================================================== ---- linux-stable.orig/mm/page_cgroup.c -+++ linux-stable/mm/page_cgroup.c +--- a/mm/page_cgroup.c ++++ b/mm/page_cgroup.c @@ -13,6 +13,14 @@ static unsigned long total_usage; diff --git a/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch b/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch index 303054c9d..0e8ac430f 100644 --- a/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch +++ 
b/debian/patches/features/all/rt/mm-convert-swap-to-percpu-locked.patch @@ -6,13 +6,11 @@ Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- - mm/swap.c | 33 +++++++++++++++++++++------------ - 1 file changed, 21 insertions(+), 12 deletions(-) + mm/swap.c | 30 ++++++++++++++++++------------ + 1 file changed, 18 insertions(+), 12 deletions(-) -Index: linux-stable/mm/swap.c -=================================================================== ---- linux-stable.orig/mm/swap.c -+++ linux-stable/mm/swap.c +--- a/mm/swap.c ++++ b/mm/swap.c @@ -30,6 +30,7 @@ #include #include @@ -61,22 +59,23 @@ Index: linux-stable/mm/swap.c } } -@@ -448,12 +453,12 @@ EXPORT_SYMBOL(mark_page_accessed); - +@@ -456,13 +461,13 @@ EXPORT_SYMBOL(mark_page_accessed); + */ void __lru_cache_add(struct page *page, enum lru_list lru) { - struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru]; + struct pagevec *pvec = &get_locked_var(swap_lock, lru_add_pvecs)[lru]; page_cache_get(page); - if (!pagevec_add(pvec, page)) + if (!pagevec_space(pvec)) __pagevec_lru_add(pvec, lru); + pagevec_add(pvec, page); - put_cpu_var(lru_add_pvecs); + put_locked_var(swap_lock, lru_add_pvecs); } EXPORT_SYMBOL(__lru_cache_add); -@@ -588,9 +593,9 @@ void lru_add_drain_cpu(int cpu) +@@ -597,9 +602,9 @@ void lru_add_drain_cpu(int cpu) unsigned long flags; /* No harm done if a racing interrupt already did this */ @@ -88,7 +87,7 @@ Index: linux-stable/mm/swap.c } pvec = &per_cpu(lru_deactivate_pvecs, cpu); -@@ -618,18 +623,19 @@ void deactivate_page(struct page *page) +@@ -627,18 +632,19 @@ void deactivate_page(struct page *page) return; if (likely(get_page_unless_zero(page))) { @@ -112,13 +111,3 @@ Index: linux-stable/mm/swap.c } static void lru_add_drain_per_cpu(struct work_struct *dummy) -@@ -847,6 +853,9 @@ void __init swap_setup(void) - { - unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT); - -+ local_irq_lock_init(rotate_lock); -+ local_irq_lock_init(swap_lock); -+ - #ifdef CONFIG_SWAP - 
bdi_init(swapper_space.backing_dev_info); - #endif diff --git a/debian/patches/features/all/rt/mm-enable-slub.patch b/debian/patches/features/all/rt/mm-enable-slub.patch index 5ad130740..160b577fa 100644 --- a/debian/patches/features/all/rt/mm-enable-slub.patch +++ b/debian/patches/features/all/rt/mm-enable-slub.patch @@ -8,13 +8,11 @@ Signed-off-by: Thomas Gleixner --- include/linux/slub_def.h | 2 init/Kconfig | 1 - mm/slub.c | 116 ++++++++++++++++++++++++++++++++++++----------- - 3 files changed, 90 insertions(+), 29 deletions(-) + mm/slub.c | 119 +++++++++++++++++++++++++++++++++++------------ + 3 files changed, 92 insertions(+), 30 deletions(-) -Index: linux-stable/include/linux/slub_def.h -=================================================================== ---- linux-stable.orig/include/linux/slub_def.h -+++ linux-stable/include/linux/slub_def.h +--- a/include/linux/slub_def.h ++++ b/include/linux/slub_def.h @@ -54,7 +54,7 @@ struct kmem_cache_cpu { }; @@ -24,11 +22,9 @@ Index: linux-stable/include/linux/slub_def.h unsigned long nr_partial; struct list_head partial; #ifdef CONFIG_SLUB_DEBUG -Index: linux-stable/init/Kconfig -=================================================================== ---- linux-stable.orig/init/Kconfig -+++ linux-stable/init/Kconfig -@@ -1442,7 +1442,6 @@ config SLAB +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -1494,7 +1494,6 @@ config SLAB config SLUB bool "SLUB (Unqueued Allocator)" @@ -36,10 +32,26 @@ Index: linux-stable/init/Kconfig help SLUB is a slab allocator that minimizes cache line usage instead of managing queues of cached objects (SLAB approach). 
-Index: linux-stable/mm/slub.c -=================================================================== ---- linux-stable.orig/mm/slub.c -+++ linux-stable/mm/slub.c +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -1070,7 +1070,7 @@ static noinline struct kmem_cache_node * + { + struct kmem_cache_node *n = get_node(s, page_to_nid(page)); + +- spin_lock_irqsave(&n->list_lock, *flags); ++ raw_spin_lock_irqsave(&n->list_lock, *flags); + slab_lock(page); + + if (!check_slab(s, page)) +@@ -1118,7 +1118,7 @@ out: + + fail: + slab_unlock(page); +- spin_unlock_irqrestore(&n->list_lock, *flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, *flags); + slab_fix(s, "Object at 0x%p not freed", object); + return NULL; + } @@ -1253,6 +1253,12 @@ static inline void slab_free_hook(struct #endif /* CONFIG_SLUB_DEBUG */ @@ -77,8 +89,8 @@ Index: linux-stable/mm/slub.c local_irq_disable(); if (!page) return NULL; -@@ -1409,6 +1423,16 @@ static void __free_slab(struct kmem_cach - __free_pages(page, order); +@@ -1414,6 +1428,16 @@ static void __free_slab(struct kmem_cach + __free_memcg_kmem_pages(page, order); } +static void free_delayed(struct kmem_cache *s, struct list_head *h) @@ -94,7 +106,7 @@ Index: linux-stable/mm/slub.c #define need_reserve_slab_rcu \ (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head)) -@@ -1443,6 +1467,12 @@ static void free_slab(struct kmem_cache +@@ -1448,6 +1472,12 @@ static void free_slab(struct kmem_cache } call_rcu(head, rcu_free_slab); @@ -107,7 +119,7 @@ Index: linux-stable/mm/slub.c } else __free_slab(s, page); } -@@ -1544,7 +1574,7 @@ static void *get_partial_node(struct kme +@@ -1549,7 +1579,7 @@ static void *get_partial_node(struct kme if (!n || !n->nr_partial) return NULL; @@ -116,7 +128,7 @@ Index: linux-stable/mm/slub.c list_for_each_entry_safe(page, page2, &n->partial, lru) { void *t; int available; -@@ -1569,7 +1599,7 @@ static void *get_partial_node(struct kme +@@ -1574,7 +1604,7 @@ static void *get_partial_node(struct kme break; } @@ -125,7 +137,7 
@@ Index: linux-stable/mm/slub.c return object; } -@@ -1811,7 +1841,7 @@ redo: +@@ -1816,7 +1846,7 @@ redo: * that acquire_slab() will see a slab page that * is frozen */ @@ -134,7 +146,7 @@ Index: linux-stable/mm/slub.c } } else { m = M_FULL; -@@ -1822,7 +1852,7 @@ redo: +@@ -1827,7 +1857,7 @@ redo: * slabs from diagnostic functions will not see * any frozen slabs. */ @@ -143,7 +155,7 @@ Index: linux-stable/mm/slub.c } } -@@ -1857,7 +1887,7 @@ redo: +@@ -1862,7 +1892,7 @@ redo: goto redo; if (lock) @@ -152,7 +164,7 @@ Index: linux-stable/mm/slub.c if (m == M_FREE) { stat(s, DEACTIVATE_EMPTY); -@@ -1886,10 +1916,10 @@ static void unfreeze_partials(struct kme +@@ -1893,10 +1923,10 @@ static void unfreeze_partials(struct kme n2 = get_node(s, page_to_nid(page)); if (n != n2) { if (n) @@ -165,7 +177,7 @@ Index: linux-stable/mm/slub.c } do { -@@ -1918,7 +1948,7 @@ static void unfreeze_partials(struct kme +@@ -1925,7 +1955,7 @@ static void unfreeze_partials(struct kme } if (n) @@ -174,16 +186,7 @@ Index: linux-stable/mm/slub.c while (discard_page) { page = discard_page; -@@ -1939,7 +1969,7 @@ static void unfreeze_partials(struct kme - * If we did not find a slot then simply move all the partials to the - * per node partial list. 
- */ --int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) -+static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) - { - struct page *oldpage; - int pages; -@@ -1954,14 +1984,21 @@ int put_cpu_partial(struct kmem_cache *s +@@ -1961,14 +1991,21 @@ static int put_cpu_partial(struct kmem_c pobjects = oldpage->pobjects; pages = oldpage->pages; if (drain && pobjects > s->cpu_partial) { @@ -202,10 +205,10 @@ Index: linux-stable/mm/slub.c + raw_spin_unlock(&f->lock); local_irq_restore(flags); + free_delayed(s, &tofree); + oldpage = NULL; pobjects = 0; pages = 0; - stat(s, CPU_PARTIAL_DRAIN); -@@ -2023,7 +2060,22 @@ static bool has_cpu_slab(int cpu, void * +@@ -2031,7 +2068,22 @@ static bool has_cpu_slab(int cpu, void * static void flush_all(struct kmem_cache *s) { @@ -228,7 +231,7 @@ Index: linux-stable/mm/slub.c } /* -@@ -2051,10 +2103,10 @@ static unsigned long count_partial(struc +@@ -2059,10 +2111,10 @@ static unsigned long count_partial(struc unsigned long x = 0; struct page *page; @@ -241,7 +244,7 @@ Index: linux-stable/mm/slub.c return x; } -@@ -2197,9 +2249,11 @@ static inline void *get_freelist(struct +@@ -2205,9 +2257,11 @@ static inline void *get_freelist(struct static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, unsigned long addr, struct kmem_cache_cpu *c) { @@ -253,7 +256,7 @@ Index: linux-stable/mm/slub.c local_irq_save(flags); #ifdef CONFIG_PREEMPT -@@ -2262,7 +2316,13 @@ load_freelist: +@@ -2270,7 +2324,13 @@ load_freelist: VM_BUG_ON(!c->page->frozen); c->freelist = get_freepointer(s, freelist); c->tid = next_tid(c->tid); @@ -267,7 +270,7 @@ Index: linux-stable/mm/slub.c return freelist; new_slab: -@@ -2280,9 +2340,7 @@ new_slab: +@@ -2288,9 +2348,7 @@ new_slab: if (unlikely(!freelist)) { if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit()) slab_out_of_memory(s, gfpflags, node); @@ -278,7 +281,7 @@ Index: linux-stable/mm/slub.c } page = c->page; -@@ -2296,8 +2354,7 @@ new_slab: +@@ 
-2304,8 +2362,7 @@ new_slab: deactivate_slab(s, page, get_freepointer(s, freelist)); c->page = NULL; c->freelist = NULL; @@ -288,7 +291,16 @@ Index: linux-stable/mm/slub.c } /* -@@ -2488,7 +2545,7 @@ static void __slab_free(struct kmem_cach +@@ -2477,7 +2534,7 @@ static void __slab_free(struct kmem_cach + + do { + if (unlikely(n)) { +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + n = NULL; + } + prior = page->freelist; +@@ -2507,7 +2564,7 @@ static void __slab_free(struct kmem_cach * Otherwise the list_lock will synchronize with * other processors updating the list of slabs. */ @@ -297,16 +309,16 @@ Index: linux-stable/mm/slub.c } } -@@ -2538,7 +2595,7 @@ static void __slab_free(struct kmem_cach - stat(s, FREE_ADD_PARTIAL); - } +@@ -2548,7 +2605,7 @@ static void __slab_free(struct kmem_cach + add_partial(n, page, DEACTIVATE_TO_TAIL); + stat(s, FREE_ADD_PARTIAL); } - spin_unlock_irqrestore(&n->list_lock, flags); + raw_spin_unlock_irqrestore(&n->list_lock, flags); return; slab_empty: -@@ -2552,7 +2609,7 @@ slab_empty: +@@ -2562,7 +2619,7 @@ slab_empty: /* Slab must be on the full list */ remove_full(s, page); @@ -315,7 +327,7 @@ Index: linux-stable/mm/slub.c stat(s, FREE_SLAB); discard_slab(s, page); } -@@ -2781,7 +2838,7 @@ static void +@@ -2764,7 +2821,7 @@ static void init_kmem_cache_node(struct kmem_cache_node *n) { n->nr_partial = 0; @@ -324,7 +336,7 @@ Index: linux-stable/mm/slub.c INIT_LIST_HEAD(&n->partial); #ifdef CONFIG_SLUB_DEBUG atomic_long_set(&n->nr_slabs, 0); -@@ -3524,7 +3581,7 @@ int kmem_cache_shrink(struct kmem_cache +@@ -3451,7 +3508,7 @@ int kmem_cache_shrink(struct kmem_cache for (i = 0; i < objects; i++) INIT_LIST_HEAD(slabs_by_inuse + i); @@ -333,7 +345,7 @@ Index: linux-stable/mm/slub.c /* * Build lists indexed by the items in use in each slab. 
-@@ -3545,7 +3602,7 @@ int kmem_cache_shrink(struct kmem_cache +@@ -3472,7 +3529,7 @@ int kmem_cache_shrink(struct kmem_cache for (i = objects - 1; i > 0; i--) list_splice(slabs_by_inuse + i, n->partial.prev); @@ -342,24 +354,20 @@ Index: linux-stable/mm/slub.c /* Release empty slabs */ list_for_each_entry_safe(page, t, slabs_by_inuse, lru) -@@ -3711,10 +3768,15 @@ void __init kmem_cache_init(void) +@@ -3642,6 +3699,12 @@ void __init kmem_cache_init(void) + boot_kmem_cache_node; int i; - int caches = 0; - struct kmem_cache *temp_kmem_cache; -- int order; -+ int order, cpu; - struct kmem_cache *temp_kmem_cache_node; - unsigned long kmalloc_size; - + int caches = 2; ++ int cpu; ++ + for_each_possible_cpu(cpu) { + raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock); + INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list); + } -+ + if (debug_guardpage_minorder()) slub_max_order = 0; - -@@ -4127,7 +4189,7 @@ static int validate_slab_node(struct kme +@@ -4033,7 +4096,7 @@ static int validate_slab_node(struct kme struct page *page; unsigned long flags; @@ -368,7 +376,7 @@ Index: linux-stable/mm/slub.c list_for_each_entry(page, &n->partial, lru) { validate_slab_slab(s, page, map); -@@ -4150,7 +4212,7 @@ static int validate_slab_node(struct kme +@@ -4056,7 +4119,7 @@ static int validate_slab_node(struct kme atomic_long_read(&n->nr_slabs)); out: @@ -377,7 +385,7 @@ Index: linux-stable/mm/slub.c return count; } -@@ -4340,12 +4402,12 @@ static int list_locations(struct kmem_ca +@@ -4246,12 +4309,12 @@ static int list_locations(struct kmem_ca if (!atomic_long_read(&n->nr_slabs)) continue; diff --git a/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch b/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch index be91dcb1b..52da0b1f8 100644 --- a/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch +++ b/debian/patches/features/all/rt/mm-make-vmstat-rt-aware.patch @@ -10,10 +10,8 @@ Signed-off-by: Thomas Gleixner mm/vmstat.c | 6 ++++++ 2 files changed, 
10 insertions(+) -Index: linux-stable/include/linux/vmstat.h -=================================================================== ---- linux-stable.orig/include/linux/vmstat.h -+++ linux-stable/include/linux/vmstat.h +--- a/include/linux/vmstat.h ++++ b/include/linux/vmstat.h @@ -29,7 +29,9 @@ DECLARE_PER_CPU(struct vm_event_state, v static inline void __count_vm_event(enum vm_event_item item) @@ -34,10 +32,8 @@ Index: linux-stable/include/linux/vmstat.h } static inline void count_vm_events(enum vm_event_item item, long delta) -Index: linux-stable/mm/vmstat.c -=================================================================== ---- linux-stable.orig/mm/vmstat.c -+++ linux-stable/mm/vmstat.c +--- a/mm/vmstat.c ++++ b/mm/vmstat.c @@ -216,6 +216,7 @@ void __mod_zone_page_state(struct zone * long x; long t; diff --git a/debian/patches/features/all/rt/mm-page-alloc-fix.patch b/debian/patches/features/all/rt/mm-page-alloc-fix.patch index a6ef431d2..225635352 100644 --- a/debian/patches/features/all/rt/mm-page-alloc-fix.patch +++ b/debian/patches/features/all/rt/mm-page-alloc-fix.patch @@ -7,12 +7,10 @@ Signed-off-by: Thomas Gleixner mm/page_alloc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) -Index: linux-stable/mm/page_alloc.c -=================================================================== ---- linux-stable.orig/mm/page_alloc.c -+++ linux-stable/mm/page_alloc.c -@@ -2180,8 +2180,8 @@ __alloc_pages_direct_compact(gfp_t gfp_m - if (*did_some_progress != COMPACT_SKIPPED) { +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -2204,8 +2204,8 @@ __alloc_pages_direct_compact(gfp_t gfp_m + struct page *page; /* Page migration frees to the PCP lists but we want merging */ - drain_pages(get_cpu()); diff --git a/debian/patches/features/all/rt/mm-page-alloc-use-list-last-entry.patch b/debian/patches/features/all/rt/mm-page-alloc-use-list-last-entry.patch index 77d7268d0..57048031f 100644 --- a/debian/patches/features/all/rt/mm-page-alloc-use-list-last-entry.patch 
+++ b/debian/patches/features/all/rt/mm-page-alloc-use-list-last-entry.patch @@ -7,16 +7,14 @@ Signed-off-by: Thomas Gleixner mm/page_alloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -Index: linux-stable/mm/page_alloc.c -=================================================================== ---- linux-stable.orig/mm/page_alloc.c -+++ linux-stable/mm/page_alloc.c -@@ -668,7 +668,7 @@ static void free_pcppages_bulk(struct zo - batch_free = to_free; - +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -658,7 +658,7 @@ static void free_pcppages_bulk(struct zo do { + int mt; /* migratetype of the to-be-freed page */ + - page = list_entry(list->prev, struct page, lru); + page = list_last_entry(list, struct page, lru); /* must delete as __free_one_page list manipulates */ list_del(&page->lru); - /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ + mt = get_freepage_migratetype(page); diff --git a/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch b/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch index c2266935e..8d12bf721 100644 --- a/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch +++ b/debian/patches/features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch @@ -12,10 +12,8 @@ Cc: stable-rt@vger.kernel.org mm/page_alloc.c | 4 ++-- 2 files changed, 13 insertions(+), 2 deletions(-) -Index: linux-stable/include/linux/locallock.h -=================================================================== ---- linux-stable.orig/include/linux/locallock.h -+++ linux-stable/include/linux/locallock.h +--- a/include/linux/locallock.h ++++ b/include/linux/locallock.h @@ -137,6 +137,12 @@ static inline int __local_lock_irqsave(s _flags = __get_cpu_var(lvar).flags; \ } while (0) @@ -41,17 +39,15 @@ Index: linux-stable/include/linux/locallock.h #define local_spin_trylock_irq(lvar, lock) \ ({ \ int __locked; \ -Index: linux-stable/mm/page_alloc.c 
-=================================================================== ---- linux-stable.orig/mm/page_alloc.c -+++ linux-stable/mm/page_alloc.c -@@ -221,9 +221,9 @@ static DEFINE_LOCAL_IRQ_LOCK(pa_lock); +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -224,9 +224,9 @@ static DEFINE_LOCAL_IRQ_LOCK(pa_lock); #ifdef CONFIG_PREEMPT_RT_BASE # define cpu_lock_irqsave(cpu, flags) \ - spin_lock_irqsave(&per_cpu(pa_lock, cpu).lock, flags) + local_lock_irqsave_on(pa_lock, flags, cpu) - # define cpu_unlock_irqrestore(cpu, flags) \ + # define cpu_unlock_irqrestore(cpu, flags) \ - spin_unlock_irqrestore(&per_cpu(pa_lock, cpu).lock, flags) + local_unlock_irqrestore_on(pa_lock, flags, cpu) #else diff --git a/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch b/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch index 6d9f67936..072387cd3 100644 --- a/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch +++ b/debian/patches/features/all/rt/mm-page_alloc-reduce-lock-sections-further.patch @@ -8,14 +8,12 @@ call free_pages_bulk() outside of the percpu page allocator locks. Signed-off-by: Peter Zijlstra Signed-off-by: Thomas Gleixner --- - mm/page_alloc.c | 81 +++++++++++++++++++++++++++++++++++++++++--------------- - 1 file changed, 60 insertions(+), 21 deletions(-) + mm/page_alloc.c | 98 +++++++++++++++++++++++++++++++++++++++----------------- + 1 file changed, 69 insertions(+), 29 deletions(-) -Index: linux-stable/mm/page_alloc.c -=================================================================== ---- linux-stable.orig/mm/page_alloc.c -+++ linux-stable/mm/page_alloc.c -@@ -637,7 +637,7 @@ static inline int free_pages_check(struc +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -625,7 +625,7 @@ static inline int free_pages_check(struc } /* @@ -24,7 +22,7 @@ Index: linux-stable/mm/page_alloc.c * Assumes all pages on list are in same zone, and of same order. * count is the number of pages to free. 
* -@@ -648,16 +648,42 @@ static inline int free_pages_check(struc +@@ -636,16 +636,50 @@ static inline int free_pages_check(struc * pinned" detection logic. */ static void free_pcppages_bulk(struct zone *zone, int count, @@ -43,16 +41,24 @@ Index: linux-stable/mm/page_alloc.c + while (!list_empty(list)) { + struct page *page = list_first_entry(list, struct page, lru); ++ int mt; /* migratetype of the to-be-freed page */ + + /* must delete as __free_one_page list manipulates */ + list_del(&page->lru); ++ ++ mt = get_freepage_migratetype(page); + /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ -+ __free_one_page(page, zone, 0, page_private(page)); -+ trace_mm_page_pcpu_drain(page, 0, page_private(page)); ++ __free_one_page(page, zone, 0, mt); ++ trace_mm_page_pcpu_drain(page, 0, mt); ++ if (likely(get_pageblock_migratetype(page) != MIGRATE_ISOLATE)) { ++ __mod_zone_page_state(zone, NR_FREE_PAGES, 1); ++ if (is_migrate_cma(mt)) ++ __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1); ++ } ++ + to_free--; + } + WARN_ON(to_free != 0); -+ __mod_zone_page_state(zone, NR_FREE_PAGES, count); + spin_unlock_irqrestore(&zone->lock, flags); +} + @@ -71,7 +77,7 @@ Index: linux-stable/mm/page_alloc.c while (to_free) { struct page *page; struct list_head *list; -@@ -673,7 +699,7 @@ static void free_pcppages_bulk(struct zo +@@ -661,7 +695,7 @@ static void free_pcppages_bulk(struct zo batch_free++; if (++migratetype == MIGRATE_PCPTYPES) migratetype = 0; @@ -80,19 +86,27 @@ Index: linux-stable/mm/page_alloc.c } while (list_empty(list)); /* This is the only non-empty list. Free them all. 
*/ -@@ -682,27 +708,24 @@ static void free_pcppages_bulk(struct zo +@@ -669,36 +703,26 @@ static void free_pcppages_bulk(struct zo + batch_free = to_free; do { +- int mt; /* migratetype of the to-be-freed page */ +- page = list_last_entry(list, struct page, lru); - /* must delete as __free_one_page list manipulates */ list_del(&page->lru); +- mt = get_freepage_migratetype(page); - /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ -- __free_one_page(page, zone, 0, page_private(page)); -- trace_mm_page_pcpu_drain(page, 0, page_private(page)); +- __free_one_page(page, zone, 0, mt); +- trace_mm_page_pcpu_drain(page, 0, mt); +- if (likely(get_pageblock_migratetype(page) != MIGRATE_ISOLATE)) { +- __mod_zone_page_state(zone, NR_FREE_PAGES, 1); +- if (is_migrate_cma(mt)) +- __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1); +- } + list_add(&page->lru, dst); } while (--to_free && --batch_free && !list_empty(list)); } -- __mod_zone_page_state(zone, NR_FREE_PAGES, count); - spin_unlock(&zone->lock); } @@ -107,13 +121,14 @@ Index: linux-stable/mm/page_alloc.c zone->pages_scanned = 0; __free_one_page(page, zone, order, migratetype); - __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); + if (unlikely(migratetype != MIGRATE_ISOLATE)) + __mod_zone_freepage_state(zone, 1 << order, migratetype); - spin_unlock(&zone->lock); + spin_unlock_irqrestore(&zone->lock, flags); } static bool free_pages_prepare(struct page *page, unsigned int order) -@@ -1168,6 +1191,7 @@ static int rmqueue_bulk(struct zone *zon +@@ -1180,6 +1204,7 @@ static int rmqueue_bulk(struct zone *zon void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) { unsigned long flags; @@ -121,7 +136,7 @@ Index: linux-stable/mm/page_alloc.c int to_drain; local_lock_irqsave(pa_lock, flags); -@@ -1176,10 +1200,11 @@ void drain_zone_pages(struct zone *zone, +@@ -1188,10 +1213,11 @@ void drain_zone_pages(struct zone *zone, else to_drain = pcp->count; if (to_drain > 0) { @@ -134,7 +149,7 @@ Index: 
linux-stable/mm/page_alloc.c } #endif -@@ -1198,16 +1223,21 @@ static void drain_pages(unsigned int cpu +@@ -1210,16 +1236,21 @@ static void drain_pages(unsigned int cpu for_each_populated_zone(zone) { struct per_cpu_pageset *pset; struct per_cpu_pages *pcp; @@ -158,7 +173,7 @@ Index: linux-stable/mm/page_alloc.c } } -@@ -1348,8 +1378,15 @@ void free_hot_cold_page(struct page *pag +@@ -1357,8 +1388,15 @@ void free_hot_cold_page(struct page *pag list_add(&page->lru, &pcp->lists[migratetype]); pcp->count++; if (pcp->count >= pcp->high) { @@ -175,7 +190,13 @@ Index: linux-stable/mm/page_alloc.c } out: -@@ -5888,14 +5925,16 @@ static int __meminit __zone_pcp_update(v +@@ -5977,20 +6015,22 @@ static int __meminit __zone_pcp_update(v + { + struct zone *zone = data; + int cpu; +- unsigned long batch = zone_batchsize(zone), flags; ++ unsigned long flags; + for_each_possible_cpu(cpu) { struct per_cpu_pageset *pset; struct per_cpu_pages *pcp; @@ -187,11 +208,12 @@ Index: linux-stable/mm/page_alloc.c cpu_lock_irqsave(cpu, flags); - if (pcp->count > 0) - free_pcppages_bulk(zone, pcp->count, pcp); -- setup_pageset(pset, batch); + if (pcp->count > 0) { + isolate_pcp_pages(pcp->count, pcp, &dst); + free_pcppages_bulk(zone, pcp->count, &dst); + } + drain_zonestat(zone, pset); +- setup_pageset(pset, batch); cpu_unlock_irqrestore(cpu, flags); } return 0; diff --git a/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch index d915106a2..6dcb233b5 100644 --- a/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch +++ b/debian/patches/features/all/rt/mm-page_alloc-rt-friendly-per-cpu-pages.patch @@ -16,10 +16,8 @@ Signed-off-by: Thomas Gleixner mm/page_alloc.c | 57 ++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 39 insertions(+), 18 deletions(-) -Index: linux-stable/mm/page_alloc.c 
-=================================================================== ---- linux-stable.orig/mm/page_alloc.c -+++ linux-stable/mm/page_alloc.c +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c @@ -58,6 +58,7 @@ #include #include @@ -28,7 +26,7 @@ Index: linux-stable/mm/page_alloc.c #include #include -@@ -216,6 +217,18 @@ EXPORT_SYMBOL(nr_node_ids); +@@ -219,6 +220,18 @@ EXPORT_SYMBOL(nr_node_ids); EXPORT_SYMBOL(nr_online_nodes); #endif @@ -37,7 +35,7 @@ Index: linux-stable/mm/page_alloc.c +#ifdef CONFIG_PREEMPT_RT_BASE +# define cpu_lock_irqsave(cpu, flags) \ + spin_lock_irqsave(&per_cpu(pa_lock, cpu).lock, flags) -+# define cpu_unlock_irqrestore(cpu, flags) \ ++# define cpu_unlock_irqrestore(cpu, flags) \ + spin_unlock_irqrestore(&per_cpu(pa_lock, cpu).lock, flags) +#else +# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags) @@ -46,24 +44,23 @@ Index: linux-stable/mm/page_alloc.c + int page_group_by_mobility_disabled __read_mostly; - /* -@@ -726,13 +739,13 @@ static void __free_pages_ok(struct page + void set_pageblock_migratetype(struct page *page, int migratetype) +@@ -722,12 +735,12 @@ static void __free_pages_ok(struct page if (!free_pages_prepare(page, order)) return; - local_irq_save(flags); + local_lock_irqsave(pa_lock, flags); - if (unlikely(wasMlocked)) - free_page_mlock(page); __count_vm_events(PGFREE, 1 << order); - free_one_page(page_zone(page), page, order, - get_pageblock_migratetype(page)); + migratetype = get_pageblock_migratetype(page); + set_freepage_migratetype(page, migratetype); + free_one_page(page_zone(page), page, order, migratetype); - local_irq_restore(flags); + local_unlock_irqrestore(pa_lock, flags); } - void __meminit __free_pages_bootmem(struct page *page, unsigned int order) -@@ -1157,7 +1170,7 @@ void drain_zone_pages(struct zone *zone, + /* +@@ -1169,7 +1182,7 @@ void drain_zone_pages(struct zone *zone, unsigned long flags; int to_drain; @@ -72,7 +69,7 @@ Index: linux-stable/mm/page_alloc.c if (pcp->count >= pcp->batch) to_drain 
= pcp->batch; else -@@ -1166,7 +1179,7 @@ void drain_zone_pages(struct zone *zone, +@@ -1178,7 +1191,7 @@ void drain_zone_pages(struct zone *zone, free_pcppages_bulk(zone, to_drain, pcp); pcp->count -= to_drain; } @@ -81,7 +78,7 @@ Index: linux-stable/mm/page_alloc.c } #endif -@@ -1186,7 +1199,7 @@ static void drain_pages(unsigned int cpu +@@ -1198,7 +1211,7 @@ static void drain_pages(unsigned int cpu struct per_cpu_pageset *pset; struct per_cpu_pages *pcp; @@ -90,7 +87,7 @@ Index: linux-stable/mm/page_alloc.c pset = per_cpu_ptr(zone->pageset, cpu); pcp = &pset->pcp; -@@ -1194,7 +1207,7 @@ static void drain_pages(unsigned int cpu +@@ -1206,7 +1219,7 @@ static void drain_pages(unsigned int cpu free_pcppages_bulk(zone, pcp->count, pcp); pcp->count = 0; } @@ -99,7 +96,7 @@ Index: linux-stable/mm/page_alloc.c } } -@@ -1247,7 +1260,12 @@ void drain_all_pages(void) +@@ -1259,7 +1272,12 @@ void drain_all_pages(void) else cpumask_clear_cpu(cpu, &cpus_with_pcps); } @@ -112,16 +109,16 @@ Index: linux-stable/mm/page_alloc.c } #ifdef CONFIG_HIBERNATION -@@ -1303,7 +1321,7 @@ void free_hot_cold_page(struct page *pag +@@ -1314,7 +1332,7 @@ void free_hot_cold_page(struct page *pag migratetype = get_pageblock_migratetype(page); - set_page_private(page, migratetype); + set_freepage_migratetype(page, migratetype); - local_irq_save(flags); + local_lock_irqsave(pa_lock, flags); - if (unlikely(wasMlocked)) - free_page_mlock(page); __count_vm_event(PGFREE); -@@ -1335,7 +1353,7 @@ void free_hot_cold_page(struct page *pag + + /* +@@ -1344,7 +1362,7 @@ void free_hot_cold_page(struct page *pag } out: @@ -130,7 +127,7 @@ Index: linux-stable/mm/page_alloc.c } /* -@@ -1447,7 +1465,7 @@ again: +@@ -1473,7 +1491,7 @@ again: struct per_cpu_pages *pcp; struct list_head *list; @@ -139,7 +136,7 @@ Index: linux-stable/mm/page_alloc.c pcp = &this_cpu_ptr(zone->pageset)->pcp; list = &pcp->lists[migratetype]; if (list_empty(list)) { -@@ -1479,17 +1497,19 @@ again: +@@ -1505,18 +1523,20 @@ again: */ 
WARN_ON_ONCE(order > 1); } @@ -152,7 +149,8 @@ Index: linux-stable/mm/page_alloc.c + spin_unlock(&zone->lock); goto failed; + } - __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order)); + __mod_zone_freepage_state(zone, -(1 << order), + get_pageblock_migratetype(page)); + spin_unlock(&zone->lock); } @@ -163,7 +161,7 @@ Index: linux-stable/mm/page_alloc.c VM_BUG_ON(bad_range(zone, page)); if (prep_new_page(page, order, gfp_flags)) -@@ -1497,7 +1517,7 @@ again: +@@ -1524,7 +1544,7 @@ again: return page; failed: @@ -172,7 +170,7 @@ Index: linux-stable/mm/page_alloc.c return NULL; } -@@ -4992,6 +5012,7 @@ static int page_alloc_cpu_notify(struct +@@ -5133,6 +5153,7 @@ static int page_alloc_cpu_notify(struct void __init page_alloc_init(void) { hotcpu_notifier(page_alloc_cpu_notify, 0); @@ -180,7 +178,7 @@ Index: linux-stable/mm/page_alloc.c } /* -@@ -5871,11 +5892,11 @@ static int __meminit __zone_pcp_update(v +@@ -5965,12 +5986,12 @@ static int __meminit __zone_pcp_update(v pset = per_cpu_ptr(zone->pageset, cpu); pcp = &pset->pcp; @@ -188,19 +186,23 @@ Index: linux-stable/mm/page_alloc.c + cpu_lock_irqsave(cpu, flags); if (pcp->count > 0) free_pcppages_bulk(zone, pcp->count, pcp); + drain_zonestat(zone, pset); setup_pageset(pset, batch); - local_irq_restore(flags); + cpu_unlock_irqrestore(cpu, flags); } return 0; } -@@ -5892,12 +5913,12 @@ void zone_pcp_reset(struct zone *zone) - unsigned long flags; +@@ -5988,7 +6009,7 @@ void zone_pcp_reset(struct zone *zone) + struct per_cpu_pageset *pset; /* avoid races with drain_pages() */ - local_irq_save(flags); + local_lock_irqsave(pa_lock, flags); if (zone->pageset != &boot_pageset) { + for_each_online_cpu(cpu) { + pset = per_cpu_ptr(zone->pageset, cpu); +@@ -5997,7 +6018,7 @@ void zone_pcp_reset(struct zone *zone) free_percpu(zone->pageset); zone->pageset = &boot_pageset; } @@ -208,4 +210,4 @@ Index: linux-stable/mm/page_alloc.c + local_unlock_irqrestore(pa_lock, flags); } - /* + #ifdef CONFIG_MEMORY_HOTREMOVE diff --git 
a/debian/patches/features/all/rt/mm-prepare-pf-disable-discoupling.patch b/debian/patches/features/all/rt/mm-prepare-pf-disable-discoupling.patch index 7df71070b..d3a545891 100644 --- a/debian/patches/features/all/rt/mm-prepare-pf-disable-discoupling.patch +++ b/debian/patches/features/all/rt/mm-prepare-pf-disable-discoupling.patch @@ -15,11 +15,9 @@ Signed-off-by: Thomas Gleixner mm/memory.c | 29 +++++++++++++++++++++++++++++ 4 files changed, 34 insertions(+), 30 deletions(-) -Index: linux-stable/include/linux/sched.h -=================================================================== ---- linux-stable.orig/include/linux/sched.h -+++ linux-stable/include/linux/sched.h -@@ -1448,6 +1448,7 @@ struct task_struct { +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1452,6 +1452,7 @@ struct task_struct { /* mutex deadlock detection */ struct mutex_waiter *blocked_on; #endif @@ -27,10 +25,8 @@ Index: linux-stable/include/linux/sched.h #ifdef CONFIG_TRACE_IRQFLAGS unsigned int irq_events; unsigned long hardirq_enable_ip; -Index: linux-stable/include/linux/uaccess.h -=================================================================== ---- linux-stable.orig/include/linux/uaccess.h -+++ linux-stable/include/linux/uaccess.h +--- a/include/linux/uaccess.h ++++ b/include/linux/uaccess.h @@ -6,37 +6,10 @@ /* @@ -72,11 +68,9 @@ Index: linux-stable/include/linux/uaccess.h #ifndef ARCH_HAS_NOCACHE_UACCESS -Index: linux-stable/kernel/fork.c -=================================================================== ---- linux-stable.orig/kernel/fork.c -+++ linux-stable/kernel/fork.c -@@ -1298,6 +1298,7 @@ static struct task_struct *copy_process( +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -1285,6 +1285,7 @@ static struct task_struct *copy_process( p->hardirq_context = 0; p->softirq_context = 0; #endif @@ -84,11 +78,9 @@ Index: linux-stable/kernel/fork.c #ifdef CONFIG_LOCKDEP p->lockdep_depth = 0; /* no locks held yet */ p->curr_chain_key = 0; -Index: 
linux-stable/mm/memory.c -=================================================================== ---- linux-stable.orig/mm/memory.c -+++ linux-stable/mm/memory.c -@@ -3484,6 +3484,35 @@ unlock: +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -3669,6 +3669,35 @@ unlock: return 0; } @@ -102,7 +94,7 @@ Index: linux-stable/mm/memory.c + */ + barrier(); +} -+EXPORT_SYMBOL_GPL(pagefault_disable); ++EXPORT_SYMBOL(pagefault_disable); + +void pagefault_enable(void) +{ @@ -119,7 +111,7 @@ Index: linux-stable/mm/memory.c + barrier(); + preempt_check_resched(); +} -+EXPORT_SYMBOL_GPL(pagefault_enable); ++EXPORT_SYMBOL(pagefault_enable); + /* * By the time we get here, we already hold the mm semaphore diff --git a/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch b/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch index 8dd324a46..dcaa01f6d 100644 --- a/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch +++ b/debian/patches/features/all/rt/mm-protect-activate-switch-mm.patch @@ -1,39 +1,61 @@ -Subject: mm-protect-activate-switch-mm.patch -From: Thomas Gleixner -Date: Mon, 04 Jul 2011 09:48:40 +0200 +Subject: mm: Protect activate_mm() by preempt_[disable&enable]_rt() +From: Yong Zhang +Date: Tue, 15 May 2012 13:53:56 +0800 +User preempt_*_rt instead of local_irq_*_rt or otherwise there will be +warning on ARM like below: + +WARNING: at build/linux/kernel/smp.c:459 smp_call_function_many+0x98/0x264() +Modules linked in: +[] (unwind_backtrace+0x0/0xe4) from [] (warn_slowpath_common+0x4c/0x64) +[] (warn_slowpath_common+0x4c/0x64) from [] (warn_slowpath_null+0x18/0x1c) +[] (warn_slowpath_null+0x18/0x1c) from [](smp_call_function_many+0x98/0x264) +[] (smp_call_function_many+0x98/0x264) from [] (smp_call_function+0x44/0x6c) +[] (smp_call_function+0x44/0x6c) from [] (__new_context+0xbc/0x124) +[] (__new_context+0xbc/0x124) from [] (flush_old_exec+0x460/0x5e4) +[] (flush_old_exec+0x460/0x5e4) from [] (load_elf_binary+0x2e0/0x11ac) +[] 
(load_elf_binary+0x2e0/0x11ac) from [] (search_binary_handler+0x94/0x2a4) +[] (search_binary_handler+0x94/0x2a4) from [] (do_execve+0x254/0x364) +[] (do_execve+0x254/0x364) from [] (sys_execve+0x34/0x54) +[] (sys_execve+0x34/0x54) from [] (ret_fast_syscall+0x0/0x30) +---[ end trace 0000000000000002 ]--- + +The reason is that ARM need irq enabled when doing activate_mm(). +According to mm-protect-activate-switch-mm.patch, actually +preempt_[disable|enable]_rt() is sufficient. + +Inspired-by: Steven Rostedt +Signed-off-by: Yong Zhang +Cc: Steven Rostedt +Link: http://lkml.kernel.org/r/1337061236-1766-1-git-send-email-yong.zhang0@gmail.com Signed-off-by: Thomas Gleixner --- fs/exec.c | 2 ++ mm/mmu_context.c | 2 ++ 2 files changed, 4 insertions(+) -Index: linux-stable/fs/exec.c -=================================================================== ---- linux-stable.orig/fs/exec.c -+++ linux-stable/fs/exec.c -@@ -836,10 +836,12 @@ static int exec_mmap(struct mm_struct *m +--- a/fs/exec.c ++++ b/fs/exec.c +@@ -827,10 +827,12 @@ static int exec_mmap(struct mm_struct *m } } task_lock(tsk); -+ local_irq_disable_rt(); ++ preempt_disable_rt(); active_mm = tsk->active_mm; tsk->mm = mm; tsk->active_mm = mm; activate_mm(active_mm, mm); -+ local_irq_enable_rt(); ++ preempt_enable_rt(); task_unlock(tsk); arch_pick_mmap_layout(mm); if (old_mm) { -Index: linux-stable/mm/mmu_context.c -=================================================================== ---- linux-stable.orig/mm/mmu_context.c -+++ linux-stable/mm/mmu_context.c +--- a/mm/mmu_context.c ++++ b/mm/mmu_context.c @@ -26,6 +26,7 @@ void use_mm(struct mm_struct *mm) struct task_struct *tsk = current; task_lock(tsk); -+ local_irq_disable_rt(); ++ preempt_disable_rt(); active_mm = tsk->active_mm; if (active_mm != mm) { atomic_inc(&mm->mm_count); @@ -41,7 +63,7 @@ Index: linux-stable/mm/mmu_context.c } tsk->mm = mm; switch_mm(active_mm, mm, tsk); -+ local_irq_enable_rt(); ++ preempt_enable_rt(); task_unlock(tsk); if (active_mm != 
mm) diff --git a/debian/patches/features/all/rt/mm-protect-activate_mm-by-preempt_-disable-7cenable-_rt.patch b/debian/patches/features/all/rt/mm-protect-activate_mm-by-preempt_-disable-7cenable-_rt.patch deleted file mode 100644 index cf7879551..000000000 --- a/debian/patches/features/all/rt/mm-protect-activate_mm-by-preempt_-disable-7cenable-_rt.patch +++ /dev/null @@ -1,80 +0,0 @@ -Subject: mm: Protect activate_mm() by preempt_[disable%7Cenable]_rt() -From: Yong Zhang -Date: Tue, 15 May 2012 13:53:56 +0800 - -From: Yong Zhang - -Otherwise there will be warning on ARM like below: - -WARNING: at build/linux/kernel/smp.c:459 smp_call_function_many+0x98/0x264() -Modules linked in: -[] (unwind_backtrace+0x0/0xe4) from [] (warn_slowpath_common+0x4c/0x64) -[] (warn_slowpath_common+0x4c/0x64) from [] (warn_slowpath_null+0x18/0x1c) -[] (warn_slowpath_null+0x18/0x1c) from [](smp_call_function_many+0x98/0x264) -[] (smp_call_function_many+0x98/0x264) from [] (smp_call_function+0x44/0x6c) -[] (smp_call_function+0x44/0x6c) from [] (__new_context+0xbc/0x124) -[] (__new_context+0xbc/0x124) from [] (flush_old_exec+0x460/0x5e4) -[] (flush_old_exec+0x460/0x5e4) from [] (load_elf_binary+0x2e0/0x11ac) -[] (load_elf_binary+0x2e0/0x11ac) from [] (search_binary_handler+0x94/0x2a4) -[] (search_binary_handler+0x94/0x2a4) from [] (do_execve+0x254/0x364) -[] (do_execve+0x254/0x364) from [] (sys_execve+0x34/0x54) -[] (sys_execve+0x34/0x54) from [] (ret_fast_syscall+0x0/0x30) ----[ end trace 0000000000000002 ]--- - -The reason is that ARM need irq enabled when doing activate_mm(). -According to mm-protect-activate-switch-mm.patch, actually -preempt_[disable|enable]_rt() is sufficient. 
- -Inspired-by: Steven Rostedt -Signed-off-by: Yong Zhang -Cc: Steven Rostedt -Link: http://lkml.kernel.org/r/1337061236-1766-1-git-send-email-yong.zhang0@gmail.com -Signed-off-by: Thomas Gleixner -Cc: Steven Rostedt -Cc: Thomas Gleixner ---- - fs/exec.c | 4 ++-- - mm/mmu_context.c | 4 ++-- - 2 files changed, 4 insertions(+), 4 deletions(-) - -Index: linux-stable/fs/exec.c -=================================================================== ---- linux-stable.orig/fs/exec.c -+++ linux-stable/fs/exec.c -@@ -836,12 +836,12 @@ static int exec_mmap(struct mm_struct *m - } - } - task_lock(tsk); -- local_irq_disable_rt(); -+ preempt_disable_rt(); - active_mm = tsk->active_mm; - tsk->mm = mm; - tsk->active_mm = mm; - activate_mm(active_mm, mm); -- local_irq_enable_rt(); -+ preempt_enable_rt(); - task_unlock(tsk); - arch_pick_mmap_layout(mm); - if (old_mm) { -Index: linux-stable/mm/mmu_context.c -=================================================================== ---- linux-stable.orig/mm/mmu_context.c -+++ linux-stable/mm/mmu_context.c -@@ -26,7 +26,7 @@ void use_mm(struct mm_struct *mm) - struct task_struct *tsk = current; - - task_lock(tsk); -- local_irq_disable_rt(); -+ preempt_disable_rt(); - active_mm = tsk->active_mm; - if (active_mm != mm) { - atomic_inc(&mm->mm_count); -@@ -34,7 +34,7 @@ void use_mm(struct mm_struct *mm) - } - tsk->mm = mm; - switch_mm(active_mm, mm, tsk); -- local_irq_enable_rt(); -+ preempt_enable_rt(); - task_unlock(tsk); - - if (active_mm != mm) diff --git a/debian/patches/features/all/rt/mm-remove-preempt-count-from-pf.patch b/debian/patches/features/all/rt/mm-remove-preempt-count-from-pf.patch index 5ba03dc2a..a7ca61d74 100644 --- a/debian/patches/features/all/rt/mm-remove-preempt-count-from-pf.patch +++ b/debian/patches/features/all/rt/mm-remove-preempt-count-from-pf.patch @@ -9,11 +9,9 @@ Signed-off-by: Thomas Gleixner mm/memory.c | 7 ------- 1 file changed, 7 deletions(-) -Index: linux-stable/mm/memory.c 
-=================================================================== ---- linux-stable.orig/mm/memory.c -+++ linux-stable/mm/memory.c -@@ -3487,7 +3487,6 @@ unlock: +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -3672,7 +3672,6 @@ unlock: #ifdef CONFIG_PREEMPT_RT_FULL void pagefault_disable(void) { @@ -21,7 +19,7 @@ Index: linux-stable/mm/memory.c current->pagefault_disabled++; /* * make sure to have issued the store before a pagefault -@@ -3505,12 +3504,6 @@ void pagefault_enable(void) +@@ -3690,12 +3689,6 @@ void pagefault_enable(void) */ barrier(); current->pagefault_disabled--; @@ -32,5 +30,5 @@ Index: linux-stable/mm/memory.c - barrier(); - preempt_check_resched(); } - EXPORT_SYMBOL_GPL(pagefault_enable); + EXPORT_SYMBOL(pagefault_enable); #endif diff --git a/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch b/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch index 84a6f070d..6345236d4 100644 --- a/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch +++ b/debian/patches/features/all/rt/mm-rt-kmap-atomic-scheduling.patch @@ -14,16 +14,22 @@ you can flip kmaps around like below. Signed-off-by: Peter Zijlstra [dvhart@linux.intel.com: build fix] Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins ---- - arch/x86/kernel/process_32.c | 36 ++++++++++++++++++++++++++++++++++++ - include/linux/sched.h | 5 +++++ - mm/memory.c | 2 ++ - 3 files changed, 43 insertions(+) -Index: linux-stable/arch/x86/kernel/process_32.c -=================================================================== ---- linux-stable.orig/arch/x86/kernel/process_32.c -+++ linux-stable/arch/x86/kernel/process_32.c +[tglx@linutronix.de: Get rid of the per cpu variable and store the idx + and the pte content right away in the task struct. + Shortens the context switch code. 
] +--- + arch/x86/kernel/process_32.c | 32 ++++++++++++++++++++++++++++++++ + arch/x86/mm/highmem_32.c | 9 ++++++++- + arch/x86/mm/iomap_32.c | 9 ++++++++- + include/linux/highmem.h | 27 +++++++++++++++++++++++---- + include/linux/sched.h | 7 +++++++ + mm/highmem.c | 6 ++++-- + mm/memory.c | 2 ++ + 7 files changed, 84 insertions(+), 8 deletions(-) + +--- a/arch/x86/kernel/process_32.c ++++ b/arch/x86/kernel/process_32.c @@ -36,6 +36,7 @@ #include #include @@ -32,53 +38,176 @@ Index: linux-stable/arch/x86/kernel/process_32.c #include #include -@@ -276,6 +277,41 @@ __switch_to(struct task_struct *prev_p, +@@ -216,6 +217,35 @@ start_thread(struct pt_regs *regs, unsig + } + EXPORT_SYMBOL_GPL(start_thread); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) ++{ ++ int i; ++ ++ /* ++ * Clear @prev's kmap_atomic mappings ++ */ ++ for (i = 0; i < prev_p->kmap_idx; i++) { ++ int idx = i + KM_TYPE_NR * smp_processor_id(); ++ pte_t *ptep = kmap_pte - idx; ++ ++ kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx)); ++ } ++ /* ++ * Restore @next_p's kmap_atomic mappings ++ */ ++ for (i = 0; i < next_p->kmap_idx; i++) { ++ int idx = i + KM_TYPE_NR * smp_processor_id(); ++ ++ if (!pte_none(next_p->kmap_pte[i])) ++ set_pte(kmap_pte - idx, next_p->kmap_pte[i]); ++ } ++} ++#else ++static inline void ++switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } ++#endif ++ + + /* + * switch_to(x,y) should switch tasks from x to y. 
+@@ -295,6 +325,8 @@ __switch_to(struct task_struct *prev_p, task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) __switch_to_xtra(prev_p, next_p, tss); -+#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM -+ /* -+ * Save @prev's kmap_atomic stack -+ */ -+ prev_p->kmap_idx = __this_cpu_read(__kmap_atomic_idx); -+ if (unlikely(prev_p->kmap_idx)) { -+ int i; -+ -+ for (i = 0; i < prev_p->kmap_idx; i++) { -+ int idx = i + KM_TYPE_NR * smp_processor_id(); -+ -+ pte_t *ptep = kmap_pte - idx; -+ prev_p->kmap_pte[i] = *ptep; -+ kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx)); -+ } -+ -+ __this_cpu_write(__kmap_atomic_idx, 0); -+ } -+ -+ /* -+ * Restore @next_p's kmap_atomic stack -+ */ -+ if (unlikely(next_p->kmap_idx)) { -+ int i; -+ -+ __this_cpu_write(__kmap_atomic_idx, next_p->kmap_idx); -+ -+ for (i = 0; i < next_p->kmap_idx; i++) { -+ int idx = i + KM_TYPE_NR * smp_processor_id(); -+ -+ set_pte(kmap_pte - idx, next_p->kmap_pte[i]); -+ } -+ } -+#endif ++ switch_kmaps(prev_p, next_p); + /* * Leave lazy mode, flushing any hypercalls made here. 
* This must be done before restoring TLS segments so -Index: linux-stable/include/linux/sched.h -=================================================================== ---- linux-stable.orig/include/linux/sched.h -+++ linux-stable/include/linux/sched.h -@@ -63,6 +63,7 @@ struct sched_param { +--- a/arch/x86/mm/highmem_32.c ++++ b/arch/x86/mm/highmem_32.c +@@ -31,6 +31,7 @@ EXPORT_SYMBOL(kunmap); + */ + void *kmap_atomic_prot(struct page *page, pgprot_t prot) + { ++ pte_t pte = mk_pte(page, prot); + unsigned long vaddr; + int idx, type; + +@@ -44,7 +45,10 @@ void *kmap_atomic_prot(struct page *page + idx = type + KM_TYPE_NR*smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + BUG_ON(!pte_none(*(kmap_pte-idx))); +- set_pte(kmap_pte-idx, mk_pte(page, prot)); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = pte; ++#endif ++ set_pte(kmap_pte-idx, pte); + arch_flush_lazy_mmu_mode(); + + return (void *)vaddr; +@@ -87,6 +91,9 @@ void __kunmap_atomic(void *kvaddr) + * is a bad idea also, in case the page changes cacheability + * attributes or becomes a protected page in a hypervisor. 
+ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = __pte(0); ++#endif + kpte_clear_flush(kmap_pte-idx, vaddr); + kmap_atomic_idx_pop(); + arch_flush_lazy_mmu_mode(); +--- a/arch/x86/mm/iomap_32.c ++++ b/arch/x86/mm/iomap_32.c +@@ -56,6 +56,7 @@ EXPORT_SYMBOL_GPL(iomap_free); + + void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) + { ++ pte_t pte = pfn_pte(pfn, prot); + unsigned long vaddr; + int idx, type; + +@@ -64,7 +65,10 @@ void *kmap_atomic_prot_pfn(unsigned long + type = kmap_atomic_idx_push(); + idx = type + KM_TYPE_NR * smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); +- set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = pte; ++#endif ++ set_pte(kmap_pte - idx, pte); + arch_flush_lazy_mmu_mode(); + + return (void *)vaddr; +@@ -110,6 +114,9 @@ iounmap_atomic(void __iomem *kvaddr) + * is a bad idea also, in case the page changes cacheability + * attributes or becomes a protected page in a hypervisor. 
+ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = __pte(0); ++#endif + kpte_clear_flush(kmap_pte-idx, vaddr); + kmap_atomic_idx_pop(); + } +--- a/include/linux/highmem.h ++++ b/include/linux/highmem.h +@@ -85,32 +85,51 @@ static inline void __kunmap_atomic(void + + #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) + ++#ifndef CONFIG_PREEMPT_RT_FULL + DECLARE_PER_CPU(int, __kmap_atomic_idx); ++#endif + + static inline int kmap_atomic_idx_push(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1; + +-#ifdef CONFIG_DEBUG_HIGHMEM ++# ifdef CONFIG_DEBUG_HIGHMEM + WARN_ON_ONCE(in_irq() && !irqs_disabled()); + BUG_ON(idx > KM_TYPE_NR); +-#endif ++# endif + return idx; ++#else ++ current->kmap_idx++; ++ BUG_ON(current->kmap_idx > KM_TYPE_NR); ++ return current->kmap_idx - 1; ++#endif + } + + static inline int kmap_atomic_idx(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + return __this_cpu_read(__kmap_atomic_idx) - 1; ++#else ++ return current->kmap_idx - 1; ++#endif + } + + static inline void kmap_atomic_idx_pop(void) + { +-#ifdef CONFIG_DEBUG_HIGHMEM ++#ifndef CONFIG_PREEMPT_RT_FULL ++# ifdef CONFIG_DEBUG_HIGHMEM + int idx = __this_cpu_dec_return(__kmap_atomic_idx); + + BUG_ON(idx < 0); +-#else ++# else + __this_cpu_dec(__kmap_atomic_idx); ++# endif ++#else ++ current->kmap_idx--; ++# ifdef CONFIG_DEBUG_HIGHMEM ++ BUG_ON(current->kmap_idx < 0); ++# endif + #endif + } + +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -23,6 +23,7 @@ struct sched_param { #include #include @@ -86,22 +215,48 @@ Index: linux-stable/include/linux/sched.h #include #include #include -@@ -1619,6 +1620,10 @@ struct task_struct { +@@ -1635,6 +1636,12 @@ struct task_struct { struct rcu_head put_rcu; int softirq_nestcnt; #endif -+#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM ++#ifdef CONFIG_PREEMPT_RT_FULL ++# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32 + int kmap_idx; + pte_t kmap_pte[KM_TYPE_NR]; ++# 
endif +#endif }; - #ifdef CONFIG_PREEMPT_RT_FULL -Index: linux-stable/mm/memory.c -=================================================================== ---- linux-stable.orig/mm/memory.c -+++ linux-stable/mm/memory.c -@@ -3487,6 +3487,7 @@ unlock: + #ifdef CONFIG_NUMA_BALANCING +--- a/mm/highmem.c ++++ b/mm/highmem.c +@@ -29,10 +29,11 @@ + #include + #include + +- ++#ifndef CONFIG_PREEMPT_RT_FULL + #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) + DEFINE_PER_CPU(int, __kmap_atomic_idx); + #endif ++#endif + + /* + * Virtual_count is not a pure "count". +@@ -47,8 +48,9 @@ DEFINE_PER_CPU(int, __kmap_atomic_idx); + unsigned long totalhigh_pages __read_mostly; + EXPORT_SYMBOL(totalhigh_pages); + +- ++#ifndef CONFIG_PREEMPT_RT_FULL + EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx); ++#endif + + unsigned int nr_free_highpages (void) + { +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -3672,6 +3672,7 @@ unlock: #ifdef CONFIG_PREEMPT_RT_FULL void pagefault_disable(void) { @@ -109,11 +264,11 @@ Index: linux-stable/mm/memory.c current->pagefault_disabled++; /* * make sure to have issued the store before a pagefault -@@ -3504,6 +3505,7 @@ void pagefault_enable(void) +@@ -3689,6 +3690,7 @@ void pagefault_enable(void) */ barrier(); current->pagefault_disabled--; + migrate_enable(); } - EXPORT_SYMBOL_GPL(pagefault_enable); + EXPORT_SYMBOL(pagefault_enable); #endif diff --git a/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch b/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch index cc45e7ab1..23a622c6f 100644 --- a/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch +++ b/debian/patches/features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch @@ -7,20 +7,18 @@ Signed-off-by: Thomas Gleixner lib/scatterlist.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) -Index: linux-stable/lib/scatterlist.c -=================================================================== ---- 
linux-stable.orig/lib/scatterlist.c -+++ linux-stable/lib/scatterlist.c -@@ -479,7 +479,7 @@ void sg_miter_stop(struct sg_mapping_ite +--- a/lib/scatterlist.c ++++ b/lib/scatterlist.c +@@ -499,7 +499,7 @@ void sg_miter_stop(struct sg_mapping_ite flush_kernel_dcache_page(miter->page); if (miter->__flags & SG_MITER_ATOMIC) { -- WARN_ON(!irqs_disabled()); -+ WARN_ON_NONRT(!irqs_disabled()); +- WARN_ON_ONCE(preemptible()); ++ WARN_ON_ONCE(!pagefault_disabled()); kunmap_atomic(miter->addr); } else kunmap(miter->page); -@@ -519,7 +519,7 @@ static size_t sg_copy_buffer(struct scat +@@ -539,7 +539,7 @@ static size_t sg_copy_buffer(struct scat sg_miter_start(&miter, sgl, nents, sg_flags); @@ -29,7 +27,7 @@ Index: linux-stable/lib/scatterlist.c while (sg_miter_next(&miter) && offset < buflen) { unsigned int len; -@@ -536,7 +536,7 @@ static size_t sg_copy_buffer(struct scat +@@ -556,7 +556,7 @@ static size_t sg_copy_buffer(struct scat sg_miter_stop(&miter); diff --git a/debian/patches/features/all/rt/mm-shrink-the-page-frame-to-rt-size.patch b/debian/patches/features/all/rt/mm-shrink-the-page-frame-to-rt-size.patch index 2f6279302..80f968bb5 100644 --- a/debian/patches/features/all/rt/mm-shrink-the-page-frame-to-rt-size.patch +++ b/debian/patches/features/all/rt/mm-shrink-the-page-frame-to-rt-size.patch @@ -13,15 +13,13 @@ Signed-off-by: Thomas Gleixner --- include/linux/mm.h | 46 +++++++++++++++++++++++++++++++++++++++------- - include/linux/mm_types.h | 6 +++++- + include/linux/mm_types.h | 4 ++++ mm/memory.c | 32 ++++++++++++++++++++++++++++++++ - 3 files changed, 76 insertions(+), 8 deletions(-) + 3 files changed, 75 insertions(+), 7 deletions(-) -Index: linux-stable/include/linux/mm.h -=================================================================== ---- linux-stable.orig/include/linux/mm.h -+++ linux-stable/include/linux/mm.h -@@ -1225,27 +1225,59 @@ static inline pmd_t *pmd_alloc(struct mm +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -1259,27 +1259,59 @@ 
static inline pmd_t *pmd_alloc(struct mm * overflow into the next struct page (as it might with DEBUG_SPINLOCK). * When freeing, reset page->mapping so free_pages_check won't complain. */ @@ -88,28 +86,23 @@ Index: linux-stable/include/linux/mm.h static inline void pgtable_page_dtor(struct page *page) { pte_lock_deinit(page); -Index: linux-stable/include/linux/mm_types.h -=================================================================== ---- linux-stable.orig/include/linux/mm_types.h -+++ linux-stable/include/linux/mm_types.h -@@ -145,7 +145,11 @@ struct page { +--- a/include/linux/mm_types.h ++++ b/include/linux/mm_types.h +@@ -141,7 +141,11 @@ struct page { * system if PG_buddy is set. */ #if USE_SPLIT_PTLOCKS -- spinlock_t ptl; +# ifndef CONFIG_PREEMPT_RT_FULL -+ spinlock_t ptl; + spinlock_t ptl; +# else -+ spinlock_t *ptl; ++ spinlock_t *ptl; +# endif #endif - struct kmem_cache *slab; /* SLUB: Pointer to slab */ + struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */ struct page *first_page; /* Compound tail pages */ -Index: linux-stable/mm/memory.c -=================================================================== ---- linux-stable.orig/mm/memory.c -+++ linux-stable/mm/memory.c -@@ -4062,3 +4062,35 @@ void copy_user_huge_page(struct page *ds +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -4264,3 +4264,35 @@ void copy_user_huge_page(struct page *ds } } #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ diff --git a/debian/patches/features/all/rt/mm-slab-fix-potential-deadlock.patch b/debian/patches/features/all/rt/mm-slab-fix-potential-deadlock.patch deleted file mode 100644 index 80e926842..000000000 --- a/debian/patches/features/all/rt/mm-slab-fix-potential-deadlock.patch +++ /dev/null @@ -1,122 +0,0 @@ -Subject: mm: slab: Fix potential deadlock -From: Thomas Gleixner -Date: Wed, 26 Sep 2012 16:20:00 +0200 - - ============================================= -[ INFO: possible recursive locking detected ] - 3.6.0-rt1+ #49 Not tainted - 
--------------------------------------------- - swapper/0/1 is trying to acquire lock: - lock_slab_on+0x72/0x77 - - but task is already holding lock: - __local_lock_irq+0x24/0x77 - - other info that might help us debug this: - Possible unsafe locking scenario: - - CPU0 - ---- - lock(&per_cpu(slab_lock, __cpu).lock); - lock(&per_cpu(slab_lock, __cpu).lock); - - *** DEADLOCK *** - - May be due to missing lock nesting notation - - 2 locks held by swapper/0/1: - kmem_cache_create+0x33/0x89 - __local_lock_irq+0x24/0x77 - - stack backtrace: - Pid: 1, comm: swapper/0 Not tainted 3.6.0-rt1+ #49 - Call Trace: - __lock_acquire+0x9a4/0xdc4 - ? __local_lock_irq+0x24/0x77 - ? lock_slab_on+0x72/0x77 - lock_acquire+0xc4/0x108 - ? lock_slab_on+0x72/0x77 - ? unlock_slab_on+0x5b/0x5b - rt_spin_lock+0x36/0x3d - ? lock_slab_on+0x72/0x77 - ? migrate_disable+0x85/0x93 - lock_slab_on+0x72/0x77 - do_ccupdate_local+0x19/0x44 - slab_on_each_cpu+0x36/0x5a - do_tune_cpucache+0xc1/0x305 - enable_cpucache+0x8c/0xb5 - setup_cpu_cache+0x28/0x182 - __kmem_cache_create+0x34b/0x380 - ? shmem_mount+0x1a/0x1a - kmem_cache_create+0x4a/0x89 - ? shmem_mount+0x1a/0x1a - shmem_init+0x3e/0xd4 - kernel_init+0x11c/0x214 - kernel_thread_helper+0x4/0x10 - ? retint_restore_args+0x13/0x13 - ? start_kernel+0x3bc/0x3bc - ? gs_change+0x13/0x13 - -It's not a missing annotation. It's simply wrong code and needs to be -fixed. Instead of nesting the local and the remote cpu lock simply -acquire only the remote cpu lock, which is sufficient protection for -this procedure. 
- -Signed-off-by: Thomas Gleixner -Cc: stable-rt@vger.kernel.org ---- - include/linux/locallock.h | 8 ++++++++ - mm/slab.c | 10 ++-------- - 2 files changed, 10 insertions(+), 8 deletions(-) - -Index: linux-stable/include/linux/locallock.h -=================================================================== ---- linux-stable.orig/include/linux/locallock.h -+++ linux-stable/include/linux/locallock.h -@@ -96,6 +96,9 @@ static inline void __local_lock_irq(stru - #define local_lock_irq(lvar) \ - do { __local_lock_irq(&get_local_var(lvar)); } while (0) - -+#define local_lock_irq_on(lvar, cpu) \ -+ do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0) -+ - static inline void __local_unlock_irq(struct local_irq_lock *lv) - { - LL_WARN(!lv->nestcnt); -@@ -111,6 +114,11 @@ static inline void __local_unlock_irq(st - put_local_var(lvar); \ - } while (0) - -+#define local_unlock_irq_on(lvar, cpu) \ -+ do { \ -+ __local_unlock_irq(&per_cpu(lvar, cpu)); \ -+ } while (0) -+ - static inline int __local_lock_irqsave(struct local_irq_lock *lv) - { - if (lv->owner != current) { -Index: linux-stable/mm/slab.c -=================================================================== ---- linux-stable.orig/mm/slab.c -+++ linux-stable/mm/slab.c -@@ -728,18 +728,12 @@ slab_on_each_cpu(void (*func)(void *arg, - - static void lock_slab_on(unsigned int cpu) - { -- if (cpu == smp_processor_id()) -- local_lock_irq(slab_lock); -- else -- local_spin_lock_irq(slab_lock, &per_cpu(slab_lock, cpu).lock); -+ local_lock_irq_on(slab_lock, cpu); - } - - static void unlock_slab_on(unsigned int cpu) - { -- if (cpu == smp_processor_id()) -- local_unlock_irq(slab_lock); -- else -- local_spin_unlock_irq(slab_lock, &per_cpu(slab_lock, cpu).lock); -+ local_unlock_irq_on(slab_lock, cpu); - } - #endif - diff --git a/debian/patches/features/all/rt/mm-slab-more-lock-breaks.patch b/debian/patches/features/all/rt/mm-slab-more-lock-breaks.patch index 77efcc095..455b7fdb9 100644 --- 
a/debian/patches/features/all/rt/mm-slab-more-lock-breaks.patch +++ b/debian/patches/features/all/rt/mm-slab-more-lock-breaks.patch @@ -9,14 +9,12 @@ Signed-off-by: Peter Zijlstra Signed-off-by: Thomas Gleixner --- - mm/slab.c | 108 +++++++++++++++++++++++++++++++++++++++++++++++++------------- - 1 file changed, 86 insertions(+), 22 deletions(-) + mm/slab.c | 82 ++++++++++++++++++++++++++++++++++++++++++++++++-------------- + 1 file changed, 64 insertions(+), 18 deletions(-) -Index: linux-stable/mm/slab.c -=================================================================== ---- linux-stable.orig/mm/slab.c -+++ linux-stable/mm/slab.c -@@ -704,6 +704,7 @@ static void slab_set_debugobj_lock_class +--- a/mm/slab.c ++++ b/mm/slab.c +@@ -697,6 +697,7 @@ static void slab_set_debugobj_lock_class #endif static DEFINE_PER_CPU(struct delayed_work, slab_reap_work); @@ -24,34 +22,7 @@ Index: linux-stable/mm/slab.c static DEFINE_LOCAL_IRQ_LOCK(slab_lock); #ifndef CONFIG_PREEMPT_RT_BASE -@@ -719,14 +720,57 @@ slab_on_each_cpu(void (*func)(void *arg, - { - unsigned int i; - -- for_each_online_cpu(i) { -- spin_lock_irq(&per_cpu(slab_lock, i).lock); -+ get_cpu_light(); -+ for_each_online_cpu(i) - func(arg, i); -- spin_unlock_irq(&per_cpu(slab_lock, i).lock); -- } -+ put_cpu_light(); -+} -+ -+static void lock_slab_on(unsigned int cpu) -+{ -+ if (cpu == smp_processor_id()) -+ local_lock_irq(slab_lock); -+ else -+ local_spin_lock_irq(slab_lock, &per_cpu(slab_lock, cpu).lock); -+} -+ -+static void unlock_slab_on(unsigned int cpu) -+{ -+ if (cpu == smp_processor_id()) -+ local_unlock_irq(slab_lock); -+ else -+ local_spin_unlock_irq(slab_lock, &per_cpu(slab_lock, cpu).lock); +@@ -729,6 +730,34 @@ static void unlock_slab_on(unsigned int } #endif @@ -86,7 +57,7 @@ Index: linux-stable/mm/slab.c static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) { return cachep->array[smp_processor_id()]; -@@ -1340,7 +1384,7 @@ static void __cpuinit cpuup_canceled(lon +@@ -1345,7 
+1374,7 @@ static void __cpuinit cpuup_canceled(lon free_block(cachep, nc->entry, nc->avail, node); if (!cpumask_empty(mask)) { @@ -95,7 +66,7 @@ Index: linux-stable/mm/slab.c goto free_array_cache; } -@@ -1354,7 +1398,7 @@ static void __cpuinit cpuup_canceled(lon +@@ -1359,7 +1388,7 @@ static void __cpuinit cpuup_canceled(lon alien = l3->alien; l3->alien = NULL; @@ -104,16 +75,16 @@ Index: linux-stable/mm/slab.c kfree(shared); if (alien) { -@@ -1635,6 +1679,8 @@ void __init kmem_cache_init(void) +@@ -1652,6 +1681,8 @@ void __init kmem_cache_init(void) use_alien_caches = 0; local_irq_lock_init(slab_lock); + for_each_possible_cpu(i) + INIT_LIST_HEAD(&per_cpu(slab_free_list, i)); - for (i = 0; i < NUM_INIT_LISTS; i++) { + for (i = 0; i < NUM_INIT_LISTS; i++) kmem_list3_init(&initkmem_list3[i]); -@@ -1973,12 +2019,14 @@ static void *kmem_getpages(struct kmem_c +@@ -1953,12 +1984,14 @@ static void *kmem_getpages(struct kmem_c /* * Interface to system's page release. */ @@ -130,14 +101,13 @@ Index: linux-stable/mm/slab.c kmemcheck_free_shadow(page, cachep->gfporder); if (cachep->flags & SLAB_RECLAIM_ACCOUNT) -@@ -1995,7 +2043,13 @@ static void kmem_freepages(struct kmem_c - } +@@ -1977,7 +2010,12 @@ static void kmem_freepages(struct kmem_c + memcg_release_pages(cachep, cachep->gfporder); if (current->reclaim_state) current->reclaim_state->reclaimed_slab += nr_freed; -- free_pages((unsigned long)addr, cachep->gfporder); -+ +- free_memcg_kmem_pages((unsigned long)addr, cachep->gfporder); + if (!delayed) { -+ free_pages((unsigned long)addr, cachep->gfporder); ++ free_memcg_kmem_pages((unsigned long)addr, cachep->gfporder); + } else { + basepage->index = cachep->gfporder; + list_add(&basepage->lru, &__get_cpu_var(slab_free_list)); @@ -145,7 +115,7 @@ Index: linux-stable/mm/slab.c } static void kmem_rcu_free(struct rcu_head *head) -@@ -2003,7 +2057,7 @@ static void kmem_rcu_free(struct rcu_hea +@@ -1985,7 +2023,7 @@ static void kmem_rcu_free(struct rcu_hea struct slab_rcu 
*slab_rcu = (struct slab_rcu *)head; struct kmem_cache *cachep = slab_rcu->cachep; @@ -154,7 +124,7 @@ Index: linux-stable/mm/slab.c if (OFF_SLAB(cachep)) kmem_cache_free(cachep->slabp_cache, slab_rcu); } -@@ -2222,7 +2276,8 @@ static void slab_destroy_debugcheck(stru +@@ -2204,7 +2242,8 @@ static void slab_destroy_debugcheck(stru * Before calling the slab must have been unlinked from the cache. The * cache-lock is not held/needed. */ @@ -164,7 +134,7 @@ Index: linux-stable/mm/slab.c { void *addr = slabp->s_mem - slabp->colouroff; -@@ -2235,7 +2290,7 @@ static void slab_destroy(struct kmem_cac +@@ -2217,7 +2256,7 @@ static void slab_destroy(struct kmem_cac slab_rcu->addr = addr; call_rcu(&slab_rcu->head, kmem_rcu_free); } else { @@ -173,7 +143,7 @@ Index: linux-stable/mm/slab.c if (OFF_SLAB(cachep)) kmem_cache_free(cachep->slabp_cache, slabp); } -@@ -2700,9 +2755,15 @@ static void do_drain(void *arg) +@@ -2628,9 +2667,15 @@ static void do_drain(void *arg) __do_drain(arg, smp_processor_id()); } #else @@ -191,7 +161,7 @@ Index: linux-stable/mm/slab.c } #endif -@@ -2760,7 +2821,7 @@ static int drain_freelist(struct kmem_ca +@@ -2688,7 +2733,7 @@ static int drain_freelist(struct kmem_ca */ l3->free_objects -= cache->num; local_spin_unlock_irq(slab_lock, &l3->list_lock); @@ -200,7 +170,7 @@ Index: linux-stable/mm/slab.c nr_freed++; } out: -@@ -3095,7 +3156,7 @@ static int cache_grow(struct kmem_cache +@@ -3003,7 +3048,7 @@ static int cache_grow(struct kmem_cache spin_unlock(&l3->list_lock); return 1; opps1: @@ -209,7 +179,7 @@ Index: linux-stable/mm/slab.c failed: if (local_flags & __GFP_WAIT) local_lock_irq(slab_lock); -@@ -3772,7 +3833,7 @@ static void free_block(struct kmem_cache +@@ -3684,7 +3729,7 @@ static void free_block(struct kmem_cache * a different cache, refer to comments before * alloc_slabmgmt. 
*/ @@ -218,25 +188,25 @@ Index: linux-stable/mm/slab.c } else { list_add(&slabp->list, &l3->slabs_free); } -@@ -4039,7 +4100,7 @@ void kmem_cache_free(struct kmem_cache * +@@ -3952,7 +3997,7 @@ void kmem_cache_free(struct kmem_cache * debug_check_no_obj_freed(objp, cachep->object_size); local_lock_irqsave(slab_lock, flags); - __cache_free(cachep, objp, __builtin_return_address(0)); + __cache_free(cachep, objp, _RET_IP_); - local_unlock_irqrestore(slab_lock, flags); + unlock_slab_and_free_delayed(flags); trace_kmem_cache_free(_RET_IP_, objp); } -@@ -4070,7 +4131,7 @@ void kfree(const void *objp) +@@ -3983,7 +4028,7 @@ void kfree(const void *objp) debug_check_no_obj_freed(objp, c->object_size); local_lock_irqsave(slab_lock, flags); - __cache_free(c, (void *)objp, __builtin_return_address(0)); + __cache_free(c, (void *)objp, _RET_IP_); - local_unlock_irqrestore(slab_lock, flags); + unlock_slab_and_free_delayed(flags); } EXPORT_SYMBOL(kfree); -@@ -4126,7 +4187,8 @@ static int alloc_kmemlist(struct kmem_ca +@@ -4033,7 +4078,8 @@ static int alloc_kmemlist(struct kmem_ca } l3->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num; @@ -246,17 +216,7 @@ Index: linux-stable/mm/slab.c kfree(shared); free_alien_cache(new_alien); continue; -@@ -4192,7 +4254,9 @@ static void do_ccupdate_local(void *info - #else - static void do_ccupdate_local(void *info, int cpu) - { -+ lock_slab_on(cpu); - __do_ccupdate_local(info, cpu); -+ unlock_slab_on(cpu); - } - #endif - -@@ -4234,8 +4298,8 @@ static int do_tune_cpucache(struct kmem_ +@@ -4141,8 +4187,8 @@ static int __do_tune_cpucache(struct kme local_spin_lock_irq(slab_lock, &cachep->nodelists[cpu_to_mem(i)]->list_lock); free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i)); diff --git a/debian/patches/features/all/rt/mm-slab-move-debug-out.patch b/debian/patches/features/all/rt/mm-slab-move-debug-out.patch index 6fc8444cd..5f2ff42bb 100644 --- a/debian/patches/features/all/rt/mm-slab-move-debug-out.patch +++ 
b/debian/patches/features/all/rt/mm-slab-move-debug-out.patch @@ -7,23 +7,21 @@ Signed-off-by: Thomas Gleixner mm/slab.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) -Index: linux-stable/mm/slab.c -=================================================================== ---- linux-stable.orig/mm/slab.c -+++ linux-stable/mm/slab.c -@@ -3991,10 +3991,10 @@ void kmem_cache_free(struct kmem_cache * - { - unsigned long flags; +--- a/mm/slab.c ++++ b/mm/slab.c +@@ -3895,10 +3895,10 @@ void kmem_cache_free(struct kmem_cache * + if (!cachep) + return; - local_irq_save(flags); debug_check_no_locks_freed(objp, cachep->object_size); if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) debug_check_no_obj_freed(objp, cachep->object_size); + local_irq_save(flags); - __cache_free(cachep, objp, __builtin_return_address(0)); + __cache_free(cachep, objp, _RET_IP_); local_irq_restore(flags); -@@ -4020,12 +4020,12 @@ void kfree(const void *objp) +@@ -3924,12 +3924,12 @@ void kfree(const void *objp) if (unlikely(ZERO_OR_NULL_PTR(objp))) return; @@ -34,6 +32,6 @@ Index: linux-stable/mm/slab.c debug_check_no_obj_freed(objp, c->object_size); + local_irq_save(flags); - __cache_free(c, (void *)objp, __builtin_return_address(0)); + __cache_free(c, (void *)objp, _RET_IP_); local_irq_restore(flags); } diff --git a/debian/patches/features/all/rt/mm-slab-wrap-functions.patch b/debian/patches/features/all/rt/mm-slab-wrap-functions.patch index 1758e6214..b6f823af3 100644 --- a/debian/patches/features/all/rt/mm-slab-wrap-functions.patch +++ b/debian/patches/features/all/rt/mm-slab-wrap-functions.patch @@ -4,14 +4,12 @@ Date: Sat, 18 Jun 2011 19:44:43 +0200 Signed-off-by: Thomas Gleixner --- - mm/slab.c | 154 ++++++++++++++++++++++++++++++++++++++++++-------------------- - 1 file changed, 105 insertions(+), 49 deletions(-) + mm/slab.c | 163 +++++++++++++++++++++++++++++++++++++++++++------------------- + 1 file changed, 114 insertions(+), 49 deletions(-) -Index: linux-stable/mm/slab.c 
-=================================================================== ---- linux-stable.orig/mm/slab.c -+++ linux-stable/mm/slab.c -@@ -117,6 +117,7 @@ +--- a/mm/slab.c ++++ b/mm/slab.c +@@ -116,6 +116,7 @@ #include #include #include @@ -19,7 +17,7 @@ Index: linux-stable/mm/slab.c #include -@@ -703,12 +704,40 @@ static void slab_set_debugobj_lock_class +@@ -696,12 +697,49 @@ static void slab_set_debugobj_lock_class #endif static DEFINE_PER_CPU(struct delayed_work, slab_reap_work); @@ -38,11 +36,20 @@ Index: linux-stable/mm/slab.c +{ + unsigned int i; + -+ for_each_online_cpu(i) { -+ spin_lock_irq(&per_cpu(slab_lock, i).lock); ++ get_cpu_light(); ++ for_each_online_cpu(i) + func(arg, i); -+ spin_unlock_irq(&per_cpu(slab_lock, i).lock); -+ } ++ put_cpu_light(); ++} ++ ++static void lock_slab_on(unsigned int cpu) ++{ ++ local_lock_irq_on(slab_lock, cpu); ++} ++ ++static void unlock_slab_on(unsigned int cpu) ++{ ++ local_unlock_irq_on(slab_lock, cpu); +} +#endif @@ -60,7 +67,7 @@ Index: linux-stable/mm/slab.c static inline struct kmem_cache *__find_general_cachep(size_t size, gfp_t gfpflags) { -@@ -1175,9 +1204,10 @@ static void reap_alien(struct kmem_cache +@@ -1171,9 +1209,10 @@ static void reap_alien(struct kmem_cache if (l3->alien) { struct array_cache *ac = l3->alien[node]; @@ -73,7 +80,7 @@ Index: linux-stable/mm/slab.c } } } -@@ -1192,9 +1222,9 @@ static void drain_alien_cache(struct kme +@@ -1188,9 +1227,9 @@ static void drain_alien_cache(struct kme for_each_online_node(i) { ac = alien[i]; if (ac) { @@ -85,7 +92,7 @@ Index: linux-stable/mm/slab.c } } } -@@ -1273,11 +1303,11 @@ static int init_cache_nodelists_node(int +@@ -1269,11 +1308,11 @@ static int init_cache_nodelists_node(int cachep->nodelists[node] = l3; } @@ -99,7 +106,7 @@ Index: linux-stable/mm/slab.c } return 0; } -@@ -1302,7 +1332,7 @@ static void __cpuinit cpuup_canceled(lon +@@ -1298,7 +1337,7 @@ static void __cpuinit cpuup_canceled(lon if (!l3) goto free_array_cache; @@ -108,7 +115,7 @@ Index: 
linux-stable/mm/slab.c /* Free limit for this kmem_list3 */ l3->free_limit -= cachep->batchcount; -@@ -1310,7 +1340,7 @@ static void __cpuinit cpuup_canceled(lon +@@ -1306,7 +1345,7 @@ static void __cpuinit cpuup_canceled(lon free_block(cachep, nc->entry, nc->avail, node); if (!cpumask_empty(mask)) { @@ -117,7 +124,7 @@ Index: linux-stable/mm/slab.c goto free_array_cache; } -@@ -1324,7 +1354,7 @@ static void __cpuinit cpuup_canceled(lon +@@ -1320,7 +1359,7 @@ static void __cpuinit cpuup_canceled(lon alien = l3->alien; l3->alien = NULL; @@ -126,7 +133,7 @@ Index: linux-stable/mm/slab.c kfree(shared); if (alien) { -@@ -1398,7 +1428,7 @@ static int __cpuinit cpuup_prepare(long +@@ -1394,7 +1433,7 @@ static int __cpuinit cpuup_prepare(long l3 = cachep->nodelists[node]; BUG_ON(!l3); @@ -135,7 +142,7 @@ Index: linux-stable/mm/slab.c if (!l3->shared) { /* * We are serialised from CPU_DEAD or -@@ -1413,7 +1443,7 @@ static int __cpuinit cpuup_prepare(long +@@ -1409,7 +1448,7 @@ static int __cpuinit cpuup_prepare(long alien = NULL; } #endif @@ -144,16 +151,16 @@ Index: linux-stable/mm/slab.c kfree(shared); free_alien_cache(alien); if (cachep->flags & SLAB_DEBUG_OBJECTS) -@@ -1604,6 +1634,8 @@ void __init kmem_cache_init(void) +@@ -1612,6 +1651,8 @@ void __init kmem_cache_init(void) if (num_possible_nodes() == 1) use_alien_caches = 0; + local_irq_lock_init(slab_lock); + - for (i = 0; i < NUM_INIT_LISTS; i++) { + for (i = 0; i < NUM_INIT_LISTS; i++) kmem_list3_init(&initkmem_list3[i]); - if (i < MAX_NUMNODES) -@@ -2614,7 +2646,7 @@ __kmem_cache_create (const char *name, s + +@@ -2533,7 +2574,7 @@ __kmem_cache_create (struct kmem_cache * #if DEBUG static void check_irq_off(void) { @@ -162,7 +169,7 @@ Index: linux-stable/mm/slab.c } static void check_irq_on(void) -@@ -2649,26 +2681,37 @@ static void drain_array(struct kmem_cach +@@ -2568,26 +2609,37 @@ static void drain_array(struct kmem_cach struct array_cache *ac, int force, int node); @@ -205,7 +212,7 @@ Index: 
linux-stable/mm/slab.c check_irq_on(); for_each_online_node(node) { l3 = cachep->nodelists[node]; -@@ -2699,10 +2742,10 @@ static int drain_freelist(struct kmem_ca +@@ -2618,10 +2670,10 @@ static int drain_freelist(struct kmem_ca nr_freed = 0; while (nr_freed < tofree && !list_empty(&l3->slabs_free)) { @@ -218,7 +225,7 @@ Index: linux-stable/mm/slab.c goto out; } -@@ -2716,7 +2759,7 @@ static int drain_freelist(struct kmem_ca +@@ -2635,7 +2687,7 @@ static int drain_freelist(struct kmem_ca * to the cache. */ l3->free_objects -= cache->num; @@ -227,7 +234,7 @@ Index: linux-stable/mm/slab.c slab_destroy(cache, slabp); nr_freed++; } -@@ -3011,7 +3054,7 @@ static int cache_grow(struct kmem_cache +@@ -2910,7 +2962,7 @@ static int cache_grow(struct kmem_cache offset *= cachep->colour_off; if (local_flags & __GFP_WAIT) @@ -236,7 +243,7 @@ Index: linux-stable/mm/slab.c /* * The test for missing atomic flag is performed here, rather than -@@ -3041,7 +3084,7 @@ static int cache_grow(struct kmem_cache +@@ -2940,7 +2992,7 @@ static int cache_grow(struct kmem_cache cache_init_objs(cachep, slabp); if (local_flags & __GFP_WAIT) @@ -245,7 +252,7 @@ Index: linux-stable/mm/slab.c check_irq_off(); spin_lock(&l3->list_lock); -@@ -3055,7 +3098,7 @@ opps1: +@@ -2954,7 +3006,7 @@ opps1: kmem_freepages(cachep, objp); failed: if (local_flags & __GFP_WAIT) @@ -254,7 +261,7 @@ Index: linux-stable/mm/slab.c return 0; } -@@ -3469,11 +3512,11 @@ retry: +@@ -3368,11 +3420,11 @@ retry: * set and go into memory reserves if necessary. 
*/ if (local_flags & __GFP_WAIT) @@ -268,8 +275,8 @@ Index: linux-stable/mm/slab.c if (obj) { /* * Insert into the appropriate per node queues -@@ -3591,7 +3634,7 @@ __cache_alloc_node(struct kmem_cache *ca - return NULL; +@@ -3492,7 +3544,7 @@ slab_alloc_node(struct kmem_cache *cache + cachep = memcg_kmem_get_cache(cachep, flags); cache_alloc_debugcheck_before(cachep, flags); - local_irq_save(save_flags); @@ -277,7 +284,7 @@ Index: linux-stable/mm/slab.c if (nodeid == NUMA_NO_NODE) nodeid = slab_node; -@@ -3616,7 +3659,7 @@ __cache_alloc_node(struct kmem_cache *ca +@@ -3517,7 +3569,7 @@ slab_alloc_node(struct kmem_cache *cache /* ___cache_alloc_node can fall back to other nodes */ ptr = ____cache_alloc_node(cachep, flags, nodeid); out: @@ -286,8 +293,8 @@ Index: linux-stable/mm/slab.c ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags, flags); -@@ -3676,9 +3719,9 @@ __cache_alloc(struct kmem_cache *cachep, - return NULL; +@@ -3579,9 +3631,9 @@ slab_alloc(struct kmem_cache *cachep, gf + cachep = memcg_kmem_get_cache(cachep, flags); cache_alloc_debugcheck_before(cachep, flags); - local_irq_save(save_flags); @@ -298,31 +305,31 @@ Index: linux-stable/mm/slab.c objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags, flags); -@@ -3994,9 +4037,9 @@ void kmem_cache_free(struct kmem_cache * +@@ -3898,9 +3950,9 @@ void kmem_cache_free(struct kmem_cache * debug_check_no_locks_freed(objp, cachep->object_size); if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) debug_check_no_obj_freed(objp, cachep->object_size); - local_irq_save(flags); + local_lock_irqsave(slab_lock, flags); - __cache_free(cachep, objp, __builtin_return_address(0)); + __cache_free(cachep, objp, _RET_IP_); - local_irq_restore(flags); + local_unlock_irqrestore(slab_lock, flags); trace_kmem_cache_free(_RET_IP_, objp); } -@@ -4025,9 +4068,9 @@ void kfree(const 
void *objp) +@@ -3929,9 +3981,9 @@ void kfree(const void *objp) debug_check_no_locks_freed(objp, c->object_size); debug_check_no_obj_freed(objp, c->object_size); - local_irq_save(flags); + local_lock_irqsave(slab_lock, flags); - __cache_free(c, (void *)objp, __builtin_return_address(0)); + __cache_free(c, (void *)objp, _RET_IP_); - local_irq_restore(flags); + local_unlock_irqrestore(slab_lock, flags); } EXPORT_SYMBOL(kfree); -@@ -4070,7 +4113,7 @@ static int alloc_kmemlist(struct kmem_ca +@@ -3968,7 +4020,7 @@ static int alloc_kmemlist(struct kmem_ca if (l3) { struct array_cache *shared = l3->shared; @@ -331,7 +338,7 @@ Index: linux-stable/mm/slab.c if (shared) free_block(cachep, shared->entry, -@@ -4083,7 +4126,7 @@ static int alloc_kmemlist(struct kmem_ca +@@ -3981,7 +4033,7 @@ static int alloc_kmemlist(struct kmem_ca } l3->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num; @@ -340,7 +347,7 @@ Index: linux-stable/mm/slab.c kfree(shared); free_alien_cache(new_alien); continue; -@@ -4130,17 +4173,28 @@ struct ccupdate_struct { +@@ -4028,17 +4080,28 @@ struct ccupdate_struct { struct array_cache *new[0]; }; @@ -353,13 +360,13 @@ Index: linux-stable/mm/slab.c - check_irq_off(); - old = cpu_cache_get(new->cachep); + old = cpu_cache_get_on_cpu(new->cachep, cpu); -+ -+ new->cachep->array[cpu] = new->new[cpu]; -+ new->new[cpu] = old; -+} - new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()]; - new->new[smp_processor_id()] = old; ++ new->cachep->array[cpu] = new->new[cpu]; ++ new->new[cpu] = old; ++} ++ +#ifndef CONFIG_PREEMPT_RT_BASE +static void do_ccupdate_local(void *info) +{ @@ -373,8 +380,8 @@ Index: linux-stable/mm/slab.c +#endif /* Always called with the slab_mutex held */ - static int do_tune_cpucache(struct kmem_cache *cachep, int limit, -@@ -4166,7 +4220,7 @@ static int do_tune_cpucache(struct kmem_ + static int __do_tune_cpucache(struct kmem_cache *cachep, int limit, +@@ -4064,7 +4127,7 @@ static int 
__do_tune_cpucache(struct kme } new->cachep = cachep; @@ -383,7 +390,7 @@ Index: linux-stable/mm/slab.c check_irq_on(); cachep->batchcount = batchcount; -@@ -4177,9 +4231,11 @@ static int do_tune_cpucache(struct kmem_ +@@ -4075,9 +4138,11 @@ static int __do_tune_cpucache(struct kme struct array_cache *ccold = new->new[i]; if (!ccold) continue; @@ -397,7 +404,7 @@ Index: linux-stable/mm/slab.c kfree(ccold); } kfree(new); -@@ -4255,7 +4311,7 @@ static void drain_array(struct kmem_cach +@@ -4192,7 +4257,7 @@ static void drain_array(struct kmem_cach if (ac->touched && !force) { ac->touched = 0; } else { @@ -406,7 +413,7 @@ Index: linux-stable/mm/slab.c if (ac->avail) { tofree = force ? ac->avail : (ac->limit + 4) / 5; if (tofree > ac->avail) -@@ -4265,7 +4321,7 @@ static void drain_array(struct kmem_cach +@@ -4202,7 +4267,7 @@ static void drain_array(struct kmem_cach memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail); } @@ -415,7 +422,7 @@ Index: linux-stable/mm/slab.c } } -@@ -4404,7 +4460,7 @@ static int s_show(struct seq_file *m, vo +@@ -4295,7 +4360,7 @@ void get_slabinfo(struct kmem_cache *cac continue; check_irq_on(); @@ -424,7 +431,7 @@ Index: linux-stable/mm/slab.c list_for_each_entry(slabp, &l3->slabs_full, list) { if (slabp->inuse != cachep->num && !error) -@@ -4429,7 +4485,7 @@ static int s_show(struct seq_file *m, vo +@@ -4320,7 +4385,7 @@ void get_slabinfo(struct kmem_cache *cac if (l3->shared) shared_avail += l3->shared->avail; @@ -433,7 +440,7 @@ Index: linux-stable/mm/slab.c } num_slabs += active_slabs; num_objs = num_slabs * cachep->num; -@@ -4658,13 +4714,13 @@ static int leaks_show(struct seq_file *m +@@ -4520,13 +4585,13 @@ static int leaks_show(struct seq_file *m continue; check_irq_on(); diff --git a/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch b/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch index 59d9bf44f..61f6d7fb5 100644 --- 
a/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch +++ b/debian/patches/features/all/rt/mm-vmalloc-use-get-cpu-light.patch @@ -7,10 +7,8 @@ Signed-off-by: Thomas Gleixner mm/vmalloc.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) -Index: linux-stable/mm/vmalloc.c -=================================================================== ---- linux-stable.orig/mm/vmalloc.c -+++ linux-stable/mm/vmalloc.c +--- a/mm/vmalloc.c ++++ b/mm/vmalloc.c @@ -782,7 +782,7 @@ static struct vmap_block *new_vmap_block struct vmap_block *vb; struct vmap_area *va; diff --git a/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch b/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch new file mode 100644 index 000000000..92508952d --- /dev/null +++ b/debian/patches/features/all/rt/mmci-remove-bogus-irq-save.patch @@ -0,0 +1,39 @@ +Subject: mmci: Remove bogus local_irq_save() +From: Thomas Gleixner +Date: Wed, 09 Jan 2013 12:11:12 +0100 + +On !RT interrupt runs with interrupts disabled. On RT it's in a +thread, so no need to disable interrupts at all. + +Signed-off-by: Thomas Gleixner +--- + drivers/mmc/host/mmci.c | 5 ----- + 1 file changed, 5 deletions(-) + +--- a/drivers/mmc/host/mmci.c ++++ b/drivers/mmc/host/mmci.c +@@ -930,15 +930,12 @@ static irqreturn_t mmci_pio_irq(int irq, + struct sg_mapping_iter *sg_miter = &host->sg_miter; + struct variant_data *variant = host->variant; + void __iomem *base = host->base; +- unsigned long flags; + u32 status; + + status = readl(base + MMCISTATUS); + + dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status); + +- local_irq_save(flags); +- + do { + unsigned int remain, len; + char *buffer; +@@ -978,8 +975,6 @@ static irqreturn_t mmci_pio_irq(int irq, + + sg_miter_stop(sg_miter); + +- local_irq_restore(flags); +- + /* + * If we have less than the fifo 'half-full' threshold to transfer, + * trigger a PIO interrupt as soon as any data is available. 
diff --git a/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch b/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch index 4294d9c92..23b771df7 100644 --- a/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch +++ b/debian/patches/features/all/rt/mutex-no-spin-on-rt.patch @@ -7,13 +7,11 @@ Signed-off-by: Thomas Gleixner kernel/Kconfig.locks | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -Index: linux-stable/kernel/Kconfig.locks -=================================================================== ---- linux-stable.orig/kernel/Kconfig.locks -+++ linux-stable/kernel/Kconfig.locks -@@ -199,4 +199,4 @@ config INLINE_WRITE_UNLOCK_IRQRESTORE - def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE +--- a/kernel/Kconfig.locks ++++ b/kernel/Kconfig.locks +@@ -222,4 +222,4 @@ endif config MUTEX_SPIN_ON_OWNER -- def_bool SMP && !DEBUG_MUTEXES -+ def_bool SMP && !DEBUG_MUTEXES && !PREEMPT_RT_FULL + def_bool y +- depends on SMP && !DEBUG_MUTEXES ++ depends on SMP && !DEBUG_MUTEXES && !PREEMPT_RT_FULL diff --git a/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch b/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch index f6f222600..e14da8331 100644 --- a/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch +++ b/debian/patches/features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch @@ -9,10 +9,8 @@ Signed-off-by: Thomas Gleixner net/core/skbuff.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) -Index: linux-stable/net/core/skbuff.c -=================================================================== ---- linux-stable.orig/net/core/skbuff.c -+++ linux-stable/net/core/skbuff.c +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c @@ -60,6 +60,7 @@ #include #include @@ -21,25 +19,25 @@ Index: linux-stable/net/core/skbuff.c #include #include -@@ -345,6 +346,7 @@ struct netdev_alloc_cache { - unsigned int 
pagecnt_bias; +@@ -347,6 +348,7 @@ struct netdev_alloc_cache { + unsigned int pagecnt_bias; }; static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache); +static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock); - #define NETDEV_PAGECNT_BIAS (PAGE_SIZE / SMP_CACHE_BYTES) - -@@ -354,7 +356,7 @@ static void *__netdev_alloc_frag(unsigne - void *data = NULL; + #define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768) + #define NETDEV_FRAG_PAGE_MAX_SIZE (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER) +@@ -359,7 +361,7 @@ static void *__netdev_alloc_frag(unsigne + int order; unsigned long flags; - local_irq_save(flags); + local_lock_irqsave(netdev_alloc_lock, flags); nc = &__get_cpu_var(netdev_alloc_cache); - if (unlikely(!nc->page)) { + if (unlikely(!nc->frag.page)) { refill: -@@ -379,7 +381,7 @@ recycle: - nc->offset += fragsz; +@@ -393,7 +395,7 @@ recycle: + nc->frag.offset += fragsz; nc->pagecnt_bias--; end: - local_irq_restore(flags); diff --git a/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch b/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch index 5ee555b18..4234945d9 100644 --- a/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch +++ b/debian/patches/features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch @@ -15,10 +15,8 @@ Cc: stable-rt@vger.kernel.org net/netfilter/core.c | 6 ++++++ 3 files changed, 17 insertions(+) -Index: linux-stable/include/linux/locallock.h -=================================================================== ---- linux-stable.orig/include/linux/locallock.h -+++ linux-stable/include/linux/locallock.h +--- a/include/linux/locallock.h ++++ b/include/linux/locallock.h @@ -25,6 +25,9 @@ struct local_irq_lock { DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \ .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) } @@ -37,19 +35,17 @@ Index: linux-stable/include/linux/locallock.h static inline void local_irq_lock_init(int lvar) { } -Index: 
linux-stable/include/linux/netfilter/x_tables.h -=================================================================== ---- linux-stable.orig/include/linux/netfilter/x_tables.h -+++ linux-stable/include/linux/netfilter/x_tables.h -@@ -186,6 +186,7 @@ struct xt_counters_info { - #ifdef __KERNEL__ +--- a/include/linux/netfilter/x_tables.h ++++ b/include/linux/netfilter/x_tables.h +@@ -3,6 +3,7 @@ + #include +#include + #include /** - * struct xt_action_param - parameters for matches/targets -@@ -466,6 +467,8 @@ extern void xt_free_table_info(struct xt +@@ -284,6 +285,8 @@ extern void xt_free_table_info(struct xt */ DECLARE_PER_CPU(seqcount_t, xt_recseq); @@ -58,7 +54,7 @@ Index: linux-stable/include/linux/netfilter/x_tables.h /** * xt_write_recseq_begin - start of a write section * -@@ -480,6 +483,9 @@ static inline unsigned int xt_write_recs +@@ -298,6 +301,9 @@ static inline unsigned int xt_write_recs { unsigned int addend; @@ -68,7 +64,7 @@ Index: linux-stable/include/linux/netfilter/x_tables.h /* * Low order bit of sequence is set if we already * called xt_write_recseq_begin(). 
-@@ -510,6 +516,7 @@ static inline void xt_write_recseq_end(u +@@ -328,6 +334,7 @@ static inline void xt_write_recseq_end(u /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */ smp_wmb(); __this_cpu_add(xt_recseq.sequence, addend); @@ -76,10 +72,8 @@ Index: linux-stable/include/linux/netfilter/x_tables.h } /* -Index: linux-stable/net/netfilter/core.c -=================================================================== ---- linux-stable.orig/net/netfilter/core.c -+++ linux-stable/net/netfilter/core.c +--- a/net/netfilter/core.c ++++ b/net/netfilter/core.c @@ -20,11 +20,17 @@ #include #include diff --git a/debian/patches/features/all/rt/net-flip-lock-dep-thingy.patch b/debian/patches/features/all/rt/net-flip-lock-dep-thingy.patch index aa2628a0a..f4fed6173 100644 --- a/debian/patches/features/all/rt/net-flip-lock-dep-thingy.patch +++ b/debian/patches/features/all/rt/net-flip-lock-dep-thingy.patch @@ -93,11 +93,9 @@ Signed-off-by: Thomas Gleixner net/core/sock.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) -Index: linux-stable/net/core/sock.c -=================================================================== ---- linux-stable.orig/net/core/sock.c -+++ linux-stable/net/core/sock.c -@@ -2204,12 +2204,11 @@ void lock_sock_nested(struct sock *sk, i +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -2287,12 +2287,11 @@ void lock_sock_nested(struct sock *sk, i if (sk->sk_lock.owned) __lock_sock(sk); sk->sk_lock.owned = 1; diff --git a/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch b/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch new file mode 100644 index 000000000..f4f675a5f --- /dev/null +++ b/debian/patches/features/all/rt/net-make-devnet_rename_seq-a-mutex.patch @@ -0,0 +1,150 @@ +From b83c29516a334722b4c38dbc2bc3caf58ce46b88 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Wed, 20 Mar 2013 18:06:20 +0100 +Subject: [PATCH] net: make devnet_rename_seq a mutex + +On RT 
write_seqcount_begin() disables preemption and device_rename() +allocates memory with GFP_KERNEL and grabs later the sysfs_mutex mutex. +Since I don't see a reason why this can't be a mutex, make it one. We +probably don't have that much reads at the same time in the hot path. + +Signed-off-by: Sebastian Andrzej Siewior +--- + include/linux/netdevice.h | 2 +- + net/core/dev.c | 23 +++++++++++------------ + net/core/sock.c | 8 +++----- + 3 files changed, 15 insertions(+), 18 deletions(-) + +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -1579,7 +1579,7 @@ extern int call_netdevice_notifiers(unsi + + extern rwlock_t dev_base_lock; /* Device list lock */ + +-extern seqcount_t devnet_rename_seq; /* Device rename seq */ ++extern struct mutex devnet_rename_mutex; + + + #define for_each_netdev(net, d) \ +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -203,7 +203,7 @@ static struct list_head offload_base __r + DEFINE_RWLOCK(dev_base_lock); + EXPORT_SYMBOL(dev_base_lock); + +-seqcount_t devnet_rename_seq; ++DEFINE_MUTEX(devnet_rename_mutex); + + static inline void dev_base_seq_inc(struct net *net) + { +@@ -1093,10 +1093,11 @@ int dev_change_name(struct net_device *d + if (dev->flags & IFF_UP) + return -EBUSY; + +- write_seqcount_begin(&devnet_rename_seq); ++ ++ mutex_lock(&devnet_rename_mutex); + + if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { +- write_seqcount_end(&devnet_rename_seq); ++ mutex_unlock(&devnet_rename_mutex); + return 0; + } + +@@ -1104,7 +1105,7 @@ int dev_change_name(struct net_device *d + + err = dev_get_valid_name(net, dev, newname); + if (err < 0) { +- write_seqcount_end(&devnet_rename_seq); ++ mutex_unlock(&devnet_rename_mutex); + return err; + } + +@@ -1112,11 +1113,11 @@ rollback: + ret = device_rename(&dev->dev, dev->name); + if (ret) { + memcpy(dev->name, oldname, IFNAMSIZ); +- write_seqcount_end(&devnet_rename_seq); ++ mutex_unlock(&devnet_rename_mutex); + return ret; + } + +- write_seqcount_end(&devnet_rename_seq); ++ 
mutex_unlock(&devnet_rename_mutex); + + write_lock_bh(&dev_base_lock); + hlist_del_rcu(&dev->name_hlist); +@@ -1135,7 +1136,7 @@ rollback: + /* err >= 0 after dev_alloc_name() or stores the first errno */ + if (err >= 0) { + err = ret; +- write_seqcount_begin(&devnet_rename_seq); ++ mutex_lock(&devnet_rename_mutex); + memcpy(dev->name, oldname, IFNAMSIZ); + goto rollback; + } else { +@@ -4214,7 +4215,6 @@ static int dev_ifname(struct net *net, s + { + struct net_device *dev; + struct ifreq ifr; +- unsigned seq; + + /* + * Fetch the caller's info block. +@@ -4223,19 +4223,18 @@ static int dev_ifname(struct net *net, s + if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) + return -EFAULT; + +-retry: +- seq = read_seqcount_begin(&devnet_rename_seq); ++ mutex_lock(&devnet_rename_mutex); + rcu_read_lock(); + dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex); + if (!dev) { + rcu_read_unlock(); ++ mutex_unlock(&devnet_rename_mutex); + return -ENODEV; + } + + strcpy(ifr.ifr_name, dev->name); + rcu_read_unlock(); +- if (read_seqcount_retry(&devnet_rename_seq, seq)) +- goto retry; ++ mutex_unlock(&devnet_rename_mutex); + + if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) + return -EFAULT; +--- a/net/core/sock.c ++++ b/net/core/sock.c +@@ -571,7 +571,6 @@ static int sock_getbindtodevice(struct s + struct net *net = sock_net(sk); + struct net_device *dev; + char devname[IFNAMSIZ]; +- unsigned seq; + + if (sk->sk_bound_dev_if == 0) { + len = 0; +@@ -582,20 +581,19 @@ static int sock_getbindtodevice(struct s + if (len < IFNAMSIZ) + goto out; + +-retry: +- seq = read_seqcount_begin(&devnet_rename_seq); ++ mutex_lock(&devnet_rename_mutex); + rcu_read_lock(); + dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if); + ret = -ENODEV; + if (!dev) { + rcu_read_unlock(); ++ mutex_unlock(&devnet_rename_mutex); + goto out; + } + + strcpy(devname, dev->name); + rcu_read_unlock(); +- if (read_seqcount_retry(&devnet_rename_seq, seq)) +- goto retry; ++ mutex_unlock(&devnet_rename_mutex); + 
+ len = strlen(devname) + 1; + diff --git a/debian/patches/features/all/rt/net-netif-rx-ni-use-local-bh-disable.patch b/debian/patches/features/all/rt/net-netif-rx-ni-use-local-bh-disable.patch index 1cfa43b73..5d96fc466 100644 --- a/debian/patches/features/all/rt/net-netif-rx-ni-use-local-bh-disable.patch +++ b/debian/patches/features/all/rt/net-netif-rx-ni-use-local-bh-disable.patch @@ -13,11 +13,9 @@ Signed-off-by: Thomas Gleixner net/core/dev.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) -Index: linux-stable/net/core/dev.c -=================================================================== ---- linux-stable.orig/net/core/dev.c -+++ linux-stable/net/core/dev.c -@@ -2993,11 +2993,9 @@ int netif_rx_ni(struct sk_buff *skb) +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -3118,11 +3118,9 @@ int netif_rx_ni(struct sk_buff *skb) { int err; diff --git a/debian/patches/features/all/rt/net-netif_rx_ni-migrate-disable.patch b/debian/patches/features/all/rt/net-netif_rx_ni-migrate-disable.patch index c14891347..b5b5e7534 100644 --- a/debian/patches/features/all/rt/net-netif_rx_ni-migrate-disable.patch +++ b/debian/patches/features/all/rt/net-netif_rx_ni-migrate-disable.patch @@ -7,11 +7,9 @@ Signed-off-by: Thomas Gleixner net/core/dev.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) -Index: linux-stable/net/core/dev.c -=================================================================== ---- linux-stable.orig/net/core/dev.c -+++ linux-stable/net/core/dev.c -@@ -2990,11 +2990,11 @@ int netif_rx_ni(struct sk_buff *skb) +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -3115,11 +3115,11 @@ int netif_rx_ni(struct sk_buff *skb) { int err; diff --git a/debian/patches/features/all/rt/net-tx-action-avoid-livelock-on-rt.patch b/debian/patches/features/all/rt/net-tx-action-avoid-livelock-on-rt.patch index 38eadcd6f..92a988ae2 100644 --- a/debian/patches/features/all/rt/net-tx-action-avoid-livelock-on-rt.patch +++ 
b/debian/patches/features/all/rt/net-tx-action-avoid-livelock-on-rt.patch @@ -42,11 +42,9 @@ Signed-off-by: Thomas Gleixner net/core/dev.c | 32 +++++++++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) -Index: linux-stable/net/core/dev.c -=================================================================== ---- linux-stable.orig/net/core/dev.c -+++ linux-stable/net/core/dev.c -@@ -3000,6 +3000,36 @@ int netif_rx_ni(struct sk_buff *skb) +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -3125,6 +3125,36 @@ int netif_rx_ni(struct sk_buff *skb) } EXPORT_SYMBOL(netif_rx_ni); @@ -83,7 +81,7 @@ Index: linux-stable/net/core/dev.c static void net_tx_action(struct softirq_action *h) { struct softnet_data *sd = &__get_cpu_var(softnet_data); -@@ -3038,7 +3068,7 @@ static void net_tx_action(struct softirq +@@ -3163,7 +3193,7 @@ static void net_tx_action(struct softirq head = head->next_sched; root_lock = qdisc_lock(q); diff --git a/debian/patches/features/all/rt/net-use-cpu-chill.patch b/debian/patches/features/all/rt/net-use-cpu-chill.patch index 14565a92f..d0451fb12 100644 --- a/debian/patches/features/all/rt/net-use-cpu-chill.patch +++ b/debian/patches/features/all/rt/net-use-cpu-chill.patch @@ -13,10 +13,8 @@ Cc: stable-rt@vger.kernel.org net/rds/ib_rdma.c | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) -Index: linux-stable/net/packet/af_packet.c -=================================================================== ---- linux-stable.orig/net/packet/af_packet.c -+++ linux-stable/net/packet/af_packet.c +--- a/net/packet/af_packet.c ++++ b/net/packet/af_packet.c @@ -88,6 +88,7 @@ #include #include @@ -25,7 +23,7 @@ Index: linux-stable/net/packet/af_packet.c #ifdef CONFIG_INET #include -@@ -666,7 +667,7 @@ static void prb_retire_rx_blk_timer_expi +@@ -553,7 +554,7 @@ static void prb_retire_rx_blk_timer_expi if (BLOCK_NUM_PKTS(pbd)) { while (atomic_read(&pkc->blk_fill_in_prog)) { /* Waiting for skb_copy_bits to finish... 
*/ @@ -34,7 +32,7 @@ Index: linux-stable/net/packet/af_packet.c } } -@@ -920,7 +921,7 @@ static void prb_retire_current_block(str +@@ -807,7 +808,7 @@ static void prb_retire_current_block(str if (!(status & TP_STATUS_BLK_TMO)) { while (atomic_read(&pkc->blk_fill_in_prog)) { /* Waiting for skb_copy_bits to finish... */ @@ -43,10 +41,8 @@ Index: linux-stable/net/packet/af_packet.c } } prb_close_block(pkc, pbd, po, status); -Index: linux-stable/net/rds/ib_rdma.c -=================================================================== ---- linux-stable.orig/net/rds/ib_rdma.c -+++ linux-stable/net/rds/ib_rdma.c +--- a/net/rds/ib_rdma.c ++++ b/net/rds/ib_rdma.c @@ -34,6 +34,7 @@ #include #include diff --git a/debian/patches/features/all/rt/net-use-cpu-light-in-ip-send-unicast-reply.patch b/debian/patches/features/all/rt/net-use-cpu-light-in-ip-send-unicast-reply.patch index 438028250..c93490405 100644 --- a/debian/patches/features/all/rt/net-use-cpu-light-in-ip-send-unicast-reply.patch +++ b/debian/patches/features/all/rt/net-use-cpu-light-in-ip-send-unicast-reply.patch @@ -7,11 +7,9 @@ Signed-off-by: Thomas Gleixner net/ipv4/ip_output.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) -Index: linux-stable/net/ipv4/ip_output.c -=================================================================== ---- linux-stable.orig/net/ipv4/ip_output.c -+++ linux-stable/net/ipv4/ip_output.c -@@ -1516,7 +1516,8 @@ void ip_send_unicast_reply(struct net *n +--- a/net/ipv4/ip_output.c ++++ b/net/ipv4/ip_output.c +@@ -1508,7 +1508,8 @@ void ip_send_unicast_reply(struct net *n if (IS_ERR(rt)) return; @@ -21,7 +19,7 @@ Index: linux-stable/net/ipv4/ip_output.c inet->tos = arg->tos; sk = &inet->sk; -@@ -1540,7 +1541,7 @@ void ip_send_unicast_reply(struct net *n +@@ -1532,7 +1533,7 @@ void ip_send_unicast_reply(struct net *n ip_push_pending_frames(sk, &fl4); } diff --git a/debian/patches/features/all/rt/net-wireless-warn-nort.patch 
b/debian/patches/features/all/rt/net-wireless-warn-nort.patch index 3cdda5446..005ecd767 100644 --- a/debian/patches/features/all/rt/net-wireless-warn-nort.patch +++ b/debian/patches/features/all/rt/net-wireless-warn-nort.patch @@ -7,16 +7,14 @@ Signed-off-by: Thomas Gleixner net/mac80211/rx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -Index: linux-stable/net/mac80211/rx.c -=================================================================== ---- linux-stable.orig/net/mac80211/rx.c -+++ linux-stable/net/mac80211/rx.c -@@ -2960,7 +2960,7 @@ void ieee80211_rx(struct ieee80211_hw *h +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -3144,7 +3144,7 @@ void ieee80211_rx(struct ieee80211_hw *h struct ieee80211_supported_band *sband; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); - WARN_ON_ONCE(softirq_count() == 0); + WARN_ON_ONCE_NONRT(softirq_count() == 0); - if (WARN_ON(status->band < 0 || - status->band >= IEEE80211_NUM_BANDS)) + if (WARN_ON(status->band >= IEEE80211_NUM_BANDS)) + goto drop; diff --git a/debian/patches/features/all/rt/ntp-make-ntp-lock-raw-sigh.patch b/debian/patches/features/all/rt/ntp-make-ntp-lock-raw-sigh.patch index 72d9875e1..a3ab0e799 100644 --- a/debian/patches/features/all/rt/ntp-make-ntp-lock-raw-sigh.patch +++ b/debian/patches/features/all/rt/ntp-make-ntp-lock-raw-sigh.patch @@ -10,10 +10,8 @@ Signed-off-by: Thomas Gleixner kernel/time/ntp.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) -Index: linux-stable/kernel/time/ntp.c -=================================================================== ---- linux-stable.orig/kernel/time/ntp.c -+++ linux-stable/kernel/time/ntp.c +--- a/kernel/time/ntp.c ++++ b/kernel/time/ntp.c @@ -22,7 +22,7 @@ * NTP timekeeping variables: */ diff --git a/debian/patches/features/all/rt/of-convert-devtree-lock.patch b/debian/patches/features/all/rt/of-convert-devtree-lock-from-rw_lock-to-raw-spinlock.patch similarity index 64% rename from 
debian/patches/features/all/rt/of-convert-devtree-lock.patch rename to debian/patches/features/all/rt/of-convert-devtree-lock-from-rw_lock-to-raw-spinlock.patch index 965e0021a..38caa0035 100644 --- a/debian/patches/features/all/rt/of-convert-devtree-lock.patch +++ b/debian/patches/features/all/rt/of-convert-devtree-lock-from-rw_lock-to-raw-spinlock.patch @@ -1,18 +1,45 @@ -Subject: of-convert-devtree-lock.patch +Subject: OF: Convert devtree lock from rw_lock to raw spinlock +From: Paul Gortmaker +Date: Wed, 6 Feb 2013 15:30:56 -0500 + From: Thomas Gleixner -Date: Mon, 21 Mar 2011 14:35:34 +0100 + +With the locking cleanup in place (from "OF: Fixup resursive +locking code paths"), we can now do the conversion from the +rw_lock to a raw spinlock as required for preempt-rt. + +The previous cleanup and this conversion were originally +separate since they predated when mainline got raw spinlock (in +commit c2f21ce2e31286a "locking: Implement new raw_spinlock"). + +So, at that point in time, the cleanup was considered plausible +for mainline, but not this conversion. In any case, we've kept +them separate as it makes for easier review and better bisection. 
Signed-off-by: Thomas Gleixner +Cc: Grant Likely +Cc: Sam Ravnborg +Cc: +Cc: Rob Herring +Link: http://lkml.kernel.org/r/1360182656-15898-1-git-send-email-paul.gortmaker@windriver.com +Signed-off-by: Thomas Gleixner +[PG: taken from preempt-rt, update subject & add a commit log] +Signed-off-by: Paul Gortmaker --- - arch/sparc/kernel/prom_common.c | 4 - - drivers/of/base.c | 96 ++++++++++++++++++++++------------------ - include/linux/of.h | 2 - 3 files changed, 57 insertions(+), 45 deletions(-) -Index: linux-stable/arch/sparc/kernel/prom_common.c -=================================================================== ---- linux-stable.orig/arch/sparc/kernel/prom_common.c -+++ linux-stable/arch/sparc/kernel/prom_common.c +[v2: recent commit e81b329 ("powerpc+of: Add /proc device tree + updating to of node add/remove") added two more instances of + write_unlock that also needed converting to raw_spin_unlock. + Retested (boot) on sbc8548, defconfig builds on arm/sparc; no + new warnings observed.] + + arch/sparc/kernel/prom_common.c | 4 - + drivers/of/base.c | 100 ++++++++++++++++++++++------------------ + include/linux/of.h | 2 + 3 files changed, 59 insertions(+), 47 deletions(-) + +--- a/arch/sparc/kernel/prom_common.c ++++ b/arch/sparc/kernel/prom_common.c @@ -64,7 +64,7 @@ int of_set_property(struct device_node * err = -ENODEV; @@ -31,11 +58,9 @@ Index: linux-stable/arch/sparc/kernel/prom_common.c mutex_unlock(&of_set_property_mutex); /* XXX Upate procfs if necessary... */ -Index: linux-stable/drivers/of/base.c -=================================================================== ---- linux-stable.orig/drivers/of/base.c -+++ linux-stable/drivers/of/base.c -@@ -54,7 +54,7 @@ static DEFINE_MUTEX(of_aliases_mutex); +--- a/drivers/of/base.c ++++ b/drivers/of/base.c +@@ -55,7 +55,7 @@ static DEFINE_MUTEX(of_aliases_mutex); /* use when traversing tree through the allnext, child, sibling, * or parent members of struct device_node. 
*/ @@ -44,7 +69,7 @@ Index: linux-stable/drivers/of/base.c int of_n_addr_cells(struct device_node *np) { -@@ -187,10 +187,11 @@ struct property *of_find_property(const +@@ -188,10 +188,11 @@ struct property *of_find_property(const int *lenp) { struct property *pp; @@ -58,13 +83,13 @@ Index: linux-stable/drivers/of/base.c return pp; } -@@ -208,13 +209,13 @@ struct device_node *of_find_all_nodes(st +@@ -209,13 +210,13 @@ struct device_node *of_find_all_nodes(st { struct device_node *np; - read_lock(&devtree_lock); + raw_spin_lock(&devtree_lock); - np = prev ? prev->allnext : allnodes; + np = prev ? prev->allnext : of_allnodes; for (; np != NULL; np = np->allnext) if (of_node_get(np)) break; @@ -74,7 +99,7 @@ Index: linux-stable/drivers/of/base.c return np; } EXPORT_SYMBOL(of_find_all_nodes); -@@ -273,11 +274,12 @@ static int __of_device_is_compatible(con +@@ -274,11 +275,12 @@ static int __of_device_is_compatible(con int of_device_is_compatible(const struct device_node *device, const char *compat) { @@ -89,7 +114,7 @@ Index: linux-stable/drivers/of/base.c return res; } EXPORT_SYMBOL(of_device_is_compatible); -@@ -339,13 +341,14 @@ EXPORT_SYMBOL(of_device_is_available); +@@ -340,13 +342,14 @@ EXPORT_SYMBOL(of_device_is_available); struct device_node *of_get_parent(const struct device_node *node) { struct device_node *np; @@ -106,7 +131,7 @@ Index: linux-stable/drivers/of/base.c return np; } EXPORT_SYMBOL(of_get_parent); -@@ -364,14 +367,15 @@ EXPORT_SYMBOL(of_get_parent); +@@ -365,14 +368,15 @@ EXPORT_SYMBOL(of_get_parent); struct device_node *of_get_next_parent(struct device_node *node) { struct device_node *parent; @@ -124,7 +149,7 @@ Index: linux-stable/drivers/of/base.c return parent; } -@@ -387,14 +391,15 @@ struct device_node *of_get_next_child(co +@@ -388,14 +392,15 @@ struct device_node *of_get_next_child(co struct device_node *prev) { struct device_node *next; @@ -142,7 +167,7 @@ Index: linux-stable/drivers/of/base.c return next; } 
EXPORT_SYMBOL(of_get_next_child); -@@ -412,7 +417,7 @@ struct device_node *of_get_next_availabl +@@ -413,7 +418,7 @@ struct device_node *of_get_next_availabl { struct device_node *next; @@ -151,7 +176,7 @@ Index: linux-stable/drivers/of/base.c next = prev ? prev->sibling : node->child; for (; next; next = next->sibling) { if (!of_device_is_available(next)) -@@ -421,7 +426,7 @@ struct device_node *of_get_next_availabl +@@ -422,7 +427,7 @@ struct device_node *of_get_next_availabl break; } of_node_put(prev); @@ -160,10 +185,10 @@ Index: linux-stable/drivers/of/base.c return next; } EXPORT_SYMBOL(of_get_next_available_child); -@@ -436,14 +441,15 @@ EXPORT_SYMBOL(of_get_next_available_chil +@@ -460,14 +465,15 @@ EXPORT_SYMBOL(of_get_child_by_name); struct device_node *of_find_node_by_path(const char *path) { - struct device_node *np = allnodes; + struct device_node *np = of_allnodes; + unsigned long flags; - read_lock(&devtree_lock); @@ -178,7 +203,7 @@ Index: linux-stable/drivers/of/base.c return np; } EXPORT_SYMBOL(of_find_node_by_path); -@@ -463,15 +469,16 @@ struct device_node *of_find_node_by_name +@@ -487,15 +493,16 @@ struct device_node *of_find_node_by_name const char *name) { struct device_node *np; @@ -186,7 +211,7 @@ Index: linux-stable/drivers/of/base.c - read_lock(&devtree_lock); + raw_spin_lock_irqsave(&devtree_lock, flags); - np = from ? from->allnext : allnodes; + np = from ? from->allnext : of_allnodes; for (; np; np = np->allnext) if (np->name && (of_node_cmp(np->name, name) == 0) && of_node_get(np)) @@ -197,7 +222,7 @@ Index: linux-stable/drivers/of/base.c return np; } EXPORT_SYMBOL(of_find_node_by_name); -@@ -492,15 +499,16 @@ struct device_node *of_find_node_by_type +@@ -516,15 +523,16 @@ struct device_node *of_find_node_by_type const char *type) { struct device_node *np; @@ -205,7 +230,7 @@ Index: linux-stable/drivers/of/base.c - read_lock(&devtree_lock); + raw_spin_lock_irqsave(&devtree_lock, flags); - np = from ? 
from->allnext : allnodes; + np = from ? from->allnext : of_allnodes; for (; np; np = np->allnext) if (np->type && (of_node_cmp(np->type, type) == 0) && of_node_get(np)) @@ -216,7 +241,7 @@ Index: linux-stable/drivers/of/base.c return np; } EXPORT_SYMBOL(of_find_node_by_type); -@@ -523,8 +531,9 @@ struct device_node *of_find_compatible_n +@@ -547,8 +555,9 @@ struct device_node *of_find_compatible_n const char *type, const char *compatible) { struct device_node *np; @@ -224,10 +249,10 @@ Index: linux-stable/drivers/of/base.c - read_lock(&devtree_lock); + raw_spin_lock_irqsave(&devtree_lock, flags); - np = from ? from->allnext : allnodes; + np = from ? from->allnext : of_allnodes; for (; np; np = np->allnext) { if (type -@@ -535,7 +544,7 @@ struct device_node *of_find_compatible_n +@@ -559,7 +568,7 @@ struct device_node *of_find_compatible_n break; } of_node_put(from); @@ -236,7 +261,7 @@ Index: linux-stable/drivers/of/base.c return np; } EXPORT_SYMBOL(of_find_compatible_node); -@@ -557,8 +566,9 @@ struct device_node *of_find_node_with_pr +@@ -581,8 +590,9 @@ struct device_node *of_find_node_with_pr { struct device_node *np; struct property *pp; @@ -244,10 +269,10 @@ Index: linux-stable/drivers/of/base.c - read_lock(&devtree_lock); + raw_spin_lock_irqsave(&devtree_lock, flags); - np = from ? from->allnext : allnodes; + np = from ? 
from->allnext : of_allnodes; for (; np; np = np->allnext) { for (pp = np->properties; pp; pp = pp->next) { -@@ -570,7 +580,7 @@ struct device_node *of_find_node_with_pr +@@ -594,7 +604,7 @@ struct device_node *of_find_node_with_pr } out: of_node_put(from); @@ -256,7 +281,7 @@ Index: linux-stable/drivers/of/base.c return np; } EXPORT_SYMBOL(of_find_node_with_property); -@@ -611,10 +621,11 @@ const struct of_device_id *of_match_node +@@ -635,10 +645,11 @@ const struct of_device_id *of_match_node const struct device_node *node) { const struct of_device_id *match; @@ -270,32 +295,36 @@ Index: linux-stable/drivers/of/base.c return match; } EXPORT_SYMBOL(of_match_node); -@@ -635,15 +646,16 @@ struct device_node *of_find_matching_nod - const struct of_device_id *matches) +@@ -661,11 +672,12 @@ struct device_node *of_find_matching_nod + const struct of_device_id **match) { struct device_node *np; + unsigned long flags; + if (match) + *match = NULL; + - read_lock(&devtree_lock); + raw_spin_lock_irqsave(&devtree_lock, flags); - np = from ? from->allnext : allnodes; + np = from ? 
from->allnext : of_allnodes; for (; np; np = np->allnext) { - if (__of_match_node(matches, np) && of_node_get(np)) - break; + if (__of_match_node(matches, np) && of_node_get(np)) { +@@ -675,7 +687,7 @@ struct device_node *of_find_matching_nod + } } of_node_put(from); - read_unlock(&devtree_lock); + raw_spin_unlock_irqrestore(&devtree_lock, flags); return np; } - EXPORT_SYMBOL(of_find_matching_node); -@@ -686,12 +698,12 @@ struct device_node *of_find_node_by_phan + EXPORT_SYMBOL(of_find_matching_node_and_match); +@@ -718,12 +730,12 @@ struct device_node *of_find_node_by_phan { struct device_node *np; - read_lock(&devtree_lock); + raw_spin_lock(&devtree_lock); - for (np = allnodes; np; np = np->allnext) + for (np = of_allnodes; np; np = np->allnext) if (np->phandle == handle) break; of_node_get(np); @@ -304,8 +333,8 @@ Index: linux-stable/drivers/of/base.c return np; } EXPORT_SYMBOL(of_find_node_by_phandle); -@@ -1063,18 +1075,18 @@ int prom_add_property(struct device_node - unsigned long flags; +@@ -1195,18 +1207,18 @@ int of_add_property(struct device_node * + return rc; prop->next = NULL; - write_lock_irqsave(&devtree_lock, flags); @@ -326,16 +355,16 @@ Index: linux-stable/drivers/of/base.c #ifdef CONFIG_PROC_DEVICETREE /* try to add to proc as well if it was initialized */ -@@ -1099,7 +1111,7 @@ int prom_remove_property(struct device_n - unsigned long flags; - int found = 0; +@@ -1236,7 +1248,7 @@ int of_remove_property(struct device_nod + if (rc) + return rc; - write_lock_irqsave(&devtree_lock, flags); + raw_spin_lock_irqsave(&devtree_lock, flags); next = &np->properties; while (*next) { if (*next == prop) { -@@ -1112,7 +1124,7 @@ int prom_remove_property(struct device_n +@@ -1249,7 +1261,7 @@ int of_remove_property(struct device_nod } next = &(*next)->next; } @@ -344,16 +373,16 @@ Index: linux-stable/drivers/of/base.c if (!found) return -ENODEV; -@@ -1149,7 +1161,7 @@ int prom_update_property(struct device_n +@@ -1289,7 +1301,7 @@ int of_update_property(struct 
device_nod if (!oldprop) - return prom_add_property(np, newprop); + return of_add_property(np, newprop); - write_lock_irqsave(&devtree_lock, flags); + raw_spin_lock_irqsave(&devtree_lock, flags); next = &np->properties; while (*next) { if (*next == oldprop) { -@@ -1163,7 +1175,7 @@ int prom_update_property(struct device_n +@@ -1303,7 +1315,7 @@ int of_update_property(struct device_nod } next = &(*next)->next; } @@ -362,45 +391,55 @@ Index: linux-stable/drivers/of/base.c if (!found) return -ENODEV; -@@ -1193,12 +1205,12 @@ void of_attach_node(struct device_node * - { - unsigned long flags; +@@ -1376,12 +1388,12 @@ int of_attach_node(struct device_node *n + if (rc) + return rc; - write_lock_irqsave(&devtree_lock, flags); + raw_spin_lock_irqsave(&devtree_lock, flags); np->sibling = np->parent->child; - np->allnext = allnodes; + np->allnext = of_allnodes; np->parent->child = np; - allnodes = np; + of_allnodes = np; - write_unlock_irqrestore(&devtree_lock, flags); + raw_spin_unlock_irqrestore(&devtree_lock, flags); - } - /** -@@ -1212,7 +1224,7 @@ void of_detach_node(struct device_node * - struct device_node *parent; - unsigned long flags; + of_add_proc_dt_entry(np); + return 0; +@@ -1424,17 +1436,17 @@ int of_detach_node(struct device_node *n + if (rc) + return rc; - write_lock_irqsave(&devtree_lock, flags); + raw_spin_lock_irqsave(&devtree_lock, flags); - parent = np->parent; - if (!parent) -@@ -1243,7 +1255,7 @@ void of_detach_node(struct device_node * - of_node_set_flag(np, OF_DETACHED); + if (of_node_check_flag(np, OF_DETACHED)) { + /* someone already detached it */ +- write_unlock_irqrestore(&devtree_lock, flags); ++ raw_spin_unlock_irqrestore(&devtree_lock, flags); + return rc; + } - out_unlock: + parent = np->parent; + if (!parent) { +- write_unlock_irqrestore(&devtree_lock, flags); ++ raw_spin_unlock_irqrestore(&devtree_lock, flags); + return rc; + } + +@@ -1461,7 +1473,7 @@ int of_detach_node(struct device_node *n + } + + of_node_set_flag(np, OF_DETACHED); - 
write_unlock_irqrestore(&devtree_lock, flags); + raw_spin_unlock_irqrestore(&devtree_lock, flags); - } - #endif /* defined(CONFIG_OF_DYNAMIC) */ -Index: linux-stable/include/linux/of.h -=================================================================== ---- linux-stable.orig/include/linux/of.h -+++ linux-stable/include/linux/of.h -@@ -91,7 +91,7 @@ static inline void of_node_put(struct de - extern struct device_node *allnodes; + of_remove_proc_dt_entry(np); + return rc; +--- a/include/linux/of.h ++++ b/include/linux/of.h +@@ -92,7 +92,7 @@ static inline void of_node_put(struct de + extern struct device_node *of_allnodes; extern struct device_node *of_chosen; extern struct device_node *of_aliases; -extern rwlock_t devtree_lock; diff --git a/debian/patches/features/all/rt/of-fixup-recursive-locking.patch b/debian/patches/features/all/rt/of-fixup-resursive-locking-code-paths.patch similarity index 68% rename from debian/patches/features/all/rt/of-fixup-recursive-locking.patch rename to debian/patches/features/all/rt/of-fixup-resursive-locking-code-paths.patch index 29cd9aff7..9736d9208 100644 --- a/debian/patches/features/all/rt/of-fixup-recursive-locking.patch +++ b/debian/patches/features/all/rt/of-fixup-resursive-locking-code-paths.patch @@ -1,27 +1,42 @@ -From: Thomas Gleixner -Date: Thu, 13 Aug 2009 09:04:10 +0200 Subject: OF: Fixup resursive locking code paths +From: Paul Gortmaker +Date: Fri, 25 Jan 2013 13:21:47 -0500 + +From: Thomas Gleixner There is no real reason to use a rwlock for devtree_lock. It even could be a mutex, but unfortunately it's locked from cpu hotplug -pathes which can't schedule :( +paths which can't schedule :( -So it needs to become a raw lock on rt as well. devtree_lock would be -the only user of a raw_rw_lock, so we are better of cleaning the -recursive locking pathes which allows us to convert devtree_lock to a +So it needs to become a raw lock on rt as well. 
The devtree_lock would +be the only user of a raw_rw_lock, so we are better off cleaning up the +recursive locking paths which allows us to convert devtree_lock to a read_lock. +Here we do the standard thing of introducing __foo() as the "raw" +version of foo(), so that we can take better control of the locking. +The "raw" versions are not exported and are for internal use within +the file itself. + +Signed-off-by: Thomas Gleixner +Signed-off-by: Paul Gortmaker +Cc: devicetree-discuss@lists.ozlabs.org +Cc: Grant Likely +Cc: Rob Herring +Link: http://lkml.kernel.org/r/1359138107-14159-1-git-send-email-paul.gortmaker@windriver.com Signed-off-by: Thomas Gleixner - --- - drivers/of/base.c | 93 +++++++++++++++++++++++++++++++++++++++++------------- - 1 file changed, 71 insertions(+), 22 deletions(-) -Index: linux-stable/drivers/of/base.c -=================================================================== ---- linux-stable.orig/drivers/of/base.c -+++ linux-stable/drivers/of/base.c -@@ -163,16 +163,14 @@ void of_node_put(struct device_node *nod +[This has been living in the RT tree for several releases, and I've + put it on top of 3.8-rc4 mainline and tested it independently there + on a ppc sbc8548 board as well. 
So it would be nice to get this in 3.9] + + drivers/of/base.c | 91 +++++++++++++++++++++++++++++++++++++++++------------- + 1 file changed, 70 insertions(+), 21 deletions(-) + +--- a/drivers/of/base.c ++++ b/drivers/of/base.c +@@ -164,16 +164,14 @@ void of_node_put(struct device_node *nod EXPORT_SYMBOL(of_node_put); #endif /* CONFIG_OF_DYNAMIC */ @@ -40,7 +55,7 @@ Index: linux-stable/drivers/of/base.c for (pp = np->properties; pp; pp = pp->next) { if (of_prop_cmp(pp->name, name) == 0) { if (lenp) -@@ -180,6 +178,18 @@ struct property *of_find_property(const +@@ -181,6 +179,18 @@ struct property *of_find_property(const break; } } @@ -59,7 +74,7 @@ Index: linux-stable/drivers/of/base.c read_unlock(&devtree_lock); return pp; -@@ -213,8 +223,20 @@ EXPORT_SYMBOL(of_find_all_nodes); +@@ -214,8 +224,20 @@ EXPORT_SYMBOL(of_find_all_nodes); * Find a property with a given name for a given node * and return the value. */ @@ -81,7 +96,7 @@ Index: linux-stable/drivers/of/base.c { struct property *pp = of_find_property(np, name, lenp); -@@ -225,13 +247,13 @@ EXPORT_SYMBOL(of_get_property); +@@ -226,13 +248,13 @@ EXPORT_SYMBOL(of_get_property); /** Checks if the given "compat" string matches one of the strings in * the device's "compatible" property */ @@ -91,15 +106,14 @@ Index: linux-stable/drivers/of/base.c + const char *compat) { const char* cp; -- int cplen, l; -+ int uninitialized_var(cplen), l; + int cplen, l; - cp = of_get_property(device, "compatible", &cplen); + cp = __of_get_property(device, "compatible", &cplen); if (cp == NULL) return 0; while (cplen > 0) { -@@ -244,6 +266,20 @@ int of_device_is_compatible(const struct +@@ -245,6 +267,20 @@ int of_device_is_compatible(const struct return 0; } @@ -120,7 +134,7 @@ Index: linux-stable/drivers/of/base.c EXPORT_SYMBOL(of_device_is_compatible); /** -@@ -494,7 +530,8 @@ struct device_node *of_find_compatible_n +@@ -518,7 +554,8 @@ struct device_node *of_find_compatible_n if (type && !(np->type && (of_node_cmp(np->type, 
type) == 0))) continue; @@ -130,7 +144,7 @@ Index: linux-stable/drivers/of/base.c break; } of_node_put(from); -@@ -538,15 +575,9 @@ out: +@@ -562,15 +599,9 @@ out: } EXPORT_SYMBOL(of_find_node_with_property); @@ -149,7 +163,7 @@ Index: linux-stable/drivers/of/base.c { if (!matches) return NULL; -@@ -560,14 +591,32 @@ const struct of_device_id *of_match_node +@@ -584,14 +615,32 @@ const struct of_device_id *of_match_node match &= node->type && !strcmp(matches->type, node->type); if (matches->compatible[0]) @@ -184,12 +198,12 @@ Index: linux-stable/drivers/of/base.c EXPORT_SYMBOL(of_match_node); /** -@@ -590,7 +639,7 @@ struct device_node *of_find_matching_nod +@@ -619,7 +668,7 @@ struct device_node *of_find_matching_nod read_lock(&devtree_lock); - np = from ? from->allnext : allnodes; + np = from ? from->allnext : of_allnodes; for (; np; np = np->allnext) { -- if (of_match_node(matches, np) && of_node_get(np)) -+ if (__of_match_node(matches, np) && of_node_get(np)) +- if (of_match_node(matches, np) && of_node_get(np)) { ++ if (__of_match_node(matches, np) && of_node_get(np)) { + if (match) + *match = matches; break; - } - of_node_put(from); diff --git a/debian/patches/features/all/rt/oleg-signal-rt-fix.patch b/debian/patches/features/all/rt/oleg-signal-rt-fix.patch index d849453c2..b69ab2bdd 100644 --- a/debian/patches/features/all/rt/oleg-signal-rt-fix.patch +++ b/debian/patches/features/all/rt/oleg-signal-rt-fix.patch @@ -35,35 +35,31 @@ Signed-off-by: Thomas Gleixner kernel/signal.c | 37 +++++++++++++++++++++++++++++++++++-- 4 files changed, 60 insertions(+), 2 deletions(-) -Index: linux-stable/arch/x86/include/asm/signal.h -=================================================================== ---- linux-stable.orig/arch/x86/include/asm/signal.h -+++ linux-stable/arch/x86/include/asm/signal.h -@@ -31,6 +31,19 @@ typedef struct { +--- a/arch/x86/include/asm/signal.h ++++ b/arch/x86/include/asm/signal.h +@@ -23,6 +23,19 @@ typedef struct { unsigned long 
sig[_NSIG_WORDS]; } sigset_t; +/* -+ * Because some traps use the IST stack, we must keep -+ * preemption disabled while calling do_trap(), but do_trap() -+ * may call force_sig_info() which will grab the signal spin_locks -+ * for the task, which in PREEMPT_RT_FULL are mutexes. -+ * By defining ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will -+ * set TIF_NOTIFY_RESUME and set up the signal to be sent on exit -+ * of the trap. ++ * Because some traps use the IST stack, we must keep preemption ++ * disabled while calling do_trap(), but do_trap() may call ++ * force_sig_info() which will grab the signal spin_locks for the ++ * task, which in PREEMPT_RT_FULL are mutexes. By defining ++ * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set ++ * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the ++ * trap. + */ +#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_X86_64) +#define ARCH_RT_DELAYS_SIGNAL_SEND +#endif + - #else - /* Here we must cater to libcs that poke about in kernel headers. 
*/ - -Index: linux-stable/arch/x86/kernel/signal.c -=================================================================== ---- linux-stable.orig/arch/x86/kernel/signal.c -+++ linux-stable/arch/x86/kernel/signal.c -@@ -785,6 +785,14 @@ do_notify_resume(struct pt_regs *regs, v + #ifndef CONFIG_COMPAT + typedef sigset_t compat_sigset_t; + #endif +--- a/arch/x86/kernel/signal.c ++++ b/arch/x86/kernel/signal.c +@@ -808,6 +808,14 @@ do_notify_resume(struct pt_regs *regs, v mce_notify_process(); #endif /* CONFIG_X86_64 && CONFIG_X86_MCE */ @@ -75,14 +71,12 @@ Index: linux-stable/arch/x86/kernel/signal.c + } +#endif + - if (thread_info_flags & _TIF_UPROBE) { - clear_thread_flag(TIF_UPROBE); + if (thread_info_flags & _TIF_UPROBE) uprobe_notify_resume(regs); -Index: linux-stable/include/linux/sched.h -=================================================================== ---- linux-stable.orig/include/linux/sched.h -+++ linux-stable/include/linux/sched.h -@@ -1408,6 +1408,10 @@ struct task_struct { + +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1412,6 +1412,10 @@ struct task_struct { sigset_t blocked, real_blocked; sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ struct sigpending pending; @@ -93,11 +87,9 @@ Index: linux-stable/include/linux/sched.h unsigned long sas_ss_sp; size_t sas_ss_size; -Index: linux-stable/kernel/signal.c -=================================================================== ---- linux-stable.orig/kernel/signal.c -+++ linux-stable/kernel/signal.c -@@ -1305,8 +1305,8 @@ int do_send_sig_info(int sig, struct sig +--- a/kernel/signal.c ++++ b/kernel/signal.c +@@ -1302,8 +1302,8 @@ int do_send_sig_info(int sig, struct sig * We don't want to have recursive SIGSEGV's etc, for example, * that is why we also clear SIGNAL_UNKILLABLE. 
*/ @@ -108,7 +100,7 @@ Index: linux-stable/kernel/signal.c { unsigned long int flags; int ret, blocked, ignored; -@@ -1331,6 +1331,39 @@ force_sig_info(int sig, struct siginfo * +@@ -1328,6 +1328,39 @@ force_sig_info(int sig, struct siginfo * return ret; } diff --git a/debian/patches/features/all/rt/panic-disable-random-on-rt.patch b/debian/patches/features/all/rt/panic-disable-random-on-rt.patch index 6aba7d767..23d4254c7 100644 --- a/debian/patches/features/all/rt/panic-disable-random-on-rt.patch +++ b/debian/patches/features/all/rt/panic-disable-random-on-rt.patch @@ -5,10 +5,8 @@ Subject: panic: skip get_random_bytes for RT_FULL in init_oops_id kernel/panic.c | 2 ++ 1 file changed, 2 insertions(+) -Index: linux-stable/kernel/panic.c -=================================================================== ---- linux-stable.orig/kernel/panic.c -+++ linux-stable/kernel/panic.c +--- a/kernel/panic.c ++++ b/kernel/panic.c @@ -371,9 +371,11 @@ static u64 oops_id; static int init_oops_id(void) diff --git a/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch b/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch index cbbb61cd2..a41bbdfb8 100644 --- a/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch +++ b/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch @@ -24,17 +24,14 @@ Signed-off-by: Thomas Gleixner --- include/linux/rcupdate.h | 6 ------ - kernel/rcutree.c | 7 ++++++- - kernel/rcutree.h | 1 + - kernel/rcutree_plugin.h | 2 +- + kernel/rcutree.c | 9 ++++++++- + kernel/rcutree_plugin.h | 7 ++++++- kernel/softirq.c | 20 +++++++++++++------- - 5 files changed, 21 insertions(+), 15 deletions(-) + 4 files changed, 27 insertions(+), 15 deletions(-) -Index: linux-stable/include/linux/rcupdate.h -=================================================================== ---- linux-stable.orig/include/linux/rcupdate.h -+++ 
linux-stable/include/linux/rcupdate.h -@@ -195,13 +195,7 @@ static inline int rcu_preempt_depth(void +--- a/include/linux/rcupdate.h ++++ b/include/linux/rcupdate.h +@@ -220,13 +220,7 @@ static inline int rcu_preempt_depth(void /* Internal to kernel */ extern void rcu_sched_qs(int cpu); @@ -48,16 +45,16 @@ Index: linux-stable/include/linux/rcupdate.h extern void rcu_check_callbacks(int cpu, int user); struct notifier_block; extern void rcu_idle_enter(void); -Index: linux-stable/kernel/rcutree.c -=================================================================== ---- linux-stable.orig/kernel/rcutree.c -+++ linux-stable/kernel/rcutree.c -@@ -182,7 +182,12 @@ void rcu_sched_qs(int cpu) +--- a/kernel/rcutree.c ++++ b/kernel/rcutree.c +@@ -181,7 +181,14 @@ void rcu_sched_qs(int cpu) rdp->passed_quiesce = 1; } -#ifndef CONFIG_PREEMPT_RT_FULL +#ifdef CONFIG_PREEMPT_RT_FULL ++static void rcu_preempt_qs(int cpu); ++ +void rcu_bh_qs(int cpu) +{ + rcu_preempt_qs(cpu); @@ -66,23 +63,9 @@ Index: linux-stable/kernel/rcutree.c void rcu_bh_qs(int cpu) { struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); -Index: linux-stable/kernel/rcutree.h -=================================================================== ---- linux-stable.orig/kernel/rcutree.h -+++ linux-stable/kernel/rcutree.h -@@ -463,6 +463,7 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work); - /* Forward declarations for rcutree_plugin.h */ - static void rcu_bootup_announce(void); - long rcu_batches_completed(void); -+static void rcu_preempt_qs(int cpu); - static void rcu_preempt_note_context_switch(int cpu); - static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp); - #ifdef CONFIG_HOTPLUG_CPU -Index: linux-stable/kernel/rcutree_plugin.h -=================================================================== ---- linux-stable.orig/kernel/rcutree_plugin.h -+++ linux-stable/kernel/rcutree_plugin.h -@@ -1727,7 +1727,7 @@ static void __cpuinit rcu_prepare_kthrea +--- a/kernel/rcutree_plugin.h ++++ b/kernel/rcutree_plugin.h 
+@@ -1519,7 +1519,7 @@ static void __cpuinit rcu_prepare_kthrea #endif /* #else #ifdef CONFIG_RCU_BOOST */ @@ -91,11 +74,35 @@ Index: linux-stable/kernel/rcutree_plugin.h /* * Check to see if any future RCU-related work will need to be done -Index: linux-stable/kernel/softirq.c -=================================================================== ---- linux-stable.orig/kernel/softirq.c -+++ linux-stable/kernel/softirq.c -@@ -139,7 +139,7 @@ static void wakeup_softirqd(void) +@@ -1535,6 +1535,9 @@ int rcu_needs_cpu(int cpu, unsigned long + *delta_jiffies = ULONG_MAX; + return rcu_cpu_has_callbacks(cpu); + } ++#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */ ++ ++#if !defined(CONFIG_RCU_FAST_NO_HZ) + + /* + * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it. +@@ -1651,6 +1654,7 @@ static bool rcu_cpu_has_nonlazy_callback + rcu_preempt_cpu_has_nonlazy_callbacks(cpu); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Allow the CPU to enter dyntick-idle mode if either: (1) There are no + * callbacks on this CPU, (2) this CPU has not yet attempted to enter +@@ -1694,6 +1698,7 @@ int rcu_needs_cpu(int cpu, unsigned long + } + return 0; + } ++#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */ + + /* + * Handler for smp_call_function_single(). 
The only point of this +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -142,7 +142,7 @@ static void wakeup_softirqd(void) wake_up_process(tsk); } @@ -104,7 +111,7 @@ Index: linux-stable/kernel/softirq.c { struct softirq_action *h = softirq_vec; unsigned int prev_count = preempt_count(); -@@ -162,7 +162,8 @@ static void handle_pending_softirqs(u32 +@@ -165,7 +165,8 @@ static void handle_pending_softirqs(u32 prev_count, (unsigned int) preempt_count()); preempt_count() = prev_count; } @@ -114,7 +121,7 @@ Index: linux-stable/kernel/softirq.c } local_irq_disable(); } -@@ -322,7 +323,7 @@ restart: +@@ -325,7 +326,7 @@ restart: /* Reset the pending bitmask before enabling irqs */ set_softirq_pending(0); @@ -123,7 +130,7 @@ Index: linux-stable/kernel/softirq.c pending = local_softirq_pending(); if (pending && --max_restart) -@@ -393,7 +394,12 @@ static inline void ksoftirqd_clr_sched_p +@@ -376,7 +377,12 @@ static void ksoftirqd_clr_sched_params(u static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock); static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner); @@ -137,7 +144,7 @@ Index: linux-stable/kernel/softirq.c void __init softirq_early_init(void) { -@@ -464,7 +470,7 @@ EXPORT_SYMBOL(in_serving_softirq); +@@ -447,7 +453,7 @@ EXPORT_SYMBOL(in_serving_softirq); * Called with bh and local interrupts disabled. For full RT cpu must * be pinned. */ @@ -146,7 +153,7 @@ Index: linux-stable/kernel/softirq.c { u32 pending = local_softirq_pending(); int cpu = smp_processor_id(); -@@ -478,7 +484,7 @@ static void __do_softirq(void) +@@ -461,7 +467,7 @@ static void __do_softirq(void) lockdep_softirq_enter(); @@ -155,7 +162,7 @@ Index: linux-stable/kernel/softirq.c pending = local_softirq_pending(); if (pending) -@@ -517,7 +523,7 @@ static int __thread_do_softirq(int cpu) +@@ -500,7 +506,7 @@ static int __thread_do_softirq(int cpu) * schedule! 
*/ if (local_softirq_pending()) diff --git a/debian/patches/features/all/rt/pci-access-use-__wake_up_all_locked.patch b/debian/patches/features/all/rt/pci-access-use-__wake_up_all_locked.patch index 844ca4a80..6eff68a70 100644 --- a/debian/patches/features/all/rt/pci-access-use-__wake_up_all_locked.patch +++ b/debian/patches/features/all/rt/pci-access-use-__wake_up_all_locked.patch @@ -12,10 +12,8 @@ Cc: stable-rt@vger.kernel.org drivers/pci/access.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -Index: linux-stable/drivers/pci/access.c -=================================================================== ---- linux-stable.orig/drivers/pci/access.c -+++ linux-stable/drivers/pci/access.c +--- a/drivers/pci/access.c ++++ b/drivers/pci/access.c @@ -465,7 +465,7 @@ void pci_cfg_access_unlock(struct pci_de WARN_ON(!dev->block_cfg_access); diff --git a/debian/patches/features/all/rt/percpu-rwsem-compilefix.patch b/debian/patches/features/all/rt/percpu-rwsem-compilefix.patch new file mode 100644 index 000000000..d6f1fd8ee --- /dev/null +++ b/debian/patches/features/all/rt/percpu-rwsem-compilefix.patch @@ -0,0 +1,19 @@ +--- + lib/percpu-rwsem.c | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/lib/percpu-rwsem.c ++++ b/lib/percpu-rwsem.c +@@ -84,8 +84,12 @@ void percpu_down_read(struct percpu_rw_s + + down_read(&brw->rw_sem); + atomic_inc(&brw->slow_read_ctr); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ up_read(&brw->rw_sem); ++#else + /* avoid up_read()->rwsem_release() */ + __up_read(&brw->rw_sem); ++#endif + } + + void percpu_up_read(struct percpu_rw_semaphore *brw) diff --git a/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch b/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch index 4c9d583e5..2215a3efa 100644 --- a/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch +++ b/debian/patches/features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch @@ -56,11 +56,9 @@ Signed-off-by: Steven Rostedt 
kernel/events/core.c | 1 + 1 file changed, 1 insertion(+) -Index: linux-stable/kernel/events/core.c -=================================================================== ---- linux-stable.orig/kernel/events/core.c -+++ linux-stable/kernel/events/core.c -@@ -5441,6 +5441,7 @@ static void perf_swevent_init_hrtimer(st +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -5638,6 +5638,7 @@ static void perf_swevent_init_hrtimer(st hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hwc->hrtimer.function = perf_swevent_hrtimer; diff --git a/debian/patches/features/all/rt/perf-move-irq-work-to-softirq-in-rt.patch b/debian/patches/features/all/rt/perf-move-irq-work-to-softirq-in-rt.patch index 253a86bd3..157a8bad8 100644 --- a/debian/patches/features/all/rt/perf-move-irq-work-to-softirq-in-rt.patch +++ b/debian/patches/features/all/rt/perf-move-irq-work-to-softirq-in-rt.patch @@ -9,10 +9,8 @@ Signed-off-by: Thomas Gleixner kernel/timer.c | 6 +++++- 3 files changed, 9 insertions(+), 1 deletion(-) -Index: linux-stable/arch/x86/kernel/irq_work.c -=================================================================== ---- linux-stable.orig/arch/x86/kernel/irq_work.c -+++ linux-stable/arch/x86/kernel/irq_work.c +--- a/arch/x86/kernel/irq_work.c ++++ b/arch/x86/kernel/irq_work.c @@ -18,6 +18,7 @@ void smp_irq_work_interrupt(struct pt_re irq_exit(); } @@ -26,10 +24,8 @@ Index: linux-stable/arch/x86/kernel/irq_work.c #endif } +#endif -Index: linux-stable/kernel/irq_work.c -=================================================================== ---- linux-stable.orig/kernel/irq_work.c -+++ linux-stable/kernel/irq_work.c +--- a/kernel/irq_work.c ++++ b/kernel/irq_work.c @@ -107,8 +107,10 @@ void irq_work_run(void) if (llist_empty(this_list)) return; @@ -41,11 +37,9 @@ Index: linux-stable/kernel/irq_work.c llnode = llist_del_all(this_list); while (llnode != NULL) { -Index: linux-stable/kernel/timer.c -=================================================================== ---- 
linux-stable.orig/kernel/timer.c -+++ linux-stable/kernel/timer.c -@@ -1432,7 +1432,7 @@ void update_process_times(int user_tick) +--- a/kernel/timer.c ++++ b/kernel/timer.c +@@ -1419,7 +1419,7 @@ void update_process_times(int user_tick) scheduler_tick(); run_local_timers(); rcu_check_callbacks(cpu, user_tick); @@ -54,7 +48,7 @@ Index: linux-stable/kernel/timer.c if (in_irq()) irq_work_run(); #endif -@@ -1446,6 +1446,10 @@ static void run_timer_softirq(struct sof +@@ -1433,6 +1433,10 @@ static void run_timer_softirq(struct sof { struct tvec_base *base = __this_cpu_read(tvec_bases); diff --git a/debian/patches/features/all/rt/peter_zijlstra-frob-hrtimer.patch b/debian/patches/features/all/rt/peter_zijlstra-frob-hrtimer.patch deleted file mode 100644 index 0299ee857..000000000 --- a/debian/patches/features/all/rt/peter_zijlstra-frob-hrtimer.patch +++ /dev/null @@ -1,106 +0,0 @@ -Subject: hrtimer: Don't call the timer handler from hrtimer_start -From: Peter Zijlstra -Date: Fri Aug 12 17:39:54 CEST 2011 - - [] __delay+0xf/0x11 - [] do_raw_spin_lock+0xd2/0x13c - [] _raw_spin_lock+0x60/0x73 rt_b->rt_runtime_lock - [] ? sched_rt_period_timer+0xad/0x281 - [] sched_rt_period_timer+0xad/0x281 - [] __run_hrtimer+0x1e4/0x347 - [] ? enqueue_rt_entity+0x36/0x36 - [] __hrtimer_start_range_ns+0x2b5/0x40a base->cpu_base->lock (lock_hrtimer_base) - [] __enqueue_rt_entity+0x26f/0x2aa rt_b->rt_runtime_lock (start_rt_bandwidth) - [] enqueue_rt_entity+0x28/0x36 - [] enqueue_task_rt+0x3d/0xb0 - [] enqueue_task+0x5d/0x64 - [] task_setprio+0x210/0x29c rq->lock - [] __rt_mutex_adjust_prio+0x25/0x2a p->pi_lock - [] task_blocks_on_rt_mutex+0x196/0x20f - -Instead make __hrtimer_start_range_ns() return -ETIME when the timer -is in the past. Since body actually uses the hrtimer_start*() return -value its pretty safe to wreck it. - -Also, it will only ever return -ETIME for timer->irqsafe || !wakeup -timers. 
- -Signed-off-by: Peter Zijlstra ---- - kernel/hrtimer.c | 48 +++++++++++++++++++++++------------------------- - 1 file changed, 23 insertions(+), 25 deletions(-) - -Index: linux-stable/kernel/hrtimer.c -=================================================================== ---- linux-stable.orig/kernel/hrtimer.c -+++ linux-stable/kernel/hrtimer.c -@@ -646,37 +646,24 @@ static inline int hrtimer_enqueue_reprog - struct hrtimer_clock_base *base, - int wakeup) - { --#ifdef CONFIG_PREEMPT_RT_BASE --again: - if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) { -+ if (!wakeup) -+ return -ETIME; -+ -+#ifdef CONFIG_PREEMPT_RT_BASE - /* - * Move softirq based timers away from the rbtree in - * case it expired already. Otherwise we would have a - * stale base->first entry until the softirq runs. - */ -- if (!hrtimer_rt_defer(timer)) { -- ktime_t now = ktime_get(); -- -- __run_hrtimer(timer, &now); -- /* -- * __run_hrtimer might have requeued timer and -- * it could be base->first again. -- */ -- if (&timer->node == base->active.next) -- goto again; -- return 1; -- } --#else -- if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) { -+ if (!hrtimer_rt_defer(timer)) -+ return -ETIME; - #endif -- if (wakeup) { -- raw_spin_unlock(&base->cpu_base->lock); -- raise_softirq_irqoff(HRTIMER_SOFTIRQ); -- raw_spin_lock(&base->cpu_base->lock); -- } else -- __raise_softirq_irqoff(HRTIMER_SOFTIRQ); -+ raw_spin_unlock(&base->cpu_base->lock); -+ raise_softirq_irqoff(HRTIMER_SOFTIRQ); -+ raw_spin_lock(&base->cpu_base->lock); - -- return 1; -+ return 0; - } - - return 0; -@@ -1067,8 +1054,19 @@ int __hrtimer_start_range_ns(struct hrti - * - * XXX send_remote_softirq() ? 
- */ -- if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)) -- hrtimer_enqueue_reprogram(timer, new_base, wakeup); -+ if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)) { -+ ret = hrtimer_enqueue_reprogram(timer, new_base, wakeup); -+ if (ret) { -+ /* -+ * In case we failed to reprogram the timer (mostly -+ * because out current timer is already elapsed), -+ * remove it again and report a failure. This avoids -+ * stale base->first entries. -+ */ -+ __remove_hrtimer(timer, new_base, -+ timer->state & HRTIMER_STATE_CALLBACK, 0); -+ } -+ } - - unlock_hrtimer_base(timer, &flags); - diff --git a/debian/patches/features/all/rt/peter_zijlstra-frob-migrate_disable-2.patch b/debian/patches/features/all/rt/peter_zijlstra-frob-migrate_disable-2.patch index d22659861..3c05ad9ef 100644 --- a/debian/patches/features/all/rt/peter_zijlstra-frob-migrate_disable-2.patch +++ b/debian/patches/features/all/rt/peter_zijlstra-frob-migrate_disable-2.patch @@ -21,10 +21,8 @@ Link: http://lkml.kernel.org/n/tip-275i87sl8e1jcamtchmehonm@git.kernel.org lib/smp_processor_id.c | 2 +- 6 files changed, 30 insertions(+), 23 deletions(-) -Index: linux-stable/include/linux/preempt.h -=================================================================== ---- linux-stable.orig/include/linux/preempt.h -+++ linux-stable/include/linux/preempt.h +--- a/include/linux/preempt.h ++++ b/include/linux/preempt.h @@ -108,28 +108,25 @@ do { \ #endif /* CONFIG_PREEMPT_COUNT */ @@ -63,11 +61,9 @@ Index: linux-stable/include/linux/preempt.h #endif #ifdef CONFIG_PREEMPT_NOTIFIERS -Index: linux-stable/include/linux/sched.h -=================================================================== ---- linux-stable.orig/include/linux/sched.h -+++ linux-stable/include/linux/sched.h -@@ -1275,7 +1275,9 @@ struct task_struct { +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1279,7 +1279,9 @@ struct task_struct { #endif unsigned int policy; @@ -77,7 +73,7 @@ Index: 
linux-stable/include/linux/sched.h int nr_cpus_allowed; cpumask_t cpus_allowed; -@@ -2771,11 +2773,22 @@ static inline void set_task_cpu(struct t +@@ -2810,11 +2812,22 @@ static inline void set_task_cpu(struct t #endif /* CONFIG_SMP */ @@ -100,10 +96,8 @@ Index: linux-stable/include/linux/sched.h return &p->cpus_allowed; } -Index: linux-stable/include/linux/smp.h -=================================================================== ---- linux-stable.orig/include/linux/smp.h -+++ linux-stable/include/linux/smp.h +--- a/include/linux/smp.h ++++ b/include/linux/smp.h @@ -218,13 +218,8 @@ static inline void kick_all_cpus_sync(vo #define get_cpu() ({ preempt_disable(); smp_processor_id(); }) #define put_cpu() preempt_enable() @@ -120,11 +114,9 @@ Index: linux-stable/include/linux/smp.h /* * Callback to arch code if there's nosmp or maxcpus=0 on the -Index: linux-stable/kernel/sched/core.c -=================================================================== ---- linux-stable.orig/kernel/sched/core.c -+++ linux-stable/kernel/sched/core.c -@@ -5203,7 +5203,7 @@ void __cpuinit init_idle(struct task_str +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -4734,7 +4734,7 @@ void __cpuinit init_idle(struct task_str #ifdef CONFIG_SMP void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) { @@ -133,7 +125,7 @@ Index: linux-stable/kernel/sched/core.c if (p->sched_class && p->sched_class->set_cpus_allowed) p->sched_class->set_cpus_allowed(p, new_mask); p->nr_cpus_allowed = cpumask_weight(new_mask); -@@ -5259,7 +5259,7 @@ int set_cpus_allowed_ptr(struct task_str +@@ -4790,7 +4790,7 @@ int set_cpus_allowed_ptr(struct task_str do_set_cpus_allowed(p, new_mask); /* Can the task run on the task's current CPU? 
If so, we're done */ @@ -142,7 +134,7 @@ Index: linux-stable/kernel/sched/core.c goto out; dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); -@@ -5278,6 +5278,7 @@ out: +@@ -4809,6 +4809,7 @@ out: } EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); @@ -150,7 +142,7 @@ Index: linux-stable/kernel/sched/core.c void migrate_disable(void) { struct task_struct *p = current; -@@ -5370,6 +5371,7 @@ void migrate_enable(void) +@@ -4901,6 +4902,7 @@ void migrate_enable(void) preempt_enable(); } EXPORT_SYMBOL(migrate_enable); @@ -158,11 +150,9 @@ Index: linux-stable/kernel/sched/core.c /* * Move (not current) task off this cpu, onto dest cpu. We're doing -Index: linux-stable/kernel/trace/trace.c -=================================================================== ---- linux-stable.orig/kernel/trace/trace.c -+++ linux-stable/kernel/trace/trace.c -@@ -1156,7 +1156,7 @@ tracing_generic_entry_update(struct trac +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -1178,7 +1178,7 @@ tracing_generic_entry_update(struct trac ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | (need_resched() ? 
TRACE_FLAG_NEED_RESCHED : 0); @@ -171,10 +161,8 @@ Index: linux-stable/kernel/trace/trace.c } EXPORT_SYMBOL_GPL(tracing_generic_entry_update); -Index: linux-stable/lib/smp_processor_id.c -=================================================================== ---- linux-stable.orig/lib/smp_processor_id.c -+++ linux-stable/lib/smp_processor_id.c +--- a/lib/smp_processor_id.c ++++ b/lib/smp_processor_id.c @@ -41,7 +41,7 @@ notrace unsigned int debug_smp_processor printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x %08x] " diff --git a/debian/patches/features/all/rt/peter_zijlstra-frob-migrate_disable.patch b/debian/patches/features/all/rt/peter_zijlstra-frob-migrate_disable.patch index 1cce90c23..a6bb210fd 100644 --- a/debian/patches/features/all/rt/peter_zijlstra-frob-migrate_disable.patch +++ b/debian/patches/features/all/rt/peter_zijlstra-frob-migrate_disable.patch @@ -11,11 +11,9 @@ Link: http://lkml.kernel.org/n/tip-cbz6hkl5r5mvwtx5s3tor2y6@git.kernel.org kernel/sched/core.c | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) -Index: linux-stable/kernel/sched/core.c -=================================================================== ---- linux-stable.orig/kernel/sched/core.c -+++ linux-stable/kernel/sched/core.c -@@ -5298,7 +5298,19 @@ void migrate_disable(void) +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -4829,7 +4829,19 @@ void migrate_disable(void) preempt_enable(); return; } @@ -36,7 +34,7 @@ Index: linux-stable/kernel/sched/core.c p->migrate_disable = 1; mask = tsk_cpus_allowed(p); -@@ -5309,7 +5321,7 @@ void migrate_disable(void) +@@ -4840,7 +4852,7 @@ void migrate_disable(void) p->sched_class->set_cpus_allowed(p, mask); p->nr_cpus_allowed = cpumask_weight(mask); } @@ -45,7 +43,7 @@ Index: linux-stable/kernel/sched/core.c preempt_enable(); } EXPORT_SYMBOL(migrate_disable); -@@ -5337,7 +5349,11 @@ void migrate_enable(void) +@@ -4868,7 +4880,11 @@ void migrate_enable(void) return; } @@ -58,7 +56,7 @@ 
Index: linux-stable/kernel/sched/core.c p->migrate_disable = 0; mask = tsk_cpus_allowed(p); -@@ -5349,7 +5365,7 @@ void migrate_enable(void) +@@ -4880,7 +4896,7 @@ void migrate_enable(void) p->nr_cpus_allowed = cpumask_weight(mask); } diff --git a/debian/patches/features/all/rt/peter_zijlstra-frob-pagefault_disable.patch b/debian/patches/features/all/rt/peter_zijlstra-frob-pagefault_disable.patch index 539c0c86b..cd632054b 100644 --- a/debian/patches/features/all/rt/peter_zijlstra-frob-pagefault_disable.patch +++ b/debian/patches/features/all/rt/peter_zijlstra-frob-pagefault_disable.patch @@ -21,7 +21,7 @@ Link: http://lkml.kernel.org/n/tip-3yy517m8zsi9fpsf14xfaqkw@git.kernel.org arch/mn10300/mm/fault.c | 2 +- arch/parisc/mm/fault.c | 2 +- arch/powerpc/mm/fault.c | 2 +- - arch/s390/mm/fault.c | 6 +++--- + arch/s390/mm/fault.c | 8 ++++---- arch/score/mm/fault.c | 2 +- arch/sh/mm/fault.c | 2 +- arch/sparc/mm/fault_32.c | 2 +- @@ -32,12 +32,10 @@ Link: http://lkml.kernel.org/n/tip-3yy517m8zsi9fpsf14xfaqkw@git.kernel.org arch/xtensa/mm/fault.c | 2 +- include/linux/sched.h | 14 ++++++++++++++ kernel/fork.c | 2 ++ - 24 files changed, 40 insertions(+), 25 deletions(-) + 24 files changed, 41 insertions(+), 26 deletions(-) -Index: linux-stable/arch/alpha/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/alpha/mm/fault.c -+++ linux-stable/arch/alpha/mm/fault.c +--- a/arch/alpha/mm/fault.c ++++ b/arch/alpha/mm/fault.c @@ -108,7 +108,7 @@ do_page_fault(unsigned long address, uns /* If we're in an interrupt context, or have no user context, @@ -47,10 +45,8 @@ Index: linux-stable/arch/alpha/mm/fault.c goto no_context; #ifdef CONFIG_ALPHA_LARGE_VMALLOC -Index: linux-stable/arch/arm/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/arm/mm/fault.c -+++ linux-stable/arch/arm/mm/fault.c +--- a/arch/arm/mm/fault.c ++++ b/arch/arm/mm/fault.c @@ -279,7 +279,7 @@ 
do_page_fault(unsigned long addr, unsign * If we're in an interrupt or have no user * context, we must not take the fault.. @@ -60,10 +56,8 @@ Index: linux-stable/arch/arm/mm/fault.c goto no_context; /* -Index: linux-stable/arch/avr32/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/avr32/mm/fault.c -+++ linux-stable/arch/avr32/mm/fault.c +--- a/arch/avr32/mm/fault.c ++++ b/arch/avr32/mm/fault.c @@ -81,8 +81,7 @@ asmlinkage void do_page_fault(unsigned l * If we're in an interrupt or have no user context, we must * not take the fault... @@ -74,10 +68,8 @@ Index: linux-stable/arch/avr32/mm/fault.c goto no_context; local_irq_enable(); -Index: linux-stable/arch/cris/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/cris/mm/fault.c -+++ linux-stable/arch/cris/mm/fault.c +--- a/arch/cris/mm/fault.c ++++ b/arch/cris/mm/fault.c @@ -114,7 +114,7 @@ do_page_fault(unsigned long address, str * user context, we must not take the fault. */ @@ -87,10 +79,8 @@ Index: linux-stable/arch/cris/mm/fault.c goto no_context; retry: -Index: linux-stable/arch/frv/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/frv/mm/fault.c -+++ linux-stable/arch/frv/mm/fault.c +--- a/arch/frv/mm/fault.c ++++ b/arch/frv/mm/fault.c @@ -78,7 +78,7 @@ asmlinkage void do_page_fault(int datamm * If we're in an interrupt or have no user * context, we must not take the fault.. 
@@ -100,10 +90,8 @@ Index: linux-stable/arch/frv/mm/fault.c goto no_context; down_read(&mm->mmap_sem); -Index: linux-stable/arch/ia64/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/ia64/mm/fault.c -+++ linux-stable/arch/ia64/mm/fault.c +--- a/arch/ia64/mm/fault.c ++++ b/arch/ia64/mm/fault.c @@ -98,7 +98,7 @@ ia64_do_page_fault (unsigned long addres /* * If we're in an interrupt or have no user context, we must not take the fault.. @@ -113,10 +101,8 @@ Index: linux-stable/arch/ia64/mm/fault.c goto no_context; #ifdef CONFIG_VIRTUAL_MEM_MAP -Index: linux-stable/arch/m32r/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/m32r/mm/fault.c -+++ linux-stable/arch/m32r/mm/fault.c +--- a/arch/m32r/mm/fault.c ++++ b/arch/m32r/mm/fault.c @@ -114,7 +114,7 @@ asmlinkage void do_page_fault(struct pt_ * If we're in an interrupt or have no user context or are running in an * atomic region then we must not take the fault.. @@ -126,11 +112,9 @@ Index: linux-stable/arch/m32r/mm/fault.c goto bad_area_nosemaphore; /* When running in the kernel we expect faults to occur only to -Index: linux-stable/arch/m68k/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/m68k/mm/fault.c -+++ linux-stable/arch/m68k/mm/fault.c -@@ -85,7 +85,7 @@ int do_page_fault(struct pt_regs *regs, +--- a/arch/m68k/mm/fault.c ++++ b/arch/m68k/mm/fault.c +@@ -85,7 +85,7 @@ int do_page_fault(struct pt_regs *regs, * If we're in an interrupt or have no user * context, we must not take the fault.. 
*/ @@ -139,10 +123,8 @@ Index: linux-stable/arch/m68k/mm/fault.c goto no_context; retry: -Index: linux-stable/arch/microblaze/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/microblaze/mm/fault.c -+++ linux-stable/arch/microblaze/mm/fault.c +--- a/arch/microblaze/mm/fault.c ++++ b/arch/microblaze/mm/fault.c @@ -108,7 +108,7 @@ void do_page_fault(struct pt_regs *regs, if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11) is_write = 0; @@ -152,10 +134,8 @@ Index: linux-stable/arch/microblaze/mm/fault.c if (kernel_mode(regs)) goto bad_area_nosemaphore; -Index: linux-stable/arch/mips/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/mips/mm/fault.c -+++ linux-stable/arch/mips/mm/fault.c +--- a/arch/mips/mm/fault.c ++++ b/arch/mips/mm/fault.c @@ -89,7 +89,7 @@ asmlinkage void __kprobes do_page_fault( * If we're in an interrupt or have no user * context, we must not take the fault.. @@ -165,11 +145,9 @@ Index: linux-stable/arch/mips/mm/fault.c goto bad_area_nosemaphore; retry: -Index: linux-stable/arch/mn10300/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/mn10300/mm/fault.c -+++ linux-stable/arch/mn10300/mm/fault.c -@@ -167,7 +167,7 @@ asmlinkage void do_page_fault(struct pt_ +--- a/arch/mn10300/mm/fault.c ++++ b/arch/mn10300/mm/fault.c +@@ -168,7 +168,7 @@ asmlinkage void do_page_fault(struct pt_ * If we're in an interrupt or have no user * context, we must not take the fault.. 
*/ @@ -177,11 +155,9 @@ Index: linux-stable/arch/mn10300/mm/fault.c + if (!mm || pagefault_disabled()) goto no_context; - down_read(&mm->mmap_sem); -Index: linux-stable/arch/parisc/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/parisc/mm/fault.c -+++ linux-stable/arch/parisc/mm/fault.c + retry: +--- a/arch/parisc/mm/fault.c ++++ b/arch/parisc/mm/fault.c @@ -176,7 +176,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long acc_type; int fault; @@ -191,11 +167,9 @@ Index: linux-stable/arch/parisc/mm/fault.c goto no_context; down_read(&mm->mmap_sem); -Index: linux-stable/arch/powerpc/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/powerpc/mm/fault.c -+++ linux-stable/arch/powerpc/mm/fault.c -@@ -261,7 +261,7 @@ int __kprobes do_page_fault(struct pt_re +--- a/arch/powerpc/mm/fault.c ++++ b/arch/powerpc/mm/fault.c +@@ -259,7 +259,7 @@ int __kprobes do_page_fault(struct pt_re if (!arch_irq_disabled_regs(regs)) local_irq_enable(); @@ -204,11 +178,9 @@ Index: linux-stable/arch/powerpc/mm/fault.c if (!user_mode(regs)) return SIGSEGV; /* in_atomic() in user mode is really bad, -Index: linux-stable/arch/s390/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/s390/mm/fault.c -+++ linux-stable/arch/s390/mm/fault.c -@@ -286,8 +286,8 @@ static inline int do_exception(struct pt +--- a/arch/s390/mm/fault.c ++++ b/arch/s390/mm/fault.c +@@ -296,8 +296,8 @@ static inline int do_exception(struct pt * user context. 
*/ fault = VM_FAULT_BADCONTEXT; @@ -219,19 +191,19 @@ Index: linux-stable/arch/s390/mm/fault.c goto out; address = trans_exc_code & __FAIL_ADDR_MASK; -@@ -425,7 +425,7 @@ void __kprobes do_asce_exception(struct +@@ -436,8 +436,8 @@ void __kprobes do_asce_exception(struct + clear_tsk_thread_flag(current, TIF_PER_TRAP); trans_exc_code = regs->int_parm_long; - if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm || -- current->pagefault_disabled)) +- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm || +- current->pagefault_disabled())); ++ if (unlikely(!user_space_fault(trans_exc_code) || !mm || + pagefault_disabled())) goto no_context; down_read(&mm->mmap_sem); -Index: linux-stable/arch/score/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/score/mm/fault.c -+++ linux-stable/arch/score/mm/fault.c +--- a/arch/score/mm/fault.c ++++ b/arch/score/mm/fault.c @@ -72,7 +72,7 @@ asmlinkage void do_page_fault(struct pt_ * If we're in an interrupt or have no user * context, we must not take the fault.. 
@@ -241,11 +213,9 @@ Index: linux-stable/arch/score/mm/fault.c goto bad_area_nosemaphore; down_read(&mm->mmap_sem); -Index: linux-stable/arch/sh/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/sh/mm/fault.c -+++ linux-stable/arch/sh/mm/fault.c -@@ -445,7 +445,7 @@ asmlinkage void __kprobes do_page_fault( +--- a/arch/sh/mm/fault.c ++++ b/arch/sh/mm/fault.c +@@ -440,7 +440,7 @@ asmlinkage void __kprobes do_page_fault( * If we're in an interrupt, have no user context or are running * in an atomic region then we must not take the fault: */ @@ -254,10 +224,8 @@ Index: linux-stable/arch/sh/mm/fault.c bad_area_nosemaphore(regs, error_code, address); return; } -Index: linux-stable/arch/sparc/mm/fault_32.c -=================================================================== ---- linux-stable.orig/arch/sparc/mm/fault_32.c -+++ linux-stable/arch/sparc/mm/fault_32.c +--- a/arch/sparc/mm/fault_32.c ++++ b/arch/sparc/mm/fault_32.c @@ -200,7 +200,7 @@ asmlinkage void do_sparc_fault(struct pt * If we're in an interrupt or have no user * context, we must not take the fault.. @@ -267,11 +235,9 @@ Index: linux-stable/arch/sparc/mm/fault_32.c goto no_context; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); -Index: linux-stable/arch/sparc/mm/fault_64.c -=================================================================== ---- linux-stable.orig/arch/sparc/mm/fault_64.c -+++ linux-stable/arch/sparc/mm/fault_64.c -@@ -323,7 +323,7 @@ asmlinkage void __kprobes do_sparc64_fau +--- a/arch/sparc/mm/fault_64.c ++++ b/arch/sparc/mm/fault_64.c +@@ -321,7 +321,7 @@ asmlinkage void __kprobes do_sparc64_fau * If we're in an interrupt or have no user * context, we must not take the fault.. 
*/ @@ -280,11 +246,9 @@ Index: linux-stable/arch/sparc/mm/fault_64.c goto intr_or_no_mm; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); -Index: linux-stable/arch/tile/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/tile/mm/fault.c -+++ linux-stable/arch/tile/mm/fault.c -@@ -359,7 +359,7 @@ static int handle_page_fault(struct pt_r +--- a/arch/tile/mm/fault.c ++++ b/arch/tile/mm/fault.c +@@ -360,7 +360,7 @@ static int handle_page_fault(struct pt_r * If we're in an interrupt, have no user context or are running in an * atomic region then we must not take the fault. */ @@ -293,10 +257,8 @@ Index: linux-stable/arch/tile/mm/fault.c vma = NULL; /* happy compiler */ goto bad_area_nosemaphore; } -Index: linux-stable/arch/um/kernel/trap.c -=================================================================== ---- linux-stable.orig/arch/um/kernel/trap.c -+++ linux-stable/arch/um/kernel/trap.c +--- a/arch/um/kernel/trap.c ++++ b/arch/um/kernel/trap.c @@ -39,7 +39,7 @@ int handle_page_fault(unsigned long addr * If the fault was during atomic operation, don't take the fault, just * fail. 
@@ -306,11 +268,9 @@ Index: linux-stable/arch/um/kernel/trap.c goto out_nosemaphore; retry: -Index: linux-stable/arch/x86/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/x86/mm/fault.c -+++ linux-stable/arch/x86/mm/fault.c -@@ -1094,7 +1094,7 @@ do_page_fault(struct pt_regs *regs, unsi +--- a/arch/x86/mm/fault.c ++++ b/arch/x86/mm/fault.c +@@ -1108,7 +1108,7 @@ __do_page_fault(struct pt_regs *regs, un * If we're in an interrupt, have no user context or are running * in an atomic region then we must not take the fault: */ @@ -319,10 +279,8 @@ Index: linux-stable/arch/x86/mm/fault.c bad_area_nosemaphore(regs, error_code, address); return; } -Index: linux-stable/arch/xtensa/mm/fault.c -=================================================================== ---- linux-stable.orig/arch/xtensa/mm/fault.c -+++ linux-stable/arch/xtensa/mm/fault.c +--- a/arch/xtensa/mm/fault.c ++++ b/arch/xtensa/mm/fault.c @@ -57,7 +57,7 @@ void do_page_fault(struct pt_regs *regs) /* If we're in an interrupt or have no user * context, we must not take the fault.. 
@@ -332,11 +290,9 @@ Index: linux-stable/arch/xtensa/mm/fault.c bad_page_fault(regs, address, SIGSEGV); return; } -Index: linux-stable/include/linux/sched.h -=================================================================== ---- linux-stable.orig/include/linux/sched.h -+++ linux-stable/include/linux/sched.h -@@ -91,6 +91,7 @@ struct sched_param { +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -51,6 +51,7 @@ struct sched_param { #include #include #include @@ -344,7 +300,7 @@ Index: linux-stable/include/linux/sched.h #include -@@ -1448,7 +1449,9 @@ struct task_struct { +@@ -1452,7 +1453,9 @@ struct task_struct { /* mutex deadlock detection */ struct mutex_waiter *blocked_on; #endif @@ -354,9 +310,9 @@ Index: linux-stable/include/linux/sched.h #ifdef CONFIG_TRACE_IRQFLAGS unsigned int irq_events; unsigned long hardirq_enable_ip; -@@ -1600,6 +1603,17 @@ struct task_struct { - /* Future-safe accessor for struct task_struct's cpus_allowed. */ - #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) +@@ -1628,6 +1631,17 @@ static inline void set_numabalancing_sta + } + #endif +#ifdef CONFIG_PREEMPT_RT_FULL +static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; } @@ -372,11 +328,9 @@ Index: linux-stable/include/linux/sched.h /* * Priority of a process goes from 0..MAX_PRIO-1, valid RT * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH -Index: linux-stable/kernel/fork.c -=================================================================== ---- linux-stable.orig/kernel/fork.c -+++ linux-stable/kernel/fork.c -@@ -1298,7 +1298,9 @@ static struct task_struct *copy_process( +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -1285,7 +1285,9 @@ static struct task_struct *copy_process( p->hardirq_context = 0; p->softirq_context = 0; #endif diff --git a/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch b/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch index 35b36bc31..8a26a159f 100644 --- 
a/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch +++ b/debian/patches/features/all/rt/peter_zijlstra-frob-rcu.patch @@ -153,11 +153,9 @@ Signed-off-by: Peter Zijlstra kernel/rcutree_plugin.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -Index: linux-stable/kernel/rcutree_plugin.h -=================================================================== ---- linux-stable.orig/kernel/rcutree_plugin.h -+++ linux-stable/kernel/rcutree_plugin.h -@@ -331,7 +331,7 @@ void rcu_read_unlock_special(struct task +--- a/kernel/rcutree_plugin.h ++++ b/kernel/rcutree_plugin.h +@@ -351,7 +351,7 @@ void rcu_read_unlock_special(struct task } /* Hardware IRQ handlers cannot block. */ diff --git a/debian/patches/features/all/rt/peterz-raw_pagefault_disable.patch b/debian/patches/features/all/rt/peterz-raw_pagefault_disable.patch index 071208179..0ca7733ad 100644 --- a/debian/patches/features/all/rt/peterz-raw_pagefault_disable.patch +++ b/debian/patches/features/all/rt/peterz-raw_pagefault_disable.patch @@ -78,10 +78,8 @@ Link: http://lkml.kernel.org/n/tip-31keae8mkjiv8esq4rl76cib@git.kernel.org mm/memory.c | 2 ++ 2 files changed, 30 insertions(+), 2 deletions(-) -Index: linux-stable/include/linux/uaccess.h -=================================================================== ---- linux-stable.orig/include/linux/uaccess.h -+++ linux-stable/include/linux/uaccess.h +--- a/include/linux/uaccess.h ++++ b/include/linux/uaccess.h @@ -8,8 +8,34 @@ * These routines enable/disable the pagefault handler in that * it will not take any MM locks and go straight to the fixup table. 
@@ -129,11 +127,9 @@ Index: linux-stable/include/linux/uaccess.h set_fs(old_fs); \ ret; \ }) -Index: linux-stable/mm/memory.c -=================================================================== ---- linux-stable.orig/mm/memory.c -+++ linux-stable/mm/memory.c -@@ -3484,6 +3484,7 @@ unlock: +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -3669,6 +3669,7 @@ unlock: return 0; } @@ -141,10 +137,10 @@ Index: linux-stable/mm/memory.c void pagefault_disable(void) { inc_preempt_count(); -@@ -3512,6 +3513,7 @@ void pagefault_enable(void) +@@ -3697,6 +3698,7 @@ void pagefault_enable(void) preempt_check_resched(); } - EXPORT_SYMBOL_GPL(pagefault_enable); + EXPORT_SYMBOL(pagefault_enable); +#endif /* diff --git a/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch b/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch index d10596218..93d29d0bd 100644 --- a/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch +++ b/debian/patches/features/all/rt/peterz-srcu-crypto-chain.patch @@ -113,15 +113,13 @@ block the probe thread and the whole party is dead locked. 
Signed-off-by: Peter Zijlstra Signed-off-by: Thomas Gleixner --- - crypto/algapi.c | 5 +++-- + crypto/algapi.c | 4 ++-- crypto/api.c | 6 +++--- crypto/internal.h | 4 ++-- - 3 files changed, 8 insertions(+), 7 deletions(-) + 3 files changed, 7 insertions(+), 7 deletions(-) -Index: linux-stable/crypto/algapi.c -=================================================================== ---- linux-stable.orig/crypto/algapi.c -+++ linux-stable/crypto/algapi.c +--- a/crypto/algapi.c ++++ b/crypto/algapi.c @@ -683,13 +683,13 @@ EXPORT_SYMBOL_GPL(crypto_spawn_tfm2); int crypto_register_notifier(struct notifier_block *nb) @@ -138,28 +136,18 @@ Index: linux-stable/crypto/algapi.c } EXPORT_SYMBOL_GPL(crypto_unregister_notifier); -@@ -956,6 +956,7 @@ EXPORT_SYMBOL_GPL(crypto_xor); - - static int __init crypto_algapi_init(void) - { -+ srcu_init_notifier_head(&crypto_chain); - crypto_init_proc(); - return 0; - } -Index: linux-stable/crypto/api.c -=================================================================== ---- linux-stable.orig/crypto/api.c -+++ linux-stable/crypto/api.c +--- a/crypto/api.c ++++ b/crypto/api.c @@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(crypto_alg_list); DECLARE_RWSEM(crypto_alg_sem); EXPORT_SYMBOL_GPL(crypto_alg_sem); -BLOCKING_NOTIFIER_HEAD(crypto_chain); -+struct srcu_notifier_head crypto_chain; ++SRCU_NOTIFIER_HEAD(crypto_chain); EXPORT_SYMBOL_GPL(crypto_chain); static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg) -@@ -237,10 +237,10 @@ int crypto_probing_notify(unsigned long +@@ -237,10 +237,10 @@ int crypto_probing_notify(unsigned long { int ok; @@ -172,10 +160,8 @@ Index: linux-stable/crypto/api.c } return ok; -Index: linux-stable/crypto/internal.h -=================================================================== ---- linux-stable.orig/crypto/internal.h -+++ linux-stable/crypto/internal.h +--- a/crypto/internal.h ++++ b/crypto/internal.h @@ -48,7 +48,7 @@ struct crypto_larval { extern struct list_head crypto_alg_list; diff --git 
a/debian/patches/features/all/rt/pid-h-include-atomic-h.patch b/debian/patches/features/all/rt/pid-h-include-atomic-h.patch index 6be98922f..61dcbae9e 100644 --- a/debian/patches/features/all/rt/pid-h-include-atomic-h.patch +++ b/debian/patches/features/all/rt/pid-h-include-atomic-h.patch @@ -7,10 +7,8 @@ Signed-off-by: Thomas Gleixner include/linux/pid.h | 1 + 1 file changed, 1 insertion(+) -Index: linux-stable/include/linux/pid.h -=================================================================== ---- linux-stable.orig/include/linux/pid.h -+++ linux-stable/include/linux/pid.h +--- a/include/linux/pid.h ++++ b/include/linux/pid.h @@ -2,6 +2,7 @@ #define _LINUX_PID_H diff --git a/debian/patches/features/all/rt/ping-sysrq.patch b/debian/patches/features/all/rt/ping-sysrq.patch index 44743a1e5..30ce365f4 100644 --- a/debian/patches/features/all/rt/ping-sysrq.patch +++ b/debian/patches/features/all/rt/ping-sysrq.patch @@ -18,10 +18,8 @@ Signed-off-by: Carsten Emde net/ipv4/sysctl_net_ipv4.c | 7 +++++++ 4 files changed, 47 insertions(+), 2 deletions(-) -Index: linux-stable/Documentation/sysrq.txt -=================================================================== ---- linux-stable.orig/Documentation/sysrq.txt -+++ linux-stable/Documentation/sysrq.txt +--- a/Documentation/sysrq.txt ++++ b/Documentation/sysrq.txt @@ -57,10 +57,17 @@ On PowerPC - Press 'ALT - Print Screen ( On other - If you know of the key combos for other architectures, please let me know so I can add them to this section. @@ -42,11 +40,9 @@ Index: linux-stable/Documentation/sysrq.txt * What are the 'command' keys? 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 'b' - Will immediately reboot the system without syncing or unmounting -Index: linux-stable/include/net/netns/ipv4.h -=================================================================== ---- linux-stable.orig/include/net/netns/ipv4.h -+++ linux-stable/include/net/netns/ipv4.h -@@ -57,6 +57,7 @@ struct netns_ipv4 { +--- a/include/net/netns/ipv4.h ++++ b/include/net/netns/ipv4.h +@@ -56,6 +56,7 @@ struct netns_ipv4 { int sysctl_icmp_echo_ignore_all; int sysctl_icmp_echo_ignore_broadcasts; @@ -54,10 +50,8 @@ Index: linux-stable/include/net/netns/ipv4.h int sysctl_icmp_ignore_bogus_error_responses; int sysctl_icmp_ratelimit; int sysctl_icmp_ratemask; -Index: linux-stable/net/ipv4/icmp.c -=================================================================== ---- linux-stable.orig/net/ipv4/icmp.c -+++ linux-stable/net/ipv4/icmp.c +--- a/net/ipv4/icmp.c ++++ b/net/ipv4/icmp.c @@ -69,6 +69,7 @@ #include #include @@ -66,7 +60,7 @@ Index: linux-stable/net/ipv4/icmp.c #include #include #include -@@ -767,6 +768,30 @@ static void icmp_redirect(struct sk_buff +@@ -768,6 +769,30 @@ static void icmp_redirect(struct sk_buff } /* @@ -97,7 +91,7 @@ Index: linux-stable/net/ipv4/icmp.c * Handle ICMP_ECHO ("ping") requests. 
* * RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo -@@ -793,6 +818,11 @@ static void icmp_echo(struct sk_buff *sk +@@ -794,6 +819,11 @@ static void icmp_echo(struct sk_buff *sk icmp_param.data_len = skb->len; icmp_param.head_len = sizeof(struct icmphdr); icmp_reply(&icmp_param, skb); @@ -109,11 +103,9 @@ Index: linux-stable/net/ipv4/icmp.c } } -Index: linux-stable/net/ipv4/sysctl_net_ipv4.c -=================================================================== ---- linux-stable.orig/net/ipv4/sysctl_net_ipv4.c -+++ linux-stable/net/ipv4/sysctl_net_ipv4.c -@@ -756,6 +756,13 @@ static struct ctl_table ipv4_net_table[] +--- a/net/ipv4/sysctl_net_ipv4.c ++++ b/net/ipv4/sysctl_net_ipv4.c +@@ -815,6 +815,13 @@ static struct ctl_table ipv4_net_table[] .proc_handler = proc_dointvec }, { diff --git a/debian/patches/features/all/rt/posix-timers-avoid-wakeups-when-no-timers-are-active.patch b/debian/patches/features/all/rt/posix-timers-avoid-wakeups-when-no-timers-are-active.patch index edf4ecc63..c92c2f62e 100644 --- a/debian/patches/features/all/rt/posix-timers-avoid-wakeups-when-no-timers-are-active.patch +++ b/debian/patches/features/all/rt/posix-timers-avoid-wakeups-when-no-timers-are-active.patch @@ -10,11 +10,9 @@ Signed-off-by: Thomas Gleixner kernel/posix-cpu-timers.c | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) -Index: linux-stable/kernel/posix-cpu-timers.c -=================================================================== ---- linux-stable.orig/kernel/posix-cpu-timers.c -+++ linux-stable/kernel/posix-cpu-timers.c -@@ -1408,6 +1408,21 @@ wait_to_die: +--- a/kernel/posix-cpu-timers.c ++++ b/kernel/posix-cpu-timers.c +@@ -1387,6 +1387,21 @@ wait_to_die: return 0; } @@ -36,7 +34,7 @@ Index: linux-stable/kernel/posix-cpu-timers.c void run_posix_cpu_timers(struct task_struct *tsk) { unsigned long cpu = smp_processor_id(); -@@ -1420,7 +1435,7 @@ void run_posix_cpu_timers(struct task_st +@@ -1399,7 +1414,7 @@ void 
run_posix_cpu_timers(struct task_st tasklist = per_cpu(posix_timer_tasklist, cpu); /* check to see if we're already queued */ @@ -45,7 +43,7 @@ Index: linux-stable/kernel/posix-cpu-timers.c get_task_struct(tsk); if (tasklist) { tsk->posix_timer_list = tasklist; -@@ -1432,9 +1447,9 @@ void run_posix_cpu_timers(struct task_st +@@ -1411,9 +1426,9 @@ void run_posix_cpu_timers(struct task_st tsk->posix_timer_list = tsk; } per_cpu(posix_timer_tasklist, cpu) = tsk; diff --git a/debian/patches/features/all/rt/posix-timers-no-broadcast.patch b/debian/patches/features/all/rt/posix-timers-no-broadcast.patch index 46f0801c3..e0d9d7691 100644 --- a/debian/patches/features/all/rt/posix-timers-no-broadcast.patch +++ b/debian/patches/features/all/rt/posix-timers-no-broadcast.patch @@ -11,10 +11,8 @@ Signed-off-by: Thomas Gleixner kernel/posix-timers.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) -Index: linux-stable/kernel/posix-timers.c -=================================================================== ---- linux-stable.orig/kernel/posix-timers.c -+++ linux-stable/kernel/posix-timers.c +--- a/kernel/posix-timers.c ++++ b/kernel/posix-timers.c @@ -439,6 +439,7 @@ static enum hrtimer_restart posix_timer_ static struct pid *good_sigevent(sigevent_t * event) { diff --git a/debian/patches/features/all/rt/posix-timers-shorten-cpu-timers-thread.patch b/debian/patches/features/all/rt/posix-timers-shorten-cpu-timers-thread.patch index f0b8a6995..52ff46957 100644 --- a/debian/patches/features/all/rt/posix-timers-shorten-cpu-timers-thread.patch +++ b/debian/patches/features/all/rt/posix-timers-shorten-cpu-timers-thread.patch @@ -13,11 +13,9 @@ Signed-off-by: Thomas Gleixner kernel/posix-cpu-timers.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -Index: linux-stable/kernel/posix-cpu-timers.c -=================================================================== ---- linux-stable.orig/kernel/posix-cpu-timers.c -+++ linux-stable/kernel/posix-cpu-timers.c -@@ -1451,7 +1451,7 
@@ static int posix_cpu_thread_call(struct +--- a/kernel/posix-cpu-timers.c ++++ b/kernel/posix-cpu-timers.c +@@ -1430,7 +1430,7 @@ static int posix_cpu_thread_call(struct switch (action) { case CPU_UP_PREPARE: p = kthread_create(posix_cpu_timers_thread, hcpu, diff --git a/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch b/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch index ffb6d437e..91fb81cb6 100644 --- a/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch +++ b/debian/patches/features/all/rt/posix-timers-thread-posix-cpu-timers-on-rt.patch @@ -18,10 +18,8 @@ Signed-off-by: Thomas Gleixner kernel/posix-cpu-timers.c | 182 ++++++++++++++++++++++++++++++++++++++++++++-- 5 files changed, 190 insertions(+), 6 deletions(-) -Index: linux-stable/include/linux/init_task.h -=================================================================== ---- linux-stable.orig/include/linux/init_task.h -+++ linux-stable/include/linux/init_task.h +--- a/include/linux/init_task.h ++++ b/include/linux/init_task.h @@ -141,6 +141,12 @@ extern struct task_group root_task_group # define INIT_PERF_EVENTS(tsk) #endif @@ -43,11 +41,9 @@ Index: linux-stable/include/linux/init_task.h .pids = { \ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ -Index: linux-stable/include/linux/sched.h -=================================================================== ---- linux-stable.orig/include/linux/sched.h -+++ linux-stable/include/linux/sched.h -@@ -1373,6 +1373,9 @@ struct task_struct { +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1377,6 +1377,9 @@ struct task_struct { struct task_cputime cputime_expires; struct list_head cpu_timers[3]; @@ -57,23 +53,19 @@ Index: linux-stable/include/linux/sched.h /* process credentials */ const struct cred __rcu *real_cred; /* objective and real subjective task -Index: linux-stable/init/main.c 
-=================================================================== ---- linux-stable.orig/init/main.c -+++ linux-stable/init/main.c -@@ -69,6 +69,7 @@ - #include +--- a/init/main.c ++++ b/init/main.c +@@ -70,6 +70,7 @@ #include #include + #include +#include #include #include -Index: linux-stable/kernel/fork.c -=================================================================== ---- linux-stable.orig/kernel/fork.c -+++ linux-stable/kernel/fork.c -@@ -1130,6 +1130,9 @@ void mm_init_owner(struct mm_struct *mm, +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -1112,6 +1112,9 @@ void mm_init_owner(struct mm_struct *mm, */ static void posix_cpu_timers_init(struct task_struct *tsk) { @@ -83,11 +75,9 @@ Index: linux-stable/kernel/fork.c tsk->cputime_expires.prof_exp = 0; tsk->cputime_expires.virt_exp = 0; tsk->cputime_expires.sched_exp = 0; -Index: linux-stable/kernel/posix-cpu-timers.c -=================================================================== ---- linux-stable.orig/kernel/posix-cpu-timers.c -+++ linux-stable/kernel/posix-cpu-timers.c -@@ -682,7 +682,7 @@ static int posix_cpu_timer_set(struct k_ +--- a/kernel/posix-cpu-timers.c ++++ b/kernel/posix-cpu-timers.c +@@ -661,7 +661,7 @@ static int posix_cpu_timer_set(struct k_ /* * Disarm any old timer after extracting its expiry time. */ @@ -96,7 +86,7 @@ Index: linux-stable/kernel/posix-cpu-timers.c ret = 0; old_incr = timer->it.cpu.incr; -@@ -1198,7 +1198,7 @@ void posix_cpu_timer_schedule(struct k_i +@@ -1177,7 +1177,7 @@ void posix_cpu_timer_schedule(struct k_i /* * Now re-arm for the new expiry time. 
*/ @@ -105,7 +95,7 @@ Index: linux-stable/kernel/posix-cpu-timers.c arm_timer(timer); spin_unlock(&p->sighand->siglock); -@@ -1262,10 +1262,11 @@ static inline int fastpath_timer_check(s +@@ -1241,10 +1241,11 @@ static inline int fastpath_timer_check(s sig = tsk->signal; if (sig->cputimer.running) { struct task_cputime group_sample; @@ -119,7 +109,7 @@ Index: linux-stable/kernel/posix-cpu-timers.c if (task_cputime_expired(&group_sample, &sig->cputime_expires)) return 1; -@@ -1279,13 +1280,13 @@ static inline int fastpath_timer_check(s +@@ -1258,13 +1259,13 @@ static inline int fastpath_timer_check(s * already updated our counts. We need to check if any timers fire now. * Interrupts are disabled. */ @@ -135,7 +125,7 @@ Index: linux-stable/kernel/posix-cpu-timers.c /* * The fast path checks that there are no expired thread or thread -@@ -1343,6 +1344,175 @@ void run_posix_cpu_timers(struct task_st +@@ -1322,6 +1323,175 @@ void run_posix_cpu_timers(struct task_st } } @@ -280,7 +270,7 @@ Index: linux-stable/kernel/posix-cpu-timers.c +/* Register at highest priority so that task migration (migrate_all_tasks) + * happens before everything else. 
+ */ -+static struct notifier_block __devinitdata posix_cpu_thread_notifier = { ++static struct notifier_block posix_cpu_thread_notifier = { + .notifier_call = posix_cpu_thread_call, + .priority = 10 +}; diff --git a/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch b/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch index 481bfaff2..445bcf89b 100644 --- a/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch +++ b/debian/patches/features/all/rt/power-disable-highmem-on-rt.patch @@ -7,11 +7,9 @@ Signed-off-by: Thomas Gleixner arch/powerpc/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -Index: linux-stable/arch/powerpc/Kconfig -=================================================================== ---- linux-stable.orig/arch/powerpc/Kconfig -+++ linux-stable/arch/powerpc/Kconfig -@@ -278,7 +278,7 @@ menu "Kernel options" +--- a/arch/powerpc/Kconfig ++++ b/arch/powerpc/Kconfig +@@ -291,7 +291,7 @@ menu "Kernel options" config HIGHMEM bool "High memory support" diff --git a/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch b/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch index c68860d09..bb60e58a5 100644 --- a/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch +++ b/debian/patches/features/all/rt/power-use-generic-rwsem-on-rt.patch @@ -6,10 +6,8 @@ Signed-off-by: Thomas Gleixner arch/powerpc/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) -Index: linux-stable/arch/powerpc/Kconfig -=================================================================== ---- linux-stable.orig/arch/powerpc/Kconfig -+++ linux-stable/arch/powerpc/Kconfig +--- a/arch/powerpc/Kconfig ++++ b/arch/powerpc/Kconfig @@ -60,10 +60,11 @@ config LOCKDEP_SUPPORT config RWSEM_GENERIC_SPINLOCK diff --git a/debian/patches/features/all/rt/powerpc-fsl-msi-use-a-different-locklcass-for-the-ca.patch b/debian/patches/features/all/rt/powerpc-fsl-msi-use-a-different-locklcass-for-the-ca.patch new 
file mode 100644 index 000000000..982d9c6bc --- /dev/null +++ b/debian/patches/features/all/rt/powerpc-fsl-msi-use-a-different-locklcass-for-the-ca.patch @@ -0,0 +1,35 @@ +From bfc2bc8577d31ad04ae2f0619e50794e7cde9536 Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Thu, 21 Mar 2013 10:00:28 +0100 +Subject: [PATCH 1/3] powerpc/fsl-msi: use a different locklcass for the + cascade interrupt + +lockdep thinks that it might deadlock because it grabs a lock of the +same class while calling the generic_irq_handler(). This annotation will +inform lockdep that it will not. + +Signed-off-by: Sebastian Andrzej Siewior +--- + arch/powerpc/sysdev/fsl_msi.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +--- a/arch/powerpc/sysdev/fsl_msi.c ++++ b/arch/powerpc/sysdev/fsl_msi.c +@@ -333,6 +333,8 @@ static int fsl_of_msi_remove(struct plat + return 0; + } + ++static struct lock_class_key fsl_msi_irq_class; ++ + static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev, + int offset, int irq_index) + { +@@ -351,7 +353,7 @@ static int fsl_msi_setup_hwirq(struct fs + dev_err(&dev->dev, "No memory for MSI cascade data\n"); + return -ENOMEM; + } +- ++ irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class); + msi->msi_virqs[irq_index] = virt_msir; + cascade_data->index = offset; + cascade_data->msi_data = msi; diff --git a/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch b/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch index 3bbabc8e6..1e0c13fea 100644 --- a/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch +++ b/debian/patches/features/all/rt/powerpc-preempt-lazy-support.patch @@ -5,28 +5,24 @@ Date: Thu, 01 Nov 2012 10:14:11 +0100 Signed-off-by: Thomas Gleixner --- arch/powerpc/Kconfig | 1 + - arch/powerpc/include/asm/thread_info.h | 7 ++++++- + arch/powerpc/include/asm/thread_info.h | 10 ++++++++-- arch/powerpc/kernel/asm-offsets.c | 1 + - arch/powerpc/kernel/entry_32.S | 19 
+++++++++++++------ - arch/powerpc/kernel/entry_64.S | 17 +++++++++++------ - 5 files changed, 32 insertions(+), 13 deletions(-) + arch/powerpc/kernel/entry_32.S | 17 ++++++++++++----- + arch/powerpc/kernel/entry_64.S | 12 +++++++++--- + 5 files changed, 31 insertions(+), 10 deletions(-) -Index: linux-stable/arch/powerpc/Kconfig -=================================================================== ---- linux-stable.orig/arch/powerpc/Kconfig -+++ linux-stable/arch/powerpc/Kconfig -@@ -140,6 +140,7 @@ config PPC +--- a/arch/powerpc/Kconfig ++++ b/arch/powerpc/Kconfig +@@ -142,6 +142,7 @@ config PPC select GENERIC_CLOCKEVENTS select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER + select HAVE_PREEMPT_LAZY - - config EARLY_PRINTK - bool -Index: linux-stable/arch/powerpc/include/asm/thread_info.h -=================================================================== ---- linux-stable.orig/arch/powerpc/include/asm/thread_info.h -+++ linux-stable/arch/powerpc/include/asm/thread_info.h + select HAVE_MOD_ARCH_SPECIFIC + select MODULES_USE_ELF_RELA + select CLONE_BACKWARDS +--- a/arch/powerpc/include/asm/thread_info.h ++++ b/arch/powerpc/include/asm/thread_info.h @@ -43,6 +43,8 @@ struct thread_info { int cpu; /* cpu we're on */ int preempt_count; /* 0 => preemptable, @@ -36,36 +32,42 @@ Index: linux-stable/arch/powerpc/include/asm/thread_info.h struct restart_block restart_block; unsigned long local_flags; /* private flags for thread */ -@@ -102,12 +104,14 @@ static inline struct thread_info *curren +@@ -97,7 +99,7 @@ static inline struct thread_info *curren + #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */ + #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ + #define TIF_SINGLESTEP 8 /* singlestepping active */ +-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */ ++#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */ + #define TIF_SECCOMP 10 /* secure computing */ #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) 
*/ #define TIF_NOERROR 12 /* Force successful syscall return */ - #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */ -+#define TIF_NEED_RESCHED_LAZY 14 /* lazy rescheduling necessary */ +@@ -106,6 +108,7 @@ static inline struct thread_info *curren #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */ + #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation + for stack store? */ ++#define TIF_MEMDIE 17 /* is terminating due to OOM killer */ /* as above, but as bit values */ #define _TIF_SYSCALL_TRACE (1< arch/powerpc/sysdev/cpm1.c | 1 + 2 files changed, 2 insertions(+) -Index: linux-stable/arch/powerpc/platforms/8xx/m8xx_setup.c -=================================================================== ---- linux-stable.orig/arch/powerpc/platforms/8xx/m8xx_setup.c -+++ linux-stable/arch/powerpc/platforms/8xx/m8xx_setup.c +--- a/arch/powerpc/platforms/8xx/m8xx_setup.c ++++ b/arch/powerpc/platforms/8xx/m8xx_setup.c @@ -43,6 +43,7 @@ static irqreturn_t timebase_interrupt(in static struct irqaction tbint_irqaction = { @@ -25,10 +23,8 @@ Index: linux-stable/arch/powerpc/platforms/8xx/m8xx_setup.c .name = "tbint", }; -Index: linux-stable/arch/powerpc/sysdev/cpm1.c -=================================================================== ---- linux-stable.orig/arch/powerpc/sysdev/cpm1.c -+++ linux-stable/arch/powerpc/sysdev/cpm1.c +--- a/arch/powerpc/sysdev/cpm1.c ++++ b/arch/powerpc/sysdev/cpm1.c @@ -120,6 +120,7 @@ static irqreturn_t cpm_error_interrupt(i static struct irqaction cpm_error_irqaction = { diff --git a/debian/patches/features/all/rt/preempt-lazy-support.patch b/debian/patches/features/all/rt/preempt-lazy-support.patch index 6a05b6151..8a3cfa15e 100644 --- a/debian/patches/features/all/rt/preempt-lazy-support.patch +++ b/debian/patches/features/all/rt/preempt-lazy-support.patch @@ -58,17 +58,15 @@ Signed-off-by: Thomas Gleixner kernel/Kconfig.preempt | 6 ++++ kernel/sched/core.c | 60 
++++++++++++++++++++++++++++++++++++++++++- kernel/sched/fair.c | 16 +++++------ - kernel/sched/features.h | 4 ++ + kernel/sched/features.h | 3 ++ kernel/sched/sched.h | 9 ++++++ kernel/trace/trace.c | 41 +++++++++++++++++------------ kernel/trace/trace.h | 2 + kernel/trace/trace_output.c | 13 +++++++-- - 11 files changed, 207 insertions(+), 34 deletions(-) + 11 files changed, 206 insertions(+), 34 deletions(-) -Index: linux-stable/include/linux/ftrace_event.h -=================================================================== ---- linux-stable.orig/include/linux/ftrace_event.h -+++ linux-stable/include/linux/ftrace_event.h +--- a/include/linux/ftrace_event.h ++++ b/include/linux/ftrace_event.h @@ -51,6 +51,7 @@ struct trace_entry { int pid; unsigned short migrate_disable; @@ -77,10 +75,8 @@ Index: linux-stable/include/linux/ftrace_event.h }; #define FTRACE_MAX_EVENT \ -Index: linux-stable/include/linux/preempt.h -=================================================================== ---- linux-stable.orig/include/linux/preempt.h -+++ linux-stable/include/linux/preempt.h +--- a/include/linux/preempt.h ++++ b/include/linux/preempt.h @@ -23,15 +23,38 @@ #define preempt_count() (current_thread_info()->preempt_count) @@ -134,25 +130,23 @@ Index: linux-stable/include/linux/preempt.h #define sched_preempt_enable_no_resched() \ do { \ barrier(); \ -@@ -69,6 +98,13 @@ do { \ +@@ -68,6 +97,13 @@ do { \ + barrier(); \ preempt_check_resched(); \ } while (0) - ++ +#define preempt_lazy_enable() \ +do { \ + dec_preempt_lazy_count(); \ + barrier(); \ + preempt_check_resched(); \ +} while (0) -+ + /* For debugging and tracer internals only! 
*/ #define add_preempt_count_notrace(val) \ - do { preempt_count() += (val); } while (0) -Index: linux-stable/include/linux/sched.h -=================================================================== ---- linux-stable.orig/include/linux/sched.h -+++ linux-stable/include/linux/sched.h -@@ -2660,6 +2660,52 @@ static inline int test_tsk_need_resched( +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -2691,6 +2691,52 @@ static inline int test_tsk_need_resched( return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); } @@ -205,7 +199,7 @@ Index: linux-stable/include/linux/sched.h static inline int restart_syscall(void) { set_tsk_thread_flag(current, TIF_SIGPENDING); -@@ -2691,11 +2737,6 @@ static inline int signal_pending_state(l +@@ -2722,11 +2768,6 @@ static inline int signal_pending_state(l return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); } @@ -217,10 +211,8 @@ Index: linux-stable/include/linux/sched.h /* * cond_resched() and cond_resched_lock(): latency reduction via * explicit rescheduling in places that are safe. 
The return -Index: linux-stable/kernel/Kconfig.preempt -=================================================================== ---- linux-stable.orig/kernel/Kconfig.preempt -+++ linux-stable/kernel/Kconfig.preempt +--- a/kernel/Kconfig.preempt ++++ b/kernel/Kconfig.preempt @@ -6,6 +6,12 @@ config PREEMPT_RT_BASE bool select PREEMPT @@ -234,11 +226,9 @@ Index: linux-stable/kernel/Kconfig.preempt choice prompt "Preemption Model" default PREEMPT_NONE -Index: linux-stable/kernel/sched/core.c -=================================================================== ---- linux-stable.orig/kernel/sched/core.c -+++ linux-stable/kernel/sched/core.c -@@ -534,6 +534,37 @@ void resched_task(struct task_struct *p) +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -543,6 +543,37 @@ void resched_task(struct task_struct *p) smp_send_reschedule(cpu); } @@ -276,7 +266,7 @@ Index: linux-stable/kernel/sched/core.c void resched_cpu(int cpu) { struct rq *rq = cpu_rq(cpu); -@@ -650,6 +681,17 @@ void resched_task(struct task_struct *p) +@@ -659,6 +690,17 @@ void resched_task(struct task_struct *p) assert_raw_spin_locked(&task_rq(p)->lock); set_tsk_need_resched(p); } @@ -294,7 +284,7 @@ Index: linux-stable/kernel/sched/core.c #endif /* CONFIG_SMP */ #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \ -@@ -1838,6 +1880,9 @@ void sched_fork(struct task_struct *p) +@@ -1718,6 +1760,9 @@ void sched_fork(struct task_struct *p) /* Want to start with kernel preemption disabled. 
*/ task_thread_info(p)->preempt_count = 1; #endif @@ -304,7 +294,7 @@ Index: linux-stable/kernel/sched/core.c #ifdef CONFIG_SMP plist_node_init(&p->pushable_tasks, MAX_PRIO); #endif -@@ -3448,6 +3493,7 @@ void migrate_disable(void) +@@ -2926,6 +2971,7 @@ void migrate_disable(void) return; } @@ -312,7 +302,7 @@ Index: linux-stable/kernel/sched/core.c pin_current_cpu(); p->migrate_disable = 1; preempt_enable(); -@@ -3503,6 +3549,7 @@ void migrate_enable(void) +@@ -2981,6 +3027,7 @@ void migrate_enable(void) unpin_current_cpu(); preempt_enable(); @@ -320,7 +310,7 @@ Index: linux-stable/kernel/sched/core.c } EXPORT_SYMBOL(migrate_enable); #else -@@ -3603,6 +3650,7 @@ need_resched: +@@ -3115,6 +3162,7 @@ need_resched: put_prev_task(rq, prev); next = pick_next_task(rq); clear_tsk_need_resched(prev); @@ -328,7 +318,7 @@ Index: linux-stable/kernel/sched/core.c rq->skip_clock_update = 0; if (likely(prev != next)) { -@@ -3724,6 +3772,14 @@ asmlinkage void __sched notrace preempt_ +@@ -3251,6 +3299,14 @@ asmlinkage void __sched notrace preempt_ if (likely(ti->preempt_count || irqs_disabled())) return; @@ -343,7 +333,7 @@ Index: linux-stable/kernel/sched/core.c do { add_preempt_count_notrace(PREEMPT_ACTIVE); /* -@@ -5331,7 +5387,9 @@ void __cpuinit init_idle(struct task_str +@@ -4862,7 +4918,9 @@ void __cpuinit init_idle(struct task_str /* Set the preempt count _outside_ the spinlocks! 
*/ task_thread_info(idle)->preempt_count = 0; @@ -354,11 +344,9 @@ Index: linux-stable/kernel/sched/core.c /* * The idle tasks have their own, simple scheduling class: */ -Index: linux-stable/kernel/sched/fair.c -=================================================================== ---- linux-stable.orig/kernel/sched/fair.c -+++ linux-stable/kernel/sched/fair.c -@@ -1222,7 +1222,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -1827,7 +1827,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq ideal_runtime = sched_slice(cfs_rq, curr); delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; if (delta_exec > ideal_runtime) { @@ -367,7 +355,7 @@ Index: linux-stable/kernel/sched/fair.c /* * The current task ran long enough, ensure it doesn't get * re-elected due to buddy favours. -@@ -1246,7 +1246,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq +@@ -1851,7 +1851,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq return; if (delta > ideal_runtime) @@ -376,7 +364,7 @@ Index: linux-stable/kernel/sched/fair.c } static void -@@ -1363,7 +1363,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc +@@ -1971,7 +1971,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc * validating it and just reschedule. 
*/ if (queued) { @@ -385,7 +373,7 @@ Index: linux-stable/kernel/sched/fair.c return; } /* -@@ -1543,7 +1543,7 @@ static void __account_cfs_rq_runtime(str +@@ -2160,7 +2160,7 @@ static void __account_cfs_rq_runtime(str * hierarchy can be throttled */ if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) @@ -394,7 +382,7 @@ Index: linux-stable/kernel/sched/fair.c } static __always_inline -@@ -2129,7 +2129,7 @@ static void hrtick_start_fair(struct rq +@@ -2745,7 +2745,7 @@ static void hrtick_start_fair(struct rq if (delta < 0) { if (rq->curr == p) @@ -403,7 +391,7 @@ Index: linux-stable/kernel/sched/fair.c return; } -@@ -2954,7 +2954,7 @@ static void check_preempt_wakeup(struct +@@ -3577,7 +3577,7 @@ static void check_preempt_wakeup(struct return; preempt: @@ -412,7 +400,7 @@ Index: linux-stable/kernel/sched/fair.c /* * Only set the backward buddy when the current task is still * on the rq. This can happen when a wakeup gets interleaved -@@ -5027,7 +5027,7 @@ static void task_fork_fair(struct task_s +@@ -5772,7 +5772,7 @@ static void task_fork_fair(struct task_s * 'current' within the tree based on its new key value. 
*/ swap(curr->vruntime, se->vruntime); @@ -421,7 +409,7 @@ Index: linux-stable/kernel/sched/fair.c } se->vruntime -= cfs_rq->min_vruntime; -@@ -5052,7 +5052,7 @@ prio_changed_fair(struct rq *rq, struct +@@ -5797,7 +5797,7 @@ prio_changed_fair(struct rq *rq, struct */ if (rq->curr == p) { if (p->prio > oldprio) @@ -430,11 +418,9 @@ Index: linux-stable/kernel/sched/fair.c } else check_preempt_curr(rq, p, 0); } -Index: linux-stable/kernel/sched/features.h -=================================================================== ---- linux-stable.orig/kernel/sched/features.h -+++ linux-stable/kernel/sched/features.h -@@ -68,6 +68,9 @@ SCHED_FEAT(NONTASK_POWER, true) +--- a/kernel/sched/features.h ++++ b/kernel/sched/features.h +@@ -65,6 +65,9 @@ SCHED_FEAT(NONTASK_POWER, true) SCHED_FEAT(TTWU_QUEUE, true) #else SCHED_FEAT(TTWU_QUEUE, false) @@ -444,11 +430,9 @@ Index: linux-stable/kernel/sched/features.h #endif SCHED_FEAT(FORCE_SD_OVERLAP, false) -Index: linux-stable/kernel/sched/sched.h -=================================================================== ---- linux-stable.orig/kernel/sched/sched.h -+++ linux-stable/kernel/sched/sched.h -@@ -876,6 +876,15 @@ extern void init_sched_fair_class(void); +--- a/kernel/sched/sched.h ++++ b/kernel/sched/sched.h +@@ -897,6 +897,15 @@ extern void init_sched_fair_class(void); extern void resched_task(struct task_struct *p); extern void resched_cpu(int cpu); @@ -464,11 +448,9 @@ Index: linux-stable/kernel/sched/sched.h extern struct rt_bandwidth def_rt_bandwidth; extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); -Index: linux-stable/kernel/trace/trace.c -=================================================================== ---- linux-stable.orig/kernel/trace/trace.c -+++ linux-stable/kernel/trace/trace.c -@@ -1152,6 +1152,7 @@ tracing_generic_entry_update(struct trac +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -1166,6 +1166,7 @@ tracing_generic_entry_update(struct trac struct task_struct 
*tsk = current; entry->preempt_count = pc & 0xff; @@ -476,7 +458,7 @@ Index: linux-stable/kernel/trace/trace.c entry->pid = (tsk) ? tsk->pid : 0; entry->padding = 0; entry->flags = -@@ -1162,7 +1163,8 @@ tracing_generic_entry_update(struct trac +@@ -1176,7 +1177,8 @@ tracing_generic_entry_update(struct trac #endif ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | @@ -486,7 +468,7 @@ Index: linux-stable/kernel/trace/trace.c entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0; } -@@ -1985,15 +1987,17 @@ get_total_entries(struct trace_array *tr +@@ -2031,15 +2033,17 @@ get_total_entries(struct trace_array *tr static void print_lat_help_header(struct seq_file *m) { @@ -513,7 +495,7 @@ Index: linux-stable/kernel/trace/trace.c } static void print_event_info(struct trace_array *tr, struct seq_file *m) -@@ -2017,13 +2021,16 @@ static void print_func_help_header(struc +@@ -2063,13 +2067,16 @@ static void print_func_help_header(struc static void print_func_help_header_irq(struct trace_array *tr, struct seq_file *m) { print_event_info(tr, m); @@ -537,10 +519,8 @@ Index: linux-stable/kernel/trace/trace.c } void -Index: linux-stable/kernel/trace/trace.h -=================================================================== ---- linux-stable.orig/kernel/trace/trace.h -+++ linux-stable/kernel/trace/trace.h +--- a/kernel/trace/trace.h ++++ b/kernel/trace/trace.h @@ -116,6 +116,7 @@ struct uprobe_trace_entry_head { * NEED_RESCHED - reschedule is requested * HARDIRQ - inside an interrupt handler @@ -557,10 +537,8 @@ Index: linux-stable/kernel/trace/trace.h }; #define TRACE_BUF_SIZE 1024 -Index: linux-stable/kernel/trace/trace_output.c -=================================================================== ---- linux-stable.orig/kernel/trace/trace_output.c -+++ linux-stable/kernel/trace/trace_output.c +--- a/kernel/trace/trace_output.c ++++ b/kernel/trace/trace_output.c @@ -564,6 +564,7 @@ int trace_print_lat_fmt(struct 
trace_seq { char hardsoft_irq; diff --git a/debian/patches/features/all/rt/preempt-nort-rt-variants.patch b/debian/patches/features/all/rt/preempt-nort-rt-variants.patch index 94549dded..7a045c525 100644 --- a/debian/patches/features/all/rt/preempt-nort-rt-variants.patch +++ b/debian/patches/features/all/rt/preempt-nort-rt-variants.patch @@ -11,10 +11,8 @@ Signed-off-by: Thomas Gleixner include/linux/preempt.h | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) -Index: linux-stable/include/linux/preempt.h -=================================================================== ---- linux-stable.orig/include/linux/preempt.h -+++ linux-stable/include/linux/preempt.h +--- a/include/linux/preempt.h ++++ b/include/linux/preempt.h @@ -54,11 +54,15 @@ do { \ dec_preempt_count(); \ } while (0) diff --git a/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch b/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch index 6abf87eea..b8be1619c 100644 --- a/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch +++ b/debian/patches/features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch @@ -17,11 +17,9 @@ Link: http://lkml.kernel.org/n/tip-ykb97nsfmobq44xketrxs977@git.kernel.org kernel/printk.c | 7 +++++++ 1 file changed, 7 insertions(+) -Index: linux-stable/kernel/printk.c -=================================================================== ---- linux-stable.orig/kernel/printk.c -+++ linux-stable/kernel/printk.c -@@ -1255,6 +1255,13 @@ asmlinkage void early_printk(const char +--- a/kernel/printk.c ++++ b/kernel/printk.c +@@ -779,6 +779,13 @@ asmlinkage void early_printk(const char */ static bool __read_mostly printk_killswitch; diff --git a/debian/patches/features/all/rt/printk-kill.patch b/debian/patches/features/all/rt/printk-kill.patch index c8f21fa33..f864fac0b 100644 --- 
a/debian/patches/features/all/rt/printk-kill.patch +++ b/debian/patches/features/all/rt/printk-kill.patch @@ -9,10 +9,8 @@ Signed-off-by: Thomas Gleixner kernel/watchdog.c | 15 +++++++++++++-- 3 files changed, 48 insertions(+), 3 deletions(-) -Index: linux-stable/include/linux/printk.h -=================================================================== ---- linux-stable.orig/include/linux/printk.h -+++ linux-stable/include/linux/printk.h +--- a/include/linux/printk.h ++++ b/include/linux/printk.h @@ -99,9 +99,11 @@ int no_printk(const char *fmt, ...) extern asmlinkage __printf(1, 2) void early_printk(const char *fmt, ...); @@ -33,11 +31,9 @@ Index: linux-stable/include/linux/printk.h extern int printk_delay_msec; extern int dmesg_restrict; extern int kptr_restrict; -Index: linux-stable/kernel/printk.c -=================================================================== ---- linux-stable.orig/kernel/printk.c -+++ linux-stable/kernel/printk.c -@@ -1246,6 +1246,32 @@ asmlinkage void early_printk(const char +--- a/kernel/printk.c ++++ b/kernel/printk.c +@@ -770,6 +770,32 @@ asmlinkage void early_printk(const char early_vprintk(fmt, ap); va_end(ap); } @@ -69,8 +65,8 @@ Index: linux-stable/kernel/printk.c +} #endif - static bool __read_mostly ignore_loglevel; -@@ -1508,6 +1534,13 @@ asmlinkage int vprintk_emit(int facility + #ifdef CONFIG_BOOT_PRINTK_DELAY +@@ -1511,6 +1537,13 @@ asmlinkage int vprintk_emit(int facility int this_cpu; int printed_len = 0; @@ -81,14 +77,12 @@ Index: linux-stable/kernel/printk.c + if (unlikely(forced_early_printk(fmt, args))) + return 1; + - boot_delay_msec(); + boot_delay_msec(level); printk_delay(); -Index: linux-stable/kernel/watchdog.c -=================================================================== ---- linux-stable.orig/kernel/watchdog.c -+++ linux-stable/kernel/watchdog.c -@@ -202,6 +202,8 @@ static int is_softlockup(unsigned long t +--- a/kernel/watchdog.c ++++ b/kernel/watchdog.c +@@ -206,6 +206,8 @@ static int 
is_softlockup(unsigned long t #ifdef CONFIG_HARDLOCKUP_DETECTOR @@ -97,7 +91,7 @@ Index: linux-stable/kernel/watchdog.c static struct perf_event_attr wd_hw_attr = { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES, -@@ -236,10 +238,19 @@ static void watchdog_overflow_callback(s +@@ -240,10 +242,19 @@ static void watchdog_overflow_callback(s if (__this_cpu_read(hard_watchdog_warn) == true) return; diff --git a/debian/patches/features/all/rt/printk-rt-aware.patch b/debian/patches/features/all/rt/printk-rt-aware.patch index 2529fcd4b..b9c221ae2 100644 --- a/debian/patches/features/all/rt/printk-rt-aware.patch +++ b/debian/patches/features/all/rt/printk-rt-aware.patch @@ -7,11 +7,9 @@ Signed-off-by: Thomas Gleixner kernel/printk.c | 33 +++++++++++++++++++++++++++++---- 1 file changed, 29 insertions(+), 4 deletions(-) -Index: linux-stable/kernel/printk.c -=================================================================== ---- linux-stable.orig/kernel/printk.c -+++ linux-stable/kernel/printk.c -@@ -1312,6 +1312,7 @@ static void call_console_drivers(int lev +--- a/kernel/printk.c ++++ b/kernel/printk.c +@@ -1315,6 +1315,7 @@ static void call_console_drivers(int lev if (!console_drivers) return; @@ -19,7 +17,7 @@ Index: linux-stable/kernel/printk.c for_each_console(con) { if (exclusive_console && con != exclusive_console) continue; -@@ -1324,6 +1325,7 @@ static void call_console_drivers(int lev +@@ -1327,6 +1328,7 @@ static void call_console_drivers(int lev continue; con->write(con, text, len); } @@ -27,7 +25,7 @@ Index: linux-stable/kernel/printk.c } /* -@@ -1383,12 +1385,18 @@ static inline int can_use_console(unsign +@@ -1386,12 +1388,18 @@ static inline int can_use_console(unsign * interrupts disabled. It should return with 'lockbuf_lock' * released but interrupts still disabled. 
*/ @@ -48,7 +46,7 @@ Index: linux-stable/kernel/printk.c retval = 1; /* -@@ -1667,8 +1675,15 @@ asmlinkage int vprintk_emit(int facility +@@ -1670,8 +1678,15 @@ asmlinkage int vprintk_emit(int facility * The console_trylock_for_printk() function will release 'logbuf_lock' * regardless of whether it actually gets the console semaphore or not. */ @@ -65,7 +63,7 @@ Index: linux-stable/kernel/printk.c lockdep_on(); out_restore_irqs: -@@ -2057,11 +2072,16 @@ static void console_cont_flush(char *tex +@@ -2060,11 +2075,16 @@ static void console_cont_flush(char *tex goto out; len = cont_print_text(text, size); @@ -82,7 +80,7 @@ Index: linux-stable/kernel/printk.c return; out: raw_spin_unlock_irqrestore(&logbuf_lock, flags); -@@ -2144,12 +2164,17 @@ skip: +@@ -2147,12 +2167,17 @@ skip: console_idx = log_next(console_idx); console_seq++; console_prev = msg->flags; diff --git a/debian/patches/features/all/rt/radix-tree-rt-aware.patch b/debian/patches/features/all/rt/radix-tree-rt-aware.patch index 102f0b63f..f92a9eeb9 100644 --- a/debian/patches/features/all/rt/radix-tree-rt-aware.patch +++ b/debian/patches/features/all/rt/radix-tree-rt-aware.patch @@ -8,10 +8,8 @@ Signed-off-by: Thomas Gleixner lib/radix-tree.c | 5 ++++- 2 files changed, 11 insertions(+), 2 deletions(-) -Index: linux-stable/include/linux/radix-tree.h -=================================================================== ---- linux-stable.orig/include/linux/radix-tree.h -+++ linux-stable/include/linux/radix-tree.h +--- a/include/linux/radix-tree.h ++++ b/include/linux/radix-tree.h @@ -230,7 +230,13 @@ unsigned long radix_tree_next_hole(struc unsigned long index, unsigned long max_scan); unsigned long radix_tree_prev_hole(struct radix_tree_root *root, @@ -35,10 +33,8 @@ Index: linux-stable/include/linux/radix-tree.h } /** -Index: linux-stable/lib/radix-tree.c -=================================================================== ---- linux-stable.orig/lib/radix-tree.c -+++ linux-stable/lib/radix-tree.c +--- 
a/lib/radix-tree.c ++++ b/lib/radix-tree.c @@ -215,12 +215,13 @@ radix_tree_node_alloc(struct radix_tree_ * succeed in getting a node here (and never reach * kmem_cache_alloc) diff --git a/debian/patches/features/all/rt/random-make-it-work-on-rt.patch b/debian/patches/features/all/rt/random-make-it-work-on-rt.patch index edade40b8..825b744b7 100644 --- a/debian/patches/features/all/rt/random-make-it-work-on-rt.patch +++ b/debian/patches/features/all/rt/random-make-it-work-on-rt.patch @@ -17,11 +17,9 @@ Cc: stable-rt@vger.kernel.org kernel/irq/manage.c | 6 ++++++ 5 files changed, 21 insertions(+), 6 deletions(-) -Index: linux-stable/drivers/char/random.c -=================================================================== ---- linux-stable.orig/drivers/char/random.c -+++ linux-stable/drivers/char/random.c -@@ -745,18 +745,16 @@ EXPORT_SYMBOL_GPL(add_input_randomness); +--- a/drivers/char/random.c ++++ b/drivers/char/random.c +@@ -742,18 +742,16 @@ EXPORT_SYMBOL_GPL(add_input_randomness); static DEFINE_PER_CPU(struct fast_pool, irq_randomness); @@ -42,7 +40,7 @@ Index: linux-stable/drivers/char/random.c input[2] = ip; input[3] = ip >> 32; } -@@ -770,7 +768,11 @@ void add_interrupt_randomness(int irq, i +@@ -767,7 +765,11 @@ void add_interrupt_randomness(int irq, i fast_pool->last = now; r = nonblocking_pool.initialized ? 
&input_pool : &nonblocking_pool; @@ -54,10 +52,8 @@ Index: linux-stable/drivers/char/random.c /* * If we don't have a valid cycle counter, and we see * back-to-back timer interrupts, then skip giving credit for -Index: linux-stable/include/linux/irqdesc.h -=================================================================== ---- linux-stable.orig/include/linux/irqdesc.h -+++ linux-stable/include/linux/irqdesc.h +--- a/include/linux/irqdesc.h ++++ b/include/linux/irqdesc.h @@ -52,6 +52,7 @@ struct irq_desc { unsigned int irq_count; /* For detecting broken IRQs */ unsigned long last_unhandled; /* Aging timer for unhandled count */ @@ -66,11 +62,9 @@ Index: linux-stable/include/linux/irqdesc.h raw_spinlock_t lock; struct cpumask *percpu_enabled; #ifdef CONFIG_SMP -Index: linux-stable/include/linux/random.h -=================================================================== ---- linux-stable.orig/include/linux/random.h -+++ linux-stable/include/linux/random.h -@@ -51,7 +51,7 @@ struct rnd_state { +--- a/include/linux/random.h ++++ b/include/linux/random.h +@@ -12,7 +12,7 @@ extern void add_device_randomness(const void *, unsigned int); extern void add_input_randomness(unsigned int type, unsigned int code, unsigned int value); @@ -79,10 +73,8 @@ Index: linux-stable/include/linux/random.h extern void get_random_bytes(void *buf, int nbytes); extern void get_random_bytes_arch(void *buf, int nbytes); -Index: linux-stable/kernel/irq/handle.c -=================================================================== ---- linux-stable.orig/kernel/irq/handle.c -+++ linux-stable/kernel/irq/handle.c +--- a/kernel/irq/handle.c ++++ b/kernel/irq/handle.c @@ -132,6 +132,8 @@ static void irq_wake_thread(struct irq_d irqreturn_t handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action) @@ -92,7 +84,7 @@ Index: linux-stable/kernel/irq/handle.c irqreturn_t retval = IRQ_NONE; unsigned int flags = 0, irq = desc->irq_data.irq; -@@ -172,7 +174,11 @@ handle_irq_event_percpu(struct 
irq_desc +@@ -172,7 +174,11 @@ handle_irq_event_percpu(struct irq_desc action = action->next; } while (action); @@ -105,18 +97,16 @@ Index: linux-stable/kernel/irq/handle.c if (!noirqdebug) note_interrupt(irq, desc, retval); -Index: linux-stable/kernel/irq/manage.c -=================================================================== ---- linux-stable.orig/kernel/irq/manage.c -+++ linux-stable/kernel/irq/manage.c -@@ -852,6 +852,12 @@ static int irq_thread(void *data) +--- a/kernel/irq/manage.c ++++ b/kernel/irq/manage.c +@@ -879,6 +879,12 @@ static int irq_thread(void *data) if (!noirqdebug) note_interrupt(action->irq, desc, action_ret); +#ifdef CONFIG_PREEMPT_RT_FULL + migrate_disable(); + add_interrupt_randomness(action->irq, 0, -+ desc->random_ip ^ (u64) action); ++ desc->random_ip ^ (unsigned long) action); + migrate_enable(); +#endif wake_threads_waitq(desc); diff --git a/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch b/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch index b4fcd787b..02063ebe2 100644 --- a/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch +++ b/debian/patches/features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch @@ -11,11 +11,9 @@ Cc: stable-rt@vger.kernel.org init/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -Index: linux-stable/init/Kconfig -=================================================================== ---- linux-stable.orig/init/Kconfig -+++ linux-stable/init/Kconfig -@@ -504,7 +504,7 @@ config RCU_FANOUT_EXACT +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -579,7 +579,7 @@ config RCU_FANOUT_EXACT config RCU_FAST_NO_HZ bool "Accelerate last non-dyntick-idle CPU's grace periods" @@ -23,4 +21,4 @@ Index: linux-stable/init/Kconfig + depends on NO_HZ && SMP && !PREEMPT_RT_FULL default n help - This option causes RCU to attempt to accelerate grace periods + This option causes RCU to attempt to accelerate grace periods in diff --git 
a/debian/patches/features/all/rt/rcu-fix-build-break.patch b/debian/patches/features/all/rt/rcu-fix-build-break.patch deleted file mode 100644 index ecb6d96c9..000000000 --- a/debian/patches/features/all/rt/rcu-fix-build-break.patch +++ /dev/null @@ -1,60 +0,0 @@ -Subject: rcu: Fix build break -From: John Kacur -Date: Fri, 13 Apr 2012 12:54:21 +0200 - -A build break can occur with the following config options enabled - -CONFIG_PREEMPT_RT_FULL -CONFIG_RCU_FAST_NO_HZ -CONFIG_RCU_CPU_STALL_INFO - -This occurs because symbols in print_cpu_stall_fast_no_hz() such as -rcu_idle_gp_timer are not accessible with CONFIG_PREEMPT_RT_FULL -and results in the following type of build errors: - -kernel/rcutree_plugin.h: In function ‘print_cpu_stall_fast_no_hz’: -kernel/rcutree_plugin.h:2195: error: ‘rcu_idle_gp_timer’ undeclared (first use in this function) - -This patch fixes the build break by limiting the PREEMPT_RT_FULL -section to the function rcu_needs_cpu() instead of to the entire -!defined(CONFIG_RCU_FAST_NO_NZ) section as was intended in the -original "rcu: Make ksoftirqd do RCU quiescent states" patch. - -Signed-off-by: John Kacur -Cc: Paul McKenney -Link: http://lkml.kernel.org/r/1334314461-8937-1-git-send-email-jkacur@redhat.com -Signed-off-by: Thomas Gleixner ---- - kernel/rcutree_plugin.h | 5 +++++ - 1 file changed, 5 insertions(+) - -Index: linux-stable/kernel/rcutree_plugin.h -=================================================================== ---- linux-stable.orig/kernel/rcutree_plugin.h -+++ linux-stable/kernel/rcutree_plugin.h -@@ -1743,6 +1743,9 @@ int rcu_needs_cpu(int cpu, unsigned long - *delta_jiffies = ULONG_MAX; - return rcu_cpu_has_callbacks(cpu); - } -+#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */ -+ -+#if !defined(CONFIG_RCU_FAST_NO_HZ) - - /* - * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it. 
-@@ -1859,6 +1862,7 @@ static bool rcu_cpu_has_nonlazy_callback - rcu_preempt_cpu_has_nonlazy_callbacks(cpu); - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - /* - * Allow the CPU to enter dyntick-idle mode if either: (1) There are no - * callbacks on this CPU, (2) this CPU has not yet attempted to enter -@@ -1902,6 +1906,7 @@ int rcu_needs_cpu(int cpu, unsigned long - } - return 0; - } -+#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */ - - /* - * Handler for smp_call_function_single(). The only point of this diff --git a/debian/patches/features/all/rt/rcu-force-preempt-rcu-for-rt.patch b/debian/patches/features/all/rt/rcu-force-preempt-rcu-for-rt.patch deleted file mode 100644 index 5de6462d2..000000000 --- a/debian/patches/features/all/rt/rcu-force-preempt-rcu-for-rt.patch +++ /dev/null @@ -1,28 +0,0 @@ -Subject: RCU: Force PREEMPT_RCU for PREEMPT-RT -From: Ingo Molnar -Date: Fri, 3 Jul 2009 08:30:30 -0500 - -PREEMPT_RT relies on PREEMPT_RCU - only allow RCU to be configured -interactively in the !PREEMPT_RT case. 
- -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner - -Signed-off-by: Peter Zijlstra -Link: http://lkml.kernel.org/n/tip-j1y0phicu6s6pu8guku2vca0@git.kernel.org ---- - init/Kconfig | 1 - - 1 file changed, 1 deletion(-) - -Index: linux-stable/init/Kconfig -=================================================================== ---- linux-stable.orig/init/Kconfig -+++ linux-stable/init/Kconfig -@@ -806,7 +806,6 @@ config RT_GROUP_SCHED - bool "Group scheduling for SCHED_RR/FIFO" - depends on EXPERIMENTAL - depends on CGROUP_SCHED -- depends on !PREEMPT_RT_FULL - default n - help - This feature lets you explicitly allocate real CPU bandwidth diff --git a/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch b/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch index 755ae4d10..beff51cb5 100644 --- a/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch +++ b/debian/patches/features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch @@ -30,11 +30,9 @@ Signed-off-by: Thomas Gleixner kernel/rcutree.c | 10 ++++++++++ 4 files changed, 53 insertions(+), 2 deletions(-) -Index: linux-stable/include/linux/rcupdate.h -=================================================================== ---- linux-stable.orig/include/linux/rcupdate.h -+++ linux-stable/include/linux/rcupdate.h -@@ -101,6 +101,9 @@ extern void call_rcu(struct rcu_head *he +--- a/include/linux/rcupdate.h ++++ b/include/linux/rcupdate.h +@@ -120,6 +120,9 @@ extern void call_rcu(struct rcu_head *he #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ @@ -44,7 +42,7 @@ Index: linux-stable/include/linux/rcupdate.h /** * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. * @head: structure to be used for queueing the RCU updates. 
-@@ -121,6 +124,7 @@ extern void call_rcu(struct rcu_head *he +@@ -143,6 +146,7 @@ extern void call_rcu(struct rcu_head *he */ extern void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *head)); @@ -52,7 +50,7 @@ Index: linux-stable/include/linux/rcupdate.h /** * call_rcu_sched() - Queue an RCU for invocation after sched grace period. -@@ -191,7 +195,13 @@ static inline int rcu_preempt_depth(void +@@ -216,7 +220,13 @@ static inline int rcu_preempt_depth(void /* Internal to kernel */ extern void rcu_sched_qs(int cpu); @@ -66,7 +64,7 @@ Index: linux-stable/include/linux/rcupdate.h extern void rcu_check_callbacks(int cpu, int user); struct notifier_block; extern void rcu_idle_enter(void); -@@ -328,7 +338,14 @@ static inline int rcu_read_lock_held(voi +@@ -366,7 +376,14 @@ static inline int rcu_read_lock_held(voi * rcu_read_lock_bh_held() is defined out of line to avoid #include-file * hell. */ @@ -81,7 +79,7 @@ Index: linux-stable/include/linux/rcupdate.h /** * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section? 
-@@ -776,10 +793,14 @@ static inline void rcu_read_unlock(void) +@@ -814,10 +831,14 @@ static inline void rcu_read_unlock(void) static inline void rcu_read_lock_bh(void) { local_bh_disable(); @@ -96,7 +94,7 @@ Index: linux-stable/include/linux/rcupdate.h } /* -@@ -789,10 +810,14 @@ static inline void rcu_read_lock_bh(void +@@ -827,10 +848,14 @@ static inline void rcu_read_lock_bh(void */ static inline void rcu_read_unlock_bh(void) { @@ -111,10 +109,8 @@ Index: linux-stable/include/linux/rcupdate.h local_bh_enable(); } -Index: linux-stable/include/linux/rcutree.h -=================================================================== ---- linux-stable.orig/include/linux/rcutree.h -+++ linux-stable/include/linux/rcutree.h +--- a/include/linux/rcutree.h ++++ b/include/linux/rcutree.h @@ -45,7 +45,11 @@ static inline void rcu_virt_note_context rcu_note_context_switch(cpu); } @@ -160,11 +156,9 @@ Index: linux-stable/include/linux/rcutree.h +#endif + #endif /* __LINUX_RCUTREE_H */ -Index: linux-stable/kernel/rcupdate.c -=================================================================== ---- linux-stable.orig/kernel/rcupdate.c -+++ linux-stable/kernel/rcupdate.c -@@ -149,6 +149,7 @@ int debug_lockdep_rcu_enabled(void) +--- a/kernel/rcupdate.c ++++ b/kernel/rcupdate.c +@@ -156,6 +156,7 @@ int debug_lockdep_rcu_enabled(void) } EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled); @@ -172,7 +166,7 @@ Index: linux-stable/kernel/rcupdate.c /** * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section? 
* -@@ -175,6 +176,7 @@ int rcu_read_lock_bh_held(void) +@@ -182,6 +183,7 @@ int rcu_read_lock_bh_held(void) return in_softirq() || irqs_disabled(); } EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); @@ -180,11 +174,9 @@ Index: linux-stable/kernel/rcupdate.c #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ -Index: linux-stable/kernel/rcutree.c -=================================================================== ---- linux-stable.orig/kernel/rcutree.c -+++ linux-stable/kernel/rcutree.c -@@ -182,6 +182,7 @@ void rcu_sched_qs(int cpu) +--- a/kernel/rcutree.c ++++ b/kernel/rcutree.c +@@ -181,6 +181,7 @@ void rcu_sched_qs(int cpu) rdp->passed_quiesce = 1; } @@ -192,7 +184,7 @@ Index: linux-stable/kernel/rcutree.c void rcu_bh_qs(int cpu) { struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); -@@ -192,6 +193,7 @@ void rcu_bh_qs(int cpu) +@@ -189,6 +190,7 @@ void rcu_bh_qs(int cpu) trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs"); rdp->passed_quiesce = 1; } @@ -200,7 +192,7 @@ Index: linux-stable/kernel/rcutree.c /* * Note a context switch. This is a quiescent state for RCU-sched, -@@ -238,6 +240,7 @@ long rcu_batches_completed_sched(void) +@@ -242,6 +244,7 @@ long rcu_batches_completed_sched(void) } EXPORT_SYMBOL_GPL(rcu_batches_completed_sched); @@ -208,15 +200,15 @@ Index: linux-stable/kernel/rcutree.c /* * Return the number of RCU BH batches processed thus far for debug & stats. 
*/ -@@ -255,6 +258,7 @@ void rcu_bh_force_quiescent_state(void) - force_quiescent_state(&rcu_bh_state, 0); +@@ -259,6 +262,7 @@ void rcu_bh_force_quiescent_state(void) + force_quiescent_state(&rcu_bh_state); } EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); +#endif /* * Record the number of times rcutorture tests have been initiated and -@@ -1972,6 +1976,7 @@ void call_rcu_sched(struct rcu_head *hea +@@ -2183,6 +2187,7 @@ void call_rcu_sched(struct rcu_head *hea } EXPORT_SYMBOL_GPL(call_rcu_sched); @@ -224,15 +216,15 @@ Index: linux-stable/kernel/rcutree.c /* * Queue an RCU callback for invocation after a quicker grace period. */ -@@ -1980,6 +1985,7 @@ void call_rcu_bh(struct rcu_head *head, - __call_rcu(head, func, &rcu_bh_state, 0); +@@ -2191,6 +2196,7 @@ void call_rcu_bh(struct rcu_head *head, + __call_rcu(head, func, &rcu_bh_state, -1, 0); } EXPORT_SYMBOL_GPL(call_rcu_bh); +#endif /* * Because a context switch is a grace period for RCU-sched and RCU-bh, -@@ -2036,6 +2042,7 @@ void synchronize_sched(void) +@@ -2268,6 +2274,7 @@ void synchronize_sched(void) } EXPORT_SYMBOL_GPL(synchronize_sched); @@ -240,23 +232,23 @@ Index: linux-stable/kernel/rcutree.c /** * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. * -@@ -2056,6 +2063,7 @@ void synchronize_rcu_bh(void) - wait_rcu_gp(call_rcu_bh); +@@ -2294,6 +2301,7 @@ void synchronize_rcu_bh(void) + wait_rcu_gp(call_rcu_bh); } EXPORT_SYMBOL_GPL(synchronize_rcu_bh); +#endif - static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0); - static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0); -@@ -2462,6 +2470,7 @@ static void _rcu_barrier(struct rcu_stat - destroy_rcu_head_on_stack(&rd.barrier_head); + static int synchronize_sched_expedited_cpu_stop(void *data) + { +@@ -2682,6 +2690,7 @@ static void _rcu_barrier(struct rcu_stat + mutex_unlock(&rsp->barrier_mutex); } +#ifndef CONFIG_PREEMPT_RT_FULL /** * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. 
*/ -@@ -2470,6 +2479,7 @@ void rcu_barrier_bh(void) +@@ -2690,6 +2699,7 @@ void rcu_barrier_bh(void) _rcu_barrier(&rcu_bh_state); } EXPORT_SYMBOL_GPL(rcu_barrier_bh); diff --git a/debian/patches/features/all/rt/rcu-tiny-merge-bh.patch b/debian/patches/features/all/rt/rcu-tiny-merge-bh.patch index c697500bd..ac9c70c5b 100644 --- a/debian/patches/features/all/rt/rcu-tiny-merge-bh.patch +++ b/debian/patches/features/all/rt/rcu-tiny-merge-bh.patch @@ -7,11 +7,9 @@ Signed-off-by: Thomas Gleixner kernel/rcutiny.c | 2 ++ 1 file changed, 2 insertions(+) -Index: linux-stable/kernel/rcutiny.c -=================================================================== ---- linux-stable.orig/kernel/rcutiny.c -+++ linux-stable/kernel/rcutiny.c -@@ -368,6 +368,7 @@ void call_rcu_sched(struct rcu_head *hea +--- a/kernel/rcutiny.c ++++ b/kernel/rcutiny.c +@@ -371,6 +371,7 @@ void call_rcu_sched(struct rcu_head *hea } EXPORT_SYMBOL_GPL(call_rcu_sched); @@ -19,7 +17,7 @@ Index: linux-stable/kernel/rcutiny.c /* * Post an RCU bottom-half callback to be invoked after any subsequent * quiescent state. 
-@@ -377,3 +378,4 @@ void call_rcu_bh(struct rcu_head *head, +@@ -380,3 +381,4 @@ void call_rcu_bh(struct rcu_head *head, __call_rcu(head, func, &rcu_bh_ctrlblk); } EXPORT_SYMBOL_GPL(call_rcu_bh); diff --git a/debian/patches/features/all/rt/rcu-tiny-solve-rt-mistery.patch b/debian/patches/features/all/rt/rcu-tiny-solve-rt-mistery.patch index c238058d3..097c93043 100644 --- a/debian/patches/features/all/rt/rcu-tiny-solve-rt-mistery.patch +++ b/debian/patches/features/all/rt/rcu-tiny-solve-rt-mistery.patch @@ -29,10 +29,8 @@ Cc: stable-rt@vger.kernel.org kernel/rcutiny_plugin.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -Index: linux-stable/kernel/rcutiny_plugin.h -=================================================================== ---- linux-stable.orig/kernel/rcutiny_plugin.h -+++ linux-stable/kernel/rcutiny_plugin.h +--- a/kernel/rcutiny_plugin.h ++++ b/kernel/rcutiny_plugin.h @@ -560,7 +560,7 @@ void rcu_read_unlock_special(struct task rcu_preempt_cpu_qs(); diff --git a/debian/patches/features/all/rt/rcutiny-use-simple-waitqueue.patch b/debian/patches/features/all/rt/rcutiny-use-simple-waitqueue.patch new file mode 100644 index 000000000..a2d598b09 --- /dev/null +++ b/debian/patches/features/all/rt/rcutiny-use-simple-waitqueue.patch @@ -0,0 +1,79 @@ +Subject: rcutiny: Use simple waitqueue +From: Thomas Gleixner +Date: Mon, 03 Dec 2012 16:25:21 +0100 + +Simple waitqueues can be handled from interrupt disabled contexts. + +Signed-off-by: Thomas Gleixner +--- + kernel/rcutiny_plugin.h | 17 +++++++++-------- + 1 file changed, 9 insertions(+), 8 deletions(-) + +--- a/kernel/rcutiny_plugin.h ++++ b/kernel/rcutiny_plugin.h +@@ -26,6 +26,7 @@ + #include + #include + #include ++#include + + /* Global control variables for rcupdate callback mechanism. */ + struct rcu_ctrlblk { +@@ -260,7 +261,7 @@ static void show_tiny_preempt_stats(stru + + /* Controls for rcu_kthread() kthread. 
*/ + static struct task_struct *rcu_kthread_task; +-static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq); ++static DEFINE_SWAIT_HEAD(rcu_kthread_wq); + static unsigned long have_rcu_kthread_work; + + /* +@@ -713,7 +714,7 @@ void synchronize_rcu(void) + } + EXPORT_SYMBOL_GPL(synchronize_rcu); + +-static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq); ++static DEFINE_SWAIT_HEAD(sync_rcu_preempt_exp_wq); + static unsigned long sync_rcu_preempt_exp_count; + static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex); + +@@ -735,7 +736,7 @@ static int rcu_preempted_readers_exp(voi + */ + static void rcu_report_exp_done(void) + { +- wake_up(&sync_rcu_preempt_exp_wq); ++ swait_wake(&sync_rcu_preempt_exp_wq); + } + + /* +@@ -787,8 +788,8 @@ void synchronize_rcu_expedited(void) + } else { + rcu_initiate_boost(); + local_irq_restore(flags); +- wait_event(sync_rcu_preempt_exp_wq, +- !rcu_preempted_readers_exp()); ++ swait_event(sync_rcu_preempt_exp_wq, ++ !rcu_preempted_readers_exp()); + } + + /* Clean up and exit. 
*/ +@@ -858,7 +859,7 @@ static void invoke_rcu_callbacks(void) + { + have_rcu_kthread_work = 1; + if (rcu_kthread_task != NULL) +- wake_up(&rcu_kthread_wq); ++ swait_wake(&rcu_kthread_wq); + } + + #ifdef CONFIG_RCU_TRACE +@@ -888,8 +889,8 @@ static int rcu_kthread(void *arg) + unsigned long flags; + + for (;;) { +- wait_event_interruptible(rcu_kthread_wq, +- have_rcu_kthread_work != 0); ++ swait_event_interruptible(rcu_kthread_wq, ++ have_rcu_kthread_work != 0); + morework = rcu_boost(); + local_irq_save(flags); + work = have_rcu_kthread_work; diff --git a/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch b/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch index 6851af928..719795117 100644 --- a/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch +++ b/debian/patches/features/all/rt/re-migrate_disable-race-with-cpu-hotplug-3f.patch @@ -17,10 +17,8 @@ Signed-off-by: Thomas Gleixner kernel/cpu.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) -Index: linux-stable/kernel/cpu.c -=================================================================== ---- linux-stable.orig/kernel/cpu.c -+++ linux-stable/kernel/cpu.c +--- a/kernel/cpu.c ++++ b/kernel/cpu.c @@ -81,9 +81,11 @@ static DEFINE_PER_CPU(struct hotplug_pcp */ void pin_current_cpu(void) diff --git a/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch b/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch index 5b9d3118d..e0a042a77 100644 --- a/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch +++ b/debian/patches/features/all/rt/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch @@ -33,11 +33,9 @@ Signed-off-by: Thomas Gleixner arch/arm/kernel/process.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) -Index: linux-stable/arch/arm/kernel/process.c 
-=================================================================== ---- linux-stable.orig/arch/arm/kernel/process.c -+++ linux-stable/arch/arm/kernel/process.c -@@ -508,6 +508,31 @@ unsigned long arch_randomize_brk(struct +--- a/arch/arm/kernel/process.c ++++ b/arch/arm/kernel/process.c +@@ -459,6 +459,31 @@ unsigned long arch_randomize_brk(struct } #ifdef CONFIG_MMU diff --git a/debian/patches/features/all/rt/relay-fix-timer-madness.patch b/debian/patches/features/all/rt/relay-fix-timer-madness.patch index 8ce2eecae..738f20d8b 100644 --- a/debian/patches/features/all/rt/relay-fix-timer-madness.patch +++ b/debian/patches/features/all/rt/relay-fix-timer-madness.patch @@ -13,10 +13,8 @@ Signed-off-by: Thomas Gleixner kernel/relay.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) -Index: linux-stable/kernel/relay.c -=================================================================== ---- linux-stable.orig/kernel/relay.c -+++ linux-stable/kernel/relay.c +--- a/kernel/relay.c ++++ b/kernel/relay.c @@ -340,6 +340,10 @@ static void wakeup_readers(unsigned long { struct rchan_buf *buf = (struct rchan_buf *)data; diff --git a/debian/patches/features/all/rt/resource-counters-use-localirq-nort.patch b/debian/patches/features/all/rt/resource-counters-use-localirq-nort.patch index bcb79ced9..e39de4d6b 100644 --- a/debian/patches/features/all/rt/resource-counters-use-localirq-nort.patch +++ b/debian/patches/features/all/rt/resource-counters-use-localirq-nort.patch @@ -46,10 +46,8 @@ Signed-off-by: Thomas Gleixner kernel/res_counter.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) -Index: linux-stable/kernel/res_counter.c -=================================================================== ---- linux-stable.orig/kernel/res_counter.c -+++ linux-stable/kernel/res_counter.c +--- a/kernel/res_counter.c ++++ b/kernel/res_counter.c @@ -49,7 +49,7 @@ static int __res_counter_charge(struct r r = ret = 0; @@ -68,19 +66,21 @@ Index: 
linux-stable/kernel/res_counter.c return ret; } -@@ -101,13 +101,13 @@ void res_counter_uncharge_until(struct r - unsigned long flags; +@@ -103,7 +103,7 @@ u64 res_counter_uncharge_until(struct re struct res_counter *c; + u64 ret = 0; - local_irq_save(flags); + local_irq_save_nort(flags); for (c = counter; c != top; c = c->parent) { + u64 r; spin_lock(&c->lock); - res_counter_uncharge_locked(c, val); +@@ -112,7 +112,7 @@ u64 res_counter_uncharge_until(struct re + ret = r; spin_unlock(&c->lock); } - local_irq_restore(flags); + local_irq_restore_nort(flags); + return ret; } - void res_counter_uncharge(struct res_counter *counter, unsigned long val) diff --git a/debian/patches/features/all/rt/rfc-printk-don-27t-call-printk_tick-in-printk_needs_cpu.patch b/debian/patches/features/all/rt/rfc-printk-don-27t-call-printk_tick-in-printk_needs_cpu.patch index 973b05537..771294f49 100644 --- a/debian/patches/features/all/rt/rfc-printk-don-27t-call-printk_tick-in-printk_needs_cpu.patch +++ b/debian/patches/features/all/rt/rfc-printk-don-27t-call-printk_tick-in-printk_needs_cpu.patch @@ -30,11 +30,9 @@ Signed-off-by: Thomas Gleixner kernel/printk.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) -Index: linux-stable/kernel/printk.c -=================================================================== ---- linux-stable.orig/kernel/printk.c -+++ linux-stable/kernel/printk.c -@@ -2027,8 +2027,8 @@ void printk_tick(void) +--- a/kernel/printk.c ++++ b/kernel/printk.c +@@ -2030,8 +2030,8 @@ void printk_tick(void) int printk_needs_cpu(int cpu) { diff --git a/debian/patches/features/all/rt/rfc-sched-rt-fix-wait_task_interactive-to-test-rt_spin_lock-state.patch b/debian/patches/features/all/rt/rfc-sched-rt-fix-wait_task_interactive-to-test-rt_spin_lock-state.patch index f93d9a25d..76166cbde 100644 --- a/debian/patches/features/all/rt/rfc-sched-rt-fix-wait_task_interactive-to-test-rt_spin_lock-state.patch +++ 
b/debian/patches/features/all/rt/rfc-sched-rt-fix-wait_task_interactive-to-test-rt_spin_lock-state.patch @@ -22,11 +22,9 @@ Signed-off-by: Thomas Gleixner kernel/sched/core.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) -Index: linux-stable/kernel/sched/core.c -=================================================================== ---- linux-stable.orig/kernel/sched/core.c -+++ linux-stable/kernel/sched/core.c -@@ -1172,7 +1172,8 @@ unsigned long wait_task_inactive(struct +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -1041,7 +1041,8 @@ unsigned long wait_task_inactive(struct * is actually now running somewhere else! */ while (task_running(rq, p)) { @@ -36,7 +34,7 @@ Index: linux-stable/kernel/sched/core.c return 0; cpu_relax(); } -@@ -1187,7 +1188,8 @@ unsigned long wait_task_inactive(struct +@@ -1056,7 +1057,8 @@ unsigned long wait_task_inactive(struct running = task_running(rq, p); on_rq = p->on_rq; ncsw = 0; diff --git a/debian/patches/features/all/rt/rt-add-rt-locks.patch b/debian/patches/features/all/rt/rt-add-rt-locks.patch index fdbc41280..1cad15eea 100644 --- a/debian/patches/features/all/rt/rt-add-rt-locks.patch +++ b/debian/patches/features/all/rt/rt-add-rt-locks.patch @@ -18,10 +18,8 @@ Signed-off-by: Thomas Gleixner lib/spinlock_debug.c | 5 8 files changed, 756 insertions(+), 4 deletions(-) -Index: linux-stable/include/linux/rwlock_rt.h -=================================================================== --- /dev/null -+++ linux-stable/include/linux/rwlock_rt.h ++++ b/include/linux/rwlock_rt.h @@ -0,0 +1,123 @@ +#ifndef __LINUX_RWLOCK_RT_H +#define __LINUX_RWLOCK_RT_H @@ -146,10 +144,8 @@ Index: linux-stable/include/linux/rwlock_rt.h + } while (0) + +#endif -Index: linux-stable/include/linux/spinlock.h -=================================================================== ---- linux-stable.orig/include/linux/spinlock.h -+++ linux-stable/include/linux/spinlock.h +--- a/include/linux/spinlock.h ++++ b/include/linux/spinlock.h @@ 
-254,7 +254,11 @@ static inline void do_raw_spin_unlock(ra #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock)) @@ -181,10 +177,8 @@ Index: linux-stable/include/linux/spinlock.h +#endif /* !PREEMPT_RT_FULL */ + #endif /* __LINUX_SPINLOCK_H */ -Index: linux-stable/include/linux/spinlock_api_smp.h -=================================================================== ---- linux-stable.orig/include/linux/spinlock_api_smp.h -+++ linux-stable/include/linux/spinlock_api_smp.h +--- a/include/linux/spinlock_api_smp.h ++++ b/include/linux/spinlock_api_smp.h @@ -191,6 +191,8 @@ static inline int __raw_spin_trylock_bh( return 0; } @@ -195,10 +189,8 @@ Index: linux-stable/include/linux/spinlock_api_smp.h +#endif #endif /* __LINUX_SPINLOCK_API_SMP_H */ -Index: linux-stable/include/linux/spinlock_rt.h -=================================================================== --- /dev/null -+++ linux-stable/include/linux/spinlock_rt.h ++++ b/include/linux/spinlock_rt.h @@ -0,0 +1,158 @@ +#ifndef __LINUX_SPINLOCK_RT_H +#define __LINUX_SPINLOCK_RT_H @@ -358,10 +350,8 @@ Index: linux-stable/include/linux/spinlock_rt.h + atomic_dec_and_spin_lock(atomic, lock) + +#endif -Index: linux-stable/kernel/Makefile -=================================================================== ---- linux-stable.orig/kernel/Makefile -+++ linux-stable/kernel/Makefile +--- a/kernel/Makefile ++++ b/kernel/Makefile @@ -7,8 +7,8 @@ obj-y = fork.o exec_domain.o panic.o sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \ signal.o sys.o kmod.o workqueue.o pid.o task_work.o \ @@ -371,7 +361,7 @@ Index: linux-stable/kernel/Makefile + kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o \ + hrtimer.o nsproxy.o srcu.o semaphore.o \ notifier.o ksysfs.o cred.o \ - async.o range.o groups.o lglock.o + async.o range.o groups.o lglock.o smpboot.o @@ -32,7 +32,11 @@ obj-$(CONFIG_FREEZER) += freezer.o obj-$(CONFIG_PROFILING) += profile.o @@ -392,11 +382,9 @@ Index: linux-stable/kernel/Makefile 
+obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o obj-$(CONFIG_SMP) += smp.o - obj-$(CONFIG_SMP) += smpboot.o -Index: linux-stable/kernel/rt.c -=================================================================== + ifneq ($(CONFIG_SMP),y) --- /dev/null -+++ linux-stable/kernel/rt.c ++++ b/kernel/rt.c @@ -0,0 +1,442 @@ +/* + * kernel/rt.c @@ -802,7 +790,7 @@ Index: linux-stable/kernel/rt.c +} +EXPORT_SYMBOL(rt_down_read_nested); + -+void __rt_rwsem_init(struct rw_semaphore *rwsem, char *name, ++void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name, + struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -840,10 +828,8 @@ Index: linux-stable/kernel/rt.c + return 1; +} +EXPORT_SYMBOL(atomic_dec_and_mutex_lock); -Index: linux-stable/kernel/spinlock.c -=================================================================== ---- linux-stable.orig/kernel/spinlock.c -+++ linux-stable/kernel/spinlock.c +--- a/kernel/spinlock.c ++++ b/kernel/spinlock.c @@ -110,8 +110,11 @@ void __lockfunc __raw_##op##_lock_bh(loc * __[spin|read|write]_lock_bh() */ @@ -874,10 +860,8 @@ Index: linux-stable/kernel/spinlock.c #ifdef CONFIG_DEBUG_LOCK_ALLOC void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) -Index: linux-stable/lib/spinlock_debug.c -=================================================================== ---- linux-stable.orig/lib/spinlock_debug.c -+++ linux-stable/lib/spinlock_debug.c +--- a/lib/spinlock_debug.c ++++ b/lib/spinlock_debug.c @@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t EXPORT_SYMBOL(__raw_spin_lock_init); @@ -894,7 +878,7 @@ Index: linux-stable/lib/spinlock_debug.c static void spin_dump(raw_spinlock_t *lock, const char *msg) { -@@ -155,6 +157,7 @@ void do_raw_spin_unlock(raw_spinlock_t * +@@ -159,6 +161,7 @@ void do_raw_spin_unlock(raw_spinlock_t * arch_spin_unlock(&lock->raw_lock); } @@ -902,7 +886,7 @@ Index: linux-stable/lib/spinlock_debug.c static void rwlock_bug(rwlock_t *lock, 
const char *msg) { if (!debug_locks_off()) -@@ -296,3 +299,5 @@ void do_raw_write_unlock(rwlock_t *lock) +@@ -300,3 +303,5 @@ void do_raw_write_unlock(rwlock_t *lock) debug_write_unlock(lock); arch_write_unlock(&lock->raw_lock); } diff --git a/debian/patches/features/all/rt/rt-add-rt-spinlock-to-headers.patch b/debian/patches/features/all/rt/rt-add-rt-spinlock-to-headers.patch index 25de9535c..40ea9a5ae 100644 --- a/debian/patches/features/all/rt/rt-add-rt-spinlock-to-headers.patch +++ b/debian/patches/features/all/rt/rt-add-rt-spinlock-to-headers.patch @@ -9,10 +9,8 @@ Signed-off-by: Thomas Gleixner include/linux/spinlock_types_rt.h | 49 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 90 insertions(+), 3 deletions(-) -Index: linux-stable/include/linux/rwlock_types_rt.h -=================================================================== --- /dev/null -+++ linux-stable/include/linux/rwlock_types_rt.h ++++ b/include/linux/rwlock_types_rt.h @@ -0,0 +1,33 @@ +#ifndef __LINUX_RWLOCK_TYPES_RT_H +#define __LINUX_RWLOCK_TYPES_RT_H @@ -47,10 +45,8 @@ Index: linux-stable/include/linux/rwlock_types_rt.h + rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name) + +#endif -Index: linux-stable/include/linux/spinlock_types.h -=================================================================== ---- linux-stable.orig/include/linux/spinlock_types.h -+++ linux-stable/include/linux/spinlock_types.h +--- a/include/linux/spinlock_types.h ++++ b/include/linux/spinlock_types.h @@ -11,8 +11,13 @@ #include @@ -68,10 +64,8 @@ Index: linux-stable/include/linux/spinlock_types.h +#endif #endif /* __LINUX_SPINLOCK_TYPES_H */ -Index: linux-stable/include/linux/spinlock_types_rt.h -=================================================================== --- /dev/null -+++ linux-stable/include/linux/spinlock_types_rt.h ++++ b/include/linux/spinlock_types_rt.h @@ -0,0 +1,49 @@ +#ifndef __LINUX_SPINLOCK_TYPES_RT_H +#define __LINUX_SPINLOCK_TYPES_RT_H diff --git 
a/debian/patches/features/all/rt/rt-add-rt-to-mutex-headers.patch b/debian/patches/features/all/rt/rt-add-rt-to-mutex-headers.patch index 237094975..0d843f0c3 100644 --- a/debian/patches/features/all/rt/rt-add-rt-to-mutex-headers.patch +++ b/debian/patches/features/all/rt/rt-add-rt-to-mutex-headers.patch @@ -8,10 +8,8 @@ Signed-off-by: Thomas Gleixner include/linux/mutex_rt.h | 84 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 98 insertions(+), 7 deletions(-) -Index: linux-stable/include/linux/mutex.h -=================================================================== ---- linux-stable.orig/include/linux/mutex.h -+++ linux-stable/include/linux/mutex.h +--- a/include/linux/mutex.h ++++ b/include/linux/mutex.h @@ -17,6 +17,17 @@ #include @@ -54,10 +52,8 @@ Index: linux-stable/include/linux/mutex.h extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); #ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX -Index: linux-stable/include/linux/mutex_rt.h -=================================================================== --- /dev/null -+++ linux-stable/include/linux/mutex_rt.h ++++ b/include/linux/mutex_rt.h @@ -0,0 +1,84 @@ +#ifndef __LINUX_MUTEX_RT_H +#define __LINUX_MUTEX_RT_H diff --git a/debian/patches/features/all/rt/rt-disable-rt-group-sched.patch b/debian/patches/features/all/rt/rt-disable-rt-group-sched.patch deleted file mode 100644 index 418dd9dcf..000000000 --- a/debian/patches/features/all/rt/rt-disable-rt-group-sched.patch +++ /dev/null @@ -1,29 +0,0 @@ -From f9e7eb3419db82245b3396074c137b687b42df06 Mon Sep 17 00:00:00 2001 -From: Carsten Emde -Date: Wed, 11 Jul 2012 22:05:18 +0000 -Subject: Disable RT_GROUP_SCHED in PREEMPT_RT_FULL - -Strange CPU stalls have been observed in RT when RT_GROUP_SCHED -was configured. - -Disable it for now. 
- -Signed-off-by: Carsten Emde -Signed-off-by: Thomas Gleixner - ---- - init/Kconfig | 1 + - 1 file changed, 1 insertion(+) - -Index: linux-stable/init/Kconfig -=================================================================== ---- linux-stable.orig/init/Kconfig -+++ linux-stable/init/Kconfig -@@ -806,6 +806,7 @@ config RT_GROUP_SCHED - bool "Group scheduling for SCHED_RR/FIFO" - depends on EXPERIMENTAL - depends on CGROUP_SCHED -+ depends on !PREEMPT_RT_FULL - default n - help - This feature lets you explicitly allocate real CPU bandwidth diff --git a/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch b/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch index 1fa281808..02864a0e9 100644 --- a/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch +++ b/debian/patches/features/all/rt/rt-introduce-cpu-chill.patch @@ -13,10 +13,8 @@ Cc: stable-rt@vger.kernel.org include/linux/delay.h | 6 ++++++ 1 file changed, 6 insertions(+) -Index: linux-stable/include/linux/delay.h -=================================================================== ---- linux-stable.orig/include/linux/delay.h -+++ linux-stable/include/linux/delay.h +--- a/include/linux/delay.h ++++ b/include/linux/delay.h @@ -52,4 +52,10 @@ static inline void ssleep(unsigned int s msleep(seconds * 1000); } diff --git a/debian/patches/features/all/rt/rt-local-irq-lock.patch b/debian/patches/features/all/rt/rt-local-irq-lock.patch index b33ccec61..d7e80937b 100644 --- a/debian/patches/features/all/rt/rt-local-irq-lock.patch +++ b/debian/patches/features/all/rt/rt-local-irq-lock.patch @@ -4,14 +4,12 @@ Date: Mon, 20 Jun 2011 09:03:47 +0200 Signed-off-by: Thomas Gleixner --- - include/linux/locallock.h | 230 ++++++++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 230 insertions(+) + include/linux/locallock.h | 238 ++++++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 238 insertions(+) -Index: linux-stable/include/linux/locallock.h 
-=================================================================== --- /dev/null -+++ linux-stable/include/linux/locallock.h -@@ -0,0 +1,230 @@ ++++ b/include/linux/locallock.h +@@ -0,0 +1,238 @@ +#ifndef _LINUX_LOCALLOCK_H +#define _LINUX_LOCALLOCK_H + @@ -110,6 +108,9 @@ Index: linux-stable/include/linux/locallock.h +#define local_lock_irq(lvar) \ + do { __local_lock_irq(&get_local_var(lvar)); } while (0) + ++#define local_lock_irq_on(lvar, cpu) \ ++ do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0) ++ +static inline void __local_unlock_irq(struct local_irq_lock *lv) +{ + LL_WARN(!lv->nestcnt); @@ -125,6 +126,11 @@ Index: linux-stable/include/linux/locallock.h + put_local_var(lvar); \ + } while (0) + ++#define local_unlock_irq_on(lvar, cpu) \ ++ do { \ ++ __local_unlock_irq(&per_cpu(lvar, cpu)); \ ++ } while (0) ++ +static inline int __local_lock_irqsave(struct local_irq_lock *lv) +{ + if (lv->owner != current) { diff --git a/debian/patches/features/all/rt/rt-mutex-add-sleeping-spinlocks-support.patch b/debian/patches/features/all/rt/rt-mutex-add-sleeping-spinlocks-support.patch index 566f52903..a3783cf8b 100644 --- a/debian/patches/features/all/rt/rt-mutex-add-sleeping-spinlocks-support.patch +++ b/debian/patches/features/all/rt/rt-mutex-add-sleeping-spinlocks-support.patch @@ -10,10 +10,8 @@ Signed-off-by: Thomas Gleixner kernel/rtmutex_common.h | 9 + 4 files changed, 404 insertions(+), 27 deletions(-) -Index: linux-stable/include/linux/rtmutex.h -=================================================================== ---- linux-stable.orig/include/linux/rtmutex.h -+++ linux-stable/include/linux/rtmutex.h +--- a/include/linux/rtmutex.h ++++ b/include/linux/rtmutex.h @@ -29,9 +29,10 @@ struct rt_mutex { raw_spinlock_t wait_lock; struct plist_head wait_list; @@ -72,11 +70,9 @@ Index: linux-stable/include/linux/rtmutex.h #define DEFINE_RT_MUTEX(mutexname) \ struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname) -Index: linux-stable/kernel/futex.c 
-=================================================================== ---- linux-stable.orig/kernel/futex.c -+++ linux-stable/kernel/futex.c -@@ -2302,8 +2302,7 @@ static int futex_wait_requeue_pi(u32 __u +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -2323,8 +2323,7 @@ static int futex_wait_requeue_pi(u32 __u * The waiter is allocated on our stack, manipulated by the requeue * code while we sleep on uaddr. */ @@ -86,10 +82,8 @@ Index: linux-stable/kernel/futex.c ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE); if (unlikely(ret != 0)) -Index: linux-stable/kernel/rtmutex.c -=================================================================== ---- linux-stable.orig/kernel/rtmutex.c -+++ linux-stable/kernel/rtmutex.c +--- a/kernel/rtmutex.c ++++ b/kernel/rtmutex.c @@ -8,6 +8,12 @@ * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt * Copyright (C) 2006 Esben Nielsen @@ -116,7 +110,7 @@ Index: linux-stable/kernel/rtmutex.c /* * Calculate task priority from the waiter list priority * -@@ -142,6 +154,14 @@ static void rt_mutex_adjust_prio(struct +@@ -142,6 +154,14 @@ static void rt_mutex_adjust_prio(struct raw_spin_unlock_irqrestore(&task->pi_lock, flags); } @@ -600,10 +594,8 @@ Index: linux-stable/kernel/rtmutex.c debug_rt_mutex_proxy_lock(lock, proxy_owner); rt_mutex_set_owner(lock, proxy_owner); rt_mutex_deadlock_account_lock(lock, proxy_owner); -Index: linux-stable/kernel/rtmutex_common.h -=================================================================== ---- linux-stable.orig/kernel/rtmutex_common.h -+++ linux-stable/kernel/rtmutex_common.h +--- a/kernel/rtmutex_common.h ++++ b/kernel/rtmutex_common.h @@ -49,6 +49,7 @@ struct rt_mutex_waiter { struct plist_node pi_list_entry; struct task_struct *task; diff --git a/debian/patches/features/all/rt/rt-preempt-base-config.patch b/debian/patches/features/all/rt/rt-preempt-base-config.patch index cbff74555..3c6d7e13c 100644 --- a/debian/patches/features/all/rt/rt-preempt-base-config.patch 
+++ b/debian/patches/features/all/rt/rt-preempt-base-config.patch @@ -7,10 +7,8 @@ Signed-off-by: Thomas Gleixner kernel/Kconfig.preempt | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) -Index: linux-stable/kernel/Kconfig.preempt -=================================================================== ---- linux-stable.orig/kernel/Kconfig.preempt -+++ linux-stable/kernel/Kconfig.preempt +--- a/kernel/Kconfig.preempt ++++ b/kernel/Kconfig.preempt @@ -1,3 +1,10 @@ +config PREEMPT + bool diff --git a/debian/patches/features/all/rt/rt-rcutree-warn-fix.patch b/debian/patches/features/all/rt/rt-rcutree-warn-fix.patch deleted file mode 100644 index 8b82a7545..000000000 --- a/debian/patches/features/all/rt/rt-rcutree-warn-fix.patch +++ /dev/null @@ -1,44 +0,0 @@ -Subject: rt/rcutree: Move misplaced prototype -From: Ingo Molnar -Date: Wed Dec 14 12:51:28 CET 2011 - -Fix this warning on x86 defconfig: - - kernel/rcutree.h:433:13: warning: ‘rcu_preempt_qs’ declared ‘static’ but never defined [-Wunused-function] - -The #ifdefs and prototypes here are a maze, move it closer to the -usage site that needs it. 
- -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner ---- ---- - kernel/rcutree.c | 2 ++ - kernel/rcutree.h | 1 - - 2 files changed, 2 insertions(+), 1 deletion(-) - -Index: linux-stable/kernel/rcutree.c -=================================================================== ---- linux-stable.orig/kernel/rcutree.c -+++ linux-stable/kernel/rcutree.c -@@ -183,6 +183,8 @@ void rcu_sched_qs(int cpu) - } - - #ifdef CONFIG_PREEMPT_RT_FULL -+static void rcu_preempt_qs(int cpu); -+ - void rcu_bh_qs(int cpu) - { - rcu_preempt_qs(cpu); -Index: linux-stable/kernel/rcutree.h -=================================================================== ---- linux-stable.orig/kernel/rcutree.h -+++ linux-stable/kernel/rcutree.h -@@ -463,7 +463,6 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work); - /* Forward declarations for rcutree_plugin.h */ - static void rcu_bootup_announce(void); - long rcu_batches_completed(void); --static void rcu_preempt_qs(int cpu); - static void rcu_preempt_note_context_switch(int cpu); - static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp); - #ifdef CONFIG_HOTPLUG_CPU diff --git a/debian/patches/features/all/rt/rt-rw-lockdep-annotations.patch b/debian/patches/features/all/rt/rt-rw-lockdep-annotations.patch index 5e85861de..ba99b8602 100644 --- a/debian/patches/features/all/rt/rt-rw-lockdep-annotations.patch +++ b/debian/patches/features/all/rt/rt-rw-lockdep-annotations.patch @@ -8,14 +8,12 @@ lockdep acquire functions accordingly. 
Signed-off-by: Thomas Gleixner Cc: stable-rt@vger.kernel.org --- - kernel/rt.c | 46 +++++++++++++++++++++++++--------------------- - 1 file changed, 25 insertions(+), 21 deletions(-) + kernel/rt.c | 53 ++++++++++++++++++++++++++++++++--------------------- + 1 file changed, 32 insertions(+), 21 deletions(-) -Index: linux-stable/kernel/rt.c -=================================================================== ---- linux-stable.orig/kernel/rt.c -+++ linux-stable/kernel/rt.c -@@ -216,15 +216,17 @@ int __lockfunc rt_read_trylock(rwlock_t +--- a/kernel/rt.c ++++ b/kernel/rt.c +@@ -216,15 +216,17 @@ int __lockfunc rt_read_trylock(rwlock_t * write locked. */ migrate_disable(); @@ -83,7 +81,21 @@ Index: linux-stable/kernel/rt.c } EXPORT_SYMBOL(rt_up_read); -@@ -366,15 +369,16 @@ int rt_down_read_trylock(struct rw_sema +@@ -356,6 +359,13 @@ void rt_down_write_nested(struct rw_sem + } + EXPORT_SYMBOL(rt_down_write_nested); + ++void rt_down_write_nested_lock(struct rw_semaphore *rwsem, ++ struct lockdep_map *nest) ++{ ++ rwsem_acquire_nest(&rwsem->dep_map, 0, 0, nest, _RET_IP_); ++ rt_mutex_lock(&rwsem->lock); ++} ++ + int rt_down_read_trylock(struct rw_semaphore *rwsem) + { + struct rt_mutex *lock = &rwsem->lock; +@@ -366,15 +376,16 @@ int rt_down_read_trylock(struct rw_sema * but not when read_depth == 0 which means that the rwsem is * write locked. 
*/ @@ -105,7 +117,7 @@ Index: linux-stable/kernel/rt.c return ret; } EXPORT_SYMBOL(rt_down_read_trylock); -@@ -383,10 +387,10 @@ static void __rt_down_read(struct rw_sem +@@ -383,10 +394,10 @@ static void __rt_down_read(struct rw_sem { struct rt_mutex *lock = &rwsem->lock; diff --git a/debian/patches/features/all/rt/rt-sched-do-not-compare-cpu-masks-in-scheduler.patch b/debian/patches/features/all/rt/rt-sched-do-not-compare-cpu-masks-in-scheduler.patch index a5ca035bd..b7c97df84 100644 --- a/debian/patches/features/all/rt/rt-sched-do-not-compare-cpu-masks-in-scheduler.patch +++ b/debian/patches/features/all/rt/rt-sched-do-not-compare-cpu-masks-in-scheduler.patch @@ -12,11 +12,9 @@ Signed-off-by: Thomas Gleixner kernel/sched/core.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) -Index: linux-stable/kernel/sched/core.c -=================================================================== ---- linux-stable.orig/kernel/sched/core.c -+++ linux-stable/kernel/sched/core.c -@@ -3398,16 +3398,12 @@ static inline void update_migrate_disabl +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -2876,16 +2876,12 @@ static inline void update_migrate_disabl */ mask = tsk_cpus_allowed(p); diff --git a/debian/patches/features/all/rt/rt-sched-have-migrate_disable-ignore-bounded-threads.patch b/debian/patches/features/all/rt/rt-sched-have-migrate_disable-ignore-bounded-threads.patch index 405477b48..8a5901224 100644 --- a/debian/patches/features/all/rt/rt-sched-have-migrate_disable-ignore-bounded-threads.patch +++ b/debian/patches/features/all/rt/rt-sched-have-migrate_disable-ignore-bounded-threads.patch @@ -12,11 +12,9 @@ Signed-off-by: Thomas Gleixner kernel/sched/core.c | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) -Index: linux-stable/kernel/sched/core.c -=================================================================== ---- linux-stable.orig/kernel/sched/core.c -+++ linux-stable/kernel/sched/core.c -@@ -3410,7 +3410,7 @@ 
void migrate_disable(void) +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -2888,7 +2888,7 @@ void migrate_disable(void) { struct task_struct *p = current; @@ -25,7 +23,7 @@ Index: linux-stable/kernel/sched/core.c #ifdef CONFIG_SCHED_DEBUG p->migrate_disable_atomic++; #endif -@@ -3441,7 +3441,7 @@ void migrate_enable(void) +@@ -2919,7 +2919,7 @@ void migrate_enable(void) unsigned long flags; struct rq *rq; @@ -34,7 +32,7 @@ Index: linux-stable/kernel/sched/core.c #ifdef CONFIG_SCHED_DEBUG p->migrate_disable_atomic--; #endif -@@ -3462,26 +3462,21 @@ void migrate_enable(void) +@@ -2940,26 +2940,21 @@ void migrate_enable(void) if (unlikely(migrate_disabled_updated(p))) { /* diff --git a/debian/patches/features/all/rt/rt-sched-postpone-actual-migration-disalbe-to-schedule.patch b/debian/patches/features/all/rt/rt-sched-postpone-actual-migration-disalbe-to-schedule.patch index 49e1e4f88..7ccbdeaf5 100644 --- a/debian/patches/features/all/rt/rt-sched-postpone-actual-migration-disalbe-to-schedule.patch +++ b/debian/patches/features/all/rt/rt-sched-postpone-actual-migration-disalbe-to-schedule.patch @@ -22,11 +22,9 @@ Signed-off-by: Thomas Gleixner kernel/sched/core.c | 251 +++++++++++++++++++++++++++------------------------- 1 file changed, 132 insertions(+), 119 deletions(-) -Index: linux-stable/kernel/sched/core.c -=================================================================== ---- linux-stable.orig/kernel/sched/core.c -+++ linux-stable/kernel/sched/core.c -@@ -3370,6 +3370,135 @@ static inline void schedule_debug(struct +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -2848,6 +2848,135 @@ static inline void schedule_debug(struct schedstat_inc(this_rq(), sched_count); } @@ -162,7 +160,7 @@ Index: linux-stable/kernel/sched/core.c static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->on_rq || rq->skip_clock_update < 0) -@@ -3429,6 +3558,8 @@ need_resched: +@@ -2941,6 +3070,8 @@ need_resched: raw_spin_lock_irq(&rq->lock); @@ 
-171,7 +169,7 @@ Index: linux-stable/kernel/sched/core.c switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { if (unlikely(signal_pending_state(prev->state, prev))) { -@@ -5203,7 +5334,7 @@ void __cpuinit init_idle(struct task_str +@@ -4734,7 +4865,7 @@ void __cpuinit init_idle(struct task_str #ifdef CONFIG_SMP void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) { @@ -180,7 +178,7 @@ Index: linux-stable/kernel/sched/core.c if (p->sched_class && p->sched_class->set_cpus_allowed) p->sched_class->set_cpus_allowed(p, new_mask); p->nr_cpus_allowed = cpumask_weight(new_mask); -@@ -5278,124 +5409,6 @@ out: +@@ -4809,124 +4940,6 @@ out: } EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); diff --git a/debian/patches/features/all/rt/rt-serial-warn-fix.patch b/debian/patches/features/all/rt/rt-serial-warn-fix.patch index 1dec8a3dc..ac091b244 100644 --- a/debian/patches/features/all/rt/rt-serial-warn-fix.patch +++ b/debian/patches/features/all/rt/rt-serial-warn-fix.patch @@ -12,13 +12,11 @@ give it a chance to continue in some really ugly situation. Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- - drivers/tty/serial/8250/8250.c | 13 ++++++++++--- - 1 file changed, 10 insertions(+), 3 deletions(-) + drivers/tty/serial/8250/8250.c | 11 ++++++++++- + 1 file changed, 10 insertions(+), 1 deletion(-) -Index: linux-stable/drivers/tty/serial/8250/8250.c -=================================================================== ---- linux-stable.orig/drivers/tty/serial/8250/8250.c -+++ linux-stable/drivers/tty/serial/8250/8250.c +--- a/drivers/tty/serial/8250/8250.c ++++ b/drivers/tty/serial/8250/8250.c @@ -80,7 +80,16 @@ static unsigned int skip_txen_test; /* f #define DEBUG_INTR(fmt...) 
do { } while (0) #endif @@ -37,18 +35,3 @@ Index: linux-stable/drivers/tty/serial/8250/8250.c #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) -@@ -1549,14 +1558,12 @@ static irqreturn_t serial8250_interrupt( - - l = l->next; - --#ifndef CONFIG_PREEMPT_RT_FULL - if (l == i->head && pass_counter++ > PASS_LIMIT) { - /* If we hit this, we're dead. */ - printk_ratelimited(KERN_ERR - "serial8250: too much work for irq%d\n", irq); - break; - } --#endif - } while (l != end); - - spin_unlock(&i->lock); diff --git a/debian/patches/features/all/rt/rt-tracing-show-padding-as-unsigned-short.patch b/debian/patches/features/all/rt/rt-tracing-show-padding-as-unsigned-short.patch index 4d0434cab..c27cc66a7 100644 --- a/debian/patches/features/all/rt/rt-tracing-show-padding-as-unsigned-short.patch +++ b/debian/patches/features/all/rt/rt-tracing-show-padding-as-unsigned-short.patch @@ -33,10 +33,8 @@ Signed-off-by: Thomas Gleixner kernel/trace/trace_events.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -Index: linux-stable/kernel/trace/trace_events.c -=================================================================== ---- linux-stable.orig/kernel/trace/trace_events.c -+++ linux-stable/kernel/trace/trace_events.c +--- a/kernel/trace/trace_events.c ++++ b/kernel/trace/trace_events.c @@ -117,7 +117,7 @@ static int trace_define_common_fields(vo __common_field(unsigned char, preempt_count); __common_field(int, pid); diff --git a/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch b/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch index 9f141dcb6..f0bbc6000 100644 --- a/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch +++ b/debian/patches/features/all/rt/rtmutex-avoid-include-hell.patch @@ -7,10 +7,8 @@ Signed-off-by: Thomas Gleixner include/linux/rtmutex.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -Index: linux-stable/include/linux/rtmutex.h -=================================================================== ---- 
linux-stable.orig/include/linux/rtmutex.h -+++ linux-stable/include/linux/rtmutex.h +--- a/include/linux/rtmutex.h ++++ b/include/linux/rtmutex.h @@ -14,7 +14,7 @@ #include diff --git a/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch b/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch index 2c0a84da8..d474b54d4 100644 --- a/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch +++ b/debian/patches/features/all/rt/rtmutex-futex-prepare-rt.patch @@ -9,11 +9,9 @@ Signed-off-by: Thomas Gleixner kernel/rtmutex_common.h | 2 + 3 files changed, 91 insertions(+), 19 deletions(-) -Index: linux-stable/kernel/futex.c -=================================================================== ---- linux-stable.orig/kernel/futex.c -+++ linux-stable/kernel/futex.c -@@ -1423,6 +1423,16 @@ retry_private: +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -1442,6 +1442,16 @@ retry_private: requeue_pi_wake_futex(this, &key2, hb2); drop_count++; continue; @@ -30,7 +28,7 @@ Index: linux-stable/kernel/futex.c } else if (ret) { /* -EDEADLK */ this->pi_state = NULL; -@@ -2267,7 +2277,7 @@ static int futex_wait_requeue_pi(u32 __u +@@ -2286,7 +2296,7 @@ static int futex_wait_requeue_pi(u32 __u struct hrtimer_sleeper timeout, *to = NULL; struct rt_mutex_waiter rt_waiter; struct rt_mutex *pi_mutex = NULL; @@ -39,7 +37,7 @@ Index: linux-stable/kernel/futex.c union futex_key key2 = FUTEX_KEY_INIT; struct futex_q q = futex_q_init; int res, ret; -@@ -2314,20 +2324,55 @@ static int futex_wait_requeue_pi(u32 __u +@@ -2333,20 +2343,55 @@ static int futex_wait_requeue_pi(u32 __u /* Queue the futex_q, drop the hb lock, wait for wakeup. */ futex_wait_queue_me(hb, &q, to); @@ -106,7 +104,7 @@ Index: linux-stable/kernel/futex.c /* Check if the requeue code acquired the second futex for us. 
*/ if (!q.rt_waiter) { -@@ -2336,9 +2381,10 @@ static int futex_wait_requeue_pi(u32 __u +@@ -2355,9 +2400,10 @@ static int futex_wait_requeue_pi(u32 __u * did a lock-steal - fix up the PI-state in that case. */ if (q.pi_state && (q.pi_state->owner != current)) { @@ -119,7 +117,7 @@ Index: linux-stable/kernel/futex.c } } else { /* -@@ -2351,7 +2397,8 @@ static int futex_wait_requeue_pi(u32 __u +@@ -2370,7 +2416,8 @@ static int futex_wait_requeue_pi(u32 __u ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1); debug_rt_mutex_free_waiter(&rt_waiter); @@ -129,10 +127,8 @@ Index: linux-stable/kernel/futex.c /* * Fixup the pi_state owner and possibly acquire the lock if we * haven't already. -Index: linux-stable/kernel/rtmutex.c -=================================================================== ---- linux-stable.orig/kernel/rtmutex.c -+++ linux-stable/kernel/rtmutex.c +--- a/kernel/rtmutex.c ++++ b/kernel/rtmutex.c @@ -67,6 +67,11 @@ static void fixup_rt_mutex_waiters(struc clear_rt_mutex_waiters(lock); } @@ -206,10 +202,8 @@ Index: linux-stable/kernel/rtmutex.c raw_spin_unlock_irqrestore(&task->pi_lock, flags); return; } -Index: linux-stable/kernel/rtmutex_common.h -=================================================================== ---- linux-stable.orig/kernel/rtmutex_common.h -+++ linux-stable/kernel/rtmutex_common.h +--- a/kernel/rtmutex_common.h ++++ b/kernel/rtmutex_common.h @@ -103,6 +103,8 @@ static inline struct task_struct *rt_mut /* * PI-futex support (proxy locking functions, etc.): diff --git a/debian/patches/features/all/rt/rtmutex-lock-killable.patch b/debian/patches/features/all/rt/rtmutex-lock-killable.patch index 828da23d7..d068299df 100644 --- a/debian/patches/features/all/rt/rtmutex-lock-killable.patch +++ b/debian/patches/features/all/rt/rtmutex-lock-killable.patch @@ -8,10 +8,8 @@ Signed-off-by: Thomas Gleixner kernel/rtmutex.c | 33 +++++++++++++++++++++++++++------ 2 files changed, 28 insertions(+), 6 deletions(-) -Index: 
linux-stable/include/linux/rtmutex.h -=================================================================== ---- linux-stable.orig/include/linux/rtmutex.h -+++ linux-stable/include/linux/rtmutex.h +--- a/include/linux/rtmutex.h ++++ b/include/linux/rtmutex.h @@ -90,6 +90,7 @@ extern void rt_mutex_destroy(struct rt_m extern void rt_mutex_lock(struct rt_mutex *lock); extern int rt_mutex_lock_interruptible(struct rt_mutex *lock, @@ -20,11 +18,9 @@ Index: linux-stable/include/linux/rtmutex.h extern int rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout, int detect_deadlock); -Index: linux-stable/kernel/rtmutex.c -=================================================================== ---- linux-stable.orig/kernel/rtmutex.c -+++ linux-stable/kernel/rtmutex.c -@@ -791,12 +791,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock); +--- a/kernel/rtmutex.c ++++ b/kernel/rtmutex.c +@@ -815,12 +815,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock); /** * rt_mutex_lock_interruptible - lock a rt_mutex interruptible * @@ -40,7 +36,7 @@ Index: linux-stable/kernel/rtmutex.c * -EDEADLK when the lock would deadlock (when deadlock detection is on) */ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock, -@@ -810,17 +810,38 @@ int __sched rt_mutex_lock_interruptible( +@@ -834,17 +834,38 @@ int __sched rt_mutex_lock_interruptible( EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); /** diff --git a/debian/patches/features/all/rt/rwsem-add-rt-variant.patch b/debian/patches/features/all/rt/rwsem-add-rt-variant.patch index e192641af..79c0dfb99 100644 --- a/debian/patches/features/all/rt/rwsem-add-rt-variant.patch +++ b/debian/patches/features/all/rt/rwsem-add-rt-variant.patch @@ -5,14 +5,12 @@ Date: Wed, 29 Jun 2011 21:02:53 +0200 Signed-off-by: Thomas Gleixner --- include/linux/rwsem.h | 6 ++ - include/linux/rwsem_rt.h | 105 +++++++++++++++++++++++++++++++++++++++++++++++ + include/linux/rwsem_rt.h | 128 +++++++++++++++++++++++++++++++++++++++++++++++ lib/Makefile | 3 + - 3 files changed, 
114 insertions(+) + 3 files changed, 137 insertions(+) -Index: linux-stable/include/linux/rwsem.h -=================================================================== ---- linux-stable.orig/include/linux/rwsem.h -+++ linux-stable/include/linux/rwsem.h +--- a/include/linux/rwsem.h ++++ b/include/linux/rwsem.h @@ -16,6 +16,10 @@ #include @@ -24,18 +22,16 @@ Index: linux-stable/include/linux/rwsem.h struct rw_semaphore; #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK -@@ -130,4 +134,6 @@ extern void down_write_nested(struct rw_ +@@ -139,4 +143,6 @@ do { \ # define down_write_nested(sem, subclass) down_write(sem) #endif +#endif /* !PREEMPT_RT_FULL */ + #endif /* _LINUX_RWSEM_H */ -Index: linux-stable/include/linux/rwsem_rt.h -=================================================================== --- /dev/null -+++ linux-stable/include/linux/rwsem_rt.h -@@ -0,0 +1,105 @@ ++++ b/include/linux/rwsem_rt.h +@@ -0,0 +1,128 @@ +#ifndef _LINUX_RWSEM_RT_H +#define _LINUX_RWSEM_RT_H + @@ -71,20 +67,29 @@ Index: linux-stable/include/linux/rwsem_rt.h +#define DECLARE_RWSEM(lockname) \ + struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname) + -+extern void __rt_rwsem_init(struct rw_semaphore *rwsem, char *name, ++extern void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name, + struct lock_class_key *key); + ++#define __rt_init_rwsem(sem, name, key) \ ++ do { \ ++ rt_mutex_init(&(sem)->lock); \ ++ __rt_rwsem_init((sem), (name), (key));\ ++ } while (0) ++ ++#define __init_rwsem(sem, name, key) __rt_init_rwsem(sem, name, key) ++ +# define rt_init_rwsem(sem) \ +do { \ + static struct lock_class_key __key; \ + \ -+ rt_mutex_init(&(sem)->lock); \ -+ __rt_rwsem_init((sem), #sem, &__key); \ ++ __rt_init_rwsem((sem), #sem, &__key); \ +} while (0) + +extern void rt_down_write(struct rw_semaphore *rwsem); +extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass); +extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass); ++extern void 
rt_down_write_nested_lock(struct rw_semaphore *rwsem, ++ struct lockdep_map *nest); +extern void rt_down_read(struct rw_semaphore *rwsem); +extern int rt_down_write_trylock(struct rw_semaphore *rwsem); +extern int rt_down_read_trylock(struct rw_semaphore *rwsem); @@ -139,13 +144,25 @@ Index: linux-stable/include/linux/rwsem_rt.h +{ + rt_down_write_nested(sem, subclass); +} ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++static inline void down_write_nest_lock(struct rw_semaphore *sem, ++ struct rw_semaphore *nest_lock) ++{ ++ rt_down_write_nested_lock(sem, &nest_lock->dep_map); ++} + ++#else ++ ++static inline void down_write_nest_lock(struct rw_semaphore *sem, ++ struct rw_semaphore *nest_lock) ++{ ++ rt_down_write_nested_lock(sem, NULL); ++} +#endif -Index: linux-stable/lib/Makefile -=================================================================== ---- linux-stable.orig/lib/Makefile -+++ linux-stable/lib/Makefile -@@ -38,8 +38,11 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o ++#endif +--- a/lib/Makefile ++++ b/lib/Makefile +@@ -38,8 +38,11 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o @@ -154,6 +171,6 @@ Index: linux-stable/lib/Makefile lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o +endif + lib-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS)) - obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o diff --git a/debian/patches/features/all/rt/sched-adjust-reset-on-fork-always.patch b/debian/patches/features/all/rt/sched-adjust-reset-on-fork-always.patch new file mode 100644 index 000000000..8b7d6d541 --- /dev/null +++ b/debian/patches/features/all/rt/sched-adjust-reset-on-fork-always.patch @@ -0,0 +1,31 @@ +Subject: sched: Adjust sched_reset_on_fork when nothing else changes +From: Thomas Gleixner +Date: Thu, 20 
Dec 2012 14:58:00 +0100 + +If the policy and priority remain unchanged a possible modification of +sched_reset_on_fork gets lost in the early exit path. + +Signed-off-by: Thomas Gleixner +Cc: stable@vger.kernel.org +Cc: stable-rt@vger.kernel.org +--- + kernel/sched/core.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -4117,10 +4117,13 @@ recheck: + } + + /* +- * If not changing anything there's no need to proceed further: ++ * If not changing anything there's no need to proceed ++ * further, but store a possible modification of ++ * reset_on_fork. + */ + if (unlikely(policy == p->policy && (!rt_policy(policy) || + param->sched_priority == p->rt_priority))) { ++ p->sched_reset_on_fork = reset_on_fork; + task_rq_unlock(rq, p, &flags); + return 0; + } diff --git a/debian/patches/features/all/rt/sched-better-debug-output-for-might-sleep.patch b/debian/patches/features/all/rt/sched-better-debug-output-for-might-sleep.patch index 471a05c0e..d563e26b1 100644 --- a/debian/patches/features/all/rt/sched-better-debug-output-for-might-sleep.patch +++ b/debian/patches/features/all/rt/sched-better-debug-output-for-might-sleep.patch @@ -7,30 +7,25 @@ have no idea what disabled preemption. Add some debug infrastructure. 
Signed-off-by: Thomas Gleixner --- - include/linux/sched.h | 4 ++++ + include/linux/sched.h | 3 +++ kernel/sched/core.c | 23 +++++++++++++++++++++-- - 2 files changed, 25 insertions(+), 2 deletions(-) + 2 files changed, 24 insertions(+), 2 deletions(-) -Index: linux-stable/include/linux/sched.h -=================================================================== ---- linux-stable.orig/include/linux/sched.h -+++ linux-stable/include/linux/sched.h -@@ -1624,6 +1624,10 @@ struct task_struct { - int kmap_idx; +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1642,6 +1642,9 @@ struct task_struct { pte_t kmap_pte[KM_TYPE_NR]; + # endif #endif -+ +#ifdef CONFIG_DEBUG_PREEMPT + unsigned long preempt_disable_ip; +#endif }; - #ifdef CONFIG_PREEMPT_RT_FULL -Index: linux-stable/kernel/sched/core.c -=================================================================== ---- linux-stable.orig/kernel/sched/core.c -+++ linux-stable/kernel/sched/core.c -@@ -3311,8 +3311,13 @@ void __kprobes add_preempt_count(int val + #ifdef CONFIG_NUMA_BALANCING +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -2789,8 +2789,13 @@ void __kprobes add_preempt_count(int val DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK - 10); #endif @@ -46,7 +41,7 @@ Index: linux-stable/kernel/sched/core.c } EXPORT_SYMBOL(add_preempt_count); -@@ -3355,6 +3360,13 @@ static noinline void __schedule_bug(stru +@@ -2833,6 +2838,13 @@ static noinline void __schedule_bug(stru print_modules(); if (irqs_disabled()) print_irqtrace_events(prev); @@ -60,7 +55,7 @@ Index: linux-stable/kernel/sched/core.c dump_stack(); add_taint(TAINT_WARN); } -@@ -7700,6 +7712,13 @@ void __might_sleep(const char *file, int +@@ -7308,6 +7320,13 @@ void __might_sleep(const char *file, int debug_show_held_locks(current); if (irqs_disabled()) print_irqtrace_events(current); diff --git a/debian/patches/features/all/rt/sched-clear-pf-thread-bound-on-fallback-rq.patch 
b/debian/patches/features/all/rt/sched-clear-pf-thread-bound-on-fallback-rq.patch index 82f7bf2f6..ebcd586fe 100644 --- a/debian/patches/features/all/rt/sched-clear-pf-thread-bound-on-fallback-rq.patch +++ b/debian/patches/features/all/rt/sched-clear-pf-thread-bound-on-fallback-rq.patch @@ -7,11 +7,9 @@ Signed-off-by: Thomas Gleixner kernel/sched/core.c | 6 ++++++ 1 file changed, 6 insertions(+) -Index: linux-stable/kernel/sched/core.c -=================================================================== ---- linux-stable.orig/kernel/sched/core.c -+++ linux-stable/kernel/sched/core.c -@@ -1323,6 +1323,12 @@ out: +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -1192,6 +1192,12 @@ out: } } diff --git a/debian/patches/features/all/rt/sched-cond-resched.patch b/debian/patches/features/all/rt/sched-cond-resched.patch index 01b1f8463..fb436a67e 100644 --- a/debian/patches/features/all/rt/sched-cond-resched.patch +++ b/debian/patches/features/all/rt/sched-cond-resched.patch @@ -7,11 +7,9 @@ Signed-off-by: Thomas Gleixner kernel/sched/core.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) -Index: linux-stable/kernel/sched/core.c -=================================================================== ---- linux-stable.orig/kernel/sched/core.c -+++ linux-stable/kernel/sched/core.c -@@ -4781,9 +4781,17 @@ static inline int should_resched(void) +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -4315,9 +4315,17 @@ static inline int should_resched(void) static void __cond_resched(void) { diff --git a/debian/patches/features/all/rt/sched-consider-pi-boosting-in-setscheduler.patch b/debian/patches/features/all/rt/sched-consider-pi-boosting-in-setscheduler.patch new file mode 100644 index 000000000..064cc441c --- /dev/null +++ b/debian/patches/features/all/rt/sched-consider-pi-boosting-in-setscheduler.patch @@ -0,0 +1,161 @@ +Subject: sched: Consider pi boosting in setscheduler +From: Thomas Gleixner +Date: Thu, 20 Dec 2012 15:13:49 +0100 + +If a 
PI boosted task policy/priority is modified by a setscheduler() +call we unconditionally dequeue and requeue the task if it is on the +runqueue even if the new priority is lower than the current effective +boosted priority. This can result in undesired reordering of the +priority bucket list. + +If the new priority is less or equal than the current effective we +just store the new parameters in the task struct and leave the +scheduler class and the runqueue untouched. This is handled when the +task deboosts itself. Only if the new priority is higher than the +effective boosted priority we apply the change immediately. + +Signed-off-by: Thomas Gleixner +Cc: stable@vger.kernel.org +Cc: stable-rt@vger.kernel.org +--- + include/linux/sched.h | 5 +++++ + kernel/rtmutex.c | 12 ++++++++++++ + kernel/sched/core.c | 40 +++++++++++++++++++++++++++++++--------- + 3 files changed, 48 insertions(+), 9 deletions(-) + +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -2175,6 +2175,7 @@ extern unsigned int sysctl_sched_cfs_ban + #ifdef CONFIG_RT_MUTEXES + extern int rt_mutex_getprio(struct task_struct *p); + extern void rt_mutex_setprio(struct task_struct *p, int prio); ++extern int rt_mutex_check_prio(struct task_struct *task, int newprio); + extern void rt_mutex_adjust_pi(struct task_struct *p); + static inline bool tsk_is_pi_blocked(struct task_struct *tsk) + { +@@ -2185,6 +2186,10 @@ static inline int rt_mutex_getprio(struc + { + return p->normal_prio; + } ++static inline int rt_mutex_check_prio(struct task_struct *task, int newprio) ++{ ++ return 0; ++} + # define rt_mutex_adjust_pi(p) do { } while (0) + static inline bool tsk_is_pi_blocked(struct task_struct *tsk) + { +--- a/kernel/rtmutex.c ++++ b/kernel/rtmutex.c +@@ -124,6 +124,18 @@ int rt_mutex_getprio(struct task_struct + } + + /* ++ * Called by sched_setscheduler() to check whether the priority change ++ * is overruled by a possible priority boosting. 
++ */ ++int rt_mutex_check_prio(struct task_struct *task, int newprio) ++{ ++ if (!task_has_pi_waiters(task)) ++ return 0; ++ ++ return task_top_pi_waiter(task)->pi_list_entry.prio <= newprio; ++} ++ ++/* + * Adjust the priority of a task, after its pi_waiters got modified. + * + * This can be both boosting and unboosting. task->pi_lock must be held. +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -3764,7 +3764,8 @@ EXPORT_SYMBOL(sleep_on_timeout); + * This function changes the 'effective' priority of a task. It does + * not touch ->normal_prio like __setscheduler(). + * +- * Used by the rt_mutex code to implement priority inheritance logic. ++ * Used by the rt_mutex code to implement priority inheritance ++ * logic. Call site only calls if the priority of the task changed. + */ + void rt_mutex_setprio(struct task_struct *p, int prio) + { +@@ -3987,20 +3988,25 @@ static struct task_struct *find_process_ + return pid ? find_task_by_vpid(pid) : current; + } + +-/* Actually do priority change: must hold rq lock. */ +-static void +-__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) ++static void __setscheduler_params(struct task_struct *p, int policy, int prio) + { + p->policy = policy; + p->rt_priority = prio; + p->normal_prio = normal_prio(p); ++ set_load_weight(p); ++} ++ ++/* Actually do priority change: must hold rq lock. 
*/ ++static void ++__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) ++{ ++ __setscheduler_params(p, policy, prio); + /* we are holding p->pi_lock already */ + p->prio = rt_mutex_getprio(p); + if (rt_prio(p->prio)) + p->sched_class = &rt_sched_class; + else + p->sched_class = &fair_sched_class; +- set_load_weight(p); + } + + /* +@@ -4022,6 +4028,7 @@ static bool check_same_owner(struct task + static int __sched_setscheduler(struct task_struct *p, int policy, + const struct sched_param *param, bool user) + { ++ int newprio = MAX_RT_PRIO - 1 - param->sched_priority; + int retval, oldprio, oldpolicy = -1, on_rq, running; + unsigned long flags; + const struct sched_class *prev_class; +@@ -4149,6 +4156,25 @@ recheck: + task_rq_unlock(rq, p, &flags); + goto recheck; + } ++ ++ p->sched_reset_on_fork = reset_on_fork; ++ oldprio = p->prio; ++ ++ /* ++ * Special case for priority boosted tasks. ++ * ++ * If the new priority is lower or equal (user space view) ++ * than the current (boosted) priority, we just store the new ++ * normal parameters and do not touch the scheduler class and ++ * the runqueue. This will be done when the task deboost ++ * itself. ++ */ ++ if (rt_mutex_check_prio(p, newprio)) { ++ __setscheduler_params(p, policy, param->sched_priority); ++ task_rq_unlock(rq, p, &flags); ++ return 0; ++ } ++ + on_rq = p->on_rq; + running = task_current(rq, p); + if (on_rq) +@@ -4156,9 +4182,6 @@ recheck: + if (running) + p->sched_class->put_prev_task(rq, p); + +- p->sched_reset_on_fork = reset_on_fork; +- +- oldprio = p->prio; + prev_class = p->sched_class; + __setscheduler(rq, p, policy, param->sched_priority); + +@@ -4171,7 +4194,6 @@ recheck: + */ + enqueue_task(rq, p, oldprio <= p->prio ? 
ENQUEUE_HEAD : 0); + } +- + check_class_changed(rq, p, prev_class, oldprio); + task_rq_unlock(rq, p, &flags); + diff --git a/debian/patches/features/all/rt/sched-delay-put-task.patch b/debian/patches/features/all/rt/sched-delay-put-task.patch index 96ebac0c7..38ffa569f 100644 --- a/debian/patches/features/all/rt/sched-delay-put-task.patch +++ b/debian/patches/features/all/rt/sched-delay-put-task.patch @@ -5,14 +5,12 @@ Date: Tue, 31 May 2011 16:59:16 +0200 Signed-off-by: Thomas Gleixner --- include/linux/sched.h | 13 +++++++++++++ - kernel/fork.c | 11 +++++++++++ - 2 files changed, 24 insertions(+) + kernel/fork.c | 15 ++++++++++++++- + 2 files changed, 27 insertions(+), 1 deletion(-) -Index: linux-stable/include/linux/sched.h -=================================================================== ---- linux-stable.orig/include/linux/sched.h -+++ linux-stable/include/linux/sched.h -@@ -1607,6 +1607,9 @@ struct task_struct { +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1623,6 +1623,9 @@ struct task_struct { #ifdef CONFIG_UPROBES struct uprobe_task *utask; #endif @@ -22,7 +20,7 @@ Index: linux-stable/include/linux/sched.h }; /* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ -@@ -1791,6 +1794,15 @@ extern struct pid *cad_pid; +@@ -1813,6 +1816,15 @@ extern struct pid *cad_pid; extern void free_task(struct task_struct *tsk); #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) @@ -38,19 +36,28 @@ Index: linux-stable/include/linux/sched.h extern void __put_task_struct(struct task_struct *t); static inline void put_task_struct(struct task_struct *t) -@@ -1798,6 +1810,7 @@ static inline void put_task_struct(struc +@@ -1820,6 +1832,7 @@ static inline void put_task_struct(struc if (atomic_dec_and_test(&t->usage)) __put_task_struct(t); } +#endif - extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st); - extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st); -Index: linux-stable/kernel/fork.c -=================================================================== ---- linux-stable.orig/kernel/fork.c -+++ linux-stable/kernel/fork.c -@@ -244,7 +244,18 @@ void __put_task_struct(struct task_struc + extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); + extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -229,7 +229,9 @@ static inline void put_signal_struct(str + if (atomic_dec_and_test(&sig->sigcnt)) + free_signal_struct(sig); + } +- ++#ifdef CONFIG_PREEMPT_RT_BASE ++static ++#endif + void __put_task_struct(struct task_struct *tsk) + { + WARN_ON(!tsk->exit_state); +@@ -244,7 +246,18 @@ void __put_task_struct(struct task_struc if (!profile_handoff_task(tsk)) free_task(tsk); } @@ -59,7 +66,7 @@ Index: linux-stable/kernel/fork.c +#else +void __put_task_struct_cb(struct rcu_head *rhp) +{ -+ struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); ++ struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu); + + __put_task_struct(tsk); + diff --git a/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch 
b/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch index da5e0933e..8fd377e58 100644 --- a/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch +++ b/debian/patches/features/all/rt/sched-disable-rt-group-sched-on-rt.patch @@ -16,11 +16,9 @@ Signed-off-by: Thomas Gleixner init/Kconfig | 1 + 1 file changed, 1 insertion(+) -Index: linux-stable/init/Kconfig -=================================================================== ---- linux-stable.orig/init/Kconfig -+++ linux-stable/init/Kconfig -@@ -806,6 +806,7 @@ config RT_GROUP_SCHED +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -946,6 +946,7 @@ config RT_GROUP_SCHED bool "Group scheduling for SCHED_RR/FIFO" depends on EXPERIMENTAL depends on CGROUP_SCHED diff --git a/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch b/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch index 276b75e62..3f429e397 100644 --- a/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch +++ b/debian/patches/features/all/rt/sched-disable-ttwu-queue.patch @@ -7,11 +7,9 @@ Signed-off-by: Thomas Gleixner kernel/sched/features.h | 4 ++++ 1 file changed, 4 insertions(+) -Index: linux-stable/kernel/sched/features.h -=================================================================== ---- linux-stable.orig/kernel/sched/features.h -+++ linux-stable/kernel/sched/features.h -@@ -60,11 +60,15 @@ SCHED_FEAT(OWNER_SPIN, true) +--- a/kernel/sched/features.h ++++ b/kernel/sched/features.h +@@ -57,11 +57,15 @@ SCHED_FEAT(OWNER_SPIN, true) */ SCHED_FEAT(NONTASK_POWER, true) diff --git a/debian/patches/features/all/rt/sched-enqueue-to-head.patch b/debian/patches/features/all/rt/sched-enqueue-to-head.patch new file mode 100644 index 000000000..8f1941734 --- /dev/null +++ b/debian/patches/features/all/rt/sched-enqueue-to-head.patch @@ -0,0 +1,68 @@ +Subject: sched: Queue RT tasks to head when prio drops +From: Thomas Gleixner +Date: Tue, 04 Dec 2012 08:56:41 +0100 + +The following scenario does 
not work correctly: + +Runqueue of CPUx contains two runnable and pinned tasks: + T1: SCHED_FIFO, prio 80 + T2: SCHED_FIFO, prio 80 + +T1 is on the cpu and executes the following syscalls (classic priority +ceiling scenario): + + sys_sched_setscheduler(pid(T1), SCHED_FIFO, .prio = 90); + ... + sys_sched_setscheduler(pid(T1), SCHED_FIFO, .prio = 80); + ... + +Now T1 gets preempted by T3 (SCHED_FIFO, prio 95). After T3 goes back +to sleep the scheduler picks T2. Surprise! + +The same happens w/o actual preemption when T1 is forced into the +scheduler due to a sporadic NEED_RESCHED event. The scheduler invokes +pick_next_task() which returns T2. So T1 gets preempted and scheduled +out. + +This happens because sched_setscheduler() dequeues T1 from the prio 90 +list and then enqueues it on the tail of the prio 80 list behind T2. +This violates the POSIX spec and surprises user space which relies on +the guarantee that SCHED_FIFO tasks are not scheduled out unless they +give the CPU up voluntarily or are preempted by a higher priority +task. In the latter case the preempted task must get back on the CPU +after the preempting task schedules out again. + +We fixed a similar issue already in commit 60db48c (sched: Queue a +deboosted task to the head of the RT prio queue). The same treatment +is necessary for sched_setscheduler(). So enqueue to head of the prio +bucket list if the priority of the task is lowered. + +It might be possible that existing user space relies on the current +behaviour, but it can be considered highly unlikely due to the corner +case nature of the application scenario. 
+ +Signed-off-by: Thomas Gleixner +Cc: stable@vger.kernel.org +Cc: stable-rt@vger.kernel.org +--- + kernel/sched/core.c | 9 +++++++-- + 1 file changed, 7 insertions(+), 2 deletions(-) + +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -4164,8 +4164,13 @@ recheck: + + if (running) + p->sched_class->set_curr_task(rq); +- if (on_rq) +- enqueue_task(rq, p, 0); ++ if (on_rq) { ++ /* ++ * We enqueue to tail when the priority of a task is ++ * increased (user space view). ++ */ ++ enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0); ++ } + + check_class_changed(rq, p, prev_class, oldprio); + task_rq_unlock(rq, p, &flags); diff --git a/debian/patches/features/all/rt/sched-limit-nr-migrate.patch b/debian/patches/features/all/rt/sched-limit-nr-migrate.patch index 2eca354ea..334cd3dfe 100644 --- a/debian/patches/features/all/rt/sched-limit-nr-migrate.patch +++ b/debian/patches/features/all/rt/sched-limit-nr-migrate.patch @@ -7,11 +7,9 @@ Signed-off-by: Thomas Gleixner kernel/sched/core.c | 4 ++++ 1 file changed, 4 insertions(+) -Index: linux-stable/kernel/sched/core.c -=================================================================== ---- linux-stable.orig/kernel/sched/core.c -+++ linux-stable/kernel/sched/core.c -@@ -263,7 +263,11 @@ late_initcall(sched_init_debug); +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -272,7 +272,11 @@ late_initcall(sched_init_debug); * Number of tasks to iterate in a single balance run. * Limited because this is done with IRQs disabled. 
*/ diff --git a/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch b/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch index 12c5373b6..d746c6691 100644 --- a/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch +++ b/debian/patches/features/all/rt/sched-might-sleep-do-not-account-rcu-depth.patch @@ -8,11 +8,9 @@ Signed-off-by: Thomas Gleixner kernel/sched/core.c | 3 ++- 2 files changed, 9 insertions(+), 1 deletion(-) -Index: linux-stable/include/linux/rcupdate.h -=================================================================== ---- linux-stable.orig/include/linux/rcupdate.h -+++ linux-stable/include/linux/rcupdate.h -@@ -157,6 +157,11 @@ void synchronize_rcu(void); +--- a/include/linux/rcupdate.h ++++ b/include/linux/rcupdate.h +@@ -182,6 +182,11 @@ void synchronize_rcu(void); * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. */ #define rcu_preempt_depth() (current->rcu_read_lock_nesting) @@ -24,7 +22,7 @@ Index: linux-stable/include/linux/rcupdate.h #else /* #ifdef CONFIG_PREEMPT_RCU */ -@@ -180,6 +185,8 @@ static inline int rcu_preempt_depth(void +@@ -205,6 +210,8 @@ static inline int rcu_preempt_depth(void return 0; } @@ -33,11 +31,9 @@ Index: linux-stable/include/linux/rcupdate.h #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ /* Internal to kernel */ -Index: linux-stable/kernel/sched/core.c -=================================================================== ---- linux-stable.orig/kernel/sched/core.c -+++ linux-stable/kernel/sched/core.c -@@ -7450,7 +7450,8 @@ void __init sched_init(void) +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -7058,7 +7058,8 @@ void __init sched_init(void) #ifdef CONFIG_DEBUG_ATOMIC_SLEEP static inline int preempt_count_equals(int preempt_offset) { diff --git a/debian/patches/features/all/rt/sched-migrate-disable.patch b/debian/patches/features/all/rt/sched-migrate-disable.patch index cdd15f1d5..1960a047f 100644 
--- a/debian/patches/features/all/rt/sched-migrate-disable.patch +++ b/debian/patches/features/all/rt/sched-migrate-disable.patch @@ -10,10 +10,8 @@ Signed-off-by: Thomas Gleixner lib/smp_processor_id.c | 6 +-- 4 files changed, 104 insertions(+), 11 deletions(-) -Index: linux-stable/include/linux/preempt.h -=================================================================== ---- linux-stable.orig/include/linux/preempt.h -+++ linux-stable/include/linux/preempt.h +--- a/include/linux/preempt.h ++++ b/include/linux/preempt.h @@ -108,6 +108,14 @@ do { \ #endif /* CONFIG_PREEMPT_COUNT */ @@ -29,11 +27,9 @@ Index: linux-stable/include/linux/preempt.h #ifdef CONFIG_PREEMPT_RT_FULL # define preempt_disable_rt() preempt_disable() # define preempt_enable_rt() preempt_enable() -Index: linux-stable/include/linux/sched.h -=================================================================== ---- linux-stable.orig/include/linux/sched.h -+++ linux-stable/include/linux/sched.h -@@ -1275,6 +1275,7 @@ struct task_struct { +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1279,6 +1279,7 @@ struct task_struct { #endif unsigned int policy; @@ -41,17 +37,17 @@ Index: linux-stable/include/linux/sched.h int nr_cpus_allowed; cpumask_t cpus_allowed; -@@ -1614,9 +1615,6 @@ struct task_struct { +@@ -1630,9 +1631,6 @@ struct task_struct { #endif }; -/* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ -#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) - - #ifdef CONFIG_PREEMPT_RT_FULL - static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; } - #else -@@ -2773,6 +2771,15 @@ static inline void set_task_cpu(struct t + #ifdef CONFIG_NUMA_BALANCING + extern void task_numa_fault(int node, int pages, bool migrated); + extern void set_numabalancing_state(bool enabled); +@@ -2812,6 +2810,15 @@ static inline void set_task_cpu(struct t #endif /* CONFIG_SMP */ @@ -67,11 +63,9 @@ Index: linux-stable/include/linux/sched.h extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); extern long sched_getaffinity(pid_t pid, struct cpumask *mask); -Index: linux-stable/kernel/sched/core.c -=================================================================== ---- linux-stable.orig/kernel/sched/core.c -+++ linux-stable/kernel/sched/core.c -@@ -5203,11 +5203,12 @@ void __cpuinit init_idle(struct task_str +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -4734,11 +4734,12 @@ void __cpuinit init_idle(struct task_str #ifdef CONFIG_SMP void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) { @@ -88,7 +82,7 @@ Index: linux-stable/kernel/sched/core.c } /* -@@ -5258,7 +5259,7 @@ int set_cpus_allowed_ptr(struct task_str +@@ -4789,7 +4790,7 @@ int set_cpus_allowed_ptr(struct task_str do_set_cpus_allowed(p, new_mask); /* Can the task run on the task's current CPU? If so, we're done */ @@ -97,7 +91,7 @@ Index: linux-stable/kernel/sched/core.c goto out; dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); -@@ -5277,6 +5278,83 @@ out: +@@ -4808,6 +4809,83 @@ out: } EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); @@ -181,10 +175,8 @@ Index: linux-stable/kernel/sched/core.c /* * Move (not current) task off this cpu, onto dest cpu. 
We're doing * this because either it can't run here any more (set_cpus_allowed() -Index: linux-stable/lib/smp_processor_id.c -=================================================================== ---- linux-stable.orig/lib/smp_processor_id.c -+++ linux-stable/lib/smp_processor_id.c +--- a/lib/smp_processor_id.c ++++ b/lib/smp_processor_id.c @@ -39,9 +39,9 @@ notrace unsigned int debug_smp_processor if (!printk_ratelimit()) goto out_enable; diff --git a/debian/patches/features/all/rt/sched-mmdrop-delayed.patch b/debian/patches/features/all/rt/sched-mmdrop-delayed.patch index a007da918..d7a4f03c7 100644 --- a/debian/patches/features/all/rt/sched-mmdrop-delayed.patch +++ b/debian/patches/features/all/rt/sched-mmdrop-delayed.patch @@ -9,15 +9,13 @@ Signed-off-by: Thomas Gleixner --- include/linux/mm_types.h | 4 ++++ include/linux/sched.h | 12 ++++++++++++ - kernel/fork.c | 15 ++++++++++++++- + kernel/fork.c | 13 +++++++++++++ kernel/sched/core.c | 19 +++++++++++++++++-- - 4 files changed, 47 insertions(+), 3 deletions(-) + 4 files changed, 46 insertions(+), 2 deletions(-) -Index: linux-stable/include/linux/mm_types.h -=================================================================== ---- linux-stable.orig/include/linux/mm_types.h -+++ linux-stable/include/linux/mm_types.h -@@ -12,6 +12,7 @@ +--- a/include/linux/mm_types.h ++++ b/include/linux/mm_types.h +@@ -11,6 +11,7 @@ #include #include #include @@ -25,8 +23,8 @@ Index: linux-stable/include/linux/mm_types.h #include #include #include -@@ -409,6 +410,9 @@ struct mm_struct { - struct cpumask cpumask_allocation; +@@ -440,6 +441,9 @@ struct mm_struct { + int first_nid; #endif struct uprobes_state uprobes_state; +#ifdef CONFIG_PREEMPT_RT_BASE @@ -34,12 +32,10 @@ Index: linux-stable/include/linux/mm_types.h +#endif }; - static inline void mm_init_cpumask(struct mm_struct *mm) -Index: linux-stable/include/linux/sched.h -=================================================================== ---- 
linux-stable.orig/include/linux/sched.h -+++ linux-stable/include/linux/sched.h -@@ -2324,12 +2324,24 @@ extern struct mm_struct * mm_alloc(void) + /* first nid will either be a valid NID or one of these values */ +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -2354,12 +2354,24 @@ extern struct mm_struct * mm_alloc(void) /* mmdrop drops the mm and the page tables */ extern void __mmdrop(struct mm_struct *); @@ -64,20 +60,9 @@ Index: linux-stable/include/linux/sched.h /* mmput gets rid of the mappings and all user-space */ extern void mmput(struct mm_struct *); /* Grab a reference to a task's mm, if it is not already going away */ -Index: linux-stable/kernel/fork.c -=================================================================== ---- linux-stable.orig/kernel/fork.c -+++ linux-stable/kernel/fork.c -@@ -249,7 +249,7 @@ EXPORT_SYMBOL_GPL(__put_task_struct); - #else - void __put_task_struct_cb(struct rcu_head *rhp) - { -- struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); -+ struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu); - - __put_task_struct(tsk); - -@@ -608,6 +608,19 @@ void __mmdrop(struct mm_struct *mm) +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -616,6 +616,19 @@ void __mmdrop(struct mm_struct *mm) } EXPORT_SYMBOL_GPL(__mmdrop); @@ -97,11 +82,9 @@ Index: linux-stable/kernel/fork.c /* * Decrement the use count and release all resources for an mm. 
*/ -Index: linux-stable/kernel/sched/core.c -=================================================================== ---- linux-stable.orig/kernel/sched/core.c -+++ linux-stable/kernel/sched/core.c -@@ -1970,8 +1970,12 @@ static void finish_task_switch(struct rq +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -1845,8 +1845,12 @@ static void finish_task_switch(struct rq finish_arch_post_lock_switch(); fire_sched_in_preempt_notifiers(current); @@ -115,7 +98,7 @@ Index: linux-stable/kernel/sched/core.c if (unlikely(prev_state == TASK_DEAD)) { /* * Remove function-return probe instances associated with this -@@ -5302,6 +5306,8 @@ static int migration_cpu_stop(void *data +@@ -4833,6 +4837,8 @@ static int migration_cpu_stop(void *data #ifdef CONFIG_HOTPLUG_CPU @@ -124,7 +107,7 @@ Index: linux-stable/kernel/sched/core.c /* * Ensures that the idle task is using init_mm right before its cpu goes * offline. -@@ -5314,7 +5320,12 @@ void idle_task_exit(void) +@@ -4845,7 +4851,12 @@ void idle_task_exit(void) if (mm != &init_mm) switch_mm(mm, &init_mm, current); @@ -138,7 +121,7 @@ Index: linux-stable/kernel/sched/core.c } /* -@@ -5622,6 +5633,10 @@ migration_call(struct notifier_block *nf +@@ -5162,6 +5173,10 @@ migration_call(struct notifier_block *nf case CPU_DEAD: calc_load_migrate(rq); diff --git a/debian/patches/features/all/rt/sched-rt-fix-migrate_enable-thinko.patch b/debian/patches/features/all/rt/sched-rt-fix-migrate_enable-thinko.patch index 76fabf4b0..7634a6037 100644 --- a/debian/patches/features/all/rt/sched-rt-fix-migrate_enable-thinko.patch +++ b/debian/patches/features/all/rt/sched-rt-fix-migrate_enable-thinko.patch @@ -20,11 +20,9 @@ Signed-off-by: Thomas Gleixner kernel/sched/debug.c | 7 +++++++ 2 files changed, 10 insertions(+), 1 deletion(-) -Index: linux-stable/kernel/sched/core.c -=================================================================== ---- linux-stable.orig/kernel/sched/core.c -+++ linux-stable/kernel/sched/core.c -@@ -5355,12 +5355,14 
@@ void migrate_enable(void) +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -4886,12 +4886,14 @@ void migrate_enable(void) */ rq = this_rq(); raw_spin_lock_irqsave(&rq->lock, flags); @@ -40,11 +38,9 @@ Index: linux-stable/kernel/sched/core.c if (p->sched_class->set_cpus_allowed) p->sched_class->set_cpus_allowed(p, mask); p->nr_cpus_allowed = cpumask_weight(mask); -Index: linux-stable/kernel/sched/debug.c -=================================================================== ---- linux-stable.orig/kernel/sched/debug.c -+++ linux-stable/kernel/sched/debug.c -@@ -237,6 +237,9 @@ void print_rt_rq(struct seq_file *m, int +--- a/kernel/sched/debug.c ++++ b/kernel/sched/debug.c +@@ -253,6 +253,9 @@ void print_rt_rq(struct seq_file *m, int P(rt_throttled); PN(rt_time); PN(rt_runtime); @@ -54,7 +50,7 @@ Index: linux-stable/kernel/sched/debug.c #undef PN #undef P -@@ -491,6 +494,10 @@ void proc_sched_show_task(struct task_st +@@ -507,6 +510,10 @@ void proc_sched_show_task(struct task_st P(se.load.weight); P(policy); P(prio); diff --git a/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch b/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch index b7ee8e227..c5ced9ed0 100644 --- a/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch +++ b/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch @@ -8,11 +8,9 @@ Signed-off-by: Thomas Gleixner kernel/sched/core.c | 31 ++++++++++++++++++++++++++++++- 2 files changed, 33 insertions(+), 1 deletion(-) -Index: linux-stable/include/linux/sched.h -=================================================================== ---- linux-stable.orig/include/linux/sched.h -+++ linux-stable/include/linux/sched.h -@@ -1080,6 +1080,7 @@ struct sched_domain; +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1062,6 +1062,7 @@ struct sched_domain; #define WF_SYNC 0x01 /* waker goes to sleep after wakup */ #define WF_FORK 0x02 /* child wakeup after fork */ #define WF_MIGRATED 0x04 /* internal use, task got 
migrated */ @@ -20,7 +18,7 @@ Index: linux-stable/include/linux/sched.h #define ENQUEUE_WAKEUP 1 #define ENQUEUE_HEAD 2 -@@ -1234,6 +1235,7 @@ enum perf_event_task_context { +@@ -1238,6 +1239,7 @@ enum perf_event_task_context { struct task_struct { volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ @@ -28,7 +26,7 @@ Index: linux-stable/include/linux/sched.h void *stack; atomic_t usage; unsigned int flags; /* per process flags, defined below */ -@@ -2220,6 +2222,7 @@ extern void xtime_update(unsigned long t +@@ -2250,6 +2252,7 @@ extern void xtime_update(unsigned long t extern int wake_up_state(struct task_struct *tsk, unsigned int state); extern int wake_up_process(struct task_struct *tsk); @@ -36,11 +34,9 @@ Index: linux-stable/include/linux/sched.h extern void wake_up_new_task(struct task_struct *tsk); #ifdef CONFIG_SMP extern void kick_process(struct task_struct *tsk); -Index: linux-stable/kernel/sched/core.c -=================================================================== ---- linux-stable.orig/kernel/sched/core.c -+++ linux-stable/kernel/sched/core.c -@@ -1588,8 +1588,25 @@ try_to_wake_up(struct task_struct *p, un +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -1438,8 +1438,25 @@ try_to_wake_up(struct task_struct *p, un smp_wmb(); raw_spin_lock_irqsave(&p->pi_lock, flags); @@ -67,7 +63,7 @@ Index: linux-stable/kernel/sched/core.c success = 1; /* we're going to change ->state */ cpu = task_cpu(p); -@@ -1695,6 +1712,18 @@ int wake_up_process(struct task_struct * +@@ -1533,6 +1550,18 @@ int wake_up_process(struct task_struct * } EXPORT_SYMBOL(wake_up_process); diff --git a/debian/patches/features/all/rt/sched-teach-migrate_disable-about-atomic-contexts.patch b/debian/patches/features/all/rt/sched-teach-migrate_disable-about-atomic-contexts.patch index 0b3bd74e6..2871f82bf 100644 --- a/debian/patches/features/all/rt/sched-teach-migrate_disable-about-atomic-contexts.patch +++ 
b/debian/patches/features/all/rt/sched-teach-migrate_disable-about-atomic-contexts.patch @@ -37,11 +37,9 @@ Link: http://lkml.kernel.org/n/tip-wbot4vsmwhi8vmbf83hsclk6@git.kernel.org kernel/sched/core.c | 21 +++++++++++++++++++++ 2 files changed, 24 insertions(+) -Index: linux-stable/include/linux/sched.h -=================================================================== ---- linux-stable.orig/include/linux/sched.h -+++ linux-stable/include/linux/sched.h -@@ -1277,6 +1277,9 @@ struct task_struct { +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1281,6 +1281,9 @@ struct task_struct { unsigned int policy; #ifdef CONFIG_PREEMPT_RT_FULL int migrate_disable; @@ -51,11 +49,9 @@ Index: linux-stable/include/linux/sched.h #endif int nr_cpus_allowed; cpumask_t cpus_allowed; -Index: linux-stable/kernel/sched/core.c -=================================================================== ---- linux-stable.orig/kernel/sched/core.c -+++ linux-stable/kernel/sched/core.c -@@ -5286,6 +5286,17 @@ void migrate_disable(void) +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -4817,6 +4817,17 @@ void migrate_disable(void) unsigned long flags; struct rq *rq; @@ -73,7 +69,7 @@ Index: linux-stable/kernel/sched/core.c preempt_disable(); if (p->migrate_disable) { p->migrate_disable++; -@@ -5334,6 +5345,16 @@ void migrate_enable(void) +@@ -4865,6 +4876,16 @@ void migrate_enable(void) unsigned long flags; struct rq *rq; diff --git a/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch b/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch index 2299dd819..a4a536989 100644 --- a/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch +++ b/debian/patches/features/all/rt/sched-ttwu-ensure-success-return-is-correct.patch @@ -18,11 +18,9 @@ Cc: stable-rt@vger.kernel.org kernel/sched/core.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) -Index: linux-stable/kernel/sched/core.c 
-=================================================================== ---- linux-stable.orig/kernel/sched/core.c -+++ linux-stable/kernel/sched/core.c -@@ -1595,8 +1595,10 @@ try_to_wake_up(struct task_struct *p, un +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -1445,8 +1445,10 @@ try_to_wake_up(struct task_struct *p, un * if the wakeup condition is true. */ if (!(wake_flags & WF_LOCK_SLEEPER)) { diff --git a/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch b/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch index 4713348f6..8a30fc31b 100644 --- a/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch +++ b/debian/patches/features/all/rt/scsi-fcoe-rt-aware.patch @@ -9,10 +9,8 @@ Signed-off-by: Thomas Gleixner drivers/scsi/libfc/fc_exch.c | 4 ++-- 3 files changed, 13 insertions(+), 13 deletions(-) -Index: linux-stable/drivers/scsi/fcoe/fcoe.c -=================================================================== ---- linux-stable.orig/drivers/scsi/fcoe/fcoe.c -+++ linux-stable/drivers/scsi/fcoe/fcoe.c +--- a/drivers/scsi/fcoe/fcoe.c ++++ b/drivers/scsi/fcoe/fcoe.c @@ -1272,7 +1272,7 @@ static void fcoe_percpu_thread_destroy(u struct sk_buff *skb; #ifdef CONFIG_SMP @@ -76,10 +74,8 @@ Index: linux-stable/drivers/scsi/fcoe/fcoe.c kfree_skb(skb); } -Index: linux-stable/drivers/scsi/fcoe/fcoe_ctlr.c -=================================================================== ---- linux-stable.orig/drivers/scsi/fcoe/fcoe_ctlr.c -+++ linux-stable/drivers/scsi/fcoe/fcoe_ctlr.c +--- a/drivers/scsi/fcoe/fcoe_ctlr.c ++++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -792,7 +792,7 @@ static unsigned long fcoe_ctlr_age_fcfs( INIT_LIST_HEAD(&del_list); @@ -98,10 +94,8 @@ Index: linux-stable/drivers/scsi/fcoe/fcoe_ctlr.c list_for_each_entry_safe(fcf, next, &del_list, list) { /* Removes fcf from current list */ -Index: linux-stable/drivers/scsi/libfc/fc_exch.c -=================================================================== ---- 
linux-stable.orig/drivers/scsi/libfc/fc_exch.c -+++ linux-stable/drivers/scsi/libfc/fc_exch.c +--- a/drivers/scsi/libfc/fc_exch.c ++++ b/drivers/scsi/libfc/fc_exch.c @@ -730,10 +730,10 @@ static struct fc_exch *fc_exch_em_alloc( } memset(ep, 0, sizeof(*ep)); diff --git a/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch b/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch index 575a5a7a7..0a4f191cc 100644 --- a/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch +++ b/debian/patches/features/all/rt/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch @@ -28,10 +28,8 @@ Signed-off-by: Thomas Gleixner drivers/scsi/qla2xxx/qla_inline.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) -Index: linux-stable/drivers/scsi/qla2xxx/qla_inline.h -=================================================================== ---- linux-stable.orig/drivers/scsi/qla2xxx/qla_inline.h -+++ linux-stable/drivers/scsi/qla2xxx/qla_inline.h +--- a/drivers/scsi/qla2xxx/qla_inline.h ++++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -36,12 +36,12 @@ qla2x00_poll(struct rsp_que *rsp) { unsigned long flags; diff --git a/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch b/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch index 3ea39fbcb..8e61bab05 100644 --- a/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch +++ b/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch @@ -18,13 +18,12 @@ Cc: stable-rt@vger.kernel.org --- include/linux/seqlock.h | 55 +++++++++++++++++++++++++++++++++++++++--------- - include/net/neighbour.h | 2 - - 2 files changed, 46 insertions(+), 11 deletions(-) + include/net/dst.h | 2 - + include/net/neighbour.h | 4 +-- + 3 files changed, 48 insertions(+), 13 deletions(-) -Index: linux-stable/include/linux/seqlock.h 
-=================================================================== ---- linux-stable.orig/include/linux/seqlock.h -+++ linux-stable/include/linux/seqlock.h +--- a/include/linux/seqlock.h ++++ b/include/linux/seqlock.h @@ -146,18 +146,30 @@ static inline int read_seqcount_retry(co * Sequence counter only version assumes that callers are using their * own mutexing. @@ -76,7 +75,7 @@ Index: linux-stable/include/linux/seqlock.h + unsigned ret; + +repeat: -+ ret = sl->seqcount.sequence; ++ ret = ACCESS_ONCE(sl->seqcount.sequence); + if (unlikely(ret & 1)) { + /* + * Take the lock and let the writer proceed (i.e. evtl @@ -153,11 +152,29 @@ Index: linux-stable/include/linux/seqlock.h spin_unlock_irqrestore(&sl->lock, flags); } -Index: linux-stable/include/net/neighbour.h -=================================================================== ---- linux-stable.orig/include/net/neighbour.h -+++ linux-stable/include/net/neighbour.h -@@ -385,7 +385,7 @@ struct neighbour_cb { +--- a/include/net/dst.h ++++ b/include/net/dst.h +@@ -392,7 +392,7 @@ static inline void dst_confirm(struct ds + static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n, + struct sk_buff *skb) + { +- const struct hh_cache *hh; ++ struct hh_cache *hh; + + if (dst->pending_confirm) { + unsigned long now = jiffies; +--- a/include/net/neighbour.h ++++ b/include/net/neighbour.h +@@ -334,7 +334,7 @@ static inline int neigh_hh_bridge(struct + } + #endif + +-static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb) ++static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb) + { + unsigned int seq; + int hh_len; +@@ -389,7 +389,7 @@ struct neighbour_cb { #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb) diff --git a/debian/patches/features/all/rt/seqlock-remove-unused-functions.patch b/debian/patches/features/all/rt/seqlock-remove-unused-functions.patch index 7bbaaac84..59e11c958 100644 --- 
a/debian/patches/features/all/rt/seqlock-remove-unused-functions.patch +++ b/debian/patches/features/all/rt/seqlock-remove-unused-functions.patch @@ -7,10 +7,8 @@ Signed-off-by: Thomas Gleixner include/linux/seqlock.h | 21 --------------------- 1 file changed, 21 deletions(-) -Index: linux-stable/include/linux/seqlock.h -=================================================================== ---- linux-stable.orig/include/linux/seqlock.h -+++ linux-stable/include/linux/seqlock.h +--- a/include/linux/seqlock.h ++++ b/include/linux/seqlock.h @@ -69,17 +69,6 @@ static inline void write_sequnlock(seqlo spin_unlock(&sl->lock); } diff --git a/debian/patches/features/all/rt/seqlock-use-seqcount.patch b/debian/patches/features/all/rt/seqlock-use-seqcount.patch index c86cc9bf9..37b187203 100644 --- a/debian/patches/features/all/rt/seqlock-use-seqcount.patch +++ b/debian/patches/features/all/rt/seqlock-use-seqcount.patch @@ -9,10 +9,8 @@ Signed-off-by: Thomas Gleixner include/linux/seqlock.h | 176 +++++++++++++++++++++++++----------------------- 1 file changed, 93 insertions(+), 83 deletions(-) -Index: linux-stable/include/linux/seqlock.h -=================================================================== ---- linux-stable.orig/include/linux/seqlock.h -+++ linux-stable/include/linux/seqlock.h +--- a/include/linux/seqlock.h ++++ b/include/linux/seqlock.h @@ -30,81 +30,12 @@ #include #include diff --git a/debian/patches/features/all/rt/series b/debian/patches/features/all/rt/series index 4162c2cd6..b72d275d5 100644 --- a/debian/patches/features/all/rt/series +++ b/debian/patches/features/all/rt/series @@ -5,6 +5,21 @@ ############################################################ # UPSTREAM changes queued ############################################################ +fix-1-2-slub-do-not-dereference-null-pointer-in-node_match.patch +fix-2-2-slub-tid-must-be-retrieved-from-the-percpu-area-of-the-current-processor.patch +fix-rq-3elock-vs-logbuf_lock-unlock-race.patch 
+genirq-add-default-mask-cmdline-option.patch +of-fixup-resursive-locking-code-paths.patch +of-convert-devtree-lock-from-rw_lock-to-raw-spinlock.patch + +locking-various-init-fixes.patch +intel_idle-convert-i7300_idle_lock-to-raw-spinlock.patch +ntp-make-ntp-lock-raw-sigh.patch +seqlock-remove-unused-functions.patch +seqlock-use-seqcount.patch +generic-cmpxchg-use-raw-local-irq.patch + +0001-of-fix-recursive-locking-in-of_get_next_available_ch.patch ############################################################ # UPSTREAM FIXES, patches pending @@ -13,7 +28,6 @@ ############################################################ # Stuff broken upstream, patches submitted ############################################################ -x86-kprobes-remove-bogus-preempt-enable.patch ############################################################ # Stuff which needs addressing upstream, but requires more @@ -28,31 +42,39 @@ x86-hpet-disable-msi-on-lenovo-w510.patch ############################################################ # Submitted on LKML ############################################################ -# SCHED BLOCK/WQ -block-shorten-interrupt-disabled-regions.patch +early-printk-consolidate.patch -# CHECKME sched-distangle-worker-accounting-from-rq-3elock.patch +# SRCU +0001-kernel-srcu-merge-common-code-into-a-macro.patch +0002-kernel-SRCU-provide-a-static-initializer.patch ############################################################ # Submitted to mips ML ############################################################ -mips-enable-interrupts-in-signal.patch ############################################################ # Submitted to ARM ML ############################################################ +arm-mark-pmu-interupt-no-thread.patch +arm-allow-irq-threading.patch + +############################################################ +# Submitted to PPC ML +############################################################ +ppc-mark-low-level-handlers-no-thread.patch 
############################################################ # Submitted on LKML ############################################################ -# JBD - -# SCHED - -############################################################ -# Submitted on ppc-devel -############################################################ +timekeeping-do-not-calc-crap-over-and-over.patch +timekeeping-make-jiffies-lock-internal.patch +timekeeping-move-lock-out-of-timekeeper.patch +timekeeping-split-timekeeper-lock.patch +timekeeping-store-cycle-last-in-timekeeper.patch +timekeeping-delay-clock-cycle-last-update.patch +timekeeping-implement-shadow-timekeeper.patch +timekeeping-shorten-seq-count-region.patch ############################################################ # Submitted to net-dev @@ -62,43 +84,18 @@ mips-enable-interrupts-in-signal.patch # Pending in tip ############################################################ -# WATCHDOG - -# CLOCKSOURCE - -# RTMUTEX CLEANUP - -# RAW SPINLOCKS - -# X86 - -############################################################ -# Pending in peterz's scheduler queue -############################################################ - - ############################################################ # Stuff which should go upstream ASAP ############################################################ -# GENIRQ -genirq-add-default-mask-cmdline-option.patch +# SCHED BLOCK/WQ +block-shorten-interrupt-disabled-regions.patch -# PPC -ppc-mark-low-level-handlers-no-thread.patch +# Timekeeping split jiffies lock. Needs a good argument :) +timekeeping-split-jiffies-lock.patch -# Timekeeping / VDSO - -# SEQLOCK -seqlock-remove-unused-functions.patch -seqlock-use-seqcount.patch - -# RAW SPINLOCKS -timekeeping-split-xtime-lock.patch -intel_idle-convert-i7300_idle_lock-to-raw-spinlock.patch -ntp-make-ntp-lock-raw-sigh.patch - -# MM memcg +# CHECKME: Should local_irq_enable() generally do a preemption check ? 
+mips-enable-interrupts-in-signal.patch # Tracing tracing-account-for-preempt-off-in-preempt_schedule.patch @@ -107,15 +104,12 @@ tracing-account-for-preempt-off-in-preempt_schedule.patch signal-revert-ptrace-preempt-magic.patch # ARM IRQF_NO_TRHEAD / IRQ THREADING SUPPORT -arm-mark-pmu-interupt-no-thread.patch -arm-allow-irq-threading.patch arm-convert-boot-lock-to-raw.patch arm-omap-make-wakeupgen_lock-raw.patch # PREEMPT_ENABLE_NO_RESCHED # SIGNALS / POSIXTIMERS -signals-do-not-wake-self.patch posix-timers-no-broadcast.patch signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch oleg-signal-rt-fix.patch @@ -123,7 +117,6 @@ oleg-signal-rt-fix.patch # SCHED # GENERIC CMPXCHG -generic-cmpxchg-use-raw-local-irq.patch # SHORTEN PREEMPT DISABLED drivers-random-reduce-preempt-disabled-region.patch @@ -132,14 +125,9 @@ drivers-random-reduce-preempt-disabled-region.patch arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch clocksource-tclib-allow-higher-clockrates.patch -# HW LATENCY DETECTOR - this really wants a rewrite -#hw-latency-detector.patch - # DRIVERS NET drivers-net-tulip-add-missing-pci-disable.patch drivers-net-8139-disable-irq-nosync.patch -drivers-net-ehea-mark-rx-irq-no-thread.patch -drivers-net-at91-make-mdio-protection-rt-safe.patch # PREEMPT @@ -152,16 +140,9 @@ peterz-raw_pagefault_disable.patch filemap-fix-up.patch mm-remove-preempt-count-from-pf.patch -# HIGHMEM -x86-highmem-warn.patch - # PM suspend-prevernt-might-sleep-splats.patch -# DEVICE TREE -of-fixup-recursive-locking.patch -of-convert-devtree-lock.patch - # MM/LISTS list-add-list-last-entry.patch mm-page-alloc-use-list-last-entry.patch @@ -185,7 +166,6 @@ fix-rt-int3-x86_32-3.2-rt.patch # RCU # LOCKING INIT FIXES -locking-various-init-fixes.patch # PCI pci-access-use-__wake_up_all_locked.patch @@ -201,6 +181,7 @@ pci-access-use-__wake_up_all_locked.patch # TRACING latency-hist.patch +# HW LATENCY DETECTOR - this really wants a rewrite # HW latency detector hwlatdetect.patch @@ 
-212,7 +193,6 @@ hwlatdetect.patch localversion.patch # PRINTK -early-printk-consolidate.patch printk-kill.patch printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch @@ -237,7 +217,6 @@ acpi-use-local-irq-nort.patch user-use-local-irq-nort.patch resource-counters-use-localirq-nort.patch usb-hcd-use-local-irq-nort.patch -tty-use-local-irq-nort.patch mm-scatterlist-dont-disable-irqs-on-RT.patch # Sigh @@ -327,9 +306,6 @@ rfc-printk-don-27t-call-printk_tick-in-printk_needs_cpu.patch # HRTIMERS hrtimers-prepare-full-preemption.patch hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch -peter_zijlstra-frob-hrtimer.patch -hrtimer-add-missing-debug_activate-aid.patch -hrtimer-fix-reprogram-madness.patch timer-fd-avoid-live-lock.patch # POSIX-CPU-TIMERS @@ -378,7 +354,7 @@ rt-sched-have-migrate_disable-ignore-bounded-threads.patch sched-clear-pf-thread-bound-on-fallback-rq.patch # FTRACE -ftrace-crap.patch +# XXX checkme ftrace-crap.patch # CHECKME rt-ring-buffer-convert-reader_lock-from-raw_spin_lock-into-spin_lock.patch # CHECKME rfc-ring-buffer-rt-check-for-irqs-disabled-before-grabbing-reader-lock.patch @@ -396,7 +372,7 @@ mutex-no-spin-on-rt.patch softirq-local-lock.patch softirq-export-in-serving-softirq.patch harirq-h-define-softirq_count-as-oul-to-kill-build-warning.patch -softirq-fix-unplug-deadlock.patch +# XXX checkme softirq-fix-unplug-deadlock.patch softirq-disable-softirq-stacks-for-rt.patch softirq-make-fifo.patch tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch @@ -408,14 +384,13 @@ local-vars-migrate-disable.patch # RAID5 md-raid5-percpu-handling-rt-aware.patch -# RTMUTEX -rtmutex-lock-killable.patch - # FUTEX/RTMUTEX rtmutex-futex-prepare-rt.patch futex-requeue-pi-fix.patch +0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch # RTMUTEX +rtmutex-lock-killable.patch rt-mutex-add-sleeping-spinlocks-support.patch spinlock-types-separate-raw.patch rtmutex-avoid-include-hell.patch @@ -423,22 +398,19 @@ 
rt-add-rt-spinlock-to-headers.patch rt-add-rt-to-mutex-headers.patch rwsem-add-rt-variant.patch rt-add-rt-locks.patch +percpu-rwsem-compilefix.patch # RTMUTEX Fallout tasklist-lock-fix-section-conflict.patch # NOHZ/RTMUTEX timer-handle-idle-trylock-in-get-next-timer-irq.patch -timer.c-fix-build-fail-for-RT_FULL.patch # RCU -rcu-force-preempt-rcu-for-rt.patch peter_zijlstra-frob-rcu.patch rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch rcu-tiny-merge-bh.patch patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch -rcu-fix-build-break.patch -rt-rcutree-warn-fix.patch # LGLOCKS - lovely lglocks-rt.patch @@ -447,28 +419,27 @@ lglocks-rt.patch drivers-serial-cleanup-locking-for-rt.patch drivers-serial-call-flush_to_ldisc-when-the-irq-is-t.patch drivers-tty-fix-omap-lock-crap.patch +drivers-tty-pl011-irq-disable-madness.patch rt-serial-warn-fix.patch # FS fs-namespace-preemption-fix.patch mm-protect-activate-switch-mm.patch -mm-protect-activate_mm-by-preempt_-disable-7cenable-_rt.patch fs-block-rt-support.patch fs-ntfs-disable-interrupt-non-rt.patch # X86 x86-mce-timer-hrtimer.patch x86-stackprot-no-random-on-rt.patch -# x86-no-highmem-with-rt.patch -- peterz x86-use-gen-rwsem-spinlocks-rt.patch x86-disable-debug-stack.patch # CPU get light -workqueue-use-get-cpu-light.patch epoll-use-get-cpu-light.patch mm-vmalloc-use-get-cpu-light.patch # WORKQUEUE more fixes +workqueue-use-locallock.patch # CHECKME workqueue-sanity.patch # CHECKME workqueue-fix-PF_THREAD_BOUND.patch # CHECKME workqueue-hotplug-fix.patch @@ -514,6 +485,9 @@ sysfs-realtime-entry.patch # KMAP/HIGHMEM mm-rt-kmap-atomic-scheduling.patch +0002-x86-highmem-add-a-already-used-pte-check.patch +0003-arm-highmem-flush-tlb-on-unmap.patch +arm-enable-highmem-for-rt.patch # IPC ipc-sem-rework-semaphore-wakeups.patch @@ -567,7 +541,6 @@ net-use-cpu-chill.patch lockdep-selftest-convert-spinlock-to-raw-spinlock.patch lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch 
-rt-disable-rt-group-sched.patch fs-jbd-pull-plug-when-waiting-for-space.patch perf-make-swevent-hrtimer-irqsafe.patch cpu-rt-rework-cpu-down.patch @@ -575,7 +548,6 @@ cpu-rt-rework-cpu-down.patch # Stable-rt stuff: Fold back when Steve grabbed it random-make-it-work-on-rt.patch softirq-init-softirq-local-lock-after-per-cpu-section-is-set-up.patch -mm-slab-fix-potential-deadlock.patch mm-page-alloc-use-local-lock-on-target-cpu.patch rt-rw-lockdep-annotations.patch sched-better-debug-output-for-might-sleep.patch @@ -585,33 +557,51 @@ stomp-machine-deal-clever-with-stopper-lock.patch net-another-local-irq-disable-alloc-atomic-headache.patch net-use-cpu-light-in-ip-send-unicast-reply.patch peterz-srcu-crypto-chain.patch -crypto-make-core-static-and-init-scru-early.patch -fix-crypto-api-init-for-3-6-4-rt10.patch x86-perf-uncore-deal-with-kfree.patch softirq-make-serving-softirqs-a-task-flag.patch softirq-split-handling-function.patch softirq-split-locks.patch rcu-tiny-solve-rt-mistery.patch -slub-correct-per-cpu-slab.patch mm-enable-slub.patch hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch rcu-disable-rcu-fast-no-hz-on-rt.patch net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch softirq-adapt-nohz-pending-debug-code-to-new-scheme.patch -softirq-add-more-debug.patch -softirq-fix-nohz-pending-issue-for-real.patch net-netif-rx-ni-use-local-bh-disable.patch -fix-random-fallout.patch - preempt-lazy-support.patch x86-preempt-lazy.patch arm-preempt-lazy-support.patch +# 3.8 changes +net-make-devnet_rename_seq-a-mutex.patch +powerpc-fsl-msi-use-a-different-locklcass-for-the-ca.patch +i2c-omap-drop-the-lock-hard-irq-context.patch +spi-omap-mcspi-check-condition-also-after-timeout.patch +HACK-printk-drop-the-logbuf_lock-more-often.patch +fscache_compile_fix.patch +i915_compile_fix.patch + # Enable full RT powerpc-preempt-lazy-support.patch +wait-simple-implementation.patch +rcutiny-use-simple-waitqueue.patch +treercu-use-simple-waitqueue.patch 
+sched-adjust-reset-on-fork-always.patch +sched-enqueue-to-head.patch +sched-consider-pi-boosting-in-setscheduler.patch +block-use-cpu-chill.patch + +mm-bounce-local-irq-save-nort.patch +mmci-remove-bogus-irq-save.patch +slub-enable-irqs-for-no-wait.patch +slub_delay_ctor_on_rt.patch +idle-state.patch +might-sleep-check-for-idle.patch +wait-simple-rework-for-completions.patch +completion-use-simple-wait-queues.patch + kconfig-disable-a-few-options-rt.patch kconfig-preempt-rt-full.patch - diff --git a/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch b/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch index 5171fd5b2..bee7aba78 100644 --- a/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch +++ b/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch @@ -7,11 +7,9 @@ Signed-off-by: Thomas Gleixner kernel/signal.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) -Index: linux-stable/kernel/signal.c -=================================================================== ---- linux-stable.orig/kernel/signal.c -+++ linux-stable/kernel/signal.c -@@ -1394,12 +1394,12 @@ struct sighand_struct *__lock_task_sigha +--- a/kernel/signal.c ++++ b/kernel/signal.c +@@ -1391,12 +1391,12 @@ struct sighand_struct *__lock_task_sigha struct sighand_struct *sighand; for (;;) { @@ -26,7 +24,7 @@ Index: linux-stable/kernel/signal.c break; } -@@ -1410,7 +1410,7 @@ struct sighand_struct *__lock_task_sigha +@@ -1407,7 +1407,7 @@ struct sighand_struct *__lock_task_sigha } spin_unlock(&sighand->siglock); rcu_read_unlock(); diff --git a/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch b/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch index 87a663b4f..6a07b93d4 100644 --- a/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch +++ b/debian/patches/features/all/rt/signal-revert-ptrace-preempt-magic.patch @@ -7,11 +7,9 @@ Signed-off-by: Thomas Gleixner kernel/signal.c | 8 
-------- 1 file changed, 8 deletions(-) -Index: linux-stable/kernel/signal.c -=================================================================== ---- linux-stable.orig/kernel/signal.c -+++ linux-stable/kernel/signal.c -@@ -1898,15 +1898,7 @@ static void ptrace_stop(int exit_code, i +--- a/kernel/signal.c ++++ b/kernel/signal.c +@@ -1902,15 +1902,7 @@ static void ptrace_stop(int exit_code, i if (gstop_done && ptrace_reparented(current)) do_notify_parent_cldstop(current, false, why); @@ -24,6 +22,6 @@ Index: linux-stable/kernel/signal.c - preempt_disable(); read_unlock(&tasklist_lock); - preempt_enable_no_resched(); - schedule(); + freezable_schedule(); } else { /* diff --git a/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch index 8f230db48..809a02f44 100644 --- a/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch +++ b/debian/patches/features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch @@ -15,11 +15,9 @@ Signed-off-by: Thomas Gleixner kernel/signal.c | 83 ++++++++++++++++++++++++++++++++++++++++++++++--- 5 files changed, 83 insertions(+), 5 deletions(-) -Index: linux-stable/include/linux/sched.h -=================================================================== ---- linux-stable.orig/include/linux/sched.h -+++ linux-stable/include/linux/sched.h -@@ -1403,6 +1403,7 @@ struct task_struct { +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1407,6 +1407,7 @@ struct task_struct { /* signal handlers */ struct signal_struct *signal; struct sighand_struct *sighand; @@ -27,11 +25,9 @@ Index: linux-stable/include/linux/sched.h sigset_t blocked, real_blocked; sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ -Index: linux-stable/include/linux/signal.h -=================================================================== ---- 
linux-stable.orig/include/linux/signal.h -+++ linux-stable/include/linux/signal.h -@@ -229,6 +229,7 @@ static inline void init_sigpending(struc +--- a/include/linux/signal.h ++++ b/include/linux/signal.h +@@ -226,6 +226,7 @@ static inline void init_sigpending(struc } extern void flush_sigqueue(struct sigpending *queue); @@ -39,11 +35,9 @@ Index: linux-stable/include/linux/signal.h /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */ static inline int valid_signal(unsigned long sig) -Index: linux-stable/kernel/exit.c -=================================================================== ---- linux-stable.orig/kernel/exit.c -+++ linux-stable/kernel/exit.c -@@ -155,7 +155,7 @@ static void __exit_signal(struct task_st +--- a/kernel/exit.c ++++ b/kernel/exit.c +@@ -143,7 +143,7 @@ static void __exit_signal(struct task_st * Do this under ->siglock, we can race with another thread * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. */ @@ -52,11 +46,9 @@ Index: linux-stable/kernel/exit.c tsk->sighand = NULL; spin_unlock(&sighand->siglock); -Index: linux-stable/kernel/fork.c -=================================================================== ---- linux-stable.orig/kernel/fork.c -+++ linux-stable/kernel/fork.c -@@ -1239,6 +1239,7 @@ static struct task_struct *copy_process( +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -1230,6 +1230,7 @@ static struct task_struct *copy_process( spin_lock_init(&p->alloc_lock); init_sigpending(&p->pending); @@ -64,11 +56,9 @@ Index: linux-stable/kernel/fork.c p->utime = p->stime = p->gtime = 0; p->utimescaled = p->stimescaled = 0; -Index: linux-stable/kernel/signal.c -=================================================================== ---- linux-stable.orig/kernel/signal.c -+++ linux-stable/kernel/signal.c -@@ -346,13 +346,45 @@ static bool task_participate_group_stop( +--- a/kernel/signal.c ++++ b/kernel/signal.c +@@ -348,13 +348,45 @@ static bool task_participate_group_stop( return false; } @@ -115,7 +105,7 
@@ Index: linux-stable/kernel/signal.c { struct sigqueue *q = NULL; struct user_struct *user; -@@ -369,7 +401,10 @@ __sigqueue_alloc(int sig, struct task_st +@@ -371,7 +403,10 @@ __sigqueue_alloc(int sig, struct task_st if (override_rlimit || atomic_read(&user->sigpending) <= task_rlimit(t, RLIMIT_SIGPENDING)) { @@ -127,7 +117,7 @@ Index: linux-stable/kernel/signal.c } else { print_dropped_signal(sig); } -@@ -386,6 +421,13 @@ __sigqueue_alloc(int sig, struct task_st +@@ -388,6 +423,13 @@ __sigqueue_alloc(int sig, struct task_st return q; } @@ -141,7 +131,7 @@ Index: linux-stable/kernel/signal.c static void __sigqueue_free(struct sigqueue *q) { if (q->flags & SIGQUEUE_PREALLOC) -@@ -395,6 +437,21 @@ static void __sigqueue_free(struct sigqu +@@ -397,6 +439,21 @@ static void __sigqueue_free(struct sigqu kmem_cache_free(sigqueue_cachep, q); } @@ -163,7 +153,7 @@ Index: linux-stable/kernel/signal.c void flush_sigqueue(struct sigpending *queue) { struct sigqueue *q; -@@ -408,6 +465,21 @@ void flush_sigqueue(struct sigpending *q +@@ -410,6 +467,21 @@ void flush_sigqueue(struct sigpending *q } /* @@ -185,7 +175,7 @@ Index: linux-stable/kernel/signal.c * Flush all pending signals for a task. */ void __flush_signals(struct task_struct *t) -@@ -556,7 +628,7 @@ static void collect_signal(int sig, stru +@@ -561,7 +633,7 @@ static void collect_signal(int sig, stru still_pending: list_del_init(&first->list); copy_siginfo(info, &first->info); @@ -194,7 +184,7 @@ Index: linux-stable/kernel/signal.c } else { /* * Ok, it wasn't in the queue. 
This must be -@@ -602,6 +674,8 @@ int dequeue_signal(struct task_struct *t +@@ -607,6 +679,8 @@ int dequeue_signal(struct task_struct *t { int signr; @@ -203,7 +193,7 @@ Index: linux-stable/kernel/signal.c /* We only dequeue private signals from ourselves, we don't let * signalfd steal them */ -@@ -1548,7 +1622,8 @@ EXPORT_SYMBOL(kill_pid); +@@ -1545,7 +1619,8 @@ EXPORT_SYMBOL(kill_pid); */ struct sigqueue *sigqueue_alloc(void) { diff --git a/debian/patches/features/all/rt/signals-do-not-wake-self.patch b/debian/patches/features/all/rt/signals-do-not-wake-self.patch deleted file mode 100644 index 411045e93..000000000 --- a/debian/patches/features/all/rt/signals-do-not-wake-self.patch +++ /dev/null @@ -1,27 +0,0 @@ -From: Thomas Gleixner -Date: Fri, 3 Jul 2009 08:44:44 -0500 -Subject: signals: Do not wakeup self - -Signals which are delivered by current to current can do without -waking up current :) - -Signed-off-by: Thomas Gleixner - ---- - kernel/signal.c | 3 +++ - 1 file changed, 3 insertions(+) - -Index: linux-stable/kernel/signal.c -=================================================================== ---- linux-stable.orig/kernel/signal.c -+++ linux-stable/kernel/signal.c -@@ -684,6 +684,9 @@ void signal_wake_up(struct task_struct * - - set_tsk_thread_flag(t, TIF_SIGPENDING); - -+ if (unlikely(t == current)) -+ return; -+ - /* - * For SIGKILL, we want to wake it up in the stopped/traced/killable - * case. 
We don't check t->state here because there is a race with it diff --git a/debian/patches/features/all/rt/skbufhead-raw-lock.patch b/debian/patches/features/all/rt/skbufhead-raw-lock.patch index 74128923e..3f27b9efd 100644 --- a/debian/patches/features/all/rt/skbufhead-raw-lock.patch +++ b/debian/patches/features/all/rt/skbufhead-raw-lock.patch @@ -9,11 +9,9 @@ Signed-off-by: Thomas Gleixner net/core/dev.c | 26 ++++++++++++++++++++------ 3 files changed, 28 insertions(+), 6 deletions(-) -Index: linux-stable/include/linux/netdevice.h -=================================================================== ---- linux-stable.orig/include/linux/netdevice.h -+++ linux-stable/include/linux/netdevice.h -@@ -1765,6 +1765,7 @@ struct softnet_data { +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -1783,6 +1783,7 @@ struct softnet_data { unsigned int dropped; struct sk_buff_head input_pkt_queue; struct napi_struct backlog; @@ -21,10 +19,8 @@ Index: linux-stable/include/linux/netdevice.h }; static inline void input_queue_head_incr(struct softnet_data *sd) -Index: linux-stable/include/linux/skbuff.h -=================================================================== ---- linux-stable.orig/include/linux/skbuff.h -+++ linux-stable/include/linux/skbuff.h +--- a/include/linux/skbuff.h ++++ b/include/linux/skbuff.h @@ -132,6 +132,7 @@ struct sk_buff_head { __u32 qlen; @@ -33,7 +29,7 @@ Index: linux-stable/include/linux/skbuff.h }; struct sk_buff; -@@ -992,6 +993,12 @@ static inline void skb_queue_head_init(s +@@ -1008,6 +1009,12 @@ static inline void skb_queue_head_init(s __skb_queue_head_init(list); } @@ -46,11 +42,9 @@ Index: linux-stable/include/linux/skbuff.h static inline void skb_queue_head_init_class(struct sk_buff_head *list, struct lock_class_key *class) { -Index: linux-stable/net/core/dev.c -=================================================================== ---- linux-stable.orig/net/core/dev.c -+++ linux-stable/net/core/dev.c -@@ -221,14 +221,14 @@ 
static inline struct hlist_head *dev_ind +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -225,14 +225,14 @@ static inline struct hlist_head *dev_ind static inline void rps_lock(struct softnet_data *sd) { #ifdef CONFIG_RPS @@ -67,7 +61,7 @@ Index: linux-stable/net/core/dev.c #endif } -@@ -3402,7 +3402,7 @@ static void flush_backlog(void *arg) +@@ -3528,7 +3528,7 @@ static void flush_backlog(void *arg) skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { if (skb->dev == dev) { __skb_unlink(skb, &sd->input_pkt_queue); @@ -76,7 +70,7 @@ Index: linux-stable/net/core/dev.c input_queue_head_incr(sd); } } -@@ -3411,10 +3411,13 @@ static void flush_backlog(void *arg) +@@ -3537,10 +3537,13 @@ static void flush_backlog(void *arg) skb_queue_walk_safe(&sd->process_queue, skb, tmp) { if (skb->dev == dev) { __skb_unlink(skb, &sd->process_queue); @@ -91,7 +85,7 @@ Index: linux-stable/net/core/dev.c } static int napi_gro_complete(struct sk_buff *skb) -@@ -3903,10 +3906,17 @@ static void net_rx_action(struct softirq +@@ -4045,10 +4048,17 @@ static void net_rx_action(struct softirq struct softnet_data *sd = &__get_cpu_var(softnet_data); unsigned long time_limit = jiffies + 2; int budget = netdev_budget; @@ -109,7 +103,7 @@ Index: linux-stable/net/core/dev.c while (!list_empty(&sd->poll_list)) { struct napi_struct *n; int work, weight; -@@ -6337,6 +6347,9 @@ static int dev_cpu_callback(struct notif +@@ -6530,6 +6540,9 @@ static int dev_cpu_callback(struct notif netif_rx(skb); input_queue_head_incr(oldsd); } @@ -119,7 +113,7 @@ Index: linux-stable/net/core/dev.c return NOTIFY_OK; } -@@ -6601,8 +6614,9 @@ static int __init net_dev_init(void) +@@ -6802,8 +6815,9 @@ static int __init net_dev_init(void) struct softnet_data *sd = &per_cpu(softnet_data, i); memset(sd, 0, sizeof(*sd)); diff --git a/debian/patches/features/all/rt/slub-correct-per-cpu-slab.patch b/debian/patches/features/all/rt/slub-correct-per-cpu-slab.patch deleted file mode 100644 index 8a78121ea..000000000 --- 
a/debian/patches/features/all/rt/slub-correct-per-cpu-slab.patch +++ /dev/null @@ -1,49 +0,0 @@ -Date: Tue, 30 Oct 2012 15:29:17 +0000 -From: Christoph Lameter -Subject: slub: Use correct cpu_slab on dead cpu - -Pass a kmem_cache_cpu pointer into unfreeze partials so that a different -kmem_cache_cpu structure than the local one can be specified. - -Reported-by: Thomas Gleixner -Signed-off-by: Christoph Lameter - ---- - mm/slub.c | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - -Index: linux-stable/mm/slub.c -=================================================================== ---- linux-stable.orig/mm/slub.c -+++ linux-stable/mm/slub.c -@@ -1871,10 +1871,10 @@ redo: - * - * This function must be called with interrupt disabled. - */ --static void unfreeze_partials(struct kmem_cache *s) -+static void unfreeze_partials(struct kmem_cache *s, -+ struct kmem_cache_cpu *c) - { - struct kmem_cache_node *n = NULL, *n2 = NULL; -- struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab); - struct page *page, *discard_page = NULL; - - while ((page = c->partial)) { -@@ -1960,7 +1960,7 @@ int put_cpu_partial(struct kmem_cache *s - * set to the per node partial list. - */ - local_irq_save(flags); -- unfreeze_partials(s); -+ unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); - local_irq_restore(flags); - pobjects = 0; - pages = 0; -@@ -2002,7 +2002,7 @@ static inline void __flush_cpu_slab(stru - if (c->page) - flush_slab(s, c); - -- unfreeze_partials(s); -+ unfreeze_partials(s, c); - } - } - diff --git a/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch b/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch new file mode 100644 index 000000000..cd0904f00 --- /dev/null +++ b/debian/patches/features/all/rt/slub-enable-irqs-for-no-wait.patch @@ -0,0 +1,46 @@ +Subject: slub: Enable irqs for __GFP_WAIT +From: Thomas Gleixner +Date: Wed, 09 Jan 2013 12:08:15 +0100 + +SYSTEM_RUNNING might be too late for enabling interrupts. 
Allocations +with GFP_WAIT can happen before that. So use this as an indicator. + +Signed-off-by: Thomas Gleixner +--- + mm/slub.c | 13 +++++-------- + 1 file changed, 5 insertions(+), 8 deletions(-) + +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -1280,14 +1280,15 @@ static struct page *allocate_slab(struct + struct page *page; + struct kmem_cache_order_objects oo = s->oo; + gfp_t alloc_gfp; ++ bool enableirqs; + + flags &= gfp_allowed_mask; + ++ enableirqs = (flags & __GFP_WAIT) != 0; + #ifdef CONFIG_PREEMPT_RT_FULL +- if (system_state == SYSTEM_RUNNING) +-#else +- if (flags & __GFP_WAIT) ++ enableirqs |= system_state == SYSTEM_RUNNING; + #endif ++ if (enableirqs) + local_irq_enable(); + + flags |= s->allocflags; +@@ -1327,11 +1328,7 @@ static struct page *allocate_slab(struct + kmemcheck_mark_unallocated_pages(page, pages); + } + +-#ifdef CONFIG_PREEMPT_RT_FULL +- if (system_state == SYSTEM_RUNNING) +-#else +- if (flags & __GFP_WAIT) +-#endif ++ if (enableirqs) + local_irq_disable(); + if (!page) + return NULL; diff --git a/debian/patches/features/all/rt/slub_delay_ctor_on_rt.patch b/debian/patches/features/all/rt/slub_delay_ctor_on_rt.patch new file mode 100644 index 000000000..1b6a420c8 --- /dev/null +++ b/debian/patches/features/all/rt/slub_delay_ctor_on_rt.patch @@ -0,0 +1,31 @@ +From: Sebastian Andrzej Siewior +Subject: slub: delay ctor until the object is requested + +It seems that allocation of plenty objects causes latency on ARM since that +code can not be preempted + +Signed-off-by: Sebastian Andrzej Siewior +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -1346,8 +1346,10 @@ static void setup_object(struct kmem_cac + void *object) + { + setup_object_debug(s, page, object); ++#ifndef CONFIG_PREEMPT_RT_FULL + if (unlikely(s->ctor)) + s->ctor(object); ++#endif + } + + static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) +@@ -2437,6 +2439,10 @@ redo: + + if (unlikely(gfpflags & __GFP_ZERO) && object) + memset(object, 0, s->object_size); ++#ifdef 
CONFIG_PREEMPT_RT_FULL ++ if (unlikely(s->ctor) && object) ++ s->ctor(object); ++#endif + + slab_post_alloc_hook(s, gfpflags, object); + diff --git a/debian/patches/features/all/rt/softirq-adapt-nohz-pending-debug-code-to-new-scheme.patch b/debian/patches/features/all/rt/softirq-adapt-nohz-pending-debug-code-to-new-scheme.patch index 84fcb18b4..f1599ff81 100644 --- a/debian/patches/features/all/rt/softirq-adapt-nohz-pending-debug-code-to-new-scheme.patch +++ b/debian/patches/features/all/rt/softirq-adapt-nohz-pending-debug-code-to-new-scheme.patch @@ -8,14 +8,12 @@ the other pending bits of that task as well. Signed-off-by: Thomas Gleixner --- - kernel/softirq.c | 68 ++++++++++++++++++++++++++++++++++++++++++------------- - 1 file changed, 52 insertions(+), 16 deletions(-) + kernel/softirq.c | 83 ++++++++++++++++++++++++++++++++++++++----------------- + 1 file changed, 58 insertions(+), 25 deletions(-) -Index: linux-stable/kernel/softirq.c -=================================================================== ---- linux-stable.orig/kernel/softirq.c -+++ linux-stable/kernel/softirq.c -@@ -65,45 +65,75 @@ char *softirq_to_name[NR_SOFTIRQS] = { +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -66,46 +66,71 @@ char *softirq_to_name[NR_SOFTIRQS] = { #ifdef CONFIG_NO_HZ # ifdef CONFIG_PREEMPT_RT_FULL @@ -59,24 +57,22 @@ Index: linux-stable/kernel/softirq.c void softirq_check_pending_idle(void) { static int rate_limit; -- u32 warnpending = 0, pending = local_softirq_pending(); +- u32 warnpending = 0, pending; + struct softirq_runner *sr = &__get_cpu_var(softirq_runners); -+ u32 warnpending, pending = local_softirq_pending(); ++ u32 warnpending; ++ int i; if (rate_limit >= 10) return; +- pending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK; - if (pending) { -+ warnpending = pending; -+ -+ while (pending) { - struct task_struct *tsk; -+ int i = __ffs(pending); +- struct task_struct *tsk; ++ warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK; ++ for (i = 
0; i < NR_SOFTIRQS; i++) { ++ struct task_struct *tsk = sr->runner[i]; - tsk = __get_cpu_var(ksoftirqd); -+ pending &= ~(1 << i); -+ -+ tsk = sr->runner[i]; /* * The wakeup code in rtmutex.c wakes up the task * _before_ it sets pi_blocked_on to NULL under @@ -107,7 +103,7 @@ Index: linux-stable/kernel/softirq.c rate_limit++; } } -@@ -122,6 +152,10 @@ void softirq_check_pending_idle(void) +@@ -125,6 +150,10 @@ void softirq_check_pending_idle(void) } } # endif @@ -118,7 +114,7 @@ Index: linux-stable/kernel/softirq.c #endif /* -@@ -482,6 +516,7 @@ static void do_current_softirqs(int need +@@ -478,6 +507,7 @@ static void do_current_softirqs(int need */ lock_softirq(i); local_irq_disable(); @@ -126,7 +122,7 @@ Index: linux-stable/kernel/softirq.c /* * Check with the local_softirq_pending() bits, * whether we need to process this still or if someone -@@ -492,6 +527,7 @@ static void do_current_softirqs(int need +@@ -488,6 +518,7 @@ static void do_current_softirqs(int need set_softirq_pending(pending & ~mask); do_single_softirq(i, need_rcu_bh_qs); } @@ -134,3 +130,45 @@ Index: linux-stable/kernel/softirq.c unlock_softirq(i); WARN_ON(current->softirq_nestcnt != 1); } +@@ -558,7 +589,7 @@ void thread_do_softirq(void) + } + } + +-void __raise_softirq_irqoff(unsigned int nr) ++static void do_raise_softirq_irqoff(unsigned int nr) + { + trace_softirq_raise(nr); + or_softirq_pending(1UL << nr); +@@ -575,12 +606,19 @@ void __raise_softirq_irqoff(unsigned int + __this_cpu_read(ksoftirqd)->softirqs_raised |= (1U << nr); + } + ++void __raise_softirq_irqoff(unsigned int nr) ++{ ++ do_raise_softirq_irqoff(nr); ++ if (!in_irq() && !current->softirq_nestcnt) ++ wakeup_softirqd(); ++} ++ + /* + * This function must run with irqs disabled! 
+ */ + void raise_softirq_irqoff(unsigned int nr) + { +- __raise_softirq_irqoff(nr); ++ do_raise_softirq_irqoff(nr); + + /* + * If we're in an hard interrupt we let irq return code deal +@@ -602,11 +640,6 @@ void raise_softirq_irqoff(unsigned int n + wakeup_softirqd(); + } + +-void do_raise_softirq_irqoff(unsigned int nr) +-{ +- raise_softirq_irqoff(nr); +-} +- + static inline int ksoftirqd_softirq_pending(void) + { + return current->softirqs_raised; diff --git a/debian/patches/features/all/rt/softirq-add-more-debug.patch b/debian/patches/features/all/rt/softirq-add-more-debug.patch deleted file mode 100644 index 8fb38c63b..000000000 --- a/debian/patches/features/all/rt/softirq-add-more-debug.patch +++ /dev/null @@ -1,60 +0,0 @@ -Subject: softirq: Add more debugging -From: Thomas Gleixner -Date: Sun, 28 Oct 2012 15:21:59 +0000 - -We really want to find code which calls __raise_softirq_irqsoff() and -runs neither in hardirq context nor in a local_bh disabled -region. This is even wrong on mainline as that code relies on random -events to take care of it's newly raised softirq. - -Signed-off-by: Thomas Gleixner ---- - kernel/softirq.c | 16 +++++++++------- - 1 file changed, 9 insertions(+), 7 deletions(-) - -Index: linux-stable/kernel/softirq.c -=================================================================== ---- linux-stable.orig/kernel/softirq.c -+++ linux-stable/kernel/softirq.c -@@ -618,7 +618,7 @@ void thread_do_softirq(void) - } - } - --void __raise_softirq_irqoff(unsigned int nr) -+static void do_raise_softirq_irqoff(unsigned int nr) - { - trace_softirq_raise(nr); - or_softirq_pending(1UL << nr); -@@ -635,12 +635,19 @@ void __raise_softirq_irqoff(unsigned int - __this_cpu_read(ksoftirqd)->softirqs_raised |= (1U << nr); - } - -+void __raise_softirq_irqoff(unsigned int nr) -+{ -+ do_raise_softirq_irqoff(nr); -+ if (!in_irq() && !current->softirq_nestcnt) -+ wakeup_softirqd(); -+} -+ - /* - * This function must run with irqs disabled! 
- */ - void raise_softirq_irqoff(unsigned int nr) - { -- __raise_softirq_irqoff(nr); -+ do_raise_softirq_irqoff(nr); - - /* - * If we're in an hard interrupt we let irq return code deal -@@ -662,11 +669,6 @@ void raise_softirq_irqoff(unsigned int n - wakeup_softirqd(); - } - --void do_raise_softirq_irqoff(unsigned int nr) --{ -- raise_softirq_irqoff(nr); --} -- - static inline int ksoftirqd_softirq_pending(void) - { - return current->softirqs_raised; diff --git a/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch b/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch index c1e3500a4..2d42abefb 100644 --- a/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch +++ b/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch @@ -15,10 +15,8 @@ Signed-off-by: Thomas Gleixner include/linux/interrupt.h | 3 +-- 9 files changed, 17 insertions(+), 3 deletions(-) -Index: linux-stable/arch/powerpc/kernel/irq.c -=================================================================== ---- linux-stable.orig/arch/powerpc/kernel/irq.c -+++ linux-stable/arch/powerpc/kernel/irq.c +--- a/arch/powerpc/kernel/irq.c ++++ b/arch/powerpc/kernel/irq.c @@ -584,6 +584,7 @@ void irq_ctx_init(void) } } @@ -35,10 +33,8 @@ Index: linux-stable/arch/powerpc/kernel/irq.c irq_hw_number_t virq_to_hw(unsigned int virq) { -Index: linux-stable/arch/powerpc/kernel/misc_32.S -=================================================================== ---- linux-stable.orig/arch/powerpc/kernel/misc_32.S -+++ linux-stable/arch/powerpc/kernel/misc_32.S +--- a/arch/powerpc/kernel/misc_32.S ++++ b/arch/powerpc/kernel/misc_32.S @@ -36,6 +36,7 @@ .text @@ -55,10 +51,8 @@ Index: linux-stable/arch/powerpc/kernel/misc_32.S _GLOBAL(call_handle_irq) mflr r0 -Index: linux-stable/arch/powerpc/kernel/misc_64.S -=================================================================== ---- linux-stable.orig/arch/powerpc/kernel/misc_64.S -+++ 
linux-stable/arch/powerpc/kernel/misc_64.S +--- a/arch/powerpc/kernel/misc_64.S ++++ b/arch/powerpc/kernel/misc_64.S @@ -29,6 +29,7 @@ .text @@ -75,10 +69,8 @@ Index: linux-stable/arch/powerpc/kernel/misc_64.S _GLOBAL(call_handle_irq) ld r8,0(r6) -Index: linux-stable/arch/sh/kernel/irq.c -=================================================================== ---- linux-stable.orig/arch/sh/kernel/irq.c -+++ linux-stable/arch/sh/kernel/irq.c +--- a/arch/sh/kernel/irq.c ++++ b/arch/sh/kernel/irq.c @@ -149,6 +149,7 @@ void irq_ctx_exit(int cpu) hardirq_ctx[cpu] = NULL; } @@ -95,10 +87,8 @@ Index: linux-stable/arch/sh/kernel/irq.c #else static inline void handle_one_irq(unsigned int irq) { -Index: linux-stable/arch/sparc/kernel/irq_64.c -=================================================================== ---- linux-stable.orig/arch/sparc/kernel/irq_64.c -+++ linux-stable/arch/sparc/kernel/irq_64.c +--- a/arch/sparc/kernel/irq_64.c ++++ b/arch/sparc/kernel/irq_64.c @@ -698,6 +698,7 @@ void __irq_entry handler_irq(int pil, st set_irq_regs(old_regs); } @@ -115,19 +105,17 @@ Index: linux-stable/arch/sparc/kernel/irq_64.c #ifdef CONFIG_HOTPLUG_CPU void fixup_irqs(void) -Index: linux-stable/arch/x86/kernel/entry_64.S -=================================================================== ---- linux-stable.orig/arch/x86/kernel/entry_64.S -+++ linux-stable/arch/x86/kernel/entry_64.S -@@ -1252,6 +1252,7 @@ ENTRY(kernel_execve) - CFI_ENDPROC - END(kernel_execve) +--- a/arch/x86/kernel/entry_64.S ++++ b/arch/x86/kernel/entry_64.S +@@ -1337,6 +1337,7 @@ bad_gs: + jmp 2b + .previous +#ifndef CONFIG_PREEMPT_RT_FULL /* Call softirq on interrupt stack. Interrupts are off. 
*/ ENTRY(call_softirq) CFI_STARTPROC -@@ -1271,6 +1272,7 @@ ENTRY(call_softirq) +@@ -1356,6 +1357,7 @@ ENTRY(call_softirq) ret CFI_ENDPROC END(call_softirq) @@ -135,10 +123,8 @@ Index: linux-stable/arch/x86/kernel/entry_64.S #ifdef CONFIG_XEN zeroentry xen_hypervisor_callback xen_do_hypervisor_callback -Index: linux-stable/arch/x86/kernel/irq_32.c -=================================================================== ---- linux-stable.orig/arch/x86/kernel/irq_32.c -+++ linux-stable/arch/x86/kernel/irq_32.c +--- a/arch/x86/kernel/irq_32.c ++++ b/arch/x86/kernel/irq_32.c @@ -149,6 +149,7 @@ void __cpuinit irq_ctx_init(int cpu) cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu)); } @@ -155,10 +141,8 @@ Index: linux-stable/arch/x86/kernel/irq_32.c bool handle_irq(unsigned irq, struct pt_regs *regs) { -Index: linux-stable/arch/x86/kernel/irq_64.c -=================================================================== ---- linux-stable.orig/arch/x86/kernel/irq_64.c -+++ linux-stable/arch/x86/kernel/irq_64.c +--- a/arch/x86/kernel/irq_64.c ++++ b/arch/x86/kernel/irq_64.c @@ -88,7 +88,7 @@ bool handle_irq(unsigned irq, struct pt_ return true; } @@ -173,11 +157,9 @@ Index: linux-stable/arch/x86/kernel/irq_64.c local_irq_restore(flags); } +#endif -Index: linux-stable/include/linux/interrupt.h -=================================================================== ---- linux-stable.orig/include/linux/interrupt.h -+++ linux-stable/include/linux/interrupt.h -@@ -448,10 +448,9 @@ struct softirq_action +--- a/include/linux/interrupt.h ++++ b/include/linux/interrupt.h +@@ -445,10 +445,9 @@ struct softirq_action void (*action)(struct softirq_action *); }; diff --git a/debian/patches/features/all/rt/softirq-export-in-serving-softirq.patch b/debian/patches/features/all/rt/softirq-export-in-serving-softirq.patch index 0ddf8acd7..a8b79f37e 100644 --- a/debian/patches/features/all/rt/softirq-export-in-serving-softirq.patch +++ 
b/debian/patches/features/all/rt/softirq-export-in-serving-softirq.patch @@ -16,11 +16,9 @@ Signed-off-by: Thomas Gleixner kernel/softirq.c | 1 + 1 file changed, 1 insertion(+) -Index: linux-stable/kernel/softirq.c -=================================================================== ---- linux-stable.orig/kernel/softirq.c -+++ linux-stable/kernel/softirq.c -@@ -448,6 +448,7 @@ int in_serving_softirq(void) +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -431,6 +431,7 @@ int in_serving_softirq(void) preempt_enable(); return res; } diff --git a/debian/patches/features/all/rt/softirq-fix-nohz-pending-issue-for-real.patch b/debian/patches/features/all/rt/softirq-fix-nohz-pending-issue-for-real.patch deleted file mode 100644 index 28beca147..000000000 --- a/debian/patches/features/all/rt/softirq-fix-nohz-pending-issue-for-real.patch +++ /dev/null @@ -1,54 +0,0 @@ -Subject: softirq: Fix nohz pending issue for real -From: Thomas Gleixner -Date: Mon, 12 Nov 2012 22:07:34 +0100 - -We really need to iterate through all softirqs to find a potentially -blocked runner. - -T1 runs softirq X (that cleared pending bit for X) - -Interrupt raises softirq Y - -T1 gets blocked on a lock and lock owner is not runnable - -T1 schedules out - -CPU goes idle and complains about pending softirq Y. - -Now iterating over all softirqs lets us find the runner for X and -eliminate Y from the to warn about list as well. 
- -Signed-off-by: Thomas Gleixner ---- - kernel/softirq.c | 13 ++++--------- - 1 file changed, 4 insertions(+), 9 deletions(-) - -Index: linux-stable/kernel/softirq.c -=================================================================== ---- linux-stable.orig/kernel/softirq.c -+++ linux-stable/kernel/softirq.c -@@ -100,20 +100,15 @@ void softirq_check_pending_idle(void) - { - static int rate_limit; - struct softirq_runner *sr = &__get_cpu_var(softirq_runners); -- u32 warnpending, pending = local_softirq_pending(); -+ u32 warnpending = local_softirq_pending(); -+ int i; - - if (rate_limit >= 10) - return; - -- warnpending = pending; -- -- while (pending) { -- struct task_struct *tsk; -- int i = __ffs(pending); -- -- pending &= ~(1 << i); -+ for (i = 0; i < NR_SOFTIRQS; i++) { -+ struct task_struct *tsk = sr->runner[i]; - -- tsk = sr->runner[i]; - /* - * The wakeup code in rtmutex.c wakes up the task - * _before_ it sets pi_blocked_on to NULL under diff --git a/debian/patches/features/all/rt/softirq-fix-unplug-deadlock.patch b/debian/patches/features/all/rt/softirq-fix-unplug-deadlock.patch deleted file mode 100644 index 302ee53cc..000000000 --- a/debian/patches/features/all/rt/softirq-fix-unplug-deadlock.patch +++ /dev/null @@ -1,68 +0,0 @@ -Subject: softirq: Fix unplug deadlock -From: Peter Zijlstra -Date: Fri, 30 Sep 2011 15:59:16 +0200 - -Subject: [RT] softirq: Fix unplug deadlock -From: Peter Zijlstra -Date: Fri Sep 30 15:52:14 CEST 2011 - -If ksoftirqd gets woken during hot-unplug, __thread_do_softirq() will -call pin_current_cpu() which will block on the held cpu_hotplug.lock. -Moving the offline check in __thread_do_softirq() before the -pin_current_cpu() call doesn't work, since the wakeup can happen -before we mark the cpu offline. - -So here we have the ksoftirq thread stuck until hotplug finishes, but -then the ksoftirq CPU_DOWN notifier issues kthread_stop() which will -wait for the ksoftirq thread to go away -- while holding the hotplug -lock. 
- -Sort this by delaying the kthread_stop() until CPU_POST_DEAD, which is -outside of the cpu_hotplug.lock, but still serialized by the -cpu_add_remove_lock. - -Signed-off-by: Peter Zijlstra -Cc: rostedt -Cc: Clark Williams -Link: http://lkml.kernel.org/r/1317391156.12973.3.camel@twins -Signed-off-by: Thomas Gleixner ---- - kernel/softirq.c | 8 ++------ - 1 file changed, 2 insertions(+), 6 deletions(-) - -Index: linux-stable/kernel/softirq.c -=================================================================== ---- linux-stable.orig/kernel/softirq.c -+++ linux-stable/kernel/softirq.c -@@ -1087,9 +1087,8 @@ static int __cpuinit cpu_callback(struct - int hotcpu = (unsigned long)hcpu; - struct task_struct *p; - -- switch (action) { -+ switch (action & ~CPU_TASKS_FROZEN) { - case CPU_UP_PREPARE: -- case CPU_UP_PREPARE_FROZEN: - p = kthread_create_on_node(run_ksoftirqd, - hcpu, - cpu_to_node(hotcpu), -@@ -1102,19 +1101,16 @@ static int __cpuinit cpu_callback(struct - per_cpu(ksoftirqd, hotcpu) = p; - break; - case CPU_ONLINE: -- case CPU_ONLINE_FROZEN: - wake_up_process(per_cpu(ksoftirqd, hotcpu)); - break; - #ifdef CONFIG_HOTPLUG_CPU - case CPU_UP_CANCELED: -- case CPU_UP_CANCELED_FROZEN: - if (!per_cpu(ksoftirqd, hotcpu)) - break; - /* Unbind so it can run. Fall thru. 
*/ - kthread_bind(per_cpu(ksoftirqd, hotcpu), - cpumask_any(cpu_online_mask)); -- case CPU_DEAD: -- case CPU_DEAD_FROZEN: { -+ case CPU_POST_DEAD: { - static const struct sched_param param = { - .sched_priority = MAX_RT_PRIO-1 - }; diff --git a/debian/patches/features/all/rt/softirq-init-softirq-local-lock-after-per-cpu-section-is-set-up.patch b/debian/patches/features/all/rt/softirq-init-softirq-local-lock-after-per-cpu-section-is-set-up.patch index 5f0cdf3b7..3481dd7d0 100644 --- a/debian/patches/features/all/rt/softirq-init-softirq-local-lock-after-per-cpu-section-is-set-up.patch +++ b/debian/patches/features/all/rt/softirq-init-softirq-local-lock-after-per-cpu-section-is-set-up.patch @@ -113,11 +113,9 @@ Signed-off-by: Thomas Gleixner init/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -Index: linux-stable/init/main.c -=================================================================== ---- linux-stable.orig/init/main.c -+++ linux-stable/init/main.c -@@ -491,7 +491,6 @@ asmlinkage void __init start_kernel(void +--- a/init/main.c ++++ b/init/main.c +@@ -493,7 +493,6 @@ asmlinkage void __init start_kernel(void * Interrupts are still disabled. 
Do necessary setups, then * enable them */ @@ -125,7 +123,7 @@ Index: linux-stable/init/main.c tick_init(); boot_cpu_init(); page_address_init(); -@@ -502,6 +501,7 @@ asmlinkage void __init start_kernel(void +@@ -504,6 +503,7 @@ asmlinkage void __init start_kernel(void setup_command_line(command_line); setup_nr_cpu_ids(); setup_per_cpu_areas(); diff --git a/debian/patches/features/all/rt/softirq-local-lock.patch b/debian/patches/features/all/rt/softirq-local-lock.patch index 640d88c75..12d490eed 100644 --- a/debian/patches/features/all/rt/softirq-local-lock.patch +++ b/debian/patches/features/all/rt/softirq-local-lock.patch @@ -11,11 +11,9 @@ Signed-off-by: Thomas Gleixner kernel/softirq.c | 166 +++++++++++++++++++++++++++++++++++++++++++++- 5 files changed, 191 insertions(+), 5 deletions(-) -Index: linux-stable/include/linux/hardirq.h -=================================================================== ---- linux-stable.orig/include/linux/hardirq.h -+++ linux-stable/include/linux/hardirq.h -@@ -60,7 +60,11 @@ +--- a/include/linux/hardirq.h ++++ b/include/linux/hardirq.h +@@ -61,7 +61,11 @@ #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) #define NMI_OFFSET (1UL << NMI_SHIFT) @@ -28,7 +26,7 @@ Index: linux-stable/include/linux/hardirq.h #ifndef PREEMPT_ACTIVE #define PREEMPT_ACTIVE_BITS 1 -@@ -73,10 +77,17 @@ +@@ -74,10 +78,17 @@ #endif #define hardirq_count() (preempt_count() & HARDIRQ_MASK) @@ -47,7 +45,7 @@ Index: linux-stable/include/linux/hardirq.h /* * Are we doing bottom half or hardware interrupt processing? * Are we in a softirq context? Interrupt context? -@@ -86,7 +97,6 @@ +@@ -87,7 +98,6 @@ #define in_irq() (hardirq_count()) #define in_softirq() (softirq_count()) #define in_interrupt() (irq_count()) @@ -55,11 +53,9 @@ Index: linux-stable/include/linux/hardirq.h /* * Are we in NMI context? 
-Index: linux-stable/include/linux/interrupt.h -=================================================================== ---- linux-stable.orig/include/linux/interrupt.h -+++ linux-stable/include/linux/interrupt.h -@@ -450,7 +450,13 @@ struct softirq_action +--- a/include/linux/interrupt.h ++++ b/include/linux/interrupt.h +@@ -447,7 +447,13 @@ struct softirq_action asmlinkage void do_softirq(void); asmlinkage void __do_softirq(void); @@ -73,7 +69,7 @@ Index: linux-stable/include/linux/interrupt.h extern void open_softirq(int nr, void (*action)(struct softirq_action *)); extern void softirq_init(void); extern void __raise_softirq_irqoff(unsigned int nr); -@@ -637,6 +643,12 @@ void tasklet_hrtimer_cancel(struct taskl +@@ -634,6 +640,12 @@ void tasklet_hrtimer_cancel(struct taskl tasklet_kill(&ttimer->tasklet); } @@ -86,11 +82,9 @@ Index: linux-stable/include/linux/interrupt.h /* * Autoprobing for irqs: * -Index: linux-stable/include/linux/sched.h -=================================================================== ---- linux-stable.orig/include/linux/sched.h -+++ linux-stable/include/linux/sched.h -@@ -1617,6 +1617,7 @@ struct task_struct { +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1633,6 +1633,7 @@ struct task_struct { #endif #ifdef CONFIG_PREEMPT_RT_BASE struct rcu_head put_rcu; @@ -98,11 +92,9 @@ Index: linux-stable/include/linux/sched.h #endif }; -Index: linux-stable/init/main.c -=================================================================== ---- linux-stable.orig/init/main.c -+++ linux-stable/init/main.c -@@ -491,6 +491,7 @@ asmlinkage void __init start_kernel(void +--- a/init/main.c ++++ b/init/main.c +@@ -493,6 +493,7 @@ asmlinkage void __init start_kernel(void * Interrupts are still disabled. 
Do necessary setups, then * enable them */ @@ -110,19 +102,17 @@ Index: linux-stable/init/main.c tick_init(); boot_cpu_init(); page_address_init(); -Index: linux-stable/kernel/softirq.c -=================================================================== ---- linux-stable.orig/kernel/softirq.c -+++ linux-stable/kernel/softirq.c -@@ -24,6 +24,7 @@ - #include +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -25,6 +25,7 @@ #include + #include #include +#include #define CREATE_TRACE_POINTS #include -@@ -165,6 +166,7 @@ static void handle_pending_softirqs(u32 +@@ -168,6 +169,7 @@ static void handle_pending_softirqs(u32 local_irq_disable(); } @@ -130,7 +120,7 @@ Index: linux-stable/kernel/softirq.c /* * preempt_count and SOFTIRQ_OFFSET usage: * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving -@@ -377,6 +379,162 @@ asmlinkage void do_softirq(void) +@@ -360,6 +362,162 @@ asmlinkage void do_softirq(void) #endif @@ -293,7 +283,7 @@ Index: linux-stable/kernel/softirq.c /* * Enter an interrupt context. */ -@@ -390,9 +548,9 @@ void irq_enter(void) +@@ -373,9 +531,9 @@ void irq_enter(void) * Prevent raise_softirq from needlessly waking up ksoftirqd * here, as softirq will be serviced on return from interrupt. 
*/ @@ -305,7 +295,7 @@ Index: linux-stable/kernel/softirq.c } __irq_enter(); -@@ -400,6 +558,7 @@ void irq_enter(void) +@@ -383,6 +541,7 @@ void irq_enter(void) static inline void invoke_softirq(void) { @@ -313,7 +303,7 @@ Index: linux-stable/kernel/softirq.c if (!force_irqthreads) { #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED __do_softirq(); -@@ -412,6 +571,9 @@ static inline void invoke_softirq(void) +@@ -395,6 +554,9 @@ static inline void invoke_softirq(void) wakeup_softirqd(); __local_bh_enable(SOFTIRQ_OFFSET); } diff --git a/debian/patches/features/all/rt/softirq-make-fifo.patch b/debian/patches/features/all/rt/softirq-make-fifo.patch index 9286dc5e7..82f323597 100644 --- a/debian/patches/features/all/rt/softirq-make-fifo.patch +++ b/debian/patches/features/all/rt/softirq-make-fifo.patch @@ -4,34 +4,32 @@ Date: Thu, 21 Jul 2011 21:06:43 +0200 Signed-off-by: Thomas Gleixner --- - kernel/softirq.c | 19 +++++++++++++++++++ - 1 file changed, 19 insertions(+) + kernel/softirq.c | 18 ++++++++++++++++++ + 1 file changed, 18 insertions(+) -Index: linux-stable/kernel/softirq.c -=================================================================== ---- linux-stable.orig/kernel/softirq.c -+++ linux-stable/kernel/softirq.c -@@ -381,6 +381,8 @@ asmlinkage void do_softirq(void) +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -364,6 +364,8 @@ asmlinkage void do_softirq(void) static inline void local_bh_disable_nort(void) { local_bh_disable(); } static inline void _local_bh_enable_nort(void) { _local_bh_enable(); } -+static inline void ksoftirqd_set_sched_params(void) { } -+static inline void ksoftirqd_clr_sched_params(void) { } ++static void ksoftirqd_set_sched_params(unsigned int cpu) { } ++static void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) { } #else /* !PREEMPT_RT_FULL */ -@@ -535,6 +537,20 @@ static int ksoftirqd_do_softirq(int cpu) +@@ -518,6 +520,20 @@ static int ksoftirqd_do_softirq(int cpu) static inline void local_bh_disable_nort(void) { } static inline 
void _local_bh_enable_nort(void) { } -+static inline void ksoftirqd_set_sched_params(void) ++static inline void ksoftirqd_set_sched_params(unsigned int cpu) +{ + struct sched_param param = { .sched_priority = 1 }; + + sched_setscheduler(current, SCHED_FIFO, ¶m); +} + -+static inline void ksoftirqd_clr_sched_params(void) ++static inline void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) +{ + struct sched_param param = { .sched_priority = 0 }; + @@ -41,20 +39,12 @@ Index: linux-stable/kernel/softirq.c #endif /* PREEMPT_RT_FULL */ /* * Enter an interrupt context. -@@ -986,6 +1002,8 @@ void __init softirq_init(void) +@@ -1065,6 +1081,8 @@ static struct notifier_block __cpuinitda - static int run_ksoftirqd(void * __bind_cpu) - { -+ ksoftirqd_set_sched_params(); -+ - set_current_state(TASK_INTERRUPTIBLE); - - while (!kthread_should_stop()) { -@@ -1011,6 +1029,7 @@ static int run_ksoftirqd(void * __bind_c - - wait_to_die: - preempt_enable(); -+ ksoftirqd_clr_sched_params(); - /* Wait for kthread_stop */ - set_current_state(TASK_INTERRUPTIBLE); - while (!kthread_should_stop()) { + static struct smp_hotplug_thread softirq_threads = { + .store = &ksoftirqd, ++ .setup = ksoftirqd_set_sched_params, ++ .cleanup = ksoftirqd_clr_sched_params, + .thread_should_run = ksoftirqd_should_run, + .thread_fn = run_ksoftirqd, + .thread_comm = "ksoftirqd/%u", diff --git a/debian/patches/features/all/rt/softirq-make-serving-softirqs-a-task-flag.patch b/debian/patches/features/all/rt/softirq-make-serving-softirqs-a-task-flag.patch index e290a6747..fbd7bed94 100644 --- a/debian/patches/features/all/rt/softirq-make-serving-softirqs-a-task-flag.patch +++ b/debian/patches/features/all/rt/softirq-make-serving-softirqs-a-task-flag.patch @@ -10,23 +10,19 @@ Signed-off-by: Thomas Gleixner kernel/softirq.c | 20 +++----------------- 2 files changed, 4 insertions(+), 17 deletions(-) -Index: linux-stable/include/linux/sched.h 
-=================================================================== ---- linux-stable.orig/include/linux/sched.h -+++ linux-stable/include/linux/sched.h -@@ -1845,6 +1845,7 @@ extern void thread_group_times(struct ta - #define PF_MEMALLOC 0x00000800 /* Allocating memory */ - #define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */ - #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ -+#define PF_IN_SOFTIRQ 0x00004000 /* Task is serving softirq */ - #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ - #define PF_FROZEN 0x00010000 /* frozen for system suspend */ - #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ -Index: linux-stable/kernel/softirq.c -=================================================================== ---- linux-stable.orig/kernel/softirq.c -+++ linux-stable/kernel/softirq.c -@@ -392,7 +392,6 @@ static inline void ksoftirqd_clr_sched_p +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1856,6 +1856,7 @@ extern void thread_group_cputime_adjuste + /* + * Per process flags + */ ++#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */ + #define PF_EXITING 0x00000004 /* getting shut down */ + #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ + #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -375,7 +375,6 @@ static void ksoftirqd_clr_sched_params(u * On RT we serialize softirq execution with a cpu local lock */ static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock); @@ -34,7 +30,7 @@ Index: linux-stable/kernel/softirq.c static void __do_softirq_common(int need_rcu_bh_qs); -@@ -447,22 +446,9 @@ void _local_bh_enable(void) +@@ -430,22 +429,9 @@ void _local_bh_enable(void) } EXPORT_SYMBOL(_local_bh_enable); @@ -58,7 +54,7 @@ Index: linux-stable/kernel/softirq.c } EXPORT_SYMBOL(in_serving_softirq); -@@ -480,7 +466,7 @@ static void __do_softirq_common(int need +@@ -463,7 
+449,7 @@ static void __do_softirq_common(int need /* Reset the pending bitmask before enabling irqs */ set_softirq_pending(0); @@ -67,7 +63,7 @@ Index: linux-stable/kernel/softirq.c lockdep_softirq_enter(); -@@ -491,7 +477,7 @@ static void __do_softirq_common(int need +@@ -474,7 +460,7 @@ static void __do_softirq_common(int need wakeup_softirqd(); lockdep_softirq_exit(); diff --git a/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch b/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch index f878e568a..85b8188e1 100644 --- a/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch +++ b/debian/patches/features/all/rt/softirq-preempt-fix-3-re.patch @@ -20,11 +20,9 @@ Cc: stable-rt@vger.kernel.org net/core/dev.c | 6 ++++++ 4 files changed, 15 insertions(+) -Index: linux-stable/block/blk-iopoll.c -=================================================================== ---- linux-stable.orig/block/blk-iopoll.c -+++ linux-stable/block/blk-iopoll.c -@@ -38,6 +38,7 @@ void blk_iopoll_sched(struct blk_iopoll +--- a/block/blk-iopoll.c ++++ b/block/blk-iopoll.c +@@ -38,6 +38,7 @@ void blk_iopoll_sched(struct blk_iopoll list_add_tail(&iop->list, &__get_cpu_var(blk_cpu_iopoll)); __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); local_irq_restore(flags); @@ -48,10 +46,8 @@ Index: linux-stable/block/blk-iopoll.c } return NOTIFY_OK; -Index: linux-stable/block/blk-softirq.c -=================================================================== ---- linux-stable.orig/block/blk-softirq.c -+++ linux-stable/block/blk-softirq.c +--- a/block/blk-softirq.c ++++ b/block/blk-softirq.c @@ -51,6 +51,7 @@ static void trigger_softirq(void *data) raise_softirq_irqoff(BLOCK_SOFTIRQ); @@ -76,10 +72,8 @@ Index: linux-stable/block/blk-softirq.c } /** -Index: linux-stable/include/linux/preempt.h -=================================================================== ---- linux-stable.orig/include/linux/preempt.h -+++ linux-stable/include/linux/preempt.h +--- a/include/linux/preempt.h 
++++ b/include/linux/preempt.h @@ -56,8 +56,10 @@ do { \ #ifndef CONFIG_PREEMPT_RT_BASE @@ -99,11 +93,9 @@ Index: linux-stable/include/linux/preempt.h #endif /* CONFIG_PREEMPT_COUNT */ -Index: linux-stable/net/core/dev.c -=================================================================== ---- linux-stable.orig/net/core/dev.c -+++ linux-stable/net/core/dev.c -@@ -1836,6 +1836,7 @@ static inline void __netif_reschedule(st +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -1946,6 +1946,7 @@ static inline void __netif_reschedule(st sd->output_queue_tailp = &q->next_sched; raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_restore(flags); @@ -111,7 +103,7 @@ Index: linux-stable/net/core/dev.c } void __netif_schedule(struct Qdisc *q) -@@ -1857,6 +1858,7 @@ void dev_kfree_skb_irq(struct sk_buff *s +@@ -1967,6 +1968,7 @@ void dev_kfree_skb_irq(struct sk_buff *s sd->completion_queue = skb; raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_restore(flags); @@ -119,7 +111,7 @@ Index: linux-stable/net/core/dev.c } } EXPORT_SYMBOL(dev_kfree_skb_irq); -@@ -2927,6 +2929,7 @@ enqueue: +@@ -3052,6 +3054,7 @@ enqueue: rps_unlock(sd); local_irq_restore(flags); @@ -127,7 +119,7 @@ Index: linux-stable/net/core/dev.c atomic_long_inc(&skb->dev->rx_dropped); kfree_skb(skb); -@@ -3790,6 +3793,7 @@ static void net_rps_action_and_irq_enabl +@@ -3932,6 +3935,7 @@ static void net_rps_action_and_irq_enabl } else #endif local_irq_enable(); @@ -135,7 +127,7 @@ Index: linux-stable/net/core/dev.c } static int process_backlog(struct napi_struct *napi, int quota) -@@ -3862,6 +3866,7 @@ void __napi_schedule(struct napi_struct +@@ -4004,6 +4008,7 @@ void __napi_schedule(struct napi_struct local_irq_save(flags); ____napi_schedule(&__get_cpu_var(softnet_data), n); local_irq_restore(flags); @@ -143,7 +135,7 @@ Index: linux-stable/net/core/dev.c } EXPORT_SYMBOL(__napi_schedule); -@@ -6367,6 +6372,7 @@ static int dev_cpu_callback(struct notif +@@ -6560,6 +6565,7 @@ static int dev_cpu_callback(struct notif 
raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_enable(); diff --git a/debian/patches/features/all/rt/softirq-sanitize-softirq-pending.patch b/debian/patches/features/all/rt/softirq-sanitize-softirq-pending.patch index ecc72b066..e4b841fa9 100644 --- a/debian/patches/features/all/rt/softirq-sanitize-softirq-pending.patch +++ b/debian/patches/features/all/rt/softirq-sanitize-softirq-pending.patch @@ -6,15 +6,13 @@ Signed-off-by: Thomas Gleixner --- include/linux/interrupt.h | 2 + - kernel/softirq.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++ - kernel/time/tick-sched.c | 8 ------ - 3 files changed, 64 insertions(+), 7 deletions(-) + kernel/softirq.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++ + kernel/time/tick-sched.c | 9 ------ + 3 files changed, 66 insertions(+), 8 deletions(-) -Index: linux-stable/include/linux/interrupt.h -=================================================================== ---- linux-stable.orig/include/linux/interrupt.h -+++ linux-stable/include/linux/interrupt.h -@@ -458,6 +458,8 @@ extern void __raise_softirq_irqoff(unsig +--- a/include/linux/interrupt.h ++++ b/include/linux/interrupt.h +@@ -455,6 +455,8 @@ extern void __raise_softirq_irqoff(unsig extern void raise_softirq_irqoff(unsigned int nr); extern void raise_softirq(unsigned int nr); @@ -23,11 +21,9 @@ Index: linux-stable/include/linux/interrupt.h /* This is the worklist that queues up per-cpu softirq work. 
* * send_remote_sendirq() adds work to these lists, and -Index: linux-stable/kernel/softirq.c -=================================================================== ---- linux-stable.orig/kernel/softirq.c -+++ linux-stable/kernel/softirq.c -@@ -61,6 +61,67 @@ char *softirq_to_name[NR_SOFTIRQS] = { +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -62,6 +62,69 @@ char *softirq_to_name[NR_SOFTIRQS] = { "TASKLET", "SCHED", "HRTIMER", "RCU" }; @@ -46,11 +42,12 @@ Index: linux-stable/kernel/softirq.c +void softirq_check_pending_idle(void) +{ + static int rate_limit; -+ u32 warnpending = 0, pending = local_softirq_pending(); ++ u32 warnpending = 0, pending; + + if (rate_limit >= 10) + return; + ++ pending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK; + if (pending) { + struct task_struct *tsk; + @@ -83,7 +80,8 @@ Index: linux-stable/kernel/softirq.c +{ + static int rate_limit; + -+ if (rate_limit < 10) { ++ if (rate_limit < 10 && ++ (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { + printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", + local_softirq_pending()); + rate_limit++; @@ -95,17 +93,16 @@ Index: linux-stable/kernel/softirq.c /* * we cannot loop indefinitely here to avoid userspace starvation, * but we also don't want to introduce a worst case 1/HZ latency -Index: linux-stable/kernel/time/tick-sched.c -=================================================================== ---- linux-stable.orig/kernel/time/tick-sched.c -+++ linux-stable/kernel/time/tick-sched.c -@@ -438,13 +438,7 @@ static bool can_stop_idle_tick(int cpu, +--- a/kernel/time/tick-sched.c ++++ b/kernel/time/tick-sched.c +@@ -481,14 +481,7 @@ static bool can_stop_idle_tick(int cpu, return false; if (unlikely(local_softirq_pending() && cpu_online(cpu))) { - static int ratelimit; - -- if (ratelimit < 10) { +- if (ratelimit < 10 && +- (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { - printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", - (unsigned int) local_softirq_pending()); - 
ratelimit++; diff --git a/debian/patches/features/all/rt/softirq-split-handling-function.patch b/debian/patches/features/all/rt/softirq-split-handling-function.patch index f81c0292d..707aa441d 100644 --- a/debian/patches/features/all/rt/softirq-split-handling-function.patch +++ b/debian/patches/features/all/rt/softirq-split-handling-function.patch @@ -9,11 +9,9 @@ Signed-off-by: Thomas Gleixner kernel/softirq.c | 43 +++++++++++++++++++++++-------------------- 1 file changed, 23 insertions(+), 20 deletions(-) -Index: linux-stable/kernel/softirq.c -=================================================================== ---- linux-stable.orig/kernel/softirq.c -+++ linux-stable/kernel/softirq.c -@@ -139,31 +139,34 @@ static void wakeup_softirqd(void) +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -142,31 +142,34 @@ static void wakeup_softirqd(void) wake_up_process(tsk); } diff --git a/debian/patches/features/all/rt/softirq-split-locks.patch b/debian/patches/features/all/rt/softirq-split-locks.patch index b9f3625ec..39474508d 100644 --- a/debian/patches/features/all/rt/softirq-split-locks.patch +++ b/debian/patches/features/all/rt/softirq-split-locks.patch @@ -25,42 +25,56 @@ threads. 
Signed-off-by: Thomas Gleixner --- include/linux/sched.h | 1 - kernel/softirq.c | 281 ++++++++++++++++++++++++++++++++------------------ - 2 files changed, 183 insertions(+), 99 deletions(-) + kernel/softirq.c | 305 ++++++++++++++++++++++++++++++-------------------- + 2 files changed, 185 insertions(+), 121 deletions(-) -Index: linux-stable/include/linux/sched.h -=================================================================== ---- linux-stable.orig/include/linux/sched.h -+++ linux-stable/include/linux/sched.h -@@ -1619,6 +1619,7 @@ struct task_struct { +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1635,6 +1635,7 @@ struct task_struct { #ifdef CONFIG_PREEMPT_RT_BASE struct rcu_head put_rcu; int softirq_nestcnt; + unsigned int softirqs_raised; #endif - #if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM - int kmap_idx; -Index: linux-stable/kernel/softirq.c -=================================================================== ---- linux-stable.orig/kernel/softirq.c -+++ linux-stable/kernel/softirq.c -@@ -159,6 +159,7 @@ static void handle_softirq(unsigned int + #ifdef CONFIG_PREEMPT_RT_FULL + # if defined CONFIG_HIGHMEM || defined CONFIG_X86_32 +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -162,6 +162,12 @@ static void handle_softirq(unsigned int rcu_bh_qs(cpu); } +#ifndef CONFIG_PREEMPT_RT_FULL ++static inline int ksoftirqd_softirq_pending(void) ++{ ++ return local_softirq_pending(); ++} ++ static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs) { unsigned int vec_nr; -@@ -171,7 +172,6 @@ static void handle_pending_softirqs(u32 +@@ -174,7 +180,19 @@ static void handle_pending_softirqs(u32 local_irq_disable(); } -#ifndef CONFIG_PREEMPT_RT_FULL ++static void run_ksoftirqd(unsigned int cpu) ++{ ++ local_irq_disable(); ++ if (ksoftirqd_softirq_pending()) { ++ __do_softirq(); ++ rcu_note_context_switch(cpu); ++ local_irq_enable(); ++ cond_resched(); ++ return; ++ } ++ local_irq_enable(); ++} ++ /* * preempt_count 
and SOFTIRQ_OFFSET usage: * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving -@@ -384,28 +384,117 @@ asmlinkage void do_softirq(void) +@@ -367,6 +385,32 @@ asmlinkage void do_softirq(void) #endif @@ -92,14 +106,8 @@ Index: linux-stable/kernel/softirq.c + static inline void local_bh_disable_nort(void) { local_bh_disable(); } static inline void _local_bh_enable_nort(void) { _local_bh_enable(); } - static inline void ksoftirqd_set_sched_params(void) { } - static inline void ksoftirqd_clr_sched_params(void) { } - -+static inline int ksoftirqd_softirq_pending(void) -+{ -+ return local_softirq_pending(); -+} -+ + static void ksoftirqd_set_sched_params(unsigned int cpu) { } +@@ -375,20 +419,78 @@ static void ksoftirqd_clr_sched_params(u #else /* !PREEMPT_RT_FULL */ /* @@ -127,7 +135,8 @@ Index: linux-stable/kernel/softirq.c -void __init softirq_early_init(void) +static void unlock_softirq(int which) -+{ + { +- local_irq_lock_init(local_softirq_lock); + __local_unlock(&__get_cpu_var(local_softirq_locks[which])); +} + @@ -136,7 +145,7 @@ Index: linux-stable/kernel/softirq.c + unsigned long old_flags = current->flags; + + current->flags &= ~PF_MEMALLOC; -+ account_system_vtime(current); ++ vtime_account(current); + current->flags |= PF_IN_SOFTIRQ; + lockdep_softirq_enter(); + local_irq_enable(); @@ -144,7 +153,7 @@ Index: linux-stable/kernel/softirq.c + local_irq_disable(); + lockdep_softirq_exit(); + current->flags &= ~PF_IN_SOFTIRQ; -+ account_system_vtime(current); ++ vtime_account(current); + tsk_restore_flags(current, old_flags, PF_MEMALLOC); +} + @@ -153,8 +162,7 @@ Index: linux-stable/kernel/softirq.c + * in current context (or on behalf of ksoftirqd). 
+ */ +static void do_current_softirqs(int need_rcu_bh_qs) - { -- local_irq_lock_init(local_softirq_lock); ++{ + while (current->softirqs_raised) { + int i = __ffs(current->softirqs_raised); + unsigned int pending, mask = (1U << i); @@ -185,7 +193,7 @@ Index: linux-stable/kernel/softirq.c } void local_bh_disable(void) -@@ -420,17 +509,11 @@ void local_bh_enable(void) +@@ -403,17 +505,11 @@ void local_bh_enable(void) if (WARN_ON(current->softirq_nestcnt == 0)) return; @@ -207,7 +215,7 @@ Index: linux-stable/kernel/softirq.c current->softirq_nestcnt--; migrate_enable(); } -@@ -455,37 +538,8 @@ int in_serving_softirq(void) +@@ -438,86 +534,82 @@ int in_serving_softirq(void) } EXPORT_SYMBOL(in_serving_softirq); @@ -216,11 +224,14 @@ Index: linux-stable/kernel/softirq.c - * be pinned. - */ -static void __do_softirq_common(int need_rcu_bh_qs) --{ ++/* Called with preemption disabled */ ++static void run_ksoftirqd(unsigned int cpu) + { - u32 pending = local_softirq_pending(); - int cpu = smp_processor_id(); - -- current->softirq_nestcnt++; ++ local_irq_disable(); + current->softirq_nestcnt++; - - /* Reset the pending bitmask before enabling irqs */ - set_softirq_pending(0); @@ -238,82 +249,43 @@ Index: linux-stable/kernel/softirq.c - lockdep_softirq_exit(); - current->flags &= ~PF_IN_SOFTIRQ; - -- current->softirq_nestcnt--; --} -- --static int __thread_do_softirq(int cpu) -+/* Called with preemption disabled */ -+static int ksoftirqd_do_softirq(int cpu) - { - /* - * Prevent the current cpu from going offline. -@@ -496,45 +550,90 @@ static int __thread_do_softirq(int cpu) - */ - pin_current_cpu(); - /* -- * If called from ksoftirqd (cpu >= 0) we need to check -- * whether we are on the wrong cpu due to cpu offlining. If -- * called via thread_do_softirq() no action required. -+ * We need to check whether we are on the wrong cpu due to cpu -+ * offlining. 
- */ -- if (cpu >= 0 && cpu_is_offline(cpu)) { -+ if (cpu_is_offline(cpu)) { - unpin_current_cpu(); - return -1; - } - preempt_enable(); -- local_lock(local_softirq_lock); - local_irq_disable(); -- /* -- * We cannot switch stacks on RT as we want to be able to -- * schedule! -- */ -- if (local_softirq_pending()) -- __do_softirq_common(cpu >= 0); -- local_unlock(local_softirq_lock); -- unpin_current_cpu(); -- preempt_disable(); -+ current->softirq_nestcnt++; + do_current_softirqs(1); -+ current->softirq_nestcnt--; - local_irq_enable(); -+ -+ preempt_disable(); -+ unpin_current_cpu(); - return 0; + current->softirq_nestcnt--; ++ rcu_note_context_switch(cpu); ++ local_irq_enable(); } - /* -- * Called from netif_rx_ni(). Preemption enabled. +-static int __thread_do_softirq(int cpu) ++/* + * Called from netif_rx_ni(). Preemption enabled, but migration + * disabled. So the cpu can't go away under us. - */ - void thread_do_softirq(void) - { -- if (!in_serving_softirq()) { -- preempt_disable(); -- __thread_do_softirq(-1); -- preempt_enable(); ++ */ ++void thread_do_softirq(void) ++{ + if (!in_serving_softirq() && current->softirqs_raised) { + current->softirq_nestcnt++; + do_current_softirqs(0); + current->softirq_nestcnt--; - } - } - --static int ksoftirqd_do_softirq(int cpu) ++ } ++} ++ +void __raise_softirq_irqoff(unsigned int nr) -+{ + { + trace_softirq_raise(nr); + or_softirq_pending(1UL << nr); + -+ /* + /* +- * Prevent the current cpu from going offline. +- * pin_current_cpu() can reenable preemption and block on the +- * hotplug mutex. When it returns, the current cpu is +- * pinned. It might be the wrong one, but the offline check +- * below catches that. + * If we are not in a hard interrupt and inside a bh disabled + * region, we simply raise the flag on current. local_bh_enable() + * will make sure that the softirq is executed. Otherwise we + * delegate it to ksoftirqd. 
-+ */ + */ +- pin_current_cpu(); + if (!in_irq() && current->softirq_nestcnt) + current->softirqs_raised |= (1U << nr); + else if (__this_cpu_read(ksoftirqd)) @@ -327,14 +299,26 @@ Index: linux-stable/kernel/softirq.c +{ + __raise_softirq_irqoff(nr); + -+ /* + /* +- * If called from ksoftirqd (cpu >= 0) we need to check +- * whether we are on the wrong cpu due to cpu offlining. If +- * called via thread_do_softirq() no action required. + * If we're in an hard interrupt we let irq return code deal + * with the wakeup of ksoftirqd. -+ */ + */ +- if (cpu >= 0 && cpu_is_offline(cpu)) { +- unpin_current_cpu(); +- return -1; +- } +- preempt_enable(); +- local_lock(local_softirq_lock); +- local_irq_disable(); + if (in_irq()) + return; + -+ /* + /* +- * We cannot switch stacks on RT as we want to be able to +- * schedule! + * If we are in thread context but outside of a bh disabled + * region, we need to wake ksoftirqd as well. + * @@ -342,16 +326,33 @@ Index: linux-stable/kernel/softirq.c + * into local_bh_disable/enable pairs. Though it's unclear + * whether this is worth the effort. To find those places just + * raise a WARN() if the condition is met. -+ */ + */ +- if (local_softirq_pending()) +- __do_softirq_common(cpu >= 0); +- local_unlock(local_softirq_lock); +- unpin_current_cpu(); +- preempt_disable(); +- local_irq_enable(); +- return 0; + if (!current->softirq_nestcnt) + wakeup_softirqd(); -+} -+ + } + +-/* +- * Called from netif_rx_ni(). Preemption enabled. 
+- */ +-void thread_do_softirq(void) +void do_raise_softirq_irqoff(unsigned int nr) -+{ + { +- if (!in_serving_softirq()) { +- preempt_disable(); +- __thread_do_softirq(-1); +- preempt_enable(); +- } + raise_softirq_irqoff(nr); -+} -+ + } + +-static int ksoftirqd_do_softirq(int cpu) +static inline int ksoftirqd_softirq_pending(void) { - return __thread_do_softirq(cpu); @@ -359,7 +360,7 @@ Index: linux-stable/kernel/softirq.c } static inline void local_bh_disable_nort(void) { } -@@ -545,6 +644,10 @@ static inline void ksoftirqd_set_sched_p +@@ -528,6 +620,10 @@ static inline void ksoftirqd_set_sched_p struct sched_param param = { .sched_priority = 1 }; sched_setscheduler(current, SCHED_FIFO, ¶m); @@ -369,8 +370,8 @@ Index: linux-stable/kernel/softirq.c + local_irq_enable(); } - static inline void ksoftirqd_clr_sched_params(void) -@@ -591,8 +694,14 @@ static inline void invoke_softirq(void) + static inline void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) +@@ -574,8 +670,14 @@ static inline void invoke_softirq(void) wakeup_softirqd(); __local_bh_enable(SOFTIRQ_OFFSET); } @@ -386,7 +387,7 @@ Index: linux-stable/kernel/softirq.c #endif } -@@ -616,26 +725,6 @@ void irq_exit(void) +@@ -599,26 +701,6 @@ void irq_exit(void) sched_preempt_enable_no_resched(); } @@ -413,7 +414,7 @@ Index: linux-stable/kernel/softirq.c void raise_softirq(unsigned int nr) { unsigned long flags; -@@ -645,12 +734,6 @@ void raise_softirq(unsigned int nr) +@@ -628,12 +710,6 @@ void raise_softirq(unsigned int nr) local_irq_restore(flags); } @@ -426,18 +427,25 @@ Index: linux-stable/kernel/softirq.c void open_softirq(int nr, void (*action)(struct softirq_action *)) { softirq_vec[nr].action = action; -@@ -1102,12 +1185,12 @@ static int run_ksoftirqd(void * __bind_c +@@ -1079,20 +1155,7 @@ EXPORT_SYMBOL(tasklet_unlock_wait); - while (!kthread_should_stop()) { - preempt_disable(); -- if (!local_softirq_pending()) -+ if (!ksoftirqd_softirq_pending()) - schedule_preempt_disabled(); + static 
int ksoftirqd_should_run(unsigned int cpu) + { +- return local_softirq_pending(); +-} +- +-static void run_ksoftirqd(unsigned int cpu) +-{ +- local_irq_disable(); +- if (local_softirq_pending()) { +- __do_softirq(); +- rcu_note_context_switch(cpu); +- local_irq_enable(); +- cond_resched(); +- return; +- } +- local_irq_enable(); ++ return ksoftirqd_softirq_pending(); + } - __set_current_state(TASK_RUNNING); - -- while (local_softirq_pending()) { -+ while (ksoftirqd_softirq_pending()) { - if (ksoftirqd_do_softirq((long) __bind_cpu)) - goto wait_to_die; - sched_preempt_enable_no_resched(); + #ifdef CONFIG_HOTPLUG_CPU diff --git a/debian/patches/features/all/rt/softirq-split-out-code.patch b/debian/patches/features/all/rt/softirq-split-out-code.patch index 4e5e405a5..cefcab275 100644 --- a/debian/patches/features/all/rt/softirq-split-out-code.patch +++ b/debian/patches/features/all/rt/softirq-split-out-code.patch @@ -4,14 +4,12 @@ Date: Tue, 28 Jun 2011 15:46:49 +0200 Signed-off-by: Thomas Gleixner --- - kernel/softirq.c | 94 ++++++++++++++++++++++++++++++------------------------- - 1 file changed, 52 insertions(+), 42 deletions(-) + kernel/softirq.c | 62 ++++++++++++++++++++++++++----------------------------- + 1 file changed, 30 insertions(+), 32 deletions(-) -Index: linux-stable/kernel/softirq.c -=================================================================== ---- linux-stable.orig/kernel/softirq.c -+++ linux-stable/kernel/softirq.c -@@ -76,6 +76,34 @@ static void wakeup_softirqd(void) +--- a/kernel/softirq.c ++++ b/kernel/softirq.c +@@ -77,6 +77,34 @@ static void wakeup_softirqd(void) wake_up_process(tsk); } @@ -46,7 +44,7 @@ Index: linux-stable/kernel/softirq.c /* * preempt_count and SOFTIRQ_OFFSET usage: * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving -@@ -206,7 +234,6 @@ EXPORT_SYMBOL(local_bh_enable_ip); +@@ -207,7 +235,6 @@ EXPORT_SYMBOL(local_bh_enable_ip); asmlinkage void __do_softirq(void) { @@ -54,8 +52,8 @@ Index: 
linux-stable/kernel/softirq.c __u32 pending; int max_restart = MAX_SOFTIRQ_RESTART; int cpu; -@@ -223,7 +250,7 @@ asmlinkage void __do_softirq(void) - account_system_vtime(current); +@@ -224,7 +251,7 @@ asmlinkage void __do_softirq(void) + vtime_account_irq_enter(current); __local_bh_disable((unsigned long)__builtin_return_address(0), - SOFTIRQ_OFFSET); @@ -63,7 +61,7 @@ Index: linux-stable/kernel/softirq.c lockdep_softirq_enter(); cpu = smp_processor_id(); -@@ -231,36 +258,7 @@ restart: +@@ -232,36 +259,7 @@ restart: /* Reset the pending bitmask before enabling irqs */ set_softirq_pending(0); @@ -101,55 +99,3 @@ Index: linux-stable/kernel/softirq.c pending = local_softirq_pending(); if (pending && --max_restart) -@@ -276,6 +274,26 @@ restart: - tsk_restore_flags(current, old_flags, PF_MEMALLOC); - } - -+/* -+ * Called with preemption disabled from run_ksoftirqd() -+ */ -+static int ksoftirqd_do_softirq(int cpu) -+{ -+ /* -+ * Preempt disable stops cpu going offline. -+ * If already offline, we'll be on wrong CPU: -+ * don't process. -+ */ -+ if (cpu_is_offline(cpu)) -+ return -1; -+ -+ local_irq_disable(); -+ if (local_softirq_pending()) -+ __do_softirq(); -+ local_irq_enable(); -+ return 0; -+} -+ - #ifndef __ARCH_HAS_DO_SOFTIRQ - - asmlinkage void do_softirq(void) -@@ -748,22 +766,14 @@ static int run_ksoftirqd(void * __bind_c - - while (!kthread_should_stop()) { - preempt_disable(); -- if (!local_softirq_pending()) { -+ if (!local_softirq_pending()) - schedule_preempt_disabled(); -- } - - __set_current_state(TASK_RUNNING); - - while (local_softirq_pending()) { -- /* Preempt disable stops cpu going offline. 
-- If already offline, we'll be on wrong CPU: -- don't process */ -- if (cpu_is_offline((long)__bind_cpu)) -+ if (ksoftirqd_do_softirq((long) __bind_cpu)) - goto wait_to_die; -- local_irq_disable(); -- if (local_softirq_pending()) -- __do_softirq(); -- local_irq_enable(); - sched_preempt_enable_no_resched(); - cond_resched(); - preempt_disable(); diff --git a/debian/patches/features/all/rt/softirq-thread-do-softirq.patch b/debian/patches/features/all/rt/softirq-thread-do-softirq.patch index 7746fe253..8db1db6ac 100644 --- a/debian/patches/features/all/rt/softirq-thread-do-softirq.patch +++ b/debian/patches/features/all/rt/softirq-thread-do-softirq.patch @@ -8,11 +8,9 @@ Signed-off-by: Thomas Gleixner net/core/dev.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) -Index: linux-stable/include/linux/interrupt.h -=================================================================== ---- linux-stable.orig/include/linux/interrupt.h -+++ linux-stable/include/linux/interrupt.h -@@ -446,6 +446,7 @@ struct softirq_action +--- a/include/linux/interrupt.h ++++ b/include/linux/interrupt.h +@@ -443,6 +443,7 @@ struct softirq_action asmlinkage void do_softirq(void); asmlinkage void __do_softirq(void); @@ -20,11 +18,9 @@ Index: linux-stable/include/linux/interrupt.h extern void open_softirq(int nr, void (*action)(struct softirq_action *)); extern void softirq_init(void); extern void __raise_softirq_irqoff(unsigned int nr); -Index: linux-stable/net/core/dev.c -=================================================================== ---- linux-stable.orig/net/core/dev.c -+++ linux-stable/net/core/dev.c -@@ -2993,7 +2993,7 @@ int netif_rx_ni(struct sk_buff *skb) +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -3118,7 +3118,7 @@ int netif_rx_ni(struct sk_buff *skb) preempt_disable(); err = netif_rx(skb); if (local_softirq_pending()) diff --git a/debian/patches/features/all/rt/spi-omap-mcspi-check-condition-also-after-timeout.patch 
b/debian/patches/features/all/rt/spi-omap-mcspi-check-condition-also-after-timeout.patch new file mode 100644 index 000000000..136be4a7a --- /dev/null +++ b/debian/patches/features/all/rt/spi-omap-mcspi-check-condition-also-after-timeout.patch @@ -0,0 +1,33 @@ +From 65ef175b74710f70b6d89794c261e017f6f5d9ec Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Thu, 21 Mar 2013 12:46:49 +0100 +Subject: [PATCH 3/3] spi/omap-mcspi: check condition also after timeout + +It is possible that the handler gets interrupted after checking the +status. After it resumes it the time out is due but the condition it was +waiting for might be true. Therefore it is necessary to check the +condition in case of an time out to be sure that the condition is not +true _and_ the time passed by. + +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/spi/spi-omap2-mcspi.c | 8 ++++++-- + 1 file changed, 6 insertions(+), 2 deletions(-) + +--- a/drivers/spi/spi-omap2-mcspi.c ++++ b/drivers/spi/spi-omap2-mcspi.c +@@ -285,8 +285,12 @@ static int mcspi_wait_for_reg_bit(void _ + + timeout = jiffies + msecs_to_jiffies(1000); + while (!(__raw_readl(reg) & bit)) { +- if (time_after(jiffies, timeout)) +- return -1; ++ if (time_after(jiffies, timeout)) { ++ if (!(__raw_readl(reg) & bit)) ++ return -ETIMEDOUT; ++ else ++ return 0; ++ } + cpu_relax(); + } + return 0; diff --git a/debian/patches/features/all/rt/spinlock-types-separate-raw.patch b/debian/patches/features/all/rt/spinlock-types-separate-raw.patch index dff05b60b..66117736f 100644 --- a/debian/patches/features/all/rt/spinlock-types-separate-raw.patch +++ b/debian/patches/features/all/rt/spinlock-types-separate-raw.patch @@ -10,10 +10,8 @@ Signed-off-by: Thomas Gleixner include/linux/spinlock_types_raw.h | 56 +++++++++++++++++++++++++++ 4 files changed, 95 insertions(+), 72 deletions(-) -Index: linux-stable/include/linux/rwlock_types.h -=================================================================== ---- 
linux-stable.orig/include/linux/rwlock_types.h -+++ linux-stable/include/linux/rwlock_types.h +--- a/include/linux/rwlock_types.h ++++ b/include/linux/rwlock_types.h @@ -1,6 +1,10 @@ #ifndef __LINUX_RWLOCK_TYPES_H #define __LINUX_RWLOCK_TYPES_H @@ -25,10 +23,8 @@ Index: linux-stable/include/linux/rwlock_types.h /* * include/linux/rwlock_types.h - generic rwlock type definitions * and initializers -Index: linux-stable/include/linux/spinlock_types.h -=================================================================== ---- linux-stable.orig/include/linux/spinlock_types.h -+++ linux-stable/include/linux/spinlock_types.h +--- a/include/linux/spinlock_types.h ++++ b/include/linux/spinlock_types.h @@ -9,79 +9,9 @@ * Released under the General Public License (GPL). */ @@ -111,10 +107,8 @@ Index: linux-stable/include/linux/spinlock_types.h #include -Index: linux-stable/include/linux/spinlock_types_nort.h -=================================================================== --- /dev/null -+++ linux-stable/include/linux/spinlock_types_nort.h ++++ b/include/linux/spinlock_types_nort.h @@ -0,0 +1,33 @@ +#ifndef __LINUX_SPINLOCK_TYPES_NORT_H +#define __LINUX_SPINLOCK_TYPES_NORT_H @@ -149,10 +143,8 @@ Index: linux-stable/include/linux/spinlock_types_nort.h +#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) + +#endif -Index: linux-stable/include/linux/spinlock_types_raw.h -=================================================================== --- /dev/null -+++ linux-stable/include/linux/spinlock_types_raw.h ++++ b/include/linux/spinlock_types_raw.h @@ -0,0 +1,56 @@ +#ifndef __LINUX_SPINLOCK_TYPES_RAW_H +#define __LINUX_SPINLOCK_TYPES_RAW_H diff --git a/debian/patches/features/all/rt/stomp-machine-deal-clever-with-stopper-lock.patch b/debian/patches/features/all/rt/stomp-machine-deal-clever-with-stopper-lock.patch index 74388ddc9..eaa9d4c9d 100644 --- a/debian/patches/features/all/rt/stomp-machine-deal-clever-with-stopper-lock.patch +++ 
b/debian/patches/features/all/rt/stomp-machine-deal-clever-with-stopper-lock.patch @@ -13,10 +13,8 @@ Cc: stable-rt@vger.kernel.org kernel/stop_machine.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) -Index: linux-stable/kernel/stop_machine.c -=================================================================== ---- linux-stable.orig/kernel/stop_machine.c -+++ linux-stable/kernel/stop_machine.c +--- a/kernel/stop_machine.c ++++ b/kernel/stop_machine.c @@ -158,7 +158,7 @@ static DEFINE_PER_CPU(struct cpu_stop_wo static void queue_stop_cpus_work(const struct cpumask *cpumask, diff --git a/debian/patches/features/all/rt/stomp-machine-mark-stomper-thread.patch b/debian/patches/features/all/rt/stomp-machine-mark-stomper-thread.patch index c933698ed..5379d29d5 100644 --- a/debian/patches/features/all/rt/stomp-machine-mark-stomper-thread.patch +++ b/debian/patches/features/all/rt/stomp-machine-mark-stomper-thread.patch @@ -8,11 +8,9 @@ Signed-off-by: Thomas Gleixner kernel/stop_machine.c | 1 + 2 files changed, 2 insertions(+) -Index: linux-stable/include/linux/sched.h -=================================================================== ---- linux-stable.orig/include/linux/sched.h -+++ linux-stable/include/linux/sched.h -@@ -1836,6 +1836,7 @@ extern void thread_group_times(struct ta +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1859,6 +1859,7 @@ extern void thread_group_cputime_adjuste #define PF_FROZEN 0x00010000 /* frozen for system suspend */ #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ #define PF_KSWAPD 0x00040000 /* I am kswapd */ @@ -20,10 +18,8 @@ Index: linux-stable/include/linux/sched.h #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ #define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ -Index: linux-stable/kernel/stop_machine.c -=================================================================== ---- 
linux-stable.orig/kernel/stop_machine.c -+++ linux-stable/kernel/stop_machine.c +--- a/kernel/stop_machine.c ++++ b/kernel/stop_machine.c @@ -327,6 +327,7 @@ static int __cpuinit cpu_stop_cpu_callba if (IS_ERR(p)) return notifier_from_errno(PTR_ERR(p)); diff --git a/debian/patches/features/all/rt/stomp-machine-raw-lock.patch b/debian/patches/features/all/rt/stomp-machine-raw-lock.patch index 51898f6ab..e11a9a1df 100644 --- a/debian/patches/features/all/rt/stomp-machine-raw-lock.patch +++ b/debian/patches/features/all/rt/stomp-machine-raw-lock.patch @@ -7,10 +7,8 @@ Signed-off-by: Thomas Gleixner kernel/stop_machine.c | 58 +++++++++++++++++++++++++++++++++++--------------- 1 file changed, 41 insertions(+), 17 deletions(-) -Index: linux-stable/kernel/stop_machine.c -=================================================================== ---- linux-stable.orig/kernel/stop_machine.c -+++ linux-stable/kernel/stop_machine.c +--- a/kernel/stop_machine.c ++++ b/kernel/stop_machine.c @@ -29,12 +29,12 @@ struct cpu_stop_done { atomic_t nr_todo; /* nr left to execute */ bool executed; /* actually executed? 
*/ @@ -35,7 +33,7 @@ Index: linux-stable/kernel/stop_machine.c } /* signal completion unless @done is NULL */ -@@ -56,8 +56,10 @@ static void cpu_stop_signal_done(struct +@@ -56,8 +56,10 @@ static void cpu_stop_signal_done(struct if (done) { if (executed) done->executed = true; diff --git a/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch b/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch index f5f6c19df..f196a8195 100644 --- a/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch +++ b/debian/patches/features/all/rt/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch @@ -13,10 +13,8 @@ Signed-off-by: Thomas Gleixner kernel/stop_machine.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) -Index: linux-stable/kernel/stop_machine.c -=================================================================== ---- linux-stable.orig/kernel/stop_machine.c -+++ linux-stable/kernel/stop_machine.c +--- a/kernel/stop_machine.c ++++ b/kernel/stop_machine.c @@ -135,6 +135,7 @@ void stop_one_cpu_nowait(unsigned int cp /* static data for stop_cpus */ diff --git a/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch b/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch index 3ae719228..feaefdbb2 100644 --- a/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch +++ b/debian/patches/features/all/rt/suspend-prevernt-might-sleep-splats.patch @@ -23,11 +23,9 @@ Signed-off-by: Thomas Gleixner kernel/power/suspend.c | 4 ++++ 3 files changed, 12 insertions(+) -Index: linux-stable/include/linux/kernel.h -=================================================================== ---- linux-stable.orig/include/linux/kernel.h -+++ linux-stable/include/linux/kernel.h -@@ -385,6 +385,7 @@ extern enum system_states { +--- a/include/linux/kernel.h ++++ b/include/linux/kernel.h +@@ -412,6 +412,7 @@ extern 
enum system_states { SYSTEM_HALT, SYSTEM_POWER_OFF, SYSTEM_RESTART, @@ -35,10 +33,8 @@ Index: linux-stable/include/linux/kernel.h } system_state; #define TAINT_PROPRIETARY_MODULE 0 -Index: linux-stable/kernel/power/hibernate.c -=================================================================== ---- linux-stable.orig/kernel/power/hibernate.c -+++ linux-stable/kernel/power/hibernate.c +--- a/kernel/power/hibernate.c ++++ b/kernel/power/hibernate.c @@ -275,6 +275,8 @@ static int create_image(int platform_mod local_irq_disable(); @@ -88,10 +84,8 @@ Index: linux-stable/kernel/power/hibernate.c local_irq_enable(); enable_nonboot_cpus(); -Index: linux-stable/kernel/power/suspend.c -=================================================================== ---- linux-stable.orig/kernel/power/suspend.c -+++ linux-stable/kernel/power/suspend.c +--- a/kernel/power/suspend.c ++++ b/kernel/power/suspend.c @@ -165,6 +165,8 @@ static int suspend_enter(suspend_state_t arch_suspend_disable_irqs(); BUG_ON(!irqs_disabled()); diff --git a/debian/patches/features/all/rt/sysctl-include-atomic-h.patch b/debian/patches/features/all/rt/sysctl-include-atomic-h.patch index 426a6d2f6..2e51098ab 100644 --- a/debian/patches/features/all/rt/sysctl-include-atomic-h.patch +++ b/debian/patches/features/all/rt/sysctl-include-atomic-h.patch @@ -7,15 +7,13 @@ Signed-off-by: Thomas Gleixner include/linux/sysctl.h | 1 + 1 file changed, 1 insertion(+) -Index: linux-stable/include/linux/sysctl.h -=================================================================== ---- linux-stable.orig/include/linux/sysctl.h -+++ linux-stable/include/linux/sysctl.h -@@ -933,6 +933,7 @@ enum +--- a/include/linux/sysctl.h ++++ b/include/linux/sysctl.h +@@ -25,6 +25,7 @@ #include #include #include +#include + #include /* For the /proc/sys support */ - struct ctl_table; diff --git a/debian/patches/features/all/rt/sysfs-realtime-entry.patch b/debian/patches/features/all/rt/sysfs-realtime-entry.patch index 5645db2d3..4889c6b82 100644 
--- a/debian/patches/features/all/rt/sysfs-realtime-entry.patch +++ b/debian/patches/features/all/rt/sysfs-realtime-entry.patch @@ -17,11 +17,9 @@ Signed-off-by: Peter Zijlstra kernel/ksysfs.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) -Index: linux-stable/kernel/ksysfs.c -=================================================================== ---- linux-stable.orig/kernel/ksysfs.c -+++ linux-stable/kernel/ksysfs.c -@@ -133,6 +133,15 @@ KERNEL_ATTR_RO(vmcoreinfo); +--- a/kernel/ksysfs.c ++++ b/kernel/ksysfs.c +@@ -132,6 +132,15 @@ KERNEL_ATTR_RO(vmcoreinfo); #endif /* CONFIG_KEXEC */ @@ -37,10 +35,10 @@ Index: linux-stable/kernel/ksysfs.c /* whether file capabilities are enabled */ static ssize_t fscaps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) -@@ -182,6 +191,9 @@ static struct attribute * kernel_attrs[] - &kexec_crash_size_attr.attr, +@@ -197,6 +206,9 @@ static struct attribute * kernel_attrs[] &vmcoreinfo_attr.attr, #endif + &rcu_expedited_attr.attr, +#ifdef CONFIG_PREEMPT_RT_FULL + &realtime_attr.attr, +#endif diff --git a/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch b/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch index 20d98fe25..f0bbd80bc 100644 --- a/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch +++ b/debian/patches/features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch @@ -41,11 +41,9 @@ Signed-off-by: Thomas Gleixner kernel/softirq.c | 208 +++++++++++++++++++++++++++++++++------------- 2 files changed, 170 insertions(+), 77 deletions(-) -Index: linux-stable/include/linux/interrupt.h -=================================================================== ---- linux-stable.orig/include/linux/interrupt.h -+++ linux-stable/include/linux/interrupt.h -@@ -505,8 +505,9 @@ extern void __send_remote_softirq(struct +--- a/include/linux/interrupt.h 
++++ b/include/linux/interrupt.h +@@ -502,8 +502,9 @@ extern void __send_remote_softirq(struct to be executed on some cpu at least once after this. * If the tasklet is already scheduled, but its execution is still not started, it will be executed only once. @@ -57,7 +55,7 @@ Index: linux-stable/include/linux/interrupt.h * Tasklet is strictly serialized wrt itself, but not wrt another tasklets. If client needs some intertask synchronization, he makes it with spinlocks. -@@ -531,27 +532,36 @@ struct tasklet_struct name = { NULL, 0, +@@ -528,27 +529,36 @@ struct tasklet_struct name = { NULL, 0, enum { TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ @@ -100,7 +98,7 @@ Index: linux-stable/include/linux/interrupt.h #define tasklet_unlock_wait(t) do { } while (0) #define tasklet_unlock(t) do { } while (0) #endif -@@ -600,17 +610,8 @@ static inline void tasklet_disable(struc +@@ -597,17 +607,8 @@ static inline void tasklet_disable(struc smp_mb(); } @@ -120,10 +118,8 @@ Index: linux-stable/include/linux/interrupt.h extern void tasklet_kill(struct tasklet_struct *t); extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); -Index: linux-stable/kernel/softirq.c -=================================================================== ---- linux-stable.orig/kernel/softirq.c -+++ linux-stable/kernel/softirq.c +--- a/kernel/softirq.c ++++ b/kernel/softirq.c @@ -21,6 +21,7 @@ #include #include @@ -131,8 +127,8 @@ Index: linux-stable/kernel/softirq.c +#include #include #include - #include -@@ -665,15 +666,45 @@ struct tasklet_head + #include +@@ -648,15 +649,45 @@ struct tasklet_head static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec); static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec); @@ -182,7 +178,7 @@ Index: linux-stable/kernel/softirq.c local_irq_restore(flags); } -@@ -684,10 +715,7 @@ void __tasklet_hi_schedule(struct taskle +@@ -667,10 +698,7 @@ void __tasklet_hi_schedule(struct taskle unsigned long flags; local_irq_save(flags); 
@@ -194,7 +190,7 @@ Index: linux-stable/kernel/softirq.c local_irq_restore(flags); } -@@ -695,50 +723,119 @@ EXPORT_SYMBOL(__tasklet_hi_schedule); +@@ -678,50 +706,119 @@ EXPORT_SYMBOL(__tasklet_hi_schedule); void __tasklet_hi_schedule_first(struct tasklet_struct *t) { @@ -340,7 +336,7 @@ Index: linux-stable/kernel/softirq.c static void tasklet_hi_action(struct softirq_action *a) { struct tasklet_struct *list; -@@ -749,29 +846,7 @@ static void tasklet_hi_action(struct sof +@@ -732,29 +829,7 @@ static void tasklet_hi_action(struct sof __this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head); local_irq_enable(); @@ -371,7 +367,7 @@ Index: linux-stable/kernel/softirq.c } -@@ -794,7 +869,7 @@ void tasklet_kill(struct tasklet_struct +@@ -777,7 +852,7 @@ void tasklet_kill(struct tasklet_struct while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { do { @@ -380,7 +376,7 @@ Index: linux-stable/kernel/softirq.c } while (test_bit(TASKLET_STATE_SCHED, &t->state)); } tasklet_unlock_wait(t); -@@ -1000,6 +1075,23 @@ void __init softirq_init(void) +@@ -983,6 +1058,23 @@ void __init softirq_init(void) open_softirq(HI_SOFTIRQ, tasklet_hi_action); } @@ -401,6 +397,6 @@ Index: linux-stable/kernel/softirq.c +EXPORT_SYMBOL(tasklet_unlock_wait); +#endif + - static int run_ksoftirqd(void * __bind_cpu) + static int ksoftirqd_should_run(unsigned int cpu) { - ksoftirqd_set_sched_params(); + return local_softirq_pending(); diff --git a/debian/patches/features/all/rt/tasklist-lock-fix-section-conflict.patch b/debian/patches/features/all/rt/tasklist-lock-fix-section-conflict.patch index 0516c09ac..48c0e4e2c 100644 --- a/debian/patches/features/all/rt/tasklist-lock-fix-section-conflict.patch +++ b/debian/patches/features/all/rt/tasklist-lock-fix-section-conflict.patch @@ -33,10 +33,8 @@ Signed-off-by: Thomas Gleixner kernel/fork.c | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) -Index: linux-stable/include/linux/rwlock_types.h 
-=================================================================== ---- linux-stable.orig/include/linux/rwlock_types.h -+++ linux-stable/include/linux/rwlock_types.h +--- a/include/linux/rwlock_types.h ++++ b/include/linux/rwlock_types.h @@ -47,6 +47,7 @@ typedef struct { RW_DEP_MAP_INIT(lockname) } #endif @@ -46,10 +44,8 @@ Index: linux-stable/include/linux/rwlock_types.h + rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name) #endif /* __LINUX_RWLOCK_TYPES_H */ -Index: linux-stable/kernel/fork.c -=================================================================== ---- linux-stable.orig/kernel/fork.c -+++ linux-stable/kernel/fork.c +--- a/kernel/fork.c ++++ b/kernel/fork.c @@ -93,7 +93,7 @@ int max_threads; /* tunable limit on nr DEFINE_PER_CPU(unsigned long, process_counts) = 0; diff --git a/debian/patches/features/all/rt/timekeeping-delay-clock-cycle-last-update.patch b/debian/patches/features/all/rt/timekeeping-delay-clock-cycle-last-update.patch new file mode 100644 index 000000000..249e3917c --- /dev/null +++ b/debian/patches/features/all/rt/timekeeping-delay-clock-cycle-last-update.patch @@ -0,0 +1,33 @@ +Subject: timekeeping: Delay update of clock->cycle_last +From: Thomas Gleixner +Date: Sat, 16 Feb 2013 00:06:18 +0100 + +For calculating the new timekeeper values store the new cycle_last +value in the timekeeper and update the clock->cycle_last just when we +actually update the new values. 
+ +Signed-off-by: Thomas Gleixner +--- + kernel/time/timekeeping.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +--- a/kernel/time/timekeeping.c ++++ b/kernel/time/timekeeping.c +@@ -1095,7 +1095,7 @@ static cycle_t logarithmic_accumulation( + + /* Accumulate one shifted interval */ + offset -= interval; +- tk->clock->cycle_last += interval; ++ tk->cycle_last += interval; + + tk->xtime_nsec += tk->xtime_interval << shift; + accumulate_nsecs_to_secs(tk); +@@ -1210,6 +1210,8 @@ static void update_wall_time(void) + */ + accumulate_nsecs_to_secs(tk); + ++ /* Update clock->cycle_last with the new value */ ++ clock->cycle_last = tk->cycle_last; + timekeeping_update(tk, false); + + out: diff --git a/debian/patches/features/all/rt/timekeeping-do-not-calc-crap-over-and-over.patch b/debian/patches/features/all/rt/timekeeping-do-not-calc-crap-over-and-over.patch new file mode 100644 index 000000000..e73e640b5 --- /dev/null +++ b/debian/patches/features/all/rt/timekeeping-do-not-calc-crap-over-and-over.patch @@ -0,0 +1,34 @@ +Subject: timekeeping: Calc stuff once +From: Thomas Gleixner +Date: Sat, 16 Feb 2013 00:12:36 +0100 + +Calculate the cycle interval shifted value once. No functional change, +just makes the code more readable. 
+ +Signed-off-by: Thomas Gleixner +--- + kernel/time/timekeeping.c | 7 ++++--- + 1 file changed, 4 insertions(+), 3 deletions(-) + +--- a/kernel/time/timekeeping.c ++++ b/kernel/time/timekeeping.c +@@ -1077,15 +1077,16 @@ static inline void accumulate_nsecs_to_s + static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset, + u32 shift) + { ++ cycle_t interval = tk->cycle_interval << shift; + u64 raw_nsecs; + + /* If the offset is smaller then a shifted interval, do nothing */ +- if (offset < tk->cycle_interval<cycle_interval << shift; +- tk->clock->cycle_last += tk->cycle_interval << shift; ++ offset -= interval; ++ tk->clock->cycle_last += interval; + + tk->xtime_nsec += tk->xtime_interval << shift; + accumulate_nsecs_to_secs(tk); diff --git a/debian/patches/features/all/rt/timekeeping-implement-shadow-timekeeper.patch b/debian/patches/features/all/rt/timekeeping-implement-shadow-timekeeper.patch new file mode 100644 index 000000000..242aa2200 --- /dev/null +++ b/debian/patches/features/all/rt/timekeeping-implement-shadow-timekeeper.patch @@ -0,0 +1,156 @@ +Subject: timekeeping: Implement a shadow timekeeper +From: Thomas Gleixner +Date: Fri, 15 Feb 2013 15:47:13 +0100 + +Use the shadow timekeeper to do the update_wall_time() adjustments and +then copy it over to the real timekeeper. + +Keep the shadow timekeeper in sync when updating stuff outside of +update_wall_time(). + +This allows us to limit the timekeeper_seq hold time to the update of +the real timekeeper and the vsyscall data in the next patch. 
+ +Signed-off-by: Thomas Gleixner +--- + kernel/time/timekeeping.c | 41 +++++++++++++++++++++++++++++------------ + 1 file changed, 29 insertions(+), 12 deletions(-) + +--- a/kernel/time/timekeeping.c ++++ b/kernel/time/timekeeping.c +@@ -28,6 +28,7 @@ + static struct timekeeper timekeeper; + static DEFINE_RAW_SPINLOCK(timekeeper_lock); + static seqcount_t timekeeper_seq; ++static struct timekeeper shadow_timekeeper; + + /* flag for if timekeeping is suspended */ + int __read_mostly timekeeping_suspended; +@@ -221,7 +222,7 @@ int pvclock_gtod_unregister_notifier(str + EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier); + + /* must hold timekeeper_lock */ +-static void timekeeping_update(struct timekeeper *tk, bool clearntp) ++static void timekeeping_update(struct timekeeper *tk, bool clearntp, bool mirror) + { + if (clearntp) { + tk->ntp_error = 0; +@@ -229,6 +230,9 @@ static void timekeeping_update(struct ti + } + update_vsyscall(tk); + update_pvclock_gtod(tk); ++ ++ if (mirror) ++ memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper)); + } + + /** +@@ -422,7 +426,7 @@ int do_settimeofday(const struct timespe + + tk_set_xtime(tk, tv); + +- timekeeping_update(tk, true); ++ timekeeping_update(tk, true, true); + + write_seqcount_end(&timekeeper_seq); + raw_spin_unlock_irqrestore(&timekeeper_lock, flags); +@@ -466,7 +470,7 @@ int timekeeping_inject_offset(struct tim + tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts)); + + error: /* even if we error out, we forwarded the time, so call update */ +- timekeeping_update(tk, true); ++ timekeeping_update(tk, true, true); + + write_seqcount_end(&timekeeper_seq); + raw_spin_unlock_irqrestore(&timekeeper_lock, flags); +@@ -501,7 +505,7 @@ static int change_clocksource(void *data + if (old->disable) + old->disable(old); + } +- timekeeping_update(tk, true); ++ timekeeping_update(tk, true, true); + + write_seqcount_end(&timekeeper_seq); + raw_spin_unlock_irqrestore(&timekeeper_lock, flags); +@@ -680,6 +684,8 
@@ void __init timekeeping_init(void) + tmp.tv_nsec = 0; + tk_set_sleep_time(tk, tmp); + ++ memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper)); ++ + write_seqcount_end(&timekeeper_seq); + raw_spin_unlock_irqrestore(&timekeeper_lock, flags); + } +@@ -735,7 +741,7 @@ void timekeeping_inject_sleeptime(struct + + __timekeeping_inject_sleeptime(tk, delta); + +- timekeeping_update(tk, true); ++ timekeeping_update(tk, true, true); + + write_seqcount_end(&timekeeper_seq); + raw_spin_unlock_irqrestore(&timekeeper_lock, flags); +@@ -773,7 +779,7 @@ static void timekeeping_resume(void) + tk->clock->cycle_last = tk->clock->read(tk->clock); + tk->ntp_error = 0; + timekeeping_suspended = 0; +- timekeeping_update(tk, false); ++ timekeeping_update(tk, false, true); + write_seqcount_end(&timekeeper_seq); + raw_spin_unlock_irqrestore(&timekeeper_lock, flags); + +@@ -1152,7 +1158,8 @@ static inline void old_vsyscall_fixup(st + static void update_wall_time(void) + { + struct clocksource *clock; +- struct timekeeper *tk = &timekeeper; ++ struct timekeeper *real_tk = &timekeeper; ++ struct timekeeper *tk = &shadow_timekeeper; + cycle_t offset; + int shift = 0, maxshift; + unsigned long flags; +@@ -1164,16 +1171,16 @@ static void update_wall_time(void) + if (unlikely(timekeeping_suspended)) + goto out; + +- clock = tk->clock; ++ clock = real_tk->clock; + + #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET +- offset = tk->cycle_interval; ++ offset = real_tk->cycle_interval; + #else + offset = (clock->read(clock) - clock->cycle_last) & clock->mask; + #endif + + /* Check if there's really nothing to do */ +- if (offset < tk->cycle_interval) ++ if (offset < real_tk->cycle_interval) + goto out; + + /* +@@ -1212,12 +1219,22 @@ static void update_wall_time(void) + + /* Update clock->cycle_last with the new value */ + clock->cycle_last = tk->cycle_last; +- timekeeping_update(tk, false); ++ /* ++ * Update the real timekeeper. 
++ * ++ * We could avoid this memcpy by switching pointers, but that ++ * requires changes to all other timekeeper usage sites as ++ * well, i.e. move the timekeeper pointer getter into the ++ * spinlocked/seqcount protected sections. And we trade this ++ * memcpy under the timekeeper_seq against one before we start ++ * updating. ++ */ ++ memcpy(real_tk, tk, sizeof(*tk)); ++ timekeeping_update(real_tk, false, false); + + out: + write_seqcount_end(&timekeeper_seq); + raw_spin_unlock_irqrestore(&timekeeper_lock, flags); +- + } + + /** diff --git a/debian/patches/features/all/rt/timekeeping-make-jiffies-lock-internal.patch b/debian/patches/features/all/rt/timekeeping-make-jiffies-lock-internal.patch new file mode 100644 index 000000000..77d6e7663 --- /dev/null +++ b/debian/patches/features/all/rt/timekeeping-make-jiffies-lock-internal.patch @@ -0,0 +1,44 @@ +Subject: timekeeping: Make jiffies_lock internal +From: Thomas Gleixner +Date: Thu, 14 Feb 2013 22:38:07 +0100 + +Nothing outside of the timekeeping core needs that lock. 
+ +Signed-off-by: Thomas Gleixner +--- + include/linux/jiffies.h | 1 - + kernel/time/tick-internal.h | 2 ++ + kernel/time/timekeeping.c | 1 + + 3 files changed, 3 insertions(+), 1 deletion(-) + +--- a/include/linux/jiffies.h ++++ b/include/linux/jiffies.h +@@ -75,7 +75,6 @@ extern int register_refined_jiffies(long + */ + extern u64 __jiffy_data jiffies_64; + extern unsigned long volatile __jiffy_data jiffies; +-extern seqlock_t jiffies_lock; + + #if (BITS_PER_LONG < 64) + u64 get_jiffies_64(void); +--- a/kernel/time/tick-internal.h ++++ b/kernel/time/tick-internal.h +@@ -4,6 +4,8 @@ + #include + #include + ++extern seqlock_t jiffies_lock; ++ + #ifdef CONFIG_GENERIC_CLOCKEVENTS_BUILD + + #define TICK_DO_TIMER_NONE -1 +--- a/kernel/time/timekeeping.c ++++ b/kernel/time/timekeeping.c +@@ -23,6 +23,7 @@ + #include + #include + ++#include "tick-internal.h" + + static struct timekeeper timekeeper; + diff --git a/debian/patches/features/all/rt/timekeeping-move-lock-out-of-timekeeper.patch b/debian/patches/features/all/rt/timekeeping-move-lock-out-of-timekeeper.patch new file mode 100644 index 000000000..685b899ca --- /dev/null +++ b/debian/patches/features/all/rt/timekeeping-move-lock-out-of-timekeeper.patch @@ -0,0 +1,402 @@ +Subject: timekeeping: Move lock out of timekeeper struct +From: Thomas Gleixner +Date: Fri, 15 Feb 2013 15:05:48 +0100 + +Make the lock a separate entity. Preparatory patch for shadow +timekeeper structure. + +Signed-off-by: Thomas Gleixner +--- + include/linux/timekeeper_internal.h | 2 + kernel/time/timekeeping.c | 96 +++++++++++++++++------------------- + 2 files changed, 47 insertions(+), 51 deletions(-) + +--- a/include/linux/timekeeper_internal.h ++++ b/include/linux/timekeeper_internal.h +@@ -62,8 +62,6 @@ struct timekeeper { + ktime_t offs_boot; + /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. 
*/ + struct timespec raw_time; +- /* Seqlock for all timekeeper values */ +- seqlock_t lock; + }; + + static inline struct timespec tk_xtime(struct timekeeper *tk) +--- a/kernel/time/timekeeping.c ++++ b/kernel/time/timekeeping.c +@@ -26,6 +26,7 @@ + #include "tick-internal.h" + + static struct timekeeper timekeeper; ++static DEFINE_SEQLOCK(timekeeper_lock); + + /* flag for if timekeeping is suspended */ + int __read_mostly timekeeping_suspended; +@@ -194,11 +195,11 @@ int pvclock_gtod_register_notifier(struc + unsigned long flags; + int ret; + +- write_seqlock_irqsave(&tk->lock, flags); ++ write_seqlock_irqsave(&timekeeper_lock, flags); + ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb); + /* update timekeeping data */ + update_pvclock_gtod(tk); +- write_sequnlock_irqrestore(&tk->lock, flags); ++ write_sequnlock_irqrestore(&timekeeper_lock, flags); + + return ret; + } +@@ -212,13 +213,12 @@ EXPORT_SYMBOL_GPL(pvclock_gtod_register_ + */ + int pvclock_gtod_unregister_notifier(struct notifier_block *nb) + { +- struct timekeeper *tk = &timekeeper; + unsigned long flags; + int ret; + +- write_seqlock_irqsave(&tk->lock, flags); ++ write_seqlock_irqsave(&timekeeper_lock, flags); + ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb); +- write_sequnlock_irqrestore(&tk->lock, flags); ++ write_sequnlock_irqrestore(&timekeeper_lock, flags); + + return ret; + } +@@ -279,12 +279,12 @@ void getnstimeofday(struct timespec *ts) + WARN_ON(timekeeping_suspended); + + do { +- seq = read_seqbegin(&tk->lock); ++ seq = read_seqbegin(&timekeeper_lock); + + ts->tv_sec = tk->xtime_sec; + nsecs = timekeeping_get_ns(tk); + +- } while (read_seqretry(&tk->lock, seq)); ++ } while (read_seqretry(&timekeeper_lock, seq)); + + ts->tv_nsec = 0; + timespec_add_ns(ts, nsecs); +@@ -300,11 +300,11 @@ ktime_t ktime_get(void) + WARN_ON(timekeeping_suspended); + + do { +- seq = read_seqbegin(&tk->lock); ++ seq = read_seqbegin(&timekeeper_lock); + secs = tk->xtime_sec + 
tk->wall_to_monotonic.tv_sec; + nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec; + +- } while (read_seqretry(&tk->lock, seq)); ++ } while (read_seqretry(&timekeeper_lock, seq)); + /* + * Use ktime_set/ktime_add_ns to create a proper ktime on + * 32-bit architectures without CONFIG_KTIME_SCALAR. +@@ -331,12 +331,12 @@ void ktime_get_ts(struct timespec *ts) + WARN_ON(timekeeping_suspended); + + do { +- seq = read_seqbegin(&tk->lock); ++ seq = read_seqbegin(&timekeeper_lock); + ts->tv_sec = tk->xtime_sec; + nsec = timekeeping_get_ns(tk); + tomono = tk->wall_to_monotonic; + +- } while (read_seqretry(&tk->lock, seq)); ++ } while (read_seqretry(&timekeeper_lock, seq)); + + ts->tv_sec += tomono.tv_sec; + ts->tv_nsec = 0; +@@ -364,7 +364,7 @@ void getnstime_raw_and_real(struct times + WARN_ON_ONCE(timekeeping_suspended); + + do { +- seq = read_seqbegin(&tk->lock); ++ seq = read_seqbegin(&timekeeper_lock); + + *ts_raw = tk->raw_time; + ts_real->tv_sec = tk->xtime_sec; +@@ -373,7 +373,7 @@ void getnstime_raw_and_real(struct times + nsecs_raw = timekeeping_get_ns_raw(tk); + nsecs_real = timekeeping_get_ns(tk); + +- } while (read_seqretry(&tk->lock, seq)); ++ } while (read_seqretry(&timekeeper_lock, seq)); + + timespec_add_ns(ts_raw, nsecs_raw); + timespec_add_ns(ts_real, nsecs_real); +@@ -413,7 +413,7 @@ int do_settimeofday(const struct timespe + if (!timespec_valid_strict(tv)) + return -EINVAL; + +- write_seqlock_irqsave(&tk->lock, flags); ++ write_seqlock_irqsave(&timekeeper_lock, flags); + + timekeeping_forward_now(tk); + +@@ -427,7 +427,7 @@ int do_settimeofday(const struct timespe + + timekeeping_update(tk, true); + +- write_sequnlock_irqrestore(&tk->lock, flags); ++ write_sequnlock_irqrestore(&timekeeper_lock, flags); + + /* signal hrtimers about time change */ + clock_was_set(); +@@ -452,7 +452,7 @@ int timekeeping_inject_offset(struct tim + if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) + return -EINVAL; + +- write_seqlock_irqsave(&tk->lock, flags); ++ 
write_seqlock_irqsave(&timekeeper_lock, flags); + + timekeeping_forward_now(tk); + +@@ -469,7 +469,7 @@ int timekeeping_inject_offset(struct tim + error: /* even if we error out, we forwarded the time, so call update */ + timekeeping_update(tk, true); + +- write_sequnlock_irqrestore(&tk->lock, flags); ++ write_sequnlock_irqrestore(&timekeeper_lock, flags); + + /* signal hrtimers about time change */ + clock_was_set(); +@@ -491,7 +491,7 @@ static int change_clocksource(void *data + + new = (struct clocksource *) data; + +- write_seqlock_irqsave(&tk->lock, flags); ++ write_seqlock_irqsave(&timekeeper_lock, flags); + + timekeeping_forward_now(tk); + if (!new->enable || new->enable(new) == 0) { +@@ -502,7 +502,7 @@ static int change_clocksource(void *data + } + timekeeping_update(tk, true); + +- write_sequnlock_irqrestore(&tk->lock, flags); ++ write_sequnlock_irqrestore(&timekeeper_lock, flags); + + return 0; + } +@@ -552,11 +552,11 @@ void getrawmonotonic(struct timespec *ts + s64 nsecs; + + do { +- seq = read_seqbegin(&tk->lock); ++ seq = read_seqbegin(&timekeeper_lock); + nsecs = timekeeping_get_ns_raw(tk); + *ts = tk->raw_time; + +- } while (read_seqretry(&tk->lock, seq)); ++ } while (read_seqretry(&timekeeper_lock, seq)); + + timespec_add_ns(ts, nsecs); + } +@@ -572,11 +572,11 @@ int timekeeping_valid_for_hres(void) + int ret; + + do { +- seq = read_seqbegin(&tk->lock); ++ seq = read_seqbegin(&timekeeper_lock); + + ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; + +- } while (read_seqretry(&tk->lock, seq)); ++ } while (read_seqretry(&timekeeper_lock, seq)); + + return ret; + } +@@ -591,11 +591,11 @@ u64 timekeeping_max_deferment(void) + u64 ret; + + do { +- seq = read_seqbegin(&tk->lock); ++ seq = read_seqbegin(&timekeeper_lock); + + ret = tk->clock->max_idle_ns; + +- } while (read_seqretry(&tk->lock, seq)); ++ } while (read_seqretry(&timekeeper_lock, seq)); + + return ret; + } +@@ -656,11 +656,9 @@ void __init timekeeping_init(void) + boot.tv_nsec = 0; + } 
+ +- seqlock_init(&tk->lock); +- + ntp_init(); + +- write_seqlock_irqsave(&tk->lock, flags); ++ write_seqlock_irqsave(&timekeeper_lock, flags); + clock = clocksource_default_clock(); + if (clock->enable) + clock->enable(clock); +@@ -679,7 +677,7 @@ void __init timekeeping_init(void) + tmp.tv_nsec = 0; + tk_set_sleep_time(tk, tmp); + +- write_sequnlock_irqrestore(&tk->lock, flags); ++ write_sequnlock_irqrestore(&timekeeper_lock, flags); + } + + /* time in seconds when suspend began */ +@@ -726,7 +724,7 @@ void timekeeping_inject_sleeptime(struct + if (!(ts.tv_sec == 0 && ts.tv_nsec == 0)) + return; + +- write_seqlock_irqsave(&tk->lock, flags); ++ write_seqlock_irqsave(&timekeeper_lock, flags); + + timekeeping_forward_now(tk); + +@@ -734,7 +732,7 @@ void timekeeping_inject_sleeptime(struct + + timekeeping_update(tk, true); + +- write_sequnlock_irqrestore(&tk->lock, flags); ++ write_sequnlock_irqrestore(&timekeeper_lock, flags); + + /* signal hrtimers about time change */ + clock_was_set(); +@@ -758,7 +756,7 @@ static void timekeeping_resume(void) + clockevents_resume(); + clocksource_resume(); + +- write_seqlock_irqsave(&tk->lock, flags); ++ write_seqlock_irqsave(&timekeeper_lock, flags); + + if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) { + ts = timespec_sub(ts, timekeeping_suspend_time); +@@ -769,7 +767,7 @@ static void timekeeping_resume(void) + tk->ntp_error = 0; + timekeeping_suspended = 0; + timekeeping_update(tk, false); +- write_sequnlock_irqrestore(&tk->lock, flags); ++ write_sequnlock_irqrestore(&timekeeper_lock, flags); + + touch_softlockup_watchdog(); + +@@ -788,7 +786,7 @@ static int timekeeping_suspend(void) + + read_persistent_clock(&timekeeping_suspend_time); + +- write_seqlock_irqsave(&tk->lock, flags); ++ write_seqlock_irqsave(&timekeeper_lock, flags); + timekeeping_forward_now(tk); + timekeeping_suspended = 1; + +@@ -811,7 +809,7 @@ static int timekeeping_suspend(void) + timekeeping_suspend_time = + 
timespec_add(timekeeping_suspend_time, delta_delta); + } +- write_sequnlock_irqrestore(&tk->lock, flags); ++ write_sequnlock_irqrestore(&timekeeper_lock, flags); + + clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL); + clocksource_suspend(); +@@ -1149,7 +1147,7 @@ static void update_wall_time(void) + int shift = 0, maxshift; + unsigned long flags; + +- write_seqlock_irqsave(&tk->lock, flags); ++ write_seqlock_irqsave(&timekeeper_lock, flags); + + /* Make sure we're fully resumed: */ + if (unlikely(timekeeping_suspended)) +@@ -1204,7 +1202,7 @@ static void update_wall_time(void) + timekeeping_update(tk, false); + + out: +- write_sequnlock_irqrestore(&tk->lock, flags); ++ write_sequnlock_irqrestore(&timekeeper_lock, flags); + + } + +@@ -1252,13 +1250,13 @@ void get_monotonic_boottime(struct times + WARN_ON(timekeeping_suspended); + + do { +- seq = read_seqbegin(&tk->lock); ++ seq = read_seqbegin(&timekeeper_lock); + ts->tv_sec = tk->xtime_sec; + nsec = timekeeping_get_ns(tk); + tomono = tk->wall_to_monotonic; + sleep = tk->total_sleep_time; + +- } while (read_seqretry(&tk->lock, seq)); ++ } while (read_seqretry(&timekeeper_lock, seq)); + + ts->tv_sec += tomono.tv_sec + sleep.tv_sec; + ts->tv_nsec = 0; +@@ -1317,10 +1315,10 @@ struct timespec current_kernel_time(void + unsigned long seq; + + do { +- seq = read_seqbegin(&tk->lock); ++ seq = read_seqbegin(&timekeeper_lock); + + now = tk_xtime(tk); +- } while (read_seqretry(&tk->lock, seq)); ++ } while (read_seqretry(&timekeeper_lock, seq)); + + return now; + } +@@ -1333,11 +1331,11 @@ struct timespec get_monotonic_coarse(voi + unsigned long seq; + + do { +- seq = read_seqbegin(&tk->lock); ++ seq = read_seqbegin(&timekeeper_lock); + + now = tk_xtime(tk); + mono = tk->wall_to_monotonic; +- } while (read_seqretry(&tk->lock, seq)); ++ } while (read_seqretry(&timekeeper_lock, seq)); + + set_normalized_timespec(&now, now.tv_sec + mono.tv_sec, + now.tv_nsec + mono.tv_nsec); +@@ -1368,11 +1366,11 @@ void 
get_xtime_and_monotonic_and_sleep_o + unsigned long seq; + + do { +- seq = read_seqbegin(&tk->lock); ++ seq = read_seqbegin(&timekeeper_lock); + *xtim = tk_xtime(tk); + *wtom = tk->wall_to_monotonic; + *sleep = tk->total_sleep_time; +- } while (read_seqretry(&tk->lock, seq)); ++ } while (read_seqretry(&timekeeper_lock, seq)); + } + + #ifdef CONFIG_HIGH_RES_TIMERS +@@ -1392,14 +1390,14 @@ ktime_t ktime_get_update_offsets(ktime_t + u64 secs, nsecs; + + do { +- seq = read_seqbegin(&tk->lock); ++ seq = read_seqbegin(&timekeeper_lock); + + secs = tk->xtime_sec; + nsecs = timekeeping_get_ns(tk); + + *offs_real = tk->offs_real; + *offs_boot = tk->offs_boot; +- } while (read_seqretry(&tk->lock, seq)); ++ } while (read_seqretry(&timekeeper_lock, seq)); + + now = ktime_add_ns(ktime_set(secs, 0), nsecs); + now = ktime_sub(now, *offs_real); +@@ -1417,9 +1415,9 @@ ktime_t ktime_get_monotonic_offset(void) + struct timespec wtom; + + do { +- seq = read_seqbegin(&tk->lock); ++ seq = read_seqbegin(&timekeeper_lock); + wtom = tk->wall_to_monotonic; +- } while (read_seqretry(&tk->lock, seq)); ++ } while (read_seqretry(&timekeeper_lock, seq)); + + return timespec_to_ktime(wtom); + } diff --git a/debian/patches/features/all/rt/timekeeping-shorten-seq-count-region.patch b/debian/patches/features/all/rt/timekeeping-shorten-seq-count-region.patch new file mode 100644 index 000000000..8d75ae54a --- /dev/null +++ b/debian/patches/features/all/rt/timekeeping-shorten-seq-count-region.patch @@ -0,0 +1,44 @@ +Subject: timekeeping: Shorten seq_count region +From: Thomas Gleixner +Date: Sat, 16 Feb 2013 00:39:49 +0100 + +Shorten the seqcount write hold region to the actual update of the +timekeeper and the related data (e.g vsyscall). + +On a contemporary x86 system this reduces the maximum latencies on +Preempt-RT from 8us to 4us on the non-timekeeping cores. 
+ +Signed-off-by: Thomas Gleixner +--- + kernel/time/timekeeping.c | 5 ++--- + 1 file changed, 2 insertions(+), 3 deletions(-) + +--- a/kernel/time/timekeeping.c ++++ b/kernel/time/timekeeping.c +@@ -1165,7 +1165,6 @@ static void update_wall_time(void) + unsigned long flags; + + raw_spin_lock_irqsave(&timekeeper_lock, flags); +- write_seqcount_begin(&timekeeper_seq); + + /* Make sure we're fully resumed: */ + if (unlikely(timekeeping_suspended)) +@@ -1217,6 +1216,7 @@ static void update_wall_time(void) + */ + accumulate_nsecs_to_secs(tk); + ++ write_seqcount_begin(&timekeeper_seq); + /* Update clock->cycle_last with the new value */ + clock->cycle_last = tk->cycle_last; + /* +@@ -1231,9 +1231,8 @@ static void update_wall_time(void) + */ + memcpy(real_tk, tk, sizeof(*tk)); + timekeeping_update(real_tk, false, false); +- +-out: + write_seqcount_end(&timekeeper_seq); ++out: + raw_spin_unlock_irqrestore(&timekeeper_lock, flags); + } + diff --git a/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch b/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch new file mode 100644 index 000000000..8586d5999 --- /dev/null +++ b/debian/patches/features/all/rt/timekeeping-split-jiffies-lock.patch @@ -0,0 +1,148 @@ +Subject: timekeeping-split-jiffies-lock.patch +From: Thomas Gleixner +Date: Thu, 14 Feb 2013 22:36:59 +0100 + +Signed-off-by: Thomas Gleixner +--- + kernel/time/jiffies.c | 7 ++++--- + kernel/time/tick-common.c | 10 ++++++---- + kernel/time/tick-internal.h | 3 ++- + kernel/time/tick-sched.c | 16 ++++++++++------ + kernel/time/timekeeping.c | 6 ++++-- + 5 files changed, 26 insertions(+), 16 deletions(-) + +--- a/kernel/time/jiffies.c ++++ b/kernel/time/jiffies.c +@@ -67,7 +67,8 @@ static struct clocksource clocksource_ji + .shift = JIFFIES_SHIFT, + }; + +-__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock); ++__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock); ++__cacheline_aligned_in_smp seqcount_t jiffies_seq; + + #if 
(BITS_PER_LONG < 64) + u64 get_jiffies_64(void) +@@ -76,9 +77,9 @@ u64 get_jiffies_64(void) + u64 ret; + + do { +- seq = read_seqbegin(&jiffies_lock); ++ seq = read_seqcount_begin(&jiffies_seq); + ret = jiffies_64; +- } while (read_seqretry(&jiffies_lock, seq)); ++ } while (read_seqcount_retry(&jiffies_seq, seq)); + return ret; + } + EXPORT_SYMBOL(get_jiffies_64); +--- a/kernel/time/tick-common.c ++++ b/kernel/time/tick-common.c +@@ -63,13 +63,15 @@ int tick_is_oneshot_available(void) + static void tick_periodic(int cpu) + { + if (tick_do_timer_cpu == cpu) { +- write_seqlock(&jiffies_lock); ++ raw_spin_lock(&jiffies_lock); ++ write_seqcount_begin(&jiffies_seq); + + /* Keep track of the next tick event */ + tick_next_period = ktime_add(tick_next_period, tick_period); + + do_timer(1); +- write_sequnlock(&jiffies_lock); ++ write_seqcount_end(&jiffies_seq); ++ raw_spin_unlock(&jiffies_lock); + } + + update_process_times(user_mode(get_irq_regs())); +@@ -130,9 +132,9 @@ void tick_setup_periodic(struct clock_ev + ktime_t next; + + do { +- seq = read_seqbegin(&jiffies_lock); ++ seq = read_seqcount_begin(&jiffies_seq); + next = tick_next_period; +- } while (read_seqretry(&jiffies_lock, seq)); ++ } while (read_seqcount_retry(&jiffies_seq, seq)); + + clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); + +--- a/kernel/time/tick-internal.h ++++ b/kernel/time/tick-internal.h +@@ -4,7 +4,8 @@ + #include + #include + +-extern seqlock_t jiffies_lock; ++extern raw_spinlock_t jiffies_lock; ++extern seqcount_t jiffies_seq; + + #ifdef CONFIG_GENERIC_CLOCKEVENTS_BUILD + +--- a/kernel/time/tick-sched.c ++++ b/kernel/time/tick-sched.c +@@ -56,7 +56,8 @@ static void tick_do_update_jiffies64(kti + return; + + /* Reevalute with jiffies_lock held */ +- write_seqlock(&jiffies_lock); ++ raw_spin_lock(&jiffies_lock); ++ write_seqcount_begin(&jiffies_seq); + + delta = ktime_sub(now, last_jiffies_update); + if (delta.tv64 >= tick_period.tv64) { +@@ -79,7 +80,8 @@ static void 
tick_do_update_jiffies64(kti + /* Keep the tick_next_period variable up to date */ + tick_next_period = ktime_add(last_jiffies_update, tick_period); + } +- write_sequnlock(&jiffies_lock); ++ write_seqcount_end(&jiffies_seq); ++ raw_spin_unlock(&jiffies_lock); + } + + /* +@@ -89,12 +91,14 @@ static ktime_t tick_init_jiffy_update(vo + { + ktime_t period; + +- write_seqlock(&jiffies_lock); ++ raw_spin_lock(&jiffies_lock); ++ write_seqcount_begin(&jiffies_seq); + /* Did we start the jiffies update yet ? */ + if (last_jiffies_update.tv64 == 0) + last_jiffies_update = tick_next_period; + period = last_jiffies_update; +- write_sequnlock(&jiffies_lock); ++ write_seqcount_end(&jiffies_seq); ++ raw_spin_unlock(&jiffies_lock); + return period; + } + +@@ -325,11 +329,11 @@ static ktime_t tick_nohz_stop_sched_tick + + /* Read jiffies and the time when jiffies were updated last */ + do { +- seq = read_seqbegin(&jiffies_lock); ++ seq = read_seqcount_begin(&jiffies_seq); + last_update = last_jiffies_update; + last_jiffies = jiffies; + time_delta = timekeeping_max_deferment(); +- } while (read_seqretry(&jiffies_lock, seq)); ++ } while (read_seqcount_retry(&jiffies_seq, seq)); + + if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) || + arch_needs_cpu(cpu)) { +--- a/kernel/time/timekeeping.c ++++ b/kernel/time/timekeeping.c +@@ -1461,7 +1461,9 @@ EXPORT_SYMBOL_GPL(ktime_get_monotonic_of + */ + void xtime_update(unsigned long ticks) + { +- write_seqlock(&jiffies_lock); ++ raw_spin_lock(&jiffies_lock); ++ write_seqcount_begin(&jiffies_seq); + do_timer(ticks); +- write_sequnlock(&jiffies_lock); ++ write_seqcount_end(&jiffies_seq); ++ raw_spin_unlock(&jiffies_lock); + } diff --git a/debian/patches/features/all/rt/timekeeping-split-timekeeper-lock.patch b/debian/patches/features/all/rt/timekeeping-split-timekeeper-lock.patch new file mode 100644 index 000000000..b9076c41b --- /dev/null +++ b/debian/patches/features/all/rt/timekeeping-split-timekeeper-lock.patch @@ -0,0 
+1,427 @@ +Subject: timekeeping: Split timekeeper_lock into lock and seqcount +From: Thomas Gleixner +Date: Fri, 15 Feb 2013 15:03:17 +0100 + +We want to shorten the seqcount write hold time. So split the seqlock +into a lock and a seqcount. + +Open code the seqwrite_lock in the places which matter and drop the +sequence counter update where it's pointless. + +Signed-off-by: Thomas Gleixner +--- + kernel/time/timekeeping.c | 118 +++++++++++++++++++++++++--------------------- + 1 file changed, 65 insertions(+), 53 deletions(-) + +--- a/kernel/time/timekeeping.c ++++ b/kernel/time/timekeeping.c +@@ -26,7 +26,8 @@ + #include "tick-internal.h" + + static struct timekeeper timekeeper; +-static DEFINE_SEQLOCK(timekeeper_lock); ++static DEFINE_RAW_SPINLOCK(timekeeper_lock); ++static seqcount_t timekeeper_seq; + + /* flag for if timekeeping is suspended */ + int __read_mostly timekeeping_suspended; +@@ -186,8 +187,6 @@ static void update_pvclock_gtod(struct t + + /** + * pvclock_gtod_register_notifier - register a pvclock timedata update listener +- * +- * Must hold write on timekeeper.lock + */ + int pvclock_gtod_register_notifier(struct notifier_block *nb) + { +@@ -195,11 +194,10 @@ int pvclock_gtod_register_notifier(struc + unsigned long flags; + int ret; + +- write_seqlock_irqsave(&timekeeper_lock, flags); ++ raw_spin_lock_irqsave(&timekeeper_lock, flags); + ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb); +- /* update timekeeping data */ + update_pvclock_gtod(tk); +- write_sequnlock_irqrestore(&timekeeper_lock, flags); ++ raw_spin_unlock_irqrestore(&timekeeper_lock, flags); + + return ret; + } +@@ -208,23 +206,21 @@ EXPORT_SYMBOL_GPL(pvclock_gtod_register_ + /** + * pvclock_gtod_unregister_notifier - unregister a pvclock + * timedata update listener +- * +- * Must hold write on timekeeper.lock + */ + int pvclock_gtod_unregister_notifier(struct notifier_block *nb) + { + unsigned long flags; + int ret; + +- write_seqlock_irqsave(&timekeeper_lock, flags); ++ 
raw_spin_lock_irqsave(&timekeeper_lock, flags); + ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb); +- write_sequnlock_irqrestore(&timekeeper_lock, flags); ++ raw_spin_unlock_irqrestore(&timekeeper_lock, flags); + + return ret; + } + EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier); + +-/* must hold write on timekeeper.lock */ ++/* must hold timekeeper_lock */ + static void timekeeping_update(struct timekeeper *tk, bool clearntp) + { + if (clearntp) { +@@ -279,12 +275,12 @@ void getnstimeofday(struct timespec *ts) + WARN_ON(timekeeping_suspended); + + do { +- seq = read_seqbegin(&timekeeper_lock); ++ seq = read_seqcount_begin(&timekeeper_seq); + + ts->tv_sec = tk->xtime_sec; + nsecs = timekeeping_get_ns(tk); + +- } while (read_seqretry(&timekeeper_lock, seq)); ++ } while (read_seqcount_retry(&timekeeper_seq, seq)); + + ts->tv_nsec = 0; + timespec_add_ns(ts, nsecs); +@@ -300,11 +296,11 @@ ktime_t ktime_get(void) + WARN_ON(timekeeping_suspended); + + do { +- seq = read_seqbegin(&timekeeper_lock); ++ seq = read_seqcount_begin(&timekeeper_seq); + secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; + nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec; + +- } while (read_seqretry(&timekeeper_lock, seq)); ++ } while (read_seqcount_retry(&timekeeper_seq, seq)); + /* + * Use ktime_set/ktime_add_ns to create a proper ktime on + * 32-bit architectures without CONFIG_KTIME_SCALAR. 
+@@ -331,12 +327,12 @@ void ktime_get_ts(struct timespec *ts) + WARN_ON(timekeeping_suspended); + + do { +- seq = read_seqbegin(&timekeeper_lock); ++ seq = read_seqcount_begin(&timekeeper_seq); + ts->tv_sec = tk->xtime_sec; + nsec = timekeeping_get_ns(tk); + tomono = tk->wall_to_monotonic; + +- } while (read_seqretry(&timekeeper_lock, seq)); ++ } while (read_seqcount_retry(&timekeeper_seq, seq)); + + ts->tv_sec += tomono.tv_sec; + ts->tv_nsec = 0; +@@ -364,7 +360,7 @@ void getnstime_raw_and_real(struct times + WARN_ON_ONCE(timekeeping_suspended); + + do { +- seq = read_seqbegin(&timekeeper_lock); ++ seq = read_seqcount_begin(&timekeeper_seq); + + *ts_raw = tk->raw_time; + ts_real->tv_sec = tk->xtime_sec; +@@ -373,7 +369,7 @@ void getnstime_raw_and_real(struct times + nsecs_raw = timekeeping_get_ns_raw(tk); + nsecs_real = timekeeping_get_ns(tk); + +- } while (read_seqretry(&timekeeper_lock, seq)); ++ } while (read_seqcount_retry(&timekeeper_seq, seq)); + + timespec_add_ns(ts_raw, nsecs_raw); + timespec_add_ns(ts_real, nsecs_real); +@@ -413,7 +409,8 @@ int do_settimeofday(const struct timespe + if (!timespec_valid_strict(tv)) + return -EINVAL; + +- write_seqlock_irqsave(&timekeeper_lock, flags); ++ raw_spin_lock_irqsave(&timekeeper_lock, flags); ++ write_seqcount_begin(&timekeeper_seq); + + timekeeping_forward_now(tk); + +@@ -427,7 +424,8 @@ int do_settimeofday(const struct timespe + + timekeeping_update(tk, true); + +- write_sequnlock_irqrestore(&timekeeper_lock, flags); ++ write_seqcount_end(&timekeeper_seq); ++ raw_spin_unlock_irqrestore(&timekeeper_lock, flags); + + /* signal hrtimers about time change */ + clock_was_set(); +@@ -452,7 +450,8 @@ int timekeeping_inject_offset(struct tim + if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) + return -EINVAL; + +- write_seqlock_irqsave(&timekeeper_lock, flags); ++ raw_spin_lock_irqsave(&timekeeper_lock, flags); ++ write_seqcount_begin(&timekeeper_seq); + + timekeeping_forward_now(tk); + +@@ -469,7 +468,8 @@ int 
timekeeping_inject_offset(struct tim + error: /* even if we error out, we forwarded the time, so call update */ + timekeeping_update(tk, true); + +- write_sequnlock_irqrestore(&timekeeper_lock, flags); ++ write_seqcount_end(&timekeeper_seq); ++ raw_spin_unlock_irqrestore(&timekeeper_lock, flags); + + /* signal hrtimers about time change */ + clock_was_set(); +@@ -491,7 +491,8 @@ static int change_clocksource(void *data + + new = (struct clocksource *) data; + +- write_seqlock_irqsave(&timekeeper_lock, flags); ++ raw_spin_lock_irqsave(&timekeeper_lock, flags); ++ write_seqcount_begin(&timekeeper_seq); + + timekeeping_forward_now(tk); + if (!new->enable || new->enable(new) == 0) { +@@ -502,7 +503,8 @@ static int change_clocksource(void *data + } + timekeeping_update(tk, true); + +- write_sequnlock_irqrestore(&timekeeper_lock, flags); ++ write_seqcount_end(&timekeeper_seq); ++ raw_spin_unlock_irqrestore(&timekeeper_lock, flags); + + return 0; + } +@@ -552,11 +554,11 @@ void getrawmonotonic(struct timespec *ts + s64 nsecs; + + do { +- seq = read_seqbegin(&timekeeper_lock); ++ seq = read_seqcount_begin(&timekeeper_seq); + nsecs = timekeeping_get_ns_raw(tk); + *ts = tk->raw_time; + +- } while (read_seqretry(&timekeeper_lock, seq)); ++ } while (read_seqcount_retry(&timekeeper_seq, seq)); + + timespec_add_ns(ts, nsecs); + } +@@ -572,11 +574,11 @@ int timekeeping_valid_for_hres(void) + int ret; + + do { +- seq = read_seqbegin(&timekeeper_lock); ++ seq = read_seqcount_begin(&timekeeper_seq); + + ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; + +- } while (read_seqretry(&timekeeper_lock, seq)); ++ } while (read_seqcount_retry(&timekeeper_seq, seq)); + + return ret; + } +@@ -591,11 +593,11 @@ u64 timekeeping_max_deferment(void) + u64 ret; + + do { +- seq = read_seqbegin(&timekeeper_lock); ++ seq = read_seqcount_begin(&timekeeper_seq); + + ret = tk->clock->max_idle_ns; + +- } while (read_seqretry(&timekeeper_lock, seq)); ++ } while (read_seqcount_retry(&timekeeper_seq, 
seq)); + + return ret; + } +@@ -658,7 +660,8 @@ void __init timekeeping_init(void) + + ntp_init(); + +- write_seqlock_irqsave(&timekeeper_lock, flags); ++ raw_spin_lock_irqsave(&timekeeper_lock, flags); ++ write_seqcount_begin(&timekeeper_seq); + clock = clocksource_default_clock(); + if (clock->enable) + clock->enable(clock); +@@ -677,7 +680,8 @@ void __init timekeeping_init(void) + tmp.tv_nsec = 0; + tk_set_sleep_time(tk, tmp); + +- write_sequnlock_irqrestore(&timekeeper_lock, flags); ++ write_seqcount_end(&timekeeper_seq); ++ raw_spin_unlock_irqrestore(&timekeeper_lock, flags); + } + + /* time in seconds when suspend began */ +@@ -724,7 +728,8 @@ void timekeeping_inject_sleeptime(struct + if (!(ts.tv_sec == 0 && ts.tv_nsec == 0)) + return; + +- write_seqlock_irqsave(&timekeeper_lock, flags); ++ raw_spin_lock_irqsave(&timekeeper_lock, flags); ++ write_seqcount_begin(&timekeeper_seq); + + timekeeping_forward_now(tk); + +@@ -732,7 +737,8 @@ void timekeeping_inject_sleeptime(struct + + timekeeping_update(tk, true); + +- write_sequnlock_irqrestore(&timekeeper_lock, flags); ++ write_seqcount_end(&timekeeper_seq); ++ raw_spin_unlock_irqrestore(&timekeeper_lock, flags); + + /* signal hrtimers about time change */ + clock_was_set(); +@@ -756,7 +762,8 @@ static void timekeeping_resume(void) + clockevents_resume(); + clocksource_resume(); + +- write_seqlock_irqsave(&timekeeper_lock, flags); ++ raw_spin_lock_irqsave(&timekeeper_lock, flags); ++ write_seqcount_begin(&timekeeper_seq); + + if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) { + ts = timespec_sub(ts, timekeeping_suspend_time); +@@ -767,7 +774,8 @@ static void timekeeping_resume(void) + tk->ntp_error = 0; + timekeeping_suspended = 0; + timekeeping_update(tk, false); +- write_sequnlock_irqrestore(&timekeeper_lock, flags); ++ write_seqcount_end(&timekeeper_seq); ++ raw_spin_unlock_irqrestore(&timekeeper_lock, flags); + + touch_softlockup_watchdog(); + +@@ -786,7 +794,8 @@ static int timekeeping_suspend(void) 
+ + read_persistent_clock(&timekeeping_suspend_time); + +- write_seqlock_irqsave(&timekeeper_lock, flags); ++ raw_spin_lock_irqsave(&timekeeper_lock, flags); ++ write_seqcount_begin(&timekeeper_seq); + timekeeping_forward_now(tk); + timekeeping_suspended = 1; + +@@ -809,7 +818,8 @@ static int timekeeping_suspend(void) + timekeeping_suspend_time = + timespec_add(timekeeping_suspend_time, delta_delta); + } +- write_sequnlock_irqrestore(&timekeeper_lock, flags); ++ write_seqcount_end(&timekeeper_seq); ++ raw_spin_unlock_irqrestore(&timekeeper_lock, flags); + + clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL); + clocksource_suspend(); +@@ -1147,7 +1157,8 @@ static void update_wall_time(void) + int shift = 0, maxshift; + unsigned long flags; + +- write_seqlock_irqsave(&timekeeper_lock, flags); ++ raw_spin_lock_irqsave(&timekeeper_lock, flags); ++ write_seqcount_begin(&timekeeper_seq); + + /* Make sure we're fully resumed: */ + if (unlikely(timekeeping_suspended)) +@@ -1202,7 +1213,8 @@ static void update_wall_time(void) + timekeeping_update(tk, false); + + out: +- write_sequnlock_irqrestore(&timekeeper_lock, flags); ++ write_seqcount_end(&timekeeper_seq); ++ raw_spin_unlock_irqrestore(&timekeeper_lock, flags); + + } + +@@ -1250,13 +1262,13 @@ void get_monotonic_boottime(struct times + WARN_ON(timekeeping_suspended); + + do { +- seq = read_seqbegin(&timekeeper_lock); ++ seq = read_seqcount_begin(&timekeeper_seq); + ts->tv_sec = tk->xtime_sec; + nsec = timekeeping_get_ns(tk); + tomono = tk->wall_to_monotonic; + sleep = tk->total_sleep_time; + +- } while (read_seqretry(&timekeeper_lock, seq)); ++ } while (read_seqcount_retry(&timekeeper_seq, seq)); + + ts->tv_sec += tomono.tv_sec + sleep.tv_sec; + ts->tv_nsec = 0; +@@ -1315,10 +1327,10 @@ struct timespec current_kernel_time(void + unsigned long seq; + + do { +- seq = read_seqbegin(&timekeeper_lock); ++ seq = read_seqcount_begin(&timekeeper_seq); + + now = tk_xtime(tk); +- } while (read_seqretry(&timekeeper_lock, seq)); 
++ } while (read_seqcount_retry(&timekeeper_seq, seq)); + + return now; + } +@@ -1331,11 +1343,11 @@ struct timespec get_monotonic_coarse(voi + unsigned long seq; + + do { +- seq = read_seqbegin(&timekeeper_lock); ++ seq = read_seqcount_begin(&timekeeper_seq); + + now = tk_xtime(tk); + mono = tk->wall_to_monotonic; +- } while (read_seqretry(&timekeeper_lock, seq)); ++ } while (read_seqcount_retry(&timekeeper_seq, seq)); + + set_normalized_timespec(&now, now.tv_sec + mono.tv_sec, + now.tv_nsec + mono.tv_nsec); +@@ -1366,11 +1378,11 @@ void get_xtime_and_monotonic_and_sleep_o + unsigned long seq; + + do { +- seq = read_seqbegin(&timekeeper_lock); ++ seq = read_seqcount_begin(&timekeeper_seq); + *xtim = tk_xtime(tk); + *wtom = tk->wall_to_monotonic; + *sleep = tk->total_sleep_time; +- } while (read_seqretry(&timekeeper_lock, seq)); ++ } while (read_seqcount_retry(&timekeeper_seq, seq)); + } + + #ifdef CONFIG_HIGH_RES_TIMERS +@@ -1390,14 +1402,14 @@ ktime_t ktime_get_update_offsets(ktime_t + u64 secs, nsecs; + + do { +- seq = read_seqbegin(&timekeeper_lock); ++ seq = read_seqcount_begin(&timekeeper_seq); + + secs = tk->xtime_sec; + nsecs = timekeeping_get_ns(tk); + + *offs_real = tk->offs_real; + *offs_boot = tk->offs_boot; +- } while (read_seqretry(&timekeeper_lock, seq)); ++ } while (read_seqcount_retry(&timekeeper_seq, seq)); + + now = ktime_add_ns(ktime_set(secs, 0), nsecs); + now = ktime_sub(now, *offs_real); +@@ -1415,9 +1427,9 @@ ktime_t ktime_get_monotonic_offset(void) + struct timespec wtom; + + do { +- seq = read_seqbegin(&timekeeper_lock); ++ seq = read_seqcount_begin(&timekeeper_seq); + wtom = tk->wall_to_monotonic; +- } while (read_seqretry(&timekeeper_lock, seq)); ++ } while (read_seqcount_retry(&timekeeper_seq, seq)); + + return timespec_to_ktime(wtom); + } diff --git a/debian/patches/features/all/rt/timekeeping-split-xtime-lock.patch b/debian/patches/features/all/rt/timekeeping-split-xtime-lock.patch deleted file mode 100644 index 7fb90dbfd..000000000 
--- a/debian/patches/features/all/rt/timekeeping-split-xtime-lock.patch +++ /dev/null @@ -1,527 +0,0 @@ -Subject: timekeeping: Split xtime_lock -From: Thomas Gleixner -Date: Thu, 01 Mar 2012 15:14:06 +0100 - -xtime_lock is going to be split apart in mainline, so we can shorten -the seqcount protected regions and avoid updating seqcount in some -code pathes. This is a straight forward split, so we can avoid the -whole mess with raw seqlocks for RT. - -Signed-off-by: Thomas Gleixner ---- - kernel/time/jiffies.c | 4 - - kernel/time/tick-common.c | 10 ++- - kernel/time/tick-internal.h | 3 - - kernel/time/tick-sched.c | 16 +++--- - kernel/time/timekeeping.c | 116 +++++++++++++++++++++++++------------------- - 5 files changed, 88 insertions(+), 61 deletions(-) - -Index: linux-stable/kernel/time/jiffies.c -=================================================================== ---- linux-stable.orig/kernel/time/jiffies.c -+++ linux-stable/kernel/time/jiffies.c -@@ -74,9 +74,9 @@ u64 get_jiffies_64(void) - u64 ret; - - do { -- seq = read_seqbegin(&xtime_lock); -+ seq = read_seqcount_begin(&xtime_seq); - ret = jiffies_64; -- } while (read_seqretry(&xtime_lock, seq)); -+ } while (read_seqcount_retry(&xtime_seq, seq)); - return ret; - } - EXPORT_SYMBOL(get_jiffies_64); -Index: linux-stable/kernel/time/tick-common.c -=================================================================== ---- linux-stable.orig/kernel/time/tick-common.c -+++ linux-stable/kernel/time/tick-common.c -@@ -63,13 +63,15 @@ int tick_is_oneshot_available(void) - static void tick_periodic(int cpu) - { - if (tick_do_timer_cpu == cpu) { -- write_seqlock(&xtime_lock); -+ raw_spin_lock(&xtime_lock); -+ write_seqcount_begin(&xtime_seq); - - /* Keep track of the next tick event */ - tick_next_period = ktime_add(tick_next_period, tick_period); - - do_timer(1); -- write_sequnlock(&xtime_lock); -+ write_seqcount_end(&xtime_seq); -+ raw_spin_unlock(&xtime_lock); - } - - update_process_times(user_mode(get_irq_regs())); 
-@@ -130,9 +132,9 @@ void tick_setup_periodic(struct clock_ev - ktime_t next; - - do { -- seq = read_seqbegin(&xtime_lock); -+ seq = read_seqcount_begin(&xtime_seq); - next = tick_next_period; -- } while (read_seqretry(&xtime_lock, seq)); -+ } while (read_seqcount_retry(&xtime_seq, seq)); - - clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); - -Index: linux-stable/kernel/time/tick-internal.h -=================================================================== ---- linux-stable.orig/kernel/time/tick-internal.h -+++ linux-stable/kernel/time/tick-internal.h -@@ -141,4 +141,5 @@ static inline int tick_device_is_functio - #endif - - extern void do_timer(unsigned long ticks); --extern seqlock_t xtime_lock; -+extern raw_spinlock_t xtime_lock; -+extern seqcount_t xtime_seq; -Index: linux-stable/kernel/time/tick-sched.c -=================================================================== ---- linux-stable.orig/kernel/time/tick-sched.c -+++ linux-stable/kernel/time/tick-sched.c -@@ -56,7 +56,8 @@ static void tick_do_update_jiffies64(kti - return; - - /* Reevalute with xtime_lock held */ -- write_seqlock(&xtime_lock); -+ raw_spin_lock(&xtime_lock); -+ write_seqcount_begin(&xtime_seq); - - delta = ktime_sub(now, last_jiffies_update); - if (delta.tv64 >= tick_period.tv64) { -@@ -79,7 +80,8 @@ static void tick_do_update_jiffies64(kti - /* Keep the tick_next_period variable up to date */ - tick_next_period = ktime_add(last_jiffies_update, tick_period); - } -- write_sequnlock(&xtime_lock); -+ write_seqcount_end(&xtime_seq); -+ raw_spin_unlock(&xtime_lock); - } - - /* -@@ -89,12 +91,14 @@ static ktime_t tick_init_jiffy_update(vo - { - ktime_t period; - -- write_seqlock(&xtime_lock); -+ raw_spin_lock(&xtime_lock); -+ write_seqcount_begin(&xtime_seq); - /* Did we start the jiffies update yet ? 
*/ - if (last_jiffies_update.tv64 == 0) - last_jiffies_update = tick_next_period; - period = last_jiffies_update; -- write_sequnlock(&xtime_lock); -+ write_seqcount_end(&xtime_seq); -+ raw_spin_unlock(&xtime_lock); - return period; - } - -@@ -282,11 +286,11 @@ static ktime_t tick_nohz_stop_sched_tick - - /* Read jiffies and the time when jiffies were updated last */ - do { -- seq = read_seqbegin(&xtime_lock); -+ seq = read_seqcount_begin(&xtime_seq); - last_update = last_jiffies_update; - last_jiffies = jiffies; - time_delta = timekeeping_max_deferment(); -- } while (read_seqretry(&xtime_lock, seq)); -+ } while (read_seqcount_retry(&xtime_seq, seq)); - - if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) || - arch_needs_cpu(cpu)) { -Index: linux-stable/kernel/time/timekeeping.c -=================================================================== ---- linux-stable.orig/kernel/time/timekeeping.c -+++ linux-stable/kernel/time/timekeeping.c -@@ -74,7 +74,8 @@ struct timekeeper { - /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */ - struct timespec raw_time; - /* Seqlock for all timekeeper values */ -- seqlock_t lock; -+ seqcount_t seq; -+ raw_spinlock_t lock; - }; - - static struct timekeeper timekeeper; -@@ -83,7 +84,8 @@ static struct timekeeper timekeeper; - * This read-write spinlock protects us from races in SMP while - * playing with xtime. 
- */ --__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock); -+__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(xtime_lock); -+seqcount_t xtime_seq; - - /* flag for if timekeeping is suspended */ - int __read_mostly timekeeping_suspended; -@@ -300,12 +302,12 @@ void getnstimeofday(struct timespec *ts) - WARN_ON(timekeeping_suspended); - - do { -- seq = read_seqbegin(&tk->lock); -+ seq = read_seqcount_begin(&tk->seq); - - ts->tv_sec = tk->xtime_sec; - nsecs = timekeeping_get_ns(tk); - -- } while (read_seqretry(&tk->lock, seq)); -+ } while (read_seqcount_retry(&tk->seq, seq)); - - ts->tv_nsec = 0; - timespec_add_ns(ts, nsecs); -@@ -321,11 +323,11 @@ ktime_t ktime_get(void) - WARN_ON(timekeeping_suspended); - - do { -- seq = read_seqbegin(&tk->lock); -+ seq = read_seqcount_begin(&tk->seq); - secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; - nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec; - -- } while (read_seqretry(&tk->lock, seq)); -+ } while (read_seqcount_retry(&tk->seq, seq)); - /* - * Use ktime_set/ktime_add_ns to create a proper ktime on - * 32-bit architectures without CONFIG_KTIME_SCALAR. 
-@@ -352,12 +354,12 @@ void ktime_get_ts(struct timespec *ts) - WARN_ON(timekeeping_suspended); - - do { -- seq = read_seqbegin(&tk->lock); -+ seq = read_seqcount_begin(&tk->seq); - ts->tv_sec = tk->xtime_sec; - nsec = timekeeping_get_ns(tk); - tomono = tk->wall_to_monotonic; - -- } while (read_seqretry(&tk->lock, seq)); -+ } while (read_seqcount_retry(&tk->seq, seq)); - - ts->tv_sec += tomono.tv_sec; - ts->tv_nsec = 0; -@@ -385,7 +387,7 @@ void getnstime_raw_and_real(struct times - WARN_ON_ONCE(timekeeping_suspended); - - do { -- seq = read_seqbegin(&tk->lock); -+ seq = read_seqcount_begin(&tk->seq); - - *ts_raw = tk->raw_time; - ts_real->tv_sec = tk->xtime_sec; -@@ -394,7 +396,7 @@ void getnstime_raw_and_real(struct times - nsecs_raw = timekeeping_get_ns_raw(tk); - nsecs_real = timekeeping_get_ns(tk); - -- } while (read_seqretry(&tk->lock, seq)); -+ } while (read_seqcount_retry(&tk->seq, seq)); - - timespec_add_ns(ts_raw, nsecs_raw); - timespec_add_ns(ts_real, nsecs_real); -@@ -434,7 +436,8 @@ int do_settimeofday(const struct timespe - if (!timespec_valid_strict(tv)) - return -EINVAL; - -- write_seqlock_irqsave(&tk->lock, flags); -+ raw_spin_lock_irqsave(&tk->lock, flags); -+ write_seqcount_begin(&tk->seq); - - timekeeping_forward_now(tk); - -@@ -448,7 +451,8 @@ int do_settimeofday(const struct timespe - - timekeeping_update(tk, true); - -- write_sequnlock_irqrestore(&tk->lock, flags); -+ write_seqcount_end(&tk->seq); -+ raw_spin_unlock_irqrestore(&tk->lock, flags); - - /* signal hrtimers about time change */ - clock_was_set(); -@@ -473,7 +477,8 @@ int timekeeping_inject_offset(struct tim - if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) - return -EINVAL; - -- write_seqlock_irqsave(&tk->lock, flags); -+ raw_spin_lock_irqsave(&tk->lock, flags); -+ write_seqcount_begin(&tk->seq); - - timekeeping_forward_now(tk); - -@@ -490,7 +495,8 @@ int timekeeping_inject_offset(struct tim - error: /* even if we error out, we forwarded the time, so call update */ - 
timekeeping_update(tk, true); - -- write_sequnlock_irqrestore(&tk->lock, flags); -+ write_seqcount_end(&tk->seq); -+ raw_spin_unlock_irqrestore(&tk->lock, flags); - - /* signal hrtimers about time change */ - clock_was_set(); -@@ -512,7 +518,8 @@ static int change_clocksource(void *data - - new = (struct clocksource *) data; - -- write_seqlock_irqsave(&tk->lock, flags); -+ raw_spin_lock_irqsave(&tk->lock, flags); -+ write_seqcount_begin(&tk->seq); - - timekeeping_forward_now(tk); - if (!new->enable || new->enable(new) == 0) { -@@ -523,7 +530,8 @@ static int change_clocksource(void *data - } - timekeeping_update(tk, true); - -- write_sequnlock_irqrestore(&tk->lock, flags); -+ write_seqcount_end(&tk->seq); -+ raw_spin_unlock_irqrestore(&tk->lock, flags); - - return 0; - } -@@ -573,11 +581,11 @@ void getrawmonotonic(struct timespec *ts - s64 nsecs; - - do { -- seq = read_seqbegin(&tk->lock); -+ seq = read_seqcount_begin(&tk->seq); - nsecs = timekeeping_get_ns_raw(tk); - *ts = tk->raw_time; - -- } while (read_seqretry(&tk->lock, seq)); -+ } while (read_seqcount_retry(&tk->seq, seq)); - - timespec_add_ns(ts, nsecs); - } -@@ -593,11 +601,11 @@ int timekeeping_valid_for_hres(void) - int ret; - - do { -- seq = read_seqbegin(&tk->lock); -+ seq = read_seqcount_begin(&tk->seq); - - ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; - -- } while (read_seqretry(&tk->lock, seq)); -+ } while (read_seqcount_retry(&tk->seq, seq)); - - return ret; - } -@@ -612,11 +620,11 @@ u64 timekeeping_max_deferment(void) - u64 ret; - - do { -- seq = read_seqbegin(&tk->lock); -+ seq = read_seqcount_begin(&tk->seq); - - ret = tk->clock->max_idle_ns; - -- } while (read_seqretry(&tk->lock, seq)); -+ } while (read_seqcount_retry(&tk->seq, seq)); - - return ret; - } -@@ -677,11 +685,13 @@ void __init timekeeping_init(void) - boot.tv_nsec = 0; - } - -- seqlock_init(&tk->lock); -+ raw_spin_lock_init(&tk->lock); -+ seqcount_init(&tk->seq); - - ntp_init(); - -- write_seqlock_irqsave(&tk->lock, flags); 
-+ raw_spin_lock_irqsave(&tk->lock, flags); -+ write_seqcount_begin(&tk->seq); - clock = clocksource_default_clock(); - if (clock->enable) - clock->enable(clock); -@@ -700,7 +710,8 @@ void __init timekeeping_init(void) - tmp.tv_nsec = 0; - tk_set_sleep_time(tk, tmp); - -- write_sequnlock_irqrestore(&tk->lock, flags); -+ write_seqcount_end(&tk->seq); -+ raw_spin_unlock_irqrestore(&tk->lock, flags); - } - - /* time in seconds when suspend began */ -@@ -747,7 +758,8 @@ void timekeeping_inject_sleeptime(struct - if (!(ts.tv_sec == 0 && ts.tv_nsec == 0)) - return; - -- write_seqlock_irqsave(&tk->lock, flags); -+ raw_spin_lock_irqsave(&tk->lock, flags); -+ write_seqcount_begin(&tk->seq); - - timekeeping_forward_now(tk); - -@@ -755,7 +767,8 @@ void timekeeping_inject_sleeptime(struct - - timekeeping_update(tk, true); - -- write_sequnlock_irqrestore(&tk->lock, flags); -+ write_seqcount_end(&tk->seq); -+ raw_spin_unlock_irqrestore(&tk->lock, flags); - - /* signal hrtimers about time change */ - clock_was_set(); -@@ -778,7 +791,8 @@ static void timekeeping_resume(void) - - clocksource_resume(); - -- write_seqlock_irqsave(&tk->lock, flags); -+ raw_spin_lock_irqsave(&tk->lock, flags); -+ write_seqcount_begin(&tk->seq); - - if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) { - ts = timespec_sub(ts, timekeeping_suspend_time); -@@ -789,7 +803,8 @@ static void timekeeping_resume(void) - tk->ntp_error = 0; - timekeeping_suspended = 0; - timekeeping_update(tk, false); -- write_sequnlock_irqrestore(&tk->lock, flags); -+ write_seqcount_end(&tk->seq); -+ raw_spin_unlock_irqrestore(&tk->lock, flags); - - touch_softlockup_watchdog(); - -@@ -808,7 +823,8 @@ static int timekeeping_suspend(void) - - read_persistent_clock(&timekeeping_suspend_time); - -- write_seqlock_irqsave(&tk->lock, flags); -+ raw_spin_lock_irqsave(&tk->lock, flags); -+ write_seqcount_begin(&tk->seq); - timekeeping_forward_now(tk); - timekeeping_suspended = 1; - -@@ -831,7 +847,8 @@ static int 
timekeeping_suspend(void) - timekeeping_suspend_time = - timespec_add(timekeeping_suspend_time, delta_delta); - } -- write_sequnlock_irqrestore(&tk->lock, flags); -+ write_seqcount_end(&tk->seq); -+ raw_spin_unlock_irqrestore(&tk->lock, flags); - - clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL); - clocksource_suspend(); -@@ -1141,7 +1158,8 @@ static void update_wall_time(void) - unsigned long flags; - s64 remainder; - -- write_seqlock_irqsave(&tk->lock, flags); -+ raw_spin_lock_irqsave(&tk->lock, flags); -+ write_seqcount_begin(&tk->seq); - - /* Make sure we're fully resumed: */ - if (unlikely(timekeeping_suspended)) -@@ -1205,8 +1223,8 @@ static void update_wall_time(void) - timekeeping_update(tk, false); - - out: -- write_sequnlock_irqrestore(&tk->lock, flags); -- -+ write_seqcount_end(&tk->seq); -+ raw_spin_unlock_irqrestore(&tk->lock, flags); - } - - /** -@@ -1253,13 +1271,13 @@ void get_monotonic_boottime(struct times - WARN_ON(timekeeping_suspended); - - do { -- seq = read_seqbegin(&tk->lock); -+ seq = read_seqcount_begin(&tk->seq); - ts->tv_sec = tk->xtime_sec; - nsec = timekeeping_get_ns(tk); - tomono = tk->wall_to_monotonic; - sleep = tk->total_sleep_time; - -- } while (read_seqretry(&tk->lock, seq)); -+ } while (read_seqcount_retry(&tk->seq, seq)); - - ts->tv_sec += tomono.tv_sec + sleep.tv_sec; - ts->tv_nsec = 0; -@@ -1318,10 +1336,10 @@ struct timespec current_kernel_time(void - unsigned long seq; - - do { -- seq = read_seqbegin(&tk->lock); -+ seq = read_seqcount_begin(&tk->seq); - - now = tk_xtime(tk); -- } while (read_seqretry(&tk->lock, seq)); -+ } while (read_seqcount_retry(&tk->seq, seq)); - - return now; - } -@@ -1334,11 +1352,11 @@ struct timespec get_monotonic_coarse(voi - unsigned long seq; - - do { -- seq = read_seqbegin(&tk->lock); -+ seq = read_seqcount_begin(&tk->seq); - - now = tk_xtime(tk); - mono = tk->wall_to_monotonic; -- } while (read_seqretry(&tk->lock, seq)); -+ } while (read_seqcount_retry(&tk->seq, seq)); - - 
set_normalized_timespec(&now, now.tv_sec + mono.tv_sec, - now.tv_nsec + mono.tv_nsec); -@@ -1371,11 +1389,11 @@ void get_xtime_and_monotonic_and_sleep_o - unsigned long seq; - - do { -- seq = read_seqbegin(&tk->lock); -+ seq = read_seqcount_begin(&tk->seq); - *xtim = tk_xtime(tk); - *wtom = tk->wall_to_monotonic; - *sleep = tk->total_sleep_time; -- } while (read_seqretry(&tk->lock, seq)); -+ } while (read_seqcount_retry(&tk->seq, seq)); - } - - #ifdef CONFIG_HIGH_RES_TIMERS -@@ -1395,14 +1413,14 @@ ktime_t ktime_get_update_offsets(ktime_t - u64 secs, nsecs; - - do { -- seq = read_seqbegin(&tk->lock); -+ seq = read_seqcount_begin(&tk->seq); - - secs = tk->xtime_sec; - nsecs = timekeeping_get_ns(tk); - - *offs_real = tk->offs_real; - *offs_boot = tk->offs_boot; -- } while (read_seqretry(&tk->lock, seq)); -+ } while (read_seqcount_retry(&tk->seq, seq)); - - now = ktime_add_ns(ktime_set(secs, 0), nsecs); - now = ktime_sub(now, *offs_real); -@@ -1420,9 +1438,9 @@ ktime_t ktime_get_monotonic_offset(void) - struct timespec wtom; - - do { -- seq = read_seqbegin(&tk->lock); -+ seq = read_seqcount_begin(&tk->seq); - wtom = tk->wall_to_monotonic; -- } while (read_seqretry(&tk->lock, seq)); -+ } while (read_seqcount_retry(&tk->seq, seq)); - - return timespec_to_ktime(wtom); - } -@@ -1436,7 +1454,9 @@ EXPORT_SYMBOL_GPL(ktime_get_monotonic_of - */ - void xtime_update(unsigned long ticks) - { -- write_seqlock(&xtime_lock); -+ raw_spin_lock(&xtime_lock); -+ write_seqcount_begin(&xtime_seq); - do_timer(ticks); -- write_sequnlock(&xtime_lock); -+ write_seqcount_end(&xtime_seq); -+ raw_spin_unlock(&xtime_lock); - } diff --git a/debian/patches/features/all/rt/timekeeping-store-cycle-last-in-timekeeper.patch b/debian/patches/features/all/rt/timekeeping-store-cycle-last-in-timekeeper.patch new file mode 100644 index 000000000..c69aadb50 --- /dev/null +++ b/debian/patches/features/all/rt/timekeeping-store-cycle-last-in-timekeeper.patch @@ -0,0 +1,47 @@ +Subject: timekeeping: Store 
cycle_last value in timekeeper struct as well +From: Thomas Gleixner +Date: Fri, 15 Feb 2013 17:15:49 +0100 + +For implementing a shadow timekeeper and a split calculation/update +region we need to store the cycle_last value in the timekeeper and +update the value in the clocksource struct only in the update region. + +Add the extra storage to the timekeeper. + +Signed-off-by: Thomas Gleixner +--- + include/linux/timekeeper_internal.h | 2 ++ + kernel/time/timekeeping.c | 4 ++-- + 2 files changed, 4 insertions(+), 2 deletions(-) + +--- a/include/linux/timekeeper_internal.h ++++ b/include/linux/timekeeper_internal.h +@@ -20,6 +20,8 @@ struct timekeeper { + u32 shift; + /* Number of clock cycles in one NTP interval. */ + cycle_t cycle_interval; ++ /* Last cycle value (also stored in clock->cycle_last) */ ++ cycle_t cycle_last; + /* Number of clock shifted nano seconds in one NTP interval. */ + u64 xtime_interval; + /* shifted nano seconds left over when rounding cycle_interval */ +--- a/kernel/time/timekeeping.c ++++ b/kernel/time/timekeeping.c +@@ -96,7 +96,7 @@ static void tk_setup_internals(struct ti + + old_clock = tk->clock; + tk->clock = clock; +- clock->cycle_last = clock->read(clock); ++ tk->cycle_last = clock->cycle_last = clock->read(clock); + + /* Do the ns -> cycle conversion first, using original mult */ + tmp = NTP_INTERVAL_LENGTH; +@@ -247,7 +247,7 @@ static void timekeeping_forward_now(stru + clock = tk->clock; + cycle_now = clock->read(clock); + cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; +- clock->cycle_last = cycle_now; ++ tk->cycle_last = clock->cycle_last = cycle_now; + + tk->xtime_nsec += cycle_delta * tk->mult; + diff --git a/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch b/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch index 200eba3fb..9eb2d06c2 100644 --- a/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch +++ 
b/debian/patches/features/all/rt/timer-delay-waking-softirqs-from-the-jiffy-tick.patch @@ -56,11 +56,9 @@ Signed-off-by: Thomas Gleixner kernel/timer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -Index: linux-stable/kernel/timer.c -=================================================================== ---- linux-stable.orig/kernel/timer.c -+++ linux-stable/kernel/timer.c -@@ -1400,13 +1400,13 @@ void update_process_times(int user_tick) +--- a/kernel/timer.c ++++ b/kernel/timer.c +@@ -1387,13 +1387,13 @@ void update_process_times(int user_tick) /* Note: this timer irq context must be accounted for as well. */ account_process_tick(p, user_tick); diff --git a/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch b/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch index 8f6afe7f3..699bfff81 100644 --- a/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch +++ b/debian/patches/features/all/rt/timer-fd-avoid-live-lock.patch @@ -14,11 +14,9 @@ Cc: stable-rt@vger.kernel.org fs/timerfd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -Index: linux-stable/fs/timerfd.c -=================================================================== ---- linux-stable.orig/fs/timerfd.c -+++ linux-stable/fs/timerfd.c -@@ -313,7 +313,7 @@ SYSCALL_DEFINE4(timerfd_settime, int, uf +--- a/fs/timerfd.c ++++ b/fs/timerfd.c +@@ -311,7 +311,7 @@ SYSCALL_DEFINE4(timerfd_settime, int, uf if (hrtimer_try_to_cancel(&ctx->tmr) >= 0) break; spin_unlock_irq(&ctx->wqh.lock); diff --git a/debian/patches/features/all/rt/timer-handle-idle-trylock-in-get-next-timer-irq.patch b/debian/patches/features/all/rt/timer-handle-idle-trylock-in-get-next-timer-irq.patch index 9fcb4a2a8..729696cb2 100644 --- a/debian/patches/features/all/rt/timer-handle-idle-trylock-in-get-next-timer-irq.patch +++ b/debian/patches/features/all/rt/timer-handle-idle-trylock-in-get-next-timer-irq.patch @@ -6,13 +6,11 @@ Signed-off-by: Thomas Gleixner --- include/linux/spinlock_rt.h | 12 +++++++++++- 
kernel/rtmutex.c | 7 +------ - kernel/timer.c | 7 ++++--- - 3 files changed, 16 insertions(+), 10 deletions(-) + kernel/timer.c | 9 +++++++-- + 3 files changed, 19 insertions(+), 9 deletions(-) -Index: linux-stable/include/linux/spinlock_rt.h -=================================================================== ---- linux-stable.orig/include/linux/spinlock_rt.h -+++ linux-stable/include/linux/spinlock_rt.h +--- a/include/linux/spinlock_rt.h ++++ b/include/linux/spinlock_rt.h @@ -53,7 +53,17 @@ extern void __lockfunc __rt_spin_unlock( #define spin_lock_irq(lock) spin_lock(lock) @@ -32,10 +30,8 @@ Index: linux-stable/include/linux/spinlock_rt.h #ifdef CONFIG_LOCKDEP # define spin_lock_nested(lock, subclass) \ -Index: linux-stable/kernel/rtmutex.c -=================================================================== ---- linux-stable.orig/kernel/rtmutex.c -+++ linux-stable/kernel/rtmutex.c +--- a/kernel/rtmutex.c ++++ b/kernel/rtmutex.c @@ -861,15 +861,10 @@ EXPORT_SYMBOL(rt_spin_unlock_wait); int __lockfunc rt_spin_trylock(spinlock_t *lock) @@ -53,11 +49,9 @@ Index: linux-stable/kernel/rtmutex.c return ret; } EXPORT_SYMBOL(rt_spin_trylock); -Index: linux-stable/kernel/timer.c -=================================================================== ---- linux-stable.orig/kernel/timer.c -+++ linux-stable/kernel/timer.c -@@ -1393,9 +1393,10 @@ unsigned long get_next_timer_interrupt(u +--- a/kernel/timer.c ++++ b/kernel/timer.c +@@ -1380,9 +1380,10 @@ unsigned long get_next_timer_interrupt(u /* * On PREEMPT_RT we cannot sleep here. 
If the trylock does not * succeed then we return the worst-case 'expires in 1 tick' @@ -70,12 +64,15 @@ Index: linux-stable/kernel/timer.c return now + 1; #else spin_lock(&base->lock); -@@ -1405,7 +1406,7 @@ unsigned long get_next_timer_interrupt(u +@@ -1392,7 +1393,11 @@ unsigned long get_next_timer_interrupt(u base->next_timer = __next_timer_interrupt(base); expires = base->next_timer; } -- spin_unlock(&base->lock); ++#ifdef CONFIG_PREEMPT_RT_FULL + rt_spin_unlock(&base->lock); ++#else + spin_unlock(&base->lock); ++#endif if (time_before_eq(expires, now)) return now; diff --git a/debian/patches/features/all/rt/timer.c-fix-build-fail-for-RT_FULL.patch b/debian/patches/features/all/rt/timer.c-fix-build-fail-for-RT_FULL.patch deleted file mode 100644 index d65632681..000000000 --- a/debian/patches/features/all/rt/timer.c-fix-build-fail-for-RT_FULL.patch +++ /dev/null @@ -1,46 +0,0 @@ -From 1f9b191467b70ed79480294395ad99145fea1b5a Mon Sep 17 00:00:00 2001 -From: Paul Gortmaker -Date: Tue, 9 Oct 2012 20:20:10 -0400 -Subject: [PATCH] timer.c: fix build fail for ! RT_FULL -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -The patch "timer-handle-idle-trylock-in-get-next-timer-irq.patch" -introduces a use of rt_spin_unlock outside of the RT_FULL, but -since we have: - - #ifdef CONFIG_PREEMPT_RT_FULL - # include - #else /* PREEMPT_RT_FULL */ - -we will not get the definition for !RT_FULL and instead see: - -kernel/timer.c: In function ‘get_next_timer_interrupt’: -kernel/timer.c:1407: error: implicit declaration of function ‘rt_spin_unlock’ -make[2]: *** [kernel/timer.o] Error 1 - -Extend the ifdef usage to cover the unlock case too. 
- -Signed-off-by: Paul Gortmaker - ---- - kernel/timer.c | 4 ++++ - 1 file changed, 4 insertions(+) - -Index: linux-stable/kernel/timer.c -=================================================================== ---- linux-stable.orig/kernel/timer.c -+++ linux-stable/kernel/timer.c -@@ -1406,7 +1406,11 @@ unsigned long get_next_timer_interrupt(u - base->next_timer = __next_timer_interrupt(base); - expires = base->next_timer; - } -+#ifdef CONFIG_PREEMPT_RT_FULL - rt_spin_unlock(&base->lock); -+#else -+ spin_unlock(&base->lock); -+#endif - - if (time_before_eq(expires, now)) - return now; diff --git a/debian/patches/features/all/rt/timers-avoid-the-base-null-otptimization-on-rt.patch b/debian/patches/features/all/rt/timers-avoid-the-base-null-otptimization-on-rt.patch index d8481c4fa..aa2ff1352 100644 --- a/debian/patches/features/all/rt/timers-avoid-the-base-null-otptimization-on-rt.patch +++ b/debian/patches/features/all/rt/timers-avoid-the-base-null-otptimization-on-rt.patch @@ -10,11 +10,9 @@ Signed-off-by: Thomas Gleixner kernel/timer.c | 40 ++++++++++++++++++++++++++++++++-------- 1 file changed, 32 insertions(+), 8 deletions(-) -Index: linux-stable/kernel/timer.c -=================================================================== ---- linux-stable.orig/kernel/timer.c -+++ linux-stable/kernel/timer.c -@@ -739,6 +739,36 @@ static struct tvec_base *lock_timer_base +--- a/kernel/timer.c ++++ b/kernel/timer.c +@@ -717,6 +717,36 @@ static struct tvec_base *lock_timer_base } } @@ -51,7 +49,7 @@ Index: linux-stable/kernel/timer.c static inline int __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only, int pinned) -@@ -777,14 +807,8 @@ __mod_timer(struct timer_list *timer, un +@@ -755,14 +785,8 @@ __mod_timer(struct timer_list *timer, un * handler yet has not finished. This also guarantees that * the timer is serialized wrt itself. 
*/ diff --git a/debian/patches/features/all/rt/timers-mov-printk_tick-to-soft-interrupt.patch b/debian/patches/features/all/rt/timers-mov-printk_tick-to-soft-interrupt.patch index 4ec6b3eb4..79c194a6d 100644 --- a/debian/patches/features/all/rt/timers-mov-printk_tick-to-soft-interrupt.patch +++ b/debian/patches/features/all/rt/timers-mov-printk_tick-to-soft-interrupt.patch @@ -9,11 +9,9 @@ Signed-off-by: Ingo Molnar kernel/timer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -Index: linux-stable/kernel/timer.c -=================================================================== ---- linux-stable.orig/kernel/timer.c -+++ linux-stable/kernel/timer.c -@@ -1402,7 +1402,6 @@ void update_process_times(int user_tick) +--- a/kernel/timer.c ++++ b/kernel/timer.c +@@ -1389,7 +1389,6 @@ void update_process_times(int user_tick) account_process_tick(p, user_tick); run_local_timers(); rcu_check_callbacks(cpu, user_tick); @@ -21,7 +19,7 @@ Index: linux-stable/kernel/timer.c #ifdef CONFIG_IRQ_WORK if (in_irq()) irq_work_run(); -@@ -1418,6 +1417,7 @@ static void run_timer_softirq(struct sof +@@ -1405,6 +1404,7 @@ static void run_timer_softirq(struct sof { struct tvec_base *base = __this_cpu_read(tvec_bases); diff --git a/debian/patches/features/all/rt/timers-preempt-rt-support.patch b/debian/patches/features/all/rt/timers-preempt-rt-support.patch index ccab1c7a7..436f8b39c 100644 --- a/debian/patches/features/all/rt/timers-preempt-rt-support.patch +++ b/debian/patches/features/all/rt/timers-preempt-rt-support.patch @@ -8,11 +8,9 @@ Signed-off-by: Thomas Gleixner kernel/timer.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) -Index: linux-stable/kernel/timer.c -=================================================================== ---- linux-stable.orig/kernel/timer.c -+++ linux-stable/kernel/timer.c -@@ -1365,7 +1365,17 @@ unsigned long get_next_timer_interrupt(u +--- a/kernel/timer.c ++++ b/kernel/timer.c +@@ -1352,7 +1352,17 @@ unsigned long 
get_next_timer_interrupt(u if (cpu_is_offline(smp_processor_id())) return expires; @@ -30,7 +28,7 @@ Index: linux-stable/kernel/timer.c if (base->active_timers) { if (time_before_eq(base->next_timer, base->timer_jiffies)) base->next_timer = __next_timer_interrupt(base); -@@ -1375,7 +1385,6 @@ unsigned long get_next_timer_interrupt(u +@@ -1362,7 +1372,6 @@ unsigned long get_next_timer_interrupt(u if (time_before_eq(expires, now)) return now; @@ -38,7 +36,7 @@ Index: linux-stable/kernel/timer.c return cmp_next_hrtimer_event(now, expires); } #endif -@@ -1765,7 +1774,7 @@ static void __cpuinit migrate_timers(int +@@ -1752,7 +1761,7 @@ static void __cpuinit migrate_timers(int BUG_ON(cpu_online(cpu)); old_base = per_cpu(tvec_bases, cpu); @@ -47,7 +45,7 @@ Index: linux-stable/kernel/timer.c /* * The caller is globally serialized and nobody else * takes two locks at once, deadlock is not possible. -@@ -1786,7 +1795,7 @@ static void __cpuinit migrate_timers(int +@@ -1773,7 +1782,7 @@ static void __cpuinit migrate_timers(int spin_unlock(&old_base->lock); spin_unlock_irq(&new_base->lock); diff --git a/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch b/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch index 6a4a6c07b..859159af3 100644 --- a/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch +++ b/debian/patches/features/all/rt/timers-prepare-for-full-preemption.patch @@ -11,14 +11,12 @@ Signed-off-by: Thomas Gleixner --- include/linux/timer.h | 2 +- - kernel/timer.c | 35 ++++++++++++++++++++++++++++++++--- - 2 files changed, 33 insertions(+), 4 deletions(-) + kernel/timer.c | 36 +++++++++++++++++++++++++++++++++--- + 2 files changed, 34 insertions(+), 4 deletions(-) -Index: linux-stable/include/linux/timer.h -=================================================================== ---- linux-stable.orig/include/linux/timer.h -+++ linux-stable/include/linux/timer.h -@@ -276,7 +276,7 @@ extern void add_timer(struct 
timer_list +--- a/include/linux/timer.h ++++ b/include/linux/timer.h +@@ -241,7 +241,7 @@ extern void add_timer(struct timer_list extern int try_to_del_timer_sync(struct timer_list *timer); @@ -27,10 +25,8 @@ Index: linux-stable/include/linux/timer.h extern int del_timer_sync(struct timer_list *timer); #else # define del_timer_sync(t) del_timer(t) -Index: linux-stable/kernel/timer.c -=================================================================== ---- linux-stable.orig/kernel/timer.c -+++ linux-stable/kernel/timer.c +--- a/kernel/timer.c ++++ b/kernel/timer.c @@ -76,6 +76,7 @@ struct tvec_root { struct tvec_base { spinlock_t lock; @@ -39,7 +35,7 @@ Index: linux-stable/kernel/timer.c unsigned long timer_jiffies; unsigned long next_timer; unsigned long active_timers; -@@ -757,12 +758,15 @@ __mod_timer(struct timer_list *timer, un +@@ -735,12 +736,15 @@ __mod_timer(struct timer_list *timer, un debug_activate(timer, expires); @@ -55,7 +51,7 @@ Index: linux-stable/kernel/timer.c new_base = per_cpu(tvec_bases, cpu); if (base != new_base) { -@@ -963,6 +967,29 @@ void add_timer_on(struct timer_list *tim +@@ -941,6 +945,29 @@ void add_timer_on(struct timer_list *tim } EXPORT_SYMBOL_GPL(add_timer_on); @@ -85,7 +81,7 @@ Index: linux-stable/kernel/timer.c /** * del_timer - deactive a timer. * @timer: the timer to be deactivated -@@ -1020,7 +1047,7 @@ int try_to_del_timer_sync(struct timer_l +@@ -998,7 +1025,7 @@ int try_to_del_timer_sync(struct timer_l } EXPORT_SYMBOL(try_to_del_timer_sync); @@ -94,7 +90,7 @@ Index: linux-stable/kernel/timer.c /** * del_timer_sync - deactivate a timer and wait for the handler to finish. 
* @timer: the timer to be deactivated -@@ -1080,7 +1107,7 @@ int del_timer_sync(struct timer_list *ti +@@ -1058,7 +1085,7 @@ int del_timer_sync(struct timer_list *ti int ret = try_to_del_timer_sync(timer); if (ret >= 0) return ret; @@ -103,12 +99,18 @@ Index: linux-stable/kernel/timer.c } } EXPORT_SYMBOL(del_timer_sync); -@@ -1194,10 +1221,11 @@ static inline void __run_timers(struct t - - spin_unlock_irq(&base->lock); - call_timer_fn(timer, fn, data); -+ base->running_timer = NULL; - spin_lock_irq(&base->lock); +@@ -1175,15 +1202,17 @@ static inline void __run_timers(struct t + if (irqsafe) { + spin_unlock(&base->lock); + call_timer_fn(timer, fn, data); ++ base->running_timer = NULL; + spin_lock(&base->lock); + } else { + spin_unlock_irq(&base->lock); + call_timer_fn(timer, fn, data); ++ base->running_timer = NULL; + spin_lock_irq(&base->lock); + } } } - base->running_timer = NULL; @@ -116,7 +118,7 @@ Index: linux-stable/kernel/timer.c spin_unlock_irq(&base->lock); } -@@ -1698,6 +1726,7 @@ static int __cpuinit init_timers_cpu(int +@@ -1684,6 +1713,7 @@ static int __cpuinit init_timers_cpu(int } spin_lock_init(&base->lock); diff --git a/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch b/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch index 3250535a1..df79a43f3 100644 --- a/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch +++ b/debian/patches/features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch @@ -25,11 +25,9 @@ Signed-off-by: Thomas Gleixner kernel/sched/core.c | 9 +++++++++ 1 file changed, 9 insertions(+) -Index: linux-stable/kernel/sched/core.c -=================================================================== ---- linux-stable.orig/kernel/sched/core.c -+++ linux-stable/kernel/sched/core.c -@@ -3544,7 +3544,16 @@ asmlinkage void __sched notrace preempt_ +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -3071,7 
+3071,16 @@ asmlinkage void __sched notrace preempt_ do { add_preempt_count_notrace(PREEMPT_ACTIVE); diff --git a/debian/patches/features/all/rt/treercu-use-simple-waitqueue.patch b/debian/patches/features/all/rt/treercu-use-simple-waitqueue.patch new file mode 100644 index 000000000..e89eb3940 --- /dev/null +++ b/debian/patches/features/all/rt/treercu-use-simple-waitqueue.patch @@ -0,0 +1,73 @@ +--- + kernel/rcutree.c | 13 +++++++------ + kernel/rcutree.h | 2 +- + 2 files changed, 8 insertions(+), 7 deletions(-) + +--- a/kernel/rcutree.c ++++ b/kernel/rcutree.c +@@ -1319,7 +1319,7 @@ static int __noreturn rcu_gp_kthread(voi + + /* Handle grace-period start. */ + for (;;) { +- wait_event_interruptible(rsp->gp_wq, ++ swait_event_interruptible(rsp->gp_wq, + rsp->gp_flags & + RCU_GP_FLAG_INIT); + if ((rsp->gp_flags & RCU_GP_FLAG_INIT) && +@@ -1338,7 +1338,7 @@ static int __noreturn rcu_gp_kthread(voi + } + for (;;) { + rsp->jiffies_force_qs = jiffies + j; +- ret = wait_event_interruptible_timeout(rsp->gp_wq, ++ ret = swait_event_interruptible_timeout(rsp->gp_wq, + (rsp->gp_flags & RCU_GP_FLAG_FQS) || + (!ACCESS_ONCE(rnp->qsmask) && + !rcu_preempt_blocked_readers_cgp(rnp)), +@@ -1423,7 +1423,7 @@ rcu_start_gp(struct rcu_state *rsp, unsi + local_irq_restore(flags); + + /* Wake up rcu_gp_kthread() to start the grace period. */ +- wake_up(&rsp->gp_wq); ++ swait_wake(&rsp->gp_wq); + } + + /* +@@ -1438,7 +1438,7 @@ static void rcu_report_qs_rsp(struct rcu + { + WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); + raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags); +- wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */ ++ swait_wake(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */ + } + + /* +@@ -2003,7 +2003,8 @@ static void force_quiescent_state(struct + } + rsp->gp_flags |= RCU_GP_FLAG_FQS; + raw_spin_unlock_irqrestore(&rnp_old->lock, flags); +- wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. 
*/ ++ /* Memory barrier implied by wake_up() path. */ ++ swait_wake(&rsp->gp_wq); + } + + /* +@@ -2999,7 +3000,7 @@ static void __init rcu_init_one(struct r + } + + rsp->rda = rda; +- init_waitqueue_head(&rsp->gp_wq); ++ init_swait_head(&rsp->gp_wq); + rnp = rsp->level[rcu_num_lvls - 1]; + for_each_possible_cpu(i) { + while (i > rnp->grphi) +--- a/kernel/rcutree.h ++++ b/kernel/rcutree.h +@@ -397,7 +397,7 @@ struct rcu_state { + unsigned long gpnum; /* Current gp number. */ + unsigned long completed; /* # of last completed gp. */ + struct task_struct *gp_kthread; /* Task for grace periods. */ +- wait_queue_head_t gp_wq; /* Where GP task waits. */ ++ struct swait_head gp_wq; /* Where GP task waits. */ + int gp_flags; /* Commands for GP task. */ + + /* End of fields guarded by root rcu_node's lock. */ diff --git a/debian/patches/features/all/rt/tty-use-local-irq-nort.patch b/debian/patches/features/all/rt/tty-use-local-irq-nort.patch deleted file mode 100644 index cecab62c5..000000000 --- a/debian/patches/features/all/rt/tty-use-local-irq-nort.patch +++ /dev/null @@ -1,49 +0,0 @@ -From: Thomas Gleixner -Date: Mon, 17 Aug 2009 19:49:19 +0200 -Subject: tty: Do not disable interrupts in put_ldisc on -rt - -Fixes the following on PREEMPT_RT: - -BUG: sleeping function called from invalid context at kernel/rtmutex.c:684 -in_atomic(): 0, irqs_disabled(): 1, pid: 9116, name: sshd -Pid: 9116, comm: sshd Not tainted 2.6.31-rc6-rt2 #6 -Call Trace: -[] __might_sleep+0xec/0xee -[] rt_spin_lock+0x34/0x75 -[ffffffff81064a83>] atomic_dec_and_spin_lock+0x36/0x54 -[] put_ldisc+0x57/0xa6 -[] tty_ldisc_hangup+0xe7/0x19f -[] do_tty_hangup+0xff/0x319 -[] tty_vhangup+0x15/0x17 -[] pty_close+0x127/0x12b -[] tty_release_dev+0x1ad/0x4c0 -.... 
- -Signed-off-by: Thomas Gleixner - ---- - drivers/tty/tty_ldisc.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -Index: linux-stable/drivers/tty/tty_ldisc.c -=================================================================== ---- linux-stable.orig/drivers/tty/tty_ldisc.c -+++ linux-stable/drivers/tty/tty_ldisc.c -@@ -52,7 +52,7 @@ static void put_ldisc(struct tty_ldisc * - * We really want an "atomic_dec_and_lock_irqsave()", - * but we don't have it, so this does it by hand. - */ -- local_irq_save(flags); -+ local_irq_save_nort(flags); - if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) { - struct tty_ldisc_ops *ldo = ld->ops; - -@@ -63,7 +63,7 @@ static void put_ldisc(struct tty_ldisc * - kfree(ld); - return; - } -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - wake_up(&ld->wq_idle); - } - diff --git a/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch index 442c38d55..01e1f4655 100644 --- a/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch +++ b/debian/patches/features/all/rt/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch @@ -35,11 +35,9 @@ Signed-off-by: Thomas Gleixner net/core/dev.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) -Index: linux-stable/net/core/dev.c -=================================================================== ---- linux-stable.orig/net/core/dev.c -+++ linux-stable/net/core/dev.c -@@ -2967,7 +2967,7 @@ int netif_rx(struct sk_buff *skb) +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -3092,7 +3092,7 @@ int netif_rx(struct sk_buff *skb) struct rps_dev_flow voidflow, *rflow = &voidflow; int cpu; @@ -48,7 +46,7 @@ Index: linux-stable/net/core/dev.c rcu_read_lock(); cpu = get_rps_cpu(skb->dev, skb, &rflow); -@@ -2977,13 +2977,13 @@ int netif_rx(struct sk_buff *skb) +@@ -3102,13 +3102,13 @@ int 
netif_rx(struct sk_buff *skb) ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); rcu_read_unlock(); diff --git a/debian/patches/features/all/rt/usb-fix-mouse-problem-copying-large-data.patch b/debian/patches/features/all/rt/usb-fix-mouse-problem-copying-large-data.patch index 16dc2c244..781d13406 100644 --- a/debian/patches/features/all/rt/usb-fix-mouse-problem-copying-large-data.patch +++ b/debian/patches/features/all/rt/usb-fix-mouse-problem-copying-large-data.patch @@ -15,11 +15,9 @@ Signed-off-by: Wu Zhangjin drivers/usb/host/ohci-hcd.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) -Index: linux-stable/drivers/usb/host/ohci-hcd.c -=================================================================== ---- linux-stable.orig/drivers/usb/host/ohci-hcd.c -+++ linux-stable/drivers/usb/host/ohci-hcd.c -@@ -829,9 +829,13 @@ static irqreturn_t ohci_irq (struct usb_ +--- a/drivers/usb/host/ohci-hcd.c ++++ b/drivers/usb/host/ohci-hcd.c +@@ -857,9 +857,13 @@ static irqreturn_t ohci_irq (struct usb_ } if (ints & OHCI_INTR_WDH) { diff --git a/debian/patches/features/all/rt/usb-hcd-use-local-irq-nort.patch b/debian/patches/features/all/rt/usb-hcd-use-local-irq-nort.patch index b3b232359..fc190ca61 100644 --- a/debian/patches/features/all/rt/usb-hcd-use-local-irq-nort.patch +++ b/debian/patches/features/all/rt/usb-hcd-use-local-irq-nort.patch @@ -12,11 +12,9 @@ Signed-off-by: Thomas Gleixner drivers/usb/core/hcd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) -Index: linux-stable/drivers/usb/core/hcd.c -=================================================================== ---- linux-stable.orig/drivers/usb/core/hcd.c -+++ linux-stable/drivers/usb/core/hcd.c -@@ -2158,7 +2158,7 @@ irqreturn_t usb_hcd_irq (int irq, void * +--- a/drivers/usb/core/hcd.c ++++ b/drivers/usb/core/hcd.c +@@ -2217,7 +2217,7 @@ irqreturn_t usb_hcd_irq (int irq, void * * when the first handler doesn't use it. So let's just * assume it's never used. 
*/ @@ -25,7 +23,7 @@ Index: linux-stable/drivers/usb/core/hcd.c if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd))) rc = IRQ_NONE; -@@ -2167,7 +2167,7 @@ irqreturn_t usb_hcd_irq (int irq, void * +@@ -2226,7 +2226,7 @@ irqreturn_t usb_hcd_irq (int irq, void * else rc = IRQ_HANDLED; diff --git a/debian/patches/features/all/rt/user-use-local-irq-nort.patch b/debian/patches/features/all/rt/user-use-local-irq-nort.patch index 4ab6f289f..3674e065b 100644 --- a/debian/patches/features/all/rt/user-use-local-irq-nort.patch +++ b/debian/patches/features/all/rt/user-use-local-irq-nort.patch @@ -11,11 +11,9 @@ Signed-off-by: Thomas Gleixner kernel/user.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) -Index: linux-stable/kernel/user.c -=================================================================== ---- linux-stable.orig/kernel/user.c -+++ linux-stable/kernel/user.c -@@ -147,11 +147,11 @@ void free_uid(struct user_struct *up) +--- a/kernel/user.c ++++ b/kernel/user.c +@@ -157,11 +157,11 @@ void free_uid(struct user_struct *up) if (!up) return; diff --git a/debian/patches/features/all/rt/wait-simple-implementation.patch b/debian/patches/features/all/rt/wait-simple-implementation.patch new file mode 100644 index 000000000..e47c729f7 --- /dev/null +++ b/debian/patches/features/all/rt/wait-simple-implementation.patch @@ -0,0 +1,337 @@ +From: Thomas Gleixner +Date: Mon Dec 12 12:29:04 2011 +0100 +Subject: wait-simple: Simple waitqueue implementation + +wait_queue is a swiss army knife and in most of the cases the +complexity is not needed. For RT waitqueues are a constant source of +trouble as we can't convert the head lock to a raw spinlock due to +fancy and long lasting callbacks. + +Provide a slim version, which allows RT to replace wait queues. This +should go mainline as well, as it lowers memory consumption and +runtime overhead. 
+ +Signed-off-by: Thomas Gleixner + +--- + include/linux/wait-simple.h | 231 ++++++++++++++++++++++++++++++++++++++++++++ + kernel/Makefile | 2 + kernel/wait-simple.c | 68 ++++++++++++ + 3 files changed, 300 insertions(+), 1 deletion(-) + +--- /dev/null ++++ b/include/linux/wait-simple.h +@@ -0,0 +1,231 @@ ++#ifndef _LINUX_WAIT_SIMPLE_H ++#define _LINUX_WAIT_SIMPLE_H ++ ++#include ++#include ++ ++#include ++ ++struct swaiter { ++ struct task_struct *task; ++ struct list_head node; ++}; ++ ++#define DEFINE_SWAITER(name) \ ++ struct swaiter name = { \ ++ .task = current, \ ++ .node = LIST_HEAD_INIT((name).node), \ ++ } ++ ++struct swait_head { ++ raw_spinlock_t lock; ++ struct list_head list; ++}; ++ ++#define DEFINE_SWAIT_HEAD(name) \ ++ struct swait_head name = { \ ++ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ ++ .list = LIST_HEAD_INIT((name).list), \ ++ } ++ ++extern void __init_swait_head(struct swait_head *h, struct lock_class_key *key); ++ ++#define init_swait_head(swh) \ ++ do { \ ++ static struct lock_class_key __key; \ ++ \ ++ __init_swait_head((swh), &__key); \ ++ } while (0) ++ ++/* ++ * Waiter functions ++ */ ++static inline bool swaiter_enqueued(struct swaiter *w) ++{ ++ return w->task != NULL; ++} ++ ++extern void swait_prepare(struct swait_head *head, struct swaiter *w, int state); ++extern void swait_finish(struct swait_head *head, struct swaiter *w); ++ ++/* ++ * Adds w to head->list. Must be called with head->lock locked. ++ */ ++static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w) ++{ ++ list_add(&w->node, &head->list); ++} ++ ++/* ++ * Removes w from head->list. Must be called with head->lock locked. 
++ */ ++static inline void __swait_dequeue(struct swaiter *w) ++{ ++ list_del_init(&w->node); ++} ++ ++/* ++ * Check whether a head has waiters enqueued ++ */ ++static inline bool swait_head_has_waiters(struct swait_head *h) ++{ ++ return !list_empty(&h->list); ++} ++ ++/* ++ * Wakeup functions ++ */ ++extern int __swait_wake(struct swait_head *head, unsigned int state); ++ ++static inline int swait_wake(struct swait_head *head) ++{ ++ return swait_head_has_waiters(head) ? ++ __swait_wake(head, TASK_NORMAL) : 0; ++} ++ ++static inline int swait_wake_interruptible(struct swait_head *head) ++{ ++ return swait_head_has_waiters(head) ? ++ __swait_wake(head, TASK_INTERRUPTIBLE) : 0; ++} ++ ++/* ++ * Event API ++ */ ++ ++#define __swait_event(wq, condition) \ ++do { \ ++ DEFINE_SWAITER(__wait); \ ++ \ ++ for (;;) { \ ++ swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \ ++ if (condition) \ ++ break; \ ++ schedule(); \ ++ } \ ++ swait_finish(&wq, &__wait); \ ++} while (0) ++ ++/** ++ * swait_event - sleep until a condition gets true ++ * @wq: the waitqueue to wait on ++ * @condition: a C expression for the event to wait for ++ * ++ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the ++ * @condition evaluates to true. The @condition is checked each time ++ * the waitqueue @wq is woken up. ++ * ++ * wake_up() has to be called after changing any variable that could ++ * change the result of the wait condition. 
++ */ ++#define swait_event(wq, condition) \ ++do { \ ++ if (condition) \ ++ break; \ ++ __swait_event(wq, condition); \ ++} while (0) ++ ++#define __swait_event_interruptible(wq, condition, ret) \ ++do { \ ++ DEFINE_SWAITER(__wait); \ ++ \ ++ for (;;) { \ ++ swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \ ++ if (condition) \ ++ break; \ ++ if (signal_pending(current)) { \ ++ ret = -ERESTARTSYS; \ ++ break; \ ++ } \ ++ schedule(); \ ++ } \ ++ swait_finish(&wq, &__wait); \ ++} while (0) ++ ++#define __swait_event_interruptible_timeout(wq, condition, ret) \ ++do { \ ++ DEFINE_SWAITER(__wait); \ ++ \ ++ for (;;) { \ ++ swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \ ++ if (condition) \ ++ break; \ ++ if (signal_pending(current)) { \ ++ ret = -ERESTARTSYS; \ ++ break; \ ++ } \ ++ ret = schedule_timeout(ret); \ ++ if (!ret) \ ++ break; \ ++ } \ ++ swait_finish(&wq, &__wait); \ ++} while (0) ++ ++/** ++ * swait_event_interruptible - sleep until a condition gets true ++ * @wq: the waitqueue to wait on ++ * @condition: a C expression for the event to wait for ++ * ++ * The process is put to sleep (TASK_INTERRUPTIBLE) until the ++ * @condition evaluates to true. The @condition is checked each time ++ * the waitqueue @wq is woken up. ++ * ++ * wake_up() has to be called after changing any variable that could ++ * change the result of the wait condition. 
++ */ ++#define swait_event_interruptible(wq, condition) \ ++({ \ ++ int __ret = 0; \ ++ if (!(condition)) \ ++ __swait_event_interruptible(wq, condition, __ret); \ ++ __ret; \ ++}) ++ ++#define swait_event_interruptible_timeout(wq, condition, timeout) \ ++({ \ ++ int __ret = timeout; \ ++ if (!(condition)) \ ++ __swait_event_interruptible_timeout(wq, condition, __ret); \ ++ __ret; \ ++}) ++ ++#define __swait_event_timeout(wq, condition, ret) \ ++do { \ ++ DEFINE_SWAITER(__wait); \ ++ \ ++ for (;;) { \ ++ swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \ ++ if (condition) \ ++ break; \ ++ ret = schedule_timeout(ret); \ ++ if (!ret) \ ++ break; \ ++ } \ ++ swait_finish(&wq, &__wait); \ ++} while (0) ++ ++/** ++ * swait_event_timeout - sleep until a condition gets true or a timeout elapses ++ * @wq: the waitqueue to wait on ++ * @condition: a C expression for the event to wait for ++ * @timeout: timeout, in jiffies ++ * ++ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the ++ * @condition evaluates to true. The @condition is checked each time ++ * the waitqueue @wq is woken up. ++ * ++ * wake_up() has to be called after changing any variable that could ++ * change the result of the wait condition. ++ * ++ * The function returns 0 if the @timeout elapsed, and the remaining ++ * jiffies if the condition evaluated to true before the timeout elapsed. 
++ */ ++#define swait_event_timeout(wq, condition, timeout) \ ++({ \ ++ long __ret = timeout; \ ++ if (!(condition)) \ ++ __swait_event_timeout(wq, condition, __ret); \ ++ __ret; \ ++}) ++ ++#endif +--- a/kernel/Makefile ++++ b/kernel/Makefile +@@ -10,7 +10,7 @@ obj-y = fork.o exec_domain.o panic.o + kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o \ + hrtimer.o nsproxy.o srcu.o semaphore.o \ + notifier.o ksysfs.o cred.o \ +- async.o range.o groups.o lglock.o smpboot.o ++ async.o range.o groups.o lglock.o smpboot.o wait-simple.o + + ifdef CONFIG_FUNCTION_TRACER + # Do not trace debug files and internal ftrace files +--- /dev/null ++++ b/kernel/wait-simple.c +@@ -0,0 +1,68 @@ ++/* ++ * Simple waitqueues without fancy flags and callbacks ++ * ++ * (C) 2011 Thomas Gleixner ++ * ++ * Based on kernel/wait.c ++ * ++ * For licencing details see kernel-base/COPYING ++ */ ++#include ++#include ++#include ++#include ++ ++void __init_swait_head(struct swait_head *head, struct lock_class_key *key) ++{ ++ raw_spin_lock_init(&head->lock); ++ lockdep_set_class(&head->lock, key); ++ INIT_LIST_HEAD(&head->list); ++} ++EXPORT_SYMBOL(__init_swait_head); ++ ++void swait_prepare(struct swait_head *head, struct swaiter *w, int state) ++{ ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&head->lock, flags); ++ w->task = current; ++ if (list_empty(&w->node)) ++ __swait_enqueue(head, w); ++ set_current_state(state); ++ raw_spin_unlock_irqrestore(&head->lock, flags); ++} ++EXPORT_SYMBOL(swait_prepare); ++ ++void swait_finish(struct swait_head *head, struct swaiter *w) ++{ ++ unsigned long flags; ++ ++ __set_current_state(TASK_RUNNING); ++ if (w->task) { ++ raw_spin_lock_irqsave(&head->lock, flags); ++ __swait_dequeue(w); ++ raw_spin_unlock_irqrestore(&head->lock, flags); ++ } ++} ++EXPORT_SYMBOL(swait_finish); ++ ++int __swait_wake(struct swait_head *head, unsigned int state) ++{ ++ struct swaiter *curr, *next; ++ unsigned long flags; ++ int woken = 0; ++ ++ 
raw_spin_lock_irqsave(&head->lock, flags); ++ ++ list_for_each_entry_safe(curr, next, &head->list, node) { ++ if (wake_up_state(curr->task, state)) { ++ __swait_dequeue(curr); ++ curr->task = NULL; ++ woken++; ++ } ++ } ++ ++ raw_spin_unlock_irqrestore(&head->lock, flags); ++ return woken; ++} ++EXPORT_SYMBOL(__swait_wake); diff --git a/debian/patches/features/all/rt/wait-simple-rework-for-completions.patch b/debian/patches/features/all/rt/wait-simple-rework-for-completions.patch new file mode 100644 index 000000000..ecf8adbce --- /dev/null +++ b/debian/patches/features/all/rt/wait-simple-rework-for-completions.patch @@ -0,0 +1,209 @@ +Subject: wait-simple: Rework for use with completions +From: Thomas Gleixner +Date: Thu, 10 Jan 2013 11:47:35 +0100 + +Signed-off-by: Thomas Gleixner +--- + include/linux/wait-simple.h | 56 +++++++---------------------------- + kernel/wait-simple.c | 69 ++++++++++++++++++++++++++++++++++++++------ + 2 files changed, 72 insertions(+), 53 deletions(-) + +--- a/include/linux/wait-simple.h ++++ b/include/linux/wait-simple.h +@@ -22,12 +22,14 @@ struct swait_head { + struct list_head list; + }; + +-#define DEFINE_SWAIT_HEAD(name) \ +- struct swait_head name = { \ ++#define SWAIT_HEAD_INITIALIZER(name) { \ + .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ + .list = LIST_HEAD_INIT((name).list), \ + } + ++#define DEFINE_SWAIT_HEAD(name) \ ++ struct swait_head name = SWAIT_HEAD_INITIALIZER(name) ++ + extern void __init_swait_head(struct swait_head *h, struct lock_class_key *key); + + #define init_swait_head(swh) \ +@@ -40,59 +42,25 @@ extern void __init_swait_head(struct swa + /* + * Waiter functions + */ +-static inline bool swaiter_enqueued(struct swaiter *w) +-{ +- return w->task != NULL; +-} +- ++extern void swait_prepare_locked(struct swait_head *head, struct swaiter *w); + extern void swait_prepare(struct swait_head *head, struct swaiter *w, int state); ++extern void swait_finish_locked(struct swait_head *head, struct swaiter *w); + 
extern void swait_finish(struct swait_head *head, struct swaiter *w); + + /* +- * Adds w to head->list. Must be called with head->lock locked. +- */ +-static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w) +-{ +- list_add(&w->node, &head->list); +-} +- +-/* +- * Removes w from head->list. Must be called with head->lock locked. +- */ +-static inline void __swait_dequeue(struct swaiter *w) +-{ +- list_del_init(&w->node); +-} +- +-/* +- * Check whether a head has waiters enqueued +- */ +-static inline bool swait_head_has_waiters(struct swait_head *h) +-{ +- return !list_empty(&h->list); +-} +- +-/* + * Wakeup functions + */ +-extern int __swait_wake(struct swait_head *head, unsigned int state); ++extern unsigned int __swait_wake(struct swait_head *head, unsigned int state, unsigned int num); ++extern unsigned int __swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num); + +-static inline int swait_wake(struct swait_head *head) +-{ +- return swait_head_has_waiters(head) ? +- __swait_wake(head, TASK_NORMAL) : 0; +-} +- +-static inline int swait_wake_interruptible(struct swait_head *head) +-{ +- return swait_head_has_waiters(head) ? +- __swait_wake(head, TASK_INTERRUPTIBLE) : 0; +-} ++#define swait_wake(head) __swait_wake(head, TASK_NORMAL, 1) ++#define swait_wake_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 1) ++#define swait_wake_all(head) __swait_wake(head, TASK_NORMAL, 0) ++#define swait_wake_all_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 0) + + /* + * Event API + */ +- + #define __swait_event(wq, condition) \ + do { \ + DEFINE_SWAITER(__wait); \ +--- a/kernel/wait-simple.c ++++ b/kernel/wait-simple.c +@@ -12,6 +12,24 @@ + #include + #include + ++/* Adds w to head->list. Must be called with head->lock locked. */ ++static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w) ++{ ++ list_add(&w->node, &head->list); ++} ++ ++/* Removes w from head->list. 
Must be called with head->lock locked. */ ++static inline void __swait_dequeue(struct swaiter *w) ++{ ++ list_del_init(&w->node); ++} ++ ++/* Check whether a head has waiters enqueued */ ++static inline bool swait_head_has_waiters(struct swait_head *h) ++{ ++ return !list_empty(&h->list); ++} ++ + void __init_swait_head(struct swait_head *head, struct lock_class_key *key) + { + raw_spin_lock_init(&head->lock); +@@ -20,19 +38,31 @@ void __init_swait_head(struct swait_head + } + EXPORT_SYMBOL(__init_swait_head); + ++void swait_prepare_locked(struct swait_head *head, struct swaiter *w) ++{ ++ w->task = current; ++ if (list_empty(&w->node)) ++ __swait_enqueue(head, w); ++} ++ + void swait_prepare(struct swait_head *head, struct swaiter *w, int state) + { + unsigned long flags; + + raw_spin_lock_irqsave(&head->lock, flags); +- w->task = current; +- if (list_empty(&w->node)) +- __swait_enqueue(head, w); +- set_current_state(state); ++ swait_prepare_locked(head, w); ++ __set_current_state(state); + raw_spin_unlock_irqrestore(&head->lock, flags); + } + EXPORT_SYMBOL(swait_prepare); + ++void swait_finish_locked(struct swait_head *head, struct swaiter *w) ++{ ++ __set_current_state(TASK_RUNNING); ++ if (w->task) ++ __swait_dequeue(w); ++} ++ + void swait_finish(struct swait_head *head, struct swaiter *w) + { + unsigned long flags; +@@ -46,22 +76,43 @@ void swait_finish(struct swait_head *hea + } + EXPORT_SYMBOL(swait_finish); + +-int __swait_wake(struct swait_head *head, unsigned int state) ++unsigned int ++__swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num) + { + struct swaiter *curr, *next; +- unsigned long flags; + int woken = 0; + +- raw_spin_lock_irqsave(&head->lock, flags); +- + list_for_each_entry_safe(curr, next, &head->list, node) { + if (wake_up_state(curr->task, state)) { + __swait_dequeue(curr); ++ /* ++ * The waiting task can free the waiter as ++ * soon as curr->task = NULL is written, ++ * without taking any locks. 
A memory barrier ++ * is required here to prevent the following ++ * store to curr->task from getting ahead of ++ * the dequeue operation. ++ */ ++ smp_wmb(); + curr->task = NULL; +- woken++; ++ if (++woken == num) ++ break; + } + } ++ return woken; ++} ++ ++unsigned int ++__swait_wake(struct swait_head *head, unsigned int state, unsigned int num) ++{ ++ unsigned long flags; ++ int woken; + ++ if (!swait_head_has_waiters(head)) ++ return 0; ++ ++ raw_spin_lock_irqsave(&head->lock, flags); ++ woken = __swait_wake_locked(head, state, num); + raw_spin_unlock_irqrestore(&head->lock, flags); + return woken; + } diff --git a/debian/patches/features/all/rt/workqueue-use-get-cpu-light.patch b/debian/patches/features/all/rt/workqueue-use-get-cpu-light.patch deleted file mode 100644 index 88cad8f53..000000000 --- a/debian/patches/features/all/rt/workqueue-use-get-cpu-light.patch +++ /dev/null @@ -1,24 +0,0 @@ -Subject: workqueue-use-get-cpu-light.patch -From: Thomas Gleixner -Date: Sun, 17 Jul 2011 21:42:26 +0200 - -Signed-off-by: Thomas Gleixner ---- - kernel/workqueue.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -Index: linux-stable/kernel/workqueue.c -=================================================================== ---- linux-stable.orig/kernel/workqueue.c -+++ linux-stable/kernel/workqueue.c -@@ -1067,8 +1067,8 @@ int queue_work(struct workqueue_struct * - { - int ret; - -- ret = queue_work_on(get_cpu(), wq, work); -- put_cpu(); -+ ret = queue_work_on(get_cpu_light(), wq, work); -+ put_cpu_light(); - - return ret; - } diff --git a/debian/patches/features/all/rt/workqueue-use-locallock.patch b/debian/patches/features/all/rt/workqueue-use-locallock.patch new file mode 100644 index 000000000..a2c611b7e --- /dev/null +++ b/debian/patches/features/all/rt/workqueue-use-locallock.patch @@ -0,0 +1,130 @@ +Subject: Use local irq lock instead of irq disable regions +From: Thomas Gleixner +Date: Sun, 17 Jul 2011 21:42:26 +0200 + +Signed-off-by: Thomas Gleixner 
+--- + kernel/workqueue.c | 27 +++++++++++++++------------ + 1 file changed, 15 insertions(+), 12 deletions(-) + +--- a/kernel/workqueue.c ++++ b/kernel/workqueue.c +@@ -41,6 +41,7 @@ + #include + #include + #include ++#include + + #include "workqueue_sched.h" + +@@ -278,6 +279,8 @@ EXPORT_SYMBOL_GPL(system_unbound_wq); + struct workqueue_struct *system_freezable_wq __read_mostly; + EXPORT_SYMBOL_GPL(system_freezable_wq); + ++static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock); ++ + #define CREATE_TRACE_POINTS + #include + +@@ -1092,7 +1095,7 @@ static int try_to_grab_pending(struct wo + { + struct global_cwq *gcwq; + +- local_irq_save(*flags); ++ local_lock_irqsave(pendingb_lock, *flags); + + /* try to steal the timer if it exists */ + if (is_dwork) { +@@ -1151,7 +1154,7 @@ static int try_to_grab_pending(struct wo + } + spin_unlock(&gcwq->lock); + fail: +- local_irq_restore(*flags); ++ local_unlock_irqrestore(pendingb_lock, *flags); + if (work_is_canceling(work)) + return -ENOENT; + cpu_relax(); +@@ -1246,7 +1249,7 @@ static void __queue_work(unsigned int cp + * queued or lose PENDING. Grabbing PENDING and queueing should + * happen with IRQ disabled. 
+ */ +- WARN_ON_ONCE(!irqs_disabled()); ++ WARN_ON_ONCE_NONRT(!irqs_disabled()); + + debug_work_activate(work); + +@@ -1336,14 +1339,14 @@ bool queue_work_on(int cpu, struct workq + bool ret = false; + unsigned long flags; + +- local_irq_save(flags); ++ local_lock_irqsave(pendingb_lock,flags); + + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { + __queue_work(cpu, wq, work); + ret = true; + } + +- local_irq_restore(flags); ++ local_unlock_irqrestore(pendingb_lock, flags); + return ret; + } + EXPORT_SYMBOL_GPL(queue_work_on); +@@ -1451,14 +1454,14 @@ bool queue_delayed_work_on(int cpu, stru + unsigned long flags; + + /* read the comment in __queue_work() */ +- local_irq_save(flags); ++ local_lock_irqsave(pendingb_lock, flags); + + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { + __queue_delayed_work(cpu, wq, dwork, delay); + ret = true; + } + +- local_irq_restore(flags); ++ local_unlock_irqrestore(pendingb_lock, flags); + return ret; + } + EXPORT_SYMBOL_GPL(queue_delayed_work_on); +@@ -1508,7 +1511,7 @@ bool mod_delayed_work_on(int cpu, struct + + if (likely(ret >= 0)) { + __queue_delayed_work(cpu, wq, dwork, delay); +- local_irq_restore(flags); ++ local_unlock_irqrestore(pendingb_lock, flags); + } + + /* -ENOENT from try_to_grab_pending() becomes %true */ +@@ -2936,7 +2939,7 @@ static bool __cancel_work_timer(struct w + + /* tell other tasks trying to grab @work to back off */ + mark_work_canceling(work); +- local_irq_restore(flags); ++ local_unlock_irqrestore(pendingb_lock, flags); + + flush_work(work); + clear_work_data(work); +@@ -2981,11 +2984,11 @@ EXPORT_SYMBOL_GPL(cancel_work_sync); + */ + bool flush_delayed_work(struct delayed_work *dwork) + { +- local_irq_disable(); ++ local_lock_irq(pendingb_lock); + if (del_timer_sync(&dwork->timer)) + __queue_work(dwork->cpu, + get_work_cwq(&dwork->work)->wq, &dwork->work); +- local_irq_enable(); ++ local_unlock_irq(pendingb_lock); + return flush_work(&dwork->work); + } + 
EXPORT_SYMBOL(flush_delayed_work); +@@ -3015,7 +3018,7 @@ bool cancel_delayed_work(struct delayed_ + return false; + + set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work)); +- local_irq_restore(flags); ++ local_unlock_irqrestore(pendingb_lock, flags); + return ret; + } + EXPORT_SYMBOL(cancel_delayed_work); diff --git a/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch b/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch index fe0b33991..f433dcda2 100644 --- a/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch +++ b/debian/patches/features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch @@ -16,11 +16,9 @@ Signed-off-by: Thomas Gleixner arch/x86/crypto/aesni-intel_glue.c | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) -Index: linux-stable/arch/x86/crypto/aesni-intel_glue.c -=================================================================== ---- linux-stable.orig/arch/x86/crypto/aesni-intel_glue.c -+++ linux-stable/arch/x86/crypto/aesni-intel_glue.c -@@ -245,14 +245,14 @@ static int ecb_encrypt(struct blkcipher_ +--- a/arch/x86/crypto/aesni-intel_glue.c ++++ b/arch/x86/crypto/aesni-intel_glue.c +@@ -250,14 +250,14 @@ static int ecb_encrypt(struct blkcipher_ err = blkcipher_walk_virt(desc, &walk); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; @@ -38,7 +36,7 @@ Index: linux-stable/arch/x86/crypto/aesni-intel_glue.c return err; } -@@ -269,14 +269,14 @@ static int ecb_decrypt(struct blkcipher_ +@@ -274,14 +274,14 @@ static int ecb_decrypt(struct blkcipher_ err = blkcipher_walk_virt(desc, &walk); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; @@ -55,7 +53,7 @@ Index: linux-stable/arch/x86/crypto/aesni-intel_glue.c return err; } -@@ -293,14 +293,14 @@ static int cbc_encrypt(struct blkcipher_ +@@ -298,14 +298,14 @@ static int cbc_encrypt(struct blkcipher_ err = blkcipher_walk_virt(desc, &walk); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; @@ 
-72,7 +70,7 @@ Index: linux-stable/arch/x86/crypto/aesni-intel_glue.c return err; } -@@ -317,14 +317,14 @@ static int cbc_decrypt(struct blkcipher_ +@@ -322,14 +322,14 @@ static int cbc_decrypt(struct blkcipher_ err = blkcipher_walk_virt(desc, &walk); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; @@ -89,7 +87,7 @@ Index: linux-stable/arch/x86/crypto/aesni-intel_glue.c return err; } -@@ -357,18 +357,20 @@ static int ctr_crypt(struct blkcipher_de +@@ -362,18 +362,20 @@ static int ctr_crypt(struct blkcipher_de err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; diff --git a/debian/patches/features/all/rt/x86-disable-debug-stack.patch b/debian/patches/features/all/rt/x86-disable-debug-stack.patch index b3096eada..5c138b34a 100644 --- a/debian/patches/features/all/rt/x86-disable-debug-stack.patch +++ b/debian/patches/features/all/rt/x86-disable-debug-stack.patch @@ -41,10 +41,8 @@ Signed-off-by: Thomas Gleixner arch/x86/kernel/dumpstack_64.c | 4 ++++ 3 files changed, 21 insertions(+), 6 deletions(-) -Index: linux-stable/arch/x86/include/asm/page_64_types.h -=================================================================== ---- linux-stable.orig/arch/x86/include/asm/page_64_types.h -+++ linux-stable/arch/x86/include/asm/page_64_types.h +--- a/arch/x86/include/asm/page_64_types.h ++++ b/arch/x86/include/asm/page_64_types.h @@ -14,12 +14,21 @@ #define IRQ_STACK_ORDER 2 #define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER) @@ -73,11 +71,9 @@ Index: linux-stable/arch/x86/include/asm/page_64_types.h #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) -Index: linux-stable/arch/x86/kernel/cpu/common.c -=================================================================== ---- linux-stable.orig/arch/x86/kernel/cpu/common.c -+++ linux-stable/arch/x86/kernel/cpu/common.c -@@ -1089,7 +1089,9 @@ DEFINE_PER_CPU(struct task_struct *, fpu +--- a/arch/x86/kernel/cpu/common.c ++++ 
b/arch/x86/kernel/cpu/common.c +@@ -1103,7 +1103,9 @@ DEFINE_PER_CPU(struct task_struct *, fpu */ static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = { [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ, @@ -87,10 +83,8 @@ Index: linux-stable/arch/x86/kernel/cpu/common.c }; static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks -Index: linux-stable/arch/x86/kernel/dumpstack_64.c -=================================================================== ---- linux-stable.orig/arch/x86/kernel/dumpstack_64.c -+++ linux-stable/arch/x86/kernel/dumpstack_64.c +--- a/arch/x86/kernel/dumpstack_64.c ++++ b/arch/x86/kernel/dumpstack_64.c @@ -21,10 +21,14 @@ (N_EXCEPTION_STACKS + DEBUG_STKSZ/EXCEPTION_STKSZ - 2) diff --git a/debian/patches/features/all/rt/x86-highmem-warn.patch b/debian/patches/features/all/rt/x86-highmem-warn.patch deleted file mode 100644 index 64dbe519d..000000000 --- a/debian/patches/features/all/rt/x86-highmem-warn.patch +++ /dev/null @@ -1,27 +0,0 @@ -From: Ingo Molnar -Date: Fri, 3 Jul 2009 08:29:25 -0500 -Subject: x86: highmem: Replace BUG_ON by WARN_ON - -The machine might survive that problem and be at least in a state -which allows us to get more information about the problem. 
- -Signed-off-by: Ingo Molnar -Signed-off-by: Thomas Gleixner - ---- - arch/x86/mm/highmem_32.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -Index: linux-stable/arch/x86/mm/highmem_32.c -=================================================================== ---- linux-stable.orig/arch/x86/mm/highmem_32.c -+++ linux-stable/arch/x86/mm/highmem_32.c -@@ -43,7 +43,7 @@ void *kmap_atomic_prot(struct page *page - type = kmap_atomic_idx_push(); - idx = type + KM_TYPE_NR*smp_processor_id(); - vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); -- BUG_ON(!pte_none(*(kmap_pte-idx))); -+ WARN_ON(!pte_none(*(kmap_pte-idx))); - set_pte(kmap_pte-idx, mk_pte(page, prot)); - arch_flush_lazy_mmu_mode(); - diff --git a/debian/patches/features/all/rt/x86-hpet-disable-msi-on-lenovo-w510.patch b/debian/patches/features/all/rt/x86-hpet-disable-msi-on-lenovo-w510.patch index 1027a1dd1..646779e7f 100644 --- a/debian/patches/features/all/rt/x86-hpet-disable-msi-on-lenovo-w510.patch +++ b/debian/patches/features/all/rt/x86-hpet-disable-msi-on-lenovo-w510.patch @@ -12,10 +12,8 @@ Signed-off-by: Thomas Gleixner arch/x86/kernel/hpet.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) -Index: linux-stable/arch/x86/kernel/hpet.c -=================================================================== ---- linux-stable.orig/arch/x86/kernel/hpet.c -+++ linux-stable/arch/x86/kernel/hpet.c +--- a/arch/x86/kernel/hpet.c ++++ b/arch/x86/kernel/hpet.c @@ -8,6 +8,7 @@ #include #include diff --git a/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch b/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch index e3a516f24..1ea2c6972 100644 --- a/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch +++ b/debian/patches/features/all/rt/x86-io-apic-migra-no-unmask.patch @@ -12,11 +12,9 @@ Signed-off-by: Thomas Gleixner arch/x86/kernel/apic/io_apic.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) -Index: linux-stable/arch/x86/kernel/apic/io_apic.c 
-=================================================================== ---- linux-stable.orig/arch/x86/kernel/apic/io_apic.c -+++ linux-stable/arch/x86/kernel/apic/io_apic.c -@@ -2437,7 +2437,8 @@ static bool io_apic_level_ack_pending(st +--- a/arch/x86/kernel/apic/io_apic.c ++++ b/arch/x86/kernel/apic/io_apic.c +@@ -2428,7 +2428,8 @@ static bool io_apic_level_ack_pending(st static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg) { /* If we are moving the irq we need to mask it */ diff --git a/debian/patches/features/all/rt/x86-kprobes-remove-bogus-preempt-enable.patch b/debian/patches/features/all/rt/x86-kprobes-remove-bogus-preempt-enable.patch deleted file mode 100644 index 9f0ae36ec..000000000 --- a/debian/patches/features/all/rt/x86-kprobes-remove-bogus-preempt-enable.patch +++ /dev/null @@ -1,29 +0,0 @@ -Subject: x86: kprobes: Remove remove bogus preempt_enable -From: Thomas Gleixner -Date: Thu, 17 Mar 2011 11:02:15 +0100 - -The CONFIG_PREEMPT=n section of setup_singlestep() contains: - - preempt_enable_no_resched(); - -That's bogus as it is asymetric - no preempt_disable() - and it just -never blew up because preempt_enable_no_resched() is a NOP when -CONFIG_PREEMPT=n. Remove it. - -Signed-off-by: Thomas Gleixner ---- - arch/x86/kernel/kprobes.c | 1 - - 1 file changed, 1 deletion(-) - -Index: linux-stable/arch/x86/kernel/kprobes.c -=================================================================== ---- linux-stable.orig/arch/x86/kernel/kprobes.c -+++ linux-stable/arch/x86/kernel/kprobes.c -@@ -486,7 +486,6 @@ setup_singlestep(struct kprobe *p, struc - * stepping. 
- */ - regs->ip = (unsigned long)p->ainsn.insn; -- preempt_enable_no_resched(); - return; - } - #endif diff --git a/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch b/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch index 556c44279..d74a1cc89 100644 --- a/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch +++ b/debian/patches/features/all/rt/x86-kvm-require-const-tsc-for-rt.patch @@ -7,11 +7,9 @@ Signed-off-by: Thomas Gleixner arch/x86/kvm/x86.c | 7 +++++++ 1 file changed, 7 insertions(+) -Index: linux-stable/arch/x86/kvm/x86.c -=================================================================== ---- linux-stable.orig/arch/x86/kvm/x86.c -+++ linux-stable/arch/x86/kvm/x86.c -@@ -4911,6 +4911,13 @@ int kvm_arch_init(void *opaque) +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -5242,6 +5242,13 @@ int kvm_arch_init(void *opaque) goto out; } @@ -24,4 +22,4 @@ Index: linux-stable/arch/x86/kvm/x86.c + r = kvm_mmu_module_init(); if (r) - goto out; + goto out_free_percpu; diff --git a/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch b/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch index 7541a6ff9..12700306d 100644 --- a/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch +++ b/debian/patches/features/all/rt/x86-mce-timer-hrtimer.patch @@ -9,13 +9,11 @@ avoid this. 
Signed-off-by: Thomas Gleixner --- - arch/x86/kernel/cpu/mcheck/mce.c | 36 ++++++++++++++++-------------------- - 1 file changed, 16 insertions(+), 20 deletions(-) + arch/x86/kernel/cpu/mcheck/mce.c | 57 ++++++++++++++++++++++----------------- + 1 file changed, 33 insertions(+), 24 deletions(-) -Index: linux-stable/arch/x86/kernel/cpu/mcheck/mce.c -=================================================================== ---- linux-stable.orig/arch/x86/kernel/cpu/mcheck/mce.c -+++ linux-stable/arch/x86/kernel/cpu/mcheck/mce.c +--- a/arch/x86/kernel/cpu/mcheck/mce.c ++++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -41,6 +41,7 @@ #include #include @@ -24,13 +22,19 @@ Index: linux-stable/arch/x86/kernel/cpu/mcheck/mce.c #include #include -@@ -1264,15 +1265,12 @@ void mce_log_therm_throt_event(__u64 sta +@@ -1259,7 +1260,7 @@ void mce_log_therm_throt_event(__u64 sta static unsigned long check_interval = 5 * 60; /* 5 minutes */ static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */ -static DEFINE_PER_CPU(struct timer_list, mce_timer); +static DEFINE_PER_CPU(struct hrtimer, mce_timer); + static unsigned long mce_adjust_timer_default(unsigned long interval) + { +@@ -1269,13 +1270,10 @@ static unsigned long mce_adjust_timer_de + static unsigned long (*mce_adjust_timer)(unsigned long interval) = + mce_adjust_timer_default; + -static void mce_timer_fn(unsigned long data) +static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer) { @@ -42,15 +46,52 @@ Index: linux-stable/arch/x86/kernel/cpu/mcheck/mce.c if (mce_available(__this_cpu_ptr(&cpu_info))) { machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_poll_banks)); -@@ -1289,17 +1287,18 @@ static void mce_timer_fn(unsigned long d - iv = min(iv * 2, round_jiffies_relative(check_interval * HZ)); +@@ -1296,9 +1294,10 @@ static void mce_timer_fn(unsigned long d __this_cpu_write(mce_next_interval, iv); + /* Might have become 0 after CMCI storm subsided */ + if (iv) { +- t->expires = jiffies + iv; +- add_timer_on(t, 
smp_processor_id()); ++ hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_usecs(iv))); ++ return HRTIMER_RESTART; + } ++ return HRTIMER_NORESTART; + } -- t->expires = jiffies + iv; -- add_timer_on(t, smp_processor_id()); -+ hrtimer_forward(timer, timer->base->get_time(), -+ ns_to_ktime(jiffies_to_usecs(iv))); -+ return HRTIMER_RESTART; + /* +@@ -1306,28 +1305,37 @@ static void mce_timer_fn(unsigned long d + */ + void mce_timer_kick(unsigned long interval) + { +- struct timer_list *t = &__get_cpu_var(mce_timer); +- unsigned long when = jiffies + interval; ++ struct hrtimer *t = &__get_cpu_var(mce_timer); + unsigned long iv = __this_cpu_read(mce_next_interval); + +- if (timer_pending(t)) { +- if (time_before(when, t->expires)) +- mod_timer_pinned(t, when); ++ if (hrtimer_active(t)) { ++ s64 exp; ++ s64 intv_us; ++ ++ intv_us = jiffies_to_usecs(interval); ++ exp = ktime_to_us(hrtimer_expires_remaining(t)); ++ if (intv_us < exp) { ++ hrtimer_cancel(t); ++ hrtimer_start_range_ns(t, ++ ns_to_ktime(intv_us * 1000), ++ 0, HRTIMER_MODE_REL_PINNED); ++ } + } else { +- t->expires = round_jiffies(when); +- add_timer_on(t, smp_processor_id()); ++ hrtimer_start_range_ns(t, ++ ns_to_ktime(jiffies_to_usecs(interval) * 1000), ++ 0, HRTIMER_MODE_REL_PINNED); + } + if (interval < iv) + __this_cpu_write(mce_next_interval, interval); } -/* Must not be called in IRQ context where del_timer_sync() can deadlock */ @@ -65,32 +106,38 @@ Index: linux-stable/arch/x86/kernel/cpu/mcheck/mce.c } static void mce_do_trigger(struct work_struct *work) -@@ -1596,10 +1595,11 @@ static void __mcheck_cpu_init_vendor(str +@@ -1632,7 +1640,7 @@ static void __mcheck_cpu_init_vendor(str + } + } + +-static void mce_start_timer(unsigned int cpu, struct timer_list *t) ++static void mce_start_timer(unsigned int cpu, struct hrtimer *t) + { + unsigned long iv = mce_adjust_timer(check_interval * HZ); + +@@ -1641,16 +1649,17 @@ static void mce_start_timer(unsigned int + if (mca_cfg.ignore_ce || !iv) + return; + +- 
t->expires = round_jiffies(jiffies + iv); +- add_timer_on(t, smp_processor_id()); ++ hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000), ++ 0, HRTIMER_MODE_REL_PINNED); + } static void __mcheck_cpu_init_timer(void) { - struct timer_list *t = &__get_cpu_var(mce_timer); + struct hrtimer *t = &__get_cpu_var(mce_timer); - unsigned long iv = check_interval * HZ; + unsigned int cpu = smp_processor_id(); -- setup_timer(t, mce_timer_fn, smp_processor_id()); +- setup_timer(t, mce_timer_fn, cpu); + hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + t->function = mce_timer_fn; - - if (mce_ignore_ce) - return; -@@ -1607,8 +1607,8 @@ static void __mcheck_cpu_init_timer(void - __this_cpu_write(mce_next_interval, iv); - if (!iv) - return; -- t->expires = round_jiffies(jiffies + iv); -- add_timer_on(t, smp_processor_id()); -+ hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000), -+ 0, HRTIMER_MODE_REL_PINNED); + mce_start_timer(cpu, t); } - /* Handle unconfigured int18 (should never happen) */ -@@ -2259,6 +2259,8 @@ static void __cpuinit mce_disable_cpu(vo +@@ -2307,6 +2316,8 @@ static void __cpuinit mce_disable_cpu(vo if (!mce_available(__this_cpu_ptr(&cpu_info))) return; @@ -98,8 +145,8 @@ Index: linux-stable/arch/x86/kernel/cpu/mcheck/mce.c + if (!(action & CPU_TASKS_FROZEN)) cmci_clear(); - for (i = 0; i < banks; i++) { -@@ -2285,6 +2287,7 @@ static void __cpuinit mce_reenable_cpu(v + for (i = 0; i < mca_cfg.banks; i++) { +@@ -2333,6 +2344,7 @@ static void __cpuinit mce_reenable_cpu(v if (b->init) wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl); } @@ -107,28 +154,23 @@ Index: linux-stable/arch/x86/kernel/cpu/mcheck/mce.c } /* Get notified when a cpu comes on/off. Be hotplug friendly. 
*/ -@@ -2292,7 +2295,6 @@ static int __cpuinit +@@ -2340,7 +2352,6 @@ static int __cpuinit mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; - struct timer_list *t = &per_cpu(mce_timer, cpu); - switch (action) { + switch (action & ~CPU_TASKS_FROZEN) { case CPU_ONLINE: -@@ -2309,16 +2311,10 @@ mce_cpu_callback(struct notifier_block * +@@ -2356,11 +2367,9 @@ mce_cpu_callback(struct notifier_block * break; case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN: -- del_timer_sync(t); smp_call_function_single(cpu, mce_disable_cpu, &action, 1); +- del_timer_sync(t); break; case CPU_DOWN_FAILED: - case CPU_DOWN_FAILED_FROZEN: -- if (!mce_ignore_ce && check_interval) { -- t->expires = round_jiffies(jiffies + -- per_cpu(mce_next_interval, cpu)); -- add_timer_on(t, cpu); -- } smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); +- mce_start_timer(cpu, t); break; - case CPU_POST_DEAD: + } + diff --git a/debian/patches/features/all/rt/x86-perf-uncore-deal-with-kfree.patch b/debian/patches/features/all/rt/x86-perf-uncore-deal-with-kfree.patch index 85199845c..94e5b9b63 100644 --- a/debian/patches/features/all/rt/x86-perf-uncore-deal-with-kfree.patch +++ b/debian/patches/features/all/rt/x86-perf-uncore-deal-with-kfree.patch @@ -14,10 +14,8 @@ Signed-off-by: Thomas Gleixner arch/x86/kernel/cpu/perf_event_intel_uncore.h | 1 + 4 files changed, 6 insertions(+), 3 deletions(-) -Index: linux-stable/arch/x86/kernel/cpu/perf_event.h -=================================================================== ---- linux-stable.orig/arch/x86/kernel/cpu/perf_event.h -+++ linux-stable/arch/x86/kernel/cpu/perf_event.h +--- a/arch/x86/kernel/cpu/perf_event.h ++++ b/arch/x86/kernel/cpu/perf_event.h @@ -108,6 +108,7 @@ struct intel_shared_regs { struct er_account regs[EXTRA_REG_MAX]; int refcnt; /* per-core: #HT threads */ @@ -26,11 +24,9 @@ Index: linux-stable/arch/x86/kernel/cpu/perf_event.h }; #define MAX_LBR_ENTRIES 16 
-Index: linux-stable/arch/x86/kernel/cpu/perf_event_intel.c -=================================================================== ---- linux-stable.orig/arch/x86/kernel/cpu/perf_event_intel.c -+++ linux-stable/arch/x86/kernel/cpu/perf_event_intel.c -@@ -1707,7 +1707,7 @@ static void intel_pmu_cpu_dying(int cpu) +--- a/arch/x86/kernel/cpu/perf_event_intel.c ++++ b/arch/x86/kernel/cpu/perf_event_intel.c +@@ -1715,7 +1715,7 @@ static void intel_pmu_cpu_dying(int cpu) pc = cpuc->shared_regs; if (pc) { if (pc->core_id == -1 || --pc->refcnt == 0) @@ -39,11 +35,9 @@ Index: linux-stable/arch/x86/kernel/cpu/perf_event_intel.c cpuc->shared_regs = NULL; } -Index: linux-stable/arch/x86/kernel/cpu/perf_event_intel_uncore.c -=================================================================== ---- linux-stable.orig/arch/x86/kernel/cpu/perf_event_intel_uncore.c -+++ linux-stable/arch/x86/kernel/cpu/perf_event_intel_uncore.c -@@ -2603,7 +2603,7 @@ static void __cpuinit uncore_cpu_dying(i +--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c ++++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c +@@ -2636,7 +2636,7 @@ static void __cpuinit uncore_cpu_dying(i box = *per_cpu_ptr(pmu->box, cpu); *per_cpu_ptr(pmu->box, cpu) = NULL; if (box && atomic_dec_and_test(&box->refcnt)) @@ -52,7 +46,7 @@ Index: linux-stable/arch/x86/kernel/cpu/perf_event_intel_uncore.c } } } -@@ -2633,7 +2633,8 @@ static int __cpuinit uncore_cpu_starting +@@ -2666,7 +2666,8 @@ static int __cpuinit uncore_cpu_starting if (exist && exist->phys_id == phys_id) { atomic_inc(&exist->refcnt); *per_cpu_ptr(pmu->box, cpu) = exist; @@ -62,11 +56,9 @@ Index: linux-stable/arch/x86/kernel/cpu/perf_event_intel_uncore.c box = NULL; break; } -Index: linux-stable/arch/x86/kernel/cpu/perf_event_intel_uncore.h -=================================================================== ---- linux-stable.orig/arch/x86/kernel/cpu/perf_event_intel_uncore.h -+++ linux-stable/arch/x86/kernel/cpu/perf_event_intel_uncore.h -@@ -419,6 +419,7 @@ struct 
intel_uncore_box { +--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h ++++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h +@@ -421,6 +421,7 @@ struct intel_uncore_box { struct hrtimer hrtimer; struct list_head list; struct intel_uncore_extra_reg shared_regs[0]; diff --git a/debian/patches/features/all/rt/x86-preempt-lazy.patch b/debian/patches/features/all/rt/x86-preempt-lazy.patch index 3bba56176..8fa4e9b11 100644 --- a/debian/patches/features/all/rt/x86-preempt-lazy.patch +++ b/debian/patches/features/all/rt/x86-preempt-lazy.patch @@ -11,22 +11,18 @@ Signed-off-by: Thomas Gleixner arch/x86/kernel/entry_64.S | 24 +++++++++++++++--------- 5 files changed, 36 insertions(+), 14 deletions(-) -Index: linux-stable/arch/x86/Kconfig -=================================================================== ---- linux-stable.orig/arch/x86/Kconfig -+++ linux-stable/arch/x86/Kconfig -@@ -97,6 +97,7 @@ config X86 +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -108,6 +108,7 @@ config X86 select KTIME_SCALAR if X86_32 select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER + select HAVE_PREEMPT_LAZY - - config INSTRUCTION_DECODER - def_bool (KPROBES || PERF_EVENTS || UPROBES) -Index: linux-stable/arch/x86/include/asm/thread_info.h -=================================================================== ---- linux-stable.orig/arch/x86/include/asm/thread_info.h -+++ linux-stable/arch/x86/include/asm/thread_info.h + select HAVE_CONTEXT_TRACKING if X86_64 + select HAVE_IRQ_TIME_ACCOUNTING + select MODULES_USE_ELF_REL if X86_32 +--- a/arch/x86/include/asm/thread_info.h ++++ b/arch/x86/include/asm/thread_info.h @@ -31,6 +31,8 @@ struct thread_info { __u32 cpu; /* current CPU */ int preempt_count; /* 0 => preemptable, @@ -36,7 +32,7 @@ Index: linux-stable/arch/x86/include/asm/thread_info.h mm_segment_t addr_limit; struct restart_block restart_block; void __user *sysenter_return; -@@ -83,6 +85,7 @@ struct thread_info { +@@ -82,6 +84,7 @@ struct thread_info { #define TIF_SYSCALL_EMU 
6 /* syscall emulation active */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SECCOMP 8 /* secure computing */ @@ -44,7 +40,7 @@ Index: linux-stable/arch/x86/include/asm/thread_info.h #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ #define TIF_UPROBE 12 /* breakpointed or singlestepping */ -@@ -108,6 +111,7 @@ struct thread_info { +@@ -107,6 +110,7 @@ struct thread_info { #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) @@ -52,7 +48,7 @@ Index: linux-stable/arch/x86/include/asm/thread_info.h #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY) #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY) #define _TIF_UPROBE (1 << TIF_UPROBE) -@@ -155,6 +159,8 @@ struct thread_info { +@@ -157,6 +161,8 @@ struct thread_info { #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG) @@ -61,10 +57,8 @@ Index: linux-stable/arch/x86/include/asm/thread_info.h #define PREEMPT_ACTIVE 0x10000000 #ifdef CONFIG_X86_32 -Index: linux-stable/arch/x86/kernel/asm-offsets.c -=================================================================== ---- linux-stable.orig/arch/x86/kernel/asm-offsets.c -+++ linux-stable/arch/x86/kernel/asm-offsets.c +--- a/arch/x86/kernel/asm-offsets.c ++++ b/arch/x86/kernel/asm-offsets.c @@ -33,6 +33,7 @@ void common(void) { OFFSET(TI_status, thread_info, status); OFFSET(TI_addr_limit, thread_info, addr_limit); @@ -73,11 +67,9 @@ Index: linux-stable/arch/x86/kernel/asm-offsets.c BLANK(); OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); -Index: linux-stable/arch/x86/kernel/entry_32.S -=================================================================== ---- linux-stable.orig/arch/x86/kernel/entry_32.S -+++ linux-stable/arch/x86/kernel/entry_32.S -@@ -349,14 +349,22 @@ ENTRY(resume_kernel) +--- 
a/arch/x86/kernel/entry_32.S ++++ b/arch/x86/kernel/entry_32.S +@@ -364,14 +364,22 @@ ENTRY(resume_kernel) DISABLE_INTERRUPTS(CLBR_ANY) cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ? jnz restore_all @@ -103,7 +95,7 @@ Index: linux-stable/arch/x86/kernel/entry_32.S END(resume_kernel) #endif CFI_ENDPROC -@@ -589,7 +597,7 @@ ENDPROC(system_call) +@@ -607,7 +615,7 @@ ENDPROC(system_call) ALIGN RING0_PTREGS_FRAME # can't unwind into user space anyway work_pending: @@ -112,7 +104,7 @@ Index: linux-stable/arch/x86/kernel/entry_32.S jz work_notifysig work_resched: call schedule -@@ -602,7 +610,7 @@ work_resched: +@@ -620,7 +628,7 @@ work_resched: andl $_TIF_WORK_MASK, %ecx # is there any work to be done other # than syscall tracing? jz restore_all @@ -121,11 +113,9 @@ Index: linux-stable/arch/x86/kernel/entry_32.S jnz work_resched work_notifysig: # deal with pending signals and -Index: linux-stable/arch/x86/kernel/entry_64.S -=================================================================== ---- linux-stable.orig/arch/x86/kernel/entry_64.S -+++ linux-stable/arch/x86/kernel/entry_64.S -@@ -560,8 +560,8 @@ sysret_check: +--- a/arch/x86/kernel/entry_64.S ++++ b/arch/x86/kernel/entry_64.S +@@ -673,8 +673,8 @@ sysret_check: /* Handle reschedules */ /* edx: work, edi: workmask */ sysret_careful: @@ -136,7 +126,7 @@ Index: linux-stable/arch/x86/kernel/entry_64.S TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) pushq_cfi %rdi -@@ -673,8 +673,8 @@ GLOBAL(int_with_check) +@@ -786,8 +786,8 @@ GLOBAL(int_with_check) /* First do a reschedule test. 
*/ /* edx: work, edi: workmask */ int_careful: @@ -147,7 +137,7 @@ Index: linux-stable/arch/x86/kernel/entry_64.S TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) pushq_cfi %rdi -@@ -969,8 +969,8 @@ bad_iret: +@@ -1094,8 +1094,8 @@ bad_iret: /* edi: workmask, edx: work */ retint_careful: CFI_RESTORE_STATE @@ -158,7 +148,7 @@ Index: linux-stable/arch/x86/kernel/entry_64.S TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) pushq_cfi %rdi -@@ -1003,9 +1003,15 @@ retint_signal: +@@ -1128,9 +1128,15 @@ retint_signal: ENTRY(retint_kernel) cmpl $0,TI_preempt_count(%rcx) jnz retint_restore_args @@ -176,7 +166,7 @@ Index: linux-stable/arch/x86/kernel/entry_64.S jnc retint_restore_args call preempt_schedule_irq jmp exit_intr -@@ -1437,7 +1443,7 @@ paranoid_userspace: +@@ -1522,7 +1528,7 @@ paranoid_userspace: movq %rsp,%rdi /* &pt_regs */ call sync_regs movq %rax,%rsp /* switch stack for scheduling */ diff --git a/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch b/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch index 8a2b17e73..5c3ca0990 100644 --- a/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch +++ b/debian/patches/features/all/rt/x86-stackprot-no-random-on-rt.patch @@ -17,10 +17,8 @@ Signed-off-by: Thomas Gleixner arch/x86/include/asm/stackprotector.h | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) -Index: linux-stable/arch/x86/include/asm/stackprotector.h -=================================================================== ---- linux-stable.orig/arch/x86/include/asm/stackprotector.h -+++ linux-stable/arch/x86/include/asm/stackprotector.h +--- a/arch/x86/include/asm/stackprotector.h ++++ b/arch/x86/include/asm/stackprotector.h @@ -57,7 +57,7 @@ */ static __always_inline void boot_init_stack_canary(void) diff --git a/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch b/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch index d7dd32b55..96fd475ae 100644 --- 
a/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch +++ b/debian/patches/features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch @@ -8,23 +8,21 @@ Simplifies the separation of anon_rw_semaphores and rw_semaphores for Signed-off-by: Thomas Gleixner --- - arch/x86/Kconfig | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) + arch/x86/Kconfig | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) -Index: linux-stable/arch/x86/Kconfig -=================================================================== ---- linux-stable.orig/arch/x86/Kconfig -+++ linux-stable/arch/x86/Kconfig -@@ -153,10 +153,10 @@ config ARCH_MAY_HAVE_PC_FDC - def_bool ISA_DMA_API - - config RWSEM_GENERIC_SPINLOCK -- def_bool !X86_XADD -+ def_bool !X86_XADD || PREEMPT_RT_FULL +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -173,8 +173,11 @@ config ARCH_MAY_HAVE_PC_FDC + def_bool y + depends on ISA_DMA_API ++config RWSEM_GENERIC_SPINLOCK ++ def_bool PREEMPT_RT_FULL ++ config RWSEM_XCHGADD_ALGORITHM -- def_bool X86_XADD -+ def_bool X86_XADD && !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL +- def_bool y ++ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL config GENERIC_CALIBRATE_DELAY def_bool y diff --git a/debian/patches/series-rt b/debian/patches/series-rt index 08f36d957..370805434 100644 --- a/debian/patches/series-rt +++ b/debian/patches/series-rt @@ -5,6 +5,21 @@ ############################################################ # UPSTREAM changes queued ############################################################ +features/all/rt/fix-1-2-slub-do-not-dereference-null-pointer-in-node_match.patch +features/all/rt/fix-2-2-slub-tid-must-be-retrieved-from-the-percpu-area-of-the-current-processor.patch +features/all/rt/fix-rq-3elock-vs-logbuf_lock-unlock-race.patch +features/all/rt/genirq-add-default-mask-cmdline-option.patch +features/all/rt/of-fixup-resursive-locking-code-paths.patch +features/all/rt/of-convert-devtree-lock-from-rw_lock-to-raw-spinlock.patch + 
+features/all/rt/locking-various-init-fixes.patch +features/all/rt/intel_idle-convert-i7300_idle_lock-to-raw-spinlock.patch +features/all/rt/ntp-make-ntp-lock-raw-sigh.patch +features/all/rt/seqlock-remove-unused-functions.patch +features/all/rt/seqlock-use-seqcount.patch +features/all/rt/generic-cmpxchg-use-raw-local-irq.patch + +features/all/rt/0001-of-fix-recursive-locking-in-of_get_next_available_ch.patch ############################################################ # UPSTREAM FIXES, patches pending @@ -13,7 +28,6 @@ ############################################################ # Stuff broken upstream, patches submitted ############################################################ -features/all/rt/x86-kprobes-remove-bogus-preempt-enable.patch ############################################################ # Stuff which needs addressing upstream, but requires more @@ -28,31 +42,39 @@ features/all/rt/x86-hpet-disable-msi-on-lenovo-w510.patch ############################################################ # Submitted on LKML ############################################################ -# SCHED BLOCK/WQ -features/all/rt/block-shorten-interrupt-disabled-regions.patch +features/all/rt/early-printk-consolidate.patch -# CHECKME sched-distangle-worker-accounting-from-rq-3elock.patch +# SRCU +features/all/rt/0001-kernel-srcu-merge-common-code-into-a-macro.patch +features/all/rt/0002-kernel-SRCU-provide-a-static-initializer.patch ############################################################ # Submitted to mips ML ############################################################ -features/all/rt/mips-enable-interrupts-in-signal.patch ############################################################ # Submitted to ARM ML ############################################################ +features/all/rt/arm-mark-pmu-interupt-no-thread.patch +features/all/rt/arm-allow-irq-threading.patch + +############################################################ +# Submitted to PPC ML 
+############################################################ +features/all/rt/ppc-mark-low-level-handlers-no-thread.patch ############################################################ # Submitted on LKML ############################################################ -# JBD - -# SCHED - -############################################################ -# Submitted on ppc-devel -############################################################ +features/all/rt/timekeeping-do-not-calc-crap-over-and-over.patch +features/all/rt/timekeeping-make-jiffies-lock-internal.patch +features/all/rt/timekeeping-move-lock-out-of-timekeeper.patch +features/all/rt/timekeeping-split-timekeeper-lock.patch +features/all/rt/timekeeping-store-cycle-last-in-timekeeper.patch +features/all/rt/timekeeping-delay-clock-cycle-last-update.patch +features/all/rt/timekeeping-implement-shadow-timekeeper.patch +features/all/rt/timekeeping-shorten-seq-count-region.patch ############################################################ # Submitted to net-dev @@ -62,43 +84,18 @@ features/all/rt/mips-enable-interrupts-in-signal.patch # Pending in tip ############################################################ -# WATCHDOG - -# CLOCKSOURCE - -# RTMUTEX CLEANUP - -# RAW SPINLOCKS - -# X86 - -############################################################ -# Pending in peterz's scheduler queue -############################################################ - - ############################################################ # Stuff which should go upstream ASAP ############################################################ -# GENIRQ -features/all/rt/genirq-add-default-mask-cmdline-option.patch +# SCHED BLOCK/WQ +features/all/rt/block-shorten-interrupt-disabled-regions.patch -# PPC -features/all/rt/ppc-mark-low-level-handlers-no-thread.patch +# Timekeeping split jiffies lock. 
Needs a good argument :) +features/all/rt/timekeeping-split-jiffies-lock.patch -# Timekeeping / VDSO - -# SEQLOCK -features/all/rt/seqlock-remove-unused-functions.patch -features/all/rt/seqlock-use-seqcount.patch - -# RAW SPINLOCKS -features/all/rt/timekeeping-split-xtime-lock.patch -features/all/rt/intel_idle-convert-i7300_idle_lock-to-raw-spinlock.patch -features/all/rt/ntp-make-ntp-lock-raw-sigh.patch - -# MM memcg +# CHECKME: Should local_irq_enable() generally do a preemption check ? +features/all/rt/mips-enable-interrupts-in-signal.patch # Tracing features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch @@ -107,15 +104,12 @@ features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch features/all/rt/signal-revert-ptrace-preempt-magic.patch # ARM IRQF_NO_TRHEAD / IRQ THREADING SUPPORT -features/all/rt/arm-mark-pmu-interupt-no-thread.patch -features/all/rt/arm-allow-irq-threading.patch features/all/rt/arm-convert-boot-lock-to-raw.patch features/all/rt/arm-omap-make-wakeupgen_lock-raw.patch # PREEMPT_ENABLE_NO_RESCHED # SIGNALS / POSIXTIMERS -features/all/rt/signals-do-not-wake-self.patch features/all/rt/posix-timers-no-broadcast.patch features/all/rt/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch features/all/rt/oleg-signal-rt-fix.patch @@ -123,7 +117,6 @@ features/all/rt/oleg-signal-rt-fix.patch # SCHED # GENERIC CMPXCHG -features/all/rt/generic-cmpxchg-use-raw-local-irq.patch # SHORTEN PREEMPT DISABLED features/all/rt/drivers-random-reduce-preempt-disabled-region.patch @@ -132,14 +125,9 @@ features/all/rt/drivers-random-reduce-preempt-disabled-region.patch features/all/rt/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch features/all/rt/clocksource-tclib-allow-higher-clockrates.patch -# HW LATENCY DETECTOR - this really wants a rewrite -#hw-latency-detector.patch - # DRIVERS NET features/all/rt/drivers-net-tulip-add-missing-pci-disable.patch features/all/rt/drivers-net-8139-disable-irq-nosync.patch 
-features/all/rt/drivers-net-ehea-mark-rx-irq-no-thread.patch -features/all/rt/drivers-net-at91-make-mdio-protection-rt-safe.patch # PREEMPT @@ -152,16 +140,9 @@ features/all/rt/peterz-raw_pagefault_disable.patch features/all/rt/filemap-fix-up.patch features/all/rt/mm-remove-preempt-count-from-pf.patch -# HIGHMEM -features/all/rt/x86-highmem-warn.patch - # PM features/all/rt/suspend-prevernt-might-sleep-splats.patch -# DEVICE TREE -features/all/rt/of-fixup-recursive-locking.patch -features/all/rt/of-convert-devtree-lock.patch - # MM/LISTS features/all/rt/list-add-list-last-entry.patch features/all/rt/mm-page-alloc-use-list-last-entry.patch @@ -185,7 +166,6 @@ features/all/rt/fix-rt-int3-x86_32-3.2-rt.patch # RCU # LOCKING INIT FIXES -features/all/rt/locking-various-init-fixes.patch # PCI features/all/rt/pci-access-use-__wake_up_all_locked.patch @@ -201,6 +181,7 @@ features/all/rt/pci-access-use-__wake_up_all_locked.patch # TRACING features/all/rt/latency-hist.patch +# HW LATENCY DETECTOR - this really wants a rewrite # HW latency detector features/all/rt/hwlatdetect.patch @@ -212,7 +193,6 @@ features/all/rt/hwlatdetect.patch #localversion.patch # PRINTK -features/all/rt/early-printk-consolidate.patch features/all/rt/printk-kill.patch features/all/rt/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch @@ -237,7 +217,6 @@ features/all/rt/acpi-use-local-irq-nort.patch features/all/rt/user-use-local-irq-nort.patch features/all/rt/resource-counters-use-localirq-nort.patch features/all/rt/usb-hcd-use-local-irq-nort.patch -features/all/rt/tty-use-local-irq-nort.patch features/all/rt/mm-scatterlist-dont-disable-irqs-on-RT.patch # Sigh @@ -327,9 +306,6 @@ features/all/rt/rfc-printk-don-27t-call-printk_tick-in-printk_needs_cpu.patch # HRTIMERS features/all/rt/hrtimers-prepare-full-preemption.patch features/all/rt/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch -features/all/rt/peter_zijlstra-frob-hrtimer.patch 
-features/all/rt/hrtimer-add-missing-debug_activate-aid.patch -features/all/rt/hrtimer-fix-reprogram-madness.patch features/all/rt/timer-fd-avoid-live-lock.patch # POSIX-CPU-TIMERS @@ -378,7 +354,7 @@ features/all/rt/rt-sched-have-migrate_disable-ignore-bounded-threads.patch features/all/rt/sched-clear-pf-thread-bound-on-fallback-rq.patch # FTRACE -features/all/rt/ftrace-crap.patch +# XXX checkme ftrace-crap.patch # CHECKME rt-ring-buffer-convert-reader_lock-from-raw_spin_lock-into-spin_lock.patch # CHECKME rfc-ring-buffer-rt-check-for-irqs-disabled-before-grabbing-reader-lock.patch @@ -396,7 +372,7 @@ features/all/rt/mutex-no-spin-on-rt.patch features/all/rt/softirq-local-lock.patch features/all/rt/softirq-export-in-serving-softirq.patch features/all/rt/harirq-h-define-softirq_count-as-oul-to-kill-build-warning.patch -features/all/rt/softirq-fix-unplug-deadlock.patch +# XXX checkme softirq-fix-unplug-deadlock.patch features/all/rt/softirq-disable-softirq-stacks-for-rt.patch features/all/rt/softirq-make-fifo.patch features/all/rt/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch @@ -408,14 +384,13 @@ features/all/rt/local-vars-migrate-disable.patch # RAID5 features/all/rt/md-raid5-percpu-handling-rt-aware.patch -# RTMUTEX -features/all/rt/rtmutex-lock-killable.patch - # FUTEX/RTMUTEX features/all/rt/rtmutex-futex-prepare-rt.patch features/all/rt/futex-requeue-pi-fix.patch +features/all/rt/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch # RTMUTEX +features/all/rt/rtmutex-lock-killable.patch features/all/rt/rt-mutex-add-sleeping-spinlocks-support.patch features/all/rt/spinlock-types-separate-raw.patch features/all/rt/rtmutex-avoid-include-hell.patch @@ -423,22 +398,19 @@ features/all/rt/rt-add-rt-spinlock-to-headers.patch features/all/rt/rt-add-rt-to-mutex-headers.patch features/all/rt/rwsem-add-rt-variant.patch features/all/rt/rt-add-rt-locks.patch +features/all/rt/percpu-rwsem-compilefix.patch # RTMUTEX Fallout 
features/all/rt/tasklist-lock-fix-section-conflict.patch # NOHZ/RTMUTEX features/all/rt/timer-handle-idle-trylock-in-get-next-timer-irq.patch -features/all/rt/timer.c-fix-build-fail-for-RT_FULL.patch # RCU -features/all/rt/rcu-force-preempt-rcu-for-rt.patch features/all/rt/peter_zijlstra-frob-rcu.patch features/all/rt/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch features/all/rt/rcu-tiny-merge-bh.patch features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch -features/all/rt/rcu-fix-build-break.patch -features/all/rt/rt-rcutree-warn-fix.patch # LGLOCKS - lovely features/all/rt/lglocks-rt.patch @@ -447,28 +419,27 @@ features/all/rt/lglocks-rt.patch features/all/rt/drivers-serial-cleanup-locking-for-rt.patch features/all/rt/drivers-serial-call-flush_to_ldisc-when-the-irq-is-t.patch features/all/rt/drivers-tty-fix-omap-lock-crap.patch +features/all/rt/drivers-tty-pl011-irq-disable-madness.patch features/all/rt/rt-serial-warn-fix.patch # FS features/all/rt/fs-namespace-preemption-fix.patch features/all/rt/mm-protect-activate-switch-mm.patch -features/all/rt/mm-protect-activate_mm-by-preempt_-disable-7cenable-_rt.patch features/all/rt/fs-block-rt-support.patch features/all/rt/fs-ntfs-disable-interrupt-non-rt.patch # X86 features/all/rt/x86-mce-timer-hrtimer.patch features/all/rt/x86-stackprot-no-random-on-rt.patch -# x86-no-highmem-with-rt.patch -- peterz features/all/rt/x86-use-gen-rwsem-spinlocks-rt.patch features/all/rt/x86-disable-debug-stack.patch # CPU get light -features/all/rt/workqueue-use-get-cpu-light.patch features/all/rt/epoll-use-get-cpu-light.patch features/all/rt/mm-vmalloc-use-get-cpu-light.patch # WORKQUEUE more fixes +features/all/rt/workqueue-use-locallock.patch # CHECKME workqueue-sanity.patch # CHECKME workqueue-fix-PF_THREAD_BOUND.patch # CHECKME workqueue-hotplug-fix.patch @@ -514,6 +485,9 @@ features/all/rt/sysfs-realtime-entry.patch # KMAP/HIGHMEM features/all/rt/mm-rt-kmap-atomic-scheduling.patch 
+features/all/rt/0002-x86-highmem-add-a-already-used-pte-check.patch +features/all/rt/0003-arm-highmem-flush-tlb-on-unmap.patch +features/all/rt/arm-enable-highmem-for-rt.patch # IPC features/all/rt/ipc-sem-rework-semaphore-wakeups.patch @@ -567,7 +541,6 @@ features/all/rt/net-use-cpu-chill.patch features/all/rt/lockdep-selftest-convert-spinlock-to-raw-spinlock.patch features/all/rt/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch -features/all/rt/rt-disable-rt-group-sched.patch features/all/rt/fs-jbd-pull-plug-when-waiting-for-space.patch features/all/rt/perf-make-swevent-hrtimer-irqsafe.patch features/all/rt/cpu-rt-rework-cpu-down.patch @@ -575,7 +548,6 @@ features/all/rt/cpu-rt-rework-cpu-down.patch # Stable-rt stuff: Fold back when Steve grabbed it features/all/rt/random-make-it-work-on-rt.patch features/all/rt/softirq-init-softirq-local-lock-after-per-cpu-section-is-set-up.patch -features/all/rt/mm-slab-fix-potential-deadlock.patch features/all/rt/mm-page-alloc-use-local-lock-on-target-cpu.patch features/all/rt/rt-rw-lockdep-annotations.patch features/all/rt/sched-better-debug-output-for-might-sleep.patch @@ -585,33 +557,51 @@ features/all/rt/stomp-machine-deal-clever-with-stopper-lock.patch features/all/rt/net-another-local-irq-disable-alloc-atomic-headache.patch features/all/rt/net-use-cpu-light-in-ip-send-unicast-reply.patch features/all/rt/peterz-srcu-crypto-chain.patch -features/all/rt/crypto-make-core-static-and-init-scru-early.patch -features/all/rt/fix-crypto-api-init-for-3-6-4-rt10.patch features/all/rt/x86-perf-uncore-deal-with-kfree.patch features/all/rt/softirq-make-serving-softirqs-a-task-flag.patch features/all/rt/softirq-split-handling-function.patch features/all/rt/softirq-split-locks.patch features/all/rt/rcu-tiny-solve-rt-mistery.patch -features/all/rt/slub-correct-per-cpu-slab.patch features/all/rt/mm-enable-slub.patch features/all/rt/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch 
features/all/rt/rcu-disable-rcu-fast-no-hz-on-rt.patch features/all/rt/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch features/all/rt/softirq-adapt-nohz-pending-debug-code-to-new-scheme.patch -features/all/rt/softirq-add-more-debug.patch -features/all/rt/softirq-fix-nohz-pending-issue-for-real.patch features/all/rt/net-netif-rx-ni-use-local-bh-disable.patch -features/all/rt/fix-random-fallout.patch - features/all/rt/preempt-lazy-support.patch features/all/rt/x86-preempt-lazy.patch features/all/rt/arm-preempt-lazy-support.patch +# 3.8 changes +features/all/rt/net-make-devnet_rename_seq-a-mutex.patch +features/all/rt/powerpc-fsl-msi-use-a-different-locklcass-for-the-ca.patch +features/all/rt/i2c-omap-drop-the-lock-hard-irq-context.patch +features/all/rt/spi-omap-mcspi-check-condition-also-after-timeout.patch +features/all/rt/HACK-printk-drop-the-logbuf_lock-more-often.patch +features/all/rt/fscache_compile_fix.patch +features/all/rt/i915_compile_fix.patch + # Enable full RT features/all/rt/powerpc-preempt-lazy-support.patch +features/all/rt/wait-simple-implementation.patch +features/all/rt/rcutiny-use-simple-waitqueue.patch +features/all/rt/treercu-use-simple-waitqueue.patch +features/all/rt/sched-adjust-reset-on-fork-always.patch +features/all/rt/sched-enqueue-to-head.patch +features/all/rt/sched-consider-pi-boosting-in-setscheduler.patch +features/all/rt/block-use-cpu-chill.patch + +features/all/rt/mm-bounce-local-irq-save-nort.patch +features/all/rt/mmci-remove-bogus-irq-save.patch +features/all/rt/slub-enable-irqs-for-no-wait.patch +features/all/rt/slub_delay_ctor_on_rt.patch +features/all/rt/idle-state.patch +features/all/rt/might-sleep-check-for-idle.patch +features/all/rt/wait-simple-rework-for-completions.patch +features/all/rt/completion-use-simple-wait-queues.patch + features/all/rt/kconfig-disable-a-few-options-rt.patch features/all/rt/kconfig-preempt-rt-full.patch -