[rt] Update to 4.4-rc6-rt1 and re-enable

Ben Hutchings 2015-12-24 21:29:36 +00:00
parent 82d332eacf
commit fb638c19f7
258 changed files with 32213 additions and 1 deletion

debian/changelog

@@ -1,3 +1,9 @@
+linux (4.4~rc6-1~exp2) UNRELEASED; urgency=medium
+
+  * [rt] Update to 4.4-rc6-rt1 and re-enable
+
+ -- Ben Hutchings <ben@decadent.org.uk>  Thu, 24 Dec 2015 21:28:51 +0000
+
 linux (4.4~rc6-1~exp1) experimental; urgency=medium
 
   * New upstream release candidate


@@ -32,7 +32,7 @@ featuresets:
  rt
 
 [featureset-rt_base]
-enabled: false
+enabled: true
 
 [description]
 part-long-up: This kernel is not suitable for SMP (multi-processor,


@@ -0,0 +1,43 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 1 Mar 2013 11:17:42 +0100
Subject: futex: Ensure lock/unlock symmetry versus pi_lock and hash bucket lock
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
In exit_pi_state_list() we have the following locking construct:
spin_lock(&hb->lock);
raw_spin_lock_irq(&curr->pi_lock);
...
spin_unlock(&hb->lock);
In !RT this works, but on RT the migrate_enable() function which is
called from spin_unlock() sees atomic context due to the held pi_lock
and just decrements the migrate_disable_atomic counter of the
task. Now the next call to migrate_disable() sees the counter being
negative and issues a warning. That check should be in
migrate_enable() already.
Fix this by dropping pi_lock before unlocking hb->lock and reacquire
pi_lock after that again. This is safe as the loop code reevaluates
head again under the pi_lock.
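A minimal sketch of the resulting ordering (same names as the construct
quoted above):

    spin_lock(&hb->lock);
    raw_spin_lock_irq(&curr->pi_lock);
    ...
    if (head->next != next) {
            /* Drop pi_lock first so spin_unlock() no longer sees
             * atomic context, then re-take it for the next pass. */
            raw_spin_unlock_irq(&curr->pi_lock);
            spin_unlock(&hb->lock);
            raw_spin_lock_irq(&curr->pi_lock);
            continue;
    }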
Reported-by: Yong Zhang <yong.zhang@windriver.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/futex.c | 2 ++
1 file changed, 2 insertions(+)
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -815,7 +815,9 @@ void exit_pi_state_list(struct task_stru
* task still owns the PI-state:
*/
if (head->next != next) {
+ raw_spin_unlock_irq(&curr->pi_lock);
spin_unlock(&hb->lock);
+ raw_spin_lock_irq(&curr->pi_lock);
continue;
}


@@ -0,0 +1,178 @@
From 70f4293bd36740fd730ab25abe39281d1b312365 Mon Sep 17 00:00:00 2001
From: Russ Dill <Russ.Dill@ti.com>
Date: Wed, 5 Aug 2015 15:30:44 +0530
Subject: [PATCH 09/21] ARM: OMAP2: Drop the concept of certain power domains
not being able to lose context.
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
It isn't much of a win, and with hibernation, everything loses context.
Signed-off-by: Russ Dill <Russ.Dill@ti.com>
[j-keerthy@ti.com] ported to 4.1
Signed-off-by: Keerthy <j-keerthy@ti.com>
---
arch/arm/mach-omap2/gpio.c | 1 -
arch/arm/mach-omap2/powerdomain.c | 40 --------------------------------
arch/arm/mach-omap2/powerdomain.h | 1 -
drivers/gpio/gpio-omap.c | 36 +++++++++++-----------------
include/linux/platform_data/gpio-omap.h | 1 -
5 files changed, 14 insertions(+), 65 deletions(-)
--- a/arch/arm/mach-omap2/gpio.c
+++ b/arch/arm/mach-omap2/gpio.c
@@ -130,7 +130,6 @@ static int __init omap2_gpio_dev_init(st
}
pwrdm = omap_hwmod_get_pwrdm(oh);
- pdata->loses_context = pwrdm_can_ever_lose_context(pwrdm);
pdev = omap_device_build(name, id - 1, oh, pdata, sizeof(*pdata));
kfree(pdata);
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -1166,43 +1166,3 @@ int pwrdm_get_context_loss_count(struct
return count;
}
-/**
- * pwrdm_can_ever_lose_context - can this powerdomain ever lose context?
- * @pwrdm: struct powerdomain *
- *
- * Given a struct powerdomain * @pwrdm, returns 1 if the powerdomain
- * can lose either memory or logic context or if @pwrdm is invalid, or
- * returns 0 otherwise. This function is not concerned with how the
- * powerdomain registers are programmed (i.e., to go off or not); it's
- * concerned with whether it's ever possible for this powerdomain to
- * go off while some other part of the chip is active. This function
- * assumes that every powerdomain can go to either ON or INACTIVE.
- */
-bool pwrdm_can_ever_lose_context(struct powerdomain *pwrdm)
-{
- int i;
-
- if (!pwrdm) {
- pr_debug("powerdomain: %s: invalid powerdomain pointer\n",
- __func__);
- return 1;
- }
-
- if (pwrdm->pwrsts & PWRSTS_OFF)
- return 1;
-
- if (pwrdm->pwrsts & PWRSTS_RET) {
- if (pwrdm->pwrsts_logic_ret & PWRSTS_OFF)
- return 1;
-
- for (i = 0; i < pwrdm->banks; i++)
- if (pwrdm->pwrsts_mem_ret[i] & PWRSTS_OFF)
- return 1;
- }
-
- for (i = 0; i < pwrdm->banks; i++)
- if (pwrdm->pwrsts_mem_on[i] & PWRSTS_OFF)
- return 1;
-
- return 0;
-}
--- a/arch/arm/mach-omap2/powerdomain.h
+++ b/arch/arm/mach-omap2/powerdomain.h
@@ -244,7 +244,6 @@ int pwrdm_state_switch(struct powerdomai
int pwrdm_pre_transition(struct powerdomain *pwrdm);
int pwrdm_post_transition(struct powerdomain *pwrdm);
int pwrdm_get_context_loss_count(struct powerdomain *pwrdm);
-bool pwrdm_can_ever_lose_context(struct powerdomain *pwrdm);
extern int omap_set_pwrdm_state(struct powerdomain *pwrdm, u8 state);
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -69,7 +69,7 @@ struct gpio_bank {
struct device *dev;
bool is_mpuio;
bool dbck_flag;
- bool loses_context;
+
bool context_valid;
int stride;
u32 width;
@@ -1208,15 +1208,9 @@ static int omap_gpio_probe(struct platfo
#ifdef CONFIG_OF_GPIO
bank->chip.of_node = of_node_get(node);
#endif
- if (node) {
- if (!of_property_read_bool(node, "ti,gpio-always-on"))
- bank->loses_context = true;
- } else {
- bank->loses_context = pdata->loses_context;
-
- if (bank->loses_context)
- bank->get_context_loss_count =
- pdata->get_context_loss_count;
+ if (!node) {
+ bank->get_context_loss_count =
+ pdata->get_context_loss_count;
}
if (bank->regs->set_dataout && bank->regs->clr_dataout)
@@ -1373,7 +1367,7 @@ static int omap_gpio_runtime_resume(stru
* been initialised and so initialise it now. Also initialise
* the context loss count.
*/
- if (bank->loses_context && !bank->context_valid) {
+ if (!bank->context_valid) {
omap_gpio_init_context(bank);
if (bank->get_context_loss_count)
@@ -1394,17 +1388,15 @@ static int omap_gpio_runtime_resume(stru
writel_relaxed(bank->context.risingdetect,
bank->base + bank->regs->risingdetect);
- if (bank->loses_context) {
- if (!bank->get_context_loss_count) {
+ if (!bank->get_context_loss_count) {
+ omap_gpio_restore_context(bank);
+ } else {
+ c = bank->get_context_loss_count(bank->dev);
+ if (c != bank->context_loss_count) {
omap_gpio_restore_context(bank);
} else {
- c = bank->get_context_loss_count(bank->dev);
- if (c != bank->context_loss_count) {
- omap_gpio_restore_context(bank);
- } else {
- raw_spin_unlock_irqrestore(&bank->lock, flags);
- return 0;
- }
+ spin_unlock_irqrestore(&bank->lock, flags);
+ return 0;
}
}
@@ -1476,7 +1468,7 @@ void omap2_gpio_prepare_for_idle(int pwr
struct gpio_bank *bank;
list_for_each_entry(bank, &omap_gpio_list, node) {
- if (!BANK_USED(bank) || !bank->loses_context)
+ if (!BANK_USED(bank))
continue;
bank->power_mode = pwr_mode;
@@ -1490,7 +1482,7 @@ void omap2_gpio_resume_after_idle(void)
struct gpio_bank *bank;
list_for_each_entry(bank, &omap_gpio_list, node) {
- if (!BANK_USED(bank) || !bank->loses_context)
+ if (!BANK_USED(bank))
continue;
pm_runtime_get_sync(bank->dev);
--- a/include/linux/platform_data/gpio-omap.h
+++ b/include/linux/platform_data/gpio-omap.h
@@ -198,7 +198,6 @@ struct omap_gpio_platform_data {
int bank_width; /* GPIO bank width */
int bank_stride; /* Only needed for omap1 MPUIO */
bool dbck_flag; /* dbck required or not - True for OMAP3&4 */
- bool loses_context; /* whether the bank would ever lose context */
bool is_mpuio; /* whether the bank is of type MPUIO */
u32 non_wakeup_gpios;


@@ -0,0 +1,86 @@
From: "Yadi.hu" <yadi.hu@windriver.com>
Date: Wed, 10 Dec 2014 10:32:09 +0800
Subject: ARM: enable irq in translation/section permission fault handlers
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Probably happens on all ARM, with
CONFIG_PREEMPT_RT_FULL
CONFIG_DEBUG_ATOMIC_SLEEP
This simple program....
int main() {
*((char*)0xc0001000) = 0;
};
[ 512.742724] BUG: sleeping function called from invalid context at kernel/rtmutex.c:658
[ 512.743000] in_atomic(): 0, irqs_disabled(): 128, pid: 994, name: a
[ 512.743217] INFO: lockdep is turned off.
[ 512.743360] irq event stamp: 0
[ 512.743482] hardirqs last enabled at (0): [< (null)>] (null)
[ 512.743714] hardirqs last disabled at (0): [<c0426370>] copy_process+0x3b0/0x11c0
[ 512.744013] softirqs last enabled at (0): [<c0426370>] copy_process+0x3b0/0x11c0
[ 512.744303] softirqs last disabled at (0): [< (null)>] (null)
[ 512.744631] [<c041872c>] (unwind_backtrace+0x0/0x104)
[ 512.745001] [<c09af0c4>] (dump_stack+0x20/0x24)
[ 512.745355] [<c0462490>] (__might_sleep+0x1dc/0x1e0)
[ 512.745717] [<c09b6770>] (rt_spin_lock+0x34/0x6c)
[ 512.746073] [<c0441bf0>] (do_force_sig_info+0x34/0xf0)
[ 512.746457] [<c0442668>] (force_sig_info+0x18/0x1c)
[ 512.746829] [<c041d880>] (__do_user_fault+0x9c/0xd8)
[ 512.747185] [<c041d938>] (do_bad_area+0x7c/0x94)
[ 512.747536] [<c041d990>] (do_sect_fault+0x40/0x48)
[ 512.747898] [<c040841c>] (do_DataAbort+0x40/0xa0)
[ 512.748181] Exception stack(0xecaa1fb0 to 0xecaa1ff8)
0xc0000000 belongs to the kernel address space; a user task cannot be
allowed to access it. Under this condition, the correct result is that
the test case receives a "segmentation fault" and exits, instead of
producing the stack trace above.
The root cause is commit 02fe2845d6a8 ("avoid enabling interrupts in
prefetch/data abort handlers"): it deletes the irq enable block in the
data abort assembly code and moves it into the page/breakpoint/alignment
fault handlers instead. But the author did not enable irq in the
translation/section permission fault handlers. ARM disables irq when it
enters exception/interrupt mode; if the kernel doesn't re-enable irq, it
stays disabled during translation/section permission faults.
We see the above splat because do_force_sig_info is still called with
IRQs off, and that code eventually does a:
spin_lock_irqsave(&t->sighand->siglock, flags);
As this is architecture-independent code, and we've not seen any other
need for other architectures to have the siglock converted to a raw
lock, we can conclude that we should enable irqs for the ARM
translation/section permission exceptions.
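The shape of the fix, applied to both handlers in the diff below (sketch):

    /* The faulting context had IRQs on, so it is safe to re-enable
     * them before calling code that may take sleeping locks on RT. */
    if (interrupts_enabled(regs))
            local_irq_enable();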
Signed-off-by: Yadi.hu <yadi.hu@windriver.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/arm/mm/fault.c | 6 ++++++
1 file changed, 6 insertions(+)
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -430,6 +430,9 @@ do_translation_fault(unsigned long addr,
if (addr < TASK_SIZE)
return do_page_fault(addr, fsr, regs);
+ if (interrupts_enabled(regs))
+ local_irq_enable();
+
if (user_mode(regs))
goto bad_area;
@@ -497,6 +500,9 @@ do_translation_fault(unsigned long addr,
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
+ if (interrupts_enabled(regs))
+ local_irq_enable();
+
do_bad_area(addr, fsr, regs);
return 0;
}


@@ -0,0 +1,77 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 21 Mar 2013 19:01:05 +0100
Subject: printk: Drop the logbuf_lock more often
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
The lock is held with irqs off. The latency drops 500us+ on my arm boxes
with a "full" buffer after executing "dmesg" on the shell.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/printk/printk.c | 27 ++++++++++++++++++++++++++-
1 file changed, 26 insertions(+), 1 deletion(-)
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1203,6 +1203,7 @@ static int syslog_print_all(char __user
{
char *text;
int len = 0;
+ int attempts = 0;
text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
if (!text)
@@ -1214,7 +1215,14 @@ static int syslog_print_all(char __user
u64 seq;
u32 idx;
enum log_flags prev;
-
+ int num_msg;
+try_again:
+ attempts++;
+ if (attempts > 10) {
+ len = -EBUSY;
+ goto out;
+ }
+ num_msg = 0;
if (clear_seq < log_first_seq) {
/* messages are gone, move to first available one */
clear_seq = log_first_seq;
@@ -1235,6 +1243,14 @@ static int syslog_print_all(char __user
prev = msg->flags;
idx = log_next(idx);
seq++;
+ num_msg++;
+ if (num_msg > 5) {
+ num_msg = 0;
+ raw_spin_unlock_irq(&logbuf_lock);
+ raw_spin_lock_irq(&logbuf_lock);
+ if (clear_seq < log_first_seq)
+ goto try_again;
+ }
}
/* move first record forward until length fits into the buffer */
@@ -1248,6 +1264,14 @@ static int syslog_print_all(char __user
prev = msg->flags;
idx = log_next(idx);
seq++;
+ num_msg++;
+ if (num_msg > 5) {
+ num_msg = 0;
+ raw_spin_unlock_irq(&logbuf_lock);
+ raw_spin_lock_irq(&logbuf_lock);
+ if (clear_seq < log_first_seq)
+ goto try_again;
+ }
}
/* last message fitting into this dump */
@@ -1288,6 +1312,7 @@ static int syslog_print_all(char __user
clear_seq = log_next_seq;
clear_idx = log_next_idx;
}
+out:
raw_spin_unlock_irq(&logbuf_lock);
kfree(text);


@@ -0,0 +1,26 @@
From: Marcelo Tosatti <mtosatti@redhat.com>
Date: Wed, 8 Apr 2015 20:33:25 -0300
Subject: KVM: lapic: mark LAPIC timer handler as irqsafe
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Since the lapic timer handler only wakes up a simple waitqueue,
it can be executed from hardirq context.
Reduces average cyclictest latency by 3us.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/x86/kvm/lapic.c | 1 +
1 file changed, 1 insertion(+)
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1801,6 +1801,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc
hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
HRTIMER_MODE_ABS);
apic->lapic_timer.timer.function = apic_timer_fn;
+ apic->lapic_timer.timer.irqsafe = 1;
/*
* APIC is created enabled. This will prevent kvm_lapic_set_base from


@@ -0,0 +1,335 @@
From: Marcelo Tosatti <mtosatti@redhat.com>
Date: Wed, 8 Apr 2015 20:33:24 -0300
Subject: KVM: use simple waitqueue for vcpu->wq
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
The problem:
On -RT, an emulated LAPIC timer instance has the following path:
1) hard interrupt
2) ksoftirqd is scheduled
3) ksoftirqd wakes up vcpu thread
4) vcpu thread is scheduled
This extra context switch introduces unnecessary latency in the
LAPIC path for a KVM guest.
The solution:
Allow waking up vcpu thread from hardirq context,
thus avoiding the need for ksoftirqd to be scheduled.
Normal waitqueues make use of spinlocks, which on -RT
are sleepable locks. Therefore, waking up a waitqueue
waiter involves locking a sleeping lock, which
is not allowed from hard interrupt context.
cyclictest command line:
# cyclictest -m -n -q -p99 -l 1000000 -h60 -D 1m
This patch reduces the average latency in my tests from 14us to 11us.
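With the simple waitqueue the wakeup can be issued directly from hard
interrupt context (sketch, using the swait API this series introduces):

    struct swait_head *q = &vcpu->wq;

    /* swait uses a raw lock internally, so this is hardirq-safe */
    if (swaitqueue_active(q))
            swait_wake_interruptible(q);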
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/arm/kvm/arm.c | 8 ++++----
arch/arm/kvm/psci.c | 4 ++--
arch/powerpc/include/asm/kvm_host.h | 4 ++--
arch/powerpc/kvm/book3s_hv.c | 23 +++++++++++------------
arch/s390/include/asm/kvm_host.h | 2 +-
arch/s390/kvm/interrupt.c | 4 ++--
arch/x86/kvm/lapic.c | 6 +++---
include/linux/kvm_host.h | 4 ++--
virt/kvm/async_pf.c | 4 ++--
virt/kvm/kvm_main.c | 16 ++++++++--------
10 files changed, 37 insertions(+), 38 deletions(-)
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -498,18 +498,18 @@ static void kvm_arm_resume_guest(struct
struct kvm_vcpu *vcpu;
kvm_for_each_vcpu(i, vcpu, kvm) {
- wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
+ struct swait_head *wq = kvm_arch_vcpu_wq(vcpu);
vcpu->arch.pause = false;
- wake_up_interruptible(wq);
+ swait_wake_interruptible(wq);
}
}
static void vcpu_sleep(struct kvm_vcpu *vcpu)
{
- wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
+ struct swait_head *wq = kvm_arch_vcpu_wq(vcpu);
- wait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
+ swait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
(!vcpu->arch.pause)));
}
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -70,7 +70,7 @@ static unsigned long kvm_psci_vcpu_on(st
{
struct kvm *kvm = source_vcpu->kvm;
struct kvm_vcpu *vcpu = NULL;
- wait_queue_head_t *wq;
+ struct swait_head *wq;
unsigned long cpu_id;
unsigned long context_id;
phys_addr_t target_pc;
@@ -119,7 +119,7 @@ static unsigned long kvm_psci_vcpu_on(st
smp_mb(); /* Make sure the above is visible */
wq = kvm_arch_vcpu_wq(vcpu);
- wake_up_interruptible(wq);
+ swait_wake_interruptible(wq);
return PSCI_RET_SUCCESS;
}
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -286,7 +286,7 @@ struct kvmppc_vcore {
struct list_head runnable_threads;
struct list_head preempt_list;
spinlock_t lock;
- wait_queue_head_t wq;
+ struct swait_head wq;
spinlock_t stoltb_lock; /* protects stolen_tb and preempt_tb */
u64 stolen_tb;
u64 preempt_tb;
@@ -626,7 +626,7 @@ struct kvm_vcpu_arch {
u8 prodded;
u32 last_inst;
- wait_queue_head_t *wqp;
+ struct swait_head *wqp;
struct kvmppc_vcore *vcore;
int ret;
int trap;
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -114,11 +114,11 @@ static bool kvmppc_ipi_thread(int cpu)
static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{
int cpu;
- wait_queue_head_t *wqp;
+ struct swait_head *wqp;
wqp = kvm_arch_vcpu_wq(vcpu);
- if (waitqueue_active(wqp)) {
- wake_up_interruptible(wqp);
+ if (swaitqueue_active(wqp)) {
+ swait_wake_interruptible(wqp);
++vcpu->stat.halt_wakeup;
}
@@ -701,8 +701,8 @@ int kvmppc_pseries_do_hcall(struct kvm_v
tvcpu->arch.prodded = 1;
smp_mb();
if (vcpu->arch.ceded) {
- if (waitqueue_active(&vcpu->wq)) {
- wake_up_interruptible(&vcpu->wq);
+ if (swaitqueue_active(&vcpu->wq)) {
+ swait_wake_interruptible(&vcpu->wq);
vcpu->stat.halt_wakeup++;
}
}
@@ -1441,7 +1441,7 @@ static struct kvmppc_vcore *kvmppc_vcore
INIT_LIST_HEAD(&vcore->runnable_threads);
spin_lock_init(&vcore->lock);
spin_lock_init(&vcore->stoltb_lock);
- init_waitqueue_head(&vcore->wq);
+ init_swait_head(&vcore->wq);
vcore->preempt_tb = TB_NIL;
vcore->lpcr = kvm->arch.lpcr;
vcore->first_vcpuid = core * threads_per_subcore;
@@ -2513,10 +2513,9 @@ static void kvmppc_vcore_blocked(struct
{
struct kvm_vcpu *vcpu;
int do_sleep = 1;
+ DEFINE_SWAITER(wait);
- DEFINE_WAIT(wait);
-
- prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
+ swait_prepare(&vc->wq, &wait, TASK_INTERRUPTIBLE);
/*
* Check one last time for pending exceptions and ceded state after
@@ -2530,7 +2529,7 @@ static void kvmppc_vcore_blocked(struct
}
if (!do_sleep) {
- finish_wait(&vc->wq, &wait);
+ swait_finish(&vc->wq, &wait);
return;
}
@@ -2538,7 +2537,7 @@ static void kvmppc_vcore_blocked(struct
trace_kvmppc_vcore_blocked(vc, 0);
spin_unlock(&vc->lock);
schedule();
- finish_wait(&vc->wq, &wait);
+ swait_finish(&vc->wq, &wait);
spin_lock(&vc->lock);
vc->vcore_state = VCORE_INACTIVE;
trace_kvmppc_vcore_blocked(vc, 1);
@@ -2594,7 +2593,7 @@ static int kvmppc_run_vcpu(struct kvm_ru
kvmppc_start_thread(vcpu, vc);
trace_kvm_guest_enter(vcpu);
} else if (vc->vcore_state == VCORE_SLEEPING) {
- wake_up(&vc->wq);
+ swait_wake(&vc->wq);
}
}
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -427,7 +427,7 @@ struct kvm_s390_irq_payload {
struct kvm_s390_local_interrupt {
spinlock_t lock;
struct kvm_s390_float_interrupt *float_int;
- wait_queue_head_t *wq;
+ struct swait_head *wq;
atomic_t *cpuflags;
DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
struct kvm_s390_irq_payload irq;
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -868,13 +868,13 @@ int kvm_s390_handle_wait(struct kvm_vcpu
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
- if (waitqueue_active(&vcpu->wq)) {
+ if (swaitqueue_active(&vcpu->wq)) {
/*
* The vcpu gave up the cpu voluntarily, mark it as a good
* yield-candidate.
*/
vcpu->preempted = true;
- wake_up_interruptible(&vcpu->wq);
+ swait_wake_interruptible(&vcpu->wq);
vcpu->stat.halt_wakeup++;
}
}
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1195,7 +1195,7 @@ static void apic_update_lvtt(struct kvm_
static void apic_timer_expired(struct kvm_lapic *apic)
{
struct kvm_vcpu *vcpu = apic->vcpu;
- wait_queue_head_t *q = &vcpu->wq;
+ struct swait_head *q = &vcpu->wq;
struct kvm_timer *ktimer = &apic->lapic_timer;
if (atomic_read(&apic->lapic_timer.pending))
@@ -1204,8 +1204,8 @@ static void apic_timer_expired(struct kv
atomic_inc(&apic->lapic_timer.pending);
kvm_set_pending_timer(vcpu);
- if (waitqueue_active(q))
- wake_up_interruptible(q);
+ if (swaitqueue_active(q))
+ swait_wake_interruptible(q);
if (apic_lvtt_tscdeadline(apic))
ktimer->expired_tscdeadline = ktimer->tscdeadline;
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -243,7 +243,7 @@ struct kvm_vcpu {
int fpu_active;
int guest_fpu_loaded, guest_xcr0_loaded;
unsigned char fpu_counter;
- wait_queue_head_t wq;
+ struct swait_head wq;
struct pid *pid;
int sigset_active;
sigset_t sigset;
@@ -794,7 +794,7 @@ static inline bool kvm_arch_has_assigned
}
#endif
-static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
+static inline struct swait_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
return vcpu->arch.wqp;
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -98,8 +98,8 @@ static void async_pf_execute(struct work
* This memory barrier pairs with prepare_to_wait's set_current_state()
*/
smp_mb();
- if (waitqueue_active(&vcpu->wq))
- wake_up_interruptible(&vcpu->wq);
+ if (swaitqueue_active(&vcpu->wq))
+ swait_wake_interruptible(&vcpu->wq);
mmput(mm);
kvm_put_kvm(vcpu->kvm);
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -227,7 +227,7 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu,
vcpu->vcpu_id = id;
vcpu->pid = NULL;
vcpu->halt_poll_ns = 0;
- init_waitqueue_head(&vcpu->wq);
+ init_swait_head(&vcpu->wq);
kvm_async_pf_vcpu_init(vcpu);
vcpu->pre_pcpu = -1;
@@ -1999,7 +1999,7 @@ static int kvm_vcpu_check_block(struct k
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
ktime_t start, cur;
- DEFINE_WAIT(wait);
+ DEFINE_SWAITER(wait);
bool waited = false;
u64 block_ns;
@@ -2024,7 +2024,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcp
kvm_arch_vcpu_blocking(vcpu);
for (;;) {
- prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
+ swait_prepare(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
if (kvm_vcpu_check_block(vcpu) < 0)
break;
@@ -2033,7 +2033,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcp
schedule();
}
- finish_wait(&vcpu->wq, &wait);
+ swait_finish(&vcpu->wq, &wait);
cur = ktime_get();
kvm_arch_vcpu_unblocking(vcpu);
@@ -2065,11 +2065,11 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu
{
int me;
int cpu = vcpu->cpu;
- wait_queue_head_t *wqp;
+ struct swait_head *wqp;
wqp = kvm_arch_vcpu_wq(vcpu);
- if (waitqueue_active(wqp)) {
- wake_up_interruptible(wqp);
+ if (swaitqueue_active(wqp)) {
+ swait_wake_interruptible(wqp);
++vcpu->stat.halt_wakeup;
}
@@ -2170,7 +2170,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *m
continue;
if (vcpu == me)
continue;
- if (waitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
+ if (swaitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
continue;
if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
continue;


@@ -0,0 +1,174 @@
From: Steven Rostedt <rostedt@goodmis.org>
Date: Wed, 13 Feb 2013 09:26:05 -0500
Subject: acpi/rt: Convert acpi_gbl_hardware lock back to a raw_spinlock_t
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
We hit the following bug with 3.6-rt:
[ 5.898990] BUG: scheduling while atomic: swapper/3/0/0x00000002
[ 5.898991] no locks held by swapper/3/0.
[ 5.898993] Modules linked in:
[ 5.898996] Pid: 0, comm: swapper/3 Not tainted 3.6.11-rt28.19.el6rt.x86_64.debug #1
[ 5.898997] Call Trace:
[ 5.899011] [<ffffffff810804e7>] __schedule_bug+0x67/0x90
[ 5.899028] [<ffffffff81577923>] __schedule+0x793/0x7a0
[ 5.899032] [<ffffffff810b4e40>] ? debug_rt_mutex_print_deadlock+0x50/0x200
[ 5.899034] [<ffffffff81577b89>] schedule+0x29/0x70
[ 5.899036] BUG: scheduling while atomic: swapper/7/0/0x00000002
[ 5.899037] no locks held by swapper/7/0.
[ 5.899039] [<ffffffff81578525>] rt_spin_lock_slowlock+0xe5/0x2f0
[ 5.899040] Modules linked in:
[ 5.899041]
[ 5.899045] [<ffffffff81579a58>] ? _raw_spin_unlock_irqrestore+0x38/0x90
[ 5.899046] Pid: 0, comm: swapper/7 Not tainted 3.6.11-rt28.19.el6rt.x86_64.debug #1
[ 5.899047] Call Trace:
[ 5.899049] [<ffffffff81578bc6>] rt_spin_lock+0x16/0x40
[ 5.899052] [<ffffffff810804e7>] __schedule_bug+0x67/0x90
[ 5.899054] [<ffffffff8157d3f0>] ? notifier_call_chain+0x80/0x80
[ 5.899056] [<ffffffff81577923>] __schedule+0x793/0x7a0
[ 5.899059] [<ffffffff812f2034>] acpi_os_acquire_lock+0x1f/0x23
[ 5.899062] [<ffffffff810b4e40>] ? debug_rt_mutex_print_deadlock+0x50/0x200
[ 5.899068] [<ffffffff8130be64>] acpi_write_bit_register+0x33/0xb0
[ 5.899071] [<ffffffff81577b89>] schedule+0x29/0x70
[ 5.899072] [<ffffffff8130be13>] ? acpi_read_bit_register+0x33/0x51
[ 5.899074] [<ffffffff81578525>] rt_spin_lock_slowlock+0xe5/0x2f0
[ 5.899077] [<ffffffff8131d1fc>] acpi_idle_enter_bm+0x8a/0x28e
[ 5.899079] [<ffffffff81579a58>] ? _raw_spin_unlock_irqrestore+0x38/0x90
[ 5.899081] [<ffffffff8107e5da>] ? this_cpu_load+0x1a/0x30
[ 5.899083] [<ffffffff81578bc6>] rt_spin_lock+0x16/0x40
[ 5.899087] [<ffffffff8144c759>] cpuidle_enter+0x19/0x20
[ 5.899088] [<ffffffff8157d3f0>] ? notifier_call_chain+0x80/0x80
[ 5.899090] [<ffffffff8144c777>] cpuidle_enter_state+0x17/0x50
[ 5.899092] [<ffffffff812f2034>] acpi_os_acquire_lock+0x1f/0x23
[ 5.899094] [<ffffffff8144d1a1>] cpuidle...
[ 5.899101] [<ffffffff8130be13>] ? acpi_read_bit_register+0x33/0x51
As the acpi code disables interrupts in acpi_idle_enter_bm and calls
code that grabs the acpi lock, it causes issues, because on RT the lock
is currently a sleeping lock.
The lock was converted from a raw to a sleeping lock due to some
previous issues, and tests that showed it didn't seem to matter.
Unfortunately, it did matter for one of our boxes.
This patch converts the lock back to a raw lock. I've run this code on a
few of my own machines, one being my laptop, which uses ACPI quite
extensively. I've been able to suspend and resume without issues.
[ tglx: Made the change exclusive for acpi_gbl_hardware_lock ]
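With the raw-lock helpers added to aclinux.h below, the hardware-lock
call sites then look like this (sketch, taken from the hwregs.c hunk):

    raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);
    /* program the PM1 fixed-event registers */
    status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
                                    ACPI_BITMASK_ALL_FIXED_STATUS);
    raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);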
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Cc: John Kacur <jkacur@gmail.com>
Cc: Clark Williams <clark@redhat.com>
Link: http://lkml.kernel.org/r/1360765565.23152.5.camel@gandalf.local.home
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/acpi/acpica/acglobal.h | 2 +-
drivers/acpi/acpica/hwregs.c | 4 ++--
drivers/acpi/acpica/hwxface.c | 4 ++--
drivers/acpi/acpica/utmutex.c | 4 ++--
include/acpi/platform/aclinux.h | 15 +++++++++++++++
5 files changed, 22 insertions(+), 7 deletions(-)
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -116,7 +116,7 @@ ACPI_GLOBAL(u8, acpi_gbl_global_lock_pen
* interrupt level
*/
ACPI_GLOBAL(acpi_spinlock, acpi_gbl_gpe_lock); /* For GPE data structs and registers */
-ACPI_GLOBAL(acpi_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */
+ACPI_GLOBAL(acpi_raw_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */
ACPI_GLOBAL(acpi_spinlock, acpi_gbl_reference_count_lock);
/* Mutex for _OSI support */
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -269,14 +269,14 @@ acpi_status acpi_hw_clear_acpi_status(vo
ACPI_BITMASK_ALL_FIXED_STATUS,
ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));
- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
+ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);
/* Clear the fixed events in PM1 A/B */
status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
ACPI_BITMASK_ALL_FIXED_STATUS);
- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
+ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);
if (ACPI_FAILURE(status)) {
goto exit;
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -374,7 +374,7 @@ acpi_status acpi_write_bit_register(u32
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
+ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);
/*
* At this point, we know that the parent register is one of the
@@ -435,7 +435,7 @@ acpi_status acpi_write_bit_register(u32
unlock_and_exit:
- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
+ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);
return_ACPI_STATUS(status);
}
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -88,7 +88,7 @@ acpi_status acpi_ut_mutex_initialize(voi
return_ACPI_STATUS (status);
}
- status = acpi_os_create_lock (&acpi_gbl_hardware_lock);
+ status = acpi_os_create_raw_lock (&acpi_gbl_hardware_lock);
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
@@ -156,7 +156,7 @@ void acpi_ut_mutex_terminate(void)
/* Delete the spinlocks */
acpi_os_delete_lock(acpi_gbl_gpe_lock);
- acpi_os_delete_lock(acpi_gbl_hardware_lock);
+ acpi_os_delete_raw_lock(acpi_gbl_hardware_lock);
acpi_os_delete_lock(acpi_gbl_reference_count_lock);
/* Delete the reader/writer lock */
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -127,6 +127,7 @@
#define acpi_cache_t struct kmem_cache
#define acpi_spinlock spinlock_t *
+#define acpi_raw_spinlock raw_spinlock_t *
#define acpi_cpu_flags unsigned long
/* Use native linux version of acpi_os_allocate_zeroed */
@@ -145,6 +146,20 @@
#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id
#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock
+#define acpi_os_create_raw_lock(__handle) \
+({ \
+ raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
+ \
+ if (lock) { \
+ *(__handle) = lock; \
+ raw_spin_lock_init(*(__handle)); \
+ } \
+ lock ? AE_OK : AE_NO_MEMORY; \
+ })
+
+#define acpi_os_delete_raw_lock(__handle) kfree(__handle)
+
+
/*
* OSL interfaces used by debugger/disassembler
*/


@@ -0,0 +1,104 @@
From: Anders Roxell <anders.roxell@linaro.org>
Date: Thu, 14 May 2015 17:52:17 +0200
Subject: arch/arm64: Add lazy preempt support
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
arm64 is missing support for PREEMPT_RT. The main feature which is
lacking is support for lazy preemption. The arch-specific entry code,
thread information structure definitions, and associated data tables
have to be extended to provide this support. Then the Kconfig file has
to be extended to indicate the support is available, and also to
indicate that support for full RT preemption is now available.
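In C terms, the el1 return path below behaves roughly like this (an
editor's sketch of the entry.S hunk, not real code; ti and flags stand
for the current thread_info and its flags):

    if (ti->preempt_count == 0) {
            if (flags & _TIF_NEED_RESCHED)
                    el1_preempt();          /* always preempt */
            else if (ti->preempt_lazy_count == 0 &&
                     (flags & _TIF_NEED_RESCHED_LAZY))
                    el1_preempt();          /* lazy preempt only when no
                                             * lazy section is active */
    }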
Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
---
arch/arm64/Kconfig | 1 +
arch/arm64/include/asm/thread_info.h | 3 +++
arch/arm64/kernel/asm-offsets.c | 1 +
arch/arm64/kernel/entry.S | 13 ++++++++++---
4 files changed, 15 insertions(+), 3 deletions(-)
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -76,6 +76,7 @@ config ARM64
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_RCU_TABLE_FREE
+ select HAVE_PREEMPT_LAZY
select HAVE_SYSCALL_TRACEPOINTS
select IOMMU_DMA if IOMMU_SUPPORT
select IRQ_DOMAIN
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -49,6 +49,7 @@ struct thread_info {
mm_segment_t addr_limit; /* address limit */
struct task_struct *task; /* main task structure */
int preempt_count; /* 0 => preemptable, <0 => bug */
+ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
int cpu; /* cpu */
};
@@ -103,6 +104,7 @@ static inline struct thread_info *curren
#define TIF_NEED_RESCHED 1
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
+#define TIF_NEED_RESCHED_LAZY 4
#define TIF_NOHZ 7
#define TIF_SYSCALL_TRACE 8
#define TIF_SYSCALL_AUDIT 9
@@ -118,6 +120,7 @@ static inline struct thread_info *curren
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE)
+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
#define _TIF_NOHZ (1 << TIF_NOHZ)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -35,6 +35,7 @@ int main(void)
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
+ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -363,11 +363,16 @@ ENDPROC(el1_sync)
#ifdef CONFIG_PREEMPT
get_thread_info tsk
ldr w24, [tsk, #TI_PREEMPT] // get preempt count
- cbnz w24, 1f // preempt count != 0
+ cbnz w24, 2f // preempt count != 0
ldr x0, [tsk, #TI_FLAGS] // get flags
- tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
- bl el1_preempt
+ tbnz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
+
+ ldr w24, [tsk, #TI_PREEMPT_LAZY] // get preempt lazy count
+ cbnz w24, 2f // preempt lazy count != 0
+ tbz x0, #TIF_NEED_RESCHED_LAZY, 2f // needs rescheduling?
1:
+ bl el1_preempt
+2:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_on
@@ -381,6 +386,7 @@ ENDPROC(el1_irq)
1: bl preempt_schedule_irq // irq en/disable is done inside
ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS
tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
+ tbnz x0, #TIF_NEED_RESCHED_LAZY, 1b // needs rescheduling?
ret x24
#endif
@@ -625,6 +631,7 @@ ENDPROC(cpu_switch_to)
*/
work_pending:
tbnz x1, #TIF_NEED_RESCHED, work_resched
+ tbnz x1, #TIF_NEED_RESCHED_LAZY, work_resched
/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
ldr x2, [sp, #S_PSTATE]
mov x0, sp // 'regs'


@@ -0,0 +1,130 @@
From: Benedikt Spranger <b.spranger@linutronix.de>
Date: Sat, 6 Mar 2010 17:47:10 +0100
Subject: ARM: AT91: PIT: Remove irq handler when clock event is unused
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Setup and remove the interrupt handler in clock event mode selection.
This avoids calling the (shared) interrupt handler when the device is
not used.
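The resulting pairing, sketched for the PIT (mirroring the hunks below):

    /* set_periodic: install the shared handler only while in use */
    ret = request_irq(data->irq, at91sam926x_pit_interrupt,
                      IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
                      "at91_tick", data);

    /* shutdown: remove it again; the clocksource keeps running */
    free_irq(atmel_pit_irq, data);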
Signed-off-by: Benedikt Spranger <b.spranger@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[bigeasy: redo the patch with NR_IRQS_LEGACY which is probably required since
commit 8fe82a55 ("ARM: at91: sparse irq support") which is included since v3.6.
Patch based on what Sami Pietikäinen <Sami.Pietikainen@wapice.com> suggested].
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/clocksource/timer-atmel-pit.c | 15 ++++++++-------
drivers/clocksource/timer-atmel-st.c | 28 ++++++++++++++++++++--------
2 files changed, 28 insertions(+), 15 deletions(-)
--- a/drivers/clocksource/timer-atmel-pit.c
+++ b/drivers/clocksource/timer-atmel-pit.c
@@ -96,6 +96,7 @@ static int pit_clkevt_shutdown(struct cl
/* disable irq, leaving the clocksource active */
pit_write(data->base, AT91_PIT_MR, (data->cycle - 1) | AT91_PIT_PITEN);
+ free_irq(atmel_pit_irq, data);
return 0;
}
@@ -105,6 +106,13 @@ static int pit_clkevt_shutdown(struct cl
static int pit_clkevt_set_periodic(struct clock_event_device *dev)
{
struct pit_data *data = clkevt_to_pit_data(dev);
+ int ret;
+
+ ret = request_irq(data->irq, at91sam926x_pit_interrupt,
+ IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
+ "at91_tick", data);
+ if (ret)
+ panic(pr_fmt("Unable to setup IRQ\n"));
/* update clocksource counter */
data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR));
@@ -206,13 +214,6 @@ static void __init at91sam926x_pit_commo
data->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
clocksource_register_hz(&data->clksrc, pit_rate);
- /* Set up irq handler */
- ret = request_irq(data->irq, at91sam926x_pit_interrupt,
- IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
- "at91_tick", data);
- if (ret)
- panic(pr_fmt("Unable to setup IRQ\n"));
-
/* Set up and register clockevents */
data->clkevt.name = "pit";
data->clkevt.features = CLOCK_EVT_FEAT_PERIODIC;
--- a/drivers/clocksource/timer-atmel-st.c
+++ b/drivers/clocksource/timer-atmel-st.c
@@ -115,18 +115,29 @@ static void clkdev32k_disable_and_flush_
last_crtr = read_CRTR();
}
+static int atmel_st_irq;
+
static int clkevt32k_shutdown(struct clock_event_device *evt)
{
clkdev32k_disable_and_flush_irq();
irqmask = 0;
regmap_write(regmap_st, AT91_ST_IER, irqmask);
+ free_irq(atmel_st_irq, regmap_st);
return 0;
}
static int clkevt32k_set_oneshot(struct clock_event_device *dev)
{
+ int ret;
+
clkdev32k_disable_and_flush_irq();
+ ret = request_irq(atmel_st_irq, at91rm9200_timer_interrupt,
+ IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
+ "at91_tick", regmap_st);
+ if (ret)
+ panic(pr_fmt("Unable to setup IRQ\n"));
+
/*
* ALM for oneshot irqs, set by next_event()
* before 32 seconds have passed.
@@ -139,8 +150,16 @@ static int clkevt32k_set_oneshot(struct
static int clkevt32k_set_periodic(struct clock_event_device *dev)
{
+ int ret;
+
clkdev32k_disable_and_flush_irq();
+ ret = request_irq(atmel_st_irq, at91rm9200_timer_interrupt,
+ IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
+ "at91_tick", regmap_st);
+ if (ret)
+ panic(pr_fmt("Unable to setup IRQ\n"));
+
/* PIT for periodic irqs; fixed rate of 1/HZ */
irqmask = AT91_ST_PITS;
regmap_write(regmap_st, AT91_ST_PIMR, timer_latch);
@@ -198,7 +217,7 @@ static void __init atmel_st_timer_init(s
{
struct clk *sclk;
unsigned int sclk_rate, val;
- int irq, ret;
+ int ret;
regmap_st = syscon_node_to_regmap(node);
if (IS_ERR(regmap_st))
@@ -214,13 +233,6 @@ static void __init atmel_st_timer_init(s
if (!irq)
panic(pr_fmt("Unable to get IRQ from DT\n"));
- /* Make IRQs happen for the system timer */
- ret = request_irq(irq, at91rm9200_timer_interrupt,
- IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
- "at91_tick", regmap_st);
- if (ret)
- panic(pr_fmt("Unable to setup IRQ\n"));
-
sclk = of_clk_get(node, 0);
if (IS_ERR(sclk))
panic(pr_fmt("Unable to get slow clock\n"));

View File

@@ -0,0 +1,33 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 1 May 2010 18:29:35 +0200
Subject: ARM: at91: tclib: Default to tclib timer for RT
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
RT is not too happy about the shared timer interrupt in AT91
devices. Default to tclib timer for RT.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/misc/Kconfig | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -54,6 +54,7 @@ config AD525X_DPOT_SPI
config ATMEL_TCLIB
bool "Atmel AT32/AT91 Timer/Counter Library"
depends on (AVR32 || ARCH_AT91)
+ default y if PREEMPT_RT_FULL
help
Select this if you want a library to allocate the Timer/Counter
blocks found on many Atmel processors. This facilitates using
@@ -86,7 +87,7 @@ config ATMEL_TCB_CLKSRC_BLOCK
config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
bool "TC Block use 32 KiHz clock"
depends on ATMEL_TCB_CLKSRC
- default y
+ default y if !PREEMPT_RT_FULL
help
Select this to use 32 KiHz base clock rate as TC block clock
source for clock events.

View File

@@ -0,0 +1,408 @@
From: Frank Rowand <frank.rowand@am.sony.com>
Date: Mon, 19 Sep 2011 14:51:14 -0700
Subject: arm: Convert arm boot_lock to raw
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
The arm boot_lock is used by the secondary processor startup code. The locking
task is the idle thread, which has idle->sched_class == &idle_sched_class.
idle_sched_class->enqueue_task == NULL, so if the idle task blocks on the
lock, the attempt to wake it when the lock becomes available will fail:
try_to_wake_up()
...
activate_task()
enqueue_task()
p->sched_class->enqueue_task(rq, p, flags)
Fix by converting boot_lock to a raw spin lock.
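A raw spinlock keeps real spinning semantics on RT, so the idle task
never tries to sleep on it. The conversion is mechanical and repeated
for each platform below (sketch):

    static DEFINE_RAW_SPINLOCK(boot_lock);

    /* secondary bringup: spins even on RT, never schedules */
    raw_spin_lock(&boot_lock);
    ...
    raw_spin_unlock(&boot_lock);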
Signed-off-by: Frank Rowand <frank.rowand@am.sony.com>
Link: http://lkml.kernel.org/r/4E77B952.3010606@am.sony.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/arm/mach-exynos/platsmp.c | 12 ++++++------
arch/arm/mach-hisi/platmcpm.c | 22 +++++++++++-----------
arch/arm/mach-omap2/omap-smp.c | 10 +++++-----
arch/arm/mach-prima2/platsmp.c | 10 +++++-----
arch/arm/mach-qcom/platsmp.c | 10 +++++-----
arch/arm/mach-spear/platsmp.c | 10 +++++-----
arch/arm/mach-sti/platsmp.c | 10 +++++-----
arch/arm/plat-versatile/platsmp.c | 10 +++++-----
8 files changed, 47 insertions(+), 47 deletions(-)
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -230,7 +230,7 @@ static void __iomem *scu_base_addr(void)
return (void __iomem *)(S5P_VA_SCU);
}
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
static void exynos_secondary_init(unsigned int cpu)
{
@@ -243,8 +243,8 @@ static void exynos_secondary_init(unsign
/*
* Synchronise with the boot thread.
*/
- spin_lock(&boot_lock);
- spin_unlock(&boot_lock);
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
}
int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr)
@@ -308,7 +308,7 @@ static int exynos_boot_secondary(unsigne
* Set synchronisation state between this boot processor
* and the secondary one
*/
- spin_lock(&boot_lock);
+ raw_spin_lock(&boot_lock);
/*
* The secondary processor is waiting to be released from
@@ -335,7 +335,7 @@ static int exynos_boot_secondary(unsigne
if (timeout == 0) {
printk(KERN_ERR "cpu1 power enable failed");
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
return -ETIMEDOUT;
}
}
@@ -381,7 +381,7 @@ static int exynos_boot_secondary(unsigne
* calibrations, then wait for it to finish
*/
fail:
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
return pen_release != -1 ? ret : 0;
}
--- a/arch/arm/mach-hisi/platmcpm.c
+++ b/arch/arm/mach-hisi/platmcpm.c
@@ -61,7 +61,7 @@
static void __iomem *sysctrl, *fabric;
static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER];
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
static u32 fabric_phys_addr;
/*
* [0]: bootwrapper physical address
@@ -113,7 +113,7 @@ static int hip04_boot_secondary(unsigned
if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
return -EINVAL;
- spin_lock_irq(&boot_lock);
+ raw_spin_lock_irq(&boot_lock);
if (hip04_cpu_table[cluster][cpu])
goto out;
@@ -147,7 +147,7 @@ static int hip04_boot_secondary(unsigned
out:
hip04_cpu_table[cluster][cpu]++;
- spin_unlock_irq(&boot_lock);
+ raw_spin_unlock_irq(&boot_lock);
return 0;
}
@@ -162,11 +162,11 @@ static void hip04_cpu_die(unsigned int l
cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
- spin_lock(&boot_lock);
+ raw_spin_lock(&boot_lock);
hip04_cpu_table[cluster][cpu]--;
if (hip04_cpu_table[cluster][cpu] == 1) {
/* A power_up request went ahead of us. */
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
return;
} else if (hip04_cpu_table[cluster][cpu] > 1) {
pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu);
@@ -174,7 +174,7 @@ static void hip04_cpu_die(unsigned int l
}
last_man = hip04_cluster_is_down(cluster);
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
if (last_man) {
/* Since it's Cortex A15, disable L2 prefetching. */
asm volatile(
@@ -203,7 +203,7 @@ static int hip04_cpu_kill(unsigned int l
cpu >= HIP04_MAX_CPUS_PER_CLUSTER);
count = TIMEOUT_MSEC / POLL_MSEC;
- spin_lock_irq(&boot_lock);
+ raw_spin_lock_irq(&boot_lock);
for (tries = 0; tries < count; tries++) {
if (hip04_cpu_table[cluster][cpu])
goto err;
@@ -211,10 +211,10 @@ static int hip04_cpu_kill(unsigned int l
data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
if (data & CORE_WFI_STATUS(cpu))
break;
- spin_unlock_irq(&boot_lock);
+ raw_spin_unlock_irq(&boot_lock);
/* Wait for clean L2 when the whole cluster is down. */
msleep(POLL_MSEC);
- spin_lock_irq(&boot_lock);
+ raw_spin_lock_irq(&boot_lock);
}
if (tries >= count)
goto err;
@@ -231,10 +231,10 @@ static int hip04_cpu_kill(unsigned int l
goto err;
if (hip04_cluster_is_down(cluster))
hip04_set_snoop_filter(cluster, 0);
- spin_unlock_irq(&boot_lock);
+ raw_spin_unlock_irq(&boot_lock);
return 1;
err:
- spin_unlock_irq(&boot_lock);
+ raw_spin_unlock_irq(&boot_lock);
return 0;
}
#endif
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -43,7 +43,7 @@
/* SCU base address */
static void __iomem *scu_base;
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
void __iomem *omap4_get_scu_base(void)
{
@@ -74,8 +74,8 @@ static void omap4_secondary_init(unsigne
/*
* Synchronise with the boot thread.
*/
- spin_lock(&boot_lock);
- spin_unlock(&boot_lock);
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
}
static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -89,7 +89,7 @@ static int omap4_boot_secondary(unsigned
* Set synchronisation state between this boot processor
* and the secondary one
*/
- spin_lock(&boot_lock);
+ raw_spin_lock(&boot_lock);
/*
* Update the AuxCoreBoot0 with boot state for secondary core.
@@ -166,7 +166,7 @@ static int omap4_boot_secondary(unsigned
* Now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
return 0;
}
--- a/arch/arm/mach-prima2/platsmp.c
+++ b/arch/arm/mach-prima2/platsmp.c
@@ -22,7 +22,7 @@
static void __iomem *clk_base;
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
static void sirfsoc_secondary_init(unsigned int cpu)
{
@@ -36,8 +36,8 @@ static void sirfsoc_secondary_init(unsig
/*
* Synchronise with the boot thread.
*/
- spin_lock(&boot_lock);
- spin_unlock(&boot_lock);
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
}
static const struct of_device_id clk_ids[] = {
@@ -75,7 +75,7 @@ static int sirfsoc_boot_secondary(unsign
/* make sure write buffer is drained */
mb();
- spin_lock(&boot_lock);
+ raw_spin_lock(&boot_lock);
/*
* The secondary processor is waiting to be released from
@@ -107,7 +107,7 @@ static int sirfsoc_boot_secondary(unsign
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
--- a/arch/arm/mach-qcom/platsmp.c
+++ b/arch/arm/mach-qcom/platsmp.c
@@ -46,7 +46,7 @@
extern void secondary_startup_arm(void);
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
#ifdef CONFIG_HOTPLUG_CPU
static void qcom_cpu_die(unsigned int cpu)
@@ -60,8 +60,8 @@ static void qcom_secondary_init(unsigned
/*
* Synchronise with the boot thread.
*/
- spin_lock(&boot_lock);
- spin_unlock(&boot_lock);
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
}
static int scss_release_secondary(unsigned int cpu)
@@ -284,7 +284,7 @@ static int qcom_boot_secondary(unsigned
* set synchronisation state between this boot processor
* and the secondary one
*/
- spin_lock(&boot_lock);
+ raw_spin_lock(&boot_lock);
/*
* Send the secondary CPU a soft interrupt, thereby causing
@@ -297,7 +297,7 @@ static int qcom_boot_secondary(unsigned
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
return ret;
}
--- a/arch/arm/mach-spear/platsmp.c
+++ b/arch/arm/mach-spear/platsmp.c
@@ -32,7 +32,7 @@ static void write_pen_release(int val)
sync_cache_w(&pen_release);
}
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
@@ -47,8 +47,8 @@ static void spear13xx_secondary_init(uns
/*
* Synchronise with the boot thread.
*/
- spin_lock(&boot_lock);
- spin_unlock(&boot_lock);
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
}
static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -59,7 +59,7 @@ static int spear13xx_boot_secondary(unsi
* set synchronisation state between this boot processor
* and the secondary one
*/
- spin_lock(&boot_lock);
+ raw_spin_lock(&boot_lock);
/*
* The secondary processor is waiting to be released from
@@ -84,7 +84,7 @@ static int spear13xx_boot_secondary(unsi
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
--- a/arch/arm/mach-sti/platsmp.c
+++ b/arch/arm/mach-sti/platsmp.c
@@ -35,7 +35,7 @@ static void write_pen_release(int val)
sync_cache_w(&pen_release);
}
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
static void sti_secondary_init(unsigned int cpu)
{
@@ -48,8 +48,8 @@ static void sti_secondary_init(unsigned
/*
* Synchronise with the boot thread.
*/
- spin_lock(&boot_lock);
- spin_unlock(&boot_lock);
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
}
static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -60,7 +60,7 @@ static int sti_boot_secondary(unsigned i
* set synchronisation state between this boot processor
* and the secondary one
*/
- spin_lock(&boot_lock);
+ raw_spin_lock(&boot_lock);
/*
* The secondary processor is waiting to be released from
@@ -91,7 +91,7 @@ static int sti_boot_secondary(unsigned i
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
--- a/arch/arm/plat-versatile/platsmp.c
+++ b/arch/arm/plat-versatile/platsmp.c
@@ -30,7 +30,7 @@ static void write_pen_release(int val)
sync_cache_w(&pen_release);
}
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
void versatile_secondary_init(unsigned int cpu)
{
@@ -43,8 +43,8 @@ void versatile_secondary_init(unsigned i
/*
* Synchronise with the boot thread.
*/
- spin_lock(&boot_lock);
- spin_unlock(&boot_lock);
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
}
int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
@@ -55,7 +55,7 @@ int versatile_boot_secondary(unsigned in
* Set synchronisation state between this boot processor
* and the secondary one
*/
- spin_lock(&boot_lock);
+ raw_spin_lock(&boot_lock);
/*
* This is really belt and braces; we hold unintended secondary
@@ -85,7 +85,7 @@ int versatile_boot_secondary(unsigned in
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}


@@ -0,0 +1,144 @@
Subject: arm: Enable highmem for rt
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 13 Feb 2013 11:03:11 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
fixup highmem for ARM.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/arm/include/asm/switch_to.h | 8 ++++++
arch/arm/mm/highmem.c | 45 ++++++++++++++++++++++++++++++++++-----
include/linux/highmem.h | 1 +
3 files changed, 49 insertions(+), 5 deletions(-)
--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h
@@ -3,6 +3,13 @@
#include <linux/thread_info.h>
+#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
+#else
+static inline void
+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
+#endif
+
/*
* For v7 SMP cores running a preemptible kernel we may be pre-empted
* during a TLB maintenance operation, so execute an inner-shareable dsb
@@ -25,6 +32,7 @@ extern struct task_struct *__switch_to(s
#define switch_to(prev,next,last) \
do { \
__complete_pending_tlbi(); \
+ switch_kmaps(prev, next); \
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -54,12 +54,13 @@ EXPORT_SYMBOL(kunmap);
void *kmap_atomic(struct page *page)
{
+ pte_t pte = mk_pte(page, kmap_prot);
unsigned int idx;
unsigned long vaddr;
void *kmap;
int type;
- preempt_disable();
+ preempt_disable_nort();
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
@@ -93,7 +94,10 @@ void *kmap_atomic(struct page *page)
* in place, so the contained TLB flush ensures the TLB is updated
* with the new mapping.
*/
- set_fixmap_pte(idx, mk_pte(page, kmap_prot));
+#ifdef CONFIG_PREEMPT_RT_FULL
+ current->kmap_pte[type] = pte;
+#endif
+ set_fixmap_pte(idx, pte);
return (void *)vaddr;
}
@@ -110,6 +114,9 @@ void __kunmap_atomic(void *kvaddr)
if (cache_is_vivt())
__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+#ifdef CONFIG_PREEMPT_RT_FULL
+ current->kmap_pte[type] = __pte(0);
+#endif
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(vaddr != __fix_to_virt(idx));
#else
@@ -122,17 +129,18 @@ void __kunmap_atomic(void *kvaddr)
kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
}
pagefault_enable();
- preempt_enable();
+ preempt_enable_nort();
}
EXPORT_SYMBOL(__kunmap_atomic);
void *kmap_atomic_pfn(unsigned long pfn)
{
+ pte_t pte = pfn_pte(pfn, kmap_prot);
unsigned long vaddr;
int idx, type;
struct page *page = pfn_to_page(pfn);
- preempt_disable();
+ preempt_disable_nort();
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
@@ -143,7 +151,34 @@ void *kmap_atomic_pfn(unsigned long pfn)
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
#endif
- set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
+#ifdef CONFIG_PREEMPT_RT_FULL
+ current->kmap_pte[type] = pte;
+#endif
+ set_fixmap_pte(idx, pte);
return (void *)vaddr;
}
+#if defined CONFIG_PREEMPT_RT_FULL
+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
+{
+ int i;
+
+ /*
+ * Clear @prev's kmap_atomic mappings
+ */
+ for (i = 0; i < prev_p->kmap_idx; i++) {
+ int idx = i + KM_TYPE_NR * smp_processor_id();
+
+ set_fixmap_pte(idx, __pte(0));
+ }
+ /*
+ * Restore @next_p's kmap_atomic mappings
+ */
+ for (i = 0; i < next_p->kmap_idx; i++) {
+ int idx = i + KM_TYPE_NR * smp_processor_id();
+
+ if (!pte_none(next_p->kmap_pte[i]))
+ set_fixmap_pte(idx, next_p->kmap_pte[i]);
+ }
+}
+#endif
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -7,6 +7,7 @@
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
+#include <linux/sched.h>
#include <asm/cacheflush.h>


@@ -0,0 +1,28 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 11 Mar 2013 21:37:27 +0100
Subject: arm/highmem: Flush tlb on unmap
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
The TLB should be flushed on unmap, so that the mapping entry becomes
invalid. Currently this is done only in the non-debug case, which does
not look right.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/arm/mm/highmem.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -112,10 +112,10 @@ void __kunmap_atomic(void *kvaddr)
__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(vaddr != __fix_to_virt(idx));
- set_fixmap_pte(idx, __pte(0));
#else
(void) idx; /* to kill a warning */
#endif
+ set_fixmap_pte(idx, __pte(0));
kmap_atomic_idx_pop();
} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
/* this address was obtained through kmap_high_get() */

View File

@@ -0,0 +1,106 @@
Subject: arm: Add support for lazy preemption
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 31 Oct 2012 12:04:11 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Implement the arm pieces for lazy preempt.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/arm/Kconfig | 1 +
arch/arm/include/asm/thread_info.h | 3 +++
arch/arm/kernel/asm-offsets.c | 1 +
arch/arm/kernel/entry-armv.S | 13 +++++++++++--
arch/arm/kernel/signal.c | 3 ++-
5 files changed, 18 insertions(+), 3 deletions(-)
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -68,6 +68,7 @@ config ARM
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_PREEMPT_LAZY
select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_SYSCALL_TRACEPOINTS
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -49,6 +49,7 @@ struct cpu_context_save {
struct thread_info {
unsigned long flags; /* low level flags */
int preempt_count; /* 0 => preemptable, <0 => bug */
+ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
mm_segment_t addr_limit; /* address limit */
struct task_struct *task; /* main task structure */
__u32 cpu; /* cpu */
@@ -143,6 +144,7 @@ extern int vfp_restore_user_hwstate(stru
#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
#define TIF_SECCOMP 7 /* seccomp syscall filtering active */
+#define TIF_NEED_RESCHED_LAZY 8
#define TIF_NOHZ 12 /* in adaptive nohz mode */
#define TIF_USING_IWMMXT 17
@@ -152,6 +154,7 @@ extern int vfp_restore_user_hwstate(stru
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -65,6 +65,7 @@ int main(void)
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
+ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -215,11 +215,18 @@ ENDPROC(__dabt_svc)
#ifdef CONFIG_PREEMPT
get_thread_info tsk
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
- ldr r0, [tsk, #TI_FLAGS] @ get flags
teq r8, #0 @ if preempt count != 0
+ bne 1f @ return from exception
+ ldr r0, [tsk, #TI_FLAGS] @ get flags
+ tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set
+ blne svc_preempt @ preempt!
+
+ ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
+ teq r8, #0 @ if preempt lazy count != 0
movne r0, #0 @ force flags to 0
- tst r0, #_TIF_NEED_RESCHED
+ tst r0, #_TIF_NEED_RESCHED_LAZY
blne svc_preempt
+1:
#endif
svc_exit r5, irq = 1 @ return from exception
@@ -234,6 +241,8 @@ ENDPROC(__irq_svc)
1: bl preempt_schedule_irq @ irq en/disable is done inside
ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
tst r0, #_TIF_NEED_RESCHED
+ bne 1b
+ tst r0, #_TIF_NEED_RESCHED_LAZY
reteq r8 @ go again
b 1b
#endif
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -572,7 +572,8 @@ do_work_pending(struct pt_regs *regs, un
*/
trace_hardirqs_off();
do {
- if (likely(thread_flags & _TIF_NEED_RESCHED)) {
+ if (likely(thread_flags & (_TIF_NEED_RESCHED |
+ _TIF_NEED_RESCHED_LAZY))) {
schedule();
} else {
if (unlikely(!user_mode(regs)))
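The assembly above implements, roughly, the following decision on IRQ exit (C pseudocode; the helper name is invented, the real logic lives in __irq_svc and svc_preempt):

static void irq_exit_preempt_check(struct thread_info *ti)
{
    if (ti->preempt_count)
        return;                /* preemption disabled */
    if (ti->flags & _TIF_NEED_RESCHED) {
        svc_preempt();         /* urgent: preempt immediately */
        return;
    }
    if (ti->preempt_lazy_count)
        return;                /* lazy preemption disabled */
    if (ti->flags & _TIF_NEED_RESCHED_LAZY)
        svc_preempt();         /* non-urgent: may be deferred */
}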

@@ -0,0 +1,84 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 20 Sep 2013 14:31:54 +0200
Subject: arm/unwind: use a raw_spin_lock
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Mostly, unwinding is done with irqs enabled; however, SLUB may call it
with irqs disabled while creating a new SLUB cache.
I had a system freeze while loading a module that called
kmem_cache_create() on init. That means SLUB's __slab_alloc() disabled
interrupts and then called
 ->new_slab_objects()
  ->new_slab()
   ->setup_object()
    ->setup_object_debug()
     ->init_tracking()
      ->set_track()
       ->save_stack_trace()
        ->save_stack_trace_tsk()
         ->walk_stackframe()
          ->unwind_frame()
           ->unwind_find_idx()
            =>spin_lock_irqsave(&unwind_lock);
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/arm/kernel/unwind.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -93,7 +93,7 @@ extern const struct unwind_idx __start_u
static const struct unwind_idx *__origin_unwind_idx;
extern const struct unwind_idx __stop_unwind_idx[];
-static DEFINE_SPINLOCK(unwind_lock);
+static DEFINE_RAW_SPINLOCK(unwind_lock);
static LIST_HEAD(unwind_tables);
/* Convert a prel31 symbol to an absolute address */
@@ -201,7 +201,7 @@ static const struct unwind_idx *unwind_f
/* module unwind tables */
struct unwind_table *table;
- spin_lock_irqsave(&unwind_lock, flags);
+ raw_spin_lock_irqsave(&unwind_lock, flags);
list_for_each_entry(table, &unwind_tables, list) {
if (addr >= table->begin_addr &&
addr < table->end_addr) {
@@ -213,7 +213,7 @@ static const struct unwind_idx *unwind_f
break;
}
}
- spin_unlock_irqrestore(&unwind_lock, flags);
+ raw_spin_unlock_irqrestore(&unwind_lock, flags);
}
pr_debug("%s: idx = %p\n", __func__, idx);
@@ -529,9 +529,9 @@ struct unwind_table *unwind_table_add(un
tab->begin_addr = text_addr;
tab->end_addr = text_addr + text_size;
- spin_lock_irqsave(&unwind_lock, flags);
+ raw_spin_lock_irqsave(&unwind_lock, flags);
list_add_tail(&tab->list, &unwind_tables);
- spin_unlock_irqrestore(&unwind_lock, flags);
+ raw_spin_unlock_irqrestore(&unwind_lock, flags);
return tab;
}
@@ -543,9 +543,9 @@ void unwind_table_del(struct unwind_tabl
if (!tab)
return;
- spin_lock_irqsave(&unwind_lock, flags);
+ raw_spin_lock_irqsave(&unwind_lock, flags);
list_del(&tab->list);
- spin_unlock_irqrestore(&unwind_lock, flags);
+ raw_spin_unlock_irqrestore(&unwind_lock, flags);
kfree(tab);
}
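The conversion matters because on PREEMPT_RT a plain spinlock_t is a sleeping lock and must not be taken with interrupts disabled, while a raw_spinlock_t keeps the non-sleeping semantics. A minimal sketch of the resulting pattern (the lock and the critical section are illustrative):

static DEFINE_RAW_SPINLOCK(example_lock);

static void walk_tables(void)
{
    unsigned long flags;

    /* Safe even when the caller (e.g. SLUB above) has irqs disabled:
     * a raw spinlock spins and never sleeps, on RT and !RT alike. */
    raw_spin_lock_irqsave(&example_lock, flags);
    /* ... short, bounded critical section ... */
    raw_spin_unlock_irqrestore(&example_lock, flags);
}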

@@ -0,0 +1,24 @@
Subject: arm64/xen: Make XEN depend on !RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 12 Oct 2015 11:18:40 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
It's not ready and probably never will be, unless the Xen folks have a
look at it.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/arm64/Kconfig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -562,7 +562,7 @@ config XEN_DOM0
config XEN
bool "Xen guest support on ARM64"
- depends on ARM64 && OF
+ depends on ARM64 && OF && !PREEMPT_RT_FULL
select SWIOTLB_XEN
help
Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64.

@@ -0,0 +1,65 @@
From: Steven Rostedt <srostedt@redhat.com>
Date: Fri, 3 Jul 2009 08:44:29 -0500
Subject: ata: Do not disable interrupts in ide code for preempt-rt
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Use the local_irq_*_nort variants.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/ata/libata-sff.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -678,9 +678,9 @@ unsigned int ata_sff_data_xfer_noirq(str
unsigned long flags;
unsigned int consumed;
- local_irq_save(flags);
+ local_irq_save_nort(flags);
consumed = ata_sff_data_xfer32(dev, buf, buflen, rw);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
return consumed;
}
@@ -719,7 +719,7 @@ static void ata_pio_sector(struct ata_qu
unsigned long flags;
/* FIXME: use a bounce buffer */
- local_irq_save(flags);
+ local_irq_save_nort(flags);
buf = kmap_atomic(page);
/* do the actual data transfer */
@@ -727,7 +727,7 @@ static void ata_pio_sector(struct ata_qu
do_write);
kunmap_atomic(buf);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
} else {
buf = page_address(page);
ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
@@ -864,7 +864,7 @@ static int __atapi_pio_bytes(struct ata_
unsigned long flags;
/* FIXME: use bounce buffer */
- local_irq_save(flags);
+ local_irq_save_nort(flags);
buf = kmap_atomic(page);
/* do the actual data transfer */
@@ -872,7 +872,7 @@ static int __atapi_pio_bytes(struct ata_
count, rw);
kunmap_atomic(buf);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
} else {
buf = page_address(page);
consumed = ap->ops->sff_data_xfer(dev, buf + offset,

@@ -0,0 +1,84 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Sat, 3 May 2014 11:00:29 +0200
Subject: blk-mq: revert raw locks, post pone notifier to POST_DEAD
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
The blk_mq_cpu_notify_lock should be raw because some CPU-down levels
are called with interrupts off. The notifier itself currently calls only
one function, blk_mq_hctx_notify().
That function acquires ctx->lock, which is a sleeping lock, and I would
prefer to keep it that way. That function only moves I/O requests from
the CPU that is going offline to another CPU, and it is currently the
only user. Therefore I revert the list lock back to a sleeping spinlock
and let the notifier run at POST_DEAD time.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
block/blk-mq-cpu.c | 17 ++++++++++-------
block/blk-mq.c | 2 +-
2 files changed, 11 insertions(+), 8 deletions(-)
--- a/block/blk-mq-cpu.c
+++ b/block/blk-mq-cpu.c
@@ -16,7 +16,7 @@
#include "blk-mq.h"
static LIST_HEAD(blk_mq_cpu_notify_list);
-static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock);
+static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock);
static int blk_mq_main_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
@@ -25,7 +25,10 @@ static int blk_mq_main_cpu_notify(struct
struct blk_mq_cpu_notifier *notify;
int ret = NOTIFY_OK;
- raw_spin_lock(&blk_mq_cpu_notify_lock);
+ if (action != CPU_POST_DEAD)
+ return NOTIFY_OK;
+
+ spin_lock(&blk_mq_cpu_notify_lock);
list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) {
ret = notify->notify(notify->data, action, cpu);
@@ -33,7 +36,7 @@ static int blk_mq_main_cpu_notify(struct
break;
}
- raw_spin_unlock(&blk_mq_cpu_notify_lock);
+ spin_unlock(&blk_mq_cpu_notify_lock);
return ret;
}
@@ -41,16 +44,16 @@ void blk_mq_register_cpu_notifier(struct
{
BUG_ON(!notifier->notify);
- raw_spin_lock(&blk_mq_cpu_notify_lock);
+ spin_lock(&blk_mq_cpu_notify_lock);
list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
- raw_spin_unlock(&blk_mq_cpu_notify_lock);
+ spin_unlock(&blk_mq_cpu_notify_lock);
}
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
{
- raw_spin_lock(&blk_mq_cpu_notify_lock);
+ spin_lock(&blk_mq_cpu_notify_lock);
list_del(&notifier->list);
- raw_spin_unlock(&blk_mq_cpu_notify_lock);
+ spin_unlock(&blk_mq_cpu_notify_lock);
}
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1640,7 +1640,7 @@ static int blk_mq_hctx_notify(void *data
{
struct blk_mq_hw_ctx *hctx = data;
- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
+ if (action == CPU_POST_DEAD)
return blk_mq_hctx_cpu_offline(hctx, cpu);
/*
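As a hedged sketch, a notifier restricted to POST_DEAD under the 4.4-era hotplug notifier API looks like this (the callback body is illustrative):

static int example_cpu_notify(struct notifier_block *self,
                              unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;

    /* CPU_POST_DEAD is delivered in preemptible context after the CPU
     * is gone, so sleeping locks such as ctx->lock may be taken here. */
    if (action != CPU_POST_DEAD)
        return NOTIFY_OK;

    /* ... migrate pending requests away from the dead 'cpu' ... */
    return NOTIFY_OK;
}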

@@ -0,0 +1,115 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 13 Feb 2015 11:01:26 +0100
Subject: block: blk-mq: Use swait
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
| BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914
| in_atomic(): 1, irqs_disabled(): 0, pid: 255, name: kworker/u257:6
| 5 locks held by kworker/u257:6/255:
| #0: ("events_unbound"){.+.+.+}, at: [<ffffffff8108edf1>] process_one_work+0x171/0x5e0
| #1: ((&entry->work)){+.+.+.}, at: [<ffffffff8108edf1>] process_one_work+0x171/0x5e0
| #2: (&shost->scan_mutex){+.+.+.}, at: [<ffffffffa000faa3>] __scsi_add_device+0xa3/0x130 [scsi_mod]
| #3: (&set->tag_list_lock){+.+...}, at: [<ffffffff812f09fa>] blk_mq_init_queue+0x96a/0xa50
| #4: (rcu_read_lock_sched){......}, at: [<ffffffff8132887d>] percpu_ref_kill_and_confirm+0x1d/0x120
| Preemption disabled at:[<ffffffff812eff76>] blk_mq_freeze_queue_start+0x56/0x70
|
| CPU: 2 PID: 255 Comm: kworker/u257:6 Not tainted 3.18.7-rt0+ #1
| Workqueue: events_unbound async_run_entry_fn
| 0000000000000003 ffff8800bc29f998 ffffffff815b3a12 0000000000000000
| 0000000000000000 ffff8800bc29f9b8 ffffffff8109aa16 ffff8800bc29fa28
| ffff8800bc5d1bc8 ffff8800bc29f9e8 ffffffff815b8dd4 ffff880000000000
| Call Trace:
| [<ffffffff815b3a12>] dump_stack+0x4f/0x7c
| [<ffffffff8109aa16>] __might_sleep+0x116/0x190
| [<ffffffff815b8dd4>] rt_spin_lock+0x24/0x60
| [<ffffffff810b6089>] __wake_up+0x29/0x60
| [<ffffffff812ee06e>] blk_mq_usage_counter_release+0x1e/0x20
| [<ffffffff81328966>] percpu_ref_kill_and_confirm+0x106/0x120
| [<ffffffff812eff76>] blk_mq_freeze_queue_start+0x56/0x70
| [<ffffffff812f0000>] blk_mq_update_tag_set_depth+0x40/0xd0
| [<ffffffff812f0a1c>] blk_mq_init_queue+0x98c/0xa50
| [<ffffffffa000dcf0>] scsi_mq_alloc_queue+0x20/0x60 [scsi_mod]
| [<ffffffffa000ea35>] scsi_alloc_sdev+0x2f5/0x370 [scsi_mod]
| [<ffffffffa000f494>] scsi_probe_and_add_lun+0x9e4/0xdd0 [scsi_mod]
| [<ffffffffa000fb26>] __scsi_add_device+0x126/0x130 [scsi_mod]
| [<ffffffffa013033f>] ata_scsi_scan_host+0xaf/0x200 [libata]
| [<ffffffffa012b5b6>] async_port_probe+0x46/0x60 [libata]
| [<ffffffff810978fb>] async_run_entry_fn+0x3b/0xf0
| [<ffffffff8108ee81>] process_one_work+0x201/0x5e0
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
block/blk-core.c | 6 +++---
block/blk-mq.c | 6 +++---
include/linux/blkdev.h | 2 +-
3 files changed, 7 insertions(+), 7 deletions(-)
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -644,7 +644,7 @@ int blk_queue_enter(struct request_queue
if (!gfpflags_allow_blocking(gfp))
return -EBUSY;
- ret = wait_event_interruptible(q->mq_freeze_wq,
+ ret = swait_event_interruptible(q->mq_freeze_wq,
!atomic_read(&q->mq_freeze_depth) ||
blk_queue_dying(q));
if (blk_queue_dying(q))
@@ -664,7 +664,7 @@ static void blk_queue_usage_counter_rele
struct request_queue *q =
container_of(ref, struct request_queue, q_usage_counter);
- wake_up_all(&q->mq_freeze_wq);
+ swait_wake_all(&q->mq_freeze_wq);
}
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
@@ -726,7 +726,7 @@ struct request_queue *blk_alloc_queue_no
q->bypass_depth = 1;
__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
- init_waitqueue_head(&q->mq_freeze_wq);
+ init_swait_head(&q->mq_freeze_wq);
/*
* Init percpu_ref in atomic mode so that it's faster to shutdown.
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -92,7 +92,7 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_st
static void blk_mq_freeze_queue_wait(struct request_queue *q)
{
- wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
+ swait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
/*
@@ -130,7 +130,7 @@ void blk_mq_unfreeze_queue(struct reques
WARN_ON_ONCE(freeze_depth < 0);
if (!freeze_depth) {
percpu_ref_reinit(&q->q_usage_counter);
- wake_up_all(&q->mq_freeze_wq);
+ swait_wake_all(&q->mq_freeze_wq);
}
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
@@ -149,7 +149,7 @@ void blk_mq_wake_waiters(struct request_
* dying, we need to ensure that processes currently waiting on
* the queue are notified as well.
*/
- wake_up_all(&q->mq_freeze_wq);
+ swait_wake_all(&q->mq_freeze_wq);
}
bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -456,7 +456,7 @@ struct request_queue {
struct throtl_data *td;
#endif
struct rcu_head rcu_head;
- wait_queue_head_t mq_freeze_wq;
+ struct swait_head mq_freeze_wq;
struct percpu_ref q_usage_counter;
struct list_head all_q_node;
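The swait API used above is the RT patch's simple waitqueue, protected by a raw lock; its usage mirrors the regular waitqueue API. A minimal sketch with an invented condition:

static struct swait_head freeze_wq;
static int frozen;

static void freeze_setup(void)
{
    init_swait_head(&freeze_wq);
}

static void freeze_wait(void)
{
    swait_event(freeze_wq, frozen);    /* sleep until 'frozen' is set */
}

static void freeze_complete(void)
{
    frozen = 1;
    swait_wake_all(&freeze_wq);        /* raw-lock based wakeup */
}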

@@ -0,0 +1,102 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 29 Jan 2015 15:10:08 +0100
Subject: block/mq: don't complete requests via IPI
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
The IPI runs in hardirq context and there are sleeping locks. This patch
moves the completion into a workqueue.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
block/blk-core.c | 3 +++
block/blk-mq.c | 20 ++++++++++++++++++++
include/linux/blk-mq.h | 1 +
include/linux/blkdev.h | 1 +
4 files changed, 25 insertions(+)
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -125,6 +125,9 @@ void blk_rq_init(struct request_queue *q
INIT_LIST_HEAD(&rq->queuelist);
INIT_LIST_HEAD(&rq->timeout_list);
+#ifdef CONFIG_PREEMPT_RT_FULL
+ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
+#endif
rq->cpu = -1;
rq->q = q;
rq->__sector = (sector_t) -1;
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -196,6 +196,9 @@ static void blk_mq_rq_ctx_init(struct re
rq->resid_len = 0;
rq->sense = NULL;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
+#endif
INIT_LIST_HEAD(&rq->timeout_list);
rq->timeout = 0;
@@ -325,6 +328,17 @@ void blk_mq_end_request(struct request *
}
EXPORT_SYMBOL(blk_mq_end_request);
+#ifdef CONFIG_PREEMPT_RT_FULL
+
+void __blk_mq_complete_request_remote_work(struct work_struct *work)
+{
+ struct request *rq = container_of(work, struct request, work);
+
+ rq->q->softirq_done_fn(rq);
+}
+
+#else
+
static void __blk_mq_complete_request_remote(void *data)
{
struct request *rq = data;
@@ -332,6 +346,8 @@ static void __blk_mq_complete_request_re
rq->q->softirq_done_fn(rq);
}
+#endif
+
static void blk_mq_ipi_complete_request(struct request *rq)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
@@ -348,10 +364,14 @@ static void blk_mq_ipi_complete_request(
shared = cpus_share_cache(cpu, ctx->cpu);
if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
+#ifdef CONFIG_PREEMPT_RT_FULL
+ schedule_work_on(ctx->cpu, &rq->work);
+#else
rq->csd.func = __blk_mq_complete_request_remote;
rq->csd.info = rq;
rq->csd.flags = 0;
smp_call_function_single_async(ctx->cpu, &rq->csd);
+#endif
} else {
rq->q->softirq_done_fn(rq);
}
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -212,6 +212,7 @@ static inline u16 blk_mq_unique_tag_to_t
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
+void __blk_mq_complete_request_remote_work(struct work_struct *work);
int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -89,6 +89,7 @@ struct request {
struct list_head queuelist;
union {
struct call_single_data csd;
+ struct work_struct work;
unsigned long fifo_time;
};
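The general shape of the fix, as a sketch: a completion that needs sleeping locks is packaged as a work item and run from a worker instead of from the IPI's hardirq context. The struct and callback below are invented; INIT_WORK() and schedule_work_on() are the real kernel APIs:

struct deferred_done {
    struct work_struct work;
    void (*done)(struct deferred_done *);
};

static void deferred_done_fn(struct work_struct *work)
{
    struct deferred_done *dd =
        container_of(work, struct deferred_done, work);

    dd->done(dd);            /* worker context: may sleep on RT */
}

static void complete_on_cpu(int cpu, struct deferred_done *dd)
{
    INIT_WORK(&dd->work, deferred_done_fn);
    schedule_work_on(cpu, &dd->work);    /* instead of an IPI */
}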

@@ -0,0 +1,125 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 18 Feb 2015 18:37:26 +0100
Subject: block/mq: drop per ctx cpu_lock
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
While converting the get_cpu() to get_cpu_light() I added a cpu lock to
ensure the same code is not invoked twice on the same CPU. And now I run
into this:
| kernel BUG at kernel/locking/rtmutex.c:996!
| invalid opcode: 0000 [#1] PREEMPT SMP
| CPU0: 13 PID: 75 Comm: kworker/u258:0 Tainted: G I 3.18.7-rt1.5+ #12
| Workqueue: writeback bdi_writeback_workfn (flush-8:0)
| task: ffff88023742a620 ti: ffff88023743c000 task.ti: ffff88023743c000
| RIP: 0010:[<ffffffff81523cc0>] [<ffffffff81523cc0>] rt_spin_lock_slowlock+0x280/0x2d0
| Call Trace:
| [<ffffffff815254e7>] rt_spin_lock+0x27/0x60
taking the same lock again
|
| [<ffffffff8127c771>] blk_mq_insert_requests+0x51/0x130
| [<ffffffff8127d4a9>] blk_mq_flush_plug_list+0x129/0x140
| [<ffffffff81272461>] blk_flush_plug_list+0xd1/0x250
| [<ffffffff81522075>] schedule+0x75/0xa0
| [<ffffffff8152474d>] do_nanosleep+0xdd/0x180
| [<ffffffff810c8312>] __hrtimer_nanosleep+0xd2/0x1c0
| [<ffffffff810c8456>] cpu_chill+0x56/0x80
| [<ffffffff8107c13d>] try_to_grab_pending+0x1bd/0x390
| [<ffffffff8107c431>] cancel_delayed_work+0x21/0x170
| [<ffffffff81279a98>] blk_mq_stop_hw_queue+0x18/0x40
| [<ffffffffa000ac6f>] scsi_queue_rq+0x7f/0x830 [scsi_mod]
| [<ffffffff8127b0de>] __blk_mq_run_hw_queue+0x1ee/0x360
| [<ffffffff8127b528>] blk_mq_map_request+0x108/0x190
take the lock ^^^
|
| [<ffffffff8127c8d2>] blk_sq_make_request+0x82/0x350
| [<ffffffff8126f6c0>] generic_make_request+0xd0/0x120
| [<ffffffff8126f788>] submit_bio+0x78/0x190
| [<ffffffff811bd537>] _submit_bh+0x117/0x180
| [<ffffffff811bf528>] __block_write_full_page.constprop.38+0x138/0x3f0
| [<ffffffff811bf880>] block_write_full_page+0xa0/0xe0
| [<ffffffff811c02b3>] blkdev_writepage+0x13/0x20
| [<ffffffff81127b25>] __writepage+0x15/0x40
| [<ffffffff8112873b>] write_cache_pages+0x1fb/0x440
| [<ffffffff811289be>] generic_writepages+0x3e/0x60
| [<ffffffff8112a17c>] do_writepages+0x1c/0x30
| [<ffffffff811b3603>] __writeback_single_inode+0x33/0x140
| [<ffffffff811b462d>] writeback_sb_inodes+0x2bd/0x490
| [<ffffffff811b4897>] __writeback_inodes_wb+0x97/0xd0
| [<ffffffff811b4a9b>] wb_writeback+0x1cb/0x210
| [<ffffffff811b505b>] bdi_writeback_workfn+0x25b/0x380
| [<ffffffff8107b50b>] process_one_work+0x1bb/0x490
| [<ffffffff8107c7ab>] worker_thread+0x6b/0x4f0
| [<ffffffff81081863>] kthread+0xe3/0x100
| [<ffffffff8152627c>] ret_from_fork+0x7c/0xb0
After looking at this for a while, it seems that it is safe if blk_mq_ctx is
used multiple times; the lock in the struct protects the access.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
block/blk-mq.c | 4 ----
block/blk-mq.h | 8 --------
2 files changed, 12 deletions(-)
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1405,9 +1405,7 @@ static blk_qc_t blk_sq_make_request(stru
blk_mq_put_ctx(data.ctx);
if (request_count >= BLK_MAX_REQUEST_COUNT) {
- spin_unlock(&data.ctx->cpu_lock);
blk_flush_plug_list(plug, false);
- spin_lock(&data.ctx->cpu_lock);
trace_block_plug(q);
}
@@ -1609,7 +1607,6 @@ static int blk_mq_hctx_cpu_offline(struc
blk_mq_hctx_clear_pending(hctx, ctx);
}
spin_unlock(&ctx->lock);
- __blk_mq_put_ctx(ctx);
if (list_empty(&tmp))
return NOTIFY_OK;
@@ -1803,7 +1800,6 @@ static void blk_mq_init_cpu_queues(struc
memset(__ctx, 0, sizeof(*__ctx));
__ctx->cpu = i;
spin_lock_init(&__ctx->lock);
- spin_lock_init(&__ctx->cpu_lock);
INIT_LIST_HEAD(&__ctx->rq_list);
__ctx->queue = q;
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -9,7 +9,6 @@ struct blk_mq_ctx {
struct list_head rq_list;
} ____cacheline_aligned_in_smp;
- spinlock_t cpu_lock;
unsigned int cpu;
unsigned int index_hw;
@@ -78,7 +77,6 @@ static inline struct blk_mq_ctx *__blk_m
struct blk_mq_ctx *ctx;
ctx = per_cpu_ptr(q->queue_ctx, cpu);
- spin_lock(&ctx->cpu_lock);
return ctx;
}
@@ -93,14 +91,8 @@ static inline struct blk_mq_ctx *blk_mq_
return __blk_mq_get_ctx(q, get_cpu_light());
}
-static void __blk_mq_put_ctx(struct blk_mq_ctx *ctx)
-{
- spin_unlock(&ctx->cpu_lock);
-}
-
static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
- __blk_mq_put_ctx(ctx);
put_cpu_light();
}

@@ -0,0 +1,52 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 14 Jul 2015 14:26:34 +0200
Subject: block/mq: do not invoke preempt_disable()
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
preempt_disable() and get_cpu() don't play well together with the sleeping
locks that are taken later.
It seems to be enough to replace them with get_cpu_light() and migrate_disable().
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
block/blk-mq.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -343,7 +343,7 @@ static void blk_mq_ipi_complete_request(
return;
}
- cpu = get_cpu();
+ cpu = get_cpu_light();
if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
shared = cpus_share_cache(cpu, ctx->cpu);
@@ -355,7 +355,7 @@ static void blk_mq_ipi_complete_request(
} else {
rq->q->softirq_done_fn(rq);
}
- put_cpu();
+ put_cpu_light();
}
static void __blk_mq_complete_request(struct request *rq)
@@ -862,14 +862,14 @@ void blk_mq_run_hw_queue(struct blk_mq_h
return;
if (!async) {
- int cpu = get_cpu();
+ int cpu = get_cpu_light();
if (cpumask_test_cpu(cpu, hctx->cpumask)) {
__blk_mq_run_hw_queue(hctx);
- put_cpu();
+ put_cpu_light();
return;
}
- put_cpu();
+ put_cpu_light();
}
kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
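get_cpu_light() is the RT patch's counterpart to get_cpu(): it pins the task to the current CPU via migrate_disable() but leaves preemption enabled, so sleeping locks remain legal inside the section. Illustrative shape:

static void run_on_current_cpu(void)
{
    int cpu = get_cpu_light();    /* migrate_disable(); still preemptible */

    pr_debug("pinned to CPU %d\n", cpu);
    /* ... per-cpu work; may take sleeping locks on RT ... */

    put_cpu_light();              /* migrate_enable() */
}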

@@ -0,0 +1,90 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 9 Apr 2014 10:37:23 +0200
Subject: block: mq: use cpu_light()
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
There is a might-sleep splat because get_cpu() disables preemption and
we later grab a lock. As a workaround we use get_cpu_light() and an
additional lock to prevent taking the same ctx twice.
There is already a lock member in the ctx, but some functions do ++ on the
member; this works with irqs off, but on RT we would need the extra lock.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
block/blk-mq.c | 4 ++++
block/blk-mq.h | 17 ++++++++++++++---
2 files changed, 18 insertions(+), 3 deletions(-)
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1385,7 +1385,9 @@ static blk_qc_t blk_sq_make_request(stru
blk_mq_put_ctx(data.ctx);
if (request_count >= BLK_MAX_REQUEST_COUNT) {
+ spin_unlock(&data.ctx->cpu_lock);
blk_flush_plug_list(plug, false);
+ spin_lock(&data.ctx->cpu_lock);
trace_block_plug(q);
}
@@ -1587,6 +1589,7 @@ static int blk_mq_hctx_cpu_offline(struc
blk_mq_hctx_clear_pending(hctx, ctx);
}
spin_unlock(&ctx->lock);
+ __blk_mq_put_ctx(ctx);
if (list_empty(&tmp))
return NOTIFY_OK;
@@ -1780,6 +1783,7 @@ static void blk_mq_init_cpu_queues(struc
memset(__ctx, 0, sizeof(*__ctx));
__ctx->cpu = i;
spin_lock_init(&__ctx->lock);
+ spin_lock_init(&__ctx->cpu_lock);
INIT_LIST_HEAD(&__ctx->rq_list);
__ctx->queue = q;
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -9,6 +9,7 @@ struct blk_mq_ctx {
struct list_head rq_list;
} ____cacheline_aligned_in_smp;
+ spinlock_t cpu_lock;
unsigned int cpu;
unsigned int index_hw;
@@ -74,7 +75,11 @@ struct blk_align_bitmap {
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
unsigned int cpu)
{
- return per_cpu_ptr(q->queue_ctx, cpu);
+ struct blk_mq_ctx *ctx;
+
+ ctx = per_cpu_ptr(q->queue_ctx, cpu);
+ spin_lock(&ctx->cpu_lock);
+ return ctx;
}
/*
@@ -85,12 +90,18 @@ static inline struct blk_mq_ctx *__blk_m
*/
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
- return __blk_mq_get_ctx(q, get_cpu());
+ return __blk_mq_get_ctx(q, get_cpu_light());
+}
+
+static void __blk_mq_put_ctx(struct blk_mq_ctx *ctx)
+{
+ spin_unlock(&ctx->cpu_lock);
}
static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
- put_cpu();
+ __blk_mq_put_ctx(ctx);
+ put_cpu_light();
}
struct blk_mq_alloc_data {

@@ -0,0 +1,97 @@
Subject: block: Shorten interrupt disabled regions
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 22 Jun 2011 19:47:02 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Moving the blk_sched_flush_plug() call out of the interrupt/preempt
disabled region in the scheduler allows us to replace
local_irq_save/restore(flags) by local_irq_disable/enable() in
blk_flush_plug().
Now instead of doing this we disable interrupts explicitly when we
lock the request_queue and re-enable them when we drop the lock. That
allows interrupts to be handled when the plug list contains requests
for more than one queue.
Aside from that, this change makes the scope of the irq-disabled region
more obvious. The current code confused the hell out of me when
looking at:
   local_irq_save(flags);
     spin_lock(q->queue_lock);
       ...
       queue_unplugged(q...);
         scsi_request_fn();
           spin_unlock(q->queue_lock);
           spin_lock(shost->host_lock);
           spin_unlock_irq(shost->host_lock);
   -------------------^^^ ????
           spin_lock_irq(q->queue_lock);
           spin_unlock(q->lock);
   local_irq_restore(flags);
Also add a comment to __blk_run_queue() documenting that
q->request_fn() can drop q->queue_lock and reenable interrupts, but
must return with q->queue_lock held and interrupts disabled.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
---
block/blk-core.c | 12 ++----------
1 file changed, 2 insertions(+), 10 deletions(-)
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -3182,7 +3182,7 @@ static void queue_unplugged(struct reque
blk_run_queue_async(q);
else
__blk_run_queue(q);
- spin_unlock(q->queue_lock);
+ spin_unlock_irq(q->queue_lock);
}
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
@@ -3230,7 +3230,6 @@ EXPORT_SYMBOL(blk_check_plugged);
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
struct request_queue *q;
- unsigned long flags;
struct request *rq;
LIST_HEAD(list);
unsigned int depth;
@@ -3250,11 +3249,6 @@ void blk_flush_plug_list(struct blk_plug
q = NULL;
depth = 0;
- /*
- * Save and disable interrupts here, to avoid doing it for every
- * queue lock we have to take.
- */
- local_irq_save(flags);
while (!list_empty(&list)) {
rq = list_entry_rq(list.next);
list_del_init(&rq->queuelist);
@@ -3267,7 +3261,7 @@ void blk_flush_plug_list(struct blk_plug
queue_unplugged(q, depth, from_schedule);
q = rq->q;
depth = 0;
- spin_lock(q->queue_lock);
+ spin_lock_irq(q->queue_lock);
}
/*
@@ -3294,8 +3288,6 @@ void blk_flush_plug_list(struct blk_plug
*/
if (q)
queue_unplugged(q, depth, from_schedule);
-
- local_irq_restore(flags);
}
void blk_finish_plug(struct blk_plug *plug)

@@ -0,0 +1,46 @@
Subject: block: Use cpu_chill() for retry loops
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 20 Dec 2012 18:28:26 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Retry loops on RT might loop forever when the modifying side was
preempted. Steven also observed a livelock when concurrent priority
boosting was going on.
Use cpu_chill() instead of cpu_relax() to let the system
make progress.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
block/blk-ioc.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -7,6 +7,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#include "blk.h"
@@ -109,7 +110,7 @@ static void ioc_release_fn(struct work_s
spin_unlock(q->queue_lock);
} else {
spin_unlock_irqrestore(&ioc->lock, flags);
- cpu_relax();
+ cpu_chill();
spin_lock_irqsave_nested(&ioc->lock, flags, 1);
}
}
@@ -187,7 +188,7 @@ void put_io_context_active(struct io_con
spin_unlock(icq->q->queue_lock);
} else {
spin_unlock_irqrestore(&ioc->lock, flags);
- cpu_relax();
+ cpu_chill();
goto retry;
}
}
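Shape of the converted retry loop (the lock and the condition are illustrative); cpu_chill() is the RT patch helper that sleeps for a tick instead of busy-waiting, so a preempted lock holder gets to run:

static DEFINE_SPINLOCK(ioc_lock);    /* hypothetical */
static bool owner_busy;              /* hypothetical */

static void wait_for_owner(void)
{
    unsigned long flags;

retry:
    spin_lock_irqsave(&ioc_lock, flags);
    if (owner_busy) {
        spin_unlock_irqrestore(&ioc_lock, flags);
        cpu_chill();    /* was cpu_relax(): now sleeps briefly */
        goto retry;
    }
    /* ... proceed under the lock ... */
    spin_unlock_irqrestore(&ioc_lock, flags);
}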

@@ -0,0 +1,37 @@
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:58 -0500
Subject: bug: BUG_ON/WARN_ON variants dependent on RT/!RT
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Introduce RT/NON-RT WARN/BUG statements to avoid ifdefs in the code.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/asm-generic/bug.h | 14 ++++++++++++++
1 file changed, 14 insertions(+)
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -206,6 +206,20 @@ extern void warn_slowpath_null(const cha
# define WARN_ON_SMP(x) ({0;})
#endif
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define BUG_ON_RT(c) BUG_ON(c)
+# define BUG_ON_NONRT(c) do { } while (0)
+# define WARN_ON_RT(condition) WARN_ON(condition)
+# define WARN_ON_NONRT(condition) do { } while (0)
+# define WARN_ON_ONCE_NONRT(condition) do { } while (0)
+#else
+# define BUG_ON_RT(c) do { } while (0)
+# define BUG_ON_NONRT(c) BUG_ON(c)
+# define WARN_ON_RT(condition) do { } while (0)
+# define WARN_ON_NONRT(condition) WARN_ON(condition)
+# define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition)
+#endif
+
#endif /* __ASSEMBLY__ */
#endif
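A hedged usage example (the function is invented): assertions about irq state that only hold for !RT spinlocks compile away on RT, where the "spinlock" is a sleeping lock that leaves interrupts enabled:

static DEFINE_SPINLOCK(example_lock);

static void example_critical_section(void)
{
    spin_lock_irq(&example_lock);
    /* Holds on !RT, where spin_lock_irq() really disables interrupts;
     * on RT it would always fire, so the check is compiled out. */
    WARN_ON_NONRT(!irqs_disabled());
    spin_unlock_irq(&example_lock);
}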

@@ -0,0 +1,65 @@
From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Sat, 21 Jun 2014 10:09:48 +0200
Subject: memcontrol: Prevent scheduling while atomic in cgroup code
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
mm, memcg: make refill_stock() use get_cpu_light()
Nikita reported the following memcg scheduling-while-atomic bug:
Call Trace:
[e22d5a90] [c0007ea8] show_stack+0x4c/0x168 (unreliable)
[e22d5ad0] [c0618c04] __schedule_bug+0x94/0xb0
[e22d5ae0] [c060b9ec] __schedule+0x530/0x550
[e22d5bf0] [c060bacc] schedule+0x30/0xbc
[e22d5c00] [c060ca24] rt_spin_lock_slowlock+0x180/0x27c
[e22d5c70] [c00b39dc] res_counter_uncharge_until+0x40/0xc4
[e22d5ca0] [c013ca88] drain_stock.isra.20+0x54/0x98
[e22d5cc0] [c01402ac] __mem_cgroup_try_charge+0x2e8/0xbac
[e22d5d70] [c01410d4] mem_cgroup_charge_common+0x3c/0x70
[e22d5d90] [c0117284] __do_fault+0x38c/0x510
[e22d5df0] [c011a5f4] handle_pte_fault+0x98/0x858
[e22d5e50] [c060ed08] do_page_fault+0x42c/0x6fc
[e22d5f40] [c000f5b4] handle_page_fault+0xc/0x80
What happens:
   refill_stock()
     get_cpu_var()
     drain_stock()
       res_counter_uncharge()
         res_counter_uncharge_until()
           spin_lock() <== boom
Fix it by replacing get/put_cpu_var() with get/put_cpu_light().
Reported-by: Nikita Yushchenko <nyushchenko@dev.rtsoft.ru>
Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
mm/memcontrol.c | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1913,14 +1913,17 @@ static void drain_local_stock(struct wor
*/
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
- struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
+ struct memcg_stock_pcp *stock;
+ int cpu = get_cpu_light();
+
+ stock = &per_cpu(memcg_stock, cpu);
if (stock->cached != memcg) { /* reset if necessary */
drain_stock(stock);
stock->cached = memcg;
}
stock->nr_pages += nr_pages;
- put_cpu_var(memcg_stock);
+ put_cpu_light();
}
/*

@@ -0,0 +1,87 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 13 Feb 2015 15:52:24 +0100
Subject: cgroups: use simple wait in css_release()
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
To avoid:
|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:914
|in_atomic(): 1, irqs_disabled(): 0, pid: 92, name: rcuc/11
|2 locks held by rcuc/11/92:
| #0: (rcu_callback){......}, at: [<ffffffff810e037e>] rcu_cpu_kthread+0x3de/0x940
| #1: (rcu_read_lock_sched){......}, at: [<ffffffff81328390>] percpu_ref_call_confirm_rcu+0x0/0xd0
|Preemption disabled at:[<ffffffff813284e2>] percpu_ref_switch_to_atomic_rcu+0x82/0xc0
|CPU: 11 PID: 92 Comm: rcuc/11 Not tainted 3.18.7-rt0+ #1
| ffff8802398cdf80 ffff880235f0bc28 ffffffff815b3a12 0000000000000000
| 0000000000000000 ffff880235f0bc48 ffffffff8109aa16 0000000000000000
| ffff8802398cdf80 ffff880235f0bc78 ffffffff815b8dd4 000000000000df80
|Call Trace:
| [<ffffffff815b3a12>] dump_stack+0x4f/0x7c
| [<ffffffff8109aa16>] __might_sleep+0x116/0x190
| [<ffffffff815b8dd4>] rt_spin_lock+0x24/0x60
| [<ffffffff8108d2cd>] queue_work_on+0x6d/0x1d0
| [<ffffffff8110c881>] css_release+0x81/0x90
| [<ffffffff8132844e>] percpu_ref_call_confirm_rcu+0xbe/0xd0
| [<ffffffff813284e2>] percpu_ref_switch_to_atomic_rcu+0x82/0xc0
| [<ffffffff810e03e5>] rcu_cpu_kthread+0x445/0x940
| [<ffffffff81098a2d>] smpboot_thread_fn+0x18d/0x2d0
| [<ffffffff810948d8>] kthread+0xe8/0x100
| [<ffffffff815b9c3c>] ret_from_fork+0x7c/0xb0
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/cgroup-defs.h | 2 ++
kernel/cgroup.c | 9 +++++----
2 files changed, 7 insertions(+), 4 deletions(-)
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -16,6 +16,7 @@
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
#include <linux/workqueue.h>
+#include <linux/work-simple.h>
#ifdef CONFIG_CGROUPS
@@ -136,6 +137,7 @@ struct cgroup_subsys_state {
/* percpu_ref killing and RCU release */
struct rcu_head rcu_head;
struct work_struct destroy_work;
+ struct swork_event destroy_swork;
};
/*
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4724,10 +4724,10 @@ static void css_free_rcu_fn(struct rcu_h
queue_work(cgroup_destroy_wq, &css->destroy_work);
}
-static void css_release_work_fn(struct work_struct *work)
+static void css_release_work_fn(struct swork_event *sev)
{
struct cgroup_subsys_state *css =
- container_of(work, struct cgroup_subsys_state, destroy_work);
+ container_of(sev, struct cgroup_subsys_state, destroy_swork);
struct cgroup_subsys *ss = css->ss;
struct cgroup *cgrp = css->cgroup;
@@ -4766,8 +4766,8 @@ static void css_release(struct percpu_re
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
- INIT_WORK(&css->destroy_work, css_release_work_fn);
- queue_work(cgroup_destroy_wq, &css->destroy_work);
+ INIT_SWORK(&css->destroy_swork, css_release_work_fn);
+ swork_queue(&css->destroy_swork);
}
static void init_and_link_css(struct cgroup_subsys_state *css,
@@ -5363,6 +5363,7 @@ static int __init cgroup_wq_init(void)
*/
cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
BUG_ON(!cgroup_destroy_wq);
+ BUG_ON(swork_get());
/*
* Used to destroy pidlists and separate to serve as flush domain.
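Sketch of the swork (simple work) usage introduced above; the API names are the ones used in this hunk, the event itself is invented:

static struct swork_event cleanup_sev;

static void cleanup_fn(struct swork_event *sev)
{
    /* Runs in the swork kernel thread: sleeping locks are fine here. */
}

static int __init cleanup_init(void)
{
    WARN_ON(swork_get());    /* bring up the swork worker */
    INIT_SWORK(&cleanup_sev, cleanup_fn);
    return 0;
}

static void trigger_cleanup(void)
{
    /* Usable where queue_work() would splat on RT, e.g. from an RCU
     * callback as in css_release() above. */
    swork_queue(&cleanup_sev);
}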

@@ -0,0 +1,158 @@
From: Benedikt Spranger <b.spranger@linutronix.de>
Date: Mon, 8 Mar 2010 18:57:04 +0100
Subject: clocksource: TCLIB: Allow higher clock rates for clock events
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
By default the TCLIB uses the 32 KiHz base clock rate for clock events.
Add a compile-time selection to allow a higher clock resolution.
(fixed up by Sami Pietikäinen <Sami.Pietikainen@wapice.com>)
Signed-off-by: Benedikt Spranger <b.spranger@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/clocksource/tcb_clksrc.c | 36 +++++++++++++++++++++---------------
drivers/misc/Kconfig | 12 ++++++++++--
2 files changed, 31 insertions(+), 17 deletions(-)
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -23,8 +23,7 @@
* this 32 bit free-running counter. the second channel is not used.
*
* - The third channel may be used to provide a 16-bit clockevent
- * source, used in either periodic or oneshot mode. This runs
- * at 32 KiHZ, and can handle delays of up to two seconds.
+ * source, used in either periodic or oneshot mode.
*
* A boot clocksource and clockevent source are also currently needed,
* unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
@@ -74,6 +73,7 @@ static struct clocksource clksrc = {
struct tc_clkevt_device {
struct clock_event_device clkevt;
struct clk *clk;
+ u32 freq;
void __iomem *regs;
};
@@ -82,13 +82,6 @@ static struct tc_clkevt_device *to_tc_cl
return container_of(clkevt, struct tc_clkevt_device, clkevt);
}
-/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
- * because using one of the divided clocks would usually mean the
- * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
- *
- * A divided clock could be good for high resolution timers, since
- * 30.5 usec resolution can seem "low".
- */
static u32 timer_clock;
static int tc_shutdown(struct clock_event_device *d)
@@ -113,7 +106,7 @@ static int tc_set_oneshot(struct clock_e
clk_enable(tcd->clk);
- /* slow clock, count up to RC, then irq and stop */
+ /* count up to RC, then irq and stop */
__raw_writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
__raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
@@ -135,10 +128,10 @@ static int tc_set_periodic(struct clock_
*/
clk_enable(tcd->clk);
- /* slow clock, count up to RC, then irq and restart */
+ /* count up to RC, then irq and restart */
__raw_writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
regs + ATMEL_TC_REG(2, CMR));
- __raw_writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
+ __raw_writel((tcd->freq + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
/* Enable clock and interrupts on RC compare */
__raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
@@ -165,7 +158,11 @@ static struct tc_clkevt_device clkevt =
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
/* Should be lower than at91rm9200's system timer */
+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
.rating = 125,
+#else
+ .rating = 200,
+#endif
.set_next_event = tc_next_event,
.set_state_shutdown = tc_shutdown,
.set_state_periodic = tc_set_periodic,
@@ -187,8 +184,9 @@ static irqreturn_t ch2_irq(int irq, void
return IRQ_NONE;
}
-static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
+static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
{
+ unsigned divisor = atmel_tc_divisors[divisor_idx];
int ret;
struct clk *t2_clk = tc->clk[2];
int irq = tc->irq[2];
@@ -209,7 +207,11 @@ static int __init setup_clkevents(struct
clkevt.regs = tc->regs;
clkevt.clk = t2_clk;
- timer_clock = clk32k_divisor_idx;
+ timer_clock = divisor_idx;
+ if (!divisor)
+ clkevt.freq = 32768;
+ else
+ clkevt.freq = clk_get_rate(t2_clk) / divisor;
clkevt.clkevt.cpumask = cpumask_of(0);
@@ -220,7 +222,7 @@ static int __init setup_clkevents(struct
return ret;
}
- clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
+ clockevents_config_and_register(&clkevt.clkevt, clkevt.freq, 1, 0xffff);
return ret;
}
@@ -357,7 +359,11 @@ static int __init tcb_clksrc_init(void)
goto err_disable_t1;
/* channel 2: periodic and oneshot timer support */
+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
ret = setup_clkevents(tc, clk32k_divisor_idx);
+#else
+ ret = setup_clkevents(tc, best_divisor_idx);
+#endif
if (ret)
goto err_unregister_clksrc;
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -69,8 +69,7 @@ config ATMEL_TCB_CLKSRC
are combined to make a single 32-bit timer.
When GENERIC_CLOCKEVENTS is defined, the third timer channel
- may be used as a clock event device supporting oneshot mode
- (delays of up to two seconds) based on the 32 KiHz clock.
+ may be used as a clock event device supporting oneshot mode.
config ATMEL_TCB_CLKSRC_BLOCK
int
@@ -84,6 +83,15 @@ config ATMEL_TCB_CLKSRC_BLOCK
TC can be used for other purposes, such as PWM generation and
interval timing.
+config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
+ bool "TC Block use 32 KiHz clock"
+ depends on ATMEL_TCB_CLKSRC
+ default y
+ help
+ Select this to use 32 KiHz base clock rate as TC block clock
+ source for clock events.
+
+
config DUMMY_IRQ
tristate "Dummy IRQ handler"
default n
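The reload value written to RC above rounds the divided clock to the nearest tick: RC = (freq + HZ/2) / HZ. A quick plain-C check of the two cases (the 32 KiHz slow clock versus a hypothetical 5 MHz divided clock, both at HZ=100):

#include <stdio.h>

int main(void)
{
    unsigned int hz = 100;

    /* slow clock: (32768 + 50) / 100 = 328 -> ~10 ms period */
    printf("RC = %u\n", (32768u + hz / 2) / hz);
    /* divided clock: (5000000 + 50) / 100 = 50000, which still fits
     * the 16-bit counter (max 0xffff) used by the TC block */
    printf("RC = %u\n", (5000000u + hz / 2) / hz);
    return 0;
}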

@@ -0,0 +1,225 @@
Subject: completion: Use simple wait queues
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 11 Jan 2013 11:23:51 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Completions have no long-lasting callbacks and therefore do not need
the complex waitqueue variant. Use simple waitqueues, which reduces the
contention on the waitqueue lock.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/net/wireless/orinoco/orinoco_usb.c | 2 -
drivers/usb/gadget/function/f_fs.c | 2 -
drivers/usb/gadget/legacy/inode.c | 4 +--
include/linux/completion.h | 9 +++-----
include/linux/uprobes.h | 1
kernel/sched/completion.c | 32 ++++++++++++++---------------
kernel/sched/core.c | 10 +++++++--
7 files changed, 33 insertions(+), 27 deletions(-)
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -697,7 +697,7 @@ static void ezusb_req_ctx_wait(struct ez
while (!ctx->done.done && msecs--)
udelay(1000);
} else {
- wait_event_interruptible(ctx->done.wait,
+ swait_event_interruptible(ctx->done.wait,
ctx->done.done);
}
break;
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1405,7 +1405,7 @@ static void ffs_data_put(struct ffs_data
pr_info("%s(): freeing\n", __func__);
ffs_data_clear(ffs);
BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
- waitqueue_active(&ffs->ep0req_completion.wait));
+ swaitqueue_active(&ffs->ep0req_completion.wait));
kfree(ffs->dev_name);
kfree(ffs);
}
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -345,7 +345,7 @@ ep_io (struct ep_data *epdata, void *buf
spin_unlock_irq (&epdata->dev->lock);
if (likely (value == 0)) {
- value = wait_event_interruptible (done.wait, done.done);
+ value = swait_event_interruptible (done.wait, done.done);
if (value != 0) {
spin_lock_irq (&epdata->dev->lock);
if (likely (epdata->ep != NULL)) {
@@ -354,7 +354,7 @@ ep_io (struct ep_data *epdata, void *buf
usb_ep_dequeue (epdata->ep, epdata->req);
spin_unlock_irq (&epdata->dev->lock);
- wait_event (done.wait, done.done);
+ swait_event (done.wait, done.done);
if (epdata->status == -ECONNRESET)
epdata->status = -EINTR;
} else {
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -7,8 +7,7 @@
* Atomic wait-for-completion handler data structures.
* See kernel/sched/completion.c for details.
*/
-
-#include <linux/wait.h>
+#include <linux/wait-simple.h>
/*
* struct completion - structure used to maintain state for a "completion"
@@ -24,11 +23,11 @@
*/
struct completion {
unsigned int done;
- wait_queue_head_t wait;
+ struct swait_head wait;
};
#define COMPLETION_INITIALIZER(work) \
- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
+ { 0, SWAIT_HEAD_INITIALIZER((work).wait) }
#define COMPLETION_INITIALIZER_ONSTACK(work) \
({ init_completion(&work); work; })
@@ -73,7 +72,7 @@ struct completion {
static inline void init_completion(struct completion *x)
{
x->done = 0;
- init_waitqueue_head(&x->wait);
+ init_swait_head(&x->wait);
}
/**
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -27,6 +27,7 @@
#include <linux/errno.h>
#include <linux/rbtree.h>
#include <linux/types.h>
+#include <linux/wait.h>
struct vm_area_struct;
struct mm_struct;
--- a/kernel/sched/completion.c
+++ b/kernel/sched/completion.c
@@ -30,10 +30,10 @@ void complete(struct completion *x)
{
unsigned long flags;
- spin_lock_irqsave(&x->wait.lock, flags);
+ raw_spin_lock_irqsave(&x->wait.lock, flags);
x->done++;
- __wake_up_locked(&x->wait, TASK_NORMAL, 1);
- spin_unlock_irqrestore(&x->wait.lock, flags);
+ __swait_wake_locked(&x->wait, TASK_NORMAL, 1);
+ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete);
@@ -50,10 +50,10 @@ void complete_all(struct completion *x)
{
unsigned long flags;
- spin_lock_irqsave(&x->wait.lock, flags);
+ raw_spin_lock_irqsave(&x->wait.lock, flags);
x->done += UINT_MAX/2;
- __wake_up_locked(&x->wait, TASK_NORMAL, 0);
- spin_unlock_irqrestore(&x->wait.lock, flags);
+ __swait_wake_locked(&x->wait, TASK_NORMAL, 0);
+ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete_all);
@@ -62,20 +62,20 @@ do_wait_for_common(struct completion *x,
long (*action)(long), long timeout, int state)
{
if (!x->done) {
- DECLARE_WAITQUEUE(wait, current);
+ DEFINE_SWAITER(wait);
- __add_wait_queue_tail_exclusive(&x->wait, &wait);
+ swait_prepare_locked(&x->wait, &wait);
do {
if (signal_pending_state(state, current)) {
timeout = -ERESTARTSYS;
break;
}
__set_current_state(state);
- spin_unlock_irq(&x->wait.lock);
+ raw_spin_unlock_irq(&x->wait.lock);
timeout = action(timeout);
- spin_lock_irq(&x->wait.lock);
+ raw_spin_lock_irq(&x->wait.lock);
} while (!x->done && timeout);
- __remove_wait_queue(&x->wait, &wait);
+ swait_finish_locked(&x->wait, &wait);
if (!x->done)
return timeout;
}
@@ -89,9 +89,9 @@ static inline long __sched
{
might_sleep();
- spin_lock_irq(&x->wait.lock);
+ raw_spin_lock_irq(&x->wait.lock);
timeout = do_wait_for_common(x, action, timeout, state);
- spin_unlock_irq(&x->wait.lock);
+ raw_spin_unlock_irq(&x->wait.lock);
return timeout;
}
@@ -277,12 +277,12 @@ bool try_wait_for_completion(struct comp
if (!READ_ONCE(x->done))
return 0;
- spin_lock_irqsave(&x->wait.lock, flags);
+ raw_spin_lock_irqsave(&x->wait.lock, flags);
if (!x->done)
ret = 0;
else
x->done--;
- spin_unlock_irqrestore(&x->wait.lock, flags);
+ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);
@@ -311,7 +311,7 @@ bool completion_done(struct completion *
* after it's acquired the lock.
*/
smp_rmb();
- spin_unlock_wait(&x->wait.lock);
+ raw_spin_unlock_wait(&x->wait.lock);
return true;
}
EXPORT_SYMBOL(completion_done);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3143,7 +3143,10 @@ void migrate_disable(void)
}
#ifdef CONFIG_SCHED_DEBUG
- WARN_ON_ONCE(p->migrate_disable_atomic);
+ if (unlikely(p->migrate_disable_atomic)) {
+ tracing_off();
+ WARN_ON_ONCE(1);
+ }
#endif
if (p->migrate_disable) {
@@ -3173,7 +3176,10 @@ void migrate_enable(void)
}
#ifdef CONFIG_SCHED_DEBUG
- WARN_ON_ONCE(p->migrate_disable_atomic);
+ if (unlikely(p->migrate_disable_atomic)) {
+ tracing_off();
+ WARN_ON_ONCE(1);
+ }
#endif
WARN_ON_ONCE(p->migrate_disable <= 0);
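The completion API itself is unchanged by the conversion; only the internal waitqueue becomes a simple one. Typical usage, for reference (the asynchronous trigger is invented):

static struct completion setup_done;

static void waiter(void)
{
    init_completion(&setup_done);
    /* ... kick off asynchronous setup ... */
    wait_for_completion(&setup_done);    /* sleeps on the swait head */
}

static void setup_finished(void)    /* called by the asynchronous side */
{
    complete(&setup_done);          /* single, bounded wakeup */
}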

@@ -0,0 +1,27 @@
Subject: sched: Use the proper LOCK_OFFSET for cond_resched()
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 22:51:33 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
RT does not increment preempt count when a 'sleeping' spinlock is
locked. Update PREEMPT_LOCK_OFFSET for that case.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/preempt.h | 4 ++++
1 file changed, 4 insertions(+)
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -91,7 +91,11 @@
/*
* The preempt_count offset after spin_lock()
*/
+#if !defined(CONFIG_PREEMPT_RT_FULL)
#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
+#else
+#define PREEMPT_LOCK_OFFSET 0
+#endif
/*
* The preempt_count offset needed for things like:

@@ -0,0 +1,53 @@
Subject: sched: Take RT softirq semantics into account in cond_resched()
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 14 Jul 2011 09:56:44 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
The softirq semantics work differently on -RT. There is no SOFTIRQ_MASK in
the preemption counter, which trips the BUG_ON() statement in
__cond_resched_softirq(). On -RT it is enough to perform a "normal"
schedule.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/sched.h | 4 ++++
kernel/sched/core.c | 2 ++
2 files changed, 6 insertions(+)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2984,12 +2984,16 @@ extern int __cond_resched_lock(spinlock_
__cond_resched_lock(lock); \
})
+#ifndef CONFIG_PREEMPT_RT_FULL
extern int __cond_resched_softirq(void);
#define cond_resched_softirq() ({ \
___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
__cond_resched_softirq(); \
})
+#else
+# define cond_resched_softirq() cond_resched()
+#endif
static inline void cond_resched_rcu(void)
{
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4832,6 +4832,7 @@ int __cond_resched_lock(spinlock_t *lock
}
EXPORT_SYMBOL(__cond_resched_lock);
+#ifndef CONFIG_PREEMPT_RT_FULL
int __sched __cond_resched_softirq(void)
{
BUG_ON(!in_softirq());
@@ -4845,6 +4846,7 @@ int __sched __cond_resched_softirq(void)
return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);
+#endif
/**
* yield - yield the current processor to other threads.

@@ -0,0 +1,56 @@
From: Steven Rostedt <rostedt@goodmis.org>
Date: Thu, 5 Dec 2013 09:16:52 -0500
Subject: cpu hotplug: Document why PREEMPT_RT uses a spinlock
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
The patch:
cpu: Make hotplug.lock a "sleeping" spinlock on RT
Tasks can block on hotplug.lock in pin_current_cpu(), but their
state might be != RUNNING. So the mutex wakeup will set the state
unconditionally to RUNNING. That might cause spurious unexpected
wakeups. We could provide a state preserving mutex_lock() function,
but this is semantically backwards. So instead we convert the
hotplug.lock() to a spinlock for RT, which has the state preserving
semantics already.
Fixed a bug where the hotplug lock on PREEMPT_RT could be taken after a
task set its state to TASK_UNINTERRUPTIBLE and before it called
schedule. If the hotplug lock used a mutex, and there was contention,
the current task's state would be set to TASK_RUNNING and the
schedule call would not sleep. This caused unexpected results.
Although the patch had a description of the change, the code had no
comments about it. This causes confusion to those that review the code,
and as PREEMPT_RT is held in a quilt queue and not git, it's not as easy
to see why a change was made. Even if it was in git, the code should
still have a comment for something as subtle as this.
Document the rationale for using a spinlock on PREEMPT_RT in the hotplug
lock code.
Reported-by: Nicholas Mc Guire <der.herr@hofr.at>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/cpu.c | 8 ++++++++
1 file changed, 8 insertions(+)
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -110,6 +110,14 @@ struct hotplug_pcp {
int grab_lock;
struct completion synced;
#ifdef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Note, on PREEMPT_RT, the hotplug lock must save the state of
+ * the task, otherwise the mutex will cause the task to fail
+ * to sleep when required. (Because it's called from migrate_disable())
+ *
+ * The spinlock_t on PREEMPT_RT is a mutex that saves the task's
+ * state.
+ */
spinlock_t lock;
#else
struct mutex mutex;

@@ -0,0 +1,117 @@
Subject: cpu: Make hotplug.lock a "sleeping" spinlock on RT
From: Steven Rostedt <rostedt@goodmis.org>
Date: Fri, 02 Mar 2012 10:36:57 -0500
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Tasks can block on hotplug.lock in pin_current_cpu(), but their state
might be != RUNNING. So the mutex wakeup will set the state
unconditionally to RUNNING. That might cause spurious unexpected
wakeups. We could provide a state preserving mutex_lock() function,
but this is semantically backwards. So instead we convert the
hotplug.lock() to a spinlock for RT, which has the state preserving
semantics already.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Carsten Emde <C.Emde@osadl.org>
Cc: John Kacur <jkacur@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Clark Williams <clark.williams@gmail.com>
Link: http://lkml.kernel.org/r/1330702617.25686.265.camel@gandalf.stny.rr.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/cpu.c | 34 +++++++++++++++++++++++++++-------
1 file changed, 27 insertions(+), 7 deletions(-)
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -60,10 +60,16 @@ static int cpu_hotplug_disabled;
static struct {
struct task_struct *active_writer;
+
/* wait queue to wake up the active_writer */
wait_queue_head_t wq;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ /* Makes the lock keep the task's state */
+ spinlock_t lock;
+#else
/* verifies that no writer will get active while readers are active */
struct mutex lock;
+#endif
/*
* Also blocks the new readers during
* an ongoing cpu hotplug operation.
@@ -76,12 +82,26 @@ static struct {
} cpu_hotplug = {
.active_writer = NULL,
.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
+#ifdef CONFIG_PREEMPT_RT_FULL
+ .lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock),
+#else
.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
+#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define hotplug_lock() rt_spin_lock(&cpu_hotplug.lock)
+# define hotplug_trylock() rt_spin_trylock(&cpu_hotplug.lock)
+# define hotplug_unlock() rt_spin_unlock(&cpu_hotplug.lock)
+#else
+# define hotplug_lock() mutex_lock(&cpu_hotplug.lock)
+# define hotplug_trylock() mutex_trylock(&cpu_hotplug.lock)
+# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock)
+#endif
+
/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
@@ -118,8 +138,8 @@ void pin_current_cpu(void)
return;
}
preempt_enable();
- mutex_lock(&cpu_hotplug.lock);
- mutex_unlock(&cpu_hotplug.lock);
+ hotplug_lock();
+ hotplug_unlock();
preempt_disable();
goto retry;
}
@@ -192,9 +212,9 @@ void get_online_cpus(void)
if (cpu_hotplug.active_writer == current)
return;
cpuhp_lock_acquire_read();
- mutex_lock(&cpu_hotplug.lock);
+ hotplug_lock();
atomic_inc(&cpu_hotplug.refcount);
- mutex_unlock(&cpu_hotplug.lock);
+ hotplug_unlock();
}
EXPORT_SYMBOL_GPL(get_online_cpus);
@@ -247,11 +267,11 @@ void cpu_hotplug_begin(void)
cpuhp_lock_acquire();
for (;;) {
- mutex_lock(&cpu_hotplug.lock);
+ hotplug_lock();
prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
if (likely(!atomic_read(&cpu_hotplug.refcount)))
break;
- mutex_unlock(&cpu_hotplug.lock);
+ hotplug_unlock();
schedule();
}
finish_wait(&cpu_hotplug.wq, &wait);
@@ -260,7 +280,7 @@ void cpu_hotplug_begin(void)
void cpu_hotplug_done(void)
{
cpu_hotplug.active_writer = NULL;
- mutex_unlock(&cpu_hotplug.lock);
+ hotplug_unlock();
cpuhp_lock_release();
}

From: Steven Rostedt <srostedt@redhat.com>
Date: Mon, 16 Jul 2012 08:07:43 +0000
Subject: cpu/rt: Rework cpu down for PREEMPT_RT
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Bringing a CPU down is a pain with the PREEMPT_RT kernel because
tasks can be preempted in many more places than in non-RT. In
order to handle per_cpu variables, tasks may be pinned to a CPU
for a while, and even sleep. But these tasks need to be off the CPU
if that CPU is going down.
Several synchronization methods have been tried, but when stressed
they failed. This is a new approach.
A sync_tsk thread is still created and tasks may still block on a
lock when the CPU is going down, but how that works is a bit different.
When cpu_down() starts, it will create the sync_tsk and wait for it
to report that the tasks currently pinned to the CPU are no longer
pinned. New tasks that are about to be pinned will still be allowed
to do so at this time.
Then the notifiers are called. Several notifiers will bring down tasks
that will enter these locations. Some of these tasks will take locks
of other tasks that are on the CPU. If we don't let those other tasks
continue, but make them block until CPU down is done, the tasks that
the notifiers are waiting on will never complete as they are waiting
for the locks held by the tasks that are blocked.
Thus we still let the task pin the CPU until the notifiers are done.
After the notifiers run, we then make new tasks entering the pinned
CPU sections grab a mutex and wait. This mutex is now a per CPU mutex
in the hotplug_pcp descriptor.
To help things along, a new function called migrate_me() is added to
the scheduler code. This function will try to migrate the current task
off the CPU that is going down if possible. When the sync_tsk is created,
all tasks will then try to migrate off the CPU going down. There are
several cases where this won't work, but it helps in most cases.
After the notifiers are called, a task that can't migrate off but enters
the pinned-CPU sections will be forced to wait on the hotplug_pcp mutex
until the CPU down is complete. Then the scheduler will force the migration
anyway.
Also, THREAD_BOUND tasks now need to be accounted for in the pinned-CPU
count, and migrate_disable() no longer treats them specially.
This helps fix issues with ksoftirqd and workqueues that unbind on CPU down.
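The resulting ordering in _cpu_down(), sketched from the description
above (illustrative only, simplified):
	cpu_unplug_begin(cpu);	/* create sync_tsk; wait until currently
				 * pinned tasks are gone, new pinning is
				 * still allowed */
	/* ... CPU notifiers run; tasks may still pin/unpin this CPU ... */
	cpu_unplug_sync(cpu);	/* set grab_lock; new pinners now block on
				 * the per-cpu mutex; wait for the rest */
	/* ... stop machine takes the CPU down ... */
	cpu_unplug_done(cpu);	/* release the mutex, clean up */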
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/sched.h | 7 +
kernel/cpu.c | 240 ++++++++++++++++++++++++++++++++++++++++----------
kernel/sched/core.c | 82 ++++++++++++++++-
3 files changed, 283 insertions(+), 46 deletions(-)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2282,6 +2282,10 @@ extern void do_set_cpus_allowed(struct t
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
+int migrate_me(void);
+void tell_sched_cpu_down_begin(int cpu);
+void tell_sched_cpu_down_done(int cpu);
+
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask)
@@ -2294,6 +2298,9 @@ static inline int set_cpus_allowed_ptr(s
return -EINVAL;
return 0;
}
+static inline int migrate_me(void) { return 0; }
+static inline void tell_sched_cpu_down_begin(int cpu) { }
+static inline void tell_sched_cpu_down_done(int cpu) { }
#endif
#ifdef CONFIG_NO_HZ_COMMON
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -60,16 +60,10 @@ static int cpu_hotplug_disabled;
static struct {
struct task_struct *active_writer;
-
/* wait queue to wake up the active_writer */
wait_queue_head_t wq;
-#ifdef CONFIG_PREEMPT_RT_FULL
- /* Makes the lock keep the task's state */
- spinlock_t lock;
-#else
/* verifies that no writer will get active while readers are active */
struct mutex lock;
-#endif
/*
* Also blocks the new readers during
* an ongoing cpu hotplug operation.
@@ -81,27 +75,13 @@ static struct {
#endif
} cpu_hotplug = {
.active_writer = NULL,
- .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
-#ifdef CONFIG_PREEMPT_RT_FULL
- .lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock),
-#else
.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
-#endif
+ .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};
-#ifdef CONFIG_PREEMPT_RT_FULL
-# define hotplug_lock() rt_spin_lock(&cpu_hotplug.lock)
-# define hotplug_trylock() rt_spin_trylock(&cpu_hotplug.lock)
-# define hotplug_unlock() rt_spin_unlock(&cpu_hotplug.lock)
-#else
-# define hotplug_lock() mutex_lock(&cpu_hotplug.lock)
-# define hotplug_trylock() mutex_trylock(&cpu_hotplug.lock)
-# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock)
-#endif
-
/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
@@ -109,12 +89,42 @@ static struct {
#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
+/**
+ * hotplug_pcp - per cpu hotplug descriptor
+ * @unplug: set when pin_current_cpu() needs to sync tasks
+ * @sync_tsk: the task that waits for tasks to finish pinned sections
+ * @refcount: counter of tasks in pinned sections
+ * @grab_lock: set when the tasks entering pinned sections should wait
+ * @synced: notifier for @sync_tsk to tell cpu_down it's finished
+ * @mutex: the mutex to make tasks wait (used when @grab_lock is true)
+ * @mutex_init: zero if the mutex hasn't been initialized yet.
+ *
+ * Although @unplug and @sync_tsk may point to the same task, the @unplug
+ * is used as a flag and still exists after @sync_tsk has exited and
+ * @sync_tsk set to NULL.
+ */
struct hotplug_pcp {
struct task_struct *unplug;
+ struct task_struct *sync_tsk;
int refcount;
+ int grab_lock;
struct completion synced;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ spinlock_t lock;
+#else
+ struct mutex mutex;
+#endif
+ int mutex_init;
};
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define hotplug_lock(hp) rt_spin_lock(&(hp)->lock)
+# define hotplug_unlock(hp) rt_spin_unlock(&(hp)->lock)
+#else
+# define hotplug_lock(hp) mutex_lock(&(hp)->mutex)
+# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex)
+#endif
+
static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
/**
@@ -128,18 +138,39 @@ static DEFINE_PER_CPU(struct hotplug_pcp
void pin_current_cpu(void)
{
struct hotplug_pcp *hp;
+ int force = 0;
retry:
hp = this_cpu_ptr(&hotplug_pcp);
- if (!hp->unplug || hp->refcount || preempt_count() > 1 ||
+ if (!hp->unplug || hp->refcount || force || preempt_count() > 1 ||
hp->unplug == current) {
hp->refcount++;
return;
}
- preempt_enable();
- hotplug_lock();
- hotplug_unlock();
+ if (hp->grab_lock) {
+ preempt_enable();
+ hotplug_lock(hp);
+ hotplug_unlock(hp);
+ } else {
+ preempt_enable();
+ /*
+ * Try to push this task off of this CPU.
+ */
+ if (!migrate_me()) {
+ preempt_disable();
+ hp = this_cpu_ptr(&hotplug_pcp);
+ if (!hp->grab_lock) {
+ /*
+				 * Just let it continue, it's already pinned
+ * or about to sleep.
+ */
+ force = 1;
+ goto retry;
+ }
+ preempt_enable();
+ }
+ }
preempt_disable();
goto retry;
}
@@ -160,26 +191,84 @@ void unpin_current_cpu(void)
wake_up_process(hp->unplug);
}
-/*
- * FIXME: Is this really correct under all circumstances ?
- */
+static void wait_for_pinned_cpus(struct hotplug_pcp *hp)
+{
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ while (hp->refcount) {
+ schedule_preempt_disabled();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ }
+}
+
static int sync_unplug_thread(void *data)
{
struct hotplug_pcp *hp = data;
preempt_disable();
hp->unplug = current;
+ wait_for_pinned_cpus(hp);
+
+ /*
+ * This thread will synchronize the cpu_down() with threads
+ * that have pinned the CPU. When the pinned CPU count reaches
+ * zero, we inform the cpu_down code to continue to the next step.
+ */
set_current_state(TASK_UNINTERRUPTIBLE);
- while (hp->refcount) {
- schedule_preempt_disabled();
+ preempt_enable();
+ complete(&hp->synced);
+
+ /*
+ * If all succeeds, the next step will need tasks to wait till
+ * the CPU is offline before continuing. To do this, the grab_lock
+ * is set and tasks going into pin_current_cpu() will block on the
+ * mutex. But we still need to wait for those that are already in
+ * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop()
+ * will kick this thread out.
+ */
+ while (!hp->grab_lock && !kthread_should_stop()) {
+ schedule();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ }
+
+ /* Make sure grab_lock is seen before we see a stale completion */
+ smp_mb();
+
+ /*
+ * Now just before cpu_down() enters stop machine, we need to make
+ * sure all tasks that are in pinned CPU sections are out, and new
+ * tasks will now grab the lock, keeping them from entering pinned
+ * CPU sections.
+ */
+ if (!kthread_should_stop()) {
+ preempt_disable();
+ wait_for_pinned_cpus(hp);
+ preempt_enable();
+ complete(&hp->synced);
+ }
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ schedule();
set_current_state(TASK_UNINTERRUPTIBLE);
}
set_current_state(TASK_RUNNING);
- preempt_enable();
- complete(&hp->synced);
+
+ /*
+ * Force this thread off this CPU as it's going down and
+ * we don't want any more work on this CPU.
+ */
+ current->flags &= ~PF_NO_SETAFFINITY;
+ do_set_cpus_allowed(current, cpu_present_mask);
+ migrate_me();
return 0;
}
+static void __cpu_unplug_sync(struct hotplug_pcp *hp)
+{
+ wake_up_process(hp->sync_tsk);
+ wait_for_completion(&hp->synced);
+}
+
/*
* Start the sync_unplug_thread on the target cpu and wait for it to
* complete.
@@ -187,23 +276,83 @@ static int sync_unplug_thread(void *data
static int cpu_unplug_begin(unsigned int cpu)
{
struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
- struct task_struct *tsk;
+ int err;
+
+ /* Protected by cpu_hotplug.lock */
+ if (!hp->mutex_init) {
+#ifdef CONFIG_PREEMPT_RT_FULL
+ spin_lock_init(&hp->lock);
+#else
+ mutex_init(&hp->mutex);
+#endif
+ hp->mutex_init = 1;
+ }
+
+ /* Inform the scheduler to migrate tasks off this CPU */
+ tell_sched_cpu_down_begin(cpu);
init_completion(&hp->synced);
- tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
- if (IS_ERR(tsk))
- return (PTR_ERR(tsk));
- kthread_bind(tsk, cpu);
- wake_up_process(tsk);
- wait_for_completion(&hp->synced);
+
+ hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
+ if (IS_ERR(hp->sync_tsk)) {
+ err = PTR_ERR(hp->sync_tsk);
+ hp->sync_tsk = NULL;
+ return err;
+ }
+ kthread_bind(hp->sync_tsk, cpu);
+
+ /*
+ * Wait for tasks to get out of the pinned sections,
+ * it's still OK if new tasks enter. Some CPU notifiers will
+ * wait for tasks that are going to enter these sections and
+ * we must not have them block.
+ */
+ __cpu_unplug_sync(hp);
+
return 0;
}
+static void cpu_unplug_sync(unsigned int cpu)
+{
+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
+
+ init_completion(&hp->synced);
+	/* The completion needs to be initialized before setting grab_lock */
+ smp_wmb();
+
+ /* Grab the mutex before setting grab_lock */
+ hotplug_lock(hp);
+ hp->grab_lock = 1;
+
+ /*
+ * The CPU notifiers have been completed.
+ * Wait for tasks to get out of pinned CPU sections and have new
+ * tasks block until the CPU is completely down.
+ */
+ __cpu_unplug_sync(hp);
+
+ /* All done with the sync thread */
+ kthread_stop(hp->sync_tsk);
+ hp->sync_tsk = NULL;
+}
+
static void cpu_unplug_done(unsigned int cpu)
{
struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
hp->unplug = NULL;
+ /* Let all tasks know cpu unplug is finished before cleaning up */
+ smp_wmb();
+
+ if (hp->sync_tsk)
+ kthread_stop(hp->sync_tsk);
+
+ if (hp->grab_lock) {
+ hotplug_unlock(hp);
+ /* protected by cpu_hotplug.lock */
+ hp->grab_lock = 0;
+ }
+ tell_sched_cpu_down_done(cpu);
}
void get_online_cpus(void)
@@ -212,9 +361,9 @@ void get_online_cpus(void)
if (cpu_hotplug.active_writer == current)
return;
cpuhp_lock_acquire_read();
- hotplug_lock();
+ mutex_lock(&cpu_hotplug.lock);
atomic_inc(&cpu_hotplug.refcount);
- hotplug_unlock();
+ mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);
@@ -267,11 +416,11 @@ void cpu_hotplug_begin(void)
cpuhp_lock_acquire();
for (;;) {
- hotplug_lock();
+ mutex_lock(&cpu_hotplug.lock);
prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
if (likely(!atomic_read(&cpu_hotplug.refcount)))
break;
- hotplug_unlock();
+ mutex_unlock(&cpu_hotplug.lock);
schedule();
}
finish_wait(&cpu_hotplug.wq, &wait);
@@ -280,7 +429,7 @@ void cpu_hotplug_begin(void)
void cpu_hotplug_done(void)
{
cpu_hotplug.active_writer = NULL;
- hotplug_unlock();
+ mutex_unlock(&cpu_hotplug.lock);
cpuhp_lock_release();
}
@@ -516,6 +665,9 @@ static int _cpu_down(unsigned int cpu, i
smpboot_park_threads(cpu);
+ /* Notifiers are done. Don't let any more tasks pin this CPU. */
+ cpu_unplug_sync(cpu);
+
/*
* Prevent irq alloc/free while the dying cpu reorganizes the
* interrupt affinities.
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1220,6 +1220,84 @@ void do_set_cpus_allowed(struct task_str
enqueue_task(rq, p, ENQUEUE_RESTORE);
}
+static DEFINE_PER_CPU(struct cpumask, sched_cpumasks);
+static DEFINE_MUTEX(sched_down_mutex);
+static cpumask_t sched_down_cpumask;
+
+void tell_sched_cpu_down_begin(int cpu)
+{
+ mutex_lock(&sched_down_mutex);
+ cpumask_set_cpu(cpu, &sched_down_cpumask);
+ mutex_unlock(&sched_down_mutex);
+}
+
+void tell_sched_cpu_down_done(int cpu)
+{
+ mutex_lock(&sched_down_mutex);
+ cpumask_clear_cpu(cpu, &sched_down_cpumask);
+ mutex_unlock(&sched_down_mutex);
+}
+
+/**
+ * migrate_me - try to move the current task off this cpu
+ *
+ * Used by the pin_current_cpu() code to try to get tasks
+ * to move off the current CPU as it is going down.
+ * It will only move the task if the task isn't pinned to
+ * the CPU (with migrate_disable, affinity or NO_SETAFFINITY)
+ * and the task has to be in a RUNNING state. Otherwise the
+ * movement of the task will wake it up (change its state
+ * to running) when the task did not expect it.
+ *
+ * Returns 1 if it succeeded in moving the current task
+ * 0 otherwise.
+ */
+int migrate_me(void)
+{
+ struct task_struct *p = current;
+ struct migration_arg arg;
+ struct cpumask *cpumask;
+ struct cpumask *mask;
+ unsigned long flags;
+ unsigned int dest_cpu;
+ struct rq *rq;
+
+ /*
+ * We can not migrate tasks bounded to a CPU or tasks not
+ * running. The movement of the task will wake it up.
+ */
+ if (p->flags & PF_NO_SETAFFINITY || p->state)
+ return 0;
+
+ mutex_lock(&sched_down_mutex);
+ rq = task_rq_lock(p, &flags);
+
+ cpumask = this_cpu_ptr(&sched_cpumasks);
+ mask = &p->cpus_allowed;
+
+ cpumask_andnot(cpumask, mask, &sched_down_cpumask);
+
+ if (!cpumask_weight(cpumask)) {
+ /* It's only on this CPU? */
+ task_rq_unlock(rq, p, &flags);
+ mutex_unlock(&sched_down_mutex);
+ return 0;
+ }
+
+ dest_cpu = cpumask_any_and(cpu_active_mask, cpumask);
+
+ arg.task = p;
+ arg.dest_cpu = dest_cpu;
+
+ task_rq_unlock(rq, p, &flags);
+
+ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
+ tlb_migrate_finish(p->mm);
+ mutex_unlock(&sched_down_mutex);
+
+ return 1;
+}
+
/*
* Change a given task's CPU affinity. Migrate the thread to a
* proper CPU and schedule it away if the CPU it's executing on
@@ -3085,7 +3163,7 @@ void migrate_disable(void)
{
struct task_struct *p = current;
- if (in_atomic() || p->flags & PF_NO_SETAFFINITY) {
+ if (in_atomic()) {
#ifdef CONFIG_SCHED_DEBUG
p->migrate_disable_atomic++;
#endif
@@ -3118,7 +3196,7 @@ void migrate_enable(void)
unsigned long flags;
struct rq *rq;
- if (in_atomic() || p->flags & PF_NO_SETAFFINITY) {
+ if (in_atomic()) {
#ifdef CONFIG_SCHED_DEBUG
p->migrate_disable_atomic--;
#endif

From: Steven Rostedt <rostedt@goodmis.org>
Date: Tue, 4 Mar 2014 12:28:32 -0500
Subject: cpu_chill: Add a UNINTERRUPTIBLE hrtimer_nanosleep
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
We hit another bug that was caused by switching cpu_chill() from
msleep() to hrtimer_nanosleep().
This time it is a livelock. The problem is that hrtimer_nanosleep()
calls schedule() with the state == TASK_INTERRUPTIBLE. But this means
that if a signal is pending, the scheduler won't schedule, and will
simply set the current task state back to TASK_RUNNING. This
nullifies the whole point of cpu_chill() in the first place. That is,
if a task spinning on a trylock has preempted the owner of the lock
and has a signal pending, it will never give up the CPU to let the
owner of the lock run.
I made a static function __hrtimer_nanosleep() that takes a fifth
parameter "state", which determines the task state that the
nanosleep() will be in. The normal hrtimer_nanosleep() will act the
same, but cpu_chill() will call __hrtimer_nanosleep() directly with
the TASK_UNINTERRUPTIBLE state.
cpu_chill() only cares that the first sleep happens, and does not care
about the state of the restart schedule (in hrtimer_nanosleep_restart).
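For reference, the trylock-retry pattern that cpu_chill() is used in
looks like this (a sketch, modeled on the d_delete() loop in the
dcache patch later in this series):
	again:
		if (!spin_trylock(&inode->i_lock)) {
			spin_unlock(&dentry->d_lock);
			cpu_chill();	/* must really sleep here, even with
					 * a signal pending, so the preempted
					 * lock owner can run */
			goto again;
		}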
Reported-by: Ulrich Obergfell <uobergfe@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/time/hrtimer.c | 25 ++++++++++++++++++-------
1 file changed, 18 insertions(+), 7 deletions(-)
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1657,12 +1657,13 @@ void hrtimer_init_sleeper(struct hrtimer
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
-static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
+static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode,
+ unsigned long state)
{
hrtimer_init_sleeper(t, current);
do {
- set_current_state(TASK_INTERRUPTIBLE);
+ set_current_state(state);
hrtimer_start_expires(&t->timer, mode);
if (likely(t->task))
@@ -1704,7 +1705,8 @@ long __sched hrtimer_nanosleep_restart(s
HRTIMER_MODE_ABS);
hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
- if (do_nanosleep(&t, HRTIMER_MODE_ABS))
+ /* cpu_chill() does not care about restart state. */
+ if (do_nanosleep(&t, HRTIMER_MODE_ABS, TASK_INTERRUPTIBLE))
goto out;
rmtp = restart->nanosleep.rmtp;
@@ -1721,8 +1723,10 @@ long __sched hrtimer_nanosleep_restart(s
return ret;
}
-long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
- const enum hrtimer_mode mode, const clockid_t clockid)
+static long
+__hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
+ const enum hrtimer_mode mode, const clockid_t clockid,
+ unsigned long state)
{
struct restart_block *restart;
struct hrtimer_sleeper t;
@@ -1735,7 +1739,7 @@ long hrtimer_nanosleep(struct timespec *
hrtimer_init_on_stack(&t.timer, clockid, mode);
hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
- if (do_nanosleep(&t, mode))
+ if (do_nanosleep(&t, mode, state))
goto out;
/* Absolute timers do not update the rmtp value and restart: */
@@ -1762,6 +1766,12 @@ long hrtimer_nanosleep(struct timespec *
return ret;
}
+long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
+ const enum hrtimer_mode mode, const clockid_t clockid)
+{
+ return __hrtimer_nanosleep(rqtp, rmtp, mode, clockid, TASK_INTERRUPTIBLE);
+}
+
SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
struct timespec __user *, rmtp)
{
@@ -1788,7 +1798,8 @@ void cpu_chill(void)
unsigned int freeze_flag = current->flags & PF_NOFREEZE;
current->flags |= PF_NOFREEZE;
- hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
+ __hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC,
+ TASK_UNINTERRUPTIBLE);
if (!freeze_flag)
current->flags &= ~PF_NOFREEZE;
}

From: Tiejun Chen <tiejun.chen@windriver.com>
Subject: cpu_down: move migrate_enable() back
Date: Thu, 7 Nov 2013 10:06:07 +0800
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Commit 08c1ab68, "hotplug-use-migrate-disable.patch", intends to
replace the combination of preempt_enable() and preempt_disable()
with migrate_enable()/migrate_disable(), but in the
!CONFIG_PREEMPT_RT_FULL case migrate_enable()/migrate_disable()
are still equivalent to preempt_enable()/preempt_disable(). The
subsequent cpu_hotplug_begin()/cpu_unplug_begin(cpu) can then reach
schedule() and trigger schedule_debug() like this:
_cpu_down()
|
+ migrate_disable() = preempt_disable()
|
+ cpu_hotplug_begin() or cpu_unplug_begin()
|
+ schedule()
|
+ __schedule()
|
+ preempt_disable();
|
+ __schedule_bug() is true!
So we should move migrate_enable() back to where the original scheme had it.
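The fixed ordering, sketched (illustrative only):
	migrate_disable();
	...
	migrate_enable();	/* leave the atomic region first ... */
	cpu_hotplug_begin();	/* ... before anything that may schedule() */
	err = cpu_unplug_begin(cpu);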
Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
---
kernel/cpu.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -657,6 +657,7 @@ static int _cpu_down(unsigned int cpu, i
err = -EBUSY;
goto restore_cpus;
}
+ migrate_enable();
cpu_hotplug_begin();
err = cpu_unplug_begin(cpu);
@@ -741,7 +742,6 @@ static int _cpu_down(unsigned int cpu, i
out_release:
cpu_unplug_done(cpu);
out_cancel:
- migrate_enable();
cpu_hotplug_done();
if (!err)
cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);

From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 9 Apr 2015 15:23:01 +0200
Subject: cpufreq: drop K8's driver from being selected
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Ralf posted a picture of a backtrace from
| powernowk8_target_fn() -> transition_frequency_fidvid() and then at the
| end:
| 932 policy = cpufreq_cpu_get(smp_processor_id());
| 933 cpufreq_cpu_put(policy);
crashing the system on -RT. I assumed that policy was a NULL pointer, but
that was ruled out. Since Ralf can't do any more investigation on this and
I have no machine with this hardware, I simply switch the driver off.
Reported-by: Ralf Mardorf <ralf.mardorf@alice-dsl.net>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/cpufreq/Kconfig.x86 | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -123,7 +123,7 @@ config X86_POWERNOW_K7_ACPI
config X86_POWERNOW_K8
tristate "AMD Opteron/Athlon64 PowerNow!"
- depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ
+ depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ && !PREEMPT_RT_BASE
help
This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
Support for K10 and newer processors is now in acpi-cpufreq.

Subject: cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 14 Dec 2011 01:03:49 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
We can't deal with the cpumask allocations which happen in atomic
context (see arch/x86/kernel/apic/io_apic.c) on RT right now.
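For context (a sketch, not part of the patch): with CPUMASK_OFFSTACK
enabled, cpumask_var_t becomes a pointer and needs a real allocation,
which is not an option in atomic context on RT since the allocator
locks are sleeping locks there.
	cpumask_var_t mask;
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))	/* may sleep */
		return -ENOMEM;
	/* ... use mask ... */
	free_cpumask_var(mask);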
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/x86/Kconfig | 2 +-
lib/Kconfig | 1 +
2 files changed, 2 insertions(+), 1 deletion(-)
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -851,7 +851,7 @@ config IOMMU_HELPER
config MAXSMP
bool "Enable Maximum number of SMP Processors and NUMA Nodes"
depends on X86_64 && SMP && DEBUG_KERNEL
- select CPUMASK_OFFSTACK
+ select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL
---help---
Enable maximum number of CPUS and NUMA Nodes for this architecture.
If unsure, say N.
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -395,6 +395,7 @@ config CHECK_SIGNATURE
config CPUMASK_OFFSTACK
bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
+ depends on !PREEMPT_RT_FULL
help
Use dynamic allocation for cpumask_var_t, instead of putting
them on the stack. This is a bit more expensive, but avoids

From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 21 Feb 2014 17:24:04 +0100
Subject: crypto: Reduce preempt disabled regions, more algos
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Don Estabrook reported
| kernel: WARNING: CPU: 2 PID: 858 at kernel/sched/core.c:2428 migrate_disable+0xed/0x100()
| kernel: WARNING: CPU: 2 PID: 858 at kernel/sched/core.c:2462 migrate_enable+0x17b/0x200()
| kernel: WARNING: CPU: 3 PID: 865 at kernel/sched/core.c:2428 migrate_disable+0xed/0x100()
and his backtrace showed some crypto functions which looked fine.
The problem is the following sequence:
glue_xts_crypt_128bit()
{
blkcipher_walk_virt(); /* normal migrate_disable() */
glue_fpu_begin(); /* get atomic */
while (nbytes) {
__glue_xts_crypt_128bit();
blkcipher_walk_done(); /* with nbytes = 0, migrate_enable()
* while we are atomic */
};
glue_fpu_end() /* no longer atomic */
}
and this is why the counter gets out of sync and the warning is printed.
The other problem is that we are non-preemptible between
glue_fpu_begin() and glue_fpu_end() and the latency grows. To fix this,
I shorten the FPU-off region and ensure blkcipher_walk_done() is called
with preemption enabled. This might hurt performance because we now
enable/disable the FPU state more often, but we gain lower latency and
the bug is gone.
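The resulting pattern, applied to all the loops below (sketch): take
the FPU per chunk and drop it again before blkcipher_walk_done() runs.
	while ((nbytes = walk.nbytes)) {
		fpu_enabled = cast5_fpu_begin(false, nbytes);	/* atomic */
		nbytes = __cbc_decrypt(desc, &walk);
		cast5_fpu_end(fpu_enabled);			/* preemptible */
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}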
Reported-by: Don Estabrook <don.estabrook@gmail.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/x86/crypto/cast5_avx_glue.c | 21 +++++++++------------
arch/x86/crypto/glue_helper.c | 31 +++++++++++++++----------------
2 files changed, 24 insertions(+), 28 deletions(-)
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -59,7 +59,7 @@ static inline void cast5_fpu_end(bool fp
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
bool enc)
{
- bool fpu_enabled = false;
+ bool fpu_enabled;
struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
const unsigned int bsize = CAST5_BLOCK_SIZE;
unsigned int nbytes;
@@ -75,7 +75,7 @@ static int ecb_crypt(struct blkcipher_de
u8 *wsrc = walk->src.virt.addr;
u8 *wdst = walk->dst.virt.addr;
- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
+ fpu_enabled = cast5_fpu_begin(false, nbytes);
/* Process multi-block batch */
if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
@@ -103,10 +103,9 @@ static int ecb_crypt(struct blkcipher_de
} while (nbytes >= bsize);
done:
+ cast5_fpu_end(fpu_enabled);
err = blkcipher_walk_done(desc, walk, nbytes);
}
-
- cast5_fpu_end(fpu_enabled);
return err;
}
@@ -227,7 +226,7 @@ static unsigned int __cbc_decrypt(struct
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
- bool fpu_enabled = false;
+ bool fpu_enabled;
struct blkcipher_walk walk;
int err;
@@ -236,12 +235,11 @@ static int cbc_decrypt(struct blkcipher_
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
while ((nbytes = walk.nbytes)) {
- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
+ fpu_enabled = cast5_fpu_begin(false, nbytes);
nbytes = __cbc_decrypt(desc, &walk);
+ cast5_fpu_end(fpu_enabled);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
-
- cast5_fpu_end(fpu_enabled);
return err;
}
@@ -311,7 +309,7 @@ static unsigned int __ctr_crypt(struct b
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
- bool fpu_enabled = false;
+ bool fpu_enabled;
struct blkcipher_walk walk;
int err;
@@ -320,13 +318,12 @@ static int ctr_crypt(struct blkcipher_de
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
+ fpu_enabled = cast5_fpu_begin(false, nbytes);
nbytes = __ctr_crypt(desc, &walk);
+ cast5_fpu_end(fpu_enabled);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- cast5_fpu_end(fpu_enabled);
-
if (walk.nbytes) {
ctr_crypt_final(desc, &walk);
err = blkcipher_walk_done(desc, &walk, 0);
--- a/arch/x86/crypto/glue_helper.c
+++ b/arch/x86/crypto/glue_helper.c
@@ -39,7 +39,7 @@ static int __glue_ecb_crypt_128bit(const
void *ctx = crypto_blkcipher_ctx(desc->tfm);
const unsigned int bsize = 128 / 8;
unsigned int nbytes, i, func_bytes;
- bool fpu_enabled = false;
+ bool fpu_enabled;
int err;
err = blkcipher_walk_virt(desc, walk);
@@ -49,7 +49,7 @@ static int __glue_ecb_crypt_128bit(const
u8 *wdst = walk->dst.virt.addr;
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- desc, fpu_enabled, nbytes);
+ desc, false, nbytes);
for (i = 0; i < gctx->num_funcs; i++) {
func_bytes = bsize * gctx->funcs[i].num_blocks;
@@ -71,10 +71,10 @@ static int __glue_ecb_crypt_128bit(const
}
done:
+ glue_fpu_end(fpu_enabled);
err = blkcipher_walk_done(desc, walk, nbytes);
}
- glue_fpu_end(fpu_enabled);
return err;
}
@@ -194,7 +194,7 @@ int glue_cbc_decrypt_128bit(const struct
struct scatterlist *src, unsigned int nbytes)
{
const unsigned int bsize = 128 / 8;
- bool fpu_enabled = false;
+ bool fpu_enabled;
struct blkcipher_walk walk;
int err;
@@ -203,12 +203,12 @@ int glue_cbc_decrypt_128bit(const struct
while ((nbytes = walk.nbytes)) {
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- desc, fpu_enabled, nbytes);
+ desc, false, nbytes);
nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
+ glue_fpu_end(fpu_enabled);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- glue_fpu_end(fpu_enabled);
return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
@@ -277,7 +277,7 @@ int glue_ctr_crypt_128bit(const struct c
struct scatterlist *src, unsigned int nbytes)
{
const unsigned int bsize = 128 / 8;
- bool fpu_enabled = false;
+ bool fpu_enabled;
struct blkcipher_walk walk;
int err;
@@ -286,13 +286,12 @@ int glue_ctr_crypt_128bit(const struct c
while ((nbytes = walk.nbytes) >= bsize) {
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- desc, fpu_enabled, nbytes);
+ desc, false, nbytes);
nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
+ glue_fpu_end(fpu_enabled);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- glue_fpu_end(fpu_enabled);
-
if (walk.nbytes) {
glue_ctr_crypt_final_128bit(
gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
@@ -347,7 +346,7 @@ int glue_xts_crypt_128bit(const struct c
void *tweak_ctx, void *crypt_ctx)
{
const unsigned int bsize = 128 / 8;
- bool fpu_enabled = false;
+ bool fpu_enabled;
struct blkcipher_walk walk;
int err;
@@ -360,21 +359,21 @@ int glue_xts_crypt_128bit(const struct c
/* set minimum length to bsize, for tweak_fn */
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- desc, fpu_enabled,
+ desc, false,
nbytes < bsize ? bsize : nbytes);
-
/* calculate first value of T */
tweak_fn(tweak_ctx, walk.iv, walk.iv);
+ glue_fpu_end(fpu_enabled);
while (nbytes) {
+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+ desc, false, nbytes);
nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);
+ glue_fpu_end(fpu_enabled);
err = blkcipher_walk_done(desc, &walk, nbytes);
nbytes = walk.nbytes;
}
-
- glue_fpu_end(fpu_enabled);
-
return err;
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);

Subject: debugobjects: Make RT aware
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 21:41:35 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Avoid filling the pool / allocating memory with interrupts off.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
lib/debugobjects.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -309,7 +309,10 @@ static void
struct debug_obj *obj;
unsigned long flags;
- fill_pool();
+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (preempt_count() == 0 && !irqs_disabled())
+#endif
+ fill_pool();
db = get_bucket((unsigned long) addr);

Subject: dm: Make rt aware
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 14 Nov 2011 23:06:09 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Use the BUG_ON_NONRT variant for the irqs_disabled() checks. RT has
interrupts legitimately enabled here, as we can't deadlock against the
irq thread due to the "sleeping spinlocks" conversion.
Reported-by: Luis Claudio R. Goncalves <lclaudio@uudg.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/md/dm.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2126,7 +2126,7 @@ static void dm_request_fn(struct request
/* Establish tio->ti before queuing work (map_tio_request) */
tio->ti = ti;
queue_kthread_work(&md->kworker, &tio->work);
- BUG_ON(!irqs_disabled());
+ BUG_ON_NONRT(!irqs_disabled());
}
goto out;

From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:24 -0500
Subject: drivers/net: Use disable_irq_nosync() in 8139too
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Use disable_irq_nosync() instead of disable_irq() as this might be
called in atomic context with netpoll.
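The difference (sketch): disable_irq() waits for a running handler to
finish and may sleep, while disable_irq_nosync() only masks the line
and returns immediately, which is what the netpoll path needs.
	disable_irq_nosync(irq);	/* no handler synchronization, safe
					 * from atomic context */
	rtl8139_interrupt(irq, dev);
	enable_irq(irq);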
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/net/ethernet/realtek/8139too.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2229,7 +2229,7 @@ static void rtl8139_poll_controller(stru
struct rtl8139_private *tp = netdev_priv(dev);
const int irq = tp->pci_dev->irq;
- disable_irq(irq);
+ disable_irq_nosync(irq);
rtl8139_interrupt(irq, dev);
enable_irq(irq);
}

From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 20 Jun 2009 11:36:54 +0200
Subject: drivers/net: fix livelock issues
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Preempt-RT runs into a livelock issue with the NETDEV_TX_LOCKED micro
optimization. The reason is that the softirq thread reschedules
itself on that return value. Depending on priorities, it starts to
monopolize the CPU and livelocks on UP systems.
Remove it.
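The replacement pattern used across the drivers below (sketch): block
on the lock instead of reporting NETDEV_TX_LOCKED to the stack.
	-	if (!spin_trylock_irqsave(&adapter->tx_lock, flags))
	-		return NETDEV_TX_LOCKED;  /* softirq retries at once */
	+	spin_lock_irqsave(&adapter->tx_lock, flags);  /* just wait */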
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 6 +-----
drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 3 +--
drivers/net/ethernet/chelsio/cxgb/sge.c | 3 +--
drivers/net/ethernet/neterion/s2io.c | 7 +------
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 6 ++----
drivers/net/ethernet/tehuti/tehuti.c | 9 ++-------
drivers/net/rionet.c | 6 +-----
7 files changed, 9 insertions(+), 31 deletions(-)
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2221,11 +2221,7 @@ static netdev_tx_t atl1c_xmit_frame(stru
}
tpd_req = atl1c_cal_tpd_req(skb);
- if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
- if (netif_msg_pktdata(adapter))
- dev_info(&adapter->pdev->dev, "tx locked\n");
- return NETDEV_TX_LOCKED;
- }
+ spin_lock_irqsave(&adapter->tx_lock, flags);
if (atl1c_tpd_avail(adapter, type) < tpd_req) {
/* no enough descriptor, just stop queue */
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1880,8 +1880,7 @@ static netdev_tx_t atl1e_xmit_frame(stru
return NETDEV_TX_OK;
}
tpd_req = atl1e_cal_tdp_req(skb);
- if (!spin_trylock_irqsave(&adapter->tx_lock, flags))
- return NETDEV_TX_LOCKED;
+ spin_lock_irqsave(&adapter->tx_lock, flags);
if (atl1e_tpd_avail(adapter) < tpd_req) {
/* no enough descriptor, just stop queue */
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1664,8 +1664,7 @@ static int t1_sge_tx(struct sk_buff *skb
struct cmdQ *q = &sge->cmdQ[qid];
unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
- if (!spin_trylock(&q->lock))
- return NETDEV_TX_LOCKED;
+ spin_lock(&q->lock);
reclaim_completed_tx(sge, q);
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -4084,12 +4084,7 @@ static netdev_tx_t s2io_xmit(struct sk_b
[skb->priority & (MAX_TX_FIFOS - 1)];
fifo = &mac_control->fifos[queue];
- if (do_spin_lock)
- spin_lock_irqsave(&fifo->tx_lock, flags);
- else {
- if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
- return NETDEV_TX_LOCKED;
- }
+ spin_lock_irqsave(&fifo->tx_lock, flags);
if (sp->config.multiq) {
if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2137,10 +2137,8 @@ static int pch_gbe_xmit_frame(struct sk_
struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
unsigned long flags;
- if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
- /* Collision - tell upper layer to requeue */
- return NETDEV_TX_LOCKED;
- }
+ spin_lock_irqsave(&tx_ring->tx_lock, flags);
+
if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
netif_stop_queue(netdev);
spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1629,13 +1629,8 @@ static netdev_tx_t bdx_tx_transmit(struc
unsigned long flags;
ENTER;
- local_irq_save(flags);
- if (!spin_trylock(&priv->tx_lock)) {
- local_irq_restore(flags);
- DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n",
- BDX_DRV_NAME, ndev->name);
- return NETDEV_TX_LOCKED;
- }
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
/* build tx descriptor */
BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -174,11 +174,7 @@ static int rionet_start_xmit(struct sk_b
unsigned long flags;
int add_num = 1;
- local_irq_save(flags);
- if (!spin_trylock(&rnet->tx_lock)) {
- local_irq_restore(flags);
- return NETDEV_TX_LOCKED;
- }
+ spin_lock_irqsave(&rnet->tx_lock, flags);
if (is_multicast_ether_addr(eth->h_dest))
add_num = nets[rnet->mport->id].nact;

From: Steven Rostedt <rostedt@goodmis.org>
Date: Fri, 3 Jul 2009 08:30:00 -0500
Subject: drivers/net: vortex fix locking issues
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Argh, cut and paste wasn't enough...
Use this patch instead. It needs an irq disable. But, believe it or not,
on SMP this is actually better. If the irq is shared (as it is in Mark's
case), we don't stop the irq of other devices from being handled on
another CPU (unfortunately for Mark, he pinned all interrupts to one CPU).
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
drivers/net/ethernet/3com/3c59x.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -842,9 +842,9 @@ static void poll_vortex(struct net_devic
{
struct vortex_private *vp = netdev_priv(dev);
unsigned long flags;
- local_irq_save(flags);
+ local_irq_save_nort(flags);
(vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
}
#endif
@@ -1916,12 +1916,12 @@ static void vortex_tx_timeout(struct net
* Block interrupts because vortex_interrupt does a bare spin_lock()
*/
unsigned long flags;
- local_irq_save(flags);
+ local_irq_save_nort(flags);
if (vp->full_bus_master_tx)
boomerang_interrupt(dev->irq, dev);
else
vortex_interrupt(dev->irq, dev);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
}
}

From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:30 -0500
Subject: drivers: random: Reduce preempt disabled region
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
No need to keep preemption disabled across the whole function.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/char/random.c | 3 ---
1 file changed, 3 deletions(-)
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -796,8 +796,6 @@ static void add_timer_randomness(struct
} sample;
long delta, delta2, delta3;
- preempt_disable();
-
sample.jiffies = jiffies;
sample.cycles = random_get_entropy();
sample.num = num;
@@ -838,7 +836,6 @@ static void add_timer_randomness(struct
*/
credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
}
- preempt_enable();
}
void add_input_randomness(unsigned int type, unsigned int code,

Subject: tty/serial/omap: Make the locking RT aware
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 28 Jul 2011 13:32:57 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
The lock is a sleeping lock and local_irq_save() is not the
optimisation we are looking for. Redo it to make it work on -RT and
non-RT.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/tty/serial/omap-serial.c | 12 ++++--------
1 file changed, 4 insertions(+), 8 deletions(-)
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1257,13 +1257,10 @@ serial_omap_console_write(struct console
pm_runtime_get_sync(up->dev);
- local_irq_save(flags);
- if (up->port.sysrq)
- locked = 0;
- else if (oops_in_progress)
- locked = spin_trylock(&up->port.lock);
+ if (up->port.sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&up->port.lock, flags);
else
- spin_lock(&up->port.lock);
+ spin_lock_irqsave(&up->port.lock, flags);
/*
* First save the IER then disable the interrupts
@@ -1292,8 +1289,7 @@ serial_omap_console_write(struct console
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
if (locked)
- spin_unlock(&up->port.lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&up->port.lock, flags);
}
static int __init

Subject: tty/serial/pl011: Make the locking work on RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 08 Jan 2013 21:36:51 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
The lock is a sleeping lock and local_irq_save() is not the optimisation
we are looking for. Redo it to make it work on -RT and non-RT.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/tty/serial/amba-pl011.c | 15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2067,13 +2067,19 @@ pl011_console_write(struct console *co,
clk_enable(uap->clk);
- local_irq_save(flags);
+ /*
+ * local_irq_save(flags);
+ *
+ * This local_irq_save() is nonsense. If we come in via sysrq
+ * handling then interrupts are already disabled. Aside of
+ * that the port.sysrq check is racy on SMP regardless.
+ */
if (uap->port.sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock(&uap->port.lock);
+ locked = spin_trylock_irqsave(&uap->port.lock, flags);
else
- spin_lock(&uap->port.lock);
+ spin_lock_irqsave(&uap->port.lock, flags);
/*
* First save the CR then disable the interrupts
@@ -2098,8 +2104,7 @@ pl011_console_write(struct console *co,
writew(old_cr, uap->port.membase + UART011_CR);
if (locked)
- spin_unlock(&uap->port.lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&uap->port.lock, flags);
clk_disable(uap->clk);
}

From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 25 Apr 2013 18:12:52 +0200
Subject: drm/i915: drop trace_i915_gem_ring_dispatch on rt
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
This tracepoint is responsible for:
|[<814cc358>] __schedule_bug+0x4d/0x59
|[<814d24cc>] __schedule+0x88c/0x930
|[<814d3b90>] ? _raw_spin_unlock_irqrestore+0x40/0x50
|[<814d3b95>] ? _raw_spin_unlock_irqrestore+0x45/0x50
|[<810b57b5>] ? task_blocks_on_rt_mutex+0x1f5/0x250
|[<814d27d9>] schedule+0x29/0x70
|[<814d3423>] rt_spin_lock_slowlock+0x15b/0x278
|[<814d3786>] rt_spin_lock+0x26/0x30
|[<a00dced9>] gen6_gt_force_wake_get+0x29/0x60 [i915]
|[<a00e183f>] gen6_ring_get_irq+0x5f/0x100 [i915]
|[<a00b2a33>] ftrace_raw_event_i915_gem_ring_dispatch+0xe3/0x100 [i915]
|[<a00ac1b3>] i915_gem_do_execbuffer.isra.13+0xbd3/0x1430 [i915]
|[<810f8943>] ? trace_buffer_unlock_commit+0x43/0x60
|[<8113e8d2>] ? ftrace_raw_event_kmem_alloc+0xd2/0x180
|[<8101d063>] ? native_sched_clock+0x13/0x80
|[<a00acf29>] i915_gem_execbuffer2+0x99/0x280 [i915]
|[<a00114a3>] drm_ioctl+0x4c3/0x570 [drm]
|[<8101d0d9>] ? sched_clock+0x9/0x10
|[<a00ace90>] ? i915_gem_execbuffer+0x480/0x480 [i915]
|[<810f1c18>] ? rb_commit+0x68/0xa0
|[<810f1c6c>] ? ring_buffer_unlock_commit+0x1c/0xa0
|[<81197467>] do_vfs_ioctl+0x97/0x540
|[<81021318>] ? ftrace_raw_event_sys_enter+0xd8/0x130
|[<811979a1>] sys_ioctl+0x91/0xb0
|[<814db931>] tracesys+0xe1/0xe6
Chris Wilson does not like to move i915_trace_irq_get() out of the macro
|No. This enables the IRQ, as well as making a number of
|very expensively serialised read, unconditionally.
so it is gone now on RT.
Reported-by: Joakim Hernberg <jbh@alchemy.lu>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 ++
1 file changed, 2 insertions(+)
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1264,7 +1264,9 @@ i915_gem_ringbuffer_submission(struct i9
if (ret)
return ret;
+#ifndef CONFIG_PREEMPT_RT_BASE
trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
+#endif
i915_gem_execbuffer_move_to_active(vmas, params->request);
i915_gem_execbuffer_retire_commands(params);

From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Sun, 16 Aug 2015 14:27:50 +0200
Subject: dump stack: don't disable preemption during trace
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
I see large latencies here during a stack dump on x86. The
preempt_disable() and get_cpu() should forbid moving the task to another
CPU during a stack dump and avoid two stack traces running in parallel
on the same CPU. However, a stack trace from a second CPU may still
happen in parallel. Also, nesting is allowed, so a stack trace can happen
in process context while another one comes from IRQ context. With
migrate_disable() we keep this code preemptible and allow a second
backtrace on the same CPU by another task.
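The pattern (sketch): get_cpu_light()/migrate_disable() pin the task
to the CPU without disabling preemption.
	const unsigned cpu = get_cpu_light();	/* stay on this CPU, but
						 * remain preemptible */
	/* walk the stack; another task may preempt us and take its own
	 * backtrace on this CPU */
	put_cpu_light();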
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/x86/kernel/dumpstack_32.c | 4 ++--
arch/x86/kernel/dumpstack_64.c | 8 ++++----
lib/dump_stack.c | 4 ++--
3 files changed, 8 insertions(+), 8 deletions(-)
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -42,7 +42,7 @@ void dump_trace(struct task_struct *task
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data)
{
- const unsigned cpu = get_cpu();
+ const unsigned cpu = get_cpu_light();
int graph = 0;
u32 *prev_esp;
@@ -86,7 +86,7 @@ void dump_trace(struct task_struct *task
break;
touch_nmi_watchdog();
}
- put_cpu();
+ put_cpu_light();
}
EXPORT_SYMBOL(dump_trace);
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -152,7 +152,7 @@ void dump_trace(struct task_struct *task
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data)
{
- const unsigned cpu = get_cpu();
+ const unsigned cpu = get_cpu_light();
struct thread_info *tinfo;
unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
unsigned long dummy;
@@ -241,7 +241,7 @@ void dump_trace(struct task_struct *task
* This handles the process stack:
*/
bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
- put_cpu();
+ put_cpu_light();
}
EXPORT_SYMBOL(dump_trace);
@@ -255,7 +255,7 @@ show_stack_log_lvl(struct task_struct *t
int cpu;
int i;
- preempt_disable();
+ migrate_disable();
cpu = smp_processor_id();
irq_stack_end = (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
@@ -291,7 +291,7 @@ show_stack_log_lvl(struct task_struct *t
pr_cont(" %016lx", *stack++);
touch_nmi_watchdog();
}
- preempt_enable();
+ migrate_enable();
pr_cont("\n");
show_trace_log_lvl(task, regs, sp, bp, log_lvl);
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -33,7 +33,7 @@ asmlinkage __visible void dump_stack(voi
* Permit this cpu to perform nested stack dumps while serialising
* against other CPUs
*/
- preempt_disable();
+ migrate_disable();
retry:
cpu = smp_processor_id();
@@ -52,7 +52,7 @@ asmlinkage __visible void dump_stack(voi
if (!was_locked)
atomic_set(&dump_lock, -1);
- preempt_enable();
+ migrate_enable();
}
#else
asmlinkage __visible void dump_stack(void)

Subject: fs/epoll: Do not disable preemption on RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 08 Jul 2011 16:35:35 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
ep_call_nested() takes a sleeping lock so we can't disable preemption.
The light version is enough since ep_call_nested() doesn't mind being
invoked twice on the same CPU.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
fs/eventpoll.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -505,12 +505,12 @@ static int ep_poll_wakeup_proc(void *pri
*/
static void ep_poll_safewake(wait_queue_head_t *wq)
{
- int this_cpu = get_cpu();
+ int this_cpu = get_cpu_light();
ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
- put_cpu();
+ put_cpu_light();
}
static void ep_remove_wait_queue(struct eppoll_entry *pwq)

From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 16 Feb 2015 18:49:10 +0100
Subject: fs/aio: simple simple work
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:768
|in_atomic(): 1, irqs_disabled(): 0, pid: 26, name: rcuos/2
|2 locks held by rcuos/2/26:
| #0: (rcu_callback){.+.+..}, at: [<ffffffff810b1a12>] rcu_nocb_kthread+0x1e2/0x380
| #1: (rcu_read_lock_sched){.+.+..}, at: [<ffffffff812acd26>] percpu_ref_kill_rcu+0xa6/0x1c0
|Preemption disabled at:[<ffffffff810b1a93>] rcu_nocb_kthread+0x263/0x380
|Call Trace:
| [<ffffffff81582e9e>] dump_stack+0x4e/0x9c
| [<ffffffff81077aeb>] __might_sleep+0xfb/0x170
| [<ffffffff81589304>] rt_spin_lock+0x24/0x70
| [<ffffffff811c5790>] free_ioctx_users+0x30/0x130
| [<ffffffff812ace34>] percpu_ref_kill_rcu+0x1b4/0x1c0
| [<ffffffff810b1a93>] rcu_nocb_kthread+0x263/0x380
| [<ffffffff8106e046>] kthread+0xd6/0xf0
| [<ffffffff81591eec>] ret_from_fork+0x7c/0xb0
Replace this with the preempt_disable()-friendly swork mechanism.
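Sketch of the resulting flow: the percpu_ref release callback, which
runs from RCU callback (atomic) context, now only queues the work; the
sleeping locks are taken later in the swork thread.
	static void free_ioctx_users(struct percpu_ref *ref)
	{
		struct kioctx *ctx = container_of(ref, struct kioctx, users);
		INIT_SWORK(&ctx->free_work, free_ioctx_users_work);
		swork_queue(&ctx->free_work);	/* defer to process context */
	}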
Reported-By: Mike Galbraith <umgwanakikbuti@gmail.com>
Suggested-by: Benjamin LaHaise <bcrl@kvack.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
fs/aio.c | 24 +++++++++++++++++-------
1 file changed, 17 insertions(+), 7 deletions(-)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -40,6 +40,7 @@
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
#include <linux/mount.h>
+#include <linux/work-simple.h>
#include <asm/kmap_types.h>
#include <asm/uaccess.h>
@@ -115,7 +116,7 @@ struct kioctx {
struct page **ring_pages;
long nr_pages;
- struct work_struct free_work;
+ struct swork_event free_work;
/*
* signals when all in-flight requests are done
@@ -253,6 +254,7 @@ static int __init aio_setup(void)
.mount = aio_mount,
.kill_sb = kill_anon_super,
};
+ BUG_ON(swork_get());
aio_mnt = kern_mount(&aio_fs);
if (IS_ERR(aio_mnt))
panic("Failed to create aio fs mount.");
@@ -568,9 +570,9 @@ static int kiocb_cancel(struct aio_kiocb
return cancel(&kiocb->common);
}
-static void free_ioctx(struct work_struct *work)
+static void free_ioctx(struct swork_event *sev)
{
- struct kioctx *ctx = container_of(work, struct kioctx, free_work);
+ struct kioctx *ctx = container_of(sev, struct kioctx, free_work);
pr_debug("freeing %p\n", ctx);
@@ -589,8 +591,8 @@ static void free_ioctx_reqs(struct percp
if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
complete(&ctx->rq_wait->comp);
- INIT_WORK(&ctx->free_work, free_ioctx);
- schedule_work(&ctx->free_work);
+ INIT_SWORK(&ctx->free_work, free_ioctx);
+ swork_queue(&ctx->free_work);
}
/*
@@ -598,9 +600,9 @@ static void free_ioctx_reqs(struct percp
* and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
* now it's safe to cancel any that need to be.
*/
-static void free_ioctx_users(struct percpu_ref *ref)
+static void free_ioctx_users_work(struct swork_event *sev)
{
- struct kioctx *ctx = container_of(ref, struct kioctx, users);
+ struct kioctx *ctx = container_of(sev, struct kioctx, free_work);
struct aio_kiocb *req;
spin_lock_irq(&ctx->ctx_lock);
@@ -619,6 +621,14 @@ static void free_ioctx_users(struct perc
percpu_ref_put(&ctx->reqs);
}
+static void free_ioctx_users(struct percpu_ref *ref)
+{
+ struct kioctx *ctx = container_of(ref, struct kioctx, users);
+
+ INIT_SWORK(&ctx->free_work, free_ioctx_users_work);
+ swork_queue(&ctx->free_work);
+}
+
static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
{
unsigned i, new_nr;

Subject: block: Turn off warning which is bogus on RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 14 Jun 2011 17:05:09 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
On -RT this context always has interrupts enabled. Ignore this warning on -RT.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
block/blk-core.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -217,7 +217,7 @@ EXPORT_SYMBOL(blk_delay_queue);
**/
void blk_start_queue(struct request_queue *q)
{
- WARN_ON(!irqs_disabled());
+ WARN_ON_NONRT(!irqs_disabled());
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
__blk_run_queue(q);

Subject: fs: dcache: Use cpu_chill() in trylock loops
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 07 Mar 2012 21:00:34 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Retry loops on RT might loop forever when the modifying side was
preempted. Use cpu_chill() instead of cpu_relax() to let the system
make progress.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
fs/autofs4/autofs_i.h | 1 +
fs/autofs4/expire.c | 2 +-
fs/dcache.c | 5 +++--
fs/namespace.c | 3 ++-
4 files changed, 7 insertions(+), 4 deletions(-)
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -34,6 +34,7 @@
#include <linux/sched.h>
#include <linux/mount.h>
#include <linux/namei.h>
+#include <linux/delay.h>
#include <asm/current.h>
#include <asm/uaccess.h>
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -150,7 +150,7 @@ static struct dentry *get_next_positive_
parent = p->d_parent;
if (!spin_trylock(&parent->d_lock)) {
spin_unlock(&p->d_lock);
- cpu_relax();
+ cpu_chill();
goto relock;
}
spin_unlock(&p->d_lock);
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -19,6 +19,7 @@
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
+#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
@@ -589,7 +590,7 @@ static struct dentry *dentry_kill(struct
failed:
spin_unlock(&dentry->d_lock);
- cpu_relax();
+ cpu_chill();
return dentry; /* try again with same dentry */
}
@@ -2398,7 +2399,7 @@ void d_delete(struct dentry * dentry)
if (dentry->d_lockref.count == 1) {
if (!spin_trylock(&inode->i_lock)) {
spin_unlock(&dentry->d_lock);
- cpu_relax();
+ cpu_chill();
goto again;
}
dentry->d_flags &= ~DCACHE_CANT_MOUNT;
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -14,6 +14,7 @@
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
+#include <linux/delay.h>
#include <linux/security.h>
#include <linux/idr.h>
#include <linux/init.h> /* init_rootfs */
@@ -355,7 +356,7 @@ int __mnt_want_write(struct vfsmount *m)
smp_mb();
while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
preempt_enable();
- cpu_relax();
+ cpu_chill();
preempt_disable();
}
/*

From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 18 Mar 2011 10:11:25 +0100
Subject: fs: jbd/jbd2: Make state lock and journal head lock rt safe
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
bit_spin_locks break under RT.
Based on a previous patch from Steven Rostedt <rostedt@goodmis.org>
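For context, a simplified sketch of bit_spin_lock(): the lock is taken
and held with preemption disabled, while on RT the holder may itself be
preempted, so waiters can spin indefinitely.
	preempt_disable();
	while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
		preempt_enable();
		while (test_bit(bitnum, addr))
			cpu_relax();	/* spin until the holder, which may
					 * have been preempted, releases it */
		preempt_disable();
	}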
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--
include/linux/buffer_head.h | 8 ++++++++
include/linux/jbd2.h | 24 ++++++++++++++++++++++++
2 files changed, 32 insertions(+)
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -77,6 +77,10 @@ struct buffer_head {
atomic_t b_count; /* users using this buffer_head */
#ifdef CONFIG_PREEMPT_RT_BASE
spinlock_t b_uptodate_lock;
+#if IS_ENABLED(CONFIG_JBD2)
+ spinlock_t b_state_lock;
+ spinlock_t b_journal_head_lock;
+#endif
#endif
};
@@ -108,6 +112,10 @@ static inline void buffer_head_init_lock
{
#ifdef CONFIG_PREEMPT_RT_BASE
spin_lock_init(&bh->b_uptodate_lock);
+#if IS_ENABLED(CONFIG_JBD2)
+ spin_lock_init(&bh->b_state_lock);
+ spin_lock_init(&bh->b_journal_head_lock);
+#endif
#endif
}
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -352,32 +352,56 @@ static inline struct journal_head *bh2jh
static inline void jbd_lock_bh_state(struct buffer_head *bh)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_lock(BH_State, &bh->b_state);
+#else
+ spin_lock(&bh->b_state_lock);
+#endif
}
static inline int jbd_trylock_bh_state(struct buffer_head *bh)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
return bit_spin_trylock(BH_State, &bh->b_state);
+#else
+ return spin_trylock(&bh->b_state_lock);
+#endif
}
static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
return bit_spin_is_locked(BH_State, &bh->b_state);
+#else
+ return spin_is_locked(&bh->b_state_lock);
+#endif
}
static inline void jbd_unlock_bh_state(struct buffer_head *bh)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_unlock(BH_State, &bh->b_state);
+#else
+ spin_unlock(&bh->b_state_lock);
+#endif
}
static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_lock(BH_JournalHead, &bh->b_state);
+#else
+ spin_lock(&bh->b_journal_head_lock);
+#endif
}
static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
bit_spin_unlock(BH_JournalHead, &bh->b_state);
+#else
+ spin_unlock(&bh->b_journal_head_lock);
+#endif
}
#define J_ASSERT(assert) BUG_ON(!(assert))
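Why bit spinlocks break under RT: bit_spin_lock() spins with preemption disabled, so once the holder of the bit can itself be preempted (because the surrounding spinlocks became sleeping locks), a waiter may spin forever. Mainline's definition, abridged:

/* include/linux/bit_spinlock.h (mainline, abridged) */
static inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
		preempt_enable();
		do {
			cpu_relax();
		} while (test_bit(bitnum, addr));
		preempt_disable();
	}
#endif
	__acquire(bitlock);
}

Substituting a real spinlock_t, which is an rtmutex on RT, lets the waiter block and priority-boost the holder instead of spinning.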

@@ -0,0 +1,32 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 17 Feb 2014 17:30:03 +0100
Subject: fs: jbd2: pull your plug when waiting for space
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Two cps running in parallel managed to stall the ext4 fs. It seems that
the journal code is either waiting for locks or sleeping while waiting for
something to happen. This seems similar to what Mike observed on ext3;
here is his description:
|With an -rt kernel, and a heavy sync IO load, tasks can jam
|up on journal locks without unplugging, which can lead to
|terminal IO starvation. Unplug and schedule when waiting
|for space.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
fs/jbd2/checkpoint.c | 2 ++
1 file changed, 2 insertions(+)
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -116,6 +116,8 @@ void __jbd2_log_wait_for_space(journal_t
nblocks = jbd2_space_needed(journal);
while (jbd2_log_space_left(journal) < nblocks) {
write_unlock(&journal->j_state_lock);
+ if (current->plug)
+ io_schedule();
mutex_lock(&journal->j_checkpoint_mutex);
/*
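The io_schedule() call works because it flushes the caller's block plug before sleeping; an abridged sketch of the mainline path of this era (not verbatim; io_schedule() is io_schedule_timeout(MAX_SCHEDULE_TIMEOUT)):

/* kernel/sched/core.c (abridged sketch) */
long __sched io_schedule_timeout(long timeout)
{
	long ret;

	current->in_iowait = 1;
	blk_schedule_flush_plug(current);	/* submit bios queued on current->plug */
	ret = schedule_timeout(timeout);
	current->in_iowait = 0;
	return ret;
}

Hence the if (current->plug) guard above: only tasks that actually have queued I/O take this detour before blocking on the checkpoint mutex.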

@@ -0,0 +1,31 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 19 Jul 2009 08:44:27 -0500
Subject: fs: namespace preemption fix
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
On RT we cannot loop with preemption disabled here as
mnt_make_readonly() might have been preempted. We can safely enable
preemption while waiting for MNT_WRITE_HOLD to be cleared. Safe on !RT
as well.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
fs/namespace.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -353,8 +353,11 @@ int __mnt_want_write(struct vfsmount *m)
* incremented count after it has set MNT_WRITE_HOLD.
*/
smp_mb();
- while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
+ while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
+ preempt_enable();
cpu_relax();
+ preempt_disable();
+ }
/*
* After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
* be set to match its requirements. So we must not load that until
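For context, an abridged sketch of the holder side, mnt_make_readonly(), from mainline of roughly this era (details vary by version):

/* fs/namespace.c (abridged sketch) */
static int mnt_make_readonly(struct mount *mnt)
{
	int ret = 0;

	lock_mount_hash();
	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;	/* __mnt_want_write() now spins */
	smp_mb();				/* order the store before reading counters */
	if (mnt_get_writers(mnt) > 0)
		ret = -EBUSY;
	else
		mnt->mnt.mnt_flags |= MNT_READONLY;
	smp_wmb();				/* MNT_READONLY visible before the release */
	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;	/* let the spinners proceed */
	unlock_mount_hash();
	return ret;
}

On RT the task holding MNT_WRITE_HOLD can itself be preempted, which is why the waiter must re-enable preemption (and, per the cpu_chill() patch above, sleep) while it spins.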

@@ -0,0 +1,60 @@
From: Mike Galbraith <efault@gmx.de>
Date: Fri, 3 Jul 2009 08:44:12 -0500
Subject: fs: ntfs: disable interrupt only on !RT
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
On Sat, 2007-10-27 at 11:44 +0200, Ingo Molnar wrote:
> * Nick Piggin <nickpiggin@yahoo.com.au> wrote:
>
> > > [10138.175796] [<c0105de3>] show_trace+0x12/0x14
> > > [10138.180291] [<c0105dfb>] dump_stack+0x16/0x18
> > > [10138.184769] [<c011609f>] native_smp_call_function_mask+0x138/0x13d
> > > [10138.191117] [<c0117606>] smp_call_function+0x1e/0x24
> > > [10138.196210] [<c012f85c>] on_each_cpu+0x25/0x50
> > > [10138.200807] [<c0115c74>] flush_tlb_all+0x1e/0x20
> > > [10138.205553] [<c016caaf>] kmap_high+0x1b6/0x417
> > > [10138.210118] [<c011ec88>] kmap+0x4d/0x4f
> > > [10138.214102] [<c026a9d8>] ntfs_end_buffer_async_read+0x228/0x2f9
> > > [10138.220163] [<c01a0e9e>] end_bio_bh_io_sync+0x26/0x3f
> > > [10138.225352] [<c01a2b09>] bio_endio+0x42/0x6d
> > > [10138.229769] [<c02c2a08>] __end_that_request_first+0x115/0x4ac
> > > [10138.235682] [<c02c2da7>] end_that_request_chunk+0x8/0xa
> > > [10138.241052] [<c0365943>] ide_end_request+0x55/0x10a
> > > [10138.246058] [<c036dae3>] ide_dma_intr+0x6f/0xac
> > > [10138.250727] [<c0366d83>] ide_intr+0x93/0x1e0
> > > [10138.255125] [<c015afb4>] handle_IRQ_event+0x5c/0xc9
> >
> > Looks like ntfs is kmap()ing from interrupt context. Should be using
> > kmap_atomic instead, I think.
>
> it's not atomic interrupt context but irq thread context - and -rt
> remaps kmap_atomic() to kmap() internally.
Hm. Looking at the change to mm/bounce.c, perhaps I should do this
instead?
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
fs/ntfs/aops.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -143,13 +143,13 @@ static void ntfs_end_buffer_async_read(s
recs = PAGE_CACHE_SIZE / rec_size;
/* Should have been verified before we got here... */
BUG_ON(!recs);
- local_irq_save(flags);
+ local_irq_save_nort(flags);
kaddr = kmap_atomic(page);
for (i = 0; i < recs; i++)
post_read_mst_fixup((NTFS_RECORD*)(kaddr +
i * rec_size), rec_size);
kunmap_atomic(kaddr);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
flush_dcache_page(page);
if (likely(page_uptodate && !PageError(page)))
SetPageUptodate(page);
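The _nort variants come from a separate patch in this series; a minimal sketch of the assumed pattern:

/* include/linux/interrupt.h (RT series, sketch) */
#ifdef CONFIG_PREEMPT_RT_FULL
# define local_irq_save_nort(flags)	local_save_flags(flags)
# define local_irq_restore_nort(flags)	(void)(flags)
#else
# define local_irq_save_nort(flags)	local_irq_save(flags)
# define local_irq_restore_nort(flags)	local_irq_restore(flags)
#endif

On RT the section runs in irq-thread context (where kmap_atomic() is remapped to kmap()), so interrupts can stay enabled; !RT keeps the original behaviour.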

@@ -0,0 +1,162 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 18 Mar 2011 09:18:52 +0100
Subject: buffer_head: Replace bh_uptodate_lock for -rt
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Wrap the bit_spin_lock calls into a separate inline and add the RT
replacements with a real spinlock.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
fs/buffer.c | 21 +++++++--------------
fs/ntfs/aops.c | 10 +++-------
include/linux/buffer_head.h | 34 ++++++++++++++++++++++++++++++++++
3 files changed, 44 insertions(+), 21 deletions(-)
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -305,8 +305,7 @@ static void end_buffer_async_read(struct
* decide that the page is now completely done.
*/
first = page_buffers(page);
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+ flags = bh_uptodate_lock_irqsave(first);
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
@@ -319,8 +318,7 @@ static void end_buffer_async_read(struct
}
tmp = tmp->b_this_page;
} while (tmp != bh);
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
+ bh_uptodate_unlock_irqrestore(first, flags);
/*
* If none of the buffers had errors and they are all
@@ -332,9 +330,7 @@ static void end_buffer_async_read(struct
return;
still_busy:
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
- return;
+ bh_uptodate_unlock_irqrestore(first, flags);
}
/*
@@ -362,8 +358,7 @@ void end_buffer_async_write(struct buffe
}
first = page_buffers(page);
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+ flags = bh_uptodate_lock_irqsave(first);
clear_buffer_async_write(bh);
unlock_buffer(bh);
@@ -375,15 +370,12 @@ void end_buffer_async_write(struct buffe
}
tmp = tmp->b_this_page;
}
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
+ bh_uptodate_unlock_irqrestore(first, flags);
end_page_writeback(page);
return;
still_busy:
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
- return;
+ bh_uptodate_unlock_irqrestore(first, flags);
}
EXPORT_SYMBOL(end_buffer_async_write);
@@ -3325,6 +3317,7 @@ struct buffer_head *alloc_buffer_head(gf
struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
if (ret) {
INIT_LIST_HEAD(&ret->b_assoc_buffers);
+ buffer_head_init_locks(ret);
preempt_disable();
__this_cpu_inc(bh_accounting.nr);
recalc_bh_state();
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -107,8 +107,7 @@ static void ntfs_end_buffer_async_read(s
"0x%llx.", (unsigned long long)bh->b_blocknr);
}
first = page_buffers(page);
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+ flags = bh_uptodate_lock_irqsave(first);
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
@@ -123,8 +122,7 @@ static void ntfs_end_buffer_async_read(s
}
tmp = tmp->b_this_page;
} while (tmp != bh);
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
+ bh_uptodate_unlock_irqrestore(first, flags);
/*
* If none of the buffers had errors then we can set the page uptodate,
* but we first have to perform the post read mst fixups, if the
@@ -159,9 +157,7 @@ static void ntfs_end_buffer_async_read(s
unlock_page(page);
return;
still_busy:
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
- return;
+ bh_uptodate_unlock_irqrestore(first, flags);
}
/**
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -75,8 +75,42 @@ struct buffer_head {
struct address_space *b_assoc_map; /* mapping this buffer is
associated with */
atomic_t b_count; /* users using this buffer_head */
+#ifdef CONFIG_PREEMPT_RT_BASE
+ spinlock_t b_uptodate_lock;
+#endif
};
+static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
+{
+ unsigned long flags;
+
+#ifndef CONFIG_PREEMPT_RT_BASE
+ local_irq_save(flags);
+ bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
+#else
+ spin_lock_irqsave(&bh->b_uptodate_lock, flags);
+#endif
+ return flags;
+}
+
+static inline void
+bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
+{
+#ifndef CONFIG_PREEMPT_RT_BASE
+ bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
+ local_irq_restore(flags);
+#else
+ spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
+#endif
+}
+
+static inline void buffer_head_init_locks(struct buffer_head *bh)
+{
+#ifdef CONFIG_PREEMPT_RT_BASE
+ spin_lock_init(&bh->b_uptodate_lock);
+#endif
+}
+
/*
* macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
* and buffer_foo() functions.

@@ -0,0 +1,74 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 21:56:42 +0200
Subject: trace: Add migrate-disabled counter to tracing output
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/trace_events.h | 2 ++
kernel/trace/trace.c | 9 ++++++---
kernel/trace/trace_events.c | 2 ++
kernel/trace/trace_output.c | 5 +++++
4 files changed, 15 insertions(+), 3 deletions(-)
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -66,6 +66,8 @@ struct trace_entry {
unsigned char flags;
unsigned char preempt_count;
int pid;
+ unsigned short migrate_disable;
+ unsigned short padding;
};
#define TRACE_EVENT_TYPE_MAX \
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1663,6 +1663,8 @@ tracing_generic_entry_update(struct trac
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
+
+ entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
@@ -2560,9 +2562,10 @@ static void print_lat_help_header(struct
"# | / _----=> need-resched \n"
"# || / _---=> hardirq/softirq \n"
"# ||| / _--=> preempt-depth \n"
- "# |||| / delay \n"
- "# cmd pid ||||| time | caller \n"
- "# \\ / ||||| \\ | / \n");
+ "# |||| / _--=> migrate-disable\n"
+ "# ||||| / delay \n"
+ "# cmd pid |||||| time | caller \n"
+ "# \\ / ||||| \\ | / \n");
}
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -186,6 +186,8 @@ static int trace_define_common_fields(vo
__common_field(unsigned char, flags);
__common_field(unsigned char, preempt_count);
__common_field(int, pid);
+ __common_field(unsigned short, migrate_disable);
+ __common_field(unsigned short, padding);
return ret;
}
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -428,6 +428,11 @@ int trace_print_lat_fmt(struct trace_seq
else
trace_seq_putc(s, '.');
+ if (entry->migrate_disable)
+ trace_seq_printf(s, "%x", entry->migrate_disable);
+ else
+ trace_seq_putc(s, '.');
+
return !trace_seq_has_overflowed(s);
}
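__migrate_disabled() belongs to the migrate_disable() infrastructure added elsewhere in this series; a sketch of the accessor this hunk assumes:

/* include/linux/sched.h (RT series, sketch) */
#ifdef CONFIG_PREEMPT_RT_FULL
static inline int __migrate_disabled(struct task_struct *p)
{
	return p->migrate_disable;
}
#else
static inline int __migrate_disabled(struct task_struct *p)
{
	return 0;
}
#endif

The & 0xFF mask bounds the recorded nesting depth, and the extra padding field keeps the trace entry layout explicitly aligned.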

@@ -0,0 +1,114 @@
From: Steven Rostedt <rostedt@goodmis.org>
Date: Tue, 14 Jul 2015 14:26:34 +0200
Subject: futex: Fix bug on when a requeued RT task times out
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Requeue with timeout causes a bug with PREEMPT_RT_FULL.
The bug comes from a timed out condition.
TASK 1 TASK 2
------ ------
futex_wait_requeue_pi()
futex_wait_queue_me()
<timed out>
double_lock_hb();
raw_spin_lock(pi_lock);
if (current->pi_blocked_on) {
} else {
current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
raw_spin_unlock(pi_lock);
spin_lock(hb->lock); <-- blocked!
plist_for_each_entry_safe(this) {
rt_mutex_start_proxy_lock();
task_blocks_on_rt_mutex();
BUG_ON(task->pi_blocked_on)!!!!
The BUG_ON() actually has a check for PI_WAKEUP_INPROGRESS, but the
problem is that, after TASK 1 sets PI_WAKEUP_INPROGRESS, it then tries to
grab the hb->lock, which it fails to do. As the hb->lock is a mutex on RT,
it will block and set "pi_blocked_on" to the hb->lock.
When TASK 2 goes to requeue it, the check for PI_WAKEUP_INPROGRESS fails
because TASK 1's pi_blocked_on is no longer set to that, but is instead
set to the hb->lock.
The fix:
When calling rt_mutex_start_proxy_lock() a check is made to see
if the proxy task's pi_blocked_on is set. If so, exit out early.
Otherwise set it to a new flag PI_REQUEUE_INPROGRESS, which notifies
the proxy task that it is being requeued, and will handle things
appropriately.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/locking/rtmutex.c | 32 +++++++++++++++++++++++++++++++-
kernel/locking/rtmutex_common.h | 1 +
2 files changed, 32 insertions(+), 1 deletion(-)
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -71,7 +71,8 @@ static void fixup_rt_mutex_waiters(struc
static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
{
- return waiter && waiter != PI_WAKEUP_INPROGRESS;
+ return waiter && waiter != PI_WAKEUP_INPROGRESS &&
+ waiter != PI_REQUEUE_INPROGRESS;
}
/*
@@ -1631,6 +1632,35 @@ int rt_mutex_start_proxy_lock(struct rt_
return 1;
}
+#ifdef CONFIG_PREEMPT_RT_FULL
+ /*
+ * In PREEMPT_RT there's an added race.
+ * If the task, that we are about to requeue, times out,
+ * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue
+ * to skip this task. But right after the task sets
+ * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
+ * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
+ * This will replace the PI_WAKEUP_INPROGRESS with the actual
+ * lock that it blocks on. We *must not* place this task
+ * on this proxy lock in that case.
+ *
+ * To prevent this race, we first take the task's pi_lock
+ * and check if it has updated its pi_blocked_on. If it has,
+ * we assume that it woke up and we return -EAGAIN.
+ * Otherwise, we set the task's pi_blocked_on to
+ * PI_REQUEUE_INPROGRESS, so that if the task is waking up
+ * it will know that we are in the process of requeuing it.
+ */
+ raw_spin_lock_irq(&task->pi_lock);
+ if (task->pi_blocked_on) {
+ raw_spin_unlock_irq(&task->pi_lock);
+ raw_spin_unlock(&lock->wait_lock);
+ return -EAGAIN;
+ }
+ task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
+ raw_spin_unlock_irq(&task->pi_lock);
+#endif
+
/* We enforce deadlock detection for futexes */
ret = task_blocks_on_rt_mutex(lock, waiter, task,
RT_MUTEX_FULL_CHAINWALK);
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -98,6 +98,7 @@ enum rtmutex_chainwalk {
* PI-futex support (proxy locking functions, etc.):
*/
#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
+#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2)
extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,

@@ -0,0 +1,38 @@
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:57 -0500
Subject: genirq: Disable irqpoll on -rt
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Creates long latencies for no value
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/irq/spurious.c | 8 ++++++++
1 file changed, 8 insertions(+)
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -444,6 +444,10 @@ MODULE_PARM_DESC(noirqdebug, "Disable ir
static int __init irqfixup_setup(char *str)
{
+#ifdef CONFIG_PREEMPT_RT_BASE
+ pr_warn("irqfixup boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
+ return 1;
+#endif
irqfixup = 1;
printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
printk(KERN_WARNING "This may impact system performance.\n");
@@ -456,6 +460,10 @@ module_param(irqfixup, int, 0644);
static int __init irqpoll_setup(char *str)
{
+#ifdef CONFIG_PREEMPT_RT_BASE
+ pr_warn("irqpoll boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
+ return 1;
+#endif
irqfixup = 2;
printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
"enabled\n");

@@ -0,0 +1,145 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 21 Aug 2013 17:48:46 +0200
Subject: genirq: Do not invoke the affinity callback via a workqueue on RT
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Joe Korty reported that __irq_set_affinity_locked() schedules a
workqueue while holding a raw lock, which results in a might_sleep()
warning.
This patch moves the invocation into process context so that we only
wake up a process while holding the lock.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/interrupt.h | 1
kernel/irq/manage.c | 79 ++++++++++++++++++++++++++++++++++++++++++++--
2 files changed, 77 insertions(+), 3 deletions(-)
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -217,6 +217,7 @@ struct irq_affinity_notify {
unsigned int irq;
struct kref kref;
struct work_struct work;
+ struct list_head list;
void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
void (*release)(struct kref *ref);
};
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -183,6 +183,62 @@ static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+static void _irq_affinity_notify(struct irq_affinity_notify *notify);
+static struct task_struct *set_affinity_helper;
+static LIST_HEAD(affinity_list);
+static DEFINE_RAW_SPINLOCK(affinity_list_lock);
+
+static int set_affinity_thread(void *unused)
+{
+ while (1) {
+ struct irq_affinity_notify *notify;
+ int empty;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ raw_spin_lock_irq(&affinity_list_lock);
+ empty = list_empty(&affinity_list);
+ raw_spin_unlock_irq(&affinity_list_lock);
+
+ if (empty)
+ schedule();
+ if (kthread_should_stop())
+ break;
+ set_current_state(TASK_RUNNING);
+try_next:
+ notify = NULL;
+
+ raw_spin_lock_irq(&affinity_list_lock);
+ if (!list_empty(&affinity_list)) {
+ notify = list_first_entry(&affinity_list,
+ struct irq_affinity_notify, list);
+ list_del_init(&notify->list);
+ }
+ raw_spin_unlock_irq(&affinity_list_lock);
+
+ if (!notify)
+ continue;
+ _irq_affinity_notify(notify);
+ goto try_next;
+ }
+ return 0;
+}
+
+static void init_helper_thread(void)
+{
+ if (set_affinity_helper)
+ return;
+ set_affinity_helper = kthread_run(set_affinity_thread, NULL,
+ "affinity-cb");
+ WARN_ON(IS_ERR(set_affinity_helper));
+}
+#else
+
+static inline void init_helper_thread(void) { }
+
+#endif
+
int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
@@ -222,7 +278,17 @@ int irq_set_affinity_locked(struct irq_d
if (desc->affinity_notify) {
kref_get(&desc->affinity_notify->kref);
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+ raw_spin_lock(&affinity_list_lock);
+ if (list_empty(&desc->affinity_notify->list))
+ list_add_tail(&affinity_list,
+ &desc->affinity_notify->list);
+ raw_spin_unlock(&affinity_list_lock);
+ wake_up_process(set_affinity_helper);
+#else
schedule_work(&desc->affinity_notify->work);
+#endif
}
irqd_set(data, IRQD_AFFINITY_SET);
@@ -260,10 +326,8 @@ int irq_set_affinity_hint(unsigned int i
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
-static void irq_affinity_notify(struct work_struct *work)
+static void _irq_affinity_notify(struct irq_affinity_notify *notify)
{
- struct irq_affinity_notify *notify =
- container_of(work, struct irq_affinity_notify, work);
struct irq_desc *desc = irq_to_desc(notify->irq);
cpumask_var_t cpumask;
unsigned long flags;
@@ -285,6 +349,13 @@ static void irq_affinity_notify(struct w
kref_put(&notify->kref, notify->release);
}
+static void irq_affinity_notify(struct work_struct *work)
+{
+ struct irq_affinity_notify *notify =
+ container_of(work, struct irq_affinity_notify, work);
+ _irq_affinity_notify(notify);
+}
+
/**
* irq_set_affinity_notifier - control notification of IRQ affinity changes
* @irq: Interrupt for which to enable/disable notification
@@ -314,6 +385,8 @@ irq_set_affinity_notifier(unsigned int i
notify->irq = irq;
kref_init(&notify->kref);
INIT_WORK(&notify->work, irq_affinity_notify);
+ INIT_LIST_HEAD(&notify->list);
+ init_helper_thread();
}
raw_spin_lock_irqsave(&desc->lock, flags);

@@ -0,0 +1,49 @@
Subject: genirq: Force interrupt thread on RT
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 03 Apr 2011 11:57:29 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Force threaded IRQ handlers and optimize the code (force_irqthreads)
in regard to this.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/interrupt.h | 6 +++++-
kernel/irq/manage.c | 2 ++
2 files changed, 7 insertions(+), 1 deletion(-)
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -379,9 +379,13 @@ extern int irq_set_irqchip_state(unsigne
bool state);
#ifdef CONFIG_IRQ_FORCED_THREADING
+# ifndef CONFIG_PREEMPT_RT_BASE
extern bool force_irqthreads;
+# else
+# define force_irqthreads (true)
+# endif
#else
-#define force_irqthreads (0)
+#define force_irqthreads (false)
#endif
#ifndef __ARCH_SET_SOFTIRQ_PENDING
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -22,6 +22,7 @@
#include "internals.h"
#ifdef CONFIG_IRQ_FORCED_THREADING
+# ifndef CONFIG_PREEMPT_RT_BASE
__read_mostly bool force_irqthreads;
static int __init setup_forced_irqthreads(char *arg)
@@ -30,6 +31,7 @@ static int __init setup_forced_irqthread
return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
+# endif
#endif
static void __synchronize_hardirq(struct irq_desc *desc)
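Making force_irqthreads a compile-time constant on RT lets the compiler drop the non-threaded paths entirely. A consumer, abridged from mainline of this era (sketch):

/* kernel/irq/manage.c (mainline, abridged sketch) */
static int irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)		/* constant false on RT: folded away */
		return 0;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return 0;

	new->flags |= IRQF_ONESHOT;
	/* ... install the threaded handler ... */
	return 0;
}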

@@ -0,0 +1,47 @@
From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Tue, 24 Mar 2015 08:14:49 +0100
Subject: hotplug: Use set_cpus_allowed_ptr() in sync_unplug_thread()
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
do_set_cpus_allowed() is not safe vs ->sched_class change.
crash> bt
PID: 11676 TASK: ffff88026f979da0 CPU: 22 COMMAND: "sync_unplug/22"
#0 [ffff880274d25bc8] machine_kexec at ffffffff8103b41c
#1 [ffff880274d25c18] crash_kexec at ffffffff810d881a
#2 [ffff880274d25cd8] oops_end at ffffffff81525818
#3 [ffff880274d25cf8] do_invalid_op at ffffffff81003096
#4 [ffff880274d25d90] invalid_op at ffffffff8152d3de
[exception RIP: set_cpus_allowed_rt+18]
RIP: ffffffff8109e012 RSP: ffff880274d25e48 RFLAGS: 00010202
RAX: ffffffff8109e000 RBX: ffff88026f979da0 RCX: ffff8802770cb6e8
RDX: 0000000000000000 RSI: ffffffff81add700 RDI: ffff88026f979da0
RBP: ffff880274d25e78 R8: ffffffff816112e0 R9: 0000000000000001
R10: 0000000000000001 R11: 0000000000011940 R12: ffff88026f979da0
R13: ffff8802770cb6d0 R14: ffff880274d25fd8 R15: 0000000000000000
ORIG_RAX: ffffffffffffffff CS: 0010 SS: 0018
#5 [ffff880274d25e60] do_set_cpus_allowed at ffffffff8108e65f
#6 [ffff880274d25e80] sync_unplug_thread at ffffffff81058c08
#7 [ffff880274d25ed8] kthread at ffffffff8107cad6
#8 [ffff880274d25f50] ret_from_fork at ffffffff8152bbbc
crash> task_struct ffff88026f979da0 | grep class
sched_class = 0xffffffff816111e0 <fair_sched_class+64>,
Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/cpu.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -268,7 +268,7 @@ static int sync_unplug_thread(void *data
* we don't want any more work on this CPU.
*/
current->flags &= ~PF_NO_SETAFFINITY;
- do_set_cpus_allowed(current, cpu_present_mask);
+ set_cpus_allowed_ptr(current, cpu_present_mask);
migrate_me();
return 0;
}
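The difference, sketched (abridged; exact mainline code varies by version): do_set_cpus_allowed() assumes its caller already holds the locks that serialize against a ->sched_class change, while set_cpus_allowed_ptr() takes them itself:

/* kernel/sched/core.c (abridged sketch, migration handling omitted) */
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
	/* caller must hold p->pi_lock and the rq lock */
	p->sched_class->set_cpus_allowed(p, new_mask);
}

int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	unsigned long flags;
	struct rq *rq;

	rq = task_rq_lock(p, &flags);		/* pi_lock + rq->lock */
	do_set_cpus_allowed(p, new_mask);
	task_rq_unlock(rq, p, &flags);
	return 0;
}

Called bare from the sync_unplug thread, the unlocked variant raced with a concurrent class change, producing the invalid-op crash above.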

@@ -0,0 +1,205 @@
Subject: hotplug: Lightweight get online cpus
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 15 Jun 2011 12:36:06 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
get_online_cpus() is a heavyweight function which involves a global
mutex. migrate_disable() wants a simpler construct which prevents only
a CPU from going down while a task is in a migrate-disabled section.
Implement a per-CPU lockless mechanism, which serializes only in the
real unplug case on a global mutex. That serialization affects only
tasks on the CPU which should be brought down.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/cpu.h | 7 +--
kernel/cpu.c | 119 +++++++++++++++++++++++++++++++++++++++++++++++++++-
2 files changed, 122 insertions(+), 4 deletions(-)
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -222,9 +222,6 @@ static inline void smpboot_thread_init(v
#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;
-static inline void pin_current_cpu(void) { }
-static inline void unpin_current_cpu(void) { }
-
#ifdef CONFIG_HOTPLUG_CPU
/* Stop CPUs going up and down. */
@@ -234,6 +231,8 @@ extern void get_online_cpus(void);
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
+extern void pin_current_cpu(void);
+extern void unpin_current_cpu(void);
#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
#define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri)
#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
@@ -251,6 +250,8 @@ static inline void cpu_hotplug_done(void
#define put_online_cpus() do { } while (0)
#define cpu_hotplug_disable() do { } while (0)
#define cpu_hotplug_enable() do { } while (0)
+static inline void pin_current_cpu(void) { }
+static inline void unpin_current_cpu(void) { }
#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
#define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
/* These aren't inline functions due to a GCC bug. */
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -89,6 +89,100 @@ static struct {
#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
+struct hotplug_pcp {
+ struct task_struct *unplug;
+ int refcount;
+ struct completion synced;
+};
+
+static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
+
+/**
+ * pin_current_cpu - Prevent the current cpu from being unplugged
+ *
+ * Lightweight version of get_online_cpus() to prevent cpu from being
+ * unplugged when code runs in a migration disabled region.
+ *
+ * Must be called with preemption disabled (preempt_count = 1)!
+ */
+void pin_current_cpu(void)
+{
+ struct hotplug_pcp *hp = this_cpu_ptr(&hotplug_pcp);
+
+retry:
+ if (!hp->unplug || hp->refcount || preempt_count() > 1 ||
+ hp->unplug == current) {
+ hp->refcount++;
+ return;
+ }
+ preempt_enable();
+ mutex_lock(&cpu_hotplug.lock);
+ mutex_unlock(&cpu_hotplug.lock);
+ preempt_disable();
+ goto retry;
+}
+
+/**
+ * unpin_current_cpu - Allow unplug of current cpu
+ *
+ * Must be called with preemption or interrupts disabled!
+ */
+void unpin_current_cpu(void)
+{
+ struct hotplug_pcp *hp = this_cpu_ptr(&hotplug_pcp);
+
+ WARN_ON(hp->refcount <= 0);
+
+ /* This is safe. sync_unplug_thread is pinned to this cpu */
+ if (!--hp->refcount && hp->unplug && hp->unplug != current)
+ wake_up_process(hp->unplug);
+}
+
+/*
+ * FIXME: Is this really correct under all circumstances ?
+ */
+static int sync_unplug_thread(void *data)
+{
+ struct hotplug_pcp *hp = data;
+
+ preempt_disable();
+ hp->unplug = current;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ while (hp->refcount) {
+ schedule_preempt_disabled();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ }
+ set_current_state(TASK_RUNNING);
+ preempt_enable();
+ complete(&hp->synced);
+ return 0;
+}
+
+/*
+ * Start the sync_unplug_thread on the target cpu and wait for it to
+ * complete.
+ */
+static int cpu_unplug_begin(unsigned int cpu)
+{
+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
+ struct task_struct *tsk;
+
+ init_completion(&hp->synced);
+ tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d\n", cpu);
+ if (IS_ERR(tsk))
+ return (PTR_ERR(tsk));
+ kthread_bind(tsk, cpu);
+ wake_up_process(tsk);
+ wait_for_completion(&hp->synced);
+ return 0;
+}
+
+static void cpu_unplug_done(unsigned int cpu)
+{
+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
+
+ hp->unplug = NULL;
+}
void get_online_cpus(void)
{
@@ -338,13 +432,14 @@ static int take_cpu_down(void *_param)
/* Requires cpu_add_remove_lock to be held */
static int _cpu_down(unsigned int cpu, int tasks_frozen)
{
- int err, nr_calls = 0;
+ int mycpu, err, nr_calls = 0;
void *hcpu = (void *)(long)cpu;
unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
struct take_cpu_down_param tcd_param = {
.mod = mod,
.hcpu = hcpu,
};
+ cpumask_var_t cpumask;
if (num_online_cpus() == 1)
return -EBUSY;
@@ -352,7 +447,27 @@ static int _cpu_down(unsigned int cpu, i
if (!cpu_online(cpu))
return -EINVAL;
+ /* Move the downtaker off the unplug cpu */
+ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
+ set_cpus_allowed_ptr(current, cpumask);
+ free_cpumask_var(cpumask);
+ preempt_disable();
+ mycpu = smp_processor_id();
+ if (mycpu == cpu) {
+ printk(KERN_ERR "Yuck! Still on unplug CPU!\n");
+ preempt_enable();
+ return -EBUSY;
+ }
+ preempt_enable();
+
cpu_hotplug_begin();
+ err = cpu_unplug_begin(cpu);
+ if (err) {
+ printk("cpu_unplug_begin(%d) failed\n", cpu);
+ goto out_cancel;
+ }
err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
if (err) {
@@ -424,6 +539,8 @@ static int _cpu_down(unsigned int cpu, i
check_for_tasks(cpu);
out_release:
+ cpu_unplug_done(cpu);
+out_cancel:
cpu_hotplug_done();
if (!err)
cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
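pin_current_cpu() is intended to be called from migrate_disable(), introduced elsewhere in this series; a reduced sketch of that caller (atomic-context and debug handling omitted):

/* kernel/sched/core.c (RT series, reduced sketch) */
void migrate_disable(void)
{
	struct task_struct *p = current;

	if (p->migrate_disable) {
		p->migrate_disable++;		/* nested: just bump the count */
		return;
	}

	preempt_disable();
	pin_current_cpu();			/* block unplug of this CPU */
	p->migrate_disable = 1;
	preempt_enable();
}

The matching migrate_enable() drops the count and calls unpin_current_cpu(), which wakes the sync_unplug thread once the last pinned task leaves the CPU.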

@@ -0,0 +1,25 @@
Subject: hotplug: sync_unplug: No "\n" in task name
From: Yong Zhang <yong.zhang0@gmail.com>
Date: Sun, 16 Oct 2011 18:56:43 +0800
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Otherwise the output will look a little odd.
Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
Link: http://lkml.kernel.org/r/1318762607-2261-2-git-send-email-yong.zhang0@gmail.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/cpu.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -168,7 +168,7 @@ static int cpu_unplug_begin(unsigned int
struct task_struct *tsk;
init_completion(&hp->synced);
- tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d\n", cpu);
+ tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
if (IS_ERR(tsk))
return (PTR_ERR(tsk));
kthread_bind(tsk, cpu);

@@ -0,0 +1,40 @@
Subject: hotplug: Use migrate disable on unplug
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 19:35:29 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Migration needs to be disabled across the unplug handling to make
sure that the unplug thread is off the unplugged CPU.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/cpu.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -455,14 +455,13 @@ static int _cpu_down(unsigned int cpu, i
cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
set_cpus_allowed_ptr(current, cpumask);
free_cpumask_var(cpumask);
- preempt_disable();
+ migrate_disable();
mycpu = smp_processor_id();
if (mycpu == cpu) {
printk(KERN_ERR "Yuck! Still on unplug CPU!\n");
- preempt_enable();
+ migrate_enable();
return -EBUSY;
}
- preempt_enable();
cpu_hotplug_begin();
err = cpu_unplug_begin(cpu);
@@ -543,6 +542,7 @@ static int _cpu_down(unsigned int cpu, i
out_release:
cpu_unplug_done(cpu);
out_cancel:
+ migrate_enable();
cpu_hotplug_done();
if (!err)
cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);

@@ -0,0 +1,118 @@
From: Yang Shi <yang.shi@windriver.com>
Date: Mon, 16 Sep 2013 14:09:19 -0700
Subject: hrtimer: Move schedule_work call to helper thread
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
When running the LTP leapsec_timer test, the following call trace is caught:
BUG: sleeping function called from invalid context at kernel/rtmutex.c:659
in_atomic(): 1, irqs_disabled(): 1, pid: 0, name: swapper/1
Preemption disabled at:[<ffffffff810857f3>] cpu_startup_entry+0x133/0x310
CPU: 1 PID: 0 Comm: swapper/1 Not tainted 3.10.10-rt3 #2
Hardware name: Intel Corporation Calpella platform/MATXM-CORE-411-B, BIOS 4.6.3 08/18/2010
ffffffff81c2f800 ffff880076843e40 ffffffff8169918d ffff880076843e58
ffffffff8106db31 ffff88007684b4a0 ffff880076843e70 ffffffff8169d9c0
ffff88007684b4a0 ffff880076843eb0 ffffffff81059da1 0000001876851200
Call Trace:
<IRQ> [<ffffffff8169918d>] dump_stack+0x19/0x1b
[<ffffffff8106db31>] __might_sleep+0xf1/0x170
[<ffffffff8169d9c0>] rt_spin_lock+0x20/0x50
[<ffffffff81059da1>] queue_work_on+0x61/0x100
[<ffffffff81065aa1>] clock_was_set_delayed+0x21/0x30
[<ffffffff810883be>] do_timer+0x40e/0x660
[<ffffffff8108f487>] tick_do_update_jiffies64+0xf7/0x140
[<ffffffff8108fe42>] tick_check_idle+0x92/0xc0
[<ffffffff81044327>] irq_enter+0x57/0x70
[<ffffffff816a040e>] smp_apic_timer_interrupt+0x3e/0x9b
[<ffffffff8169f80a>] apic_timer_interrupt+0x6a/0x70
<EOI> [<ffffffff8155ea1c>] ? cpuidle_enter_state+0x4c/0xc0
[<ffffffff8155eb68>] cpuidle_idle_call+0xd8/0x2d0
[<ffffffff8100b59e>] arch_cpu_idle+0xe/0x30
[<ffffffff8108585e>] cpu_startup_entry+0x19e/0x310
[<ffffffff8168efa2>] start_secondary+0x1ad/0x1b0
clock_was_set_delayed() is called from the hard IRQ handler (timer
interrupt) and calls schedule_work().
Under PREEMPT_RT_FULL, schedule_work() takes spinlocks which can sleep, so
it is not safe to call schedule_work() from interrupt context.
Reference upstream commit b68d61c705ef02384c0538b8d9374545097899ca
(rt,ntp: Move call to schedule_delayed_work() to helper thread)
from git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-stable-rt.git, which
makes a similar change.
Add a helper thread which does the call to schedule_work() and wake that
thread up instead of calling schedule_work() directly.
Signed-off-by: Yang Shi <yang.shi@windriver.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/time/hrtimer.c | 40 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 40 insertions(+)
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -48,6 +48,7 @@
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/timer.h>
+#include <linux/kthread.h>
#include <linux/freezer.h>
#include <asm/uaccess.h>
@@ -713,6 +714,44 @@ static void clock_was_set_work(struct wo
static DECLARE_WORK(hrtimer_work, clock_was_set_work);
+#ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * RT can not call schedule_work from real interrupt context.
+ * Need to make a thread to do the real work.
+ */
+static struct task_struct *clock_set_delay_thread;
+static bool do_clock_set_delay;
+
+static int run_clock_set_delay(void *ignore)
+{
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (do_clock_set_delay) {
+ do_clock_set_delay = false;
+ schedule_work(&hrtimer_work);
+ }
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+ return 0;
+}
+
+void clock_was_set_delayed(void)
+{
+ do_clock_set_delay = true;
+ /* Make visible before waking up process */
+ smp_wmb();
+ wake_up_process(clock_set_delay_thread);
+}
+
+static __init int create_clock_set_delay_thread(void)
+{
+ clock_set_delay_thread = kthread_run(run_clock_set_delay, NULL, "kclksetdelayd");
+ BUG_ON(!clock_set_delay_thread);
+ return 0;
+}
+early_initcall(create_clock_set_delay_thread);
+#else /* PREEMPT_RT_FULL */
/*
* Called from timekeeping and resume code to reprogramm the hrtimer
* interrupt device on all cpus.
@@ -721,6 +760,7 @@ void clock_was_set_delayed(void)
{
schedule_work(&hrtimer_work);
}
+#endif
#else

@@ -0,0 +1,29 @@
From e35e67cb032e78055b63eae5a3a370664fabfc01 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 23 Dec 2015 20:57:41 +0100
Subject: [PATCH] hrtimer: enforce 64-byte alignment
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
The patch "hrtimer: Fixup hrtimer callback changes for preempt-rt" adds
a list_head expired to struct hrtimer_clock_base and with it we run into
BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) > HRTIMER_CLOCK_BASE_ALIGN);
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/hrtimer.h | 4 ----
1 file changed, 4 deletions(-)
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -124,11 +124,7 @@ struct hrtimer_sleeper {
struct task_struct *task;
};
-#ifdef CONFIG_64BIT
# define HRTIMER_CLOCK_BASE_ALIGN 64
-#else
-# define HRTIMER_CLOCK_BASE_ALIGN 32
-#endif
/**
* struct hrtimer_clock_base - the timer base for a specific clock

@@ -0,0 +1,326 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 3 Jul 2009 08:44:31 -0500
Subject: hrtimer: Fixup hrtimer callback changes for preempt-rt
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
In preempt-rt we cannot call the callbacks which take sleeping locks
from the timer interrupt context.
Bring back the softirq split for now, until we fix the signal
delivery problem for real.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
include/linux/hrtimer.h | 4 +
kernel/sched/core.c | 1
kernel/sched/rt.c | 1
kernel/time/hrtimer.c | 142 +++++++++++++++++++++++++++++++++++++++++++----
kernel/time/tick-sched.c | 1
kernel/watchdog.c | 1
6 files changed, 139 insertions(+), 11 deletions(-)
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -102,6 +102,8 @@ struct hrtimer {
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
unsigned long state;
+ struct list_head cb_entry;
+ int irqsafe;
#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
ktime_t praecox;
#endif
@@ -141,6 +143,7 @@ struct hrtimer_clock_base {
int index;
clockid_t clockid;
struct timerqueue_head active;
+ struct list_head expired;
ktime_t (*get_time)(void);
ktime_t offset;
} __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN)));
@@ -184,6 +187,7 @@ struct hrtimer_cpu_base {
raw_spinlock_t lock;
seqcount_t seq;
struct hrtimer *running;
+ struct hrtimer *running_soft;
unsigned int cpu;
unsigned int active_bases;
unsigned int clock_was_set_seq;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -438,6 +438,7 @@ static void init_rq_hrtick(struct rq *rq
hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rq->hrtick_timer.function = hrtick;
+ rq->hrtick_timer.irqsafe = 1;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -47,6 +47,7 @@ void init_rt_bandwidth(struct rt_bandwid
hrtimer_init(&rt_b->rt_period_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ rt_b->rt_period_timer.irqsafe = 1;
rt_b->rt_period_timer.function = sched_rt_period_timer;
}
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -730,11 +730,8 @@ static inline int hrtimer_is_hres_enable
static inline void hrtimer_switch_to_hres(void) { }
static inline void
hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
-static inline int hrtimer_reprogram(struct hrtimer *timer,
- struct hrtimer_clock_base *base)
-{
- return 0;
-}
+static inline void hrtimer_reprogram(struct hrtimer *timer,
+ struct hrtimer_clock_base *base) { }
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void retrigger_next_event(void *arg) { }
@@ -883,7 +880,7 @@ void hrtimer_wait_for_timer(const struct
{
struct hrtimer_clock_base *base = timer->base;
- if (base && base->cpu_base && !hrtimer_hres_active())
+ if (base && base->cpu_base && !timer->irqsafe)
wait_event(base->cpu_base->wait,
!(hrtimer_callback_running(timer)));
}
@@ -933,6 +930,11 @@ static void __remove_hrtimer(struct hrti
if (!(state & HRTIMER_STATE_ENQUEUED))
return;
+ if (unlikely(!list_empty(&timer->cb_entry))) {
+ list_del_init(&timer->cb_entry);
+ return;
+ }
+
if (!timerqueue_del(&base->active, &timer->node))
cpu_base->active_bases &= ~(1 << base->index);
@@ -1162,6 +1164,7 @@ static void __hrtimer_init(struct hrtime
base = hrtimer_clockid_to_base(clock_id);
timer->base = &cpu_base->clock_base[base];
+ INIT_LIST_HEAD(&timer->cb_entry);
timerqueue_init(&timer->node);
#ifdef CONFIG_TIMER_STATS
@@ -1202,6 +1205,7 @@ bool hrtimer_active(const struct hrtimer
seq = raw_read_seqcount_begin(&cpu_base->seq);
if (timer->state != HRTIMER_STATE_INACTIVE ||
+ cpu_base->running_soft == timer ||
cpu_base->running == timer)
return true;
@@ -1292,10 +1296,111 @@ static void __run_hrtimer(struct hrtimer
cpu_base->running = NULL;
}
+#ifdef CONFIG_PREEMPT_RT_BASE
+static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer,
+ struct hrtimer_clock_base *base)
+{
+ int leftmost;
+
+ if (restart != HRTIMER_NORESTART &&
+ !(timer->state & HRTIMER_STATE_ENQUEUED)) {
+
+ leftmost = enqueue_hrtimer(timer, base);
+ if (!leftmost)
+ return;
+#ifdef CONFIG_HIGH_RES_TIMERS
+ if (!hrtimer_is_hres_active(timer)) {
+ /*
+ * Kick to reschedule the next tick to handle the new timer
+ * on dynticks target.
+ */
+ if (base->cpu_base->nohz_active)
+ wake_up_nohz_cpu(base->cpu_base->cpu);
+ } else {
+
+ hrtimer_reprogram(timer, base);
+ }
+#endif
+ }
+}
+
+/*
+ * The changes in mainline which removed the callback modes from
+ * hrtimer are not yet working with -rt. The non wakeup_process()
+ * based callbacks which involve sleeping locks need to be treated
+ * separately.
+ */
+static void hrtimer_rt_run_pending(void)
+{
+ enum hrtimer_restart (*fn)(struct hrtimer *);
+ struct hrtimer_cpu_base *cpu_base;
+ struct hrtimer_clock_base *base;
+ struct hrtimer *timer;
+ int index, restart;
+
+ local_irq_disable();
+ cpu_base = &per_cpu(hrtimer_bases, smp_processor_id());
+
+ raw_spin_lock(&cpu_base->lock);
+
+ for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
+ base = &cpu_base->clock_base[index];
+
+ while (!list_empty(&base->expired)) {
+ timer = list_first_entry(&base->expired,
+ struct hrtimer, cb_entry);
+
+ /*
+ * Same as the above __run_hrtimer function
+ * just we run with interrupts enabled.
+ */
+ debug_deactivate(timer);
+ cpu_base->running_soft = timer;
+ raw_write_seqcount_barrier(&cpu_base->seq);
+
+ __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
+ timer_stats_account_hrtimer(timer);
+ fn = timer->function;
+
+ raw_spin_unlock_irq(&cpu_base->lock);
+ restart = fn(timer);
+ raw_spin_lock_irq(&cpu_base->lock);
+
+ hrtimer_rt_reprogram(restart, timer, base);
+ raw_write_seqcount_barrier(&cpu_base->seq);
+
+ WARN_ON_ONCE(cpu_base->running_soft != timer);
+ cpu_base->running_soft = NULL;
+ }
+ }
+
+ raw_spin_unlock_irq(&cpu_base->lock);
+
+ wake_up_timer_waiters(cpu_base);
+}
+
+static int hrtimer_rt_defer(struct hrtimer *timer)
+{
+ if (timer->irqsafe)
+ return 0;
+
+ __remove_hrtimer(timer, timer->base, timer->state, 0);
+ list_add_tail(&timer->cb_entry, &timer->base->expired);
+ return 1;
+}
+
+#else
+
+static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; }
+
+#endif
+
+
static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
{
struct hrtimer_clock_base *base = cpu_base->clock_base;
unsigned int active = cpu_base->active_bases;
+ int raise = 0;
for (; active; base++, active >>= 1) {
struct timerqueue_node *node;
@@ -1335,15 +1440,20 @@ static void __hrtimer_run_queues(struct
if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
break;
- __run_hrtimer(cpu_base, base, timer, &basenow);
+ if (!hrtimer_rt_defer(timer))
+ __run_hrtimer(cpu_base, base, timer, &basenow);
+ else
+ raise = 1;
}
}
+ if (raise)
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
}
-#ifdef CONFIG_HIGH_RES_TIMERS
-
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
+#ifdef CONFIG_HIGH_RES_TIMERS
+
/*
* High resolution timer interrupt
* Called with interrupts disabled
@@ -1481,8 +1591,6 @@ void hrtimer_run_queues(void)
now = hrtimer_update_base(cpu_base);
__hrtimer_run_queues(cpu_base, now);
raw_spin_unlock(&cpu_base->lock);
-
- wake_up_timer_waiters(cpu_base);
}
/*
@@ -1504,6 +1612,7 @@ static enum hrtimer_restart hrtimer_wake
void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
sl->timer.function = hrtimer_wakeup;
+ sl->timer.irqsafe = 1;
sl->task = task;
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
@@ -1638,6 +1747,7 @@ static void init_hrtimers_cpu(int cpu)
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
cpu_base->clock_base[i].cpu_base = cpu_base;
timerqueue_init_head(&cpu_base->clock_base[i].active);
+ INIT_LIST_HEAD(&cpu_base->clock_base[i].expired);
}
cpu_base->cpu = cpu;
@@ -1742,11 +1852,21 @@ static struct notifier_block hrtimers_nb
.notifier_call = hrtimer_cpu_notify,
};
+#ifdef CONFIG_PREEMPT_RT_BASE
+static void run_hrtimer_softirq(struct softirq_action *h)
+{
+ hrtimer_rt_run_pending();
+}
+#endif
+
void __init hrtimers_init(void)
{
hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
(void *)(long)smp_processor_id());
register_cpu_notifier(&hrtimers_nb);
+#ifdef CONFIG_PREEMPT_RT_BASE
+ open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
+#endif
}
/**
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1105,6 +1105,7 @@ void tick_setup_sched_timer(void)
* Emulate tick processing via per-CPU hrtimers:
*/
hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ ts->sched_timer.irqsafe = 1;
ts->sched_timer.function = tick_sched_timer;
/* Get the next period (per cpu) */
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -507,6 +507,7 @@ static void watchdog_enable(unsigned int
/* kick off the timer for the hardlockup detector */
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer->function = watchdog_timer_fn;
+ hrtimer->irqsafe = 1;
/* Enable the perf event */
watchdog_nmi_enable(cpu);
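Usage pattern for the new flag, as the hunks above illustrate: a timer whose callback needs no sleeping locks sets irqsafe to keep hard-interrupt expiry on RT. A sketch (my_timer and my_callback are illustrative names, not from the patch):

/* usage sketch */
static struct hrtimer my_timer;

static enum hrtimer_restart my_callback(struct hrtimer *t)
{
	/* must not take sleeping locks: runs from hard irq context on RT */
	return HRTIMER_NORESTART;
}

static void my_timer_setup(void)
{
	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	my_timer.function = my_callback;
	my_timer.irqsafe = 1;	/* bypass the softirq deferral added above */
	hrtimer_start(&my_timer, ms_to_ktime(10), HRTIMER_MODE_REL);
}

Timers that leave irqsafe at 0 are queued on the new per-base expired list and run from HRTIMER_SOFTIRQ instead.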

@@ -0,0 +1,205 @@
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:34 -0500
Subject: hrtimers: Prepare full preemption
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Make cancellation of a running callback in softirq context safe
against preemption.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/hrtimer.h | 12 +++++++++++-
kernel/time/hrtimer.c | 33 ++++++++++++++++++++++++++++++++-
kernel/time/itimer.c | 1 +
kernel/time/posix-timers.c | 33 +++++++++++++++++++++++++++++++++
4 files changed, 77 insertions(+), 2 deletions(-)
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -204,6 +204,9 @@ struct hrtimer_cpu_base {
unsigned int nr_hangs;
unsigned int max_hang_time;
#endif
+#ifdef CONFIG_PREEMPT_RT_BASE
+ wait_queue_head_t wait;
+#endif
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
} ____cacheline_aligned;
@@ -392,6 +395,13 @@ static inline void hrtimer_restart(struc
hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
+/* Softirq preemption could deadlock timer removal */
+#ifdef CONFIG_PREEMPT_RT_BASE
+ extern void hrtimer_wait_for_timer(const struct hrtimer *timer);
+#else
+# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0)
+#endif
+
/* Query timers: */
extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
@@ -411,7 +421,7 @@ static inline int hrtimer_is_queued(stru
* Helper function to check, whether the timer is running the callback
* function
*/
-static inline int hrtimer_callback_running(struct hrtimer *timer)
+static inline int hrtimer_callback_running(const struct hrtimer *timer)
{
return timer->base->cpu_base->running == timer;
}
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -866,6 +866,32 @@ u64 hrtimer_forward(struct hrtimer *time
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define wake_up_timer_waiters(b) wake_up(&(b)->wait)
+
+/**
+ * hrtimer_wait_for_timer - Wait for a running timer
+ *
+ * @timer: timer to wait for
+ *
+ * The function waits in case the timers callback function is
+ * currently executed on the waitqueue of the timer base. The
+ * waitqueue is woken up after the timer callback function has
+ * finished execution.
+ */
+void hrtimer_wait_for_timer(const struct hrtimer *timer)
+{
+ struct hrtimer_clock_base *base = timer->base;
+
+ if (base && base->cpu_base && !hrtimer_hres_active())
+ wait_event(base->cpu_base->wait,
+ !(hrtimer_callback_running(timer)));
+}
+
+#else
+# define wake_up_timer_waiters(b) do { } while (0)
+#endif
+
/*
* enqueue_hrtimer - internal function to (re)start a timer
*
@@ -1076,7 +1102,7 @@ int hrtimer_cancel(struct hrtimer *timer
if (ret >= 0)
return ret;
- cpu_relax();
+ hrtimer_wait_for_timer(timer);
}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
@@ -1455,6 +1481,8 @@ void hrtimer_run_queues(void)
now = hrtimer_update_base(cpu_base);
__hrtimer_run_queues(cpu_base, now);
raw_spin_unlock(&cpu_base->lock);
+
+ wake_up_timer_waiters(cpu_base);
}
/*
@@ -1614,6 +1642,9 @@ static void init_hrtimers_cpu(int cpu)
cpu_base->cpu = cpu;
hrtimer_init_hres(cpu_base);
+#ifdef CONFIG_PREEMPT_RT_BASE
+ init_waitqueue_head(&cpu_base->wait);
+#endif
}
#ifdef CONFIG_HOTPLUG_CPU
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -213,6 +213,7 @@ int do_setitimer(int which, struct itime
/* We are sharing ->siglock with it_real_fn() */
if (hrtimer_try_to_cancel(timer) < 0) {
spin_unlock_irq(&tsk->sighand->siglock);
+ hrtimer_wait_for_timer(&tsk->signal->real_timer);
goto again;
}
expires = timeval_to_ktime(value->it_value);
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -828,6 +828,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_
return overrun;
}
+/*
+ * Protected by RCU!
+ */
+static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr)
+{
+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (kc->timer_set == common_timer_set)
+ hrtimer_wait_for_timer(&timr->it.real.timer);
+ else
+ /* FIXME: Whacky hack for posix-cpu-timers */
+ schedule_timeout(1);
+#endif
+}
+
/* Set a POSIX.1b interval timer. */
/* timr->it_lock is taken. */
static int
@@ -905,6 +919,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t,
if (!timr)
return -EINVAL;
+ rcu_read_lock();
kc = clockid_to_kclock(timr->it_clock);
if (WARN_ON_ONCE(!kc || !kc->timer_set))
error = -EINVAL;
@@ -913,9 +928,12 @@ SYSCALL_DEFINE4(timer_settime, timer_t,
unlock_timer(timr, flag);
if (error == TIMER_RETRY) {
+ timer_wait_for_callback(kc, timr);
rtn = NULL; // We already got the old time...
+ rcu_read_unlock();
goto retry;
}
+ rcu_read_unlock();
if (old_setting && !error &&
copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
@@ -953,10 +971,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, t
if (!timer)
return -EINVAL;
+ rcu_read_lock();
if (timer_delete_hook(timer) == TIMER_RETRY) {
unlock_timer(timer, flags);
+ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
+ timer);
+ rcu_read_unlock();
goto retry_delete;
}
+ rcu_read_unlock();
spin_lock(&current->sighand->siglock);
list_del(&timer->list);
@@ -982,8 +1005,18 @@ static void itimer_delete(struct k_itime
retry_delete:
spin_lock_irqsave(&timer->it_lock, flags);
+ /* On RT we can race with a deletion */
+ if (!timer->it_signal) {
+ unlock_timer(timer, flags);
+ return;
+ }
+
if (timer_delete_hook(timer) == TIMER_RETRY) {
+ rcu_read_lock();
unlock_timer(timer, flags);
+ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
+ timer);
+ rcu_read_unlock();
goto retry_delete;
}
list_del(&timer->list);

@@ -0,0 +1,26 @@
From: Mike Galbraith <bitbucket@online.de>
Date: Fri, 30 Aug 2013 07:57:25 +0200
Subject: hwlat-detector: Don't ignore threshold module parameter
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
If the user specified a threshold at module load time, use it.
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Mike Galbraith <bitbucket@online.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/misc/hwlat_detector.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/drivers/misc/hwlat_detector.c
+++ b/drivers/misc/hwlat_detector.c
@@ -414,7 +414,7 @@ static int init_stats(void)
goto out;
__reset_stats();
- data.threshold = DEFAULT_LAT_THRESHOLD; /* threshold us */
+ data.threshold = threshold ?: DEFAULT_LAT_THRESHOLD; /* threshold us */
data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */
data.sample_width = DEFAULT_SAMPLE_WIDTH; /* width us */
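The ?: here is GCC's two-operand conditional: a ?: b yields a when a is non-zero and b otherwise, evaluating a only once. The hunk is therefore equivalent to:

data.threshold = threshold ? threshold : DEFAULT_LAT_THRESHOLD;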

@@ -0,0 +1,126 @@
From: Steven Rostedt <rostedt@goodmis.org>
Date: Mon, 19 Aug 2013 17:33:25 -0400
Subject: hwlat-detector: Update hwlat_detector to add outer loop detection
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
The hwlat_detector reads two timestamps in a row, then reports any
gap between those calls. The problem is, it misses everything between
the second reading of the time stamp and the first reading of the time
stamp in the next loop. That's where most of the time is spent, which
means chances are likely that it will miss all hardware latencies. This
defeats the purpose.
By also testing the first time stamp against the previous loop's second
time stamp (the outer loop), we are more likely to find a latency.
Setting the threshold to 1, here's what the report now looks like:
1347415723.0232202770 0 2
1347415725.0234202822 0 2
1347415727.0236202875 0 2
1347415729.0238202928 0 2
1347415731.0240202980 0 2
1347415734.0243203061 0 2
1347415736.0245203113 0 2
1347415738.0247203166 2 0
1347415740.0249203219 0 3
1347415742.0251203272 0 3
1347415743.0252203299 0 3
1347415745.0254203351 0 2
1347415747.0256203404 0 2
1347415749.0258203457 0 2
1347415751.0260203510 0 2
1347415754.0263203589 0 2
1347415756.0265203642 0 2
1347415758.0267203695 0 2
1347415760.0269203748 0 2
1347415762.0271203801 0 2
1347415764.0273203853 2 0
There's some hardware latency that takes 2 microseconds to run.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/misc/hwlat_detector.c | 32 ++++++++++++++++++++++++++------
1 file changed, 26 insertions(+), 6 deletions(-)
--- a/drivers/misc/hwlat_detector.c
+++ b/drivers/misc/hwlat_detector.c
@@ -143,6 +143,7 @@ static void detector_exit(void);
struct sample {
u64 seqnum; /* unique sequence */
u64 duration; /* ktime delta */
+ u64 outer_duration; /* ktime delta (outer loop) */
struct timespec timestamp; /* wall time */
unsigned long lost;
};
@@ -219,11 +220,13 @@ static struct sample *buffer_get_sample(
*/
static int get_sample(void *unused)
{
- ktime_t start, t1, t2;
+ ktime_t start, t1, t2, last_t2;
s64 diff, total = 0;
u64 sample = 0;
+ u64 outer_sample = 0;
int ret = 1;
+ last_t2.tv64 = 0;
start = ktime_get(); /* start timestamp */
do {
@@ -231,7 +234,22 @@ static int get_sample(void *unused)
t1 = ktime_get(); /* we'll look for a discontinuity */
t2 = ktime_get();
+ if (last_t2.tv64) {
+ /* Check the delta from outer loop (t2 to next t1) */
+ diff = ktime_to_us(ktime_sub(t1, last_t2));
+ /* This shouldn't happen */
+ if (diff < 0) {
+ pr_err(BANNER "time running backwards\n");
+ goto out;
+ }
+ if (diff > outer_sample)
+ outer_sample = diff;
+ }
+ last_t2 = t2;
+
total = ktime_to_us(ktime_sub(t2, start)); /* sample width */
+
+ /* This checks the inner loop (t1 to t2) */
diff = ktime_to_us(ktime_sub(t2, t1)); /* current diff */
/* This shouldn't happen */
@@ -246,12 +264,13 @@ static int get_sample(void *unused)
} while (total <= data.sample_width);
/* If we exceed the threshold value, we have found a hardware latency */
- if (sample > data.threshold) {
+ if (sample > data.threshold || outer_sample > data.threshold) {
struct sample s;
data.count++;
s.seqnum = data.count;
s.duration = sample;
+ s.outer_duration = outer_sample;
s.timestamp = CURRENT_TIME;
__buffer_add_sample(&s);
@@ -738,10 +757,11 @@ static ssize_t debug_sample_fread(struct
}
}
- len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\n",
- sample->timestamp.tv_sec,
- sample->timestamp.tv_nsec,
- sample->duration);
+ len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\t%llu\n",
+ sample->timestamp.tv_sec,
+ sample->timestamp.tv_nsec,
+ sample->duration,
+ sample->outer_duration);
/* handling partial reads is more trouble than it's worth */
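The detection logic, reduced to plain C for clarity; now_us() stands in for
the kernel clock and is hypothetical:

    uint64_t t1, t2, last_t2 = 0, inner_max = 0, outer_max = 0;

    while (still_sampling) {
            t1 = now_us();              /* inner window opens  */
            t2 = now_us();              /* inner window closes */

            if (last_t2) {              /* outer window: previous t2 to this t1 */
                    if (t1 - last_t2 > outer_max)
                            outer_max = t1 - last_t2;
            }
            last_t2 = t2;

            if (t2 - t1 > inner_max)    /* inner window: t1 to t2 */
                    inner_max = t2 - t1;
    }

An SMI that fires between two iterations now lands in the outer window instead
of vanishing, which is exactly the gap this patch closes.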

View File

@ -0,0 +1,184 @@
From: Steven Rostedt <rostedt@goodmis.org>
Date: Mon, 19 Aug 2013 17:33:27 -0400
Subject: hwlat-detector: Use thread instead of stop machine
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
There's no reason to use stop_machine() to search for hardware latency.
Simply disabling interrupts while running the loop is enough to detect
anything that comes in despite interrupts being off, which is exactly
what stop_machine() provided.
Instead of using stop_machine(), just have the thread disable interrupts
while it checks for hardware latency.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/misc/hwlat_detector.c | 60 ++++++++++++++++++------------------------
1 file changed, 26 insertions(+), 34 deletions(-)
--- a/drivers/misc/hwlat_detector.c
+++ b/drivers/misc/hwlat_detector.c
@@ -41,7 +41,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ring_buffer.h>
-#include <linux/stop_machine.h>
#include <linux/time.h>
#include <linux/hrtimer.h>
#include <linux/kthread.h>
@@ -107,7 +106,6 @@ struct data; /* Global state */
/* Sampling functions */
static int __buffer_add_sample(struct sample *sample);
static struct sample *buffer_get_sample(struct sample *sample);
-static int get_sample(void *unused);
/* Threading and state */
static int kthread_fn(void *unused);
@@ -149,7 +147,7 @@ struct sample {
unsigned long lost;
};
-/* keep the global state somewhere. Mostly used under stop_machine. */
+/* keep the global state somewhere. */
static struct data {
struct mutex lock; /* protect changes */
@@ -172,7 +170,7 @@ static struct data {
* @sample: The new latency sample value
*
* This receives a new latency sample and records it in a global ring buffer.
- * No additional locking is used in this case - suited for stop_machine use.
+ * No additional locking is used in this case.
*/
static int __buffer_add_sample(struct sample *sample)
{
@@ -229,18 +227,18 @@ static struct sample *buffer_get_sample(
#endif
/**
* get_sample - sample the CPU TSC and look for likely hardware latencies
- * @unused: This is not used but is a part of the stop_machine API
*
* Used to repeatedly capture the CPU TSC (or similar), looking for potential
- * hardware-induced latency. Called under stop_machine, with data.lock held.
+ * hardware-induced latency. Called with interrupts disabled and with
+ * data.lock held.
*/
-static int get_sample(void *unused)
+static int get_sample(void)
{
time_type start, t1, t2, last_t2;
s64 diff, total = 0;
u64 sample = 0;
u64 outer_sample = 0;
- int ret = 1;
+ int ret = -1;
init_time(last_t2, 0);
start = time_get(); /* start timestamp */
@@ -279,10 +277,14 @@ static int get_sample(void *unused)
} while (total <= data.sample_width);
+ ret = 0;
+
/* If we exceed the threshold value, we have found a hardware latency */
if (sample > data.threshold || outer_sample > data.threshold) {
struct sample s;
+ ret = 1;
+
data.count++;
s.seqnum = data.count;
s.duration = sample;
@@ -295,7 +297,6 @@ static int get_sample(void *unused)
data.max_sample = sample;
}
- ret = 0;
out:
return ret;
}
@@ -305,32 +306,30 @@ static int get_sample(void *unused)
* @unused: A required part of the kthread API.
*
* Used to periodically sample the CPU TSC via a call to get_sample. We
- * use stop_machine, whith does (intentionally) introduce latency since we
+ * disable interrupts, which does (intentionally) introduce latency since we
* need to ensure nothing else might be running (and thus pre-empting).
* Obviously this should never be used in production environments.
*
- * stop_machine will schedule us typically only on CPU0 which is fine for
- * almost every real-world hardware latency situation - but we might later
- * generalize this if we find there are any actualy systems with alternate
- * SMI delivery or other non CPU0 hardware latencies.
+ * Currently this runs on whichever CPU it was scheduled on, but most
+ * real-world hardware latency situations occur across several CPUs,
+ * but we might later generalize this if we find there are any actual
+ * systems with alternate SMI delivery or other hardware latencies.
*/
static int kthread_fn(void *unused)
{
- int err = 0;
- u64 interval = 0;
+ int ret;
+ u64 interval;
while (!kthread_should_stop()) {
mutex_lock(&data.lock);
- err = stop_machine(get_sample, unused, 0);
- if (err) {
- /* Houston, we have a problem */
- mutex_unlock(&data.lock);
- goto err_out;
- }
+ local_irq_disable();
+ ret = get_sample();
+ local_irq_enable();
- wake_up(&data.wq); /* wake up reader(s) */
+ if (ret > 0)
+ wake_up(&data.wq); /* wake up reader(s) */
interval = data.sample_window - data.sample_width;
do_div(interval, USEC_PER_MSEC); /* modifies interval value */
@@ -338,15 +337,10 @@ static int kthread_fn(void *unused)
mutex_unlock(&data.lock);
if (msleep_interruptible(interval))
- goto out;
+ break;
}
- goto out;
-err_out:
- pr_err(BANNER "could not call stop_machine, disabling\n");
- enabled = 0;
-out:
- return err;
+ return 0;
}
/**
@@ -442,8 +436,7 @@ static int init_stats(void)
* This function provides a generic read implementation for the global state
* "data" structure debugfs filesystem entries. It would be nice to use
* simple_attr_read directly, but we need to make sure that the data.lock
- * spinlock is held during the actual read (even though we likely won't ever
- * actually race here as the updater runs under a stop_machine context).
+ * is held during the actual read.
*/
static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos, const u64 *entry)
@@ -478,8 +471,7 @@ static ssize_t simple_data_read(struct f
* This function provides a generic write implementation for the global state
* "data" structure debugfs filesystem entries. It would be nice to use
* simple_attr_write directly, but we need to make sure that the data.lock
- * spinlock is held during the actual write (even though we likely won't ever
- * actually race here as the updater runs under a stop_machine context).
+ * is held during the actual write.
*/
static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos, u64 *entry)

View File

@ -0,0 +1,93 @@
From: Steven Rostedt <rostedt@goodmis.org>
Date: Mon, 19 Aug 2013 17:33:26 -0400
Subject: hwlat-detector: Use trace_clock_local if available
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
As ktime_get() calls into the timing code which does a read_seq(), it
may be affected by other CPUs that touch that lock. To remove this
dependency, use the trace_clock_local() which is already exported
for module use. If CONFIG_TRACING is enabled, use that as the clock,
otherwise use ktime_get().
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/misc/hwlat_detector.c | 34 +++++++++++++++++++++++++---------
1 file changed, 25 insertions(+), 9 deletions(-)
--- a/drivers/misc/hwlat_detector.c
+++ b/drivers/misc/hwlat_detector.c
@@ -51,6 +51,7 @@
#include <linux/version.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/trace_clock.h>
#define BUF_SIZE_DEFAULT 262144UL /* 8K*(sizeof(entry)) */
#define BUF_FLAGS (RB_FL_OVERWRITE) /* no block on full */
@@ -211,6 +212,21 @@ static struct sample *buffer_get_sample(
return sample;
}
+#ifndef CONFIG_TRACING
+#define time_type ktime_t
+#define time_get() ktime_get()
+#define time_to_us(x) ktime_to_us(x)
+#define time_sub(a, b) ktime_sub(a, b)
+#define init_time(a, b) (a).tv64 = b
+#define time_u64(a) ((a).tv64)
+#else
+#define time_type u64
+#define time_get() trace_clock_local()
+#define time_to_us(x) div_u64(x, 1000)
+#define time_sub(a, b) ((a) - (b))
+#define init_time(a, b) (a = b)
+#define time_u64(a) a
+#endif
/**
* get_sample - sample the CPU TSC and look for likely hardware latencies
* @unused: This is not used but is a part of the stop_machine API
@@ -220,23 +236,23 @@ static struct sample *buffer_get_sample(
*/
static int get_sample(void *unused)
{
- ktime_t start, t1, t2, last_t2;
+ time_type start, t1, t2, last_t2;
s64 diff, total = 0;
u64 sample = 0;
u64 outer_sample = 0;
int ret = 1;
- last_t2.tv64 = 0;
- start = ktime_get(); /* start timestamp */
+ init_time(last_t2, 0);
+ start = time_get(); /* start timestamp */
do {
- t1 = ktime_get(); /* we'll look for a discontinuity */
- t2 = ktime_get();
+ t1 = time_get(); /* we'll look for a discontinuity */
+ t2 = time_get();
- if (last_t2.tv64) {
+ if (time_u64(last_t2)) {
/* Check the delta from outer loop (t2 to next t1) */
- diff = ktime_to_us(ktime_sub(t1, last_t2));
+ diff = time_to_us(time_sub(t1, last_t2));
/* This shouldn't happen */
if (diff < 0) {
pr_err(BANNER "time running backwards\n");
@@ -247,10 +263,10 @@ static int get_sample(void *unused)
}
last_t2 = t2;
- total = ktime_to_us(ktime_sub(t2, start)); /* sample width */
+ total = time_to_us(time_sub(t2, start)); /* sample width */
/* This checks the inner loop (t1 to t2) */
- diff = ktime_to_us(ktime_sub(t2, t1)); /* current diff */
+ diff = time_to_us(time_sub(t2, t1)); /* current diff */
/* This shouldn't happen */
if (diff < 0) {
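With CONFIG_TRACING enabled, the macros reduce the sampling arithmetic to
plain u64 operations on trace_clock_local(), for example:

    time_type t1 = time_get();               /* trace_clock_local(), ns */
    time_type t2 = time_get();
    s64 diff = time_to_us(time_sub(t2, t1)); /* div_u64(t2 - t1, 1000)  */

Without CONFIG_TRACING the same source expands to the original ktime_t calls,
so one code path serves both configurations.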

File diff suppressed because it is too large

View File

@ -0,0 +1,34 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 21 Mar 2013 11:35:49 +0100
Subject: i2c/omap: drop the lock hard irq context
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
The lock is taken while reading two registers. On RT the lock is first
taken in hard irq context, where it might sleep, and then again in the
threaded irq. The threaded irq runs in oneshot mode, so the hard irq does
not run again until the thread completes; there is thus no reason to grab
the lock.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/i2c/busses/i2c-omap.c | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -995,15 +995,12 @@ omap_i2c_isr(int irq, void *dev_id)
u16 mask;
u16 stat;
- spin_lock(&omap->lock);
- mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG);
stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
+ mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG);
if (stat & mask)
ret = IRQ_WAKE_THREAD;
- spin_unlock(&omap->lock);
-
return ret;
}
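The argument rests on IRQF_ONESHOT semantics: the interrupt line stays masked
after the hard handler until the threaded handler returns, so the two never
run concurrently. A hedged sketch of the registration this reasoning assumes
(simplified, not the literal omap probe code):

    ret = request_threaded_irq(omap->irq,
                               omap_i2c_isr,        /* hard handler   */
                               omap_i2c_isr_thread, /* thread handler */
                               IRQF_ONESHOT, pdev->name, omap);

Under that constraint the two register reads in the hard handler need no lock
against the thread; the patch also reorders them so STAT is sampled before IE.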

View File

@ -0,0 +1,30 @@
From: Clark Williams <williams@redhat.com>
Date: Tue, 26 May 2015 10:43:43 -0500
Subject: i915: bogus warning from i915 when running on PREEMPT_RT
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
The i915 driver has a 'WARN_ON(!in_interrupt())' in the display
handler, which whines constantly on the RT kernel (since the interrupt
is actually handled in a threaded handler and not actual interrupt
context).
Change the WARN_ON to WARN_ON_NONRT
Tested-by: Joakim Hernberg <jhernberg@alchemy.lu>
Signed-off-by: Clark Williams <williams@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/gpu/drm/i915/intel_display.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11366,7 +11366,7 @@ void intel_check_page_flip(struct drm_de
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
- WARN_ON(!in_interrupt());
+ WARN_ON_NONRT(!in_interrupt());
if (crtc == NULL)
return;
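WARN_ON_NONRT() is an RT-tree helper, presumably defined elsewhere in this
queue along these lines (a sketch, not quoted from the series):

    #ifdef CONFIG_PREEMPT_RT_BASE
    # define WARN_ON_NONRT(condition)  do { } while (0)
    #else
    # define WARN_ON_NONRT(condition)  WARN_ON(condition)
    #endif

The assertion still fires on mainline builds, where in_interrupt() is a valid
expectation, and compiles away on RT, where the handler legitimately runs in
thread context.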

View File

@ -0,0 +1,24 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 14 Jul 2015 14:26:34 +0200
Subject: gpu/i915: don't open code these things
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
The open-coded part is gone since 1f83fee0 ("drm/i915: clear up wedged
transitions"); the owner check is still there.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/gpu/drm/i915/i915_gem_shrinker.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -39,7 +39,7 @@ static bool mutex_is_locked_by(struct mu
if (!mutex_is_locked(mutex))
return false;
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)) && !defined(CONFIG_PREEMPT_RT_BASE)
return mutex->owner == task;
#else
/* Since UP may be pre-empted, we cannot assume that we own the lock */

View File

@ -0,0 +1,170 @@
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:30:16 -0500
Subject: ide: Do not disable interrupts for PREEMPT-RT
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Use the local_irq_*_nort variants.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/ide/alim15x3.c | 4 ++--
drivers/ide/hpt366.c | 4 ++--
drivers/ide/ide-io-std.c | 8 ++++----
drivers/ide/ide-io.c | 2 +-
drivers/ide/ide-iops.c | 4 ++--
drivers/ide/ide-probe.c | 4 ++--
drivers/ide/ide-taskfile.c | 6 +++---
7 files changed, 16 insertions(+), 16 deletions(-)
--- a/drivers/ide/alim15x3.c
+++ b/drivers/ide/alim15x3.c
@@ -234,7 +234,7 @@ static int init_chipset_ali15x3(struct p
isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
- local_irq_save(flags);
+ local_irq_save_nort(flags);
if (m5229_revision < 0xC2) {
/*
@@ -325,7 +325,7 @@ static int init_chipset_ali15x3(struct p
}
pci_dev_put(north);
pci_dev_put(isa_dev);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
return 0;
}
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -1241,7 +1241,7 @@ static int init_dma_hpt366(ide_hwif_t *h
dma_old = inb(base + 2);
- local_irq_save(flags);
+ local_irq_save_nort(flags);
dma_new = dma_old;
pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma);
@@ -1252,7 +1252,7 @@ static int init_dma_hpt366(ide_hwif_t *h
if (dma_new != dma_old)
outb(dma_new, base + 2);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
hwif->name, base, base + 7);
--- a/drivers/ide/ide-io-std.c
+++ b/drivers/ide/ide-io-std.c
@@ -175,7 +175,7 @@ void ide_input_data(ide_drive_t *drive,
unsigned long uninitialized_var(flags);
if ((io_32bit & 2) && !mmio) {
- local_irq_save(flags);
+ local_irq_save_nort(flags);
ata_vlb_sync(io_ports->nsect_addr);
}
@@ -186,7 +186,7 @@ void ide_input_data(ide_drive_t *drive,
insl(data_addr, buf, words);
if ((io_32bit & 2) && !mmio)
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
if (((len + 1) & 3) < 2)
return;
@@ -219,7 +219,7 @@ void ide_output_data(ide_drive_t *drive,
unsigned long uninitialized_var(flags);
if ((io_32bit & 2) && !mmio) {
- local_irq_save(flags);
+ local_irq_save_nort(flags);
ata_vlb_sync(io_ports->nsect_addr);
}
@@ -230,7 +230,7 @@ void ide_output_data(ide_drive_t *drive,
outsl(data_addr, buf, words);
if ((io_32bit & 2) && !mmio)
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
if (((len + 1) & 3) < 2)
return;
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -659,7 +659,7 @@ void ide_timer_expiry (unsigned long dat
/* disable_irq_nosync ?? */
disable_irq(hwif->irq);
/* local CPU only, as if we were handling an interrupt */
- local_irq_disable();
+ local_irq_disable_nort();
if (hwif->polling) {
startstop = handler(drive);
} else if (drive_is_ready(drive)) {
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -129,12 +129,12 @@ int __ide_wait_stat(ide_drive_t *drive,
if ((stat & ATA_BUSY) == 0)
break;
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
*rstat = stat;
return -EBUSY;
}
}
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
}
/*
* Allow status to settle, then read it again.
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -196,10 +196,10 @@ static void do_identify(ide_drive_t *dri
int bswap = 1;
/* local CPU only; some systems need this */
- local_irq_save(flags);
+ local_irq_save_nort(flags);
/* read 512 bytes of id info */
hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
drive->dev_flags |= IDE_DFLAG_ID_READ;
#ifdef DEBUG
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -250,7 +250,7 @@ void ide_pio_bytes(ide_drive_t *drive, s
page_is_high = PageHighMem(page);
if (page_is_high)
- local_irq_save(flags);
+ local_irq_save_nort(flags);
buf = kmap_atomic(page) + offset;
@@ -271,7 +271,7 @@ void ide_pio_bytes(ide_drive_t *drive, s
kunmap_atomic(buf);
if (page_is_high)
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
len -= nr_bytes;
}
@@ -414,7 +414,7 @@ static ide_startstop_t pre_task_out_intr
}
if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
- local_irq_disable();
+ local_irq_disable_nort();
ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
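The _nort() primitives are defined elsewhere in this patch queue; a rough
reconstruction (check the interrupt.h patch in the series for the
authoritative version):

    #ifdef CONFIG_PREEMPT_RT_FULL
    # define local_irq_disable_nort()       barrier()
    # define local_irq_save_nort(flags)     local_save_flags(flags)
    # define local_irq_restore_nort(flags)  (void)(flags)
    #else
    # define local_irq_disable_nort()       local_irq_disable()
    # define local_irq_save_nort(flags)     local_irq_save(flags)
    # define local_irq_restore_nort(flags)  local_irq_restore(flags)
    #endif

Mainline builds behave exactly as before; on RT the long IDE register loops
run with interrupts enabled, trading strict timing for schedulability.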

View File

@ -0,0 +1,124 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 14 Jul 2015 14:26:34 +0200
Subject: idr: Use local lock instead of preempt enable/disable
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
We need to protect the per cpu variable and prevent migration.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/idr.h | 4 ++++
lib/idr.c | 43 +++++++++++++++++++++++++++++++++++++------
2 files changed, 41 insertions(+), 6 deletions(-)
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -95,10 +95,14 @@ bool idr_is_empty(struct idr *idp);
* Each idr_preload() should be matched with an invocation of this
* function. See idr_preload() for details.
*/
+#ifdef CONFIG_PREEMPT_RT_FULL
+void idr_preload_end(void);
+#else
static inline void idr_preload_end(void)
{
preempt_enable();
}
+#endif
/**
* idr_find - return pointer for given id
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -30,6 +30,7 @@
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
+#include <linux/locallock.h>
#define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)
#define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
@@ -45,6 +46,37 @@ static DEFINE_PER_CPU(struct idr_layer *
static DEFINE_PER_CPU(int, idr_preload_cnt);
static DEFINE_SPINLOCK(simple_ida_lock);
+#ifdef CONFIG_PREEMPT_RT_FULL
+static DEFINE_LOCAL_IRQ_LOCK(idr_lock);
+
+static inline void idr_preload_lock(void)
+{
+ local_lock(idr_lock);
+}
+
+static inline void idr_preload_unlock(void)
+{
+ local_unlock(idr_lock);
+}
+
+void idr_preload_end(void)
+{
+ idr_preload_unlock();
+}
+EXPORT_SYMBOL(idr_preload_end);
+#else
+static inline void idr_preload_lock(void)
+{
+ preempt_disable();
+}
+
+static inline void idr_preload_unlock(void)
+{
+ preempt_enable();
+}
+#endif
+
+
/* the maximum ID which can be allocated given idr->layers */
static int idr_max(int layers)
{
@@ -115,14 +147,14 @@ static struct idr_layer *idr_layer_alloc
* context. See idr_preload() for details.
*/
if (!in_interrupt()) {
- preempt_disable();
+ idr_preload_lock();
new = __this_cpu_read(idr_preload_head);
if (new) {
__this_cpu_write(idr_preload_head, new->ary[0]);
__this_cpu_dec(idr_preload_cnt);
new->ary[0] = NULL;
}
- preempt_enable();
+ idr_preload_unlock();
if (new)
return new;
}
@@ -366,7 +398,6 @@ static void idr_fill_slot(struct idr *id
idr_mark_full(pa, id);
}
-
/**
* idr_preload - preload for idr_alloc()
* @gfp_mask: allocation mask to use for preloading
@@ -401,7 +432,7 @@ void idr_preload(gfp_t gfp_mask)
WARN_ON_ONCE(in_interrupt());
might_sleep_if(gfpflags_allow_blocking(gfp_mask));
- preempt_disable();
+ idr_preload_lock();
/*
* idr_alloc() is likely to succeed w/o full idr_layer buffer and
@@ -413,9 +444,9 @@ void idr_preload(gfp_t gfp_mask)
while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
struct idr_layer *new;
- preempt_enable();
+ idr_preload_unlock();
new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
- preempt_disable();
+ idr_preload_lock();
if (!new)
break;
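DEFINE_LOCAL_IRQ_LOCK() is RT's replacement for preempt_disable()-style
per-CPU protection: on RT it is a per-CPU sleeping lock that also pins the
task to its CPU, while mainline builds fall back to the old preempt/irq
disabling. The usage pattern matches the hunks above:

    static DEFINE_LOCAL_IRQ_LOCK(my_lock); /* one instance per CPU */

    local_lock(my_lock);                   /* RT: pins CPU, may block */
    __this_cpu_write(my_var, value);       /* per-cpu data now safe   */
    local_unlock(my_lock);

Because the critical section may sleep on RT, the kmem_cache_zalloc() in
idr_preload() can keep its drop-the-lock/reacquire dance unchanged, as the
last hunk shows.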

View File

@ -0,0 +1,41 @@
From: Sven-Thorsten Dietrich <sdietrich@novell.com>
Date: Fri, 3 Jul 2009 08:30:35 -0500
Subject: infiniband: Mellanox IB driver patch use _nort() primitives
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Fixes an in_atomic stack dump when the Mellanox module is loaded into
the RT kernel.
Michael S. Tsirkin <mst@dev.mellanox.co.il> sayeth:
"Basically, if you just make spin_lock_irqsave (and spin_lock_irq) not disable
interrupts for non-raw spinlocks, I think all of infiniband will be fine without
changes."
Signed-off-by: Sven-Thorsten Dietrich <sven@thebigcorporation.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -847,7 +847,7 @@ void ipoib_mcast_restart_task(struct wor
ipoib_dbg_mcast(priv, "restarting multicast task\n");
- local_irq_save(flags);
+ local_irq_save_nort(flags);
netif_addr_lock(dev);
spin_lock(&priv->lock);
@@ -929,7 +929,7 @@ void ipoib_mcast_restart_task(struct wor
spin_unlock(&priv->lock);
netif_addr_unlock(dev);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
/*
* make sure the in-flight joins have finished before we attempt

View File

@ -0,0 +1,61 @@
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:30:16 -0500
Subject: input: gameport: Do not disable interrupts on PREEMPT_RT
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Use the _nort() primitives.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/input/gameport/gameport.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -91,13 +91,13 @@ static int gameport_measure_speed(struct
tx = ~0;
for (i = 0; i < 50; i++) {
- local_irq_save(flags);
+ local_irq_save_nort(flags);
t1 = ktime_get_ns();
for (t = 0; t < 50; t++)
gameport_read(gameport);
t2 = ktime_get_ns();
t3 = ktime_get_ns();
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
udelay(i * 10);
t = (t2 - t1) - (t3 - t2);
if (t < tx)
@@ -124,12 +124,12 @@ static int old_gameport_measure_speed(st
tx = 1 << 30;
for(i = 0; i < 50; i++) {
- local_irq_save(flags);
+ local_irq_save_nort(flags);
GET_TIME(t1);
for (t = 0; t < 50; t++) gameport_read(gameport);
GET_TIME(t2);
GET_TIME(t3);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
udelay(i * 10);
if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t;
}
@@ -148,11 +148,11 @@ static int old_gameport_measure_speed(st
tx = 1 << 30;
for(i = 0; i < 50; i++) {
- local_irq_save(flags);
+ local_irq_save_nort(flags);
t1 = rdtsc();
for (t = 0; t < 50; t++) gameport_read(gameport);
t2 = rdtsc();
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
udelay(i * 10);
if (t2 - t1 < tx) tx = t2 - t1;
}

View File

@ -0,0 +1,347 @@
Subject: Introduce migrate_disable() + cpu_light()
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 17 Jun 2011 15:42:38 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Introduce migrate_disable(). The task can't be pushed to another CPU but can
be preempted.
From: Peter Zijlstra <a.p.zijlstra@chello.nl>:
|Make migrate_disable() be a preempt_disable() for !rt kernels. This
|allows generic code to use it but still enforces that these code
|sections stay relatively small.
|
|A preemptible migrate_disable() accessible for general use would allow
|people growing arbitrary per-cpu crap instead of clean these things
|up.
From: Steven Rostedt <rostedt@goodmis.org>
| The migrate_disable() can cause a bit of overhead to the RT kernel,
| as changing the affinity is expensive to do at every lock encountered.
| As a running task can not migrate, the actual disabling of migration
| does not need to occur until the task is about to schedule out.
|
| In most cases, a task that disables migration will enable it before
| it schedules making this change improve performance tremendously.
On top of this build get/put_cpu_light(). It is similar to get_cpu():
it uses migrate_disable() instead of preempt_disable(). That means the user
remains on the same CPU but the function using it may be preempted and
invoked again from another caller on the same CPU.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/cpu.h | 3 +
include/linux/preempt.h | 9 +++
include/linux/sched.h | 29 +++++++++-
include/linux/smp.h | 3 +
kernel/sched/core.c | 132 +++++++++++++++++++++++++++++++++++++++++++++++-
kernel/sched/debug.c | 7 ++
lib/smp_processor_id.c | 5 +
7 files changed, 182 insertions(+), 6 deletions(-)
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -222,6 +222,9 @@ static inline void smpboot_thread_init(v
#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;
+static inline void pin_current_cpu(void) { }
+static inline void unpin_current_cpu(void) { }
+
#ifdef CONFIG_HOTPLUG_CPU
/* Stop CPUs going up and down. */
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -257,11 +257,20 @@ do { \
# define preempt_enable_rt() preempt_enable()
# define preempt_disable_nort() barrier()
# define preempt_enable_nort() barrier()
+# ifdef CONFIG_SMP
+ extern void migrate_disable(void);
+ extern void migrate_enable(void);
+# else /* CONFIG_SMP */
+# define migrate_disable() barrier()
+# define migrate_enable() barrier()
+# endif /* CONFIG_SMP */
#else
# define preempt_disable_rt() barrier()
# define preempt_enable_rt() barrier()
# define preempt_disable_nort() preempt_disable()
# define preempt_enable_nort() preempt_enable()
+# define migrate_disable() preempt_disable()
+# define migrate_enable() preempt_enable()
#endif
#ifdef CONFIG_PREEMPT_NOTIFIERS
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1413,6 +1413,12 @@ struct task_struct {
#endif
unsigned int policy;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ int migrate_disable;
+# ifdef CONFIG_SCHED_DEBUG
+ int migrate_disable_atomic;
+# endif
+#endif
int nr_cpus_allowed;
cpumask_t cpus_allowed;
@@ -1836,9 +1842,6 @@ extern int arch_task_struct_size __read_
# define arch_task_struct_size (sizeof(struct task_struct))
#endif
-/* Future-safe accessor for struct task_struct's cpus_allowed. */
-#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
-
#define TNF_MIGRATED 0x01
#define TNF_NO_GROUP 0x02
#define TNF_SHARED 0x04
@@ -3114,6 +3117,26 @@ static inline void set_task_cpu(struct t
#endif /* CONFIG_SMP */
+static inline int __migrate_disabled(struct task_struct *p)
+{
+#ifdef CONFIG_PREEMPT_RT_FULL
+ return p->migrate_disable;
+#else
+ return 0;
+#endif
+}
+
+/* Future-safe accessor for struct task_struct's cpus_allowed. */
+static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
+{
+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (p->migrate_disable)
+ return cpumask_of(task_cpu(p));
+#endif
+
+ return &p->cpus_allowed;
+}
+
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -185,6 +185,9 @@ static inline void smp_init(void) { }
#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
#define put_cpu() preempt_enable()
+#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); })
+#define put_cpu_light() migrate_enable()
+
/*
* Callback to arch code if there's nosmp or maxcpus=0 on the
* boot command line:
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1164,6 +1164,15 @@ void set_cpus_allowed_common(struct task
p->nr_cpus_allowed = cpumask_weight(new_mask);
}
+#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
+#define MIGRATE_DISABLE_SET_AFFIN (1<<30) /* Can't make a negative */
+#define migrate_disabled_updated(p) ((p)->migrate_disable & MIGRATE_DISABLE_SET_AFFIN)
+#define migrate_disable_count(p) ((p)->migrate_disable & ~MIGRATE_DISABLE_SET_AFFIN)
+#else
+static inline void update_migrate_disable(struct task_struct *p) { }
+#define migrate_disabled_updated(p) 0
+#endif
+
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
struct rq *rq = task_rq(p);
@@ -1171,6 +1180,11 @@ void do_set_cpus_allowed(struct task_str
lockdep_assert_held(&p->pi_lock);
+ if (migrate_disabled_updated(p)) {
+ cpumask_copy(&p->cpus_allowed, new_mask);
+ return;
+ }
+
queued = task_on_rq_queued(p);
running = task_current(rq, p);
@@ -1232,7 +1246,7 @@ static int __set_cpus_allowed_ptr(struct
do_set_cpus_allowed(p, new_mask);
/* Can the task run on the task's current CPU? If so, we're done */
- if (cpumask_test_cpu(task_cpu(p), new_mask))
+ if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
goto out;
dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
@@ -3022,6 +3036,120 @@ static inline void schedule_debug(struct
schedstat_inc(this_rq(), sched_count);
}
+#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
+
+static inline void update_migrate_disable(struct task_struct *p)
+{
+ const struct cpumask *mask;
+
+ if (likely(!p->migrate_disable))
+ return;
+
+ /* Did we already update affinity? */
+ if (unlikely(migrate_disabled_updated(p)))
+ return;
+
+ /*
+ * Since this is always current we can get away with only locking
+ * rq->lock, the ->cpus_allowed value can normally only be changed
+ * while holding both p->pi_lock and rq->lock, but seeing that this
+ * is current, we cannot actually be waking up, so all code that
+ * relies on serialization against p->pi_lock is out of scope.
+ *
+ * Having rq->lock serializes us against things like
+ * set_cpus_allowed_ptr() that can still happen concurrently.
+ */
+ mask = tsk_cpus_allowed(p);
+
+ if (p->sched_class->set_cpus_allowed)
+ p->sched_class->set_cpus_allowed(p, mask);
+ /* mask==cpumask_of(task_cpu(p)) which has a cpumask_weight==1 */
+ p->nr_cpus_allowed = 1;
+
+ /* Let migrate_enable know to fix things back up */
+ p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN;
+}
+
+void migrate_disable(void)
+{
+ struct task_struct *p = current;
+
+ if (in_atomic() || p->flags & PF_NO_SETAFFINITY) {
+#ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic++;
+#endif
+ return;
+ }
+
+#ifdef CONFIG_SCHED_DEBUG
+ WARN_ON_ONCE(p->migrate_disable_atomic);
+#endif
+
+ if (p->migrate_disable) {
+ p->migrate_disable++;
+ return;
+ }
+
+ preempt_disable();
+ pin_current_cpu();
+ p->migrate_disable = 1;
+ preempt_enable();
+}
+EXPORT_SYMBOL(migrate_disable);
+
+void migrate_enable(void)
+{
+ struct task_struct *p = current;
+ const struct cpumask *mask;
+ unsigned long flags;
+ struct rq *rq;
+
+ if (in_atomic() || p->flags & PF_NO_SETAFFINITY) {
+#ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic--;
+#endif
+ return;
+ }
+
+#ifdef CONFIG_SCHED_DEBUG
+ WARN_ON_ONCE(p->migrate_disable_atomic);
+#endif
+ WARN_ON_ONCE(p->migrate_disable <= 0);
+
+ if (migrate_disable_count(p) > 1) {
+ p->migrate_disable--;
+ return;
+ }
+
+ preempt_disable();
+ if (unlikely(migrate_disabled_updated(p))) {
+ /*
+ * Undo whatever update_migrate_disable() did, also see there
+ * about locking.
+ */
+ rq = this_rq();
+ raw_spin_lock_irqsave(&current->pi_lock, flags);
+ raw_spin_lock(&rq->lock);
+
+ /*
+ * Clearing migrate_disable causes tsk_cpus_allowed to
+ * show the tasks original cpu affinity.
+ */
+ p->migrate_disable = 0;
+ mask = tsk_cpus_allowed(p);
+ do_set_cpus_allowed(p, mask);
+
+ raw_spin_unlock(&rq->lock);
+ raw_spin_unlock_irqrestore(&current->pi_lock, flags);
+ } else
+ p->migrate_disable = 0;
+
+ unpin_current_cpu();
+ preempt_enable();
+}
+EXPORT_SYMBOL(migrate_enable);
+#endif
+
/*
* Pick up the highest-prio task:
*/
@@ -3137,6 +3265,8 @@ static void __sched notrace __schedule(b
raw_spin_lock_irq(&rq->lock);
lockdep_pin_lock(&rq->lock);
+ update_migrate_disable(prev);
+
rq->clock_skip_update <<= 1; /* promote REQ to ACT */
switch_count = &prev->nivcsw;
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -251,6 +251,9 @@ void print_rt_rq(struct seq_file *m, int
P(rt_throttled);
PN(rt_time);
PN(rt_runtime);
+#ifdef CONFIG_SMP
+ P(rt_nr_migratory);
+#endif
#undef PN
#undef P
@@ -635,6 +638,10 @@ void proc_sched_show_task(struct task_st
#endif
P(policy);
P(prio);
+#ifdef CONFIG_PREEMPT_RT_FULL
+ P(migrate_disable);
+#endif
+ P(nr_cpus_allowed);
#undef PN
#undef __PN
#undef P
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -39,8 +39,9 @@ notrace static unsigned int check_preemp
if (!printk_ratelimit())
goto out_enable;
- printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n",
- what1, what2, preempt_count() - 1, current->comm, current->pid);
+ printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x %08x] code: %s/%d\n",
+ what1, what2, preempt_count() - 1, __migrate_disabled(current),
+ current->comm, current->pid);
print_symbol("caller is %s\n", (long)__builtin_return_address(0));
dump_stack();
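get_cpu_light()/put_cpu_light() are drop-in replacements for
get_cpu()/put_cpu() wherever the caller only needs to stay on one CPU rather
than be non-preemptible. A sketch:

    int cpu = get_cpu_light(); /* migrate_disable() + smp_processor_id() */

    /* We will not migrate away from 'cpu', but we CAN be preempted:
     * per-cpu data still needs a local lock or atomic ops here,
     * unlike under get_cpu(). */

    put_cpu_light();           /* migrate_enable() */

That weaker guarantee is the point: an RT task can preempt such a section
instead of waiting for it to finish.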

View File

@ -0,0 +1,229 @@
From 9a69dce752915917ecfe06a21f9c826c76f6eb07 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 30 Oct 2015 11:59:07 +0100
Subject: [PATCH] ipc/msg: Implement lockless pipelined wakeups
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
This patch moves the wake_up_process() invocation so it is not done under
the perm->lock by making use of a lockless wake_q. With this change, the
waiter is woken up once the message has been assigned and it does not
need to loop on SMP if the message points to NULL. In the signal case we
still need to check the pointer under the lock to verify the state.
This change should also avoid the introduction of preempt_disable() in
-RT, which avoids a busy loop that polls for the NULL -> !NULL
change if the waiter has a higher priority compared to the waker.
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Manfred Spraul <manfred@colorfullife.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: George Spelvin <linux@horizon.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
ipc/msg.c | 101 +++++++++++++++++---------------------------------------------
1 file changed, 28 insertions(+), 73 deletions(-)
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -183,20 +183,14 @@ static void ss_wakeup(struct list_head *
}
}
-static void expunge_all(struct msg_queue *msq, int res)
+static void expunge_all(struct msg_queue *msq, int res,
+ struct wake_q_head *wake_q)
{
struct msg_receiver *msr, *t;
list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
- msr->r_msg = NULL; /* initialize expunge ordering */
- wake_up_process(msr->r_tsk);
- /*
- * Ensure that the wakeup is visible before setting r_msg as
- * the receiving end depends on it: either spinning on a nil,
- * or dealing with -EAGAIN cases. See lockless receive part 1
- * and 2 in do_msgrcv().
- */
- smp_wmb(); /* barrier (B) */
+
+ wake_q_add(wake_q, msr->r_tsk);
msr->r_msg = ERR_PTR(res);
}
}
@@ -213,11 +207,13 @@ static void freeque(struct ipc_namespace
{
struct msg_msg *msg, *t;
struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);
+ WAKE_Q(wake_q);
- expunge_all(msq, -EIDRM);
+ expunge_all(msq, -EIDRM, &wake_q);
ss_wakeup(&msq->q_senders, 1);
msg_rmid(ns, msq);
ipc_unlock_object(&msq->q_perm);
+ wake_up_q(&wake_q);
rcu_read_unlock();
list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) {
@@ -342,6 +338,7 @@ static int msgctl_down(struct ipc_namesp
struct kern_ipc_perm *ipcp;
struct msqid64_ds uninitialized_var(msqid64);
struct msg_queue *msq;
+ WAKE_Q(wake_q);
int err;
if (cmd == IPC_SET) {
@@ -389,7 +386,7 @@ static int msgctl_down(struct ipc_namesp
/* sleeping receivers might be excluded by
* stricter permissions.
*/
- expunge_all(msq, -EAGAIN);
+ expunge_all(msq, -EAGAIN, &wake_q);
/* sleeping senders might be able to send
* due to a larger queue size.
*/
@@ -402,6 +399,7 @@ static int msgctl_down(struct ipc_namesp
out_unlock0:
ipc_unlock_object(&msq->q_perm);
+ wake_up_q(&wake_q);
out_unlock1:
rcu_read_unlock();
out_up:
@@ -566,7 +564,8 @@ static int testmsg(struct msg_msg *msg,
return 0;
}
-static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
+static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg,
+ struct wake_q_head *wake_q)
{
struct msg_receiver *msr, *t;
@@ -577,27 +576,13 @@ static inline int pipelined_send(struct
list_del(&msr->r_list);
if (msr->r_maxsize < msg->m_ts) {
- /* initialize pipelined send ordering */
- msr->r_msg = NULL;
- wake_up_process(msr->r_tsk);
- /* barrier (B) see barrier comment below */
- smp_wmb();
+ wake_q_add(wake_q, msr->r_tsk);
msr->r_msg = ERR_PTR(-E2BIG);
} else {
- msr->r_msg = NULL;
msq->q_lrpid = task_pid_vnr(msr->r_tsk);
msq->q_rtime = get_seconds();
- wake_up_process(msr->r_tsk);
- /*
- * Ensure that the wakeup is visible before
- * setting r_msg, as the receiving can otherwise
- * exit - once r_msg is set, the receiver can
- * continue. See lockless receive part 1 and 2
- * in do_msgrcv(). Barrier (B).
- */
- smp_wmb();
+ wake_q_add(wake_q, msr->r_tsk);
msr->r_msg = msg;
-
return 1;
}
}
@@ -613,6 +598,7 @@ long do_msgsnd(int msqid, long mtype, vo
struct msg_msg *msg;
int err;
struct ipc_namespace *ns;
+ WAKE_Q(wake_q);
ns = current->nsproxy->ipc_ns;
@@ -698,7 +684,7 @@ long do_msgsnd(int msqid, long mtype, vo
msq->q_lspid = task_tgid_vnr(current);
msq->q_stime = get_seconds();
- if (!pipelined_send(msq, msg)) {
+ if (!pipelined_send(msq, msg, &wake_q)) {
/* no one is waiting for this message, enqueue it */
list_add_tail(&msg->m_list, &msq->q_messages);
msq->q_cbytes += msgsz;
@@ -712,6 +698,7 @@ long do_msgsnd(int msqid, long mtype, vo
out_unlock0:
ipc_unlock_object(&msq->q_perm);
+ wake_up_q(&wake_q);
out_unlock1:
rcu_read_unlock();
if (msg != NULL)
@@ -932,57 +919,25 @@ long do_msgrcv(int msqid, void __user *b
rcu_read_lock();
/* Lockless receive, part 2:
- * Wait until pipelined_send or expunge_all are outside of
- * wake_up_process(). There is a race with exit(), see
- * ipc/mqueue.c for the details. The correct serialization
- * ensures that a receiver cannot continue without the wakeup
- * being visibible _before_ setting r_msg:
+ * The work in pipelined_send() and expunge_all():
+ * - Set pointer to message
+ * - Queue the receiver task for later wakeup
+ * - Wake up the process after the lock is dropped.
*
- * CPU 0 CPU 1
- * <loop receiver>
- * smp_rmb(); (A) <-- pair -. <waker thread>
- * <load ->r_msg> | msr->r_msg = NULL;
- * | wake_up_process();
- * <continue> `------> smp_wmb(); (B)
- * msr->r_msg = msg;
- *
- * Where (A) orders the message value read and where (B) orders
- * the write to the r_msg -- done in both pipelined_send and
- * expunge_all.
+ * Should the process wake up before this wakeup (due to a
+ * signal) it will either see the message and continue …
*/
- for (;;) {
- /*
- * Pairs with writer barrier in pipelined_send
- * or expunge_all.
- */
- smp_rmb(); /* barrier (A) */
- msg = (struct msg_msg *)msr_d.r_msg;
- if (msg)
- break;
- /*
- * The cpu_relax() call is a compiler barrier
- * which forces everything in this loop to be
- * re-loaded.
- */
- cpu_relax();
- }
-
- /* Lockless receive, part 3:
- * If there is a message or an error then accept it without
- * locking.
- */
+ msg = (struct msg_msg *)msr_d.r_msg;
if (msg != ERR_PTR(-EAGAIN))
goto out_unlock1;
- /* Lockless receive, part 3:
- * Acquire the queue spinlock.
- */
+ /*
+ * … or see -EAGAIN, acquire the lock to check the message
+ * again.
+ */
ipc_lock_object(&msq->q_perm);
- /* Lockless receive, part 4:
- * Repeat test after acquiring the spinlock.
- */
msg = (struct msg_msg *)msr_d.r_msg;
if (msg != ERR_PTR(-EAGAIN))
goto out_unlock0;
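The wake_q API used here separates queueing a wakeup from issuing it, so no
task is woken while the ipc lock is still held. The general shape, matching
the hunks above:

    WAKE_Q(wake_q);                  /* on-stack wakeup list */

    ipc_lock_object(&msq->q_perm);
    wake_q_add(&wake_q, msr->r_tsk); /* queue only; no wakeup yet    */
    msr->r_msg = msg;                /* publish state under the lock */
    ipc_unlock_object(&msq->q_perm);

    wake_up_q(&wake_q);              /* the real wake_up_process()
                                      * calls, after the lock is gone */

wake_q_add() takes a reference on each task, so the deferred wakeup stays safe
even if the receiver exits the syscall in the meantime.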

View File

@ -0,0 +1,70 @@
Subject: ipc/sem: Rework semaphore wakeups
From: Peter Zijlstra <peterz@infradead.org>
Date: Wed, 14 Sep 2011 11:57:04 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Current sysv sems have a weird ass wakeup scheme that involves keeping
preemption disabled over a potential O(n^2) loop and busy waiting on
that on other CPUs.
Kill this and simply wake the task directly from under the sem_lock.
This was discovered by a migrate_disable() debug feature that
disallows:
spin_lock();
preempt_disable();
spin_unlock()
preempt_enable();
Cc: Manfred Spraul <manfred@colorfullife.com>
Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Reported-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Manfred Spraul <manfred@colorfullife.com>
Link: http://lkml.kernel.org/r/1315994224.5040.1.camel@twins
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
ipc/sem.c | 10 ++++++++++
1 file changed, 10 insertions(+)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -690,6 +690,13 @@ static int perform_atomic_semop(struct s
static void wake_up_sem_queue_prepare(struct list_head *pt,
struct sem_queue *q, int error)
{
+#ifdef CONFIG_PREEMPT_RT_BASE
+ struct task_struct *p = q->sleeper;
+ get_task_struct(p);
+ q->status = error;
+ wake_up_process(p);
+ put_task_struct(p);
+#else
if (list_empty(pt)) {
/*
* Hold preempt off so that we don't get preempted and have the
@@ -701,6 +708,7 @@ static void wake_up_sem_queue_prepare(st
q->pid = error;
list_add_tail(&q->list, pt);
+#endif
}
/**
@@ -714,6 +722,7 @@ static void wake_up_sem_queue_prepare(st
*/
static void wake_up_sem_queue_do(struct list_head *pt)
{
+#ifndef CONFIG_PREEMPT_RT_BASE
struct sem_queue *q, *t;
int did_something;
@@ -726,6 +735,7 @@ static void wake_up_sem_queue_do(struct
}
if (did_something)
preempt_enable();
+#endif
}
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)

View File

@ -0,0 +1,147 @@
Subject: genirq: Allow disabling of softirq processing in irq thread context
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 31 Jan 2012 13:01:27 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
The processing of softirqs in irq thread context is a performance gain
for the non-rt workloads of a system, but it's counterproductive for
interrupts which are explicitly related to the realtime
workload. Allow such interrupts to prevent softirq processing in their
thread context.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/interrupt.h | 2 ++
include/linux/irq.h | 4 +++-
kernel/irq/manage.c | 13 ++++++++++++-
kernel/irq/settings.h | 12 ++++++++++++
kernel/softirq.c | 9 +++++++++
5 files changed, 38 insertions(+), 2 deletions(-)
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -61,6 +61,7 @@
* interrupt handler after suspending interrupts. For system
* wakeup devices users need to implement wakeup detection in
* their interrupt handlers.
+ * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT)
*/
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
@@ -74,6 +75,7 @@
#define IRQF_NO_THREAD 0x00010000
#define IRQF_EARLY_RESUME 0x00020000
#define IRQF_COND_SUSPEND 0x00040000
+#define IRQF_NO_SOFTIRQ_CALL 0x00080000
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -72,6 +72,7 @@ enum irqchip_irq_state;
* IRQ_IS_POLLED - Always polled by another interrupt. Exclude
* it from the spurious interrupt detection
* mechanism and from core side polling.
+ * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT)
* IRQ_DISABLE_UNLAZY - Disable lazy irq disable
*/
enum {
@@ -99,13 +100,14 @@ enum {
IRQ_PER_CPU_DEVID = (1 << 17),
IRQ_IS_POLLED = (1 << 18),
IRQ_DISABLE_UNLAZY = (1 << 19),
+ IRQ_NO_SOFTIRQ_CALL = (1 << 20),
};
#define IRQF_MODIFY_MASK \
(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
- IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY)
+ IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_NO_SOFTIRQ_CALL)
#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -940,7 +940,15 @@ irq_forced_thread_fn(struct irq_desc *de
local_bh_disable();
ret = action->thread_fn(action->irq, action->dev_id);
irq_finalize_oneshot(desc, action);
- local_bh_enable();
+ /*
+ * Interrupts which have real time requirements can be set up
+ * to avoid softirq processing in the thread handler. This is
+ * safe as these interrupts do not raise soft interrupts.
+ */
+ if (irq_settings_no_softirq_call(desc))
+ _local_bh_enable();
+ else
+ local_bh_enable();
return ret;
}
@@ -1390,6 +1398,9 @@ static int
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
}
+ if (new->flags & IRQF_NO_SOFTIRQ_CALL)
+ irq_settings_set_no_softirq_call(desc);
+
/* Set default affinity mask once everything is setup */
setup_affinity(desc, mask);
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -16,6 +16,7 @@ enum {
_IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
_IRQ_IS_POLLED = IRQ_IS_POLLED,
_IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY,
+ _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL,
_IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
};
@@ -30,6 +31,7 @@ enum {
#define IRQ_PER_CPU_DEVID GOT_YOU_MORON
#define IRQ_IS_POLLED GOT_YOU_MORON
#define IRQ_DISABLE_UNLAZY GOT_YOU_MORON
+#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON
#undef IRQF_MODIFY_MASK
#define IRQF_MODIFY_MASK GOT_YOU_MORON
@@ -40,6 +42,16 @@ irq_settings_clr_and_set(struct irq_desc
desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
}
+static inline bool irq_settings_no_softirq_call(struct irq_desc *desc)
+{
+ return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL;
+}
+
+static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc)
+{
+ desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL;
+}
+
static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
{
return desc->status_use_accessors & _IRQ_PER_CPU;
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -576,6 +576,15 @@ void __local_bh_enable(void)
}
EXPORT_SYMBOL(__local_bh_enable);
+void _local_bh_enable(void)
+{
+ if (WARN_ON(current->softirq_nestcnt == 0))
+ return;
+ if (--current->softirq_nestcnt == 0)
+ migrate_enable();
+}
+EXPORT_SYMBOL(_local_bh_enable);
+
int in_serving_softirq(void)
{
return current->flags & PF_IN_SOFTIRQ;
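A latency-critical driver opts in when requesting its force-threaded
interrupt; a hedged example with a hypothetical device:

    ret = request_irq(irq, my_rt_handler,
                      IRQF_NO_SOFTIRQ_CALL, /* no softirqs in our thread */
                      "my-rt-device", dev);

With the flag set, irq_forced_thread_fn() finishes with _local_bh_enable()
instead of local_bh_enable(), so pending softirqs are left to ksoftirqd rather
than processed in this thread's context. As the comment in the hunk notes,
this is only safe because such handlers do not raise softirqs themselves.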

View File

@ -0,0 +1,78 @@
Subject: irqwork: Move irq safe work to irq context
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 15 Nov 2015 18:40:17 +0100
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
On architectures where arch_irq_work_has_interrupt() returns false, we
end up running the irq safe work from the softirq context. That
results in a potential deadlock in the scheduler irq work which
expects that function to be called with interrupts disabled.
Split the irq_work_tick() function into a hard and soft variant. Call
the hard variant from the tick interrupt and add the soft variant to
the timer softirq.
Reported-and-tested-by: Yanjiang Jin <yanjiang.jin@windriver.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
---
include/linux/irq_work.h | 6 ++++++
kernel/irq_work.c | 9 +++++++++
kernel/time/timer.c | 6 ++----
3 files changed, 17 insertions(+), 4 deletions(-)
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -52,4 +52,10 @@ static inline bool irq_work_needs_cpu(vo
static inline void irq_work_run(void) { }
#endif
+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
+void irq_work_tick_soft(void);
+#else
+static inline void irq_work_tick_soft(void) { }
+#endif
+
#endif /* _LINUX_IRQ_WORK_H */
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -200,8 +200,17 @@ void irq_work_tick(void)
if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
irq_work_run_list(raised);
+
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL))
+ irq_work_run_list(this_cpu_ptr(&lazy_list));
+}
+
+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
+void irq_work_tick_soft(void)
+{
irq_work_run_list(this_cpu_ptr(&lazy_list));
}
+#endif
/*
* Synchronize against the irq_work @entry, ensures the entry is not
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1484,7 +1484,7 @@ void update_process_times(int user_tick)
scheduler_tick();
run_local_timers();
rcu_check_callbacks(user_tick);
-#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL)
+#if defined(CONFIG_IRQ_WORK)
if (in_irq())
irq_work_tick();
#endif
@@ -1498,9 +1498,7 @@ static void run_timer_softirq(struct sof
{
struct tvec_base *base = this_cpu_ptr(&tvec_bases);
-#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
- irq_work_tick();
-#endif
+ irq_work_tick_soft();
if (time_after_eq(jiffies, base->timer_jiffies))
__run_timers(base);

View File

@ -0,0 +1,198 @@
Subject: irqwork: push most work into softirq context
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 23 Jun 2015 15:32:51 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Initially we deferred all irqwork into softirq because we didn't want the
latency spikes if perf or another user was busy and delayed the RT task.
The NOHZ trigger (nohz_full_kick_work) was the first user that did not work
as expected if it did not run in the original irqwork context, so we had to
bring that context back for it. push_irq_work_func is the second one that
requires this.
This patch adds the IRQ_WORK_HARD_IRQ flag, which makes sure the callback
runs in raw-irq context. Everything else is deferred into softirq context.
Without -RT we have the original behavior.
This patch incorporates tglx's original work, reworked a little to bring
back arch_irq_work_raise() where possible, and a few fixes from Steven
Rostedt and Mike Galbraith.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/irq_work.h | 1 +
kernel/irq_work.c | 47 ++++++++++++++++++++++++++++++++++-------------
kernel/sched/rt.c | 1 +
kernel/time/tick-sched.c | 6 ++++++
kernel/time/timer.c | 6 +++++-
5 files changed, 47 insertions(+), 14 deletions(-)
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -16,6 +16,7 @@
#define IRQ_WORK_BUSY 2UL
#define IRQ_WORK_FLAGS 3UL
#define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */
+#define IRQ_WORK_HARD_IRQ 8UL /* Run hard IRQ context, even on RT */
struct irq_work {
unsigned long flags;
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -17,6 +17,7 @@
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
+#include <linux/interrupt.h>
#include <asm/processor.h>
@@ -65,6 +66,8 @@ void __weak arch_irq_work_raise(void)
*/
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
+ struct llist_head *list;
+
/* All work should have been flushed before going offline */
WARN_ON_ONCE(cpu_is_offline(cpu));
@@ -75,7 +78,12 @@ bool irq_work_queue_on(struct irq_work *
if (!irq_work_claim(work))
return false;
- if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
+ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ))
+ list = &per_cpu(lazy_list, cpu);
+ else
+ list = &per_cpu(raised_list, cpu);
+
+ if (llist_add(&work->llnode, list))
arch_send_call_function_single_ipi(cpu);
return true;
@@ -86,6 +94,9 @@ EXPORT_SYMBOL_GPL(irq_work_queue_on);
/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
+ struct llist_head *list;
+ bool lazy_work, realtime = IS_ENABLED(CONFIG_PREEMPT_RT_FULL);
+
/* Only queue if not already pending */
if (!irq_work_claim(work))
return false;
@@ -93,13 +104,15 @@ bool irq_work_queue(struct irq_work *wor
/* Queue the entry and raise the IPI if needed. */
preempt_disable();
- /* If the work is "lazy", handle it from next tick if any */
- if (work->flags & IRQ_WORK_LAZY) {
- if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
- tick_nohz_tick_stopped())
- arch_irq_work_raise();
- } else {
- if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+ lazy_work = work->flags & IRQ_WORK_LAZY;
+
+ if (lazy_work || (realtime && !(work->flags & IRQ_WORK_HARD_IRQ)))
+ list = this_cpu_ptr(&lazy_list);
+ else
+ list = this_cpu_ptr(&raised_list);
+
+ if (llist_add(&work->llnode, list)) {
+ if (!lazy_work || tick_nohz_tick_stopped())
arch_irq_work_raise();
}
@@ -116,9 +129,8 @@ bool irq_work_needs_cpu(void)
raised = this_cpu_ptr(&raised_list);
lazy = this_cpu_ptr(&lazy_list);
- if (llist_empty(raised) || arch_irq_work_has_interrupt())
- if (llist_empty(lazy))
- return false;
+ if (llist_empty(raised) && llist_empty(lazy))
+ return false;
/* All work should have been flushed before going offline */
WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
@@ -132,7 +144,7 @@ static void irq_work_run_list(struct lli
struct irq_work *work;
struct llist_node *llnode;
- BUG_ON(!irqs_disabled());
+ BUG_ON_NONRT(!irqs_disabled());
if (llist_empty(list))
return;
@@ -169,7 +181,16 @@ static void irq_work_run_list(struct lli
void irq_work_run(void)
{
irq_work_run_list(this_cpu_ptr(&raised_list));
- irq_work_run_list(this_cpu_ptr(&lazy_list));
+ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) {
+ /*
+ * NOTE: we raise softirq via IPI for safety,
+ * and execute in irq_work_tick() to move the
+ * overhead from hard to soft irq context.
+ */
+ if (!llist_empty(this_cpu_ptr(&lazy_list)))
+ raise_softirq(TIMER_SOFTIRQ);
+ } else
+ irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -94,6 +94,7 @@ void init_rt_rq(struct rt_rq *rt_rq)
rt_rq->push_cpu = nr_cpu_ids;
raw_spin_lock_init(&rt_rq->push_lock);
init_irq_work(&rt_rq->push_work, push_irq_work_func);
+ rt_rq->push_work.flags |= IRQ_WORK_HARD_IRQ;
#endif
#endif /* CONFIG_SMP */
/* We start is dequeued state, because no RT tasks are queued */
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -181,6 +181,11 @@ static bool can_stop_full_tick(void)
return false;
}
+ if (!arch_irq_work_has_interrupt()) {
+ trace_tick_stop(0, "missing irq work interrupt\n");
+ return false;
+ }
+
/* sched_clock_tick() needs us? */
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
/*
@@ -209,6 +214,7 @@ static void nohz_full_kick_work_func(str
static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
.func = nohz_full_kick_work_func,
+ .flags = IRQ_WORK_HARD_IRQ,
};
/*
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1484,7 +1484,7 @@ void update_process_times(int user_tick)
scheduler_tick();
run_local_timers();
rcu_check_callbacks(user_tick);
-#ifdef CONFIG_IRQ_WORK
+#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL)
if (in_irq())
irq_work_tick();
#endif
@@ -1498,6 +1498,10 @@ static void run_timer_softirq(struct sof
{
struct tvec_base *base = this_cpu_ptr(&tvec_bases);
+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
+ irq_work_tick();
+#endif
+
if (time_after_eq(jiffies, base->timer_jiffies))
__run_timers(base);
}
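After this change, irq_work that must run in hard interrupt context even on
RT has to say so explicitly when it is set up, mirroring the rt.c hunk:

    init_irq_work(&work, my_callback);
    work.flags |= IRQ_WORK_HARD_IRQ; /* stay on raised_list: runs from
                                      * the IPI/tick, never from the
                                      * timer softirq                 */
    irq_work_queue(&work);

Anything without the flag lands on lazy_list on RT and is executed from
TIMER_SOFTIRQ, which the irq_work_tick()/irq_work_tick_soft() split shown in
the previous file accommodates.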

View File

@ -0,0 +1,36 @@
Subject: jump-label: disable if stop_machine() is used
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 08 Jul 2015 17:14:48 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Some architectures use stop_machine() while switching the opcode, which
leads to latency spikes.
The architectures which use stop_machine() at the moment:
- ARM stop machine
- s390 stop machine
The architectures which use other sorcery:
- MIPS
- X86
- powerpc
- sparc
- arm64
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[bigeasy: only ARM for now]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/arm/Kconfig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -33,7 +33,7 @@ config ARM
select HARDIRQS_SW_RESEND
select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
- select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32
+ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !PREEMPT_RT_BASE
select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32
select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
select HAVE_ARCH_TRACEHOOK
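
For context (an illustration, not part of the patch): jump labels rewrite branch sites in kernel text when a static key is toggled, and on ARM that rewrite goes through stop_machine(), which is the latency source being avoided here. A minimal sketch of the API involved, with hypothetical names:

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(my_feature);

extern void slow_path(void);	/* hypothetical */

void hot_path(void)
{
	/* compiled as a patchable nop/branch, not a load-and-test */
	if (static_branch_unlikely(&my_feature))
		slow_path();
}

void enable_feature(void)
{
	/* rewrites the branch site; on ARM this may use stop_machine() */
	static_branch_enable(&my_feature);
}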


@ -0,0 +1,34 @@
Subject: kconfig: Disable config options which are not RT compatible
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 24 Jul 2011 12:11:43 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Disable stuff which is known to have issues on RT
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/Kconfig | 1 +
mm/Kconfig | 2 +-
2 files changed, 2 insertions(+), 1 deletion(-)
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -9,6 +9,7 @@ config OPROFILE
tristate "OProfile system profiling"
depends on PROFILING
depends on HAVE_OPROFILE
+ depends on !PREEMPT_RT_FULL
select RING_BUFFER
select RING_BUFFER_ALLOW_SWAP
help
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -392,7 +392,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
config TRANSPARENT_HUGEPAGE
bool "Transparent Hugepage Support"
- depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL
select COMPACTION
help
Transparent Hugepages allows the kernel to use huge pages and


@ -0,0 +1,59 @@
Subject: kconfig: Add PREEMPT_RT_FULL
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 29 Jun 2011 14:58:57 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
Introduce the final symbol for PREEMPT_RT_FULL.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
init/Makefile | 2 +-
kernel/Kconfig.preempt | 8 ++++++++
scripts/mkcompile_h | 4 +++-
3 files changed, 12 insertions(+), 2 deletions(-)
--- a/init/Makefile
+++ b/init/Makefile
@@ -33,4 +33,4 @@ mounts-$(CONFIG_BLK_DEV_MD) += do_mounts
include/generated/compile.h: FORCE
@$($(quiet)chk_compile.h)
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)"
+ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)"
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -67,6 +67,14 @@ config PREEMPT_RTB
enables changes which are preliminary for the full preemptible
RT kernel.
+config PREEMPT_RT_FULL
+ bool "Fully Preemptible Kernel (RT)"
+ depends on IRQ_FORCED_THREADING
+ select PREEMPT_RT_BASE
+ select PREEMPT_RCU
+ help
+ All and everything
+
endchoice
config PREEMPT_COUNT
--- a/scripts/mkcompile_h
+++ b/scripts/mkcompile_h
@@ -4,7 +4,8 @@ TARGET=$1
ARCH=$2
SMP=$3
PREEMPT=$4
-CC=$5
+RT=$5
+CC=$6
vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; }
@@ -57,6 +58,7 @@ UTS_VERSION="#$VERSION"
CONFIG_FLAGS=""
if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
+if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi
UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP"
# Truncate to maximum length


@ -0,0 +1,125 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 19 Mar 2013 14:44:30 +0100
Subject: [PATCH] kernel/SRCU: provide a static initializer
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
There are static-initializer macros for three of the four
possible notifier types, namely:
ATOMIC_NOTIFIER_HEAD()
BLOCKING_NOTIFIER_HEAD()
RAW_NOTIFIER_HEAD()
This patch provides a static initializer for the fourth type to make the
set complete.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/notifier.h | 34 +++++++++++++++++++++++++---------
include/linux/srcu.h | 6 +++---
2 files changed, 28 insertions(+), 12 deletions(-)
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -6,7 +6,7 @@
*
* Alan Cox <Alan.Cox@linux.org>
*/
-
+
#ifndef _LINUX_NOTIFIER_H
#define _LINUX_NOTIFIER_H
#include <linux/errno.h>
@@ -42,9 +42,7 @@
* in srcu_notifier_call_chain(): no cache bounces and no memory barriers.
* As compensation, srcu_notifier_chain_unregister() is rather expensive.
* SRCU notifier chains should be used when the chain will be called very
- * often but notifier_blocks will seldom be removed. Also, SRCU notifier
- * chains are slightly more difficult to use because they require special
- * runtime initialization.
+ * often but notifier_blocks will seldom be removed.
*/
typedef int (*notifier_fn_t)(struct notifier_block *nb,
@@ -88,7 +86,7 @@ struct srcu_notifier_head {
(name)->head = NULL; \
} while (0)
-/* srcu_notifier_heads must be initialized and cleaned up dynamically */
+/* srcu_notifier_heads must be cleaned up dynamically */
extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
#define srcu_cleanup_notifier_head(name) \
cleanup_srcu_struct(&(name)->srcu);
@@ -101,7 +99,13 @@ extern void srcu_init_notifier_head(stru
.head = NULL }
#define RAW_NOTIFIER_INIT(name) { \
.head = NULL }
-/* srcu_notifier_heads cannot be initialized statically */
+
+#define SRCU_NOTIFIER_INIT(name, pcpu) \
+ { \
+ .mutex = __MUTEX_INITIALIZER(name.mutex), \
+ .head = NULL, \
+ .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \
+ }
#define ATOMIC_NOTIFIER_HEAD(name) \
struct atomic_notifier_head name = \
@@ -113,6 +117,18 @@ extern void srcu_init_notifier_head(stru
struct raw_notifier_head name = \
RAW_NOTIFIER_INIT(name)
+#define _SRCU_NOTIFIER_HEAD(name, mod) \
+ static DEFINE_PER_CPU(struct srcu_struct_array, \
+ name##_head_srcu_array); \
+ mod struct srcu_notifier_head name = \
+ SRCU_NOTIFIER_INIT(name, name##_head_srcu_array)
+
+#define SRCU_NOTIFIER_HEAD(name) \
+ _SRCU_NOTIFIER_HEAD(name, )
+
+#define SRCU_NOTIFIER_HEAD_STATIC(name) \
+ _SRCU_NOTIFIER_HEAD(name, static)
+
#ifdef __KERNEL__
extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
@@ -182,12 +198,12 @@ static inline int notifier_to_errno(int
/*
* Declared notifiers so far. I can imagine quite a few more chains
- * over time (eg laptop power reset chains, reboot chain (to clean
+ * over time (eg laptop power reset chains, reboot chain (to clean
* device units up), device [un]mount chain, module load/unload chain,
- * low memory chain, screenblank chain (for plug in modular screenblankers)
+ * low memory chain, screenblank chain (for plug in modular screenblankers)
* VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
*/
-
+
/* CPU notfiers are defined in include/linux/cpu.h. */
/* netdevice notifiers are defined in include/linux/netdevice.h */
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -84,10 +84,10 @@ int init_srcu_struct(struct srcu_struct
void process_srcu(struct work_struct *work);
-#define __SRCU_STRUCT_INIT(name) \
+#define __SRCU_STRUCT_INIT(name, pcpu_name) \
{ \
.completed = -300, \
- .per_cpu_ref = &name##_srcu_array, \
+ .per_cpu_ref = &pcpu_name, \
.queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \
.running = false, \
.batch_queue = RCU_BATCH_INIT(name.batch_queue), \
@@ -104,7 +104,7 @@ void process_srcu(struct work_struct *wo
*/
#define __DEFINE_SRCU(name, is_static) \
static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
- is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
+ is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_array)
#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)


@ -0,0 +1,86 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 7 Jun 2013 22:37:06 +0200
Subject: kernel/cpu: fix cpu down problem if kthread's cpu is going down
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
If a kthread is pinned to CPUx and CPUx is going down, then we get into
trouble:
- first the unplug thread is created
- it will set itself to hp->unplug. As a result, every task that is
going to take a lock has to leave the CPU.
- the CPU_DOWN_PREPARE notifiers are started. The worker thread will
start a new process for the "high priority worker".
Now the kthread would like to take a lock, but since it can't leave the
CPU it will never complete its task.
We could fire the unplug thread after the notifiers, but then the CPU is
no longer marked "online" and the unplug thread would run on CPU0, which
was fixed before :)
So instead the unplug thread is started and kept waiting until the
notifiers complete their work.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/cpu.c | 15 +++++++++++++--
1 file changed, 13 insertions(+), 2 deletions(-)
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -109,6 +109,7 @@ struct hotplug_pcp {
int refcount;
int grab_lock;
struct completion synced;
+ struct completion unplug_wait;
#ifdef CONFIG_PREEMPT_RT_FULL
/*
* Note, on PREEMPT_RT, the hotplug lock must save the state of
@@ -212,6 +213,7 @@ static int sync_unplug_thread(void *data
{
struct hotplug_pcp *hp = data;
+ wait_for_completion(&hp->unplug_wait);
preempt_disable();
hp->unplug = current;
wait_for_pinned_cpus(hp);
@@ -277,6 +279,14 @@ static void __cpu_unplug_sync(struct hot
wait_for_completion(&hp->synced);
}
+static void __cpu_unplug_wait(unsigned int cpu)
+{
+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
+
+ complete(&hp->unplug_wait);
+ wait_for_completion(&hp->synced);
+}
+
/*
* Start the sync_unplug_thread on the target cpu and wait for it to
* complete.
@@ -300,6 +310,7 @@ static int cpu_unplug_begin(unsigned int
tell_sched_cpu_down_begin(cpu);
init_completion(&hp->synced);
+ init_completion(&hp->unplug_wait);
hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
if (IS_ERR(hp->sync_tsk)) {
@@ -315,8 +326,7 @@ static int cpu_unplug_begin(unsigned int
* wait for tasks that are going to enter these sections and
* we must not have them block.
*/
- __cpu_unplug_sync(hp);
-
+ wake_up_process(hp->sync_tsk);
return 0;
}
@@ -671,6 +681,7 @@ static int _cpu_down(unsigned int cpu, i
else
synchronize_rcu();
+ __cpu_unplug_wait(cpu);
smpboot_park_threads(cpu);
/* Notifiers are done. Don't let any more tasks pin this CPU. */
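
Condensed, the handshake the patch introduces looks like the sketch below (a simplified restatement, not the actual diff): the sync thread is created early but parks on unplug_wait, and the down path releases it only after the notifiers have run.

#include <linux/completion.h>

/* simplified sketch of the two-completion handshake */
static int sync_unplug_thread_sketch(void *data)
{
	struct hotplug_pcp *hp = data;

	wait_for_completion(&hp->unplug_wait);	/* park until notifiers are done */
	/* ... set hp->unplug and wait for pinned tasks to drain ... */
	complete(&hp->synced);
	return 0;
}

static void cpu_down_sketch(struct hotplug_pcp *hp)
{
	/* CPU_DOWN_PREPARE notifiers have finished: release the thread */
	complete(&hp->unplug_wait);
	wait_for_completion(&hp->synced);
}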


@ -0,0 +1,59 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 14 Jun 2013 17:16:35 +0200
Subject: kernel/hotplug: restore original cpu mask on cpu/down
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
If a task which is allowed to run only on CPU X puts CPU Y down, then
afterwards it is left allowed to run on all CPUs except CPU Y, instead of
being restored to its original mask, once it returns from the kernel.
This patch ensures that we don't lose the initial setting unless the CPU
the task is running on is going down.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/cpu.c | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -629,6 +629,7 @@ static int _cpu_down(unsigned int cpu, i
.hcpu = hcpu,
};
cpumask_var_t cpumask;
+ cpumask_var_t cpumask_org;
if (num_online_cpus() == 1)
return -EBUSY;
@@ -639,6 +640,12 @@ static int _cpu_down(unsigned int cpu, i
/* Move the downtaker off the unplug cpu */
if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
return -ENOMEM;
+ if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) {
+ free_cpumask_var(cpumask);
+ return -ENOMEM;
+ }
+
+ cpumask_copy(cpumask_org, tsk_cpus_allowed(current));
cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
set_cpus_allowed_ptr(current, cpumask);
free_cpumask_var(cpumask);
@@ -647,7 +654,8 @@ static int _cpu_down(unsigned int cpu, i
if (mycpu == cpu) {
printk(KERN_ERR "Yuck! Still on unplug CPU\n!");
migrate_enable();
- return -EBUSY;
+ err = -EBUSY;
+ goto restore_cpus;
}
cpu_hotplug_begin();
@@ -737,6 +745,9 @@ static int _cpu_down(unsigned int cpu, i
cpu_hotplug_done();
if (!err)
cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
+restore_cpus:
+ set_cpus_allowed_ptr(current, cpumask_org);
+ free_cpumask_var(cpumask_org);
return err;
}
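
The underlying pattern, restated as a self-contained sketch (hypothetical helper, simplified error handling): save the caller's affinity mask, migrate off the dying CPU, and restore the saved mask on every exit path.

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/sched.h>

static int run_off_cpu(int dying_cpu)
{
	cpumask_var_t mask, saved;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	if (!alloc_cpumask_var(&saved, GFP_KERNEL)) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	cpumask_copy(saved, tsk_cpus_allowed(current));	/* remember */
	cpumask_andnot(mask, cpu_online_mask, cpumask_of(dying_cpu));
	set_cpus_allowed_ptr(current, mask);		/* migrate away */

	/* ... work that must not run on dying_cpu ... */

	set_cpus_allowed_ptr(current, saved);		/* restore */
	free_cpumask_var(saved);
	free_cpumask_var(mask);
	return 0;
}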


@ -0,0 +1,102 @@
From: Jason Wessel <jason.wessel@windriver.com>
Date: Thu, 28 Jul 2011 12:42:23 -0500
Subject: kgdb/serial: Short term workaround
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4-rc6-rt1/patches-4.4-rc6-rt1.tar.xz
On 07/27/2011 04:37 PM, Thomas Gleixner wrote:
> - KGDB (not yet disabled) is reportedly unusable on -rt right now due
> to missing hacks in the console locking which I dropped on purpose.
>
To work around this in the short term you can use this patch, in
addition to the clocksource watchdog patch that Thomas brewed up.
Comments are welcome of course. Ultimately the right solution is to
change the separation between the console and the HW to have a polled mode
+ work queue so as not to introduce any kind of latency.
Thanks,
Jason.
---
drivers/tty/serial/8250/8250_port.c | 3 ++-
include/linux/kdb.h | 2 ++
kernel/debug/kdb/kdb_io.c | 6 ++----
3 files changed, 6 insertions(+), 5 deletions(-)
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -35,6 +35,7 @@
#include <linux/nmi.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/kdb.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
@@ -2851,7 +2852,7 @@ void serial8250_console_write(struct uar
if (port->sysrq)
locked = 0;
- else if (oops_in_progress)
+ else if (oops_in_progress || in_kdb_printk())
locked = spin_trylock_irqsave(&port->lock, flags);
else
spin_lock_irqsave(&port->lock, flags);
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -167,6 +167,7 @@ extern __printf(2, 0) int vkdb_printf(en
extern __printf(1, 2) int kdb_printf(const char *, ...);
typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...);
+#define in_kdb_printk() (kdb_trap_printk)
extern void kdb_init(int level);
/* Access to kdb specific polling devices */
@@ -201,6 +202,7 @@ extern int kdb_register_flags(char *, kd
extern int kdb_unregister(char *);
#else /* ! CONFIG_KGDB_KDB */
static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
+#define in_kdb_printk() (0)
static inline void kdb_init(int level) {}
static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
char *help, short minlen) { return 0; }
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -554,7 +554,6 @@ int vkdb_printf(enum kdb_msgsrc src, con
int linecount;
int colcount;
int logging, saved_loglevel = 0;
- int saved_trap_printk;
int got_printf_lock = 0;
int retlen = 0;
int fnd, len;
@@ -565,8 +564,6 @@ int vkdb_printf(enum kdb_msgsrc src, con
unsigned long uninitialized_var(flags);
preempt_disable();
- saved_trap_printk = kdb_trap_printk;
- kdb_trap_printk = 0;
/* Serialize kdb_printf if multiple cpus try to write at once.
* But if any cpu goes recursive in kdb, just print the output,
@@ -855,7 +852,6 @@ int vkdb_printf(enum kdb_msgsrc src, con
} else {
__release(kdb_printf_lock);
}
- kdb_trap_printk = saved_trap_printk;
preempt_enable();
return retlen;
}
@@ -865,9 +861,11 @@ int kdb_printf(const char *fmt, ...)
va_list ap;
int r;
+ kdb_trap_printk++;
va_start(ap, fmt);
r = vkdb_printf(KDB_MSGSRC_INTERNAL, fmt, ap);
va_end(ap);
+ kdb_trap_printk--;
return r;
}
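
The locking pattern being extended, as a standalone sketch (simplified from serial8250_console_write): when the writer may already hold the port lock — during an oops or, with this patch, kdb output — only try-lock, and skip the unlock if the lock was never taken.

#include <linux/kdb.h>
#include <linux/serial_core.h>

static void console_write_sketch(struct uart_port *port)
{
	unsigned long flags;
	int locked = 1;

	if (port->sysrq)
		locked = 0;	/* lock is already held by the sysrq path */
	else if (oops_in_progress || in_kdb_printk())
		locked = spin_trylock_irqsave(&port->lock, flags);
	else
		spin_lock_irqsave(&port->lock, flags);

	/* ... emit characters to the hardware ... */

	if (locked)
		spin_unlock_irqrestore(&port->lock, flags);
}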

File diff suppressed because it is too large.

Some files were not shown because too many files have changed in this diff.