100 lines
3.1 KiB
Diff
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Sun, 16 Aug 2015 14:27:50 +0200
Subject: dump stack: don't disable preemption during trace
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.1-rt5.tar.xz
I see here large latencies during a stack dump on x86. The
preempt_disable() and get_cpu() should forbid moving the task to another
CPU during a stack dump and avoiding two stack traces in parallel on the
same CPU. However a stack trace from a second CPU may still happen in
parallel. Also nesting is allowed so a stack trace happens in
process-context and we may have another one from IRQ context. With migrate
disable we keep this code preemptible and allow a second backtrace on
the same CPU by another task.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 arch/x86/kernel/dumpstack_32.c |    4 ++--
 arch/x86/kernel/dumpstack_64.c |    8 ++++----
 lib/dump_stack.c               |    4 ++--
 3 files changed, 8 insertions(+), 8 deletions(-)
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -42,7 +42,7 @@ void dump_trace(struct task_struct *task
 		unsigned long *stack, unsigned long bp,
 		const struct stacktrace_ops *ops, void *data)
 {
-	const unsigned cpu = get_cpu();
+	const unsigned cpu = get_cpu_light();
 	int graph = 0;
 	u32 *prev_esp;
 
@@ -86,7 +86,7 @@ void dump_trace(struct task_struct *task
 			break;
 		touch_nmi_watchdog();
 	}
-	put_cpu();
+	put_cpu_light();
 }
 EXPORT_SYMBOL(dump_trace);

--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -152,7 +152,7 @@ void dump_trace(struct task_struct *task
 		unsigned long *stack, unsigned long bp,
 		const struct stacktrace_ops *ops, void *data)
 {
-	const unsigned cpu = get_cpu();
+	const unsigned cpu = get_cpu_light();
 	struct thread_info *tinfo;
 	unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
 	unsigned long dummy;
@@ -241,7 +241,7 @@ void dump_trace(struct task_struct *task
 	 * This handles the process stack:
 	 */
 	bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
-	put_cpu();
+	put_cpu_light();
 }
 EXPORT_SYMBOL(dump_trace);
 
@@ -255,7 +255,7 @@ show_stack_log_lvl(struct task_struct *t
 	int cpu;
 	int i;
 
-	preempt_disable();
+	migrate_disable();
 	cpu = smp_processor_id();
 
 	irq_stack_end = (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
@@ -291,7 +291,7 @@ show_stack_log_lvl(struct task_struct *t
 		pr_cont(" %016lx", *stack++);
 		touch_nmi_watchdog();
 	}
-	preempt_enable();
+	migrate_enable();
 
 	pr_cont("\n");
 	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -33,7 +33,7 @@ asmlinkage __visible void dump_stack(voi
 	 * Permit this cpu to perform nested stack dumps while serialising
 	 * against other CPUs
 	 */
-	preempt_disable();
+	migrate_disable();
 
 retry:
 	cpu = smp_processor_id();
@@ -52,7 +52,7 @@ asmlinkage __visible void dump_stack(voi
 	if (!was_locked)
 		atomic_set(&dump_lock, -1);
 
-	preempt_enable();
+	migrate_enable();
 }
 #else
 asmlinkage __visible void dump_stack(void)