Subject: stop_machine: Use raw spinlocks
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 29 Jun 2011 11:01:51 +0200
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9-rt1.tar.xz

Use raw-locks in stomp_machine() to allow locking in irq-off regions.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 kernel/stop_machine.c | 34 +++++++++++++---------------------
 1 file changed, 13 insertions(+), 21 deletions(-)

--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -36,7 +36,7 @@ struct cpu_stop_done {
 struct cpu_stopper {
 	struct task_struct	*thread;
 
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 	bool			enabled;	/* is this stopper enabled? */
 	struct list_head	works;		/* list of pending works */
 
@@ -78,14 +78,14 @@ static bool cpu_stop_queue_work(unsigned
 	unsigned long flags;
 	bool enabled;
 
-	spin_lock_irqsave(&stopper->lock, flags);
+	raw_spin_lock_irqsave(&stopper->lock, flags);
 	enabled = stopper->enabled;
 	if (enabled)
 		__cpu_stop_queue_work(stopper, work);
 	else if (work->done)
 		cpu_stop_signal_done(work->done);
-	spin_unlock_irqrestore(&stopper->lock, flags);
+	raw_spin_unlock_irqrestore(&stopper->lock, flags);
 
 	return enabled;
 }
@@ -231,8 +231,8 @@ static int cpu_stop_queue_two_works(int
 	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
 	int err;
 retry:
-	spin_lock_irq(&stopper1->lock);
-	spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
+	raw_spin_lock_irq(&stopper1->lock);
+	raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
 
 	err = -ENOENT;
 	if (!stopper1->enabled || !stopper2->enabled)
@@ -255,8 +255,8 @@ static int cpu_stop_queue_two_works(int
 	__cpu_stop_queue_work(stopper1, work1);
 	__cpu_stop_queue_work(stopper2, work2);
 unlock:
-	spin_unlock(&stopper2->lock);
-	spin_unlock_irq(&stopper1->lock);
+	raw_spin_unlock(&stopper2->lock);
+	raw_spin_unlock_irq(&stopper1->lock);
 
 	if (unlikely(err == -EDEADLK)) {
 		while (stop_cpus_in_progress)
@@ -448,9 +448,9 @@ static int cpu_stop_should_run(unsigned
 	unsigned long flags;
 	int run;
 
-	spin_lock_irqsave(&stopper->lock, flags);
+	raw_spin_lock_irqsave(&stopper->lock, flags);
 	run = !list_empty(&stopper->works);
-	spin_unlock_irqrestore(&stopper->lock, flags);
+	raw_spin_unlock_irqrestore(&stopper->lock, flags);
 	return run;
 }
 
@@ -461,13 +461,13 @@ static void cpu_stopper_thread(unsigned
 
 repeat:
 	work = NULL;
-	spin_lock_irq(&stopper->lock);
+	raw_spin_lock_irq(&stopper->lock);
 	if (!list_empty(&stopper->works)) {
 		work = list_first_entry(&stopper->works,
 					struct cpu_stop_work, list);
 		list_del_init(&work->list);
 	}
-	spin_unlock_irq(&stopper->lock);
+	raw_spin_unlock_irq(&stopper->lock);
 
 	if (work) {
 		cpu_stop_fn_t fn = work->fn;
@@ -475,15 +475,7 @@ static void cpu_stopper_thread(unsigned
 		struct cpu_stop_done *done = work->done;
 		int ret;
 
-		/*
-		 * Wait until the stopper finished scheduling on all
-		 * cpus
-		 */
-		lg_global_lock(&stop_cpus_lock);
-		/*
-		 * Let other cpu threads continue as well
-		 */
-		lg_global_unlock(&stop_cpus_lock);
+		/* XXX */
 
 		/* cpu stop callbacks must not sleep, make in_atomic() == T */
 		preempt_count_inc();
@@ -551,7 +543,7 @@ static int __init cpu_stop_init(void)
 	for_each_possible_cpu(cpu) {
 		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
 
-		spin_lock_init(&stopper->lock);
+		raw_spin_lock_init(&stopper->lock);
 		INIT_LIST_HEAD(&stopper->works);
 	}
 