From a3e37a2d98132a499e9a17f71cb2b3ef0a1f9c2e Mon Sep 17 00:00:00 2001
Message-Id: <a3e37a2d98132a499e9a17f71cb2b3ef0a1f9c2e.1599166691.git.zanussi@kernel.org>
In-Reply-To: <56457dc415803c8abc5acb513ada877a79596f05.1599166690.git.zanussi@kernel.org>
References: <56457dc415803c8abc5acb513ada877a79596f05.1599166690.git.zanussi@kernel.org>
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 17 Jul 2011 21:42:26 +0200
Subject: [PATCH 191/333] workqueue: Use local irq lock instead of irq disable
 regions
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.142-rt63.tar.xz

Use a local_irq_lock as a replacement for irq off regions. We keep the
semantic of irq-off in regard to the pool->lock and remain preemptible.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 kernel/workqueue.c | 45 ++++++++++++++++++++++++++++++---------------
 1 file changed, 30 insertions(+), 15 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 29dc939dad4e..f34586370fcb 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -49,6 +49,7 @@
 #include <linux/uaccess.h>
 #include <linux/sched/isolation.h>
 #include <linux/nmi.h>
+#include <linux/locallock.h>
 
 #include "workqueue_internal.h"
 
@@ -350,6 +351,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient_wq);
 struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
 
+static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock);
+
 static int worker_thread(void *__worker);
 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
 
@@ -1103,9 +1106,11 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
		 * As both pwqs and pools are RCU protected, the
		 * following lock operations are safe.
		 */
-		spin_lock_irq(&pwq->pool->lock);
+		rcu_read_lock();
+		local_spin_lock_irq(pendingb_lock, &pwq->pool->lock);
		put_pwq(pwq);
-		spin_unlock_irq(&pwq->pool->lock);
+		local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock);
+		rcu_read_unlock();
	}
 }
 
@@ -1209,7 +1214,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
	struct worker_pool *pool;
	struct pool_workqueue *pwq;
 
-	local_irq_save(*flags);
+	local_lock_irqsave(pendingb_lock, *flags);
 
	/* try to steal the timer if it exists */
	if (is_dwork) {
@@ -1273,7 +1278,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
	spin_unlock(&pool->lock);
 fail:
	rcu_read_unlock();
-	local_irq_restore(*flags);
+	local_unlock_irqrestore(pendingb_lock, *flags);
	if (work_is_canceling(work))
		return -ENOENT;
	cpu_relax();
@@ -1378,7 +1383,13 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
	 * queued or lose PENDING. Grabbing PENDING and queueing should
	 * happen with IRQ disabled.
	 */
+#ifndef CONFIG_PREEMPT_RT_FULL
+	/*
+	 * nort: On RT the "interrupts-disabled" rule has been replaced with
+	 * pendingb_lock.
+	 */
	lockdep_assert_irqs_disabled();
+#endif
 
	debug_work_activate(work);
 
@@ -1486,14 +1497,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
	bool ret = false;
	unsigned long flags;
 
-	local_irq_save(flags);
+	local_lock_irqsave(pendingb_lock,flags);
 
	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = true;
	}
 
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pendingb_lock, flags);
	return ret;
 }
 EXPORT_SYMBOL(queue_work_on);
@@ -1502,8 +1513,11 @@ void delayed_work_timer_fn(struct timer_list *t)
 {
	struct delayed_work *dwork = from_timer(dwork, t, timer);
 
+	/* XXX */
+	/* local_lock(pendingb_lock); */
	/* should have been called from irqsafe timer with irq already off */
	__queue_work(dwork->cpu, dwork->wq, &dwork->work);
+	/* local_unlock(pendingb_lock); */
 }
 EXPORT_SYMBOL(delayed_work_timer_fn);
 
@@ -1558,14 +1572,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
	unsigned long flags;
 
	/* read the comment in __queue_work() */
-	local_irq_save(flags);
+	local_lock_irqsave(pendingb_lock, flags);
 
	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_delayed_work(cpu, wq, dwork, delay);
		ret = true;
	}
 
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pendingb_lock, flags);
	return ret;
 }
 EXPORT_SYMBOL(queue_delayed_work_on);
@@ -1600,7 +1614,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
 
	if (likely(ret >= 0)) {
		__queue_delayed_work(cpu, wq, dwork, delay);
-		local_irq_restore(flags);
+		local_unlock_irqrestore(pendingb_lock, flags);
	}
 
	/* -ENOENT from try_to_grab_pending() becomes %true */
@@ -1611,11 +1625,12 @@ EXPORT_SYMBOL_GPL(mod_delayed_work_on);
 static void rcu_work_rcufn(struct rcu_head *rcu)
 {
	struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
+	unsigned long flags;
 
	/* read the comment in __queue_work() */
-	local_irq_disable();
+	local_lock_irqsave(pendingb_lock, flags);
	__queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
-	local_irq_enable();
+	local_unlock_irqrestore(pendingb_lock, flags);
 }
 
 /**
@@ -3010,7 +3025,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 
	/* tell other tasks trying to grab @work to back off */
	mark_work_canceling(work);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pendingb_lock, flags);
 
	/*
	 * This allows canceling during early boot. We know that @work
@@ -3071,10 +3086,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
 */
 bool flush_delayed_work(struct delayed_work *dwork)
 {
-	local_irq_disable();
+	local_lock_irq(pendingb_lock);
	if (del_timer_sync(&dwork->timer))
		__queue_work(dwork->cpu, dwork->wq, &dwork->work);
-	local_irq_enable();
+	local_unlock_irq(pendingb_lock);
	return flush_work(&dwork->work);
 }
 EXPORT_SYMBOL(flush_delayed_work);
@@ -3112,7 +3127,7 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork)
		return false;
 
	set_work_pool_and_clear_pending(work, get_work_pool_id(work));
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pendingb_lock, flags);
	return ret;
 }
 
-- 
2.17.1