[rt] Refresh "workqueue: Use normal rcu" for context changes in 4.19.112

Salvatore Bonaccorso 2020-04-09 21:48:22 +02:00
parent f3ef27b42b
commit c65add4845
2 changed files with 32 additions and 36 deletions

debian/changelog

@@ -1423,6 +1423,7 @@ linux (4.19.112-1) UNRELEASED; urgency=medium
* [rt] Update to 4.19.106-rt45
* [rt] Refresh "workqueue: Use normal rcu" for context changes in 4.19.111
* [rt] Update to 4.19.106-rt46
* [rt] Refresh "workqueue: Use normal rcu" for context changes in 4.19.112
[ Ben Hutchings ]
* [x86] Drop "Add a SysRq option to lift kernel lockdown" (Closes: #947021)


@@ -14,11 +14,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kernel/workqueue.c | 95 +++++++++++++++++++++++++---------------------
1 file changed, 52 insertions(+), 43 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 493908464b9e..544007905706 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -127,7 +127,7 @@ enum {
@@ -127,7 +127,7 @@
*
* PL: wq_pool_mutex protected.
*
@@ -27,7 +25,7 @@ index 493908464b9e..544007905706 100644
*
* PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
*
@@ -136,7 +136,7 @@ enum {
@@ -136,7 +136,7 @@
*
* WQ: wq->mutex protected.
*
@@ -36,7 +34,7 @@ index 493908464b9e..544007905706 100644
*
* MD: wq_mayday_lock protected.
*/
@@ -183,7 +183,7 @@ struct worker_pool {
@@ -183,7 +183,7 @@
atomic_t nr_running ____cacheline_aligned_in_smp;
/*
@@ -45,7 +43,7 @@ index 493908464b9e..544007905706 100644
* from get_work_pool().
*/
struct rcu_head rcu;
@@ -212,7 +212,7 @@ struct pool_workqueue {
@@ -212,7 +212,7 @@
/*
* Release of unbound pwq is punted to system_wq. See put_pwq()
* and pwq_unbound_release_workfn() for details. pool_workqueue
@@ -54,7 +52,7 @@ index 493908464b9e..544007905706 100644
* determined without grabbing wq->mutex.
*/
struct work_struct unbound_release_work;
@@ -357,20 +357,20 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
@@ -357,20 +357,20 @@
#include <trace/events/workqueue.h>
#define assert_rcu_or_pool_mutex() \
@@ -81,7 +79,7 @@ index 493908464b9e..544007905706 100644
#define for_each_cpu_worker_pool(pool, cpu) \
for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
@@ -382,7 +382,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
@@ -382,7 +382,7 @@
* @pool: iteration cursor
* @pi: integer used for iteration
*
@@ -90,7 +88,7 @@ index 493908464b9e..544007905706 100644
* locked. If the pool needs to be used beyond the locking in effect, the
* caller is responsible for guaranteeing that the pool stays online.
*
@@ -414,7 +414,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
@@ -414,7 +414,7 @@
* @pwq: iteration cursor
* @wq: the target workqueue
*
@@ -99,7 +97,7 @@ index 493908464b9e..544007905706 100644
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
*
@@ -550,7 +550,7 @@ static int worker_pool_assign_id(struct worker_pool *pool)
@@ -550,7 +550,7 @@
* @wq: the target workqueue
* @node: the node ID
*
@@ -108,7 +106,7 @@ index 493908464b9e..544007905706 100644
* read locked.
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
@@ -694,8 +694,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
@@ -694,8 +694,8 @@
* @work: the work item of interest
*
* Pools are created and destroyed under wq_pool_mutex, and allows read
@@ -119,7 +117,7 @@ index 493908464b9e..544007905706 100644
*
* All fields of the returned pool are accessible as long as the above
* mentioned locking is in effect. If the returned pool needs to be used
@@ -1100,7 +1100,7 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
@@ -1100,7 +1100,7 @@
{
if (pwq) {
/*
@@ -128,7 +126,7 @@ index 493908464b9e..544007905706 100644
* following lock operations are safe.
*/
spin_lock_irq(&pwq->pool->lock);
@@ -1228,6 +1228,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
@@ -1228,6 +1228,7 @@
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
return 0;
@@ -136,7 +134,7 @@ index 493908464b9e..544007905706 100644
/*
* The queueing is in progress, or it is already queued. Try to
* steal it from ->worklist without clearing WORK_STRUCT_PENDING.
@@ -1266,10 +1267,12 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
@@ -1266,10 +1267,12 @@
set_work_pool_and_keep_pending(work, pool->id);
spin_unlock(&pool->lock);
@@ -149,15 +147,15 @@ index 493908464b9e..544007905706 100644
local_irq_restore(*flags);
if (work_is_canceling(work))
return -ENOENT;
@@ -1383,6 +1386,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
@@ -1383,6 +1386,7 @@
if (unlikely(wq->flags & __WQ_DRAINING) &&
WARN_ON_ONCE(!is_chained_work(wq)))
return;
+ rcu_read_lock();
retry:
if (req_cpu == WORK_CPU_UNBOUND)
cpu = wq_select_unbound_cpu(raw_smp_processor_id());
@@ -1439,10 +1443,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
/* pwq which will be used unless @work is executing elsewhere */
if (wq->flags & WQ_UNBOUND) {
@@ -1441,10 +1445,8 @@
/* pwq determined, queue */
trace_workqueue_queue_work(req_cpu, pwq, work);
@ -170,7 +168,7 @@ index 493908464b9e..544007905706 100644
pwq->nr_in_flight[pwq->work_color]++;
work_flags = work_color_to_flags(pwq->work_color);
@@ -1460,7 +1462,9 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
@@ -1462,7 +1464,9 @@
insert_work(pwq, work, worklist, work_flags);
@ -180,7 +178,7 @@ index 493908464b9e..544007905706 100644
}
/**
@@ -2861,14 +2865,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
@@ -2863,14 +2867,14 @@
might_sleep();
@@ -198,7 +196,7 @@ index 493908464b9e..544007905706 100644
/* see the comment in try_to_grab_pending() with the same code */
pwq = get_work_pwq(work);
if (pwq) {
@@ -2900,10 +2904,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
@@ -2902,10 +2906,11 @@
lock_map_acquire(&pwq->wq->lockdep_map);
lock_map_release(&pwq->wq->lockdep_map);
}
@@ -211,7 +209,7 @@ index 493908464b9e..544007905706 100644
return false;
}
@@ -3350,7 +3355,7 @@ static void rcu_free_pool(struct rcu_head *rcu)
@@ -3352,7 +3357,7 @@
* put_unbound_pool - put a worker_pool
* @pool: worker_pool to put
*
@@ -220,7 +218,7 @@ index 493908464b9e..544007905706 100644
* safe manner. get_unbound_pool() calls this function on its failure path
* and this function should be able to release pools which went through,
* successfully or not, init_worker_pool().
@@ -3404,8 +3409,8 @@ static void put_unbound_pool(struct worker_pool *pool)
@@ -3406,8 +3411,8 @@
del_timer_sync(&pool->idle_timer);
del_timer_sync(&pool->mayday_timer);
@@ -231,7 +229,7 @@ index 493908464b9e..544007905706 100644
}
/**
@@ -3512,14 +3517,14 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
@@ -3514,14 +3519,14 @@
put_unbound_pool(pool);
mutex_unlock(&wq_pool_mutex);
@@ -248,7 +246,7 @@ index 493908464b9e..544007905706 100644
}
/**
@@ -4219,7 +4224,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
@@ -4221,7 +4226,7 @@
* The base ref is never dropped on per-cpu pwqs. Directly
* schedule RCU free.
*/
@@ -257,7 +255,7 @@ index 493908464b9e..544007905706 100644
} else {
/*
* We're the sole accessor of @wq at this point. Directly
@@ -4329,7 +4334,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
@@ -4331,7 +4336,8 @@
struct pool_workqueue *pwq;
bool ret;
@@ -267,7 +265,7 @@ index 493908464b9e..544007905706 100644
if (cpu == WORK_CPU_UNBOUND)
cpu = smp_processor_id();
@@ -4340,7 +4346,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
@@ -4342,7 +4348,8 @@
pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
ret = !list_empty(&pwq->delayed_works);
@@ -277,7 +275,7 @@ index 493908464b9e..544007905706 100644
return ret;
}
@@ -4366,15 +4373,15 @@ unsigned int work_busy(struct work_struct *work)
@@ -4368,15 +4375,15 @@
if (work_pending(work))
ret |= WORK_BUSY_PENDING;
@@ -297,7 +295,7 @@ index 493908464b9e..544007905706 100644
return ret;
}
@@ -4559,7 +4566,7 @@ void show_workqueue_state(void)
@@ -4561,7 +4568,7 @@
unsigned long flags;
int pi;
@@ -306,7 +304,7 @@ index 493908464b9e..544007905706 100644
pr_info("Showing busy workqueues and worker pools:\n");
@@ -4624,7 +4631,7 @@ void show_workqueue_state(void)
@@ -4626,7 +4633,7 @@
touch_nmi_watchdog();
}
@@ -315,7 +313,7 @@ index 493908464b9e..544007905706 100644
}
/* used to show worker information through /proc/PID/{comm,stat,status} */
@@ -5011,16 +5018,16 @@ bool freeze_workqueues_busy(void)
@@ -5013,16 +5020,16 @@
* nr_active is monotonically decreasing. It's safe
* to peek without lock.
*/
@@ -335,7 +333,7 @@ index 493908464b9e..544007905706 100644
}
out_unlock:
mutex_unlock(&wq_pool_mutex);
@@ -5215,7 +5222,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
@@ -5217,7 +5224,8 @@
const char *delim = "";
int node, written = 0;
@@ -345,7 +343,7 @@ index 493908464b9e..544007905706 100644
for_each_node(node) {
written += scnprintf(buf + written, PAGE_SIZE - written,
"%s%d:%d", delim, node,
@@ -5223,7 +5231,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
@@ -5225,7 +5233,8 @@
delim = " ";
}
written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
@ -355,6 +353,3 @@ index 493908464b9e..544007905706 100644
return written;
}
--
2.25.1
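
For orientation: the patch being refreshed ("workqueue: Use normal rcu") is unchanged in substance by this commit; only its hunk offsets and context lines are regenerated against 4.19.112. Its point, visible in the hunks above, is to protect workqueue pool/pwq lookups with explicit rcu_read_lock()/rcu_read_unlock() instead of relying on implicit sched-RCU, and to defer frees via call_rcu(). Below is a minimal kernel-style sketch of that pattern; my_pool, use_pool, free_pool_rcu and destroy_pool are illustrative names only, not functions from kernel/workqueue.c.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_pool {
	spinlock_t lock;
	struct rcu_head rcu;
};

/* Reader side: an explicit, preemptible RCU read section keeps the pool
 * from being freed while it is looked up and its lock is taken. */
static void use_pool(struct my_pool *pool)
{
	rcu_read_lock();
	spin_lock_irq(&pool->lock);
	/* ... queue or inspect work here ... */
	spin_unlock_irq(&pool->lock);
	rcu_read_unlock();
}

static void free_pool_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct my_pool, rcu));
}

/* Writer side: defer the free until all RCU readers are done. */
static void destroy_pool(struct my_pool *pool)
{
	call_rcu(&pool->rcu, free_pool_rcu);
}

This matters on PREEMPT_RT because pool->lock becomes a sleeping lock there, so preempt- or irq-disabled regions can no longer double as sched-RCU read-side sections; explicit, preemptible RCU read sections as sketched above still work.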