[rt] Drop idle task related parts of "sched: Move mmdrop to RCU on RT"

This conflicted with, and seems to be redundant with, "sched/core: Fix
illegal RCU from offline CPUs".  I think the previous resolution of
this conflict can result in a double free of the idle task's last mm.
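A rough userspace model of that scenario (illustrative only: the struct, the
refcount handling and both cleanup paths are simplified stand-ins for the
kernel code in the hunks below; idle_last_mm and finish_cpu() are the names
used there, everything else is made up for the sketch):

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for struct mm_struct and its mm_count refcount. */
struct mm_struct {
	int mm_count;
};

/* Simplified mmdrop(): the mm is freed when the last reference goes away;
 * dropping it again afterwards is the double free. */
static void mmdrop(struct mm_struct *mm)
{
	if (--mm->mm_count == 0) {
		printf("mm %p freed\n", (void *)mm);
		return;
	}
	if (mm->mm_count < 0)
		printf("BUG: mm %p dropped again after it was freed\n", (void *)mm);
}

/* The RT patch parked the idle task's last mm in a per-CPU slot. */
static struct mm_struct *idle_last_mm;

int main(void)
{
	struct mm_struct *mm = calloc(1, sizeof(*mm));

	if (!mm)
		return 1;
	mm->mm_count = 1;		/* the idle task's last reference */

	/* Path 1 (RT patch, dropped by this commit): idle_task_exit() stashes
	 * the mm and sched_cpu_dying() later drops it. */
	idle_last_mm = mm;
	mmdrop(idle_last_mm);		/* drop #1 */
	idle_last_mm = NULL;

	/* Path 2 (backported "sched/core: Fix illegal RCU from offline CPUs"):
	 * finish_cpu() on the boot CPU also drops the idle task's last
	 * active_mm.  With both paths present, the same mm is dropped twice. */
	mmdrop(mm);			/* drop #2: the double free */

	free(mm);			/* model cleanup only */
	return 0;
}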
Ben Hutchings 2020-07-06 01:35:06 +01:00
parent 48cd3cb583
commit 87405282a9
2 changed files with 6 additions and 38 deletions

debian/changelog
@@ -1005,14 +1005,14 @@ linux (4.19.131-1) UNRELEASED; urgency=medium
     - fs/dcache: Include swait.h header
     - mm: slub: Always flush the delayed empty slubs in flush_all()
     - tasklet: Fix UP case for tasklet CHAINED state
-  * [rt] Refresh "sched: Move mmdrop to RCU on RT" for context changes in
-    4.19.129
 
   [ Ben Hutchings ]
   * [rt] Update "net: move xmit_recursion to per-task variable on -RT" to
     apply on top of "net: place xmit recursion in softnet data"
   * [rt] Drop "net: Add a mutex around devnet_rename_seq", redundant with
     "net: Introduce net_rwsem to protect net_namespace_list"
+  * [rt] Drop idle task related parts of "sched: Move mmdrop to RCU on RT",
+    redundant with "sched/core: Fix illegal RCU from offline CPUs"
 
  -- Salvatore Bonaccorso <carnil@debian.org>  Wed, 13 May 2020 17:44:43 +0200
 


@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  #include <linux/page-flags-layout.h>
  #include <linux/workqueue.h>
-@@ -489,6 +490,9 @@
+@@ -489,6 +490,9 @@ struct mm_struct {
  bool tlb_flush_batched;
  #endif
  struct uprobes_state uprobes_state;
@@ -40,7 +40,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  #endif
 --- a/include/linux/sched/mm.h
 +++ b/include/linux/sched/mm.h
-@@ -51,6 +51,17 @@
+@@ -51,6 +51,17 @@ static inline void mmdrop(struct mm_stru
  void mmdrop(struct mm_struct *mm);
@@ -60,7 +60,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  * followed by taking the mmap_sem for writing before modifying the
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
-@@ -647,6 +647,19 @@
+@@ -647,6 +647,19 @@ void __mmdrop(struct mm_struct *mm)
  }
  EXPORT_SYMBOL_GPL(__mmdrop);
@@ -82,7 +82,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  struct mm_struct *mm;
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -2729,9 +2729,13 @@
+@@ -2729,9 +2729,13 @@ static struct rq *finish_task_switch(str
  * provided by mmdrop(),
  * - a sync_core for SYNC_CORE.
  */
@@ -97,35 +97,3 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  }
  if (unlikely(prev_state == TASK_DEAD)) {
  if (prev->sched_class->task_dead)
-@@ -5602,6 +5606,8 @@
- #endif /* CONFIG_NUMA_BALANCING */
- #ifdef CONFIG_HOTPLUG_CPU
-+static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm);
-+
- /*
- * Ensure that the idle task is using init_mm right before its CPU goes
- * offline.
-@@ -5617,6 +5623,11 @@
- switch_mm(mm, &init_mm, current);
- finish_arch_post_lock_switch();
- }
-+ /*
-+ * Defer the cleanup to an alive cpu. On RT we can neither
-+ * call mmdrop() nor mmdrop_delayed() from here.
-+ */
-+ per_cpu(idle_last_mm, smp_processor_id()) = mm;
- /* finish_cpu(), as ran on the BP, will clean up the active_mm state */
- }
-@@ -5930,6 +5941,10 @@
- update_max_interval();
- nohz_balance_exit_idle(rq);
- hrtick_clear(rq);
-+ if (per_cpu(idle_last_mm, cpu)) {
-+ mmdrop_delayed(per_cpu(idle_last_mm, cpu));
-+ per_cpu(idle_last_mm, cpu) = NULL;
-+ }
- return 0;
- }
- #endif
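For context, only the per-CPU idle_last_mm deferral and its sched_cpu_dying()
cleanup are dropped here; the finish_task_switch() hunk (-2729,9 +2729,13
above) is kept, so the RT tree still defers the final mmdrop() of a dead
task's mm.  A rough sketch of that retained behaviour (kernel context
assumed; the CONFIG_PREEMPT_RT_BASE guard and the exact shape of the hunk are
assumptions, only mmdrop_delayed() itself appears in the hunks above):

	if (mm) {
#ifdef CONFIG_PREEMPT_RT_BASE
		/* finish_task_switch() runs with preemption disabled, so on
		 * RT the final free is deferred (via RCU per the patch title)
		 * instead of being done here. */
		mmdrop_delayed(mm);
#else
		mmdrop(mm);
#endif
	}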