[rt] Refresh "sched: Move mmdrop to RCU on RT" for context changes in 4.19.129

Salvatore Bonaccorso 2020-06-24 09:29:18 +02:00
parent d3153c7c8e
commit 1f589648a5
2 changed files with 14 additions and 24 deletions

debian/changelog

@@ -727,6 +727,8 @@ linux (4.19.129-1) UNRELEASED; urgency=medium
   * [rt] Update to 4.19.120-rt52
   * [rt] Update to 4.19.124-rt53
   * [rt] Update to 4.19.127-rt55
+  * [rt] Refresh "sched: Move mmdrop to RCU on RT" for context changes in
+    4.19.129
 
  -- Salvatore Bonaccorso <carnil@debian.org>  Wed, 13 May 2020 17:44:43 +0200


@@ -18,8 +18,6 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  kernel/sched/core.c | 18 ++++++++++++++++--
  4 files changed, 44 insertions(+), 2 deletions(-)
-diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
-index 3a9a996af229..202b736ccbfa 100644
 --- a/include/linux/mm_types.h
 +++ b/include/linux/mm_types.h
 @@ -12,6 +12,7 @@
@@ -30,7 +28,7 @@ index 3a9a996af229..202b736ccbfa 100644
  #include <linux/page-flags-layout.h>
  #include <linux/workqueue.h>
-@@ -487,6 +488,9 @@ struct mm_struct {
+@@ -489,6 +490,9 @@
 	bool tlb_flush_batched;
 #endif
 	struct uprobes_state uprobes_state;
@@ -40,13 +38,11 @@ index 3a9a996af229..202b736ccbfa 100644
 #ifdef CONFIG_HUGETLB_PAGE
 	atomic_long_t hugetlb_usage;
 #endif
-diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
-index e9d4e389aed9..fb59f96fdd2e 100644
 --- a/include/linux/sched/mm.h
 +++ b/include/linux/sched/mm.h
-@@ -49,6 +49,17 @@ static inline void mmdrop(struct mm_struct *mm)
- 		__mmdrop(mm);
- }
+@@ -51,6 +51,17 @@
+ void mmdrop(struct mm_struct *mm);
 +#ifdef CONFIG_PREEMPT_RT_BASE
 +extern void __mmdrop_delayed(struct rcu_head *rhp);
@@ -62,11 +58,9 @@ index e9d4e389aed9..fb59f96fdd2e 100644
 /*
  * This has to be called after a get_task_mm()/mmget_not_zero()
  * followed by taking the mmap_sem for writing before modifying the
-diff --git a/kernel/fork.c b/kernel/fork.c
-index 309f4a20d4ac..d4ec53c72577 100644
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
-@@ -642,6 +642,19 @@ void __mmdrop(struct mm_struct *mm)
+@@ -647,6 +647,19 @@
 }
 EXPORT_SYMBOL_GPL(__mmdrop);
@@ -86,11 +80,9 @@ index 309f4a20d4ac..d4ec53c72577 100644
 static void mmdrop_async_fn(struct work_struct *work)
 {
 	struct mm_struct *mm;
-diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 5c23d1272429..cb89c90513dd 100644
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -2729,9 +2729,13 @@ static struct rq *finish_task_switch(struct task_struct *prev)
+@@ -2729,9 +2729,13 @@
 	 * provided by mmdrop(),
 	 * - a sync_core for SYNC_CORE.
 	 */
@@ -105,7 +97,7 @@ index 5c23d1272429..cb89c90513dd 100644
 	}
 	if (unlikely(prev_state == TASK_DEAD)) {
 		if (prev->sched_class->task_dead)
-@@ -5601,6 +5605,8 @@ void sched_setnuma(struct task_struct *p, int nid)
+@@ -5602,6 +5606,8 @@
 #endif /* CONFIG_NUMA_BALANCING */
 #ifdef CONFIG_HOTPLUG_CPU
@@ -114,20 +106,19 @@ index 5c23d1272429..cb89c90513dd 100644
 /*
  * Ensure that the idle task is using init_mm right before its CPU goes
  * offline.
-@@ -5616,7 +5622,11 @@ void idle_task_exit(void)
- 		current->active_mm = &init_mm;
+@@ -5617,6 +5623,11 @@
+ 		switch_mm(mm, &init_mm, current);
 		finish_arch_post_lock_switch();
 	}
--	mmdrop(mm);
 +	/*
 +	 * Defer the cleanup to an alive cpu. On RT we can neither
 +	 * call mmdrop() nor mmdrop_delayed() from here.
 +	 */
 +	per_cpu(idle_last_mm, smp_processor_id()) = mm;
- }
- /*
-@@ -5928,6 +5938,10 @@ int sched_cpu_dying(unsigned int cpu)
+ 	/* finish_cpu(), as ran on the BP, will clean up the active_mm state */
+ }
+@@ -5930,6 +5941,10 @@
 	update_max_interval();
 	nohz_balance_exit_idle(rq);
 	hrtick_clear(rq);
@@ -138,6 +129,3 @@ index 5c23d1272429..cb89c90513dd 100644
 	return 0;
 }
 #endif
--- 
-2.17.1
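
For orientation, the refreshed patch keeps the RT approach that is visible in the hunks above: on PREEMPT_RT the expensive mm_struct teardown is not done from atomic context; __mmdrop_delayed() is an RCU callback, and an offlining CPU parks its last active_mm in per_cpu(idle_last_mm, ...) with the comment "Defer the cleanup to an alive cpu". The following is a minimal userspace sketch of that general pattern — hand the last reference to a helper context instead of freeing it inline — written with plain pthreads rather than the kernel's RCU and per-CPU APIs. Every identifier in it (obj, drop_queue, reaper, obj_put_deferred) is made up for illustration and is not part of the patch.

/*
 * Userspace analogue of the deferral done by the RT patch: the "hot"
 * path only enqueues the dead object; a separate reaper thread does
 * the actual (potentially expensive) teardown later.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
    int refcount;            /* protected by queue_lock for simplicity */
    char payload[64];
    struct obj *next;        /* drop-queue linkage */
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t queue_cond = PTHREAD_COND_INITIALIZER;
static struct obj *drop_queue;
static int shutting_down;

/* Hot path: give up the last reference without doing the teardown here. */
static void obj_put_deferred(struct obj *o)
{
    pthread_mutex_lock(&queue_lock);
    if (--o->refcount == 0) {
        o->next = drop_queue;
        drop_queue = o;
        pthread_cond_signal(&queue_cond);
    }
    pthread_mutex_unlock(&queue_lock);
}

/* Reaper thread: the only place that actually frees objects. */
static void *reaper(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&queue_lock);
    while (!shutting_down || drop_queue) {
        while (!drop_queue && !shutting_down)
            pthread_cond_wait(&queue_cond, &queue_lock);
        while (drop_queue) {
            struct obj *o = drop_queue;
            drop_queue = o->next;
            pthread_mutex_unlock(&queue_lock);
            printf("reaper: freeing '%s'\n", o->payload);
            free(o);             /* the "expensive" teardown */
            pthread_mutex_lock(&queue_lock);
        }
    }
    pthread_mutex_unlock(&queue_lock);
    return NULL;
}

int main(void)
{
    pthread_t tid;
    pthread_create(&tid, NULL, reaper, NULL);

    struct obj *o = calloc(1, sizeof(*o));
    if (!o)
        return 1;
    o->refcount = 1;
    snprintf(o->payload, sizeof(o->payload), "last mm-like object");
    obj_put_deferred(o);             /* defer instead of freeing inline */

    pthread_mutex_lock(&queue_lock);
    shutting_down = 1;
    pthread_cond_signal(&queue_cond);
    pthread_mutex_unlock(&queue_lock);
    pthread_join(tid, NULL);
    return 0;
}

Build with cc -pthread. The only point of the sketch is the split between the hot path (obj_put_deferred) and the teardown context (the reaper thread), which mirrors why the RT patch avoids calling mmdrop() from the task-switch and CPU-offline paths.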