From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 1 Nov 2012 10:14:11 +0100
Subject: powerpc: Add support for lazy preemption
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.18/older/patches-4.18-rc8-rt1.tar.xz

Implement the powerpc pieces for lazy preempt.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/powerpc/Kconfig                   |    1 +
 arch/powerpc/include/asm/thread_info.h |    7 ++++++-
 arch/powerpc/kernel/asm-offsets.c      |    1 +
 arch/powerpc/kernel/entry_32.S         |   17 ++++++++++++-----
 arch/powerpc/kernel/entry_64.S         |   14 +++++++++++---
 5 files changed, 31 insertions(+), 9 deletions(-)

--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -215,6 +215,7 @@ config PPC
 	select HAVE_HARDLOCKUP_DETECTOR_PERF	if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_PREEMPT_LAZY
 	select HAVE_RCU_TABLE_FREE		if SMP
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE		if PPC64 && CPU_LITTLE_ENDIAN
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -36,6 +36,8 @@ struct thread_info {
 	int		cpu;			/* cpu we're on */
 	int		preempt_count;		/* 0 => preemptable,
 						   <0 => BUG */
+	int		preempt_lazy_count;	/* 0 => preemptable,
+						   <0 => BUG */
 	unsigned long	local_flags;		/* private flags for thread */
 #ifdef CONFIG_LIVEPATCH
 	unsigned long *livepatch_sp;
@@ -99,6 +101,7 @@ extern int arch_dup_task_struct(struct t
 #define TIF_ELF2ABI		18	/* function descriptors must die! */
 #endif
 #define TIF_POLLING_NRFLAG	19	/* true if poll_idle() is polling TIF_NEED_RESCHED */
+#define TIF_NEED_RESCHED_LAZY	20	/* lazy rescheduling necessary */

 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
@@ -118,6 +121,7 @@ extern int arch_dup_task_struct(struct t
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_EMULATE_STACK_STORE	(1<<TIF_EMULATE_STACK_STORE)
 #define _TIF_NOHZ		(1<<TIF_NOHZ)
+#define _TIF_NEED_RESCHED_LAZY	(1<<TIF_NEED_RESCHED_LAZY)
 #define _TIF_FSCHECK		(1<<TIF_FSCHECK)
 #define _TIF_SYSCALL_DOTRACE	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
 				 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
@@ -126,8 +130,9 @@ extern int arch_dup_task_struct(struct t
 #define _TIF_USER_WORK_MASK	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
 				 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
 				 _TIF_RESTORE_TM | _TIF_PATCH_PENDING | \
-				 _TIF_FSCHECK)
+				 _TIF_FSCHECK | _TIF_NEED_RESCHED_LAZY)
 #define _TIF_PERSYSCALL_MASK	(_TIF_RESTOREALL|_TIF_NOERROR)
+#define _TIF_NEED_RESCHED_MASK	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)

 /* Bits in local_flags */
 /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -156,6 +156,7 @@ int main(void)
 	OFFSET(TI_FLAGS, thread_info, flags);
 	OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
 	OFFSET(TI_PREEMPT, thread_info, preempt_count);
+	OFFSET(TI_PREEMPT_LAZY, thread_info, preempt_lazy_count);
 	OFFSET(TI_TASK, thread_info, task);
 	OFFSET(TI_CPU, thread_info, cpu);

--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -873,7 +873,14 @@ user_exc_return:		/* r10 contains MSR_KE
 	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
 	bne	restore
 	andi.	r8,r8,_TIF_NEED_RESCHED
+	bne+	1f
+	lwz	r0,TI_PREEMPT_LAZY(r9)
+	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
+	bne	restore
+	lwz	r0,TI_FLAGS(r9)
+	andi.	r0,r0,_TIF_NEED_RESCHED_LAZY
 	beq+	restore
+1:
 	lwz	r3,_MSR(r1)
 	andi.	r0,r3,MSR_EE	/* interrupts off? */
 	beq	restore		/* don't schedule if so */
@@ -884,11 +891,11 @@ user_exc_return:		/* r10 contains MSR_KE
 	 */
 	bl	trace_hardirqs_off
 #endif
-1:	bl	preempt_schedule_irq
+2:	bl	preempt_schedule_irq
 	CURRENT_THREAD_INFO(r9, r1)
 	lwz	r3,TI_FLAGS(r9)
-	andi.	r0,r3,_TIF_NEED_RESCHED
-	bne-	1b
+	andi.	r0,r3,_TIF_NEED_RESCHED_MASK
+	bne-	2b
 #ifdef CONFIG_TRACE_IRQFLAGS
 	/* And now, to properly rebalance the above, we tell lockdep they
 	 * are being turned back on, which will happen when we return
@@ -1211,7 +1218,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
 #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

 do_work:			/* r10 contains MSR_KERNEL here */
-	andi.	r0,r9,_TIF_NEED_RESCHED
+	andi.	r0,r9,_TIF_NEED_RESCHED_MASK
 	beq	do_user_signal

 do_resched:			/* r10 contains MSR_KERNEL here */
@@ -1232,7 +1239,7 @@ do_resched:			/* r10 contains MSR_KERNEL
 	MTMSRD(r10)		/* disable interrupts */
 	CURRENT_THREAD_INFO(r9, r1)
 	lwz	r9,TI_FLAGS(r9)
-	andi.	r0,r9,_TIF_NEED_RESCHED
+	andi.	r0,r9,_TIF_NEED_RESCHED_MASK
 	bne-	do_resched
 	andi.	r0,r9,_TIF_USER_WORK_MASK
 	beq	restore_user
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -707,7 +707,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
 	bl	restore_math
 	b	restore
 #endif
-1:	andi.	r0,r4,_TIF_NEED_RESCHED
+1:	andi.	r0,r4,_TIF_NEED_RESCHED_MASK
 	beq	2f
 	bl	restore_interrupts
 	SCHEDULE_USER
@@ -769,10 +769,18 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG

 #ifdef CONFIG_PREEMPT
 	/* Check if we need to preempt */
+	lwz	r8,TI_PREEMPT(r9)
+	cmpwi	0,r8,0		/* if non-zero, just restore regs and return */
+	bne	restore
 	andi.	r0,r4,_TIF_NEED_RESCHED
+	bne+	check_count
+
+	andi.	r0,r4,_TIF_NEED_RESCHED_LAZY
 	beq+	restore
+	lwz	r8,TI_PREEMPT_LAZY(r9)
+
 	/* Check that preempt_count() == 0 and interrupts are enabled */
-	lwz	r8,TI_PREEMPT(r9)
+check_count:
 	cmpwi	cr0,r8,0
 	bne	restore
 	ld	r0,SOFTE(r1)
@@ -789,7 +797,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG
 	/* Re-test flags and eventually loop */
 	CURRENT_THREAD_INFO(r9, r1)
 	ld	r4,TI_FLAGS(r9)
-	andi.	r0,r4,_TIF_NEED_RESCHED
+	andi.	r0,r4,_TIF_NEED_RESCHED_MASK
 	bne	1b

 /*