linux/debian/patches/debian/arm-mm-avoid-abi-change-in-3.10.6.patch
From: Ben Hutchings <ben@decadent.org.uk>
Subject: ARM: mm: Avoid ABI changes in 3.10.6
Date: Sat, 17 Aug 2013 00:35:01 +0200
Forwarded: not-needed

commit a5510daad56d ('ARM: move signal handlers into a vdso-like
page', commit 48be69a026b2 upstream) and commit 8271eb9ffaaa ('ARM:
7790/1: Fix deferred mm switch on VIVT processors', commit
bdae73cd374e upstream) added members to mm_context_t. Move them into
a new structure at the end of mm_struct (which is always allocated by
the core kernel) and hide it from genksyms.
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -6,13 +6,17 @@
 typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
 	atomic64_t	id;
-#else
-	int		switch_pending;
 #endif
 	unsigned int	vmalloc_seq;
-	unsigned long	sigpage;
 } mm_context_t;
 
+struct mm_context_ext {
+#ifndef CONFIG_CPU_HAS_ASID
+	int		switch_pending;
+#endif
+	unsigned long	sigpage;
+};
+
 #ifdef CONFIG_CPU_HAS_ASID
 #define ASID_BITS	8
 #define ASID_MASK	((~0ULL) << ASID_BITS)
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -55,7 +55,7 @@ static inline void check_and_switch_cont
 		 * on non-ASID CPUs, the old mm will remain valid until the
 		 * finish_arch_post_lock_switch() call.
 		 */
-		mm->context.switch_pending = 1;
+		mm->context_ext.switch_pending = 1;
 	else
 		cpu_switch_mm(mm->pgd, mm);
 }
@@ -66,7 +66,7 @@ static inline void finish_arch_post_lock
 {
 	struct mm_struct *mm = current->mm;
 
-	if (mm && mm->context.switch_pending) {
+	if (mm && mm->context_ext.switch_pending) {
 		/*
 		 * Preemption must be disabled during cpu_switch_mm() as we
 		 * have some stateful cache flush implementations. Check
@@ -74,8 +74,8 @@ static inline void finish_arch_post_lock
 		 * switch to this mm was already done.
 		 */
 		preempt_disable();
-		if (mm->context.switch_pending) {
-			mm->context.switch_pending = 0;
+		if (mm->context_ext.switch_pending) {
+			mm->context_ext.switch_pending = 0;
 			cpu_switch_mm(mm->pgd, mm);
 		}
 		preempt_enable_no_resched();
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -437,6 +437,10 @@ struct mm_struct {
 	int first_nid;
 #endif
 	struct uprobes_state uprobes_state;
+
+#if !defined(__GENKSYMS__) && defined(CONFIG_ARM)
+	struct mm_context_ext	context_ext;
+#endif
 };
 
 /* first nid will either be a valid NID or one of these values */
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -474,7 +474,7 @@ int in_gate_area_no_mm(unsigned long add
 const char *arch_vma_name(struct vm_area_struct *vma)
 {
 	return is_gate_vma(vma) ? "[vectors]" :
-		(vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
+		(vma->vm_mm && vma->vm_start == vma->vm_mm->context_ext.sigpage) ?
 		 "[sigpage]" : NULL;
 }
 
@@ -504,7 +504,7 @@ int arch_setup_additional_pages(struct l
 		&signal_page);
 
 	if (ret == 0)
-		mm->context.sigpage = addr;
+		mm->context_ext.sigpage = addr;
 
  up_fail:
 	up_write(&mm->mmap_sem);
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -406,7 +406,7 @@ setup_return(struct pt_regs *regs, struc
 			 * except when the MPU has protected the vectors
 			 * page from PL0
 			 */
-			retcode = mm->context.sigpage + signal_return_offset +
+			retcode = mm->context_ext.sigpage + signal_return_offset +
 				  (idx << 2) + thumb;
 		} else
 #endif
#endif