diff --git a/debian/changelog b/debian/changelog
index 97fa8bfeb..de4f6b242 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -2,6 +2,7 @@ linux-2.6 (2.6.35-1~experimental.2) UNRELEASED; urgency=low
 
   * images: Nuke modules.devname on removal. (closes: #590607)
   * Add stable 2.6.35.1 and 2.6.35.2.
+  * mm: fix page table unmap for stack guard page properly.
 
  -- maximilian attems  Tue, 04 Aug 2010 20:21:16 +0200
 
diff --git a/debian/patches/bugfix/all/mm-fix-page-table-unmap-for-stack-guard-page-properl.patch b/debian/patches/bugfix/all/mm-fix-page-table-unmap-for-stack-guard-page-properl.patch
new file mode 100644
index 000000000..fe9fcf270
--- /dev/null
+++ b/debian/patches/bugfix/all/mm-fix-page-table-unmap-for-stack-guard-page-properl.patch
@@ -0,0 +1,76 @@
+From 11ac552477e32835cb6970bf0a70c210807f5673 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds
+Date: Sat, 14 Aug 2010 11:44:56 -0700
+Subject: [PATCH] mm: fix page table unmap for stack guard page properly
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+We do in fact need to unmap the page table _before_ doing the whole
+stack guard page logic, because if it is needed (mainly 32-bit x86 with
+PAE and CONFIG_HIGHPTE, but other architectures may use it too) then it
+will do a kmap_atomic/kunmap_atomic.
+
+And those kmaps will create an atomic region that we cannot do
+allocations in. However, the whole stack expand code will need to do
+anon_vma_prepare() and vma_lock_anon_vma() and they cannot do that in an
+atomic region.
+
+Now, a better model might actually be to do the anon_vma_prepare() when
+_creating_ a VM_GROWSDOWN segment, and not have to worry about any of
+this at page fault time. But in the meantime, this is the
+straightforward fix for the issue.
+
+See https://bugzilla.kernel.org/show_bug.cgi?id=16588 for details.
+
+Reported-by: Wylda
+Reported-by: Sedat Dilek
+Reported-by: Mike Pagano
+Reported-by: François Valenduc
+Tested-by: Ed Tomlinson
+Cc: Pekka Enberg
+Cc: Greg KH
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds
+---
+ mm/memory.c |   13 ++++++-------
+ 1 files changed, 6 insertions(+), 7 deletions(-)
+
+diff --git a/mm/memory.c b/mm/memory.c
+index 9b3b73f..b6e5fd2 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2792,24 +2792,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	spinlock_t *ptl;
+ 	pte_t entry;
+ 
+-	if (check_stack_guard_page(vma, address) < 0) {
+-		pte_unmap(page_table);
++	pte_unmap(page_table);
++
++	/* Check if we need to add a guard page to the stack */
++	if (check_stack_guard_page(vma, address) < 0)
+ 		return VM_FAULT_SIGBUS;
+-	}
+ 
++	/* Use the zero-page for reads */
+ 	if (!(flags & FAULT_FLAG_WRITE)) {
+ 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
+ 						vma->vm_page_prot));
+-		ptl = pte_lockptr(mm, pmd);
+-		spin_lock(ptl);
++		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+ 		if (!pte_none(*page_table))
+ 			goto unlock;
+ 		goto setpte;
+ 	}
+ 
+ 	/* Allocate our own private page. */
+-	pte_unmap(page_table);
+-
+ 	if (unlikely(anon_vma_prepare(vma)))
+ 		goto oom;
+ 	page = alloc_zeroed_user_highpage_movable(vma, address);
+--
+1.7.1
+
diff --git a/debian/patches/series/1~experimental.2 b/debian/patches/series/1~experimental.2
index 2d148ff94..fd2929f67 100644
--- a/debian/patches/series/1~experimental.2
+++ b/debian/patches/series/1~experimental.2
@@ -1,2 +1,3 @@
 + bugfix/all/stable/2.6.35.1.patch
 + bugfix/all/stable/2.6.35.2.patch
++ bugfix/all/mm-fix-page-table-unmap-for-stack-guard-page-properl.patch
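
Note on the ordering constraint the commit message describes: while the page table is mapped via kmap_atomic() (32-bit x86 with PAE and CONFIG_HIGHPTE), the task is in an atomic section and must not call anything that can allocate or sleep, which is why pte_unmap() now happens before check_stack_guard_page() can reach anon_vma_prepare(). The toy C program below is a minimal sketch of that rule outside the kernel; every name in it (atomic_depth, map_pte, unmap_pte, may_sleep_alloc) is invented for illustration and is not kernel code or part of the patch.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Toy model, for illustration only: a counter stands in for "inside a
 * kmap_atomic() section", and the allocation helper refuses to run
 * while that counter is non-zero, mirroring the kernel rule that
 * sleeping allocations are forbidden in an atomic region.
 */
static int atomic_depth;			/* >0 means "PTE still mapped" */

static void map_pte(void)   { atomic_depth++; }	/* like kmap_atomic()   */
static void unmap_pte(void) { atomic_depth--; }	/* like kunmap_atomic() */

/* Stands in for anon_vma_prepare(): may allocate, hence may sleep. */
static void *may_sleep_alloc(size_t n)
{
	assert(atomic_depth == 0 && "allocation inside atomic section");
	return malloc(n);
}

int main(void)
{
	map_pte();
	/*
	 * Broken ordering (roughly what 2.6.35.2 did): reaching an
	 * allocation while the PTE is still mapped would trip the
	 * assertion above if the next line were uncommented.
	 */
	/* may_sleep_alloc(64); */

	/* Fixed ordering, as in the patch: unmap first, then do work
	 * that may allocate or sleep. */
	unmap_pte();
	void *p = may_sleep_alloc(64);
	printf("allocation after unmap succeeded: %p\n", p);
	free(p);
	return 0;
}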