156 lines
4.8 KiB
Diff
From: Linus Torvalds <torvalds@linux-foundation.org>
|
|
Date: Thu, 11 Apr 2019 10:49:19 -0700
|
|
Subject: mm: prevent get_user_pages() from overflowing page refcount
|
|
Origin: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git/commit?id=d972ebbf42ba6712460308ae57c222a0706f2af3
|
|
Bug-Debian-Security: https://security-tracker.debian.org/tracker/CVE-2019-11487
|
|
|
|
commit 8fde12ca79aff9b5ba951fce1a2641901b8d8e64 upstream.
|
|
|
|
If the page refcount wraps around past zero, it will be freed while
|
|
there are still four billion references to it. One of the possible
|
|
avenues for an attacker to try to make this happen is by doing direct IO
|
|
on a page multiple times. This patch makes get_user_pages() refuse to
|
|
take a new page reference if there are already more than two billion
|
|
references to the page.
|
|
|
|
Reported-by: Jann Horn <jannh@google.com>
|
|
Acked-by: Matthew Wilcox <willy@infradead.org>
|
|
Cc: stable@kernel.org
|
|
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
|
|
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
|
|
---
|
|
mm/gup.c | 45 ++++++++++++++++++++++++++++++++++-----------
|
|
mm/hugetlb.c | 13 +++++++++++++
|
|
2 files changed, 47 insertions(+), 11 deletions(-)
|
|
|
|
diff --git a/mm/gup.c b/mm/gup.c
|
|
index 0a5374e6e82d..caadd31714a5 100644
|
|
--- a/mm/gup.c
|
|
+++ b/mm/gup.c
|
|
@@ -153,7 +153,10 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
|
|
}
|
|
|
|
if (flags & FOLL_GET) {
|
|
- get_page(page);
|
|
+ if (unlikely(!try_get_page(page))) {
|
|
+ page = ERR_PTR(-ENOMEM);
|
|
+ goto out;
|
|
+ }
|
|
|
|
/* drop the pgmap reference now that we hold the page */
|
|
if (pgmap) {
|
|
@@ -296,7 +299,10 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
|
|
if (pmd_trans_unstable(pmd))
|
|
ret = -EBUSY;
|
|
} else {
|
|
- get_page(page);
|
|
+ if (unlikely(!try_get_page(page))) {
|
|
+ spin_unlock(ptl);
|
|
+ return ERR_PTR(-ENOMEM);
|
|
+ }
|
|
spin_unlock(ptl);
|
|
lock_page(page);
|
|
ret = split_huge_page(page);
|
|
@@ -480,7 +486,10 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
|
|
if (is_device_public_page(*page))
|
|
goto unmap;
|
|
}
|
|
- get_page(*page);
|
|
+ if (unlikely(!try_get_page(*page))) {
|
|
+ ret = -ENOMEM;
|
|
+ goto unmap;
|
|
+ }
|
|
out:
|
|
ret = 0;
|
|
unmap:
|
|
@@ -1368,6 +1377,20 @@ static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
|
|
}
|
|
}
|
|
|
|
+/*
|
|
+ * Return the compound head page with ref appropriately incremented,
|
|
+ * or NULL if that failed.
|
|
+ */
|
|
+static inline struct page *try_get_compound_head(struct page *page, int refs)
|
|
+{
|
|
+ struct page *head = compound_head(page);
|
|
+ if (WARN_ON_ONCE(page_ref_count(head) < 0))
|
|
+ return NULL;
|
|
+ if (unlikely(!page_cache_add_speculative(head, refs)))
|
|
+ return NULL;
|
|
+ return head;
|
|
+}
|
|
+
|
|
#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
|
|
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
|
|
int write, struct page **pages, int *nr)
|
|
@@ -1402,9 +1425,9 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
|
|
|
|
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
|
|
page = pte_page(pte);
|
|
- head = compound_head(page);
|
|
|
|
- if (!page_cache_get_speculative(head))
|
|
+ head = try_get_compound_head(page, 1);
|
|
+ if (!head)
|
|
goto pte_unmap;
|
|
|
|
if (unlikely(pte_val(pte) != pte_val(*ptep))) {
|
|
@@ -1543,8 +1566,8 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
|
|
refs++;
|
|
} while (addr += PAGE_SIZE, addr != end);
|
|
|
|
- head = compound_head(pmd_page(orig));
|
|
- if (!page_cache_add_speculative(head, refs)) {
|
|
+ head = try_get_compound_head(pmd_page(orig), refs);
|
|
+ if (!head) {
|
|
*nr -= refs;
|
|
return 0;
|
|
}
|
|
@@ -1581,8 +1604,8 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
|
|
refs++;
|
|
} while (addr += PAGE_SIZE, addr != end);
|
|
|
|
- head = compound_head(pud_page(orig));
|
|
- if (!page_cache_add_speculative(head, refs)) {
|
|
+ head = try_get_compound_head(pud_page(orig), refs);
|
|
+ if (!head) {
|
|
*nr -= refs;
|
|
return 0;
|
|
}
|
|
@@ -1618,8 +1641,8 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
|
|
refs++;
|
|
} while (addr += PAGE_SIZE, addr != end);
|
|
|
|
- head = compound_head(pgd_page(orig));
|
|
- if (!page_cache_add_speculative(head, refs)) {
|
|
+ head = try_get_compound_head(pgd_page(orig), refs);
|
|
+ if (!head) {
|
|
*nr -= refs;
|
|
return 0;
|
|
}
|
|
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
|
|
index 9e5f66cbf711..5fb779cda972 100644
|
|
--- a/mm/hugetlb.c
|
|
+++ b/mm/hugetlb.c
|
|
@@ -4299,6 +4299,19 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
|
|
|
|
pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
|
|
page = pte_page(huge_ptep_get(pte));
|
|
+
|
|
+ /*
|
|
+ * Instead of doing 'try_get_page()' below in the same_page
|
|
+ * loop, just check the count once here.
|
|
+ */
|
|
+ if (unlikely(page_count(page) <= 0)) {
|
|
+ if (pages) {
|
|
+ spin_unlock(ptl);
|
|
+ remainder = 0;
|
|
+ err = -ENOMEM;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
same_page:
|
|
if (pages) {
|
|
pages[i] = mem_map_offset(page, pfn_offset);
|