From: Paolo Bonzini <pbonzini@redhat.com>
Date: Sun, 23 Jun 2019 19:15:49 +0200
Subject: KVM: x86: remove now unneeded hugepage gfn adjustment

commit d679b32611c0102ce33b9e1a4e4b94854ed1812a upstream

After the previous patch, the low bits of the gfn are masked in
both FNAME(fetch) and __direct_map, so we do not need to clear them
in transparent_hugepage_adjust.
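
For illustration only (this snippet is not part of the patch), the
arithmetic that makes the write-back redundant can be sketched as a
small standalone program.  The 512-page huge page size and the example
gfn value are assumptions chosen for the demo, standing in for
KVM_PAGES_PER_HPAGE(level) on a 2M mapping:

/*
 * Illustrative sketch only, not part of this patch.  The callers already
 * align the gfn to the huge page boundary, so masking it again inside
 * transparent_hugepage_adjust() changes nothing.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint64_t mask = 512 - 1;	/* pages per 2M huge page - 1 (assumed) */
	uint64_t gfn = 0x12345;		/* arbitrary example gfn */

	/* What transparent_hugepage_adjust() used to write back via *gfnp. */
	uint64_t adjusted_gfn = gfn & ~mask;

	/* What __direct_map()/FNAME(fetch) now compute for themselves. */
	uint64_t base_gfn_from_raw = gfn & ~mask;
	uint64_t base_gfn_from_adjusted = adjusted_gfn & ~mask;

	/* Same mapping target either way, so the extra clear can go away. */
	assert(base_gfn_from_raw == base_gfn_from_adjusted);
	return 0;
}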

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/kvm/mmu.c         | 9 +++------
 arch/x86/kvm/paging_tmpl.h | 2 +-
 2 files changed, 4 insertions(+), 7 deletions(-)

--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3155,11 +3155,10 @@ static int kvm_handle_bad_page(struct kv
 }
 
 static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
-					gfn_t *gfnp, kvm_pfn_t *pfnp,
+					gfn_t gfn, kvm_pfn_t *pfnp,
 					int *levelp)
 {
 	kvm_pfn_t pfn = *pfnp;
-	gfn_t gfn = *gfnp;
 	int level = *levelp;
 
 	/*
@@ -3186,8 +3185,6 @@ static void transparent_hugepage_adjust(
 		mask = KVM_PAGES_PER_HPAGE(level) - 1;
 		VM_BUG_ON((gfn & mask) != (pfn & mask));
 		if (pfn & mask) {
-			gfn &= ~mask;
-			*gfnp = gfn;
 			kvm_release_pfn_clean(pfn);
 			pfn &= ~mask;
 			kvm_get_pfn(pfn);
@@ -3451,7 +3448,7 @@ static int nonpaging_map(struct kvm_vcpu
 	if (make_mmu_pages_available(vcpu) < 0)
 		goto out_unlock;
 	if (likely(!force_pt_level))
-		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
+		transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
 	r = __direct_map(vcpu, v, write, map_writable, level, pfn, prefault);
 out_unlock:
 	spin_unlock(&vcpu->kvm->mmu_lock);
@@ -4085,7 +4082,7 @@ static int tdp_page_fault(struct kvm_vcp
 	if (make_mmu_pages_available(vcpu) < 0)
 		goto out_unlock;
 	if (likely(!force_pt_level))
-		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
+		transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
 	r = __direct_map(vcpu, gpa, write, map_writable, level, pfn, prefault);
 out_unlock:
 	spin_unlock(&vcpu->kvm->mmu_lock);
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -829,7 +829,7 @@ static int FNAME(page_fault)(struct kvm_
 	if (make_mmu_pages_available(vcpu) < 0)
 		goto out_unlock;
 	if (!force_pt_level)
-		transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
+		transparent_hugepage_adjust(vcpu, walker.gfn, &pfn, &level);
 	r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
 			 level, pfn, map_writable, prefault);
 	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);