From: Paolo Bonzini <pbonzini@redhat.com>
Date: Sun, 27 Oct 2019 16:23:23 +0100
Subject: KVM: vmx, svm: always run with EFER.NXE=1 when shadow paging is
 active

commit 9167ab79936206118cc60e47dcb926c3489f3bd5 upstream.

VMX already does so if the host has SMEP, in order to support the combination of
CR0.WP=1 and CR4.SMEP=1.  However, it is perfectly safe to always do so, and in
fact VMX already ends up running with EFER.NXE=1 on old processors that lack the
"load EFER" controls, because it may help avoiding a slow MSR write.  Removing
all the conditionals simplifies the code.

SVM does not have similar code, but it should since recent AMD processors do
support SMEP.  So this patch also makes the code for the two vendors more similar
while fixing NPT=0, CR0.WP=1 and CR4.SMEP=1 on AMD processors.

Cc: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
[bwh: Backported to 4.19: adjust filename]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
---
 arch/x86/kvm/svm.c | 10 ++++++++--
 arch/x86/kvm/vmx.c | 14 +++-----------
 2 files changed, 11 insertions(+), 13 deletions(-)

--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -736,8 +736,14 @@ static int get_npt_level(struct kvm_vcpu
 static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
 	vcpu->arch.efer = efer;
-	if (!npt_enabled && !(efer & EFER_LMA))
-		efer &= ~EFER_LME;
+
+	if (!npt_enabled) {
+		/* Shadow paging assumes NX to be available.  */
+		efer |= EFER_NX;
+
+		if (!(efer & EFER_LMA))
+			efer &= ~EFER_LME;
+	}
 
 	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
 	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2785,17 +2785,9 @@ static bool update_transition_efer(struc
 	u64 guest_efer = vmx->vcpu.arch.efer;
 	u64 ignore_bits = 0;
 
-	if (!enable_ept) {
-		/*
-		 * NX is needed to handle CR0.WP=1, CR4.SMEP=1.  Testing
-		 * host CPUID is more efficient than testing guest CPUID
-		 * or CR4.  Host SMEP is anyway a requirement for guest SMEP.
-		 */
-		if (boot_cpu_has(X86_FEATURE_SMEP))
-			guest_efer |= EFER_NX;
-		else if (!(guest_efer & EFER_NX))
-			ignore_bits |= EFER_NX;
-	}
+	/* Shadow paging assumes NX to be available.  */
+	if (!enable_ept)
+		guest_efer |= EFER_NX;
 
 	/*
 	 * LMA and LME handled by hardware; SCE meaningless outside long mode.