From: Paolo Bonzini <pbonzini@redhat.com>
Date: Mon, 1 Jul 2019 06:22:57 -0400
Subject: KVM: x86: add tracepoints around __direct_map and FNAME(fetch)

commit 335e192a3fa415e1202c8b9ecdaaecd643f823cc upstream.

These are useful in debugging shadow paging.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
[bwh: Backported to 4.19: adjust context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
---
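Note: both tracepoints are created in the kvmmmu event group (per
TRACE_SYSTEM in mmutrace.h), so they can be toggled through tracefs
like any other trace event. A minimal user-space sketch, assuming
tracefs is mounted at /sys/kernel/debug/tracing (the mount point and
the enable_event() helper below are illustrative, not part of this
patch):

/*
 * Sketch: enable the two kvmmmu tracepoints added by this patch.
 * Assumes tracefs at /sys/kernel/debug/tracing; adjust the prefix
 * for systems that mount it at /sys/kernel/tracing instead.
 */
#include <stdio.h>

static int enable_event(const char *event)
{
	char path[256];
	FILE *f;

	/* Each event has a per-event "enable" file under its group. */
	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/tracing/events/kvmmmu/%s/enable", event);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	fputs("1\n", f);
	fclose(f);
	return 0;
}

int main(void)
{
	enable_event("kvm_mmu_set_spte");
	enable_event("kvm_mmu_spte_requested");
	/* Events then appear in .../tracing/trace while a guest runs. */
	return 0;
}

With both events enabled, the mapping requested at fault time
(kvm_mmu_spte_requested) can be compared in the trace buffer against
the SPTE actually installed (kvm_mmu_set_spte) while debugging shadow
paging.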
 arch/x86/kvm/mmu.c         | 13 ++++-----
 arch/x86/kvm/mmutrace.h    | 59 ++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/paging_tmpl.h |  2 ++
 3 files changed, 67 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 68fa10d890ee..7f9be921df7c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -140,9 +140,6 @@ module_param(dbg, bool, 0644);
 
 #include <trace/events/kvm.h>
 
-#define CREATE_TRACE_POINTS
-#include "mmutrace.h"
-
 #define SPTE_HOST_WRITEABLE	(1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
 #define SPTE_MMU_WRITEABLE	(1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1))
 
@@ -261,9 +258,13 @@ static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
 
 
 static void mmu_spte_set(u64 *sptep, u64 spte);
+static bool is_executable_pte(u64 spte);
 static union kvm_mmu_page_role
 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
 
+#define CREATE_TRACE_POINTS
+#include "mmutrace.h"
+
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value)
 {
 	BUG_ON((mmio_mask & mmio_value) != mmio_value);
@@ -2992,10 +2993,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
 		ret = RET_PF_EMULATE;
 
 	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
-	pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
-		 is_large_pte(*sptep)? "2MB" : "4kB",
-		 *sptep & PT_WRITABLE_MASK ? "RW" : "R", gfn,
-		 *sptep, sptep);
+	trace_kvm_mmu_set_spte(level, gfn, sptep);
 	if (!was_rmapped && is_large_pte(*sptep))
 		++vcpu->kvm->stat.lpages;
 
@@ -3106,6 +3104,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
 	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
 		return RET_PF_RETRY;
 
+	trace_kvm_mmu_spte_requested(gpa, level, pfn);
 	for_each_shadow_entry(vcpu, gpa, it) {
 		base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
 		if (it.level == level)
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index c73bf4e4988c..918b0d5bf272 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -325,6 +325,65 @@ TRACE_EVENT(
 		__entry->kvm_gen == __entry->spte_gen
 	)
 );
+
+TRACE_EVENT(
+	kvm_mmu_set_spte,
+	TP_PROTO(int level, gfn_t gfn, u64 *sptep),
+	TP_ARGS(level, gfn, sptep),
+
+	TP_STRUCT__entry(
+		__field(u64, gfn)
+		__field(u64, spte)
+		__field(u64, sptep)
+		__field(u8, level)
+		/* These depend on page entry type, so compute them now. */
+		__field(bool, r)
+		__field(bool, x)
+		__field(u8, u)
+	),
+
+	TP_fast_assign(
+		__entry->gfn = gfn;
+		__entry->spte = *sptep;
+		__entry->sptep = virt_to_phys(sptep);
+		__entry->level = level;
+		__entry->r = shadow_present_mask || (__entry->spte & PT_PRESENT_MASK);
+		__entry->x = is_executable_pte(__entry->spte);
+		__entry->u = shadow_user_mask ? !!(__entry->spte & shadow_user_mask) : -1;
+	),
+
+	TP_printk("gfn %llx spte %llx (%s%s%s%s) level %d at %llx",
+		  __entry->gfn, __entry->spte,
+		  __entry->r ? "r" : "-",
+		  __entry->spte & PT_WRITABLE_MASK ? "w" : "-",
+		  __entry->x ? "x" : "-",
+		  __entry->u == -1 ? "" : (__entry->u ? "u" : "-"),
+		  __entry->level, __entry->sptep
+	)
+);
+
+TRACE_EVENT(
+	kvm_mmu_spte_requested,
+	TP_PROTO(gpa_t addr, int level, kvm_pfn_t pfn),
+	TP_ARGS(addr, level, pfn),
+
+	TP_STRUCT__entry(
+		__field(u64, gfn)
+		__field(u64, pfn)
+		__field(u8, level)
+	),
+
+	TP_fast_assign(
+		__entry->gfn = addr >> PAGE_SHIFT;
+		__entry->pfn = pfn | (__entry->gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
+		__entry->level = level;
+	),
+
+	TP_printk("gfn %llx pfn %llx level %d",
+		  __entry->gfn, __entry->pfn, __entry->level
+	)
+);
+
 #endif /* _TRACE_KVMMMU_H */
 
 #undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 4aab953f1d31..3b022b08b577 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -649,6 +649,8 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 
 	base_gfn = gw->gfn;
 
+	trace_kvm_mmu_spte_requested(addr, gw->level, pfn);
+
 	for (; shadow_walk_okay(&it); shadow_walk_next(&it)) {
 		clear_sp_write_flooding_count(it.sptep);
 		base_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);