From bb9d4a24ba55d1487a34d287c6b940ce00b85822 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 27 Feb 2019 10:10:23 +0100
Subject: [PATCH 05/30] x86/speculation: Consolidate CPU whitelists

commit 36ad35131adacc29b328b9c8b6277a8bf0d6fd5d upstream

The CPU vulnerability whitelists have some overlap and there are more
whitelists coming along.

Use the driver_data field in the x86_cpu_id struct to denote the
whitelisted vulnerabilities and combine all whitelists into one.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Jon Masters <jcm@redhat.com>
Tested-by: Jon Masters <jcm@redhat.com>
---
 arch/x86/kernel/cpu/common.c | 105 +++++++++++++++++++----------------
 1 file changed, 56 insertions(+), 49 deletions(-)

diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 10e5ccfa9278..fd16b4cc991f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -948,60 +948,68 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #endif
 }
 
-static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
-	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_SALTWELL,	X86_FEATURE_ANY },
-	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_SALTWELL_TABLET, X86_FEATURE_ANY },
-	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_BONNELL_MID,	X86_FEATURE_ANY },
-	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_SALTWELL_MID, X86_FEATURE_ANY },
-	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_BONNELL,	X86_FEATURE_ANY },
-	{ X86_VENDOR_CENTAUR,	5 },
-	{ X86_VENDOR_INTEL,	5 },
-	{ X86_VENDOR_NSC,	5 },
-	{ X86_VENDOR_ANY,	4 },
+#define NO_SPECULATION	BIT(0)
+#define NO_MELTDOWN	BIT(1)
+#define NO_SSB		BIT(2)
+#define NO_L1TF		BIT(3)
+
+#define VULNWL(_vendor, _family, _model, _whitelist)	\
+	{ X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
+
+#define VULNWL_INTEL(model, whitelist)		\
+	VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)
+
+#define VULNWL_AMD(family, whitelist)		\
+	VULNWL(AMD, family, X86_MODEL_ANY, whitelist)
+
+static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+	VULNWL(ANY,	4, X86_MODEL_ANY,	NO_SPECULATION),
+	VULNWL(CENTAUR,	5, X86_MODEL_ANY,	NO_SPECULATION),
+	VULNWL(INTEL,	5, X86_MODEL_ANY,	NO_SPECULATION),
+	VULNWL(NSC,	5, X86_MODEL_ANY,	NO_SPECULATION),
+
+	VULNWL_INTEL(ATOM_SALTWELL,		NO_SPECULATION),
+	VULNWL_INTEL(ATOM_SALTWELL_TABLET,	NO_SPECULATION),
+	VULNWL_INTEL(ATOM_SALTWELL_MID,		NO_SPECULATION),
+	VULNWL_INTEL(ATOM_BONNELL,		NO_SPECULATION),
+	VULNWL_INTEL(ATOM_BONNELL_MID,		NO_SPECULATION),
+
+	VULNWL_INTEL(ATOM_SILVERMONT,		NO_SSB | NO_L1TF),
+	VULNWL_INTEL(ATOM_SILVERMONT_X,		NO_SSB | NO_L1TF),
+	VULNWL_INTEL(ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF),
+	VULNWL_INTEL(ATOM_AIRMONT,		NO_SSB | NO_L1TF),
+	VULNWL_INTEL(XEON_PHI_KNL,		NO_SSB | NO_L1TF),
+	VULNWL_INTEL(XEON_PHI_KNM,		NO_SSB | NO_L1TF),
+
+	VULNWL_INTEL(CORE_YONAH,		NO_SSB),
+
+	VULNWL_INTEL(ATOM_AIRMONT_MID,		NO_L1TF),
+	VULNWL_INTEL(ATOM_GOLDMONT,		NO_L1TF),
+	VULNWL_INTEL(ATOM_GOLDMONT_X,		NO_L1TF),
+	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_L1TF),
+
+	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF),
+	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF),
+	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF),
+	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF),
+
+	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
+	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF),
 	{}
 };
 
-static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
-	{ X86_VENDOR_AMD },
-	{}
-};
-
-/* Only list CPUs which speculate but are non susceptible to SSB */
-static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_AIRMONT		},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT_X	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT_MID	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_CORE_YONAH		},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNL		},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNM		},
-	{ X86_VENDOR_AMD,	0x12,					},
-	{ X86_VENDOR_AMD,	0x11,					},
-	{ X86_VENDOR_AMD,	0x10,					},
-	{ X86_VENDOR_AMD,	0xf,					},
-	{}
-};
+static bool __init cpu_matches(unsigned long which)
+{
+	const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist);
 
-static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
-	/* in addition to cpu_no_speculation */
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT_X	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_AIRMONT		},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT_MID	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_AIRMONT_MID	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_GOLDMONT	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_GOLDMONT_X	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_GOLDMONT_PLUS	},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNL		},
-	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNM		},
-	{}
-};
+	return m && !!(m->driver_data & which);
+}
 
 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
 	u64 ia32_cap = 0;
 
-	if (x86_match_cpu(cpu_no_speculation))
+	if (cpu_matches(NO_SPECULATION))
 		return;
 
 	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
@@ -1010,15 +1018,14 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
 		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
 
-	if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
-	   !(ia32_cap & ARCH_CAP_SSB_NO) &&
+	if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
 	   !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
 
 	if (ia32_cap & ARCH_CAP_IBRS_ALL)
 		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
 
-	if (x86_match_cpu(cpu_no_meltdown))
+	if (cpu_matches(NO_MELTDOWN))
 		return;
 
 	/* Rogue Data Cache Load? No! */
@@ -1027,7 +1034,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 
 	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
 
-	if (x86_match_cpu(cpu_no_l1tf))
+	if (cpu_matches(NO_L1TF))
 		return;
 
 	setup_force_cpu_bug(X86_BUG_L1TF);