From foo@baz Mon May 21 21:56:07 CEST 2018
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 10 May 2018 19:13:18 +0200
Subject: x86/cpufeatures: Disentangle MSR_SPEC_CTRL enumeration from IBRS

From: Thomas Gleixner <tglx@linutronix.de>

commit 7eb8956a7fec3c1f0abc2a5517dada99ccc8a961 upstream

The availability of the SPEC_CTRL MSR is enumerated by a CPUID bit on
Intel and implied by IBRS or STIBP support on AMD. That's just confusing
and in case an AMD CPU has IBRS not supported because the underlying
problem has been fixed but has another bit valid in the SPEC_CTRL MSR,
the thing falls apart.

Add a synthetic feature bit X86_FEATURE_MSR_SPEC_CTRL to denote the
availability on both Intel and AMD.

While at it replace the boot_cpu_has() checks with static_cpu_has() where
possible. This prevents late microcode loading from exposing SPEC_CTRL, but
late loading is already very limited as it does not reevaluate the
mitigation options and other bits and pieces. Having static_cpu_has() is
the simplest and least fragile solution.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/x86/include/asm/cpufeatures.h |    1 +
 arch/x86/kernel/cpu/bugs.c         |   18 +++++++++++-------
 arch/x86/kernel/cpu/common.c       |    9 +++++++--
 arch/x86/kernel/cpu/intel.c        |    1 +
 4 files changed, 20 insertions(+), 9 deletions(-)

--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -206,6 +206,7 @@
 #define X86_FEATURE_RETPOLINE_AMD	( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
 #define X86_FEATURE_INTEL_PPIN		( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_CDP_L2		( 7*32+15) /* Code and Data Prioritization L2 */
+#define X86_FEATURE_MSR_SPEC_CTRL	( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
 
 #define X86_FEATURE_MBA			( 7*32+18) /* Memory Bandwidth Allocation */
 #define X86_FEATURE_RSB_CTXSW		( 7*32+19) /* "" Fill RSB on context switches */
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -64,7 +64,7 @@ void __init check_bugs(void)
 	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
 	 * init code as it is not enumerated and depends on the family.
 	 */
-	if (boot_cpu_has(X86_FEATURE_IBRS))
+	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
 		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 
 	/* Select the proper spectre mitigation before patching alternatives */
@@ -145,7 +145,7 @@ u64 x86_spec_ctrl_get_default(void)
 {
 	u64 msrval = x86_spec_ctrl_base;
 
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+	if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
 		msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
 	return msrval;
 }
@@ -155,10 +155,12 @@ void x86_spec_ctrl_set_guest(u64 guest_s
 {
 	u64 host = x86_spec_ctrl_base;
 
-	if (!boot_cpu_has(X86_FEATURE_IBRS))
+	/* Is MSR_SPEC_CTRL implemented ? */
+	if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
 		return;
 
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+	/* Intel controls SSB in MSR_SPEC_CTRL */
+	if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
 		host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
 
 	if (host != guest_spec_ctrl)
@@ -170,10 +172,12 @@ void x86_spec_ctrl_restore_host(u64 gues
 {
 	u64 host = x86_spec_ctrl_base;
 
-	if (!boot_cpu_has(X86_FEATURE_IBRS))
+	/* Is MSR_SPEC_CTRL implemented ? */
+	if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
 		return;
 
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+	/* Intel controls SSB in MSR_SPEC_CTRL */
+	if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
 		host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
 
 	if (host != guest_spec_ctrl)
@@ -631,7 +635,7 @@ int arch_prctl_spec_ctrl_get(struct task
 
 void x86_spec_ctrl_setup_ap(void)
 {
-	if (boot_cpu_has(X86_FEATURE_IBRS))
+	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
 		x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
 
 	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -761,19 +761,24 @@ static void init_speculation_control(str
 	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
 		set_cpu_cap(c, X86_FEATURE_IBRS);
 		set_cpu_cap(c, X86_FEATURE_IBPB);
+		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
 	}
 
 	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
 		set_cpu_cap(c, X86_FEATURE_STIBP);
 
-	if (cpu_has(c, X86_FEATURE_AMD_IBRS))
+	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
 		set_cpu_cap(c, X86_FEATURE_IBRS);
+		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+	}
 
 	if (cpu_has(c, X86_FEATURE_AMD_IBPB))
 		set_cpu_cap(c, X86_FEATURE_IBPB);
 
-	if (cpu_has(c, X86_FEATURE_AMD_STIBP))
+	if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
 		set_cpu_cap(c, X86_FEATURE_STIBP);
+		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+	}
 }
 
 void get_cpu_cap(struct cpuinfo_x86 *c)
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -188,6 +188,7 @@ static void early_init_intel(struct cpui
 		setup_clear_cpu_cap(X86_FEATURE_IBPB);
 		setup_clear_cpu_cap(X86_FEATURE_STIBP);
 		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
+		setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
 		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
 		setup_clear_cpu_cap(X86_FEATURE_SSBD);
 	}