Subject: [PATCH 4.16 087/110] x86/bugs: Rename _RDS to _SSBD

4.16-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

commit 9f65fb29374ee37856dbad847b4e121aab72b510 upstream

Intel collateral will reference the SSB mitigation bit in IA32_SPEC_CTRL[2]
as SSBD (Speculative Store Bypass Disable).

Hence, rename it to match.
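
Purely for review context, and not part of the patch: with the renamed
macro, setting the bit through the kernel's MSR accessors would look
like the hypothetical helper below (illustration only; no such helper
exists in the tree).

	#include <linux/types.h>	/* u64 */
	#include <asm/msr.h>		/* rdmsrl()/wrmsrl() */

	/* Hypothetical, for illustration: enable SSBD on this CPU. */
	static void ssbd_set_example(void)
	{
		u64 val;

		rdmsrl(MSR_IA32_SPEC_CTRL, val);
		val |= SPEC_CTRL_SSBD;	/* IA32_SPEC_CTRL bit 2 */
		wrmsrl(MSR_IA32_SPEC_CTRL, val);
	}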

It is not yet clear what the MSR_IA32_ARCH_CAPABILITIES (0x10a) bit 4
name is going to be. Following the rename it would be SSBD_NO, but that
expands to the awkward "Speculative Store Bypass Disable No".
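
For context, bit 4 is consumed in the cpu/common.c hunk further down;
under the new name the susceptibility check reads:

	rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

	if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
	    !(ia32_cap & ARCH_CAP_SSBD_NO))
		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);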

Also fix the missing space in the X86_FEATURE_AMD_SSBD definition.

[ tglx: Fixup x86_amd_rds_enable() and rds_tif_to_amd_ls_cfg() as well ]

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/x86/include/asm/cpufeatures.h | 4 ++--
arch/x86/include/asm/msr-index.h | 10 +++++-----
arch/x86/include/asm/spec-ctrl.h | 12 ++++++------
arch/x86/include/asm/thread_info.h | 6 +++---
arch/x86/kernel/cpu/amd.c | 14 +++++++-------
arch/x86/kernel/cpu/bugs.c | 36 ++++++++++++++++++------------------
arch/x86/kernel/cpu/common.c | 2 +-
arch/x86/kernel/cpu/intel.c | 2 +-
arch/x86/kernel/process.c | 8 ++++----
arch/x86/kvm/cpuid.c | 2 +-
arch/x86/kvm/vmx.c | 6 +++---
11 files changed, 51 insertions(+), 51 deletions(-)
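
A note for reviewers, not part of the patch: ssbd_tif_to_spec_ctrl()
in spec-ctrl.h relies on TIF_SSBD (bit 5) sitting at or above
SPEC_CTRL_SSBD_SHIFT (bit 2), which is what the BUILD_BUG_ON guards,
so a single right shift by 3 moves the per-task flag onto the MSR bit.
A self-contained userspace sketch of that arithmetic, with the
constants copied from the patch:

	#include <assert.h>
	#include <stdint.h>

	#define SPEC_CTRL_SSBD_SHIFT	2
	#define SPEC_CTRL_SSBD		(1ULL << SPEC_CTRL_SSBD_SHIFT)
	#define TIF_SSBD		5
	#define _TIF_SSBD		(1ULL << TIF_SSBD)

	static uint64_t ssbd_tif_to_spec_ctrl(uint64_t tifn)
	{
		/* Flags-word bit 5 becomes IA32_SPEC_CTRL bit 2. */
		return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
	}

	int main(void)
	{
		assert(ssbd_tif_to_spec_ctrl(_TIF_SSBD) == SPEC_CTRL_SSBD);
		assert(ssbd_tif_to_spec_ctrl(0) == 0);
		return 0;
	}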

--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -215,7 +215,7 @@
#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
-#define X86_FEATURE_AMD_RDS (7*32+24) /* "" AMD RDS implementation */
+#define X86_FEATURE_AMD_SSBD ( 7*32+24) /* "" AMD SSBD implementation */

/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
@@ -335,7 +335,7 @@
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
-#define X86_FEATURE_RDS (18*32+31) /* Reduced Data Speculation */
+#define X86_FEATURE_SSBD (18*32+31) /* Speculative Store Bypass Disable */

/*
* BUG word(s)
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -42,8 +42,8 @@
#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
-#define SPEC_CTRL_RDS_SHIFT 2 /* Reduced Data Speculation bit */
-#define SPEC_CTRL_RDS (1 << SPEC_CTRL_RDS_SHIFT) /* Reduced Data Speculation */
+#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
+#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */

#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
@@ -70,10 +70,10 @@
#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
-#define ARCH_CAP_RDS_NO (1 << 4) /*
+#define ARCH_CAP_SSBD_NO (1 << 4) /*
* Not susceptible to Speculative Store Bypass
- * attack, so no Reduced Data Speculation control
- * required.
+ * attack, so no Speculative Store Bypass
+ * control required.
*/

#define MSR_IA32_BBL_CR_CTL 0x00000119
--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -17,20 +17,20 @@ extern void x86_spec_ctrl_restore_host(u

/* AMD specific Speculative Store Bypass MSR data */
extern u64 x86_amd_ls_cfg_base;
-extern u64 x86_amd_ls_cfg_rds_mask;
+extern u64 x86_amd_ls_cfg_ssbd_mask;

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;

-static inline u64 rds_tif_to_spec_ctrl(u64 tifn)
+static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
{
- BUILD_BUG_ON(TIF_RDS < SPEC_CTRL_RDS_SHIFT);
- return (tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT);
+ BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
+ return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}

-static inline u64 rds_tif_to_amd_ls_cfg(u64 tifn)
+static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
{
- return (tifn & _TIF_RDS) ? x86_amd_ls_cfg_rds_mask : 0ULL;
+ return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
}

extern void speculative_store_bypass_update(void);
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -79,7 +79,7 @@ struct thread_info {
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
-#define TIF_RDS 5 /* Reduced data speculation */
+#define TIF_SSBD 5 /* Reduced data speculation */
#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SECCOMP 8 /* secure computing */
@@ -106,7 +106,7 @@ struct thread_info {
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
-#define _TIF_RDS (1 << TIF_RDS)
+#define _TIF_SSBD (1 << TIF_SSBD)
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
@@ -146,7 +146,7 @@ struct thread_info {

/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW \
- (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_RDS)
+ (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)

#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -567,12 +567,12 @@ static void bsp_init_amd(struct cpuinfo_
}
/*
* Try to cache the base value so further operations can
- * avoid RMW. If that faults, do not enable RDS.
+ * avoid RMW. If that faults, do not enable SSBD.
*/
if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
- setup_force_cpu_cap(X86_FEATURE_RDS);
- setup_force_cpu_cap(X86_FEATURE_AMD_RDS);
- x86_amd_ls_cfg_rds_mask = 1ULL << bit;
+ setup_force_cpu_cap(X86_FEATURE_SSBD);
+ setup_force_cpu_cap(X86_FEATURE_AMD_SSBD);
+ x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
}
}
}
@@ -920,9 +920,9 @@ static void init_amd(struct cpuinfo_x86
if (!cpu_has(c, X86_FEATURE_XENPV))
set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

- if (boot_cpu_has(X86_FEATURE_AMD_RDS)) {
- set_cpu_cap(c, X86_FEATURE_RDS);
- set_cpu_cap(c, X86_FEATURE_AMD_RDS);
+ if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) {
+ set_cpu_cap(c, X86_FEATURE_SSBD);
+ set_cpu_cap(c, X86_FEATURE_AMD_SSBD);
}
}

--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -45,10 +45,10 @@ static u64 __ro_after_init x86_spec_ctrl

/*
* AMD specific MSR info for Speculative Store Bypass control.
- * x86_amd_ls_cfg_rds_mask is initialized in identify_boot_cpu().
+ * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
*/
u64 __ro_after_init x86_amd_ls_cfg_base;
-u64 __ro_after_init x86_amd_ls_cfg_rds_mask;
+u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

void __init check_bugs(void)
{
@@ -146,7 +146,7 @@ u64 x86_spec_ctrl_get_default(void)
u64 msrval = x86_spec_ctrl_base;

if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
- msrval |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+ msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
return msrval;
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
@@ -159,7 +159,7 @@ void x86_spec_ctrl_set_guest(u64 guest_s
return;

if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
- host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+ host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);

if (host != guest_spec_ctrl)
wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
@@ -174,18 +174,18 @@ void x86_spec_ctrl_restore_host(u64 gues
return;

if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
- host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+ host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);

if (host != guest_spec_ctrl)
wrmsrl(MSR_IA32_SPEC_CTRL, host);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);

-static void x86_amd_rds_enable(void)
+static void x86_amd_ssb_disable(void)
{
- u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask;
+ u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

- if (boot_cpu_has(X86_FEATURE_AMD_RDS))
+ if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

@@ -473,7 +473,7 @@ static enum ssb_mitigation_cmd __init __
enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
enum ssb_mitigation_cmd cmd;

- if (!boot_cpu_has(X86_FEATURE_RDS))
+ if (!boot_cpu_has(X86_FEATURE_SSBD))
return mode;

cmd = ssb_parse_cmdline();
@@ -507,7 +507,7 @@ static enum ssb_mitigation_cmd __init __
/*
* We have three CPU feature flags that are in play here:
* - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
- * - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass
+ * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
* - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
*/
if (mode == SPEC_STORE_BYPASS_DISABLE) {
@@ -518,12 +518,12 @@ static enum ssb_mitigation_cmd __init __
*/
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
- x86_spec_ctrl_base |= SPEC_CTRL_RDS;
- x86_spec_ctrl_mask &= ~SPEC_CTRL_RDS;
- x86_spec_ctrl_set(SPEC_CTRL_RDS);
+ x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
+ x86_spec_ctrl_mask &= ~SPEC_CTRL_SSBD;
+ x86_spec_ctrl_set(SPEC_CTRL_SSBD);
break;
case X86_VENDOR_AMD:
- x86_amd_rds_enable();
+ x86_amd_ssb_disable();
break;
}
}
@@ -556,16 +556,16 @@ static int ssb_prctl_set(struct task_str
if (task_spec_ssb_force_disable(task))
return -EPERM;
task_clear_spec_ssb_disable(task);
- update = test_and_clear_tsk_thread_flag(task, TIF_RDS);
+ update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
break;
case PR_SPEC_DISABLE:
task_set_spec_ssb_disable(task);
- update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
+ update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
break;
case PR_SPEC_FORCE_DISABLE:
task_set_spec_ssb_disable(task);
task_set_spec_ssb_force_disable(task);
- update = !test_and_set_tsk_thread_flag(task, TIF_RDS);
+ update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
break;
default:
return -ERANGE;
@@ -635,7 +635,7 @@ void x86_spec_ctrl_setup_ap(void)
x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);

if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
- x86_amd_rds_enable();
+ x86_amd_ssb_disable();
}

#ifdef CONFIG_SYSFS
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -950,7 +950,7 @@ static void __init cpu_set_bug_bits(stru
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
- !(ia32_cap & ARCH_CAP_RDS_NO))
+ !(ia32_cap & ARCH_CAP_SSBD_NO))
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);

if (x86_match_cpu(cpu_no_speculation))
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -189,7 +189,7 @@ static void early_init_intel(struct cpui
setup_clear_cpu_cap(X86_FEATURE_STIBP);
setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
- setup_clear_cpu_cap(X86_FEATURE_RDS);
+ setup_clear_cpu_cap(X86_FEATURE_SSBD);
}

/*
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -283,11 +283,11 @@ static __always_inline void __speculativ
{
u64 msr;

- if (static_cpu_has(X86_FEATURE_AMD_RDS)) {
- msr = x86_amd_ls_cfg_base | rds_tif_to_amd_ls_cfg(tifn);
+ if (static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+ msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
wrmsrl(MSR_AMD64_LS_CFG, msr);
} else {
- msr = x86_spec_ctrl_base | rds_tif_to_spec_ctrl(tifn);
+ msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
wrmsrl(MSR_IA32_SPEC_CTRL, msr);
}
}
@@ -329,7 +329,7 @@ void __switch_to_xtra(struct task_struct
if ((tifp ^ tifn) & _TIF_NOCPUID)
set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));

- if ((tifp ^ tifn) & _TIF_RDS)
+ if ((tifp ^ tifn) & _TIF_SSBD)
__speculative_store_bypass_update(tifn);
}

--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -402,7 +402,7 @@ static inline int __do_cpuid_ent(struct

/* cpuid 7.0.edx*/
const u32 kvm_cpuid_7_0_edx_x86_features =
- F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(RDS) |
+ F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(SSBD) |
F(ARCH_CAPABILITIES);

/* all calls to cpuid_count() should be made on the same cpu */
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3277,7 +3277,7 @@ static int vmx_get_msr(struct kvm_vcpu *
if (!msr_info->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
- !guest_cpuid_has(vcpu, X86_FEATURE_RDS))
+ !guest_cpuid_has(vcpu, X86_FEATURE_SSBD))
return 1;

msr_info->data = to_vmx(vcpu)->spec_ctrl;
@@ -3399,11 +3399,11 @@ static int vmx_set_msr(struct kvm_vcpu *
if (!msr_info->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
- !guest_cpuid_has(vcpu, X86_FEATURE_RDS))
+ !guest_cpuid_has(vcpu, X86_FEATURE_SSBD))
return 1;

/* The STIBP bit doesn't fault even if it's not advertised */
- if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_RDS))
+ if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
return 1;

vmx->spec_ctrl = data;
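
One last review note rather than patch content: the ssb_prctl_set()
hunk above is what userspace reaches through prctl(2). A minimal
consumer might look like this sketch (it assumes a kernel with this
series applied; the fallback defines cover libc headers that predate
the new constants):

	#include <stdio.h>
	#include <sys/prctl.h>

	#ifndef PR_SET_SPECULATION_CTRL
	#define PR_GET_SPECULATION_CTRL	52
	#define PR_SET_SPECULATION_CTRL	53
	#define PR_SPEC_STORE_BYPASS	0
	#define PR_SPEC_DISABLE		(1UL << 2)
	#endif

	int main(void)
	{
		/*
		 * Request SSBD for this task; the kernel sets TIF_SSBD,
		 * which __speculative_store_bypass_update() turns into
		 * SPEC_CTRL_SSBD (Intel) or the LS_CFG mask (AMD).
		 */
		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
			  PR_SPEC_DISABLE, 0, 0))
			perror("PR_SET_SPECULATION_CTRL");

		printf("ssb state: %d\n",
		       prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
			     0, 0, 0));
		return 0;
	}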
