Date	Thu, 18 Jan 2018 14:48:34 +0100
From	Peter Zijlstra <>
Subject	[PATCH 34/35] x86/kvm: Add IBPB support
From: Ashok Raj <ashok.raj@intel.com>
Add MSR passthrough for MSR_IA32_PRED_CMD and place branch predictor barriers when switching between VMs to avoid inter-VM Spectre-v2 attacks.
[peterz: rebase and changelog rewrite]
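
For reference, the indirect_branch_prediction_barrier() used below is introduced
earlier in this series; it boils down to writing the IBPB command bit into
MSR_IA32_PRED_CMD. A minimal sketch (the in-tree helper uses an
alternatives-patched MSR write, so the exact definition differs):

	/* Sketch only; the real helper lives in asm/nospec-branch.h. */
	#define MSR_IA32_PRED_CMD	0x00000049	/* Prediction Command */
	#define PRED_CMD_IBPB		(1UL << 0)	/* barrier command bit */

	static inline void indirect_branch_prediction_barrier(void)
	{
		/* Flush indirect branch predictions learned before this point. */
		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
	}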
Cc: Asit Mallick <asit.k.mallick@intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Arjan Van De Ven <arjan.van.de.ven@intel.com>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Jun Nakajima <jun.nakajima@intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: David Woodhouse <dwmw@amazon.co.uk>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 arch/x86/kvm/svm.c | 8 ++++++++
 arch/x86/kvm/vmx.c | 8 ++++++++
 2 files changed, 16 insertions(+)
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -252,6 +252,7 @@ static const struct svm_direct_access_ms
 	{ .index = MSR_SYSCALL_MASK,			.always = true },
 #endif
 	{ .index = MSR_IA32_SPEC_CTRL,			.always = true },
+	{ .index = MSR_IA32_PRED_CMD,			.always = true },
 	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
 	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
 	{ .index = MSR_IA32_LASTINTFROMIP,		.always = false },
@@ -532,6 +533,7 @@ struct svm_cpu_data {
 	struct kvm_ldttss_desc *tss_desc;

 	struct page *save_area;
+	struct vmcb *current_vmcb;
 };

 static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
@@ -1709,11 +1711,13 @@ static void svm_free_vcpu(struct kvm_vcp
 	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
 	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, svm);
+	indirect_branch_prediction_barrier();
 }

 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
+	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
 	int i;

 	if (unlikely(cpu != vcpu->cpu)) {
@@ -1742,6 +1746,10 @@ static void svm_vcpu_load(struct kvm_vcp
 	if (static_cpu_has(X86_FEATURE_RDTSCP))
 		wrmsrl(MSR_TSC_AUX, svm->tsc_aux);

+	if (sd->current_vmcb != svm->vmcb) {
+		sd->current_vmcb = svm->vmcb;
+		indirect_branch_prediction_barrier();
+	}
 	avic_vcpu_load(vcpu, cpu);
 }

--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2280,6 +2280,7 @@ static void vmx_vcpu_load(struct kvm_vcp
 	if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
 		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
 		vmcs_load(vmx->loaded_vmcs->vmcs);
+		indirect_branch_prediction_barrier();
 	}

 	if (!already_loaded) {
@@ -3837,6 +3838,11 @@ static void free_loaded_vmcs(struct load
 	free_vmcs(loaded_vmcs->vmcs);
 	loaded_vmcs->vmcs = NULL;
 	WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
+	/*
+	 * The VMCS could be recycled, causing a false negative in
+	 * vmx_vcpu_load; block speculative execution.
+	 */
+	indirect_branch_prediction_barrier();
 }

 static void free_kvm_area(void)
@@ -6804,6 +6810,8 @@ static __init int hardware_setup(void)
 	 */
 	if (boot_cpu_has(X86_FEATURE_SPEC_CTRL))
 		vmx_disable_intercept_for_msr(MSR_IA32_SPEC_CTRL, false);
+	if (boot_cpu_has(X86_FEATURE_IBPB))
+		vmx_disable_intercept_for_msr(MSR_IA32_PRED_CMD, false);
 	vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
 	vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
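
For readers unfamiliar with the passthrough mechanism: vmx_disable_intercept_for_msr()
clears the MSR's read and write bits in the 4K VMX MSR bitmap, so guest accesses to
MSR_IA32_PRED_CMD no longer cause VM exits. A simplified sketch of that bitmap
manipulation (layout per the Intel SDM; the function below is illustrative, not the
kernel's exact code):

	/*
	 * Illustrative sketch: the 4K MSR bitmap holds four 1K regions
	 * (read-low, read-high, write-low, write-high), one bit per MSR.
	 * Clearing a bit makes the corresponding access non-exiting.
	 */
	static void msr_bitmap_passthrough(unsigned long *msr_bitmap, u32 msr)
	{
		int f = sizeof(unsigned long);

		if (msr <= 0x1fff) {
			__clear_bit(msr, msr_bitmap + 0x000 / f); /* read-low */
			__clear_bit(msr, msr_bitmap + 0x800 / f); /* write-low */
		} else if (msr >= 0xc0000000 && msr <= 0xc0001fff) {
			msr &= 0x1fff;
			__clear_bit(msr, msr_bitmap + 0x400 / f); /* read-high */
			__clear_bit(msr, msr_bitmap + 0xc00 / f); /* write-high */
		}
	}

On the SVM side the same effect comes from the direct_access_msrs[] entry added
above: .always = true marks MSR_IA32_PRED_CMD for passthrough in the MSR
permission map.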