From c441e50d54c697c6b5e309efd9de3740972a6975 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini <pbonzini@redhat.com>
Date: Mon, 2 Jul 2018 13:07:14 +0200
Subject: [PATCH 35/40] x86/KVM/VMX: Add L1D flush logic
Patch-mainline: v4.19-rc1
Git-commit: c595ceee45707f00f64f61c54fb64ef0cc0b4e85
References: bsc#1089343 CVE-2018-3646
commit c595ceee45707f00f64f61c54fb64ef0cc0b4e85 upstream

Add the logic for flushing L1D on VMENTER. The flush depends on the static
key being enabled and the new l1tf_flush_l1d flag being set.
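
Condensed, the check performed right before VMENTER boils down to the
following (a simplified sketch of the vmx_vcpu_run() hunk below, not
the literal diff; vmx_l1d_should_flush is the static key referenced
above):

    /* Gate on the static key first, then on the per-vCPU flag */
    if (static_branch_unlikely(&vmx_l1d_should_flush)) {
            if (vcpu->arch.l1tf_flush_l1d)
                    vmx_l1d_flush(vcpu);
    }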
The flag is set (see the condensed sketch after this list):
 - Always, if the flush module parameter is 'always'
 - Conditionally at:
   - Entry to vcpu_run(), i.e. after executing user space
   - From the sched_in notifier, i.e. when switching to a vCPU thread
   - From vmexit handlers which are considered unsafe, i.e. where
     sensitive data can be brought into L1D:
     - The emulator, which could be a good target for other speculative
       execution-based threats
     - The MMU, which can bring host page tables into the L1D cache
     - External interrupts
     - Nested operations that require the MMU (see above), that is
       vmptrld, vmptrst, vmclear, vmwrite and vmread
     - When handling invept and invvpid
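
The flag is consumed and conditionally re-armed by vmx_l1d_flush()
itself; condensed from the vmx.c hunk below:

    /*
     * Keep the flush bit set only in 'always' mode; otherwise it
     * stays clear until vcpu_run() or one of the unsafe VMEXIT
     * handlers arms it again.
     */
    always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
    vcpu->arch.l1tf_flush_l1d = always;

    vcpu->stat.l1d_flush++;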
[ tglx: Split out from combo patch and reduced to a single flag ]

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
---
 arch/x86/include/asm/kvm_host.h |  4 ++++
 arch/x86/kvm/mmu.c              |  1 +
 arch/x86/kvm/vmx.c              | 23 ++++++++++++++++++++++-
 arch/x86/kvm/x86.c              |  8 ++++++++
 4 files changed, 35 insertions(+), 1 deletion(-)

--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -706,6 +706,9 @@ struct kvm_vcpu_arch {

/* be preempted when it's in kernel-mode(cpl=0) */
bool preempted_in_kernel;
+
+ /* Flush the L1 Data cache for L1TF mitigation on VMENTER */
+ bool l1tf_flush_l1d;
};

struct kvm_lpage_info {
@@ -886,6 +889,7 @@ struct kvm_vcpu_stat {
u64 signal_exits;
u64 irq_window_exits;
u64 nmi_window_exits;
+ u64 l1d_flush;
u64 halt_exits;
u64 halt_successful_poll;
u64 halt_attempted_poll;
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3836,6 +3836,7 @@ int kvm_handle_page_fault(struct kvm_vcp
{
int r = 1;

+ vcpu->arch.l1tf_flush_l1d = true;
switch (vcpu->arch.apf.host_apf_reason) {
default:
trace_kvm_page_fault(fault_address, error_code);
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9001,9 +9001,20 @@ static int vmx_handle_exit(struct kvm_vc
#define L1D_CACHE_ORDER 4
static void *vmx_l1d_flush_pages;

-static void __maybe_unused vmx_l1d_flush(void)
+static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
{
int size = PAGE_SIZE << L1D_CACHE_ORDER;
+ bool always;
+
+ /*
+ * If the mitigation mode is 'flush always', keep the flush bit
+ * set, otherwise clear it. It gets set again either from
+ * vcpu_run() or from one of the unsafe VMEXIT handlers.
+ */
+ always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
+ vcpu->arch.l1tf_flush_l1d = always;
+
+ vcpu->stat.l1d_flush++;

if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
@@ -9274,6 +9285,7 @@ static void vmx_handle_external_intr(str
[ss]"i"(__KERNEL_DS),
[cs]"i"(__KERNEL_CS)
);
+ vcpu->arch.l1tf_flush_l1d = true;
}
}
STACK_FRAME_NON_STANDARD(vmx_handle_external_intr);
@@ -9528,6 +9540,12 @@ static void __noclone vmx_vcpu_run(struc
x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);

vmx->__launched = vmx->loaded_vmcs->launched;
+
+ if (static_branch_unlikely(&vmx_l1d_should_flush)) {
+ if (vcpu->arch.l1tf_flush_l1d)
+ vmx_l1d_flush(vcpu);
+ }
+
asm(
/* Store host registers */
"push %%" _ASM_DX "; push %%" _ASM_BP ";"
@@ -11241,6 +11259,9 @@ static int nested_vmx_run(struct kvm_vcp
return 1;
}

+ /* Hide L1D cache contents from the nested guest. */
+ vmx->vcpu.arch.l1tf_flush_l1d = true;
+
/*
* We're finally done with prerequisite checking, and can start with
* the nested entry.
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -184,6 +184,7 @@ struct kvm_stats_debugfs_item debugfs_en
{ "irq_injections", VCPU_STAT(irq_injections) },
{ "nmi_injections", VCPU_STAT(nmi_injections) },
{ "req_event", VCPU_STAT(req_event) },
+ { "l1d_flush", VCPU_STAT(l1d_flush) },
{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
@@ -4594,6 +4595,9 @@ static int kvm_write_guest_virt_helper(g
void *data = val;
int r = X86EMUL_CONTINUE;

+ /* kvm_write_guest_virt_system can pull in tons of pages. */
+ vcpu->arch.l1tf_flush_l1d = true;
+
while (bytes) {
gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
access,
@@ -5756,6 +5760,8 @@ int x86_emulate_instruction(struct kvm_v
bool writeback = true;
bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;

+ vcpu->arch.l1tf_flush_l1d = true;
+
/*
* Clear write_fault_to_shadow_pgtable here to ensure it is
* never reused.
@@ -7204,6 +7210,7 @@ static int vcpu_run(struct kvm_vcpu *vcp
struct kvm *kvm = vcpu->kvm;

vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+ vcpu->arch.l1tf_flush_l1d = true;

for (;;) {
if (kvm_vcpu_running(vcpu)) {
@@ -8217,6 +8224,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcp

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
+ vcpu->arch.l1tf_flush_l1d = true;
kvm_x86_ops->sched_in(vcpu, cpu);
}