From 5ed57eeec83f8e851029df9ebbe679f9d15c30a8 Mon Sep 17 00:00:00 2001
From: Davidlohr Bueso <dbueso@suse.de>
Date: Thu, 17 May 2018 12:17:19 -0700
Subject: [PATCH] kvm: Introduce nopvspin kernel parameter
Patch-mainline: never, SUSE specific
References: bsc#1056427

This adds a SLE-specific feature to disable paravirtual spinlocks
in favor of the bare-metal behavior under dedicated 1:1 cpu-to-vcpu
mappings.
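
For example, on a guest whose vCPUs are pinned 1:1 to host CPUs, the
parameter is simply appended to the guest kernel command line, e.g. in
a GRUB-style boot entry (kernel image path and root device are only
illustrative):

    linux /boot/vmlinuz root=/dev/vda2 kvm_nopvspin

The guest then uses the native qspinlock slowpath instead of the
paravirtual one.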

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
---
 Documentation/admin-guide/kernel-parameters.txt |    4 +++
 arch/x86/include/asm/qspinlock.h                |   11 ++++++++-
 arch/x86/kernel/kvm.c                           |   29 ++++++++++++++++++++++++
 arch/x86/kernel/paravirt.c                      |    8 ++++++
 arch/x86/kernel/smpboot.c                       |    2 +
 5 files changed, 53 insertions(+), 1 deletion(-)

--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1899,6 +1899,10 @@
 			feature (tagged TLBs) on capable Intel chips.
 			Default is 1 (enabled)
 
+	kvm_nopvspin	[X86,KVM]
+			Disables the paravirtualized spinlock slowpath
+			optimizations for KVM.
+
 	l1tf=           [X86] Control mitigation of the L1TF vulnerability on
 			      affected CPUs
 
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_QSPINLOCK_H
 #define _ASM_X86_QSPINLOCK_H
 
+#include <linux/jump_label.h>
 #include <asm/cpufeature.h>
 #include <asm-generic/qspinlock_types.h>
 #include <asm/paravirt.h>
@@ -46,10 +47,14 @@ static inline void queued_spin_unlock(st
 #endif
 
 #ifdef CONFIG_PARAVIRT
+DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
+
+void native_pv_lock_init(void) __init;
+
 #define virt_spin_lock virt_spin_lock
 static inline bool virt_spin_lock(struct qspinlock *lock)
 {
-	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+	if (!static_branch_likely(&virt_spin_lock_key))
 		return false;
 
 	/*
@@ -65,6 +70,10 @@ static inline bool virt_spin_lock(struct
 
 	return true;
 }
+#else
+static inline void native_pv_lock_init(void)
+{
+}
 #endif /* CONFIG_PARAVIRT */
 
 #include <asm-generic/qspinlock.h>
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -47,6 +47,22 @@
 #include <asm/hypervisor.h>
 #include <asm/kvm_guest.h>
 
+/*
+ * SLE-specific.
+ *
+ * Allow disabling of PV spinlock in kernel command line (kernel param).
+ * Similar idea to what Xen does. Upstream, however, uses a different
+ * approach such that hypervisor admins can pass the KVM_HINTS_DEDICATED
+ * hint via qemu.
+ */
+static bool kvm_pvspin = true;
+static __init int kvm_parse_nopvspin(char *arg)
+{
+	kvm_pvspin = false;
+	return 0;
+}
+early_param("kvm_nopvspin", kvm_parse_nopvspin);
+
 static int kvmapf = 1;
 
 static int parse_no_kvmapf(char *arg)
@@ -454,6 +470,13 @@ static void __init sev_map_percpu_data(v
 }
 
 #ifdef CONFIG_SMP
+static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
+{
+	native_smp_prepare_cpus(max_cpus);
+	if (!kvm_pvspin)
+		static_branch_disable(&virt_spin_lock_key);
+}
+
 static void __init kvm_smp_prepare_boot_cpu(void)
 {
 	/*
@@ -524,6 +547,7 @@ void __init kvm_guest_init(void)
 		kvm_setup_vsyscall_timeinfo();
 
 #ifdef CONFIG_SMP
+	smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
 	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
 	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
 				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
@@ -680,6 +704,11 @@ void __init kvm_spinlock_init(void)
 	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
 		return;
 
+	if (!kvm_pvspin) {
+		printk(KERN_INFO "KVM: disabled paravirtual spinlock\n");
+		return;
+	}
+
 	__pv_init_lock_hash();
 	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
 	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -121,6 +121,14 @@ unsigned paravirt_patch_jmp(void *insnbu
 	return 5;
 }
 
+DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
+
+void __init native_pv_lock_init(void)
+{
+	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+		static_branch_disable(&virt_spin_lock_key);
+}
+
 /* Neat trick to map patch type back to the call within the
  * corresponding structure. */
 static void *get_call_destination(u8 type)
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1334,6 +1334,8 @@ void __init native_smp_prepare_cpus(unsi
 	pr_info("CPU0: ");
 	print_cpu_info(&cpu_data(0));
 
+	native_pv_lock_init();
+
 	uv_system_init();
 
 	set_mtrr_aps_delayed_init();