From: David Hildenbrand <david@redhat.com>
Date: Thu, 10 Aug 2017 23:15:28 +0200
Subject: KVM: VMX: cleanup EPTP definitions
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Patch-mainline: v4.14-rc1
Git-commit: bb97a016937816768ffb8cd7a49d9507e8a1bbda
References: bsc#1077761

Don't use shifts; tag the definitions correctly as EPTP and use
better-matching names (PWL vs. GAW).
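
The renamed constants encode the same EPTP bit layout as before: PWL
(page-walk length) replaces GAW (guest address width) for bits 5:3, and
the memory-type field stays in bits 2:0.  As a quick illustration (not
part of this patch), a minimal stand-alone C sketch using only the
constant values visible in the hunks below:

	#include <stdint.h>
	#include <stdio.h>

	/* old, shift-based definitions */
	#define VMX_EPT_DEFAULT_MT	0x6ull
	#define VMX_EPT_DEFAULT_GAW	3
	#define VMX_EPT_GAW_EPTP_SHIFT	3

	/* new, pre-shifted definitions */
	#define VMX_EPTP_MT_WB		0x6ull
	#define VMX_EPTP_PWL_4		0x18ull

	int main(void)
	{
		uint64_t eptp_old = VMX_EPT_DEFAULT_MT |
				    (VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT);
		uint64_t eptp_new = VMX_EPTP_MT_WB | VMX_EPTP_PWL_4;

		/* both print 0x1e: write-back memory type, 4-level page walk */
		printf("old=%#llx new=%#llx\n",
		       (unsigned long long)eptp_old,
		       (unsigned long long)eptp_new);
		return 0;
	}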

Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
Acked-by: Alexander Graf <agraf@suse.de>
---
 arch/x86/include/asm/vmx.h |   11 ++++++-----
 arch/x86/kvm/vmx.c         |   25 +++++++++++--------------
 2 files changed, 17 insertions(+), 19 deletions(-)

--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -468,12 +468,13 @@
 #define VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT      (1ull << 10) /* (42 - 32) */
 #define VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT   (1ull << 11) /* (43 - 32) */
 
-#define VMX_EPT_DEFAULT_GAW			3
-#define VMX_EPT_MAX_GAW				0x4
 #define VMX_EPT_MT_EPTE_SHIFT			3
-#define VMX_EPT_GAW_EPTP_SHIFT			3
-#define VMX_EPT_AD_ENABLE_BIT			(1ull << 6)
-#define VMX_EPT_DEFAULT_MT			0x6ull
+#define VMX_EPTP_PWL_MASK			0x38ull
+#define VMX_EPTP_PWL_4				0x18ull
+#define VMX_EPTP_AD_ENABLE_BIT			(1ull << 6)
+#define VMX_EPTP_MT_MASK			0x7ull
+#define VMX_EPTP_MT_WB				0x6ull
+#define VMX_EPTP_MT_UC				0x0ull
 #define VMX_EPT_READABLE_MASK			0x1ull
 #define VMX_EPT_WRITABLE_MASK			0x2ull
 #define VMX_EPT_EXECUTABLE_MASK			0x4ull
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4288,14 +4288,12 @@
 
 static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
 {
-	u64 eptp;
+	u64 eptp = VMX_EPTP_MT_WB | VMX_EPTP_PWL_4;
 
 	/* TODO write the value reading from MSR */
-	eptp = VMX_EPT_DEFAULT_MT |
-		VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
 	if (enable_ept_ad_bits &&
 	    (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu)))
-		eptp |= VMX_EPT_AD_ENABLE_BIT;
+		eptp |= VMX_EPTP_AD_ENABLE_BIT;
 	eptp |= (root_hpa & PAGE_MASK);
 
 	return eptp;
@@ -7865,16 +7863,15 @@
 static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	u64 mask = address & 0x7;
 	int maxphyaddr = cpuid_maxphyaddr(vcpu);
 
 	/* Check for memory type validity */
-	switch (mask) {
-	case 0:
+	switch (address & VMX_EPTP_MT_MASK) {
+	case VMX_EPTP_MT_UC:
 		if (!(vmx->nested.nested_vmx_ept_caps & VMX_EPTP_UC_BIT))
 			return false;
 		break;
-	case 6:
+	case VMX_EPTP_MT_WB:
 		if (!(vmx->nested.nested_vmx_ept_caps & VMX_EPTP_WB_BIT))
 			return false;
 		break;
@@ -7882,8 +7879,8 @@
 		return false;
 	}
 
-	/* Bits 5:3 must be 3 */
-	if (((address >> VMX_EPT_GAW_EPTP_SHIFT) & 0x7) != VMX_EPT_DEFAULT_GAW)
+	/* only 4 levels page-walk length are valid */
+	if ((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4)
 		return false;
 
 	/* Reserved bits should not be set */
@@ -7891,7 +7888,7 @@
 		return false;
 
 	/* AD, if set, should be supported */
-	if ((address & VMX_EPT_AD_ENABLE_BIT)) {
+	if (address & VMX_EPTP_AD_ENABLE_BIT) {
 		if (!(vmx->nested.nested_vmx_ept_caps & VMX_EPT_AD_BIT))
 			return false;
 	}
@@ -7919,7 +7916,7 @@
 				     &address, index * 8, 8))
 		return 1;
 
-	accessed_dirty = !!(address & VMX_EPT_AD_ENABLE_BIT);
+	accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT);
 
 	/*
 	 * If the (L2) guest does a vmfunc to the currently
@@ -9476,7 +9473,7 @@
 
 static int get_ept_level(void)
 {
-	return VMX_EPT_DEFAULT_GAW + 1;
+	return 4;
 }
 
 static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
@@ -9677,7 +9674,7 @@
 
 static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
 {
-	return nested_ept_get_cr3(vcpu) & VMX_EPT_AD_ENABLE_BIT;
+	return nested_ept_get_cr3(vcpu) & VMX_EPTP_AD_ENABLE_BIT;
 }
 
 /* Callbacks for nested_ept_init_mmu_context: */