From: Joerg Roedel <jroedel@suse.de>
Date: Mon, 21 Mar 2022 10:33:51 +0100
Subject: x86/sev: Unroll string mmio with CC_ATTR_GUEST_UNROLL_STRING_IO
Git-commit: 4009a4ac82dd95b8cd2b62bd30019476983f0aff
Patch-mainline: v5.18-rc1
References: git-fixes

The io-specific memcpy/memset functions use string mmio accesses to do
their work. Under SEV, the hypervisor can't emulate these instructions
because they read/write directly from/to encrypted memory.
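
For illustration: on x86 these io helpers were plain memcpy()/memset()
casts over the __iomem pointer, and the arch memcpy/memset
implementations are built around REP MOVS/STOS, so a single string
instruction touches the whole MMIO range. A simplified sketch of what
that boils down to (not the actual arch/x86/lib code; the helper name
is made up):

  /* Sketch only: one REP MOVSB instruction covers the whole range. */
  static inline void rep_movsb_copy(void *dst, const void *src, size_t n)
  {
  	asm volatile("rep movsb"
  		     : "+D" (dst), "+S" (src), "+c" (n)
  		     : : "memory");
  }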

KVM will inject a page fault exception into the guest when it is asked
to emulate string mmio instructions for an SEV guest:

  BUG: unable to handle page fault for address: ffffc90000065068
  #PF: supervisor read access in kernel mode
  #PF: error_code(0x0000) - not-present page
  PGD 8000100000067 P4D 8000100000067 PUD 80001000fb067 PMD 80001000fc067 PTE 80000000fed40173
  Oops: 0000 [#1] PREEMPT SMP NOPTI
  CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.17.0-rc7 #3

As string mmio for an SEV guest cannot be supported by the
hypervisor, unroll the instructions for
CC_ATTR_GUEST_UNROLL_STRING_IO-enabled kernels.
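
Mainline performs this check with
cc_platform_has(CC_ATTR_GUEST_UNROLL_STRING_IO); this backport keys
the same decision off sev_key_active(), which the hunk below only
forward-declares, presumably because its definition sits further down
in io.h. In kernels of this vintage that helper is typically backed by
the SEV static key, roughly (a sketch, not part of this patch):

  #ifdef CONFIG_AMD_MEM_ENCRYPT
  #include <linux/jump_label.h>

  extern struct static_key_false sev_enable_key;
  static inline bool sev_key_active(void)
  {
  	return static_branch_unlikely(&sev_enable_key);
  }
  #else
  static inline bool sev_key_active(void) { return false; }
  #endif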

This issue appears when kernels are launched in recent libvirt-managed
SEV virtual machines, because virt-install started to proactively add
a tpm-crb device to the guest by default:

  https://github.com/virt-manager/virt-manager/commit/eb58c09f488b0633ed1eea012cd311e48864401e

and as that commit says, the default adding of a TPM can be disabled
with "virt-install ... --tpm none".

The kernel driver for tpm-crb uses the memcpy_toio()/memcpy_fromio()
functions to access MMIO memory, resulting in a page fault injected by
KVM and crashing the kernel at boot.
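
The access pattern in that driver is simply an ioremap()ed register
window read with memcpy_fromio(). A minimal sketch of the pattern that
faults (for illustration only, not the actual tpm-crb code; the
function name and parameters are made up):

  /* Sketch of the faulting pattern; needs <linux/io.h>. */
  static int read_mmio_block(phys_addr_t base, u8 *buf, size_t len)
  {
  	void __iomem *regs = ioremap(base, len);

  	if (!regs)
  		return -ENOMEM;

  	memcpy_fromio(buf, regs, len);	/* REP MOVS before this patch */
  	iounmap(regs);

  	return 0;
  }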

  [ bp: Massage and extend commit message. ]

Fixes: d8aa7eea78a1 ("x86/mm: Add Secure Encrypted Virtualization (SEV) support")
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Cc: <stable@vger.kernel.org>
Link: https://lore.kernel.org/r/20220321093351.23976-1-joro@8bytes.org
---
 arch/x86/include/asm/io.h |   62 +++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 59 insertions(+), 3 deletions(-)

--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -209,6 +209,8 @@ extern void set_iounmap_nonlazy(void);
  */
 #define xlate_dev_kmem_ptr(p)	p
 
+static inline bool sev_key_active(void);
+
 /**
  * memset_io	Set a range of I/O memory to a constant value
  * @addr:	The beginning of the I/O-memory range to set
@@ -218,9 +220,23 @@ extern void set_iounmap_nonlazy(void);
  * Set a range of I/O memory to a given value.
  */
 static inline void
+unrolled_memset_io(volatile void __iomem *addr, int val, size_t count)
+{
+	volatile char __iomem *mem = addr;
+	int i;
+
+	for (i = 0; i < count; ++i)
+		writeb(val, &mem[i]);
+}
+
+static inline void
 memset_io(volatile void __iomem *addr, unsigned char val, size_t count)
 {
-	memset((void __force *)addr, val, count);
+	if (sev_key_active()) {
+		unrolled_memset_io(addr, val, count);
+	} else {
+		memset((void __force *)addr, val, count);
+	}
 }
 
 /**
@@ -232,11 +248,31 @@ memset_io(volatile void __iomem *addr, u
  * Copy a block of data from I/O memory.
  */
 static inline void
-memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count)
+unrolled_memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count)
+{
+	const volatile char __iomem *in = src;
+	char *out = dst;
+	int i;
+
+	for (i = 0; i < count; ++i)
+		out[i] = readb(&in[i]);
+}
+
+static inline void
+string_memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count)
 {
 	memcpy(dst, (const void __force *)src, count);
 }
 
+static inline void
+memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count)
+{
+	if (sev_key_active())
+		unrolled_memcpy_fromio(dst, src, count);
+	else
+		string_memcpy_fromio(dst, src, count);
+}
+
 /**
  * memcpy_toio		Copy a block of data into I/O memory
  * @dst:		The (I/O memory) destination for the copy
@@ -246,11 +282,31 @@ memcpy_fromio(void *dst, const volatile
  * Copy a block of data to I/O memory.
  */
 static inline void
-memcpy_toio(volatile void __iomem *dst, const void *src, size_t count)
+unrolled_memcpy_toio(volatile void __iomem *dst, const void *src, size_t count)
+{
+	volatile char __iomem *out = dst;
+	const char *in = src;
+	int i;
+
+	for (i = 0; i < count; ++i)
+		writeb(in[i], &out[i]);
+}
+
+static inline void
+string_memcpy_toio(volatile void __iomem *dst, const void *src, size_t count)
 {
 	memcpy((void __force *)dst, src, count);
 }
 
+static inline void
+memcpy_toio(volatile void __iomem *dst, const void *src, size_t count)
+{
+	if (sev_key_active())
+		unrolled_memcpy_toio(dst, src, count);
+	else
+		string_memcpy_toio(dst, src, count);
+}
+
 /*
  * ISA space is 'always mapped' on a typical x86 system, no need to
  * explicitly ioremap() it. The fact that the ISA IO space is mapped