From: Halil Pasic <pasic@linux.ibm.com>
Date: Thu, 13 Sep 2018 18:57:16 +0200
Subject: s390/mm: force swiotlb for protected virtualization
Git-commit: 64e1f0c531d1072cd97939bf0d8df42b26713543
Patch-mainline: v5.2-rc1
References: jsc#SLE-6197 FATE#327012 bsc#1140559 LTC#173150

On s390, protected virtualization guests have to use bounced I/O
buffers.  That requires some plumbing.

Let us make sure that any device that correctly uses the DMA API with
direct ops is spared from the problems that a hypervisor attempting I/O
to a non-shared page would bring.

[ ptesarik: config SWIOTLB was added to arch/s390/Kconfig, because
  SLE15-SP1 does not contain commit
  09230cbc1baba68e0ca1e7c489344ce5d35c6f27, which itself depends on
  commit f21254cdd147d703ed9b79382cab8aff5a966397, but the latter
  would change CONFIG_SWIOTLB for armv7hl in Leap 15.1. ]
[ ptesarik: s390_pv_dma_ops is implemented in arch-specific code;
  commit 55897af63091ebc2c3f239c6a6666f748113ac50 looks too intrusive
  for a backport. ]

Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Reviewed-by: Michael Mueller <mimu@linux.ibm.com>
Tested-by: Michael Mueller <mimu@linux.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 arch/s390/Kconfig                   |    7 ++
 arch/s390/include/asm/dma-mapping.h |    3 -
 arch/s390/include/asm/mem_encrypt.h |   17 ++++++
 arch/s390/mm/init.c                 |   98 ++++++++++++++++++++++++++++++++++++
 4 files changed, 124 insertions(+), 1 deletion(-)

--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -1,3 +1,6 @@
+config ARCH_HAS_MEM_ENCRYPT
+        def_bool y
+
 config MMU
 	def_bool y
 
@@ -62,6 +65,9 @@ config PCI_QUIRKS
 config ARCH_SUPPORTS_UPROBES
 	def_bool y
 
+config SWIOTLB
+	bool
+
 config S390
 	def_bool y
 	select ARCH_BINFMT_ELF_STATE
@@ -179,6 +185,7 @@ config S390
 	select ARCH_HAS_SCALED_CPUTIME
 	select VIRT_TO_BUS
 	select HAVE_NMI
+	select SWIOTLB
 
 
 config SCHED_OMIT_FRAME_POINTER
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -11,10 +11,11 @@
 #define DMA_ERROR_CODE		(~(dma_addr_t) 0x0)
 
 extern const struct dma_map_ops s390_pci_dma_ops;
+extern const struct dma_map_ops *s390_dma_ops;
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-	return &dma_noop_ops;
+	return s390_dma_ops;
 }
 
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
--- /dev/null
+++ b/arch/s390/include/asm/mem_encrypt.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef S390_MEM_ENCRYPT_H__
+#define S390_MEM_ENCRYPT_H__
+
+#ifndef __ASSEMBLY__
+
+#define sme_me_mask	0ULL
+
+static inline bool sme_active(void) { return false; }
+extern bool sev_active(void);
+
+int set_memory_encrypted(unsigned long addr, int numpages);
+int set_memory_decrypted(unsigned long addr, int numpages);
+
+#endif	/* __ASSEMBLY__ */
+
+#endif	/* S390_MEM_ENCRYPT_H__ */
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -17,6 +17,7 @@
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
+#include <linux/swiotlb.h>
 #include <linux/smp.h>
 #include <linux/init.h>
 #include <linux/pagemap.h>
@@ -29,6 +30,7 @@
 #include <linux/cma.h>
 #include <linux/gfp.h>
 #include <linux/memblock.h>
+#include <linux/dma-mapping.h>
 #include <asm/processor.h>
 #include <linux/uaccess.h>
 #include <asm/pgtable.h>
@@ -41,6 +43,8 @@
 #include <asm/ctl_reg.h>
 #include <asm/sclp.h>
 #include <asm/set_memory.h>
+#include <asm/dma-mapping.h>
+#include <asm/uv.h>
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
 
@@ -48,6 +52,9 @@ unsigned long empty_zero_page, zero_page
 EXPORT_SYMBOL(empty_zero_page);
 EXPORT_SYMBOL(zero_page_mask);
 
+const struct dma_map_ops *s390_dma_ops = &dma_noop_ops;
+EXPORT_SYMBOL(s390_dma_ops);
+
 static void __init setup_zero_pages(void)
 {
 	unsigned int order;
@@ -119,6 +126,95 @@ void mark_rodata_ro(void)
 	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
 }
 
+int set_memory_encrypted(unsigned long addr, int numpages)
+{
+	int i;
+
+	/* make specified pages unshared (swiotlb, dma_free) */
+	for (i = 0; i < numpages; ++i) {
+		uv_remove_shared(addr);
+		addr += PAGE_SIZE;
+	}
+	return 0;
+}
+
+int set_memory_decrypted(unsigned long addr, int numpages)
+{
+	int i;
+	/* make specified pages shared (swiotlb, dma_alloc) */
+	for (i = 0; i < numpages; ++i) {
+		uv_set_shared(addr);
+		addr += PAGE_SIZE;
+	}
+	return 0;
+}
+
+/* are we a protected virtualization guest? */
+bool sev_active(void)
+{
+	return is_prot_virt_guest();
+}
+
+static void *s390_pv_alloc_coherent(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t gfp,
+				    unsigned long attrs)
+{
+	void *ret;
+
+	if (dev->coherent_dma_mask != DMA_BIT_MASK(64))
+		gfp |= GFP_DMA;
+	ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
+
+	/* share */
+	if (ret)
+		set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
+
+	return ret;
+}
+
+static void s390_pv_free_coherent(struct device *dev, size_t size,
+				  void *vaddr, dma_addr_t dma_addr,
+				  unsigned long attrs)
+{
+	if (!vaddr)
+		return;
+
+	/* unshare */
+	set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
+
+	swiotlb_free_coherent(dev, size, vaddr, dma_addr);
+}
+
+static const struct dma_map_ops s390_pv_dma_ops = {
+	.alloc			= s390_pv_alloc_coherent,
+	.free			= s390_pv_free_coherent,
+	.map_page		= swiotlb_map_page,
+	.unmap_page		= swiotlb_unmap_page,
+	.map_sg			= swiotlb_map_sg_attrs,
+	.unmap_sg		= swiotlb_unmap_sg_attrs,
+	.sync_single_for_cpu	= swiotlb_sync_single_for_cpu,
+	.sync_single_for_device	= swiotlb_sync_single_for_device,
+	.sync_sg_for_cpu	= swiotlb_sync_sg_for_cpu,
+	.sync_sg_for_device	= swiotlb_sync_sg_for_device,
+	.dma_supported		= swiotlb_dma_supported,
+	.mapping_error		= swiotlb_dma_mapping_error,
+	.max_mapping_size	= swiotlb_max_mapping_size,
+};
+
+/* protected virtualization */
+static void pv_init(void)
+{
+	if (!is_prot_virt_guest())
+		return;
+
+	/* make sure bounce buffers are shared */
+	swiotlb_init(1);
+	swiotlb_update_mem_attributes();
+	swiotlb_force = SWIOTLB_FORCE;
+	/* use swiotlb_dma_ops */
+	s390_dma_ops = &s390_pv_dma_ops;
+}
+
 void __init mem_init(void)
 {
 	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
@@ -127,6 +223,8 @@ void __init mem_init(void)
 	set_max_mapnr(max_low_pfn);
         high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
+	pv_init();
+
 	/* Setup guest page hinting */
 	cmma_init();