From: Christian Borntraeger <borntraeger@de.ibm.com>
Date: Mon, 16 Dec 2019 10:48:11 -0500
Subject: KVM: s390/mm: Make pages accessible before destroying the guest
Git-commit: 1274800792dced8e5b6d54c71ec049c4d1e34189
Patch-mainline: v5.7-rc1
References: jsc#SLE-7512 bsc#1165545

Before we destroy the secure configuration, we had better make all
pages accessible again. This also happens during reboot, where we reboot
into a non-secure guest that can then go into secure mode again. As
this "new" secure guest will have a new ID, we cannot reuse the old page
state.

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
[ ptesarik: SLE15-SP2 does not have a backport of upstream commit
  7b86ac3371b70c3fd8fd95501719beb1faab719f, so reset_acc_walk_ops
  must be converted back to struct mm_walk. ]
[ vbabka: SLE15-SP3 has said commit, so undo the conversion and
  match upstream commit again ]
Acked-by: Petr Tesarik <ptesarik@suse.com>
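
For reference, the SLE15-SP2 conversion mentioned in the note above
targets the pre-7b86ac3371b7 page-walk API, where the callbacks are
carried in struct mm_walk itself and walk_page_range() takes only the
address range and the walk structure; the pte_entry prototype is the
same, only the registration differs. The following is an illustrative
sketch of that old-style s390_reset_acc() (assuming the pre-v5.4
walk_page_range(start, end, walk) signature and the unchanged
__s390_reset_acc() callback), not the actual SP2 backport:

	void s390_reset_acc(struct mm_struct *mm)
	{
		/* pre-7b86ac3371b7: callbacks are embedded in struct mm_walk */
		struct mm_walk walk = {
			.pte_entry	= __s390_reset_acc,
			.mm		= mm,
		};

		if (!mmget_not_zero(mm))
			return;
		down_read(&mm->mmap_sem);
		/* old signature: no separate mm/ops arguments */
		walk_page_range(0, TASK_SIZE, &walk);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}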
---
 arch/s390/include/asm/gmap.h |    1 +
 arch/s390/kvm/pv.c           |    3 +++
 arch/s390/mm/gmap.c          |   35 +++++++++++++++++++++++++++++++++++
 3 files changed, 39 insertions(+)

--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -148,4 +148,5 @@ int gmap_mprotect_notify(struct gmap *,
 void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
 			     unsigned long gaddr, unsigned long vmaddr);
 int gmap_mark_unmergeable(void);
+void s390_reset_acc(struct mm_struct *mm);
 #endif /* _ASM_S390_GMAP_H */
--- a/arch/s390/kvm/pv.c
+++ b/arch/s390/kvm/pv.c
@@ -140,6 +140,9 @@ int kvm_s390_pv_deinit_vm(struct kvm *kv
 {
 	int cc;
 
+	/* make all pages accessible before destroying the guest */
+	s390_reset_acc(kvm->mm);
+
 	cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
 			   UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
 	WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2651,3 +2651,38 @@ void s390_reset_cmma(struct mm_struct *m
 	up_write(&mm->mmap_sem);
 }
 EXPORT_SYMBOL_GPL(s390_reset_cmma);
+
+/*
+ * make inaccessible pages accessible again
+ */
+static int __s390_reset_acc(pte_t *ptep, unsigned long addr,
+			    unsigned long next, struct mm_walk *walk)
+{
+	pte_t pte = READ_ONCE(*ptep);
+
+	if (pte_present(pte))
+		WARN_ON_ONCE(uv_convert_from_secure(pte_val(pte) & PAGE_MASK));
+	return 0;
+}
+
+static const struct mm_walk_ops reset_acc_walk_ops = {
+	.pte_entry		= __s390_reset_acc,
+};
+
+#include <linux/sched/mm.h>
+void s390_reset_acc(struct mm_struct *mm)
+{
+	/*
+	 * we might be called during
+	 * reset:                             we walk the pages and clear
+	 * close of all kvm file descriptors: we walk the pages and clear
+	 * exit of process on fd closure:     vma already gone, do nothing
+	 */
+	if (!mmget_not_zero(mm))
+		return;
+	down_read(&mm->mmap_sem);
+	walk_page_range(mm, 0, TASK_SIZE, &reset_acc_walk_ops, NULL);
+	up_read(&mm->mmap_sem);
+	mmput(mm);
+}
+EXPORT_SYMBOL_GPL(s390_reset_acc);