From: Janosch Frank <frankja@linux.ibm.com>
Subject: s390/mm: Clear skeys for newly mapped huge guest pmds
Patch-mainline: v4.19-rc1
Git-commit: 3afdfca69870963ae01e280732a5ee493a2fcbb3
References: FATE#326372, LTC#169184, bsc#1113484

Summary:     kernel: Introduce huge page KVM backing support
Description: This adds KVM support for libhugetlbfs backing of
             s390 KVM guests.

Upstream-Description:

             s390/mm: Clear skeys for newly mapped huge guest pmds

             Similarly to the pte skey handling, where we set the storage key to
             the default key for each newly mapped pte, we have to also do that for
             huge pmds.

             With the PG_arch_1 flag we keep track if the area has already been
             cleared of its skeys.

             Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
             Reviewed-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 arch/s390/include/asm/hugetlb.h |    5 ++++-
 arch/s390/mm/gmap.c             |    2 ++
 arch/s390/mm/hugetlbpage.c      |   24 ++++++++++++++++++++++++
 3 files changed, 30 insertions(+), 1 deletion(-)
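
For reference, the stand-alone C model below (not part of the patch) sketches
the lazy key-clearing scheme the hunks implement: a per-page flag records
whether the storage keys behind a huge mapping have already been set to the
default key, so the expensive key initialization runs at most once per huge
page. All demo_* names are invented for this illustration; only PG_arch_1,
test_and_set_bit() and __storage_key_init_range() correspond to the kernel
symbols actually used in the diff.

/*
 * Illustration only, not kernel code: models the lazy skey clearing
 * done by clear_huge_pte_skeys(). DEMO_PG_ARCH_1 plays the role of
 * the PG_arch_1 page flag.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_PG_ARCH_1	0x1UL	/* models the PG_arch_1 page flag */

struct demo_page {
	unsigned long flags;
};

/* models test_and_set_bit(): set the bit, return its previous state */
static bool demo_test_and_set(unsigned long bit, unsigned long *flags)
{
	bool old = (*flags & bit) != 0;

	*flags |= bit;
	return old;
}

/* stands in for __storage_key_init_range(start, end) */
static void demo_key_init_range(unsigned long start, unsigned long end)
{
	printf("clearing storage keys for 0x%lx-0x%lx\n", start, end);
}

/*
 * Mirrors clear_huge_pte_skeys(): the keys are cleared only the first
 * time a huge mapping of the page is established; later mappings find
 * the flag already set and skip the key initialization.
 */
static void demo_clear_huge_skeys(struct demo_page *page,
				  unsigned long paddr, unsigned long size)
{
	if (!demo_test_and_set(DEMO_PG_ARCH_1, &page->flags))
		demo_key_init_range(paddr, paddr + size - 1);
}

int main(void)
{
	struct demo_page page = { .flags = 0 };

	demo_clear_huge_skeys(&page, 0x100000, 0x100000);	/* keys cleared */
	demo_clear_huge_skeys(&page, 0x100000, 0x100000);	/* skipped */
	return 0;
}

In the patch itself the flag is cleared again by the new
arch_clear_hugepage_flags() when the huge page is released, and
__s390_enable_skey_hugetlb() sets it after initializing the keys of an
already mapped huge pmd, so each huge page has its keys reset exactly once
per use.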

--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -36,7 +36,10 @@ static inline int prepare_hugepage_range
 	return 0;
 }
 
-#define arch_clear_hugepage_flags(page)		do { } while (0)
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+	clear_bit(PG_arch_1, &page->flags);
+}
 
 static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
 				  pte_t *ptep)
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2560,6 +2560,7 @@ static int __s390_enable_skey_hugetlb(pt
 {
 	pmd_t *pmd = (pmd_t *)pte;
 	unsigned long start, end;
+	struct page *page = pmd_page(*pmd);
 
 	/*
 	 * The write check makes sure we do not set a key on shared
@@ -2574,6 +2575,7 @@ static int __s390_enable_skey_hugetlb(pt
 	start = pmd_val(*pmd) & HPAGE_MASK;
 	end = start + HPAGE_SIZE - 1;
 	__storage_key_init_range(start, end);
+	set_bit(PG_arch_1, &page->flags);
 	return 0;
 }
 
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -122,6 +122,29 @@ static inline pte_t __rste_to_pte(unsign
 	return pte;
 }
 
+static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
+{
+	struct page *page;
+	unsigned long size, paddr;
+
+	if (!mm_uses_skeys(mm) ||
+	    rste & _SEGMENT_ENTRY_INVALID)
+		return;
+
+	if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
+		page = pud_page(__pud(rste));
+		size = PUD_SIZE;
+		paddr = rste & PUD_MASK;
+	} else {
+		page = pmd_page(__pmd(rste));
+		size = PMD_SIZE;
+		paddr = rste & PMD_MASK;
+	}
+
+	if (!test_and_set_bit(PG_arch_1, &page->flags))
+		__storage_key_init_range(paddr, paddr + size - 1);
+}
+
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t pte)
 {
@@ -136,6 +159,7 @@ void set_huge_pte_at(struct mm_struct *m
 		rste |= _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE;
 	else
 		rste |= _SEGMENT_ENTRY_LARGE;
+	clear_huge_pte_skeys(mm, rste);
 	pte_val(*ptep) = rste;
 }