From: Jianyong Wu <jianyong.wu@arm.com>
Date: Tue, 1 Feb 2022 19:44:00 +0800
Subject: arm64/mm: avoid fixmap race condition when creating pud mapping
Git-commit: ee017ee353506fcec58e481673e4331ff198a80e
Patch-mainline: v5.18-rc1
References: git-fixes

The 'fixmap' is a global resource and is used recursively while creating
pud mappings, leading to a potential race condition when alloc_init_pud()
is called concurrently:

kernel_init thread                          virtio-mem workqueue thread
==================                          ===========================

  alloc_init_pud(...)                       alloc_init_pud(...)
  pudp = pud_set_fixmap_offset(...)         pudp = pud_set_fixmap_offset(...)
  READ_ONCE(*pudp)
  pud_clear_fixmap(...)
                                            READ_ONCE(*pudp) // CRASH!

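A minimal sketch of the unsynchronised pattern both threads execute
(illustrative only, distilled from the hunk below; it assumes that
pud_set_fixmap_offset() maps the pud table through a single shared
fixmap slot, which is why the two walks interfere):

  pudp = pud_set_fixmap_offset(pgd, addr);   /* map table via the shared slot */
  do {
          pud_t old_pud = READ_ONCE(*pudp);  /* read through that mapping */
          ...                                /* populate/modify entries */
  } while (pudp++, addr = next, addr != end);
  pud_clear_fixmap();                        /* unmap the slot for everyone */

Whichever thread reaches pud_clear_fixmap() first tears the mapping down
under the other thread's feet, so the other thread's next dereference of
pudp faults.
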
As the kernel may sleep while creating a pud mapping, introduce a mutex to
serialise use of the fixmap entries by alloc_init_pud(). However, locking
is not needed during early boot, and it does not work well there with
KASLR enabled anyway, so only take the lock once system_state is no longer
SYSTEM_BOOTING.

Signed-off-by: Jianyong Wu <jianyong.wu@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Fixes: f4710445458c ("arm64: mm: use fixmap when creating page tables")
Link: https://lore.kernel.org/r/20220201114400.56885-1-jianyong.wu@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
Acked-by: Ivan T. Ivanov <iivanov@suse.de>
---
 arch/arm64/mm/mmu.c |   10 ++++++++++
 1 file changed, 10 insertions(+)

--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -65,6 +65,8 @@ static pte_t bm_pte[PTRS_PER_PTE] __page
 static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
 static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
 
+static DEFINE_MUTEX(fixmap_lock);
+
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 			      unsigned long size, pgprot_t vma_prot)
 {
@@ -280,6 +282,12 @@ static void alloc_init_pud(pgd_t *pgd, u
 	}
 	BUG_ON(pgd_bad(*pgd));
 
+	/*
+	 * No need for locking during early boot. And it doesn't work as
+	 * expected with KASLR enabled.
+	 */
+	if (system_state != SYSTEM_BOOTING)
+		mutex_lock(&fixmap_lock);
 	pud = pud_set_fixmap_offset(pgd, addr);
 	do {
 		pud_t old_pud = *pud;
@@ -310,6 +318,8 @@ static void alloc_init_pud(pgd_t *pgd, u
 	} while (pud++, addr = next, addr != end);
 
 	pud_clear_fixmap();
+	if (system_state != SYSTEM_BOOTING)
+		mutex_unlock(&fixmap_lock);
 }
 
 static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,