From 80a4ae202f2d319eced8bbf612a4e8b0f11c21f5 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Date: Fri, 23 Mar 2018 09:29:06 +1100
Subject: [PATCH] powerpc/mm: Workaround Nest MMU bug with TLB invalidations

References: bsc#1087908
Patch-mainline: v4.16
Git-commit: 80a4ae202f2d319eced8bbf612a4e8b0f11c21f5

On POWER9 the Nest MMU may fail to invalidate some translations when
doing a tlbie "by PID" or "by LPID" that is targeted at the TLB only
and not the page walk cache.

This works around it by forcing such invalidations to escalate to
RIC=2 (full invalidation of TLB *and* PWC) when a coprocessor is in
use for the context.
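
For illustration only, a minimal standalone sketch of the escalation
rule (this is not the kernel code; pick_ric() and its copros_attached
argument are made-up names standing in for the real
atomic_read(&mm->context.copros) check, while the RIC values match the
radix flush enum used in arch/powerpc/mm/tlb-radix.c):

  /* Illustrative model of the workaround, not the kernel API:
   * if a coprocessor (nest MMU user) is attached to the context,
   * escalate a TLB-only flush (RIC=0) to a full TLB+PWC flush (RIC=2).
   */
  #include <stdio.h>

  enum ric { RIC_FLUSH_TLB = 0, RIC_FLUSH_PWC = 1, RIC_FLUSH_ALL = 2 };

  static enum ric pick_ric(int copros_attached)
  {
          /* Nest MMU bug: a "by PID/LPID" tlbie targeting the TLB only
           * may miss translations, so also flush the page walk cache.
           */
          return copros_attached ? RIC_FLUSH_ALL : RIC_FLUSH_TLB;
  }

  int main(void)
  {
          printf("no coprocessor: RIC=%d\n", pick_ric(0));
          printf("coprocessor(s): RIC=%d\n", pick_ric(1));
          return 0;
  }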

Fixes: 03b8abedf4f4 ("cxl: Enable global TLBIs for cxl contexts")
Cc: stable@vger.kernel.org # v4.15+
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Balbir Singh <bsingharora@gmail.com>
[balbirs: fixed spelling and coding style to quiesce checkpatch.pl]
Tested-by: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
[mauricfo: backport: essentially, cover '_tlbie_pid(pid, RIC_FLUSH_TLB)'
 - hunk 1: removed -- not required since there's no __tlbie_pid()
           in SLES 15, so _tlbie_pid() is called directly using
           the compile-time constraint.
 - hunk 3: update context lines.
 - hunk 4: update context lines (to match a different function;
           in SLES 15, original target radix__flush_tlb_range()
           just calls radix__flush_tlb_mm() which is already
           patched; but there is another instance of
           _tlbie_pid(pid, RIC_FLUSH_TLB) in radix__flush_tlb_range_psize(),
           so cover that one; equivalent logic (else / goto err_out).
 - hunk 5: removed -- this function does not exist in SLES 15.]
Signed-off-by: Mauricio Faria de Oliveira <mauricfo@linux.vnet.ibm.com>

Acked-by: Michal Suchanek <msuchanek@suse.de>
---
 arch/powerpc/mm/tlb-radix.c | 29 +++++++++++++++++++++++------
 1 file changed, 23 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index f136cb4c4255..c26a92cfa4a6 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -261,6 +261,16 @@ void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmadd
 }
 EXPORT_SYMBOL(radix__local_flush_tlb_page);
 
+static bool mm_needs_flush_escalation(struct mm_struct *mm)
+{
+	/*
+	 * P9 nest MMU has issues with the page walk cache
+	 * caching PTEs and not flushing them properly when
+	 * RIC = 0 for a PID/LPID invalidate
+	 */
+	return atomic_read(&mm->context.copros) != 0;
+}
+
 #ifdef CONFIG_SMP
 void radix__flush_tlb_mm(struct mm_struct *mm)
 {
@@ -271,9 +281,12 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
 	if (unlikely(pid == MMU_NO_CONTEXT))
 		goto no_context;
 
-	if (!mm_is_thread_local(mm))
-		_tlbie_pid(pid, RIC_FLUSH_TLB);
-	else
+	if (!mm_is_thread_local(mm)) {
+		if (mm_needs_flush_escalation(mm))
+			_tlbie_pid(pid, RIC_FLUSH_ALL);
+		else
+			_tlbie_pid(pid, RIC_FLUSH_TLB);
+	} else
 		_tlbiel_pid(pid, RIC_FLUSH_TLB);
 no_context:
 	preempt_enable();
@@ -415,10 +428,14 @@ void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
 
 	if (end == TLB_FLUSH_ALL ||
 	    (end - start) > tlb_single_page_flush_ceiling * page_size) {
-		if (local)
+		if (local) {
 			_tlbiel_pid(pid, RIC_FLUSH_TLB);
-		else
-			_tlbie_pid(pid, RIC_FLUSH_TLB);
+		} else {
+			if (mm_needs_flush_escalation(mm))
+				_tlbie_pid(pid, RIC_FLUSH_ALL);
+			else
+				_tlbie_pid(pid, RIC_FLUSH_TLB);
+		}
 		goto err_out;
 	}
 	asm volatile("ptesync": : :"memory");
-- 
2.13.6