From e82d899775f05214d8382420ca44f05d6dd74c63 Mon Sep 17 00:00:00 2001
From: Juergen Gross <jgross@suse.com>
Date: Tue, 31 May 2022 08:03:19 +0000
Subject: KVM: x86/mmu: Complete prefetch for trailing SPTEs for direct,
 legacy MMU (git-fixes).

---

diff --git a/patches.suse/KVM-x86-mmu-Complete-prefetch-for-trailing-SPTEs-for.patch b/patches.suse/KVM-x86-mmu-Complete-prefetch-for-trailing-SPTEs-for.patch
new file mode 100644
index 0000000..fdf2a9e
--- /dev/null
+++ b/patches.suse/KVM-x86-mmu-Complete-prefetch-for-trailing-SPTEs-for.patch
@@ -0,0 +1,61 @@
+Patch-mainline: v5.16-rc1
+Git-commit: c6cecc4b9324b97775d7002a13460c247f586e8e
+References: git-fixes
+From: Sean Christopherson <seanjc@google.com>
+Date: Wed, 18 Aug 2021 23:56:15 +0000
+Subject: [PATCH] KVM: x86/mmu: Complete prefetch for trailing SPTEs for
+ direct, legacy MMU
+
+Make a final call to direct_pte_prefetch_many() if there are "trailing"
+SPTEs to prefetch, i.e. SPTEs for GFNs following the faulting GFN.  The
+call to direct_pte_prefetch_many() in the loop only handles the case
+where there are !PRESENT SPTEs preceding a PRESENT SPTE.
+
+E.g. if the faulting GFN is a multiple of 8 (the prefetch size) and all
+SPTEs for the following GFNs are !PRESENT, the loop will terminate with
+"start = sptep+1" and not prefetch any SPTEs.
+
+Prefetching trailing SPTEs as intended can drastically reduce the number
+of guest page faults.  E.g. when accessing the first byte of every 4kb page
+in a 6gb chunk of virtual memory in a VM with 8gb of preallocated memory,
+the number of pf_fixed events observed in L0 drops from ~1.75M to <0.27M.
+
+Note, this only affects memory that is backed by 4kb pages as KVM doesn't
+prefetch when installing hugepages.  Shadow paging prefetching is not
+affected as it does not batch the prefetches due to the need to process
+the corresponding guest PTE.  The TDP MMU is not affected because it
+doesn't have prefetching, yet...
+
+Fixes: 957ed9effd80 ("KVM: MMU: prefetch ptes when intercepted guest #PF")
+Cc: Sergey Senozhatsky <senozhatsky@google.com>
+Cc: Ben Gardon <bgardon@google.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210818235615.2047588-1-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+---
+ arch/x86/kvm/mmu/mmu.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 1a64ba5b9437..4238fe3e91c2 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -2842,11 +2842,13 @@ static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
+ 			if (!start)
+ 				continue;
+ 			if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
+-				break;
++				return;
+ 			start = NULL;
+ 		} else if (!start)
+ 			start = spte;
+ 	}
++	if (start)
++		direct_pte_prefetch_many(vcpu, sp, start, spte);
+ }
+ 
+ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
+-- 
+2.35.3
+
diff --git a/series.conf b/series.conf
index 36e62ff..ca0d944 100644
--- a/series.conf
+++ b/series.conf
@@ -5629,6 +5629,7 @@
 	patches.suse/x86-hyperv-Protect-set_hv_tscchange_cb-against-getti.patch
 	patches.suse/Revert-x86-kvm-fix-vcpu-id-indexed-array-sizes.patch
 	patches.suse/KVM-x86-Do-not-mark-all-registers-as-avail-dirty-dur.patch
+	patches.suse/KVM-x86-mmu-Complete-prefetch-for-trailing-SPTEs-for.patch
 	patches.suse/KVM-x86-avoid-warning-with-Wbitwise-instead-of-logic.patch
 	patches.suse/x86-irq-Ensure-PI-wakeup-handler-is-unregistered-bef.patch
 	patches.suse/KVM-VMX-Unregister-posted-interrupt-wakeup-handler-o.patch
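
Editorial note (not part of the patch): the sketch below illustrates the
batching pattern __direct_pte_prefetch() ends up with after this fix --
runs of not-present entries are prefetched in batches inside the loop, and
the added "if (start)" after the loop flushes a trailing run that reaches
the end of the window.  It is a standalone, hypothetical example; the names
entry_t, prefetch_many() and PREFETCH_WINDOW are stand-ins, not KVM symbols.

#include <stdbool.h>
#include <stdio.h>

#define PREFETCH_WINDOW 8

typedef struct { bool present; } entry_t;

/* Stand-in for direct_pte_prefetch_many(); returns < 0 on failure. */
static int prefetch_many(entry_t *start, entry_t *end)
{
	printf("prefetching %td entries\n", end - start);
	return 0;
}

static void prefetch_window(entry_t *win)
{
	entry_t *p, *end = win + PREFETCH_WINDOW;
	entry_t *run_start = NULL;

	for (p = win; p < end; p++) {
		if (p->present) {
			if (!run_start)
				continue;
			/* Flush the run of not-present entries preceding a present one. */
			if (prefetch_many(run_start, p) < 0)
				return;
			run_start = NULL;
		} else if (!run_start) {
			run_start = p;
		}
	}

	/*
	 * The fix: flush a trailing run of not-present entries.  Without
	 * this, a window whose tail is entirely not-present (e.g. only the
	 * first, faulting slot is present) prefetches nothing.
	 */
	if (run_start)
		prefetch_many(run_start, end);
}

int main(void)
{
	/* Only slot 0 is present; the remaining 7 slots should still be prefetched. */
	entry_t win[PREFETCH_WINDOW] = { { .present = true } };

	prefetch_window(win);
	return 0;
}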