From 8e8a31d7fd54d68fc9c6c1e69f52ccdaf43b01ea Mon Sep 17 00:00:00 2001
From: "Gautham R. Shenoy" <ego@linux.vnet.ibm.com>
Date: Thu, 11 Oct 2018 11:03:02 +0530
Subject: [PATCH] powerpc: Use cpu_smallcore_sibling_mask at SMT level on
 bigcores

References: bsc#1109695
Patch-mainline: v4.20-rc1
Git-commit: 8e8a31d7fd54d68fc9c6c1e69f52ccdaf43b01ea

POWER9 SMT8 cores consist of two groups of threads, where the threads
in each group share an L1 cache. The scheduler is not aware of this
distinction, as the current sched-domain hierarchy has all the threads
of the core defined at the SMT domain:

	SMT  [Thread siblings of the SMT8 core]
	DIE  [CPUs in the same die]
	NUMA [All the CPUs in the system]

Due to this, we can observe run-to-run variance when running a
multi-threaded benchmark bound to a single core, depending on how the
scheduler spreads the software threads across the two groups in the
core.

Fix this by defining each group of threads that shares an L1 cache to
be the SMT level. The group of threads in the SMT8 core is defined to
be the CACHE level. The sched-domain hierarchy after this patch will
be:

	SMT	[Thread siblings in the core that share L1 cache]
	CACHE	[Thread siblings that are in the SMT8 core]
	DIE	[CPUs in the same die]
	NUMA	[All the CPUs in the system]
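For reference, here is a rough sketch of what the POWER9 topology table
looks like once this change is in place. It is reconstructed from the
hunks below for illustration only; the CACHE and DIE entries are assumed
from the surrounding code in arch/powerpc/kernel/smp.c and are not
modified by this patch, so the exact code may differ slightly:

	static struct sched_domain_topology_level power9_topology[] = {
	#ifdef CONFIG_SCHED_SMT
		/*
		 * On big-core systems smp_cpus_done() switches this .mask to
		 * smallcore_smt_mask, so the SMT domain spans only the group
		 * of threads that shares an L1 cache.
		 */
		{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
	#endif
		{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
		{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
		{ NULL, },
	};

The generic powerpc_topology[] table receives the same SMT-mask
override, as the last hunk shows.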
Signed-off-by: Gautham R. Shenoy <ego@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Acked-by: Michal Suchanek <msuchanek@suse.de>
---
 arch/powerpc/kernel/smp.c | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8d245ff059c9..8e3a5da24d59 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -1274,6 +1274,7 @@ static bool shared_caches;
 void start_secondary(void *unused)
 {
 	unsigned int cpu = smp_processor_id();
+	struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
 
 	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
@@ -1299,11 +1300,13 @@ void start_secondary(void *unused)
 	/* Update topology CPU masks */
 	add_cpu_to_masks(cpu);
 
+	if (has_big_cores)
+		sibling_mask = cpu_smallcore_mask;
 	/*
 	 * Check for any shared caches. Note that this must be done on a
 	 * per-core basis because one core in the pair might be disabled.
 	 */
-	if (!cpumask_equal(cpu_l2_cache_mask(cpu), cpu_sibling_mask(cpu)))
+	if (!cpumask_equal(cpu_l2_cache_mask(cpu), sibling_mask(cpu)))
 		shared_caches = true;
 
 	set_numa_node(numa_cpu_lookup_table[cpu]);
@@ -1370,6 +1373,13 @@ static const struct cpumask *shared_cache_mask(int cpu)
 	return cpu_l2_cache_mask(cpu);
 }
 
+#ifdef CONFIG_SCHED_SMT
+static const struct cpumask *smallcore_smt_mask(int cpu)
+{
+	return cpu_smallcore_mask(cpu);
+}
+#endif
+
 static struct sched_domain_topology_level power9_topology[] = {
 #ifdef CONFIG_SCHED_SMT
 	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
@@ -1397,6 +1407,13 @@ void __init smp_cpus_done(unsigned int max_cpus)
 	shared_proc_topology_init();
 	dump_numa_cpu_topology();
 
+#ifdef CONFIG_SCHED_SMT
+	if (has_big_cores) {
+		pr_info("Using small cores at SMT level\n");
+		power9_topology[0].mask = smallcore_smt_mask;
+		powerpc_topology[0].mask = smallcore_smt_mask;
+	}
+#endif
 	/*
 	 * If any CPU detects that it's sharing a cache with another CPU then
 	 * use the deeper topology that is aware of this sharing.
-- 
2.13.7