From 575a63218aaea7d3a5e1cd59c62a8c04fda75f2b Mon Sep 17 00:00:00 2001
From: Lauro Ramos Venancio <lvenanci@redhat.com>
Date: Thu, 20 Apr 2017 16:51:42 -0300
Subject: [PATCH] sched/topology: Move comment about asymmetric node setups

References: bnc#978907 Scheduler performance -- topology
Patch-mainline: v4.13-rc1
Git-commit: c20e1ea4b61c3d99a354d912f2d74822fd2a001d

Signed-off-by: Lauro Ramos Venancio <lvenanci@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: lwang@redhat.com
Cc: riel@redhat.com
Link: http://lkml.kernel.org/r/1492717903-5195-4-git-send-email-lvenanci@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Mel Gorman <mgorman@suse.de>
---
 kernel/sched/topology.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 22355d0cc5eb..20ca78d2514c 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -498,14 +498,6 @@ enum s_alloc {
  *
  * Only CPUs that can arrive at this group should be considered to continue
  * balancing.
- *
- * Asymmetric node setups can result in situations where the domain tree is of
- * unequal depth, make sure to skip domains that already cover the entire
- * range.
- *
- * In that case build_sched_domains() will have terminated the iteration early
- * and our sibling sd spans will be empty. Domains should always include the
- * CPU they're built on, so check that.
  */
 static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
 {
@@ -606,7 +598,16 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 
 		sibling = *per_cpu_ptr(sdd->sd, i);
 
-		/* See the comment near build_group_mask(). */
+		/*
+		 * Asymmetric node setups can result in situations where the
+		 * domain tree is of unequal depth, make sure to skip domains
+		 * that already cover the entire range.
+		 *
+		 * In that case build_sched_domains() will have terminated the
+		 * iteration early and our sibling sd spans will be empty.
+		 * Domains should always include the CPU they're built on, so
+		 * check that.
+		 */
 		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
 			continue;