From a7915f6a01b0bfec0f634c2a9316bdb9ced68460 Mon Sep 17 00:00:00 2001
From: Mel Gorman
Date: Mar 29 2024 10:04:59 +0000
Subject: Refresh series

- patches.suse/sched-fair-Add-EAS-checks-before-updating-root_domain-overutilized.patch.
- patches.suse/sched-fair-Introduce-is_rd_overutilized-helper-function-to-access-root_domain-overutilized.patch.
- patches.suse/sched-fair-Combine-EAS-check-with-overutilized-access.patch.

---

diff --git a/patches.suse/sched-fair-Add-EAS-checks-before-updating-overutilized.patch b/patches.suse/sched-fair-Add-EAS-checks-before-updating-overutilized.patch
deleted file mode 100644
index fc4623a..0000000
--- a/patches.suse/sched-fair-Add-EAS-checks-before-updating-overutilized.patch
+++ /dev/null
@@ -1,150 +0,0 @@
-From 9131beb01060aeaf25894c54128f13ce7d5b5570 Mon Sep 17 00:00:00 2001
-From: Shrikanth Hegde
-Date: Wed, 6 Mar 2024 15:54:52 +0530
-Subject: [PATCH] sched/fair: Add EAS checks before updating overutilized
-
-Patch-mainline: Not yet, v6 under review
-URL: https://lore.kernel.org/lkml/20240307085725.444486-1-sshegde@linux.ibm.com/
-References: bsc#1221157
-
-Overutilized field of root domain is only used for EAS(energy aware scheduler)
-to decide whether to do load balance or not. It is not used if EAS
-not possible.
-
-Currently enqueue_task_fair and task_tick_fair accesses, sometime updates
-this field. In update_sd_lb_stats it is updated often. This causes cache
-contention due to true sharing and burns a lot of cycles. overload and
-overutilized are part of the same cacheline. Updating it often invalidates
-the cacheline. That causes access to overload to slow down due to
-false sharing. Hence add EAS check before accessing/updating this field.
-EAS check is optimized at compile time or it is a static branch.
-Hence it shouldn't cost much.
-
-With the patch, both enqueue_task_fair and newidle_balance don't show
-up as hot routines in perf profile.
-
-6.8-rc4:
-7.18% swapper [kernel.vmlinux] [k] enqueue_task_fair
-6.78% s [kernel.vmlinux] [k] newidle_balance
-+patch:
-0.14% swapper [kernel.vmlinux] [k] enqueue_task_fair
-0.00% swapper [kernel.vmlinux] [k] newidle_balance
-
-Minor change: trace_sched_overutilized_tp expect that second argument to
-be bool. So do a int to bool conversion for that.
-
-Fixes: 2802bf3cd936 ("sched/fair: Add over-utilization/tipping point indicator")
-Reviewed-by: Qais Yousef
-Reviewed-by: Srikar Dronamraju
-Signed-off-by: Shrikanth Hegde
-Signed-off-by: Mel Gorman
----
- kernel/sched/fair.c | 62 +++++++++++++++++++++++++++++++++++++----------------
- 1 file changed, 43 insertions(+), 19 deletions(-)
-
-diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index 51b49829854d..be112fbc244b 100644
---- a/kernel/sched/fair.c
-+++ b/kernel/sched/fair.c
-@@ -6571,22 +6571,51 @@ static inline void hrtick_update(struct rq *rq)
- #ifdef CONFIG_SMP
- static inline bool cpu_overutilized(int cpu)
- {
--	unsigned long rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
--	unsigned long rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
-+	unsigned long rq_util_min, rq_util_max;
-+
-+	if (!sched_energy_enabled())
-+		return false;
-+
-+	rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
-+	rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
- 
- 	/* Return true only if the utilization doesn't fit CPU's capacity */
- 	return !util_fits_cpu(cpu_util_cfs(cpu), rq_util_min, rq_util_max, cpu);
- }
- 
--static inline void update_overutilized_status(struct rq *rq)
-+static inline void set_rd_overutilized_status(struct root_domain *rd,
-+					      unsigned int status)
- {
--	if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) {
--		WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED);
--		trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED);
--	}
-+	if (!sched_energy_enabled())
-+		return;
-+
-+	WRITE_ONCE(rd->overutilized, status);
-+	trace_sched_overutilized_tp(rd, !!status);
-+}
-+
-+static inline void check_update_overutilized_status(struct rq *rq)
-+{
-+	/*
-+	 * overutilized field is used for load balancing decisions only
-+	 * if energy aware scheduler is being used
-+	 */
-+	if (!sched_energy_enabled())
-+		return;
-+
-+	if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu))
-+		set_rd_overutilized_status(rq->rd, SG_OVERUTILIZED);
- }
- #else
--static inline void update_overutilized_status(struct rq *rq) { }
-+static inline void check_update_overutilized_status(struct rq *rq)
-+{
-+	return 0;
-+}
-+
-+static inline void set_rd_overutilized_status(struct root_domain *rd,
-+					      unsigned int status)
-+{
-+	return 0;
-+}
- #endif
- 
- /* Runqueue only has SCHED_IDLE tasks enqueued */
-@@ -6698,7 +6727,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
- 	 * and the following generally works well enough in practice.
- 	 */
- 	if (!task_new)
--		update_overutilized_status(rq);
-+		check_update_overutilized_status(rq);
- 
- enqueue_throttle:
- 	assert_list_leaf_cfs_rq(rq);
-@@ -10627,19 +10656,14 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
- 	env->fbq_type = fbq_classify_group(&sds->busiest_stat);
- 
- 	if (!env->sd->parent) {
--		struct root_domain *rd = env->dst_rq->rd;
--
- 		/* update overload indicator if we are at root domain */
--		WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD);
-+		WRITE_ONCE(env->dst_rq->rd->overload, sg_status & SG_OVERLOAD);
- 
- 		/* Update over-utilization (tipping point, U >= 0) indicator */
--		WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED);
--		trace_sched_overutilized_tp(rd, sg_status & SG_OVERUTILIZED);
-+		set_rd_overutilized_status(env->dst_rq->rd,
-+					   sg_status & SG_OVERUTILIZED);
- 	} else if (sg_status & SG_OVERUTILIZED) {
--		struct root_domain *rd = env->dst_rq->rd;
--
--		WRITE_ONCE(rd->overutilized, SG_OVERUTILIZED);
--		trace_sched_overutilized_tp(rd, SG_OVERUTILIZED);
-+		set_rd_overutilized_status(env->dst_rq->rd, SG_OVERUTILIZED);
- 	}
- 
- 	update_idle_cpu_scan(env, sum_util);
-@@ -12642,7 +12666,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
- 	task_tick_numa(rq, curr);
- 
- 	update_misfit_status(curr, rq);
--	update_overutilized_status(task_rq(curr));
-+	check_update_overutilized_status(task_rq(curr));
- 
- 	task_tick_core(rq, curr);
- }
diff --git a/patches.suse/sched-fair-Add-EAS-checks-before-updating-root_domain-overutilized.patch b/patches.suse/sched-fair-Add-EAS-checks-before-updating-root_domain-overutilized.patch
new file mode 100644
index 0000000..9870298
--- /dev/null
+++ b/patches.suse/sched-fair-Add-EAS-checks-before-updating-root_domain-overutilized.patch
@@ -0,0 +1,147 @@
+From 707a4189c097780a9bbdc46d51b1c205fb1ee596 Mon Sep 17 00:00:00 2001
+From: Shrikanth Hegde
+Date: Thu, 7 Mar 2024 14:27:23 +0530
+Subject: [PATCH] sched/fair: Add EAS checks before updating
+ root_domain::overutilized
+
+References: bsc#1221157
+Git-commit: be3a51e68f2f1b17250ce40d8872c7645b7a2991
+Patch-mainline: Queued in subsystem maintainer repository
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git
+
+root_domain::overutilized is only used for EAS(energy aware scheduler)
+to decide whether to do load balance or not. It is not used if EAS
+not possible.
+
+Currently enqueue_task_fair and task_tick_fair accesses, sometime updates
+this field. In update_sd_lb_stats it is updated often. This causes cache
+contention due to true sharing and burns a lot of cycles. ::overload and
+::overutilized are part of the same cacheline. Updating it often invalidates
+the cacheline. That causes access to ::overload to slow down due to
+false sharing. Hence add EAS check before accessing/updating this field.
+EAS check is optimized at compile time or it is a static branch.
+Hence it shouldn't cost much.
+
+With the patch, both enqueue_task_fair and newidle_balance don't show
+up as hot routines in perf profile.
+
+ 6.8-rc4:
+ 7.18% swapper [kernel.vmlinux] [k] enqueue_task_fair
+ 6.78% s [kernel.vmlinux] [k] newidle_balance
+
+ +patch:
+ 0.14% swapper [kernel.vmlinux] [k] enqueue_task_fair
+ 0.00% swapper [kernel.vmlinux] [k] newidle_balance
+
+While at it: trace_sched_overutilized_tp expect that second argument to
+be bool. So do a int to bool conversion for that.
+
+Fixes: 2802bf3cd936 ("sched/fair: Add over-utilization/tipping point indicator")
+Signed-off-by: Shrikanth Hegde
+Signed-off-by: Ingo Molnar
+Reviewed-by: Qais Yousef
+Reviewed-by: Srikar Dronamraju
+Reviewed-by: Vincent Guittot
+Link: https://lore.kernel.org/r/20240307085725.444486-2-sshegde@linux.ibm.com
+Signed-off-by: Mel Gorman
+---
+ kernel/sched/fair.c | 53 ++++++++++++++++++++++++++++++++++-------------------
+ 1 file changed, 34 insertions(+), 19 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 51b49829854d..d0d8ec4cb4e8 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -6571,22 +6571,42 @@ static inline void hrtick_update(struct rq *rq)
+ #ifdef CONFIG_SMP
+ static inline bool cpu_overutilized(int cpu)
+ {
+-	unsigned long rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
+-	unsigned long rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
++	unsigned long rq_util_min, rq_util_max;
++
++	if (!sched_energy_enabled())
++		return false;
++
++	rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
++	rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
+ 
+ 	/* Return true only if the utilization doesn't fit CPU's capacity */
+ 	return !util_fits_cpu(cpu_util_cfs(cpu), rq_util_min, rq_util_max, cpu);
+ }
+ 
+-static inline void update_overutilized_status(struct rq *rq)
++static inline void set_rd_overutilized_status(struct root_domain *rd,
++					      unsigned int status)
+ {
+-	if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) {
+-		WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED);
+-		trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED);
+-	}
++	if (!sched_energy_enabled())
++		return;
++
++	WRITE_ONCE(rd->overutilized, status);
++	trace_sched_overutilized_tp(rd, !!status);
++}
++
++static inline void check_update_overutilized_status(struct rq *rq)
++{
++	/*
++	 * overutilized field is used for load balancing decisions only
++	 * if energy aware scheduler is being used
++	 */
++	if (!sched_energy_enabled())
++		return;
++
++	if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu))
++		set_rd_overutilized_status(rq->rd, SG_OVERUTILIZED);
+ }
+ #else
+-static inline void update_overutilized_status(struct rq *rq) { }
++static inline void check_update_overutilized_status(struct rq *rq) { }
+ #endif
+ 
+ /* Runqueue only has SCHED_IDLE tasks enqueued */
+@@ -6698,7 +6718,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ 	 * and the following generally works well enough in practice.
+ 	 */
+ 	if (!task_new)
+-		update_overutilized_status(rq);
++		check_update_overutilized_status(rq);
+ 
+ enqueue_throttle:
+ 	assert_list_leaf_cfs_rq(rq);
+@@ -10627,19 +10647,14 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
+ 	env->fbq_type = fbq_classify_group(&sds->busiest_stat);
+ 
+ 	if (!env->sd->parent) {
+-		struct root_domain *rd = env->dst_rq->rd;
+-
+ 		/* update overload indicator if we are at root domain */
+-		WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD);
++		WRITE_ONCE(env->dst_rq->rd->overload, sg_status & SG_OVERLOAD);
+ 
+ 		/* Update over-utilization (tipping point, U >= 0) indicator */
+-		WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED);
+-		trace_sched_overutilized_tp(rd, sg_status & SG_OVERUTILIZED);
++		set_rd_overutilized_status(env->dst_rq->rd,
++					   sg_status & SG_OVERUTILIZED);
+ 	} else if (sg_status & SG_OVERUTILIZED) {
+-		struct root_domain *rd = env->dst_rq->rd;
+-
+-		WRITE_ONCE(rd->overutilized, SG_OVERUTILIZED);
+-		trace_sched_overutilized_tp(rd, SG_OVERUTILIZED);
++		set_rd_overutilized_status(env->dst_rq->rd, SG_OVERUTILIZED);
+ 	}
+ 
+ 	update_idle_cpu_scan(env, sum_util);
+@@ -12642,7 +12657,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
+ 	task_tick_numa(rq, curr);
+ 
+ 	update_misfit_status(curr, rq);
+-	update_overutilized_status(task_rq(curr));
++	check_update_overutilized_status(task_rq(curr));
+ 
+ 	task_tick_core(rq, curr);
+ }
diff --git a/patches.suse/sched-fair-Combine-EAS-check-with-overutilized-access.patch b/patches.suse/sched-fair-Combine-EAS-check-with-overutilized-access.patch
index 1be769f..7941d3f 100644
--- a/patches.suse/sched-fair-Combine-EAS-check-with-overutilized-access.patch
+++ b/patches.suse/sched-fair-Combine-EAS-check-with-overutilized-access.patch
@@ -1,6 +1,6 @@
-From 2dfce508797826c3a328e9541629283b8069e247 Mon Sep 17 00:00:00 2001
+From 655fff36fa50c13b9efce03aa5fa42312c1cf21d Mon Sep 17 00:00:00 2001
 From: Shrikanth Hegde
-Date: Wed, 6 Mar 2024 15:54:54 +0530
+Date: Wed, 6 Mar 2024 10:24:52 +0000
 Subject: [PATCH] sched/fair: Combine EAS check with overutilized access
 
 Patch-mainline: Not yet, v6 under review
@@ -22,12 +22,13 @@ No change in functionality intended.
 Suggested-by: Vincent Guittot
 Signed-off-by: Shrikanth Hegde
 Signed-off-by: Mel Gorman
+
 ---
- kernel/sched/fair.c | 26 ++++++++++----------------
- 1 file changed, 10 insertions(+), 16 deletions(-)
+ kernel/sched/fair.c | 24 +++++++++---------------
+ 1 file changed, 9 insertions(+), 15 deletions(-)
 
 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index edd0c089a1a2..60b9fed23951 100644
+index 33ce8880e652..a71ef7e2230b 100644
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
 @@ -6584,12 +6584,11 @@ static inline bool cpu_overutilized(int cpu)
-@@ -6626,7 +6623,7 @@ static inline void set_rd_overutilized_status(struct root_domain *rd,
- 	return 0;
- }
- 
--static inline int is_rd_overutilized(struct root_domain *rd)
-+static inline int is_rd_not_overutilized(struct root_domain *rd)
- {
- 	return 0;
- }
-@@ -7900,7 +7897,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+@@ -7886,7 +7883,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
 
 	rcu_read_lock();
 	pd = rcu_dereference(rd->pd);
-@@ -8103,7 +8100,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
+@@ -8089,7 +8086,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
 
 	    cpumask_test_cpu(cpu, p->cpus_ptr))
 		return cpu;
 
-@@ -10914,12 +10911,9 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
+@@ -10900,12 +10897,9 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 
 	if (busiest->group_type == group_misfit_task)
 		goto force_balance;
 
diff --git a/patches.suse/sched-fair-Introduce-is_rd_overutilized-helper-function-to-access-root_domain-overutilized.patch b/patches.suse/sched-fair-Introduce-is_rd_overutilized-helper-function-to-access-root_domain-overutilized.patch
new file mode 100644
index 0000000..e865e9d
--- /dev/null
+++ b/patches.suse/sched-fair-Introduce-is_rd_overutilized-helper-function-to-access-root_domain-overutilized.patch
@@ -0,0 +1,76 @@
+From 4ad8f3ab8216a94620ee3f1b2ae065606c17c88f Mon Sep 17 00:00:00 2001
+From: Shrikanth Hegde
+Date: Thu, 7 Mar 2024 14:27:24 +0530
+Subject: [PATCH] sched/fair: Introduce is_rd_overutilized() helper function to
+ access root_domain::overutilized
+
+References: bsc#1221157
+Git-commit: d0f5d3cefc259f498456338d319098dc84393b24
+Patch-mainline: Queued in subsystem maintainer repository
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git
+
+The root_domain::overutilized field is READ_ONCE() accessed in
+multiple places, which could be simplified with a helper function.
+
+This might also make it more apparent that it needs to be used
+only in case of EAS.
+
+No change in functionality intended.
+
+Signed-off-by: Shrikanth Hegde
+Signed-off-by: Ingo Molnar
+Reviewed-by: Qais Yousef
+Reviewed-by: Vincent Guittot
+Link: https://lore.kernel.org/r/20240307085725.444486-3-sshegde@linux.ibm.com
+Signed-off-by: Mel Gorman
+---
+ kernel/sched/fair.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index d0d8ec4cb4e8..33ce8880e652 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -6583,6 +6583,15 @@ static inline bool cpu_overutilized(int cpu)
+ 	return !util_fits_cpu(cpu_util_cfs(cpu), rq_util_min, rq_util_max, cpu);
+ }
+ 
++/*
++ * Ensure that caller can do EAS. overutilized value
++ * make sense only if EAS is enabled
++ */
++static inline int is_rd_overutilized(struct root_domain *rd)
++{
++	return READ_ONCE(rd->overutilized);
++}
++
+ static inline void set_rd_overutilized_status(struct root_domain *rd,
+ 					      unsigned int status)
+ {
+@@ -6602,7 +6611,7 @@ static inline void check_update_overutilized_status(struct rq *rq)
+ 	if (!sched_energy_enabled())
+ 		return;
+ 
+-	if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu))
++	if (!is_rd_overutilized(rq->rd) && cpu_overutilized(rq->cpu))
+ 		set_rd_overutilized_status(rq->rd, SG_OVERUTILIZED);
+ }
+ #else
+@@ -7877,7 +7886,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ 
+ 	rcu_read_lock();
+ 	pd = rcu_dereference(rd->pd);
+-	if (!pd || READ_ONCE(rd->overutilized))
++	if (!pd || is_rd_overutilized(rd))
+ 		goto unlock;
+ 
+ 	/*
+@@ -10894,7 +10903,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
+ 	if (sched_energy_enabled()) {
+ 		struct root_domain *rd = env->dst_rq->rd;
+ 
+-		if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized))
++		if (rcu_dereference(rd->pd) && !is_rd_overutilized(rd))
+ 			goto out_balanced;
+ 	}
+ 
diff --git a/patches.suse/sched-fair-Use-helper-function-to-access-rd-overutilized.patch b/patches.suse/sched-fair-Use-helper-function-to-access-rd-overutilized.patch
deleted file mode 100644
index 0d62ad1..0000000
--- a/patches.suse/sched-fair-Use-helper-function-to-access-rd-overutilized.patch
+++ /dev/null
@@ -1,81 +0,0 @@
-From 8680079874e3829ade106ea108b25c8edd01aff5 Mon Sep 17 00:00:00 2001
-From: Shrikanth Hegde
-Date: Wed, 6 Mar 2024 15:54:53 +0530
-Subject: [PATCH] sched/fair: Use helper function to access rd->overutilized
-
-Patch-mainline: Not yet, v6 under review
-URL: https://lore.kernel.org/lkml/20240307085725.444486-1-sshegde@linux.ibm.com/
-References: bsc#1221157
-
-Overutilized field is accessed directly in multiple places.
-So it could use a helper function. That way one might be more
-informed that it needs to be used only in case of EAS.
-
-No change in functionality intended.
-
-Reviewed-by: Qais Yousef
-Signed-off-by: Shrikanth Hegde
-Signed-off-by: Mel Gorman
----
- kernel/sched/fair.c | 20 +++++++++++++++++---
- 1 file changed, 17 insertions(+), 3 deletions(-)
-
-diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index be112fbc244b..edd0c089a1a2 100644
---- a/kernel/sched/fair.c
-+++ b/kernel/sched/fair.c
-@@ -6583,6 +6583,15 @@ static inline bool cpu_overutilized(int cpu)
- 	return !util_fits_cpu(cpu_util_cfs(cpu), rq_util_min, rq_util_max, cpu);
- }
- 
-+/*
-+ * Ensure that caller can do EAS. overutilized value
-+ * make sense only if EAS is enabled
-+ */
-+static inline int is_rd_overutilized(struct root_domain *rd)
-+{
-+	return READ_ONCE(rd->overutilized);
-+}
-+
- static inline void set_rd_overutilized_status(struct root_domain *rd,
- 					      unsigned int status)
- {
-@@ -6602,7 +6611,7 @@ static inline void check_update_overutilized_status(struct rq *rq)
- 	if (!sched_energy_enabled())
- 		return;
- 
--	if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu))
-+	if (!is_rd_overutilized(rq->rd) && cpu_overutilized(rq->cpu))
- 		set_rd_overutilized_status(rq->rd, SG_OVERUTILIZED);
- }
- #else
-@@ -6616,6 +6625,11 @@ static inline void set_rd_overutilized_status(struct root_domain *rd,
- {
- 	return 0;
- }
-+
-+static inline int is_rd_overutilized(struct root_domain *rd)
-+{
-+	return 0;
-+}
- #endif
- 
- /* Runqueue only has SCHED_IDLE tasks enqueued */
-@@ -7886,7 +7900,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
- 
- 	rcu_read_lock();
- 	pd = rcu_dereference(rd->pd);
--	if (!pd || READ_ONCE(rd->overutilized))
-+	if (!pd || is_rd_overutilized(rd))
- 		goto unlock;
- 
- 	/*
-@@ -10903,7 +10917,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
- 	if (sched_energy_enabled()) {
- 		struct root_domain *rd = env->dst_rq->rd;
- 
--		if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized))
-+		if (rcu_dereference(rd->pd) && !is_rd_overutilized(rd))
- 			goto out_balanced;
- 	}
- 
diff --git a/series.conf b/series.conf
index 133184f..cd43973 100644
--- a/series.conf
+++ b/series.conf
@@ -20158,8 +20158,8 @@
 +mgorman	patches.suse/sched-fair-Revert-update_pick_idlest-Select-group-with-lowest-group_util-when-idle_cpus-are-equal.patch
 
 	# Cache line contention reduction pending review upstream
-	patches.suse/sched-fair-Add-EAS-checks-before-updating-overutilized.patch
-	patches.suse/sched-fair-Use-helper-function-to-access-rd-overutilized.patch
+	patches.suse/sched-fair-Add-EAS-checks-before-updating-root_domain-overutilized.patch
+	patches.suse/sched-fair-Introduce-is_rd_overutilized-helper-function-to-access-root_domain-overutilized.patch
 	patches.suse/sched-fair-Combine-EAS-check-with-overutilized-access.patch
 
 ##########################################################
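
For reference, a minimal standalone sketch of the accessor pattern this series converges on: root_domain::overutilized is read only through is_rd_overutilized() and written only through set_rd_overutilized_status(), with every write guarded by the EAS check so that non-EAS systems never dirty the cacheline shared with ::overload. This is an illustration, not the kernel code: it stands in for READ_ONCE()/WRITE_ONCE() with C11 relaxed atomics, for the sched_energy_enabled() static branch with a plain function, and for the tracepoint with a printf(), so all of those substitutions are assumptions.

  #include <stdatomic.h>
  #include <stdio.h>

  #define SG_OVERUTILIZED 2               /* matches the kernel's 0x2 flag */

  struct root_domain {
          atomic_uint overutilized;       /* kernel: unsigned int + {READ,WRITE}_ONCE */
  };

  /* Kernel version is a static branch; assume EAS is enabled for the sketch. */
  static int sched_energy_enabled(void) { return 1; }

  /* Read-side helper: callers read the field only through this accessor. */
  static unsigned int is_rd_overutilized(struct root_domain *rd)
  {
          return atomic_load_explicit(&rd->overutilized, memory_order_relaxed);
  }

  /* Write-side helper: without EAS, skip the shared cacheline entirely. */
  static void set_rd_overutilized_status(struct root_domain *rd,
                                         unsigned int status)
  {
          if (!sched_energy_enabled())
                  return;
          atomic_store_explicit(&rd->overutilized, status, memory_order_relaxed);
          printf("trace: overutilized=%d\n", !!status); /* tracepoint stand-in */
  }

  int main(void)
  {
          struct root_domain rd = { 0 };

          /* check_update_overutilized_status(): read first, write on transition */
          if (!is_rd_overutilized(&rd))
                  set_rd_overutilized_status(&rd, SG_OVERUTILIZED);
          return 0;
  }

The read-before-write shape is the point: the hot paths (enqueue_task_fair(), task_tick_fair()) only ever read until the domain actually tips into overutilization, and the EAS guard keeps non-EAS systems from writing at all, which is what removes the false sharing against ::overload described in the first patch.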