From c48ddaf3acb932224faf7c60db7c7ad9aca727c4 Mon Sep 17 00:00:00 2001
From: Zhang Qiao <zhangqiao22@huawei.com>
Date: Sat, 18 Jun 2022 02:11:51 +0800
Subject: [PATCH] sched: Remove unused function group_first_cpu()

References: bnc#1189999 (Scheduler functional and performance backports)
Patch-mainline: v6.0-rc1
Git-commit: c64b551f6a338eb9724a2f9ef3dddf80ccef2894

Since commit afe06efdf07c ("sched: Extend scheduler's asym packing"),
group_first_cpu() has been unused; remove it.

Signed-off-by: Zhang Qiao <zhangqiao22@huawei.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <vschneid@redhat.com>
Link: https://lore.kernel.org/r/20220617181151.29980-3-zhangqiao22@huawei.com
Signed-off-by: Mel Gorman <mgorman@suse.de>
---
 kernel/sched/sched.h | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index f275e58e3e76..9b6790f37db6 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1827,15 +1827,6 @@ static inline struct cpumask *group_balance_mask(struct sched_group *sg)
 	return to_cpumask(sg->sgc->cpumask);
 }
 
-/**
- * group_first_cpu - Returns the first CPU in the cpumask of a sched_group.
- * @group: The group whose first CPU is to be returned.
- */
-static inline unsigned int group_first_cpu(struct sched_group *group)
-{
-	return cpumask_first(sched_group_span(group));
-}
-
 extern int group_balance_cpu(struct sched_group *sg);
 
 #ifdef CONFIG_SCHED_DEBUG
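
Note: the removed helper was a one-line wrapper, so any out-of-tree or
backported code that still needs the first CPU of a sched_group can
open-code the same lookup. A minimal sketch, assuming sched_group_span()
is still available as in this tree (the function name below is
hypothetical, not part of the kernel API):

/* Equivalent of the removed group_first_cpu(): return the lowest-numbered
 * CPU in the group's span. */
static inline unsigned int first_cpu_of_group(struct sched_group *group)
{
	return cpumask_first(sched_group_span(group));
}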