From: Peter Zijlstra <peterz@infradead.org>
Date: Mon, 13 Nov 2017 14:28:33 +0100
Subject: perf/core: Remove perf_event::group_entry
Git-commit: 8343aae66167df6708128a778e750d48dbe31302
Patch-mainline: v4.17-rc1
References: FATE#326324

Now that all the grouping is done with RB trees, we no longer need
group_entry and can replace the whole thing with sibling_list.
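
A minimal sketch of the resulting convention, trimmed from the
perf_group_attach() hunk in kernel/events/core.c below (names as in
the patch): the leader's sibling_list serves as the list head, while
each sibling's sibling_list is its node on that head, so the one
field covers both roles:

	/* attach: the sibling's sibling_list becomes a node on the
	 * leader's sibling_list, which doubles as the list head */
	list_add_tail(&event->sibling_list, &group_leader->sibling_list);

	/* iterate: walk the leader's head, using each sibling's
	 * sibling_list as the link member */
	list_for_each_entry(sibling, &group_leader->sibling_list, sibling_list)
		perf_event__header_size(sibling);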

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Carrillo-Cisneros <davidcc@google.com>
Cc: Dmitri Prokhorov <Dmitry.Prohorov@intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Valery Cherepennikov <valery.cherepennikov@intel.com>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Tony Jones <tonyj@suse.de>
---
 arch/alpha/kernel/perf_event.c           |  2 +-
 arch/arm/mach-imx/mmdc.c                 |  2 +-
 arch/arm/mm/cache-l2x0-pmu.c             |  2 +-
 arch/mips/kernel/perf_event_mipsxx.c     |  2 +-
 arch/powerpc/perf/core-book3s.c          |  2 +-
 arch/powerpc/perf/core-fsl-emb.c         |  2 +-
 arch/sparc/kernel/perf_event.c           |  2 +-
 arch/x86/events/core.c                   |  2 +-
 arch/x86/events/intel/uncore.c           |  2 +-
 drivers/bus/arm-cci.c                    |  2 +-
 drivers/bus/arm-ccn.c                    |  2 +-
 drivers/perf/arm_dsu_pmu.c               |  2 +-
 drivers/perf/arm_pmu.c                   |  2 +-
 drivers/perf/hisilicon/hisi_uncore_pmu.c |  3 +--
 drivers/perf/qcom_l2_pmu.c               |  4 ++--
 drivers/perf/qcom_l3_pmu.c               |  2 +-
 drivers/perf/xgene_pmu.c                 |  2 +-
 include/linux/perf_event.h               |  5 -----
 kernel/events/core.c                     | 37 ++++++++++++++++----------------
 19 files changed, 36 insertions(+), 43 deletions(-)

diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index a1f6bc7f1e4c..435864c24479 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -351,7 +351,7 @@ static int collect_events(struct perf_event *group, int max_count,
 		evtype[n] = group->hw.event_base;
 		current_idx[n++] = PMC_NO_INDEX;
 	}
-	list_for_each_entry(pe, &group->sibling_list, group_entry) {
+	list_for_each_entry(pe, &group->sibling_list, sibling_list) {
 		if (!is_software_event(pe) && pe->state != PERF_EVENT_STATE_OFF) {
 			if (n >= max_count)
 				return -1;
diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c
index 5fb1d2254b5e..27a9ca20933e 100644
--- a/arch/arm/mach-imx/mmdc.c
+++ b/arch/arm/mach-imx/mmdc.c
@@ -269,7 +269,7 @@ static bool mmdc_pmu_group_is_valid(struct perf_event *event)
 			return false;
 	}
 
-	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+	list_for_each_entry(sibling, &leader->sibling_list, sibling_list) {
 		if (!mmdc_pmu_group_event_is_valid(sibling, pmu, &counter_mask))
 			return false;
 	}
diff --git a/arch/arm/mm/cache-l2x0-pmu.c b/arch/arm/mm/cache-l2x0-pmu.c
index 0a1e2280141f..3a89ea4c2b57 100644
--- a/arch/arm/mm/cache-l2x0-pmu.c
+++ b/arch/arm/mm/cache-l2x0-pmu.c
@@ -293,7 +293,7 @@ static bool l2x0_pmu_group_is_valid(struct perf_event *event)
 	else if (!is_software_event(leader))
 		return false;
 
-	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+	list_for_each_entry(sibling, &leader->sibling_list, sibling_list) {
 		if (sibling->pmu == pmu)
 			num_hw++;
 		else if (!is_software_event(sibling))
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 6668f67a61c3..46097ff3208b 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -711,7 +711,7 @@ static int validate_group(struct perf_event *event)
 	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
 		return -EINVAL;
 
-	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+	list_for_each_entry(sibling, &leader->sibling_list, sibling_list) {
 		if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
 			return -EINVAL;
 	}
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index f89bbd54ecec..7c1f66050433 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1426,7 +1426,7 @@ static int collect_events(struct perf_event *group, int max_count,
 		flags[n] = group->hw.event_base;
 		events[n++] = group->hw.config;
 	}
-	list_for_each_entry(event, &group->sibling_list, group_entry) {
+	list_for_each_entry(event, &group->sibling_list, sibling_list) {
 		if (event->pmu->task_ctx_nr == perf_hw_context &&
 		    event->state != PERF_EVENT_STATE_OFF) {
 			if (n >= max_count)
diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c
index 5d747b4cb8ee..94c2e63662c6 100644
--- a/arch/powerpc/perf/core-fsl-emb.c
+++ b/arch/powerpc/perf/core-fsl-emb.c
@@ -277,7 +277,7 @@ static int collect_events(struct perf_event *group, int max_count,
 		ctrs[n] = group;
 		n++;
 	}
-	list_for_each_entry(event, &group->sibling_list, group_entry) {
+	list_for_each_entry(event, &group->sibling_list, sibling_list) {
 		if (!is_software_event(event) &&
 		    event->state != PERF_EVENT_STATE_OFF) {
 			if (n >= max_count)
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 5c1f54758312..a0a86d369119 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1342,7 +1342,7 @@ static int collect_events(struct perf_event *group, int max_count,
 		events[n] = group->hw.event_base;
 		current_idx[n++] = PIC_NO_INDEX;
 	}
-	list_for_each_entry(event, &group->sibling_list, group_entry) {
+	list_for_each_entry(event, &group->sibling_list, sibling_list) {
 		if (!is_software_event(event) &&
 		    event->state != PERF_EVENT_STATE_OFF) {
 			if (n >= max_count)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 9c86e10f1196..77a4125b6b1f 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -990,7 +990,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
 	if (!dogrp)
 		return n;
 
-	list_for_each_entry(event, &leader->sibling_list, group_entry) {
+	list_for_each_entry(event, &leader->sibling_list, sibling_list) {
 		if (!is_x86_event(event) ||
 		    event->state <= PERF_EVENT_STATE_OFF)
 			continue;
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 7874c980d569..9e374cd22ad2 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -354,7 +354,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
 	if (!dogrp)
 		return n;
 
-	list_for_each_entry(event, &leader->sibling_list, group_entry) {
+	list_for_each_entry(event, &leader->sibling_list, sibling_list) {
 		if (!is_box_event(box, event) ||
 		    event->state <= PERF_EVENT_STATE_OFF)
 			continue;
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 5426c04fe24b..c98435bdb64f 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -1311,7 +1311,7 @@ validate_group(struct perf_event *event)
 	if (!validate_event(event->pmu, &fake_pmu, leader))
 		return -EINVAL;
 
-	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+	list_for_each_entry(sibling, &leader->sibling_list, sibling_list) {
 		if (!validate_event(event->pmu, &fake_pmu, sibling))
 			return -EINVAL;
 	}
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index b52332e52ca5..1c310a4be000 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -847,7 +847,7 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
 		return -EINVAL;
 
 	list_for_each_entry(sibling, &event->group_leader->sibling_list,
-			group_entry)
+			sibling_list)
 		if (sibling->pmu != event->pmu &&
 				!is_software_event(sibling))
 			return -EINVAL;
diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
index 38f2cc2a6c74..660680d78147 100644
--- a/drivers/perf/arm_dsu_pmu.c
+++ b/drivers/perf/arm_dsu_pmu.c
@@ -536,7 +536,7 @@ static bool dsu_pmu_validate_group(struct perf_event *event)
 	memset(fake_hw.used_mask, 0, sizeof(fake_hw.used_mask));
 	if (!dsu_pmu_validate_event(event->pmu, &fake_hw, leader))
 		return false;
-	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+	list_for_each_entry(sibling, &leader->sibling_list, sibling_list) {
 		if (!dsu_pmu_validate_event(event->pmu, &fake_hw, sibling))
 			return false;
 	}
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 0c2ed11c0603..628d7a7b9526 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -311,7 +311,7 @@ validate_group(struct perf_event *event)
 	if (!validate_event(event->pmu, &fake_pmu, leader))
 		return -EINVAL;
 
-	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+	list_for_each_entry(sibling, &leader->sibling_list, sibling_list) {
 		if (!validate_event(event->pmu, &fake_pmu, sibling))
 			return -EINVAL;
 	}
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index 7ed24b954422..e3356087fd76 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -82,8 +82,7 @@ static bool hisi_validate_event_group(struct perf_event *event)
 			counters++;
 	}
 
-	list_for_each_entry(sibling, &event->group_leader->sibling_list,
-			    group_entry) {
+	list_for_each_entry(sibling, &event->group_leader->sibling_list, sibling_list) {
 		if (is_software_event(sibling))
 			continue;
 		if (sibling->pmu != event->pmu)
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index 4fdc8486a8e4..5e535a718965 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -535,7 +535,7 @@ static int l2_cache_event_init(struct perf_event *event)
 	}
 
 	list_for_each_entry(sibling, &event->group_leader->sibling_list,
-			    group_entry)
+			    sibling_list)
 		if (sibling->pmu != event->pmu &&
 		    !is_software_event(sibling)) {
 			dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
@@ -572,7 +572,7 @@ static int l2_cache_event_init(struct perf_event *event)
 	}
 
 	list_for_each_entry(sibling, &event->group_leader->sibling_list,
-			    group_entry) {
+			    sibling_list) {
 		if ((sibling != event) &&
 		    !is_software_event(sibling) &&
 		    (L2_EVT_GROUP(sibling->attr.config) ==
diff --git a/drivers/perf/qcom_l3_pmu.c b/drivers/perf/qcom_l3_pmu.c
index 7f6b62b29e9d..5dedf4b1a552 100644
--- a/drivers/perf/qcom_l3_pmu.c
+++ b/drivers/perf/qcom_l3_pmu.c
@@ -468,7 +468,7 @@ static bool qcom_l3_cache__validate_event_group(struct perf_event *event)
 	counters = event_num_counters(event);
 	counters += event_num_counters(leader);
 
-	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+	list_for_each_entry(sibling, &leader->sibling_list, sibling_list) {
 		if (is_software_event(sibling))
 			continue;
 		if (sibling->pmu != event->pmu)
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index eb23311bc70c..f1f4a56cab5e 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -950,7 +950,7 @@ static int xgene_perf_event_init(struct perf_event *event)
 		return -EINVAL;
 
 	list_for_each_entry(sibling, &event->group_leader->sibling_list,
-			group_entry)
+			sibling_list)
 		if (sibling->pmu != event->pmu &&
 				!is_software_event(sibling))
 			return -EINVAL;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 6e3f854a34d8..84044ec21b31 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -549,14 +549,9 @@ struct perf_event {
 	struct list_head		event_entry;
 
 	/*
-	 * XXX: group_entry and sibling_list should be mutually exclusive;
-	 * either you're a sibling on a group, or you're the group leader.
-	 * Rework the code to always use the same list element.
-	 *
 	 * Locked for modification by both ctx->mutex and ctx->lock; holding
 	 * either sufficies for read.
 	 */
-	struct list_head		group_entry;
 	struct list_head		sibling_list;
 	/*
 	 * Node on the pinned or flexible tree located at the event context;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2d8c0208ca4a..9a07bbe66451 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -643,7 +643,7 @@ static void perf_event_update_sibling_time(struct perf_event *leader)
 {
 	struct perf_event *sibling;
 
-	list_for_each_entry(sibling, &leader->sibling_list, group_entry)
+	list_for_each_entry(sibling, &leader->sibling_list, sibling_list)
 		perf_event_update_time(sibling);
 }
 
@@ -1835,12 +1835,12 @@ static void perf_group_attach(struct perf_event *event)
 
 	group_leader->group_caps &= event->event_caps;
 
-	list_add_tail(&event->group_entry, &group_leader->sibling_list);
+	list_add_tail(&event->sibling_list, &group_leader->sibling_list);
 	group_leader->nr_siblings++;
 
 	perf_event__header_size(group_leader);
 
-	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
+	list_for_each_entry(pos, &group_leader->sibling_list, sibling_list)
 		perf_event__header_size(pos);
 }
 
@@ -1904,7 +1904,7 @@ static void perf_group_detach(struct perf_event *event)
 	 * If this is a sibling, remove it from its group.
 	 */
 	if (event->group_leader != event) {
-		list_del_init(&event->group_entry);
+		list_del_init(&event->sibling_list);
 		event->group_leader->nr_siblings--;
 		goto out;
 	}
@@ -1914,7 +1914,7 @@ static void perf_group_detach(struct perf_event *event)
 	 * upgrade the siblings to singleton events by adding them
 	 * to whatever list we are on.
 	 */
-	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
+	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) {
 
 		sibling->group_leader = sibling;
 
@@ -1922,7 +1922,7 @@ static void perf_group_detach(struct perf_event *event)
 		sibling->group_caps = event->group_caps;
 
 		if (!RB_EMPTY_NODE(&event->group_node)) {
-			list_del_init(&sibling->group_entry);
+			list_del_init(&sibling->sibling_list);
 			add_event_to_groups(sibling, event->ctx);
 		}
 
@@ -1932,7 +1932,7 @@ static void perf_group_detach(struct perf_event *event)
 out:
 	perf_event__header_size(event->group_leader);
 
-	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
+	list_for_each_entry(tmp, &event->group_leader->sibling_list, sibling_list)
 		perf_event__header_size(tmp);
 }
 
@@ -1960,7 +1960,7 @@ static inline int pmu_filter_match(struct perf_event *event)
 	if (!__pmu_filter_match(event))
 		return 0;
 
-	list_for_each_entry(child, &event->sibling_list, group_entry) {
+	list_for_each_entry(child, &event->sibling_list, sibling_list) {
 		if (!__pmu_filter_match(child))
 			return 0;
 	}
@@ -2028,7 +2028,7 @@ group_sched_out(struct perf_event *group_event,
 	/*
 	 * Schedule out siblings (if any):
 	 */
-	list_for_each_entry(event, &group_event->sibling_list, group_entry)
+	list_for_each_entry(event, &group_event->sibling_list, sibling_list)
 		event_sched_out(event, cpuctx, ctx);
 
 	perf_pmu_enable(ctx->pmu);
@@ -2307,7 +2307,7 @@ group_sched_in(struct perf_event *group_event,
 	/*
 	 * Schedule in siblings as one group (if any):
 	 */
-	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
+	list_for_each_entry(event, &group_event->sibling_list, sibling_list) {
 		if (event_sched_in(event, cpuctx, ctx)) {
 			partial_group = event;
 			goto group_error;
@@ -2323,7 +2323,7 @@ group_sched_in(struct perf_event *group_event,
 	 * partial group before returning:
 	 * The events up to the failed event are scheduled out normally.
 	 */
-	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
+	list_for_each_entry(event, &group_event->sibling_list, sibling_list) {
 		if (event == partial_group)
 			break;
 
@@ -3796,7 +3796,7 @@ static void __perf_event_read(void *info)
 
 	pmu->read(event);
 
-	list_for_each_entry(sub, &event->sibling_list, group_entry) {
+	list_for_each_entry(sub, &event->sibling_list, sibling_list) {
 		if (sub->state == PERF_EVENT_STATE_ACTIVE) {
 			/*
 			 * Use sibling's PMU rather than @event's since
@@ -4642,7 +4642,7 @@ static int __perf_read_group_add(struct perf_event *leader,
 	if (read_format & PERF_FORMAT_ID)
 		values[n++] = primary_event_id(leader);
 
-	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
+	list_for_each_entry(sub, &leader->sibling_list, sibling_list) {
 		values[n++] += perf_event_count(sub);
 		if (read_format & PERF_FORMAT_ID)
 			values[n++] = primary_event_id(sub);
@@ -4836,7 +4836,7 @@ static void perf_event_for_each(struct perf_event *event,
 	event = event->group_leader;
 
 	perf_event_for_each_child(event, func);
-	list_for_each_entry(sibling, &event->sibling_list, group_entry)
+	list_for_each_entry(sibling, &event->sibling_list, sibling_list)
 		perf_event_for_each_child(sibling, func);
 }
 
@@ -5995,7 +5995,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
 
 	__output_copy(handle, values, n * sizeof(u64));
 
-	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
+	list_for_each_entry(sub, &leader->sibling_list, sibling_list) {
 		n = 0;
 
 		if ((sub != event) &&
@@ -9813,7 +9813,6 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	mutex_init(&event->child_mutex);
 	INIT_LIST_HEAD(&event->child_list);
 
-	INIT_LIST_HEAD(&event->group_entry);
 	INIT_LIST_HEAD(&event->event_entry);
 	INIT_LIST_HEAD(&event->sibling_list);
 	init_event_group(event);
@@ -10581,7 +10580,7 @@ SYSCALL_DEFINE5(perf_event_open,
 		put_ctx(gctx);
 
 		list_for_each_entry(sibling, &group_leader->sibling_list,
-				    group_entry) {
+				    sibling_list) {
 			perf_remove_from_context(sibling, 0);
 			put_ctx(gctx);
 		}
@@ -10603,7 +10602,7 @@ SYSCALL_DEFINE5(perf_event_open,
 		 * reachable through the group lists.
 		 */
 		list_for_each_entry(sibling, &group_leader->sibling_list,
-				    group_entry) {
+				    sibling_list) {
 			perf_event__state_init(sibling);
 			perf_install_in_context(ctx, sibling, sibling->cpu);
 			get_ctx(ctx);
@@ -11242,7 +11241,7 @@ static int inherit_group(struct perf_event *parent_event,
 	 * case inherit_event() will create individual events, similar to what
 	 * perf_group_detach() would do anyway.
 	 */
-	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
+	list_for_each_entry(sub, &parent_event->sibling_list, sibling_list) {
 		child_ctr = inherit_event(sub, parent, parent_ctx,
 					    child, leader, child_ctx);
 		if (IS_ERR(child_ctr))