Subject: revert stop_machine: Provide stop_machine_cpuslocked()
From: Mike Galbraith <mgalbraith@suse.de>
Date: Wed Aug 22 07:33:59 CEST 2018
Patch-mainline: Never, -rt specific
References: SLE Realtime Extension
Part of a massive/complex/validated RT all-in-one hotplug update.
Signed-off-by: Mike Galbraith <mgalbraith@suse.de>
---
 include/linux/stop_machine.h |   26 +++-----------------------
 kernel/stop_machine.c        |   11 ++++-------
 2 files changed, 7 insertions(+), 30 deletions(-)
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -116,29 +116,15 @@ static inline int try_stop_cpus(const st
* @fn() runs.
*
* This can be thought of as a very heavy write lock, equivalent to
- * grabbing every spinlock in the kernel.
- *
- * Protects against CPU hotplug.
- */
+ * grabbing every spinlock in the kernel. */
int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
-/**
- * stop_machine_cpuslocked: freeze the machine on all CPUs and run this function
- * @fn: the function to run
- * @data: the data ptr for the @fn()
- * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
- *
- * Same as above. Must be called from with in a cpus_read_lock() protected
- * region. Avoids nested calls to cpus_read_lock().
- */
-int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
-
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
const struct cpumask *cpus);
#else /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
-static __always_inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
- const struct cpumask *cpus)
+static __always_inline int stop_machine(cpu_stop_fn_t fn, void *data,
+ const struct cpumask *cpus)
{
unsigned long flags;
int ret;
@@ -149,12 +135,6 @@ static __always_inline int stop_machine_
}
static __always_inline int
-stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
-{
- return stop_machine_cpuslocked(fn, data, cpus);
-}
-
-static __always_inline int
stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
const struct cpumask *cpus)
{
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -575,8 +575,7 @@ static int __init cpu_stop_init(void)
}
early_initcall(cpu_stop_init);
-int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
- const struct cpumask *cpus)
+static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
struct multi_stop_data msdata = {
.fn = fn,
@@ -585,8 +584,6 @@ int stop_machine_cpuslocked(cpu_stop_fn_
.active_cpus = cpus,
};
- lockdep_assert_cpus_held();
-
if (!stop_machine_initialized) {
/*
* Handle the case where stop_machine() is called
@@ -616,9 +613,9 @@ int stop_machine(cpu_stop_fn_t fn, void
int ret;
/* No CPUs can come up or down during this. */
- cpus_read_lock();
- ret = stop_machine_cpuslocked(fn, data, cpus);
- cpus_read_unlock();
+ get_online_cpus();
+ ret = __stop_machine(fn, data, cpus);
+ put_online_cpus();
return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);