2005-04-16 22:20:36 +00:00
|
|
|
#ifndef _LINUX_STOP_MACHINE
|
|
|
|
#define _LINUX_STOP_MACHINE
|
|
|
|
/* "Bogolock": stop the entire machine, disable interrupts. This is a
|
|
|
|
very heavy lock, which is equivalent to grabbing every spinlock
|
|
|
|
(and more). So the "read" side to such a lock is anything which
|
2008-08-26 05:19:27 +00:00
|
|
|
   disables preempt. */
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <linux/cpu.h>
|
2008-07-28 17:16:30 +00:00
|
|
|
#include <linux/cpumask.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <asm/system.h>
|
|
|
|
|
|
|
|
#if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)
|
2008-02-28 16:33:03 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/**
|
2008-07-28 17:16:30 +00:00
|
|
|
* stop_machine: freeze the machine on all CPUs and run this function
|
2005-04-16 22:20:36 +00:00
|
|
|
* @fn: the function to run
|
|
|
|
* @data: the data ptr for the @fn()
|
2008-07-28 17:16:30 +00:00
|
|
|
* @cpus: the cpus to run the @fn() on (NULL = any online cpu)
|
2005-04-16 22:20:36 +00:00
|
|
|
*
|
2008-07-28 17:16:28 +00:00
|
|
|
* Description: This causes a thread to be scheduled on every cpu,
|
|
|
|
 * each of which disables interrupts.  The result is that no one is
|
|
|
|
* holding a spinlock or inside any other preempt-disabled region when
|
|
|
|
* @fn() runs.
|
2005-04-16 22:20:36 +00:00
|
|
|
*
|
|
|
|
* This can be thought of as a very heavy write lock, equivalent to
|
|
|
|
* grabbing every spinlock in the kernel. */
|
2008-12-31 23:42:28 +00:00
|
|
|
int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/**
|
2008-07-28 17:16:30 +00:00
|
|
|
* __stop_machine: freeze the machine on all CPUs and run this function
|
2005-04-16 22:20:36 +00:00
|
|
|
* @fn: the function to run
|
|
|
|
* @data: the data ptr for the @fn
|
2008-07-28 17:16:30 +00:00
|
|
|
* @cpus: the cpus to run the @fn() on (NULL = any online cpu)
|
2005-04-16 22:20:36 +00:00
|
|
|
*
|
2008-07-28 17:16:28 +00:00
|
|
|
* Description: This is a special version of the above, which assumes cpus
|
|
|
|
* won't come or go while it's being called. Used by hotplug cpu.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2008-12-31 23:42:28 +00:00
|
|
|
int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
|
2008-12-22 11:36:30 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* stop_machine_create: create all stop_machine threads
|
|
|
|
*
|
|
|
|
* Description: This causes all stop_machine threads to be created before
|
|
|
|
* stop_machine actually gets called. This can be used by subsystems that
|
|
|
|
* need a non failing stop_machine infrastructure.
|
|
|
|
*/
|
|
|
|
int stop_machine_create(void);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* stop_machine_destroy: destroy all stop_machine threads
|
|
|
|
*
|
|
|
|
* Description: This causes all stop_machine threads which were created with
|
|
|
|
* stop_machine_create to be destroyed again.
|
|
|
|
*/
|
|
|
|
void stop_machine_destroy(void);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
#else
|
|
|
|
|
2008-07-28 17:16:30 +00:00
|
|
|
/*
 * UP / !CONFIG_STOP_MACHINE fallback: there are no other CPUs to
 * freeze, so it is enough to run @fn on this CPU with interrupts
 * disabled.  The @cpus argument is accepted for interface parity
 * with the SMP implementation but is not consulted here.
 */
static inline int stop_machine(int (*fn)(void *), void *data,
			       const struct cpumask *cpus)
{
	int result;

	local_irq_disable();
	result = fn(data);
	local_irq_enable();

	return result;
}
|
2008-12-22 11:36:30 +00:00
|
|
|
|
|
|
|
static inline int stop_machine_create(void) { return 0; }
|
|
|
|
static inline void stop_machine_destroy(void) { }
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
#endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */
|
|
|
|
#endif /* _LINUX_STOP_MACHINE */
|