cpumask: Replace cpumask_of_cpu with cpumask_of_cpu_ptr

* This patch replaces the dangerous lvalue version of cpumask_of_cpu
    with the new cpumask_of_cpu_ptr macros.  These are patterned after the
    node_to_cpumask_ptr macros.

    In general terms, if there is a cpumask_of_cpu_map[] then a pointer to
    the cpumask_of_cpu_map[cpu] entry is used.  The cpumask_of_cpu_map is
    provided when there is a large NR_CPUS count, greatly reducing the
    amount of code generated and stack space used for cpumask_of_cpu().
    The pointer to the cpumask_t value is needed when calling
    set_cpus_allowed_ptr(), to avoid passing the full cpumask_t value on
    the stack.

    If there isn't a cpumask_of_cpu_map[], then a temporary variable is
    declared and filled in with the value from cpumask_of_cpu(cpu), along
    with a pointer variable pointing to this temporary variable.
    Afterwards, the pointer is used to reference the cpumask value.  The
    compiler will optimize out the extra dereference through the pointer
    as well as the stack space used for the pointer, resulting in
    identical generated code.
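
    In this fallback case, cpumask_of_cpu_ptr(v, cpu) effectively expands
    to the following (a sketch of the #else branch in the
    include/linux/cpumask.h hunk below):

	cpumask_t _v = cpumask_of_cpu(cpu);	/* temporary on the stack */
	const cpumask_t *v = &_v;		/* pointer handed to callees */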

    A good example of the two usages side by side is in net/sunrpc/svc.c:

	case SVC_POOL_PERCPU:
	{
		unsigned int cpu = m->pool_to[pidx];
		cpumask_of_cpu_ptr(cpumask, cpu);

		*oldmask = current->cpus_allowed;
		set_cpus_allowed_ptr(current, cpumask);
		return 1;
	}
	case SVC_POOL_PERNODE:
	{
		unsigned int node = m->pool_to[pidx];
		node_to_cpumask_ptr(nodecpumask, node);

		*oldmask = current->cpus_allowed;
		set_cpus_allowed_ptr(current, nodecpumask);
		return 1;
	}

Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 65c0118453
parent bb2c018b09
Author:    Mike Travis <travis@sgi.com>  2008-07-15 14:14:30 -07:00
Committer: Ingo Molnar

14 changed files with 91 additions and 35 deletions

@@ -73,6 +73,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
struct cpuinfo_x86 *c = &cpu_data(cpu);
cpumask_t saved_mask;
+cpumask_of_cpu_ptr(new_mask, cpu);
int retval;
unsigned int eax, ebx, ecx, edx;
unsigned int edx_part;
@@ -91,7 +92,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
/* Make sure we are running on right CPU */
saved_mask = current->cpus_allowed;
-retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+retval = set_cpus_allowed_ptr(current, new_mask);
if (retval)
return -1;

@@ -200,10 +200,12 @@ static void drv_read(struct drv_cmd *cmd)
static void drv_write(struct drv_cmd *cmd)
{
cpumask_t saved_mask = current->cpus_allowed;
+cpumask_of_cpu_ptr_declare(cpu_mask);
unsigned int i;
for_each_cpu_mask_nr(i, cmd->mask) {
-set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
+cpumask_of_cpu_ptr_next(cpu_mask, i);
+set_cpus_allowed_ptr(current, cpu_mask);
do_drv_write(cmd);
}
@@ -267,11 +269,12 @@ static unsigned int get_measured_perf(unsigned int cpu)
} aperf_cur, mperf_cur;
cpumask_t saved_mask;
+cpumask_of_cpu_ptr(cpu_mask, cpu);
unsigned int perf_percent;
unsigned int retval;
saved_mask = current->cpus_allowed;
-set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+set_cpus_allowed_ptr(current, cpu_mask);
if (get_cpu() != cpu) {
/* We were not able to run on requested processor */
put_cpu();
@@ -337,6 +340,7 @@ static unsigned int get_measured_perf(unsigned int cpu)
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
+cpumask_of_cpu_ptr(cpu_mask, cpu);
struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
unsigned int freq;
unsigned int cached_freq;
@@ -349,7 +353,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
}
cached_freq = data->freq_table[data->acpi_data->state].frequency;
-freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data);
+freq = extract_freq(get_cur_val(cpu_mask), data);
if (freq != cached_freq) {
/*
* The dreaded BIOS frequency change behind our back.

@@ -479,11 +479,12 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvi
static int check_supported_cpu(unsigned int cpu)
{
cpumask_t oldmask;
+cpumask_of_cpu_ptr(cpu_mask, cpu);
u32 eax, ebx, ecx, edx;
unsigned int rc = 0;
oldmask = current->cpus_allowed;
-set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+set_cpus_allowed_ptr(current, cpu_mask);
if (smp_processor_id() != cpu) {
printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu);
@@ -1016,6 +1017,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
{
cpumask_t oldmask;
+cpumask_of_cpu_ptr(cpu_mask, pol->cpu);
struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
u32 checkfid;
u32 checkvid;
@@ -1030,7 +1032,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
/* only run on specific CPU from here on */
oldmask = current->cpus_allowed;
-set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
+set_cpus_allowed_ptr(current, cpu_mask);
if (smp_processor_id() != pol->cpu) {
printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1105,6 +1107,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
{
struct powernow_k8_data *data;
cpumask_t oldmask;
+cpumask_of_cpu_ptr_declare(newmask);
int rc;
if (!cpu_online(pol->cpu))
@@ -1156,7 +1159,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
/* only run on specific CPU from here on */
oldmask = current->cpus_allowed;
-set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
+cpumask_of_cpu_ptr_next(newmask, pol->cpu);
+set_cpus_allowed_ptr(current, newmask);
if (smp_processor_id() != pol->cpu) {
printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1178,7 +1182,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
set_cpus_allowed_ptr(current, &oldmask);
if (cpu_family == CPU_HW_PSTATE)
-pol->cpus = cpumask_of_cpu(pol->cpu);
+pol->cpus = *newmask;
else
pol->cpus = per_cpu(cpu_core_map, pol->cpu);
data->available_cores = &(pol->cpus);
@@ -1244,6 +1248,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
{
struct powernow_k8_data *data;
cpumask_t oldmask = current->cpus_allowed;
+cpumask_of_cpu_ptr(newmask, cpu);
unsigned int khz = 0;
unsigned int first;
@@ -1253,7 +1258,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
if (!data)
return -EINVAL;
-set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+set_cpus_allowed_ptr(current, newmask);
if (smp_processor_id() != cpu) {
printk(KERN_ERR PFX
"limiting to CPU %d failed in powernowk8_get\n", cpu);

@@ -313,9 +313,10 @@ static unsigned int get_cur_freq(unsigned int cpu)
unsigned l, h;
unsigned clock_freq;
cpumask_t saved_mask;
+cpumask_of_cpu_ptr(new_mask, cpu);
saved_mask = current->cpus_allowed;
-set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+set_cpus_allowed_ptr(current, new_mask);
if (smp_processor_id() != cpu)
return 0;
@@ -554,9 +555,11 @@ static int centrino_target (struct cpufreq_policy *policy,
*/
if (!cpus_empty(covered_cpus)) {
+cpumask_of_cpu_ptr_declare(new_mask);
for_each_cpu_mask_nr(j, covered_cpus) {
-set_cpus_allowed_ptr(current,
-&cpumask_of_cpu(j));
+cpumask_of_cpu_ptr_next(new_mask, j);
+set_cpus_allowed_ptr(current, new_mask);
wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
}
}

@@ -244,7 +244,8 @@ static unsigned int _speedstep_get(const cpumask_t *cpus)
static unsigned int speedstep_get(unsigned int cpu)
{
-return _speedstep_get(&cpumask_of_cpu(cpu));
+cpumask_of_cpu_ptr(newmask, cpu);
+return _speedstep_get(newmask);
}
/**

@@ -516,6 +516,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
unsigned long j;
int retval;
cpumask_t oldmask;
+cpumask_of_cpu_ptr(newmask, cpu);
if (num_cache_leaves == 0)
return -ENOENT;
@@ -526,7 +527,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
return -ENOMEM;
oldmask = current->cpus_allowed;
-retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+retval = set_cpus_allowed_ptr(current, newmask);
if (retval)
goto out;

@@ -388,6 +388,7 @@ static int do_microcode_update (void)
void *new_mc = NULL;
int cpu;
cpumask_t old;
+cpumask_of_cpu_ptr_declare(newmask);
old = current->cpus_allowed;
@@ -404,7 +405,8 @@
if (!uci->valid)
continue;
-set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+cpumask_of_cpu_ptr_next(newmask, cpu);
+set_cpus_allowed_ptr(current, newmask);
error = get_maching_microcode(new_mc, cpu);
if (error < 0)
goto out;
@@ -574,6 +576,7 @@ static int apply_microcode_check_cpu(int cpu)
struct cpuinfo_x86 *c = &cpu_data(cpu);
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
cpumask_t old;
+cpumask_of_cpu_ptr(newmask, cpu);
unsigned int val[2];
int err = 0;
@@ -582,7 +585,7 @@ static int apply_microcode_check_cpu(int cpu)
return 0;
old = current->cpus_allowed;
-set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+set_cpus_allowed_ptr(current, newmask);
/* Check if the microcode we have in memory matches the CPU */
if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
@@ -620,11 +623,12 @@ static int apply_microcode_check_cpu(int cpu)
static void microcode_init_cpu(int cpu, int resume)
{
cpumask_t old;
+cpumask_of_cpu_ptr(newmask, cpu);
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
old = current->cpus_allowed;
-set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+set_cpus_allowed_ptr(current, newmask);
mutex_lock(&microcode_mutex);
collect_cpu_info(cpu);
if (uci->valid && system_state == SYSTEM_RUNNING && !resume)
@@ -656,11 +660,12 @@ static ssize_t reload_store(struct sys_device *dev, const char *buf, size_t sz)
return -EINVAL;
if (val == 1) {
cpumask_t old;
+cpumask_of_cpu_ptr(newmask, cpu);
old = current->cpus_allowed;
get_online_cpus();
-set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+set_cpus_allowed_ptr(current, newmask);
mutex_lock(&microcode_mutex);
if (uci->valid)

@@ -403,24 +403,28 @@ void native_machine_shutdown(void)
{
/* Stop the cpus and apics */
#ifdef CONFIG_SMP
-int reboot_cpu_id;
/* The boot cpu is always logical cpu 0 */
-reboot_cpu_id = 0;
+int reboot_cpu_id = 0;
+cpumask_of_cpu_ptr(newmask, reboot_cpu_id);
#ifdef CONFIG_X86_32
/* See if there has been given a command line override */
if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
-cpu_online(reboot_cpu))
+cpu_online(reboot_cpu)) {
reboot_cpu_id = reboot_cpu;
+cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
+}
#endif
/* Make certain the cpu I'm about to reboot on is online */
-if (!cpu_online(reboot_cpu_id))
+if (!cpu_online(reboot_cpu_id)) {
reboot_cpu_id = smp_processor_id();
+cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
+}
/* Make certain I only run on the appropriate processor */
-set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id));
+set_cpus_allowed_ptr(current, newmask);
/* O.K Now that I'm on the appropriate processor,
* stop all of the others.

@@ -827,6 +827,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
static int acpi_processor_get_throttling(struct acpi_processor *pr)
{
cpumask_t saved_mask;
+cpumask_of_cpu_ptr_declare(new_mask);
int ret;
if (!pr)
@@ -838,7 +839,8 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
* Migrate task to the cpu pointed by pr.
*/
saved_mask = current->cpus_allowed;
-set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+cpumask_of_cpu_ptr_next(new_mask, pr->id);
+set_cpus_allowed_ptr(current, new_mask);
ret = pr->throttling.acpi_processor_get_throttling(pr);
/* restore the previous state */
set_cpus_allowed_ptr(current, &saved_mask);
@@ -987,6 +989,7 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
{
cpumask_t saved_mask;
+cpumask_of_cpu_ptr_declare(new_mask);
int ret = 0;
unsigned int i;
struct acpi_processor *match_pr;
@@ -1025,7 +1028,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
* it can be called only for the cpu pointed by pr.
*/
if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
-set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+cpumask_of_cpu_ptr_next(new_mask, pr->id);
+set_cpus_allowed_ptr(current, new_mask);
ret = p_throttling->acpi_processor_set_throttling(pr,
t_state.target_state);
} else {
@@ -1056,7 +1060,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
continue;
}
t_state.cpu = i;
-set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
+cpumask_of_cpu_ptr_next(new_mask, i);
+set_cpus_allowed_ptr(current, new_mask);
ret = match_pr->throttling.
acpi_processor_set_throttling(
match_pr, t_state.target_state);

@@ -254,6 +254,7 @@ static ssize_t host_control_on_shutdown_store(struct device *dev,
static int smi_request(struct smi_cmd *smi_cmd)
{
cpumask_t old_mask;
+cpumask_of_cpu_ptr(new_mask, 0);
int ret = 0;
if (smi_cmd->magic != SMI_CMD_MAGIC) {
@@ -264,7 +265,7 @@ static int smi_request(struct smi_cmd *smi_cmd)
/* SMI requires CPU 0 */
old_mask = current->cpus_allowed;
-set_cpus_allowed_ptr(current, &cpumask_of_cpu(0));
+set_cpus_allowed_ptr(current, new_mask);
if (smp_processor_id() != 0) {
dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
__func__);

@@ -62,6 +62,15 @@
* int next_cpu_nr(cpu, mask) Next cpu past 'cpu', or nr_cpu_ids
*
* cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set
+*ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
+* cpumask_of_cpu_ptr_declare(v) Declares cpumask_t *v
+* cpumask_of_cpu_ptr_next(v, cpu) Sets v = &cpumask_of_cpu_map[cpu]
+* cpumask_of_cpu_ptr(v, cpu) Combines above two operations
+*else
+* cpumask_of_cpu_ptr_declare(v) Declares cpumask_t _v and *v = &_v
+* cpumask_of_cpu_ptr_next(v, cpu) Sets _v = cpumask_of_cpu(cpu)
+* cpumask_of_cpu_ptr(v, cpu) Combines above two operations
+*endif
* CPU_MASK_ALL Initializer - all bits set
* CPU_MASK_NONE Initializer - no bits set
* unsigned long *cpus_addr(mask) Array of unsigned long's in mask
@@ -237,10 +246,15 @@ static inline void __cpus_shift_left(cpumask_t *dstp,
#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
extern cpumask_t *cpumask_of_cpu_map;
#define cpumask_of_cpu(cpu) (cpumask_of_cpu_map[cpu])
+#define cpumask_of_cpu_ptr(v, cpu) \
+const cpumask_t *v = &cpumask_of_cpu(cpu)
+#define cpumask_of_cpu_ptr_declare(v) \
+const cpumask_t *v
+#define cpumask_of_cpu_ptr_next(v, cpu) \
+v = &cpumask_of_cpu(cpu)
#else
#define cpumask_of_cpu(cpu) \
-(*({ \
+({ \
typeof(_unused_cpumask_arg_) m; \
if (sizeof(m) == sizeof(unsigned long)) { \
m.bits[0] = 1UL<<(cpu); \
@@ -248,8 +262,16 @@ extern cpumask_t *cpumask_of_cpu_map;
cpus_clear(m); \
cpu_set((cpu), m); \
} \
-&m; \
-}))
+m; \
+})
+#define cpumask_of_cpu_ptr(v, cpu) \
+cpumask_t _##v = cpumask_of_cpu(cpu); \
+const cpumask_t *v = &_##v
+#define cpumask_of_cpu_ptr_declare(v) \
+cpumask_t _##v; \
+const cpumask_t *v = &_##v
+#define cpumask_of_cpu_ptr_next(v, cpu) \
+_##v = cpumask_of_cpu(cpu)
#endif
#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)

@@ -33,8 +33,9 @@ static int stopmachine(void *cpu)
{
int irqs_disabled = 0;
int prepared = 0;
+cpumask_of_cpu_ptr(cpumask, (int)(long)cpu);
-set_cpus_allowed_ptr(current, &cpumask_of_cpu((int)(long)cpu));
+set_cpus_allowed_ptr(current, cpumask);
/* Ack: we are alive */
smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */

@@ -213,7 +213,9 @@ static void start_stack_timers(void)
int cpu;
for_each_online_cpu(cpu) {
-set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+cpumask_of_cpu_ptr(new_mask, cpu);
+set_cpus_allowed_ptr(current, new_mask);
start_stack_timer(cpu);
}
set_cpus_allowed_ptr(current, &saved_mask);

@@ -314,9 +314,10 @@ svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
case SVC_POOL_PERCPU:
{
unsigned int cpu = m->pool_to[pidx];
+cpumask_of_cpu_ptr(cpumask, cpu);
*oldmask = current->cpus_allowed;
-set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+set_cpus_allowed_ptr(current, cpumask);
return 1;
}
case SVC_POOL_PERNODE: