mirror of https://github.com/adulau/aha.git
synced 2024-12-28 03:36:19 +00:00
Merge branch 'cpumask-cleanups' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus

* 'cpumask-cleanups' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus:
  cpumask: rename tsk_cpumask to tsk_cpus_allowed
  cpumask: don't recommend set_cpus_allowed hack in Documentation/cpu-hotplug.txt
  cpumask: avoid dereferencing struct cpumask
  cpumask: convert drivers/idle/i7300_idle.c to cpumask_var_t
  cpumask: use modern cpumask style in drivers/scsi/fcoe/fcoe.c
  cpumask: avoid deprecated function in mm/slab.c
  cpumask: use cpu_online in kernel/perf_event.c
commit 55db493b65
8 changed files with 33 additions and 45 deletions
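Taken together, these patches move callers from the old fixed-size cpumask_t operations to the pointer-based struct cpumask API. A minimal sketch of the before/after idioms, using a hypothetical mask variable that is not from this commit and assuming a kernel of this vintage (~2.6.32):

	#include <linux/cpumask.h>
	#include <linux/gfp.h>
	#include <linux/kernel.h>

	/* Hypothetical illustration of the conversions in this series. */
	static int cpumask_style_demo(int cpu)
	{
		cpumask_var_t mask;	/* was: cpumask_t mask; */

		/* was: cpus_clear(mask); allocation now zeroes the mask */
		if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
			return -ENOMEM;

		cpumask_set_cpu(cpu, mask);	/* was: cpu_set(cpu, mask); */

		/* was: cpu_isset(cpu, cpu_online_map) */
		if (cpu_online(cpu))
			pr_info("cpu %d online, %u cpus set\n",
				cpu, cpumask_weight(mask));	/* was: cpus_weight() */

		free_cpumask_var(mask);	/* must pair with the allocation */
		return 0;
	}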
Documentation/cpu-hotplug.txt
@@ -315,42 +315,27 @@ A: The following are what is required for CPU hotplug infrastructure to work
 Q: I need to ensure that a particular cpu is not removed when there is some
    work specific to this cpu is in progress.
-A: First switch the current thread context to preferred cpu
+A: There are two ways. If your code can be run in interrupt context, use
+   smp_call_function_single(), otherwise use work_on_cpu(). Note that
+   work_on_cpu() is slow, and can fail due to out of memory:
 
 	int my_func_on_cpu(int cpu)
 	{
-		cpumask_t saved_mask, new_mask = CPU_MASK_NONE;
-		int curr_cpu, err = 0;
-
-		saved_mask = current->cpus_allowed;
-		cpu_set(cpu, new_mask);
-		err = set_cpus_allowed(current, new_mask);
-
-		if (err)
-			return err;
-
-		/*
-		 * If we got scheduled out just after the return from
-		 * set_cpus_allowed() before running the work, this ensures
-		 * we stay locked.
-		 */
-		curr_cpu = get_cpu();
-
-		if (curr_cpu != cpu) {
-			err = -EAGAIN;
-			goto ret;
-		} else {
-			/*
-			 * Do work : But cant sleep, since get_cpu() disables preempt
-			 */
-		}
-	ret:
-		put_cpu();
-		set_cpus_allowed(current, saved_mask);
+		int err;
+		get_online_cpus();
+		if (!cpu_online(cpu))
+			err = -EINVAL;
+		else
+#if NEEDS_BLOCKING
+			err = work_on_cpu(cpu, __my_func_on_cpu, NULL);
+#else
+			smp_call_function_single(cpu, __my_func_on_cpu, &err,
+						 true);
+#endif
+		put_online_cpus();
 		return err;
 	}
 
 Q: How do we determine how many CPUs are available for hotplug.
 A: There is no clear spec defined way from ACPI that can give us that
    information today. Based on some input from Natalie of Unisys,
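The documentation example above refers to a callback __my_func_on_cpu() that it never shows. A hedged sketch of a plausible shape, for the smp_call_function_single() branch: the name comes from the diff, the signature and body are invented here. Note the two branches want different callback types: smp_call_function_single() takes void (*)(void *), while work_on_cpu() takes long (*)(void *).

	#include <linux/smp.h>

	/*
	 * Hypothetical callback for the smp_call_function_single() branch.
	 * It runs on the target CPU with interrupts disabled, so it must
	 * not sleep; the result travels back through the info pointer.
	 */
	static void __my_func_on_cpu(void *info)
	{
		int *err = info;

		/* ... do the per-cpu, non-blocking work here ... */
		*err = 0;
	}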
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1136,7 +1136,7 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 	if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
 		return -ENOMEM;
 
-	cpumask_copy(oldmask, tsk_cpumask(current));
+	cpumask_copy(oldmask, tsk_cpus_allowed(current));
 	set_cpus_allowed_ptr(current, cpumask_of(pol->cpu));
 
 	if (smp_processor_id() != pol->cpu) {
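The powernow-k8 hunk shows the standard save/switch/restore idiom for temporarily pinning the current task to one CPU. A condensed sketch of that idiom under the renamed accessor; the helper name and parameters are illustrative, not from this commit:

	#include <linux/cpumask.h>
	#include <linux/gfp.h>
	#include <linux/sched.h>

	/* Illustrative: pin current to @cpu, run @fn there, restore affinity. */
	static int run_pinned(int cpu, void (*fn)(void))
	{
		cpumask_var_t oldmask;

		if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
			return -ENOMEM;

		cpumask_copy(oldmask, tsk_cpus_allowed(current));
		set_cpus_allowed_ptr(current, cpumask_of(cpu));

		/* Mirrors the hunk: verify we actually landed on @cpu. */
		if (smp_processor_id() == cpu)
			fn();

		set_cpus_allowed_ptr(current, oldmask);
		free_cpumask_var(oldmask);
		return 0;
	}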
drivers/idle/i7300_idle.c
@@ -81,7 +81,7 @@ static u8 i7300_idle_thrtctl_saved;
 static u8 i7300_idle_thrtlow_saved;
 static u32 i7300_idle_mc_saved;
 
-static cpumask_t idle_cpumask;
+static cpumask_var_t idle_cpumask;
 static ktime_t start_ktime;
 static unsigned long avg_idle_us;
 
@@ -459,9 +459,9 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
 	spin_lock_irqsave(&i7300_idle_lock, flags);
 	if (val == IDLE_START) {
 
-		cpu_set(smp_processor_id(), idle_cpumask);
+		cpumask_set_cpu(smp_processor_id(), idle_cpumask);
 
-		if (cpus_weight(idle_cpumask) != num_online_cpus())
+		if (cpumask_weight(idle_cpumask) != num_online_cpus())
 			goto end;
 
 		now_ktime = ktime_get();
@@ -478,8 +478,8 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
 		i7300_idle_ioat_start();
 
 	} else if (val == IDLE_END) {
-		cpu_clear(smp_processor_id(), idle_cpumask);
-		if (cpus_weight(idle_cpumask) == (num_online_cpus() - 1)) {
+		cpumask_clear_cpu(smp_processor_id(), idle_cpumask);
+		if (cpumask_weight(idle_cpumask) == (num_online_cpus() - 1)) {
 			/* First CPU coming out of idle */
 			u64 idle_duration_us;
 
@@ -553,7 +553,6 @@ struct debugfs_file_info {
 static int __init i7300_idle_init(void)
 {
 	spin_lock_init(&i7300_idle_lock);
-	cpus_clear(idle_cpumask);
 	total_us = 0;
 
 	if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev, forceload))
@@ -565,6 +564,9 @@ static int __init i7300_idle_init(void)
 	if (i7300_idle_ioat_init())
 		return -ENODEV;
 
+	if (!zalloc_cpumask_var(&idle_cpumask, GFP_KERNEL))
+		return -ENOMEM;
+
 	debugfs_dir = debugfs_create_dir("i7300_idle", NULL);
 	if (debugfs_dir) {
 		int i = 0;
@@ -589,6 +591,7 @@ static int __init i7300_idle_init(void)
 static void __exit i7300_idle_exit(void)
 {
 	idle_notifier_unregister(&i7300_idle_nb);
+	free_cpumask_var(idle_cpumask);
 
 	if (debugfs_dir) {
 		int i = 0;
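The i7300_idle hunks illustrate the full cpumask_var_t lifecycle: under CONFIG_CPUMASK_OFFSTACK=y the variable is a pointer backed by a heap allocation, otherwise it is an ordinary embedded mask. zalloc_cpumask_var() returns an already-zeroed mask, which is why the explicit cpus_clear() in i7300_idle_init() could simply be dropped. A minimal sketch of the same init/exit pairing, with hypothetical names:

	#include <linux/cpumask.h>
	#include <linux/module.h>

	static cpumask_var_t demo_mask;	/* pointer when CONFIG_CPUMASK_OFFSTACK=y */

	static int __init demo_init(void)
	{
		/* zeroed on allocation, so no separate cpumask_clear() needed */
		if (!zalloc_cpumask_var(&demo_mask, GFP_KERNEL))
			return -ENOMEM;
		return 0;
	}

	static void __exit demo_exit(void)
	{
		free_cpumask_var(demo_mask);	/* must pair with the allocation */
	}

	module_init(demo_init);
	module_exit(demo_exit);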
drivers/scsi/fcoe/fcoe.c
@@ -1260,7 +1260,7 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
 				"CPU.\n");
 
 		spin_unlock_bh(&fps->fcoe_rx_list.lock);
-		cpu = first_cpu(cpu_online_map);
+		cpu = cpumask_first(cpu_online_mask);
 		fps = &per_cpu(fcoe_percpu, cpu);
 		spin_lock_bh(&fps->fcoe_rx_list.lock);
 		if (!fps->thread) {
include/linux/sched.h
@@ -1553,7 +1553,7 @@ struct task_struct {
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
-#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
+#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
 
 /*
  * Priority of a process goes from 0..MAX_PRIO-1, valid RT
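The accessor exists so callers never touch ->cpus_allowed directly, keeping them correct if the field's representation ever changes; the rename just makes call sites read naturally. For instance, a hypothetical affinity check, not part of this commit:

	#include <linux/cpumask.h>
	#include <linux/sched.h>

	/* Hypothetical helper: may @p currently be scheduled on @cpu? */
	static bool task_allowed_on(struct task_struct *p, int cpu)
	{
		return cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
	}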
kernel/perf_event.c
@@ -1614,7 +1614,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
 		 * offline CPU and activate it when the CPU comes up, but
 		 * that's for later.
 		 */
-		if (!cpu_isset(cpu, cpu_online_map))
+		if (!cpu_online(cpu))
 			return ERR_PTR(-ENODEV);
 
 		cpuctx = &per_cpu(perf_cpu_context, cpu);
kernel/time/timer_list.c
@@ -237,10 +237,10 @@ static void timer_list_show_tickdevices(struct seq_file *m)
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 	print_tickdevice(m, tick_get_broadcast_device(), -1);
 	SEQ_printf(m, "tick_broadcast_mask: %08lx\n",
-		   tick_get_broadcast_mask()->bits[0]);
+		   cpumask_bits(tick_get_broadcast_mask())[0]);
 #ifdef CONFIG_TICK_ONESHOT
 	SEQ_printf(m, "tick_broadcast_oneshot_mask: %08lx\n",
-		   tick_get_broadcast_oneshot_mask()->bits[0]);
+		   cpumask_bits(tick_get_broadcast_oneshot_mask())[0]);
 #endif
 	SEQ_printf(m, "\n");
 #endif
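This is the "avoid dereferencing struct cpumask" patch: cpumask_bits() yields the underlying unsigned-long array through the accessor instead of reaching into ->bits by hand, so the code stays correct if the struct layout changes. A hedged sketch of the same printing idiom, with an invented helper name:

	#include <linux/cpumask.h>
	#include <linux/kernel.h>

	/* Illustrative: print the low word of a mask without touching ->bits. */
	static void print_mask_low_word(const struct cpumask *mask, const char *name)
	{
		printk(KERN_INFO "%s: %08lx\n", name, cpumask_bits(mask)[0]);
	}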
mm/slab.c
@@ -1132,7 +1132,7 @@ static void __cpuinit cpuup_canceled(long cpu)
 		if (nc)
 			free_block(cachep, nc->entry, nc->avail, node);
 
-		if (!cpus_empty(*mask)) {
+		if (!cpumask_empty(mask)) {
 			spin_unlock_irq(&l3->list_lock);
 			goto free_array_cache;
 		}