[PATCH] cpu hotplug: fix locking in cpufreq drivers

When calling target drivers to set frequency, we take the cpucontrol lock.
When we modified the code to accommodate CPU hotplug, there was an attempt
to take a double lock of cpucontrol, leading to a deadlock.  Since the
current thread context is already holding the cpucontrol lock, we don't need
to make another attempt to acquire it.

Now we leave a trace in current->flags indicating that the current thread
already holds the cpucontrol lock, so we don't attempt to acquire it a
second time.

Thanks to Andrew Morton for the beating:-)

From: Brice Goglin <Brice.Goglin@ens-lyon.org>

  Build fix

(akpm: this patch is still unpleasant.  Ashok continues to look for a cleaner
solution, doesn't he?  ;))

Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Brice Goglin <Brice.Goglin@ens-lyon.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
Ashok Raj 2005-11-08 21:34:24 -08:00 committed by Linus Torvalds
parent 330d57fb98
commit 90d45d17f3
4 changed files with 49 additions and 14 deletions

View file

@ -38,7 +38,6 @@ static struct cpufreq_driver *cpufreq_driver;
static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS]; static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS];
static DEFINE_SPINLOCK(cpufreq_driver_lock); static DEFINE_SPINLOCK(cpufreq_driver_lock);
/* internal prototypes */ /* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
static void handle_update(void *data); static void handle_update(void *data);
@ -1115,24 +1114,21 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
int retval = -EINVAL; int retval = -EINVAL;
/* /*
* Converted the lock_cpu_hotplug to preempt_disable() * If we are already in context of hotplug thread, we dont need to
* and preempt_enable(). This is a bit kludgy and relies on how cpu * acquire the hotplug lock. Otherwise acquire cpucontrol to prevent
* hotplug works. All we need is a guarantee that cpu hotplug won't make * hotplug from removing this cpu that we are working on.
* progress on any cpu. Once we do preempt_disable(), this would ensure
* that hotplug threads don't get onto this cpu, thereby delaying
* the cpu remove process.
*
* We removed the lock_cpu_hotplug since we need to call this function
* via cpu hotplug callbacks, which result in locking the cpu hotplug
* thread itself. Agree this is not very clean, cpufreq community
* could improve this if required. - Ashok Raj <ashok.raj@intel.com>
*/ */
preempt_disable(); if (!current_in_cpu_hotplug())
lock_cpu_hotplug();
dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu, dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
target_freq, relation); target_freq, relation);
if (cpu_online(policy->cpu) && cpufreq_driver->target) if (cpu_online(policy->cpu) && cpufreq_driver->target)
retval = cpufreq_driver->target(policy, target_freq, relation); retval = cpufreq_driver->target(policy, target_freq, relation);
preempt_enable();
if (!current_in_cpu_hotplug())
unlock_cpu_hotplug();
return retval; return retval;
} }
EXPORT_SYMBOL_GPL(__cpufreq_driver_target); EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

View file

@ -42,6 +42,7 @@ struct notifier_block;
/* Need to know about CPUs going up/down? */ /* Need to know about CPUs going up/down? */
extern int register_cpu_notifier(struct notifier_block *nb); extern int register_cpu_notifier(struct notifier_block *nb);
extern void unregister_cpu_notifier(struct notifier_block *nb); extern void unregister_cpu_notifier(struct notifier_block *nb);
extern int current_in_cpu_hotplug(void);
int cpu_up(unsigned int cpu); int cpu_up(unsigned int cpu);
@ -54,6 +55,10 @@ static inline int register_cpu_notifier(struct notifier_block *nb)
static inline void unregister_cpu_notifier(struct notifier_block *nb) static inline void unregister_cpu_notifier(struct notifier_block *nb)
{ {
} }
static inline int current_in_cpu_hotplug(void)
{
return 0;
}
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
extern struct sysdev_class cpu_sysdev_class; extern struct sysdev_class cpu_sysdev_class;

View file

@ -909,6 +909,7 @@ do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0)
#define PF_SYNCWRITE 0x00200000 /* I am doing a sync write */ #define PF_SYNCWRITE 0x00200000 /* I am doing a sync write */
#define PF_BORROWED_MM 0x00400000 /* I am a kthread doing use_mm */ #define PF_BORROWED_MM 0x00400000 /* I am a kthread doing use_mm */
#define PF_RANDOMIZE 0x00800000 /* randomize virtual address space */ #define PF_RANDOMIZE 0x00800000 /* randomize virtual address space */
#define PF_HOTPLUG_CPU 0x01000000 /* Currently performing CPU hotplug */
/* /*
* Only the _current_ task can read/write to tsk->flags, but other * Only the _current_ task can read/write to tsk->flags, but other

View file

@ -21,6 +21,24 @@ EXPORT_SYMBOL_GPL(cpucontrol);
static struct notifier_block *cpu_chain; static struct notifier_block *cpu_chain;
/*
* Used by callers to check whether they need to acquire the cpucontrol
* lock to protect a cpu from being removed. It is sometimes necessary to
* call these functions both during normal operation and in response to
* a cpu being added/removed. If the call is made in the same thread
* context as a CPU hotplug thread, we don't need to take the lock,
* since it is already held.
* See drivers/cpufreq/cpufreq.c for example usage. - Ashok Raj
*/
int current_in_cpu_hotplug(void)
{
return (current->flags & PF_HOTPLUG_CPU);
}
EXPORT_SYMBOL_GPL(current_in_cpu_hotplug);
/* Need to know about CPUs going up/down? */ /* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb) int register_cpu_notifier(struct notifier_block *nb)
{ {
@ -94,6 +112,13 @@ int cpu_down(unsigned int cpu)
goto out; goto out;
} }
/*
* Leave a trace in current->flags indicating that we are already in the
* process of performing CPU hotplug. Callers can check whether cpucontrol
* is already held by the current thread and, if so, avoid a deadlock by
* not acquiring the lock a second time.
*/
current->flags |= PF_HOTPLUG_CPU;
err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE, err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
(void *)(long)cpu); (void *)(long)cpu);
if (err == NOTIFY_BAD) { if (err == NOTIFY_BAD) {
@ -146,6 +171,7 @@ out_thread:
out_allowed: out_allowed:
set_cpus_allowed(current, old_allowed); set_cpus_allowed(current, old_allowed);
out: out:
current->flags &= ~PF_HOTPLUG_CPU;
unlock_cpu_hotplug(); unlock_cpu_hotplug();
return err; return err;
} }
@ -163,6 +189,12 @@ int __devinit cpu_up(unsigned int cpu)
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }
/*
* Leave a trace in current->flags indicating that we are already in the
* process of performing CPU hotplug.
*/
current->flags |= PF_HOTPLUG_CPU;
ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu); ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
if (ret == NOTIFY_BAD) { if (ret == NOTIFY_BAD) {
printk("%s: attempt to bring up CPU %u failed\n", printk("%s: attempt to bring up CPU %u failed\n",
@ -185,6 +217,7 @@ out_notify:
if (ret != 0) if (ret != 0)
notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu); notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
out: out:
current->flags &= ~PF_HOTPLUG_CPU;
up(&cpucontrol); up(&cpucontrol);
return ret; return ret;
} }