mutex: preemption fixes
The problem is that dropping the spinlock right before schedule() is a voluntary preemption point and can cause a schedule, right after which we schedule again. Fix this inefficiency by keeping preemption disabled until we schedule: explicitly disable preemption and provide a schedule() variant that assumes preemption is already disabled.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 93d81d1aca
commit 41719b0309

3 changed files with 12 additions and 4 deletions
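The window being closed is easy to see in a toy model. The sketch below is userspace C invented purely for illustration — preempt_disable(), preempt_enable(), the unlock and the scheduler are all printf stubs borrowing the kernel's names, not the kernel's implementations:

#include <stdio.h>

static int preempt_count;	/* stand-in for the per-task preempt count */

static void preempt_disable(void) { preempt_count++; }
static void preempt_enable(void)  { preempt_count--; }

/*
 * Dropping a lock with preemption enabled is a voluntary preemption
 * point: the kernel may schedule here, right before we schedule anyway.
 */
static void unlock(const char *path)
{
	printf("%s: unlock", path);
	if (preempt_count == 0)
		printf(" -> extra schedule() possible here");
	printf("\n");
}

static void do_schedule(const char *path)
{
	printf("%s: schedule()\n", path);
}

int main(void)
{
	/* Old path: the unlock is preemptible, so we can schedule twice. */
	unlock("before");
	do_schedule("before");

	/* New path: preemption stays off across unlock + __schedule(). */
	preempt_disable();
	unlock("after");
	do_schedule("after");	/* models __schedule() */
	preempt_enable();
	return 0;
}

In the "before" run the unlock happens with the count at zero, so a voluntary preemption can sneak in just ahead of the intentional schedule(); in the "after" run the count is held at 1 across the unlock, so only the intended context switch remains.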
include/linux/sched.h
@@ -328,6 +328,7 @@ extern signed long schedule_timeout(signed long timeout);
 extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
+asmlinkage void __schedule(void);
 asmlinkage void schedule(void);
 
 struct nsproxy;
kernel/mutex.c
@@ -131,6 +131,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	struct mutex_waiter waiter;
 	unsigned long flags;
 
+	preempt_disable();
 	spin_lock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_lock_common(lock, &waiter);
@@ -170,13 +171,14 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			spin_unlock_mutex(&lock->wait_lock, flags);
 
 			debug_mutex_free_waiter(&waiter);
+			preempt_enable();
 			return -EINTR;
 		}
 		__set_task_state(task, state);
 
 		/* didnt get the lock, go to sleep: */
 		spin_unlock_mutex(&lock->wait_lock, flags);
-		schedule();
+		__schedule();
 		spin_lock_mutex(&lock->wait_lock, flags);
 	}
 
@@ -193,6 +195,7 @@ done:
 	spin_unlock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_free_waiter(&waiter);
+	preempt_enable();
 
 	return 0;
 }
kernel/sched.c
@@ -4538,15 +4538,13 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
 /*
  * schedule() is the main scheduler function.
  */
-asmlinkage void __sched schedule(void)
+asmlinkage void __sched __schedule(void)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
 	struct rq *rq;
 	int cpu;
 
-need_resched:
-	preempt_disable();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
 	rcu_qsctr_inc(cpu);
@@ -4603,7 +4601,13 @@ need_resched_nonpreemptible:
 
 	if (unlikely(reacquire_kernel_lock(current) < 0))
 		goto need_resched_nonpreemptible;
+}
 
+asmlinkage void __sched schedule(void)
+{
+need_resched:
+	preempt_disable();
+	__schedule();
 	preempt_enable_no_resched();
 	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
 		goto need_resched;
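For reference, the resulting schedule()/__schedule() split can be modelled standalone. This is a hypothetical stub, not the scheduler itself — need_resched_flag stands in for TIF_NEED_RESCHED, the kernel names are reused for printf stubs, and __schedule() here only pretends to context-switch:

#include <stdio.h>
#include <stdbool.h>

static int preempt_count;
static bool need_resched_flag = true;	/* stands in for TIF_NEED_RESCHED */

static void preempt_disable(void) { preempt_count++; }

/*
 * Drops the count without an immediate reschedule check, which is why
 * schedule() below re-tests the flag by hand and loops.
 */
static void preempt_enable_no_resched(void) { preempt_count--; }

/* The core: would context-switch; assumes preemption is already off. */
static void __schedule(void)
{
	printf("__schedule() with preempt_count=%d\n", preempt_count);
	need_resched_flag = false;	/* pretend we switched and came back */
}

static void schedule(void)
{
need_resched:
	preempt_disable();
	__schedule();
	preempt_enable_no_resched();
	if (need_resched_flag)
		goto need_resched;
}

int main(void)
{
	schedule();
	return 0;
}

The wrapper keeps the old need_resched loop, while callers that already hold preemption off (such as the patched mutex slowpath) can call __schedule() directly.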