sched: Pull up the might_sleep() check into cond_resched()
might_sleep() is called late-ish in cond_resched(), after the need_resched()/preempt enabled/system running tests are checked. It's better to perform the sleeping-while-atomic check earlier and not depend on environment state that reduces the chances of detecting a problem.

Also define the cond_resched_*() helpers as macros, so that the __FILE__/__LINE__ reported in the sleeping-while-atomic warning displays the real origin and not sched.h.

Changes in v2:
- Call __might_sleep() directly instead of might_sleep(), which may call cond_resched().
- Turn cond_resched() into a macro so that the file:line pair reported refers to the caller of cond_resched() and not to __cond_resched() itself.

Changes in v3:
- Also propagate this __might_sleep() pull-up to cond_resched_lock() and cond_resched_softirq().

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1247725694-6082-6-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent 6f80bd985f
commit 613afbf832

3 changed files with 25 additions and 17 deletions
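To illustrate the point about __FILE__/__LINE__ made in the changelog, here is a minimal userspace sketch (not kernel code; report(), check_sleep_inline() and check_sleep() are invented names for illustration): inside a static inline function the macros expand to that function's own location, while inside a macro they expand at the call site, which is why turning cond_resched() into a macro makes the warning point at the real caller.

/* Minimal userspace sketch, not kernel code: report() and the two
 * check_sleep variants are invented names for illustration only. */
#include <stdio.h>

static void report(const char *file, int line)
{
	printf("origin: %s:%d\n", file, line);
}

/* Inside an inline function, __FILE__/__LINE__ name this function's body. */
static inline void check_sleep_inline(void)
{
	report(__FILE__, __LINE__);
}

/* Inside a macro, they expand at the call site instead. */
#define check_sleep() report(__FILE__, __LINE__)

int main(void)
{
	check_sleep_inline();	/* prints the line inside check_sleep_inline() */
	check_sleep();		/* prints this line in main() */
	return 0;
}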
fs/dcache.c
@@ -32,6 +32,7 @@
 #include <linux/swap.h>
 #include <linux/bootmem.h>
 #include <linux/fs_struct.h>
+#include <linux/hardirq.h>
 #include "internal.h"
 
 int sysctl_vfs_cache_pressure __read_mostly = 100;
include/linux/sched.h
@@ -2286,17 +2286,26 @@ static inline int need_resched(void)
  */
 extern int _cond_resched(void);
 
-static inline int cond_resched(void)
-{
-	return _cond_resched();
-}
+#define cond_resched() ({				\
+	__might_sleep(__FILE__, __LINE__, 0);		\
+	_cond_resched();				\
+})
 
-extern int cond_resched_lock(spinlock_t * lock);
-extern int cond_resched_softirq(void);
-static inline int cond_resched_bkl(void)
-{
-	return _cond_resched();
-}
+extern int __cond_resched_lock(spinlock_t *lock);
+
+#define cond_resched_lock(lock) ({				\
+	__might_sleep(__FILE__, __LINE__, PREEMPT_OFFSET);	\
+	__cond_resched_lock(lock);				\
+})
+
+extern int __cond_resched_softirq(void);
+
+#define cond_resched_softirq() ({				\
+	__might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET);	\
+	__cond_resched_softirq();				\
+})
+
+#define cond_resched_bkl()	cond_resched()
 
 /*
  * Does a critical section need to be broken due to another
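The ({ ... }) construct used in the new macros above is a GNU C statement expression: the value of the last statement becomes the value of the whole expression, so cond_resched() keeps returning _cond_resched()'s result even though it is now a macro. A small userspace sketch of that pattern, assuming gcc or clang; do_check() and do_resched() are stand-ins, not kernel functions:

/* Userspace sketch of the statement-expression pattern; builds with
 * gcc/clang. do_check() and do_resched() are invented stand-ins. */
#include <stdio.h>

static void do_check(const char *file, int line)
{
	printf("might_sleep check at %s:%d\n", file, line);
}

static int do_resched(void)
{
	return 1;	/* pretend a reschedule happened */
}

#define cond_resched_sketch() ({			\
	do_check(__FILE__, __LINE__);			\
	do_resched();	/* value of the ({ }) block */	\
})

int main(void)
{
	if (cond_resched_sketch())	/* still usable in a condition */
		printf("rescheduled\n");
	return 0;
}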
kernel/sched.c
@@ -6610,8 +6610,6 @@ static inline int should_resched(void)
 
 static void __cond_resched(void)
 {
-	__might_sleep(__FILE__, __LINE__, 0);
-
 	add_preempt_count(PREEMPT_ACTIVE);
 	schedule();
 	sub_preempt_count(PREEMPT_ACTIVE);
@@ -6628,14 +6626,14 @@ int __sched _cond_resched(void)
 EXPORT_SYMBOL(_cond_resched);
 
 /*
- * cond_resched_lock() - if a reschedule is pending, drop the given lock,
+ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
  * call schedule, and on return reacquire the lock.
  *
  * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
 * operations here to prevent schedule() from being called twice (once via
 * spin_unlock(), once by hand).
 */
-int cond_resched_lock(spinlock_t *lock)
+int __cond_resched_lock(spinlock_t *lock)
 {
 	int resched = should_resched();
 	int ret = 0;
@@ -6651,9 +6649,9 @@ int cond_resched_lock(spinlock_t *lock)
 	}
 	return ret;
 }
-EXPORT_SYMBOL(cond_resched_lock);
+EXPORT_SYMBOL(__cond_resched_lock);
 
-int __sched cond_resched_softirq(void)
+int __sched __cond_resched_softirq(void)
 {
 	BUG_ON(!in_softirq());
 
@@ -6665,7 +6663,7 @@ int __sched cond_resched_softirq(void)
 	}
 	return 0;
 }
-EXPORT_SYMBOL(cond_resched_softirq);
+EXPORT_SYMBOL(__cond_resched_softirq);
 
 /**
  * yield - yield the current processor to other threads.
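For context on the new third argument to __might_sleep(): the caller passes the preempt_count offset it legitimately holds (0 for plain cond_resched(), PREEMPT_OFFSET for cond_resched_lock() where one spinlock is held, SOFTIRQ_OFFSET for cond_resched_softirq()), and only a count beyond that is treated as sleeping while atomic. A rough userspace model of that idea, with invented names and values rather than the kernel's implementation:

/* Userspace model of the preempt_offset idea, not kernel code.
 * PREEMPT_LOCK_OFFSET, preempt_count and might_sleep_check() are
 * invented stand-ins for the real kernel symbols. */
#include <stdio.h>

#define PREEMPT_LOCK_OFFSET 1	/* stand-in for PREEMPT_OFFSET in the patch */

static int preempt_count;	/* pretend per-task preemption counter */

static void might_sleep_check(const char *file, int line, int expected)
{
	if (preempt_count != expected)
		printf("BUG: sleeping while atomic at %s:%d (count=%d, expected=%d)\n",
		       file, line, preempt_count, expected);
}

int main(void)
{
	preempt_count = 1;	/* as if one spinlock were held */

	/* cond_resched()-style check: no held locks expected -> warns */
	might_sleep_check(__FILE__, __LINE__, 0);

	/* cond_resched_lock()-style check: one lock expected -> silent */
	might_sleep_check(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);
	return 0;
}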