mirror of
https://github.com/adulau/aha.git
synced 2024-12-28 03:36:19 +00:00
sched: Teach might_sleep() about preemptible RCU
In practice, it is harmless to voluntarily sleep in a rcu_read_lock() section if we are running under preempt rcu, but it is illegal if we build a kernel running non-preemptable rcu. Currently, might_sleep() doesn't notice sleepable operations under rcu_read_lock() sections if we are running under preemptable rcu because preempt_count() is left untouched after rcu_read_lock() in this case. But we want developers who test their changes under such config to notice the "sleeping while atomic" issues. So we add rcu_read_lock_nesting to preempt_count() in might_sleep() checks. [ v2: Handle rcu-tiny ] Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Peter Zijlstra <peterz@infradead.org> LKML-Reference: <1260991265-8451-1-git-send-regression-fweisbec@gmail.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
416eb39556
commit
234da7bcdc
3 changed files with 17 additions and 1 deletions
|
@@ -101,4 +101,9 @@ static inline void exit_rcu(void)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline int rcu_preempt_depth(void)
|
||||||
|
{
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
#endif /* __LINUX_RCUTINY_H */
|
#endif /* __LINUX_RCUTINY_H */
|
||||||
|
|
|
@@ -45,6 +45,12 @@ extern void __rcu_read_unlock(void);
|
||||||
extern void synchronize_rcu(void);
|
extern void synchronize_rcu(void);
|
||||||
extern void exit_rcu(void);
|
extern void exit_rcu(void);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Defined as macro as it is a very low level header
|
||||||
|
* included from areas that don't even know about current
|
||||||
|
*/
|
||||||
|
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
|
||||||
|
|
||||||
#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
|
#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
|
||||||
|
|
||||||
static inline void __rcu_read_lock(void)
|
static inline void __rcu_read_lock(void)
|
||||||
|
@@ -63,6 +69,11 @@ static inline void exit_rcu(void)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline int rcu_preempt_depth(void)
|
||||||
|
{
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
|
#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
|
||||||
|
|
||||||
static inline void __rcu_read_lock_bh(void)
|
static inline void __rcu_read_lock_bh(void)
|
||||||
|
|
|
@@ -9682,7 +9682,7 @@ void __init sched_init(void)
|
||||||
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
|
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
|
||||||
static inline int preempt_count_equals(int preempt_offset)
|
static inline int preempt_count_equals(int preempt_offset)
|
||||||
{
|
{
|
||||||
int nested = preempt_count() & ~PREEMPT_ACTIVE;
|
int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
|
||||||
|
|
||||||
return (nested == PREEMPT_INATOMIC_BASE + preempt_offset);
|
return (nested == PREEMPT_INATOMIC_BASE + preempt_offset);
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in a new issue