sched: remove the !PREEMPT_BKL code
Remove the !PREEMPT_BKL code; this removes 160 lines of legacy code.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 58b8a73ab8
commit 6478d8800b
5 changed files with 5 additions and 161 deletions
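
For context: both BKL implementations sit behind the same recursive lock_kernel()/unlock_kernel() API declared in <linux/smp_lock.h>, and this patch keeps only the semaphore-backed one. A minimal, hypothetical caller (illustrative only, not part of this patch):

#include <linux/smp_lock.h>

/* Hypothetical legacy ioctl path. The BKL nests per task via
 * current->lock_depth, so taking it is safe even if a caller up
 * the stack already holds it; only the outermost unlock_kernel()
 * actually releases the lock. */
static int legacy_ioctl(unsigned int cmd, unsigned long arg)
{
	lock_kernel();
	/* ... touch state still guarded by the BKL ... */
	unlock_kernel();
	return 0;
}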
include/linux/hardirq.h
@@ -72,11 +72,7 @@
 #define in_softirq()		(softirq_count())
 #define in_interrupt()		(irq_count())
 
-#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
-# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
-#else
-# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)
-#endif
+#define in_atomic()		((preempt_count() & ~PREEMPT_ACTIVE) != 0)
 
 #ifdef CONFIG_PREEMPT
 # define PREEMPT_CHECK_OFFSET 1
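
Why in_atomic() had two definitions: with the spinlock BKL under CONFIG_PREEMPT, merely holding the kernel lock raises preempt_count() by one, so in_atomic() subtracted kernel_locked() (0 or 1) to avoid classifying a BKL holder as atomic. The semaphore BKL never touches preempt_count(), so the simple definition suffices. A userspace model of the arithmetic (function names and the PREEMPT_ACTIVE value are illustrative, not kernel API):

#include <assert.h>

#define PREEMPT_ACTIVE 0x10000000	/* illustrative; the real value is per-arch */

/* old !PREEMPT_BKL form: discount the BKL's preempt_count contribution */
static int in_atomic_old(unsigned int pc, int kernel_locked)
{
	return (pc & ~PREEMPT_ACTIVE) != (unsigned int)kernel_locked;
}

/* surviving form: any nonzero count outside PREEMPT_ACTIVE is atomic */
static int in_atomic_new(unsigned int pc)
{
	return (pc & ~PREEMPT_ACTIVE) != 0;
}

int main(void)
{
	/* a task holding only the spinlock BKL (pc == 1) was not "atomic" */
	assert(!in_atomic_old(1, 1));
	/* with the semaphore BKL, pc == 1 means a real atomic section */
	assert(in_atomic_new(1));
	return 0;
}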
include/linux/smp_lock.h
@@ -17,22 +17,10 @@ extern void __lockfunc __release_kernel_lock(void);
 		__release_kernel_lock();	\
 } while (0)
 
-/*
- * Non-SMP kernels will never block on the kernel lock,
- * so we are better off returning a constant zero from
- * reacquire_kernel_lock() so that the compiler can see
- * it at compile-time.
- */
-#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_BKL)
-# define return_value_on_smp return
-#else
-# define return_value_on_smp
-#endif
-
 static inline int reacquire_kernel_lock(struct task_struct *task)
 {
 	if (unlikely(task->lock_depth >= 0))
-		return_value_on_smp __reacquire_kernel_lock();
+		return __reacquire_kernel_lock();
 	return 0;
 }
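
The removed return_value_on_smp macro let one source line expand two ways; roughly, after preprocessing (a sketch, with the two helper names invented for illustration):

/* CONFIG_SMP && !CONFIG_PREEMPT_BKL: the spinlock reacquire can fail
 * with -EAGAIN, so its result had to be propagated to the scheduler: */
static inline int reacquire_kernel_lock_smp(struct task_struct *task)
{
	if (unlikely(task->lock_depth >= 0))
		return __reacquire_kernel_lock();
	return 0;
}

/* all other configs: the macro expanded to nothing, the call's result
 * was discarded, and the function was a compile-time constant 0: */
static inline int reacquire_kernel_lock_other(struct task_struct *task)
{
	if (unlikely(task->lock_depth >= 0))
		__reacquire_kernel_lock();
	return 0;
}

With the !PREEMPT_BKL case gone, only the first expansion remains, which is exactly what the hunk above hard-codes.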
kernel/Kconfig.preempt
@@ -52,10 +52,6 @@ config PREEMPT
 
 endchoice
 
-config PREEMPT_BKL
-	def_bool y
-	depends on SMP || PREEMPT
-
 config RCU_TRACE
 	bool "Enable tracing for RCU - currently stats in debugfs"
 	select DEBUG_FS
kernel/sched.c
@@ -3955,10 +3955,9 @@ EXPORT_SYMBOL(schedule);
 asmlinkage void __sched preempt_schedule(void)
 {
 	struct thread_info *ti = current_thread_info();
-#ifdef CONFIG_PREEMPT_BKL
 	struct task_struct *task = current;
 	int saved_lock_depth;
-#endif
+
 	/*
 	 * If there is a non-zero preempt_count or interrupts are disabled,
 	 * we do not want to preempt the current task. Just return..
@@ -3974,14 +3973,10 @@ asmlinkage void __sched preempt_schedule(void)
 		 * clear ->lock_depth so that schedule() doesnt
 		 * auto-release the semaphore:
 		 */
-#ifdef CONFIG_PREEMPT_BKL
 		saved_lock_depth = task->lock_depth;
 		task->lock_depth = -1;
-#endif
 		schedule();
-#ifdef CONFIG_PREEMPT_BKL
 		task->lock_depth = saved_lock_depth;
-#endif
 		sub_preempt_count(PREEMPT_ACTIVE);
 
 		/*
@@ -4002,10 +3997,9 @@ EXPORT_SYMBOL(preempt_schedule);
 asmlinkage void __sched preempt_schedule_irq(void)
 {
 	struct thread_info *ti = current_thread_info();
-#ifdef CONFIG_PREEMPT_BKL
 	struct task_struct *task = current;
 	int saved_lock_depth;
-#endif
+
 	/* Catch callers which need to be fixed */
 	BUG_ON(ti->preempt_count || !irqs_disabled());
 
@@ -4017,16 +4011,12 @@ asmlinkage void __sched preempt_schedule_irq(void)
 		 * clear ->lock_depth so that schedule() doesnt
 		 * auto-release the semaphore:
 		 */
-#ifdef CONFIG_PREEMPT_BKL
 		saved_lock_depth = task->lock_depth;
 		task->lock_depth = -1;
-#endif
 		local_irq_enable();
 		schedule();
 		local_irq_disable();
-#ifdef CONFIG_PREEMPT_BKL
 		task->lock_depth = saved_lock_depth;
-#endif
 		sub_preempt_count(PREEMPT_ACTIVE);
 
 		/*
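
Both preempt_schedule() and preempt_schedule_irq() hide ->lock_depth for the same reason: schedule() itself is BKL-aware and would otherwise release the kernel lock for the outgoing task and reacquire it afterwards. Abridged from the schedule() of this era for reference (not part of this patch):

	/* inside schedule(), abridged: */
	release_kernel_lock(prev);	/* no-op unless prev->lock_depth >= 0 */
	/* ... pick the next task, context_switch() ... */
	if (unlikely(reacquire_kernel_lock(current) < 0))
		goto need_resched_nonpreemptible;

Setting lock_depth to -1 around the schedule() call makes preemption BKL-transparent: the semaphore stays held across the involuntary switch.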
@@ -5241,11 +5231,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	spin_unlock_irqrestore(&rq->lock, flags);
 
 	/* Set the preempt count _outside_ the spinlocks! */
-#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
-	task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
-#else
 	task_thread_info(idle)->preempt_count = 0;
-#endif
+
 	/*
 	 * The idle tasks have their own, simple scheduling class:
 	 */
lib/kernel_lock.c
@@ -9,7 +9,6 @@
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 
-#ifdef CONFIG_PREEMPT_BKL
 /*
  * The 'big kernel semaphore'
  *
@@ -86,128 +85,6 @@ void __lockfunc unlock_kernel(void)
 	up(&kernel_sem);
 }
 
-#else
-
-/*
- * The 'big kernel lock'
- *
- * This spinlock is taken and released recursively by lock_kernel()
- * and unlock_kernel(). It is transparently dropped and reacquired
- * over schedule(). It is used to protect legacy code that hasn't
- * been migrated to a proper locking design yet.
- *
- * Don't use in new code.
- */
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
-
-
-/*
- * Acquire/release the underlying lock from the scheduler.
- *
- * This is called with preemption disabled, and should
- * return an error value if it cannot get the lock and
- * TIF_NEED_RESCHED gets set.
- *
- * If it successfully gets the lock, it should increment
- * the preemption count like any spinlock does.
- *
- * (This works on UP too - _raw_spin_trylock will never
- * return false in that case)
- */
-int __lockfunc __reacquire_kernel_lock(void)
-{
-	while (!_raw_spin_trylock(&kernel_flag)) {
-		if (test_thread_flag(TIF_NEED_RESCHED))
-			return -EAGAIN;
-		cpu_relax();
-	}
-	preempt_disable();
-	return 0;
-}
-
-void __lockfunc __release_kernel_lock(void)
-{
-	_raw_spin_unlock(&kernel_flag);
-	preempt_enable_no_resched();
-}
-
-/*
- * These are the BKL spinlocks - we try to be polite about preemption.
- * If SMP is not on (ie UP preemption), this all goes away because the
- * _raw_spin_trylock() will always succeed.
- */
-#ifdef CONFIG_PREEMPT
-static inline void __lock_kernel(void)
-{
-	preempt_disable();
-	if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
-		/*
-		 * If preemption was disabled even before this
-		 * was called, there's nothing we can be polite
-		 * about - just spin.
-		 */
-		if (preempt_count() > 1) {
-			_raw_spin_lock(&kernel_flag);
-			return;
-		}
-
-		/*
-		 * Otherwise, let's wait for the kernel lock
-		 * with preemption enabled..
-		 */
-		do {
-			preempt_enable();
-			while (spin_is_locked(&kernel_flag))
-				cpu_relax();
-			preempt_disable();
-		} while (!_raw_spin_trylock(&kernel_flag));
-	}
-}
-
-#else
-
-/*
- * Non-preemption case - just get the spinlock
- */
-static inline void __lock_kernel(void)
-{
-	_raw_spin_lock(&kernel_flag);
-}
-#endif
-
-static inline void __unlock_kernel(void)
-{
-	/*
-	 * the BKL is not covered by lockdep, so we open-code the
-	 * unlocking sequence (and thus avoid the dep-chain ops):
-	 */
-	_raw_spin_unlock(&kernel_flag);
-	preempt_enable();
-}
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously, so we only need to
- * worry about other CPU's.
- */
-void __lockfunc lock_kernel(void)
-{
-	int depth = current->lock_depth+1;
-	if (likely(!depth))
-		__lock_kernel();
-	current->lock_depth = depth;
-}
-
-void __lockfunc unlock_kernel(void)
-{
-	BUG_ON(current->lock_depth < 0);
-	if (likely(--current->lock_depth < 0))
-		__unlock_kernel();
-}
-
-#endif
-
 EXPORT_SYMBOL(lock_kernel);
 EXPORT_SYMBOL(unlock_kernel);
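
What survives in lib/kernel_lock.c is the semaphore-backed implementation from the first branch of the old #ifdef; its core, abridged here as a sketch for reference (pre-existing code, not added by this patch):

static DECLARE_MUTEX(kernel_sem);	/* the 'big kernel semaphore' */

void __lockfunc lock_kernel(void)
{
	struct task_struct *task = current;
	int depth = task->lock_depth + 1;

	if (likely(!depth))
		/* no recursion worries: lock_depth is set afterwards */
		down(&kernel_sem);

	task->lock_depth = depth;
}

void __lockfunc unlock_kernel(void)
{
	struct task_struct *task = current;

	BUG_ON(task->lock_depth < 0);

	if (likely(--task->lock_depth < 0))
		up(&kernel_sem);
}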