mirror of
https://github.com/adulau/aha.git
synced 2024-12-28 03:36:19 +00:00
ftrace: use Makefile to remove tracing from lockdep
This patch removes the "notrace" annotation from lockdep and adds the debugging files in the kernel directory to those that should not be compiled with "-pg" mcount tracing. Signed-off-by: Steven Rostedt <srostedt@redhat.com> Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
parent
9d0a420b73
commit
1d09daa55d
2 changed files with 21 additions and 13 deletions
|
@ -11,6 +11,14 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
|
|||
hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
|
||||
notifier.o ksysfs.o pm_qos_params.o sched_clock.o
|
||||
|
||||
ifdef CONFIG_FTRACE
|
||||
# Do not profile debug utilities
|
||||
ORIG_CFLAGS := $(KBUILD_CFLAGS)
|
||||
KBUILD_CFLAGS = $(if $(filter-out lockdep% %debug,$(basename $(notdir $@))), \
|
||||
$(ORIG_CFLAGS), \
|
||||
$(subst -pg,,$(ORIG_CFLAGS)))
|
||||
endif
|
||||
|
||||
obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o
|
||||
obj-$(CONFIG_STACKTRACE) += stacktrace.o
|
||||
obj-y += time/
|
||||
|
|
|
@ -271,14 +271,14 @@ static struct list_head chainhash_table[CHAINHASH_SIZE];
|
|||
((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
|
||||
(key2))
|
||||
|
||||
notrace void lockdep_off(void)
|
||||
void lockdep_off(void)
|
||||
{
|
||||
current->lockdep_recursion++;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(lockdep_off);
|
||||
|
||||
notrace void lockdep_on(void)
|
||||
void lockdep_on(void)
|
||||
{
|
||||
current->lockdep_recursion--;
|
||||
}
|
||||
|
@ -1041,7 +1041,7 @@ find_usage_forwards(struct lock_class *source, unsigned int depth)
|
|||
* Return 1 otherwise and keep <backwards_match> unchanged.
|
||||
* Return 0 on error.
|
||||
*/
|
||||
static noinline notrace int
|
||||
static noinline int
|
||||
find_usage_backwards(struct lock_class *source, unsigned int depth)
|
||||
{
|
||||
struct lock_list *entry;
|
||||
|
@ -1591,7 +1591,7 @@ static inline int validate_chain(struct task_struct *curr,
|
|||
* We are building curr_chain_key incrementally, so double-check
|
||||
* it from scratch, to make sure that it's done correctly:
|
||||
*/
|
||||
static notrace void check_chain_key(struct task_struct *curr)
|
||||
static void check_chain_key(struct task_struct *curr)
|
||||
{
|
||||
#ifdef CONFIG_DEBUG_LOCKDEP
|
||||
struct held_lock *hlock, *prev_hlock = NULL;
|
||||
|
@ -1967,7 +1967,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
|
|||
/*
|
||||
* Mark all held locks with a usage bit:
|
||||
*/
|
||||
static notrace int
|
||||
static int
|
||||
mark_held_locks(struct task_struct *curr, int hardirq)
|
||||
{
|
||||
enum lock_usage_bit usage_bit;
|
||||
|
@ -2014,7 +2014,7 @@ void early_boot_irqs_on(void)
|
|||
/*
|
||||
* Hardirqs will be enabled:
|
||||
*/
|
||||
void notrace trace_hardirqs_on_caller(unsigned long a0)
|
||||
void trace_hardirqs_on_caller(unsigned long a0)
|
||||
{
|
||||
struct task_struct *curr = current;
|
||||
unsigned long ip;
|
||||
|
@ -2060,7 +2060,7 @@ void notrace trace_hardirqs_on_caller(unsigned long a0)
|
|||
}
|
||||
EXPORT_SYMBOL(trace_hardirqs_on_caller);
|
||||
|
||||
void notrace trace_hardirqs_on(void)
|
||||
void trace_hardirqs_on(void)
|
||||
{
|
||||
trace_hardirqs_on_caller(CALLER_ADDR0);
|
||||
}
|
||||
|
@ -2069,7 +2069,7 @@ EXPORT_SYMBOL(trace_hardirqs_on);
|
|||
/*
|
||||
* Hardirqs were disabled:
|
||||
*/
|
||||
void notrace trace_hardirqs_off_caller(unsigned long a0)
|
||||
void trace_hardirqs_off_caller(unsigned long a0)
|
||||
{
|
||||
struct task_struct *curr = current;
|
||||
|
||||
|
@ -2094,7 +2094,7 @@ void notrace trace_hardirqs_off_caller(unsigned long a0)
|
|||
}
|
||||
EXPORT_SYMBOL(trace_hardirqs_off_caller);
|
||||
|
||||
void notrace trace_hardirqs_off(void)
|
||||
void trace_hardirqs_off(void)
|
||||
{
|
||||
trace_hardirqs_off_caller(CALLER_ADDR0);
|
||||
}
|
||||
|
@ -2260,7 +2260,7 @@ static inline int separate_irq_context(struct task_struct *curr,
|
|||
/*
|
||||
* Mark a lock with a usage bit, and validate the state transition:
|
||||
*/
|
||||
static notrace int mark_lock(struct task_struct *curr, struct held_lock *this,
|
||||
static int mark_lock(struct task_struct *curr, struct held_lock *this,
|
||||
enum lock_usage_bit new_bit)
|
||||
{
|
||||
unsigned int new_mask = 1 << new_bit, ret = 1;
|
||||
|
@ -2663,7 +2663,7 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
|
|||
/*
|
||||
* Check whether we follow the irq-flags state precisely:
|
||||
*/
|
||||
static notrace void check_flags(unsigned long flags)
|
||||
static void check_flags(unsigned long flags)
|
||||
{
|
||||
#if defined(CONFIG_DEBUG_LOCKDEP) && defined(CONFIG_TRACE_IRQFLAGS)
|
||||
if (!debug_locks)
|
||||
|
@ -2700,7 +2700,7 @@ static notrace void check_flags(unsigned long flags)
|
|||
* We are not always called with irqs disabled - do that here,
|
||||
* and also avoid lockdep recursion:
|
||||
*/
|
||||
notrace void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
|
||||
void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
|
||||
int trylock, int read, int check, unsigned long ip)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
@ -2723,7 +2723,7 @@ notrace void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
|
|||
|
||||
EXPORT_SYMBOL_GPL(lock_acquire);
|
||||
|
||||
notrace void lock_release(struct lockdep_map *lock, int nested,
|
||||
void lock_release(struct lockdep_map *lock, int nested,
|
||||
unsigned long ip)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
|
Loading…
Reference in a new issue