From b332828c39326b1dca617f387dd15d12e81cd5f0 Mon Sep 17 00:00:00 2001 From: "K.Prasad" Date: Mon, 1 Jun 2009 23:43:10 +0530 Subject: [PATCH 001/470] hw-breakpoints: prepare the code for Hardware Breakpoint interfaces The generic hardware breakpoint interface provides an abstraction of hardware breakpoints in front of specific arch implementations for both kernel and user side breakpoints. This includes execution breakpoints and read/write breakpoints, also known as "watchpoints". This patch introduces header files containing constants, structure definitions and declaration of functions used by the hardware breakpoint core and x86 specific code. It also introduces an array based storage for the debug-register values in 'struct thread_struct', while modifying all users of debugreg member in the structure. [ Impact: add headers for new hardware breakpoint interface ] Original-patch-by: Alan Stern Signed-off-by: K.Prasad Reviewed-by: Alan Stern Signed-off-by: Frederic Weisbecker --- arch/x86/include/asm/a.out-core.h | 8 +- arch/x86/include/asm/debugreg.h | 29 ++++++ arch/x86/include/asm/hw_breakpoint.h | 55 +++++++++++ arch/x86/include/asm/processor.h | 8 +- arch/x86/kernel/process.c | 16 +-- arch/x86/kernel/ptrace.c | 16 +-- arch/x86/power/cpu_32.c | 8 +- arch/x86/power/cpu_64.c | 8 +- include/asm-generic/hw_breakpoint.h | 139 +++++++++++++++++++++++++++ 9 files changed, 255 insertions(+), 32 deletions(-) create mode 100644 arch/x86/include/asm/hw_breakpoint.h create mode 100644 include/asm-generic/hw_breakpoint.h diff --git a/arch/x86/include/asm/a.out-core.h b/arch/x86/include/asm/a.out-core.h index bb70e397aa8..fc4685dd6e4 100644 --- a/arch/x86/include/asm/a.out-core.h +++ b/arch/x86/include/asm/a.out-core.h @@ -32,10 +32,10 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) >> PAGE_SHIFT; dump->u_dsize -= dump->u_tsize; dump->u_ssize = 0; - dump->u_debugreg[0] = current->thread.debugreg0; - dump->u_debugreg[1] = current->thread.debugreg1; - dump->u_debugreg[2] = current->thread.debugreg2; - dump->u_debugreg[3] = current->thread.debugreg3; + dump->u_debugreg[0] = current->thread.debugreg[0]; + dump->u_debugreg[1] = current->thread.debugreg[1]; + dump->u_debugreg[2] = current->thread.debugreg[2]; + dump->u_debugreg[3] = current->thread.debugreg[3]; dump->u_debugreg[4] = 0; dump->u_debugreg[5] = 0; dump->u_debugreg[6] = current->thread.debugreg6; diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h index 3ea6f37be9e..23439fbb1d0 100644 --- a/arch/x86/include/asm/debugreg.h +++ b/arch/x86/include/asm/debugreg.h @@ -18,6 +18,7 @@ #define DR_TRAP1 (0x2) /* db1 */ #define DR_TRAP2 (0x4) /* db2 */ #define DR_TRAP3 (0x8) /* db3 */ +#define DR_TRAP_BITS (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3) #define DR_STEP (0x4000) /* single-step */ #define DR_SWITCH (0x8000) /* task switch */ @@ -49,6 +50,8 @@ #define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */ #define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */ +#define DR_LOCAL_ENABLE (0x1) /* Local enable for reg 0 */ +#define DR_GLOBAL_ENABLE (0x2) /* Global enable for reg 0 */ #define DR_ENABLE_SIZE 2 /* 2 enable bits per register */ #define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */ @@ -67,4 +70,30 @@ #define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */ #define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */ +/* + * HW breakpoint additions + */ +#ifdef __KERNEL__ + +/* For process management */ +extern void 
flush_thread_hw_breakpoint(struct task_struct *tsk); +extern int copy_thread_hw_breakpoint(struct task_struct *tsk, + struct task_struct *child, unsigned long clone_flags); + +/* For CPU management */ +extern void load_debug_registers(void); +static inline void hw_breakpoint_disable(void) +{ + /* Zero the control register for HW Breakpoint */ + set_debugreg(0UL, 7); + + /* Zero-out the individual HW breakpoint address registers */ + set_debugreg(0UL, 0); + set_debugreg(0UL, 1); + set_debugreg(0UL, 2); + set_debugreg(0UL, 3); +} + +#endif /* __KERNEL__ */ + #endif /* _ASM_X86_DEBUGREG_H */ diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h new file mode 100644 index 00000000000..1acb4d45de7 --- /dev/null +++ b/arch/x86/include/asm/hw_breakpoint.h @@ -0,0 +1,55 @@ +#ifndef _I386_HW_BREAKPOINT_H +#define _I386_HW_BREAKPOINT_H + +#ifdef __KERNEL__ +#define __ARCH_HW_BREAKPOINT_H + +struct arch_hw_breakpoint { + char *name; /* Contains name of the symbol to set bkpt */ + unsigned long address; + u8 len; + u8 type; +}; + +#include +#include + +/* Available HW breakpoint length encodings */ +#define HW_BREAKPOINT_LEN_1 0x40 +#define HW_BREAKPOINT_LEN_2 0x44 +#define HW_BREAKPOINT_LEN_4 0x4c +#define HW_BREAKPOINT_LEN_EXECUTE 0x40 + +#ifdef CONFIG_X86_64 +#define HW_BREAKPOINT_LEN_8 0x48 +#endif + +/* Available HW breakpoint type encodings */ + +/* trigger on instruction execute */ +#define HW_BREAKPOINT_EXECUTE 0x80 +/* trigger on memory write */ +#define HW_BREAKPOINT_WRITE 0x81 +/* trigger on memory read or write */ +#define HW_BREAKPOINT_RW 0x83 + +/* Total number of available HW breakpoint registers */ +#define HBP_NUM 4 + +extern struct hw_breakpoint *hbp_kernel[HBP_NUM]; +DECLARE_PER_CPU(struct hw_breakpoint*, this_hbp_kernel[HBP_NUM]); +extern unsigned int hbp_user_refcount[HBP_NUM]; + +extern void arch_install_thread_hw_breakpoint(struct task_struct *tsk); +extern void arch_uninstall_thread_hw_breakpoint(void); +extern int arch_check_va_in_userspace(unsigned long va, u8 hbp_len); +extern int arch_validate_hwbkpt_settings(struct hw_breakpoint *bp, + struct task_struct *tsk); +extern void arch_update_user_hw_breakpoint(int pos, struct task_struct *tsk); +extern void arch_flush_thread_hw_breakpoint(struct task_struct *tsk); +extern void arch_update_kernel_hw_breakpoint(void *); +extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, + unsigned long val, void *data); +#endif /* __KERNEL__ */ +#endif /* _I386_HW_BREAKPOINT_H */ + diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 0b2fab0051e..448b34a8e39 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -29,6 +29,7 @@ struct mm_struct; #include #include +#define HBP_NUM 4 /* * Default implementation of macro that returns current * instruction pointer ("program counter"). 
@@ -431,12 +432,11 @@ struct thread_struct { unsigned long fs; unsigned long gs; /* Hardware debugging registers: */ - unsigned long debugreg0; - unsigned long debugreg1; - unsigned long debugreg2; - unsigned long debugreg3; + unsigned long debugreg[HBP_NUM]; unsigned long debugreg6; unsigned long debugreg7; + /* Hardware breakpoint info */ + struct hw_breakpoint *hbp[HBP_NUM]; /* Fault info: */ unsigned long cr2; unsigned long trap_no; diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index fb5dfb891f0..291527cb438 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -106,10 +106,10 @@ void flush_thread(void) clear_tsk_thread_flag(tsk, TIF_DEBUG); - tsk->thread.debugreg0 = 0; - tsk->thread.debugreg1 = 0; - tsk->thread.debugreg2 = 0; - tsk->thread.debugreg3 = 0; + tsk->thread.debugreg[0] = 0; + tsk->thread.debugreg[1] = 0; + tsk->thread.debugreg[2] = 0; + tsk->thread.debugreg[3] = 0; tsk->thread.debugreg6 = 0; tsk->thread.debugreg7 = 0; memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); @@ -194,10 +194,10 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, update_debugctlmsr(next->debugctlmsr); if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { - set_debugreg(next->debugreg0, 0); - set_debugreg(next->debugreg1, 1); - set_debugreg(next->debugreg2, 2); - set_debugreg(next->debugreg3, 3); + set_debugreg(next->debugreg[0], 0); + set_debugreg(next->debugreg[1], 1); + set_debugreg(next->debugreg[2], 2); + set_debugreg(next->debugreg[3], 3); /* no 4 and 5 */ set_debugreg(next->debugreg6, 6); set_debugreg(next->debugreg7, 7); diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 09ecbde91c1..313be40be55 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -471,10 +471,10 @@ static int genregs_set(struct task_struct *target, static unsigned long ptrace_get_debugreg(struct task_struct *child, int n) { switch (n) { - case 0: return child->thread.debugreg0; - case 1: return child->thread.debugreg1; - case 2: return child->thread.debugreg2; - case 3: return child->thread.debugreg3; + case 0: return child->thread.debugreg[0]; + case 1: return child->thread.debugreg[1]; + case 2: return child->thread.debugreg[2]; + case 3: return child->thread.debugreg[3]; case 6: return child->thread.debugreg6; case 7: return child->thread.debugreg7; } @@ -493,10 +493,10 @@ static int ptrace_set_debugreg(struct task_struct *child, return -EIO; switch (n) { - case 0: child->thread.debugreg0 = data; break; - case 1: child->thread.debugreg1 = data; break; - case 2: child->thread.debugreg2 = data; break; - case 3: child->thread.debugreg3 = data; break; + case 0: child->thread.debugreg[0] = data; break; + case 1: child->thread.debugreg[1] = data; break; + case 2: child->thread.debugreg[2] = data; break; + case 3: child->thread.debugreg[3] = data; break; case 6: if ((data & ~0xffffffffUL) != 0) diff --git a/arch/x86/power/cpu_32.c b/arch/x86/power/cpu_32.c index ce702c5b3a2..51991394800 100644 --- a/arch/x86/power/cpu_32.c +++ b/arch/x86/power/cpu_32.c @@ -84,10 +84,10 @@ static void fix_processor_context(void) * Now maybe reload the debug registers */ if (current->thread.debugreg7) { - set_debugreg(current->thread.debugreg0, 0); - set_debugreg(current->thread.debugreg1, 1); - set_debugreg(current->thread.debugreg2, 2); - set_debugreg(current->thread.debugreg3, 3); + set_debugreg(current->thread.debugreg[0], 0); + set_debugreg(current->thread.debugreg[1], 1); + set_debugreg(current->thread.debugreg[2], 2); + 
set_debugreg(current->thread.debugreg[3], 3); /* no 4 and 5 */ set_debugreg(current->thread.debugreg6, 6); set_debugreg(current->thread.debugreg7, 7); diff --git a/arch/x86/power/cpu_64.c b/arch/x86/power/cpu_64.c index 5343540f260..1e3bdcc959f 100644 --- a/arch/x86/power/cpu_64.c +++ b/arch/x86/power/cpu_64.c @@ -163,10 +163,10 @@ static void fix_processor_context(void) * Now maybe reload the debug registers */ if (current->thread.debugreg7){ - loaddebug(¤t->thread, 0); - loaddebug(¤t->thread, 1); - loaddebug(¤t->thread, 2); - loaddebug(¤t->thread, 3); + set_debugreg(current->thread.debugreg[0], 0); + set_debugreg(current->thread.debugreg[1], 1); + set_debugreg(current->thread.debugreg[2], 2); + set_debugreg(current->thread.debugreg[3], 3); /* no 4 and 5 */ loaddebug(¤t->thread, 6); loaddebug(¤t->thread, 7); diff --git a/include/asm-generic/hw_breakpoint.h b/include/asm-generic/hw_breakpoint.h new file mode 100644 index 00000000000..9bf2d12eb74 --- /dev/null +++ b/include/asm-generic/hw_breakpoint.h @@ -0,0 +1,139 @@ +#ifndef _ASM_GENERIC_HW_BREAKPOINT_H +#define _ASM_GENERIC_HW_BREAKPOINT_H + +#ifndef __ARCH_HW_BREAKPOINT_H +#error "Please don't include this file directly" +#endif + +#ifdef __KERNEL__ +#include +#include +#include + +/** + * struct hw_breakpoint - unified kernel/user-space hardware breakpoint + * @triggered: callback invoked after target address access + * @info: arch-specific breakpoint info (address, length, and type) + * + * %hw_breakpoint structures are the kernel's way of representing + * hardware breakpoints. These are data breakpoints + * (also known as "watchpoints", triggered on data access), and the breakpoint's + * target address can be located in either kernel space or user space. + * + * The breakpoint's address, length, and type are highly + * architecture-specific. The values are encoded in the @info field; you + * specify them when registering the breakpoint. To examine the encoded + * values use hw_breakpoint_get_{kaddress,uaddress,len,type}(), declared + * below. + * + * The address is specified as a regular kernel pointer (for kernel-space + * breakponts) or as an %__user pointer (for user-space breakpoints). + * With register_user_hw_breakpoint(), the address must refer to a + * location in user space. The breakpoint will be active only while the + * requested task is running. Conversely with + * register_kernel_hw_breakpoint(), the address must refer to a location + * in kernel space, and the breakpoint will be active on all CPUs + * regardless of the current task. + * + * The length is the breakpoint's extent in bytes, which is subject to + * certain limitations. include/asm/hw_breakpoint.h contains macros + * defining the available lengths for a specific architecture. Note that + * the address's alignment must match the length. The breakpoint will + * catch accesses to any byte in the range from address to address + + * (length - 1). + * + * The breakpoint's type indicates the sort of access that will cause it + * to trigger. Possible values may include: + * + * %HW_BREAKPOINT_RW (triggered on read or write access), + * %HW_BREAKPOINT_WRITE (triggered on write access), and + * %HW_BREAKPOINT_READ (triggered on read access). + * + * Appropriate macros are defined in include/asm/hw_breakpoint.h; not all + * possibilities are available on all architectures. Execute breakpoints + * must have length equal to the special value %HW_BREAKPOINT_LEN_EXECUTE. 
+ * + * When a breakpoint gets hit, the @triggered callback is + * invoked in_interrupt with a pointer to the %hw_breakpoint structure and the + * processor registers. + * Data breakpoints occur after the memory access has taken place. + * Breakpoints are disabled during execution @triggered, to avoid + * recursive traps and allow unhindered access to breakpointed memory. + * + * This sample code sets a breakpoint on pid_max and registers a callback + * function for writes to that variable. Note that it is not portable + * as written, because not all architectures support HW_BREAKPOINT_LEN_4. + * + * ---------------------------------------------------------------------- + * + * #include + * + * struct hw_breakpoint my_bp; + * + * static void my_triggered(struct hw_breakpoint *bp, struct pt_regs *regs) + * { + * printk(KERN_DEBUG "Inside triggered routine of breakpoint exception\n"); + * dump_stack(); + * ............... + * } + * + * static struct hw_breakpoint my_bp; + * + * static int init_module(void) + * { + * ...................... + * my_bp.info.type = HW_BREAKPOINT_WRITE; + * my_bp.info.len = HW_BREAKPOINT_LEN_4; + * + * my_bp.installed = (void *)my_bp_installed; + * + * rc = register_kernel_hw_breakpoint(&my_bp); + * ...................... + * } + * + * static void cleanup_module(void) + * { + * ...................... + * unregister_kernel_hw_breakpoint(&my_bp); + * ...................... + * } + * + * ---------------------------------------------------------------------- + */ +struct hw_breakpoint { + void (*triggered)(struct hw_breakpoint *, struct pt_regs *); + struct arch_hw_breakpoint info; +}; + +/* + * len and type values are defined in include/asm/hw_breakpoint.h. + * Available values vary according to the architecture. On i386 the + * possibilities are: + * + * HW_BREAKPOINT_LEN_1 + * HW_BREAKPOINT_LEN_2 + * HW_BREAKPOINT_LEN_4 + * HW_BREAKPOINT_RW + * HW_BREAKPOINT_READ + * + * On other architectures HW_BREAKPOINT_LEN_8 may be available, and the + * 1-, 2-, and 4-byte lengths may be unavailable. There also may be + * HW_BREAKPOINT_WRITE. You can use #ifdef to check at compile time. + */ + +extern int register_user_hw_breakpoint(struct task_struct *tsk, + struct hw_breakpoint *bp); +extern int modify_user_hw_breakpoint(struct task_struct *tsk, + struct hw_breakpoint *bp); +extern void unregister_user_hw_breakpoint(struct task_struct *tsk, + struct hw_breakpoint *bp); +/* + * Kernel breakpoints are not associated with any particular thread. + */ +extern int register_kernel_hw_breakpoint(struct hw_breakpoint *bp); +extern void unregister_kernel_hw_breakpoint(struct hw_breakpoint *bp); + +extern unsigned int hbp_kernel_pos; + +#endif /* __KERNEL__ */ +#endif /* _ASM_GENERIC_HW_BREAKPOINT_H */ From 62a038d34db26771756cf3689e36de638bedd2c4 Mon Sep 17 00:00:00 2001 From: "K.Prasad" Date: Mon, 1 Jun 2009 23:43:33 +0530 Subject: [PATCH 002/470] hw-breakpoints: introducing generic hardware breakpoint handler interfaces This patch introduces the generic Hardware Breakpoint interfaces for both user and kernel space requests. This core Api handles the hardware breakpoints through new helpers. It handles the user-space breakpoints and kernel breakpoints in front of arch implementation. One can choose kernel wide breakpoints using the following helpers and passing them a generic struct hw_breakpoint: - register_kernel_hw_breakpoint() - unregister_kernel_hw_breakpoint() - modify_kernel_hw_breakpoint() On the other side, you can choose per task breakpoints. 
- register_user_hw_breakpoint() - unregister_user_hw_breakpoint() - modify_user_hw_breakpoint() [ fweisbec@gmail.com: fix conflict against perfcounter ] Original-patch-by: Alan Stern Signed-off-by: K.Prasad Reviewed-by: Alan Stern Signed-off-by: Frederic Weisbecker --- arch/Kconfig | 4 + kernel/Makefile | 1 + kernel/hw_breakpoint.c | 378 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 383 insertions(+) create mode 100644 kernel/hw_breakpoint.c diff --git a/arch/Kconfig b/arch/Kconfig index 78a35e9dc10..1adf2d0e635 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -112,3 +112,7 @@ config HAVE_DMA_API_DEBUG config HAVE_DEFAULT_NO_SPIN_MUTEXES bool + +config HAVE_HW_BREAKPOINT + bool + diff --git a/kernel/Makefile b/kernel/Makefile index a35eee3436d..18ad1110b22 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -96,6 +96,7 @@ obj-$(CONFIG_TRACING) += trace/ obj-$(CONFIG_X86_DS) += trace/ obj-$(CONFIG_SMP) += sched_cpupri.o obj-$(CONFIG_SLOW_WORK) += slow-work.o +obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) # According to Alan Modra , the -fno-omit-frame-pointer is diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c new file mode 100644 index 00000000000..c1f64e65a9f --- /dev/null +++ b/kernel/hw_breakpoint.c @@ -0,0 +1,378 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) 2007 Alan Stern + * Copyright (C) IBM Corporation, 2009 + */ + +/* + * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility, + * using the CPU's debug registers. + * This file contains the arch-independent routines. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#ifdef CONFIG_X86 +#include +#endif +/* + * Spinlock that protects all (un)register operations over kernel/user-space + * breakpoint requests + */ +static DEFINE_SPINLOCK(hw_breakpoint_lock); + +/* Array of kernel-space breakpoint structures */ +struct hw_breakpoint *hbp_kernel[HBP_NUM]; + +/* + * Per-processor copy of hbp_kernel[]. Used only when hbp_kernel is being + * modified but we need the older copy to handle any hbp exceptions. It will + * sync with hbp_kernel[] value after updation is done through IPIs. + */ +DEFINE_PER_CPU(struct hw_breakpoint*, this_hbp_kernel[HBP_NUM]); + +/* + * Kernel breakpoints grow downwards, starting from HBP_NUM + * 'hbp_kernel_pos' denotes lowest numbered breakpoint register occupied for + * kernel-space request. We will initialise it here and not in an __init + * routine because load_debug_registers(), which uses this variable can be + * called very early during CPU initialisation. 
+ */ +unsigned int hbp_kernel_pos = HBP_NUM; + +/* + * An array containing refcount of threads using a given bkpt register + * Accesses are synchronised by acquiring hw_breakpoint_lock + */ +unsigned int hbp_user_refcount[HBP_NUM]; + +/* + * Load the debug registers during startup of a CPU. + */ +void load_debug_registers(void) +{ + unsigned long flags; + struct task_struct *tsk = current; + + spin_lock_bh(&hw_breakpoint_lock); + + /* Prevent IPIs for new kernel breakpoint updates */ + local_irq_save(flags); + arch_update_kernel_hw_breakpoint(NULL); + local_irq_restore(flags); + + if (test_tsk_thread_flag(tsk, TIF_DEBUG)) + arch_install_thread_hw_breakpoint(tsk); + + spin_unlock_bh(&hw_breakpoint_lock); +} + +/* + * Erase all the hardware breakpoint info associated with a thread. + * + * If tsk != current then tsk must not be usable (for example, a + * child being cleaned up from a failed fork). + */ +void flush_thread_hw_breakpoint(struct task_struct *tsk) +{ + int i; + struct thread_struct *thread = &(tsk->thread); + + spin_lock_bh(&hw_breakpoint_lock); + + /* The thread no longer has any breakpoints associated with it */ + clear_tsk_thread_flag(tsk, TIF_DEBUG); + for (i = 0; i < HBP_NUM; i++) { + if (thread->hbp[i]) { + hbp_user_refcount[i]--; + kfree(thread->hbp[i]); + thread->hbp[i] = NULL; + } + } + + arch_flush_thread_hw_breakpoint(tsk); + + /* Actually uninstall the breakpoints if necessary */ + if (tsk == current) + arch_uninstall_thread_hw_breakpoint(); + spin_unlock_bh(&hw_breakpoint_lock); +} + +/* + * Copy the hardware breakpoint info from a thread to its cloned child. + */ +int copy_thread_hw_breakpoint(struct task_struct *tsk, + struct task_struct *child, unsigned long clone_flags) +{ + /* + * We will assume that breakpoint settings are not inherited + * and the child starts out with no debug registers set. + * But what about CLONE_PTRACE? + */ + clear_tsk_thread_flag(child, TIF_DEBUG); + + /* We will call flush routine since the debugregs are not inherited */ + arch_flush_thread_hw_breakpoint(child); + + return 0; +} + +static int __register_user_hw_breakpoint(int pos, struct task_struct *tsk, + struct hw_breakpoint *bp) +{ + struct thread_struct *thread = &(tsk->thread); + int rc; + + /* Do not overcommit. Fail if kernel has used the hbp registers */ + if (pos >= hbp_kernel_pos) + return -ENOSPC; + + rc = arch_validate_hwbkpt_settings(bp, tsk); + if (rc) + return rc; + + thread->hbp[pos] = bp; + hbp_user_refcount[pos]++; + + arch_update_user_hw_breakpoint(pos, tsk); + /* + * Does it need to be installed right now? 
+ * Otherwise it will get installed the next time tsk runs + */ + if (tsk == current) + arch_install_thread_hw_breakpoint(tsk); + + return rc; +} + +/* + * Modify the address of a hbp register already in use by the task + * Do not invoke this in-lieu of a __unregister_user_hw_breakpoint() + */ +static int __modify_user_hw_breakpoint(int pos, struct task_struct *tsk, + struct hw_breakpoint *bp) +{ + struct thread_struct *thread = &(tsk->thread); + + if ((pos >= hbp_kernel_pos) || (arch_validate_hwbkpt_settings(bp, tsk))) + return -EINVAL; + + if (thread->hbp[pos] == NULL) + return -EINVAL; + + thread->hbp[pos] = bp; + /* + * 'pos' must be that of a hbp register already used by 'tsk' + * Otherwise arch_modify_user_hw_breakpoint() will fail + */ + arch_update_user_hw_breakpoint(pos, tsk); + + if (tsk == current) + arch_install_thread_hw_breakpoint(tsk); + + return 0; +} + +static void __unregister_user_hw_breakpoint(int pos, struct task_struct *tsk) +{ + hbp_user_refcount[pos]--; + tsk->thread.hbp[pos] = NULL; + + arch_update_user_hw_breakpoint(pos, tsk); + + if (tsk == current) + arch_install_thread_hw_breakpoint(tsk); +} + +/** + * register_user_hw_breakpoint - register a hardware breakpoint for user space + * @tsk: pointer to 'task_struct' of the process to which the address belongs + * @bp: the breakpoint structure to register + * + * @bp.info->name or @bp.info->address, @bp.info->len, @bp.info->type and + * @bp->triggered must be set properly before invocation + * + */ +int register_user_hw_breakpoint(struct task_struct *tsk, + struct hw_breakpoint *bp) +{ + struct thread_struct *thread = &(tsk->thread); + int i, rc = -ENOSPC; + + spin_lock_bh(&hw_breakpoint_lock); + + for (i = 0; i < hbp_kernel_pos; i++) { + if (!thread->hbp[i]) { + rc = __register_user_hw_breakpoint(i, tsk, bp); + break; + } + } + if (!rc) + set_tsk_thread_flag(tsk, TIF_DEBUG); + + spin_unlock_bh(&hw_breakpoint_lock); + return rc; +} +EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); + +/** + * modify_user_hw_breakpoint - modify a user-space hardware breakpoint + * @tsk: pointer to 'task_struct' of the process to which the address belongs + * @bp: the breakpoint structure to unregister + * + */ +int modify_user_hw_breakpoint(struct task_struct *tsk, struct hw_breakpoint *bp) +{ + struct thread_struct *thread = &(tsk->thread); + int i, ret = -ENOENT; + + spin_lock_bh(&hw_breakpoint_lock); + for (i = 0; i < hbp_kernel_pos; i++) { + if (bp == thread->hbp[i]) { + ret = __modify_user_hw_breakpoint(i, tsk, bp); + break; + } + } + spin_unlock_bh(&hw_breakpoint_lock); + return ret; +} +EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint); + +/** + * unregister_user_hw_breakpoint - unregister a user-space hardware breakpoint + * @tsk: pointer to 'task_struct' of the process to which the address belongs + * @bp: the breakpoint structure to unregister + * + */ +void unregister_user_hw_breakpoint(struct task_struct *tsk, + struct hw_breakpoint *bp) +{ + struct thread_struct *thread = &(tsk->thread); + int i, pos = -1, hbp_counter = 0; + + spin_lock_bh(&hw_breakpoint_lock); + for (i = 0; i < hbp_kernel_pos; i++) { + if (thread->hbp[i]) + hbp_counter++; + if (bp == thread->hbp[i]) + pos = i; + } + if (pos >= 0) { + __unregister_user_hw_breakpoint(pos, tsk); + hbp_counter--; + } + if (!hbp_counter) + clear_tsk_thread_flag(tsk, TIF_DEBUG); + + spin_unlock_bh(&hw_breakpoint_lock); +} +EXPORT_SYMBOL_GPL(unregister_user_hw_breakpoint); + +/** + * register_kernel_hw_breakpoint - register a hardware breakpoint for kernel space + * @bp: the 
breakpoint structure to register + * + * @bp.info->name or @bp.info->address, @bp.info->len, @bp.info->type and + * @bp->triggered must be set properly before invocation + * + */ +int register_kernel_hw_breakpoint(struct hw_breakpoint *bp) +{ + int rc; + + rc = arch_validate_hwbkpt_settings(bp, NULL); + if (rc) + return rc; + + spin_lock_bh(&hw_breakpoint_lock); + + rc = -ENOSPC; + /* Check if we are over-committing */ + if ((hbp_kernel_pos > 0) && (!hbp_user_refcount[hbp_kernel_pos-1])) { + hbp_kernel_pos--; + hbp_kernel[hbp_kernel_pos] = bp; + on_each_cpu(arch_update_kernel_hw_breakpoint, NULL, 1); + rc = 0; + } + + spin_unlock_bh(&hw_breakpoint_lock); + return rc; +} +EXPORT_SYMBOL_GPL(register_kernel_hw_breakpoint); + +/** + * unregister_kernel_hw_breakpoint - unregister a HW breakpoint for kernel space + * @bp: the breakpoint structure to unregister + * + * Uninstalls and unregisters @bp. + */ +void unregister_kernel_hw_breakpoint(struct hw_breakpoint *bp) +{ + int i, j; + + spin_lock_bh(&hw_breakpoint_lock); + + /* Find the 'bp' in our list of breakpoints for kernel */ + for (i = hbp_kernel_pos; i < HBP_NUM; i++) + if (bp == hbp_kernel[i]) + break; + + /* Check if we did not find a match for 'bp'. If so return early */ + if (i == HBP_NUM) { + spin_unlock_bh(&hw_breakpoint_lock); + return; + } + + /* + * We'll shift the breakpoints one-level above to compact if + * unregistration creates a hole + */ + for (j = i; j > hbp_kernel_pos; j--) + hbp_kernel[j] = hbp_kernel[j-1]; + + hbp_kernel[hbp_kernel_pos] = NULL; + on_each_cpu(arch_update_kernel_hw_breakpoint, NULL, 1); + hbp_kernel_pos++; + + spin_unlock_bh(&hw_breakpoint_lock); +} +EXPORT_SYMBOL_GPL(unregister_kernel_hw_breakpoint); + +static struct notifier_block hw_breakpoint_exceptions_nb = { + .notifier_call = hw_breakpoint_exceptions_notify, + /* we need to be notified first */ + .priority = 0x7fffffff +}; + +static int __init init_hw_breakpoint(void) +{ + return register_die_notifier(&hw_breakpoint_exceptions_nb); +} + +core_initcall(init_hw_breakpoint); From 0067f1297241ea567f2b22a455519752d70fcca9 Mon Sep 17 00:00:00 2001 From: "K.Prasad" Date: Mon, 1 Jun 2009 23:43:57 +0530 Subject: [PATCH 003/470] hw-breakpoints: x86 architecture implementation of Hardware Breakpoint interfaces This patch introduces the arch-specific implementation of the generic hardware breakpoints in kernel/hw_breakpoint.c inside x86 specific directories. It contains functions which help to validate and serve requests using Hardware Breakpoint registers on x86 processors. 
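To make the intended usage concrete, below is a minimal sketch of a client of these interfaces: a module that places a kernel-space write watchpoint through register_kernel_hw_breakpoint(), along the lines of the sample in the asm-generic header above. The watched symbol ("pid_max"), the printk text and the module boilerplate are illustrative assumptions, not part of the patch.

 ----------------------------------------------------------------------

/*
 * Illustrative only: set a 4-byte write watchpoint on a kernel symbol
 * using the generic hw-breakpoint interface introduced by this series.
 * The symbol name and messages below are examples, not patch content.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <asm/hw_breakpoint.h>

static struct hw_breakpoint sample_bp;

/* Invoked in interrupt context, after the write has already happened */
static void sample_triggered(struct hw_breakpoint *bp, struct pt_regs *regs)
{
	printk(KERN_INFO "write to %s (0x%lx) caught\n",
	       bp->info.name, bp->info.address);
	dump_stack();
}

static int __init sample_init(void)
{
	sample_bp.info.name = "pid_max";           /* resolved via kallsyms */
	sample_bp.info.type = HW_BREAKPOINT_WRITE; /* trigger on writes */
	sample_bp.info.len  = HW_BREAKPOINT_LEN_4; /* watch 4 bytes */
	sample_bp.triggered = sample_triggered;

	return register_kernel_hw_breakpoint(&sample_bp);
}

static void __exit sample_exit(void)
{
	unregister_kernel_hw_breakpoint(&sample_bp);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");

 ----------------------------------------------------------------------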
[ fweisbec@gmail.com: fix conflict against kmemcheck ] Original-patch-by: Alan Stern Signed-off-by: K.Prasad Reviewed-by: Alan Stern Signed-off-by: Frederic Weisbecker --- arch/x86/Kconfig | 1 + arch/x86/kernel/Makefile | 2 +- arch/x86/kernel/hw_breakpoint.c | 382 ++++++++++++++++++++++++++++++++ 3 files changed, 384 insertions(+), 1 deletion(-) create mode 100644 arch/x86/kernel/hw_breakpoint.c diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index df9e885eee1..3033375ed6b 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -46,6 +46,7 @@ config X86 select HAVE_KERNEL_GZIP select HAVE_KERNEL_BZIP2 select HAVE_KERNEL_LZMA + select HAVE_HW_BREAKPOINT config ARCH_DEFCONFIG string diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 77df4d654ff..cbc78182917 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -36,7 +36,7 @@ obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o obj-y += bootflag.o e820.o obj-y += pci-dma.o quirks.o i8237.o topology.o kdebugfs.o -obj-y += alternative.o i8253.o pci-nommu.o +obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o obj-y += tsc.o io_delay.o rtc.o obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c new file mode 100644 index 00000000000..4867c9f3b5f --- /dev/null +++ b/arch/x86/kernel/hw_breakpoint.c @@ -0,0 +1,382 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) 2007 Alan Stern + * Copyright (C) 2009 IBM Corporation + */ + +/* + * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility, + * using the CPU's debug registers. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/* Unmasked kernel DR7 value */ +static unsigned long kdr7; + +/* + * Masks for the bits corresponding to registers DR0 - DR3 in DR7 register. + * Used to clear and verify the status of bits corresponding to DR0 - DR3 + */ +static const unsigned long dr7_masks[HBP_NUM] = { + 0x000f0003, /* LEN0, R/W0, G0, L0 */ + 0x00f0000c, /* LEN1, R/W1, G1, L1 */ + 0x0f000030, /* LEN2, R/W2, G2, L2 */ + 0xf00000c0 /* LEN3, R/W3, G3, L3 */ +}; + + +/* + * Encode the length, type, Exact, and Enable bits for a particular breakpoint + * as stored in debug register 7. 
+ */ +static unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type) +{ + unsigned long bp_info; + + bp_info = (len | type) & 0xf; + bp_info <<= (DR_CONTROL_SHIFT + drnum * DR_CONTROL_SIZE); + bp_info |= (DR_GLOBAL_ENABLE << (drnum * DR_ENABLE_SIZE)) | + DR_GLOBAL_SLOWDOWN; + return bp_info; +} + +void arch_update_kernel_hw_breakpoint(void *unused) +{ + struct hw_breakpoint *bp; + int i, cpu = get_cpu(); + unsigned long temp_kdr7 = 0; + + /* Don't allow debug exceptions while we update the registers */ + set_debugreg(0UL, 7); + + for (i = hbp_kernel_pos; i < HBP_NUM; i++) { + per_cpu(this_hbp_kernel[i], cpu) = bp = hbp_kernel[i]; + if (bp) { + temp_kdr7 |= encode_dr7(i, bp->info.len, bp->info.type); + set_debugreg(bp->info.address, i); + } + } + + /* No need to set DR6. Update the debug registers with kernel-space + * breakpoint values from kdr7 and user-space requests from the + * current process + */ + kdr7 = temp_kdr7; + set_debugreg(kdr7 | current->thread.debugreg7, 7); + put_cpu_no_resched(); +} + +/* + * Install the thread breakpoints in their debug registers. + */ +void arch_install_thread_hw_breakpoint(struct task_struct *tsk) +{ + struct thread_struct *thread = &(tsk->thread); + + switch (hbp_kernel_pos) { + case 4: + set_debugreg(thread->debugreg[3], 3); + case 3: + set_debugreg(thread->debugreg[2], 2); + case 2: + set_debugreg(thread->debugreg[1], 1); + case 1: + set_debugreg(thread->debugreg[0], 0); + default: + break; + } + + /* No need to set DR6 */ + set_debugreg((kdr7 | thread->debugreg7), 7); +} + +/* + * Install the debug register values for just the kernel, no thread. + */ +void arch_uninstall_thread_hw_breakpoint() +{ + /* Clear the user-space portion of debugreg7 by setting only kdr7 */ + set_debugreg(kdr7, 7); + +} + +static int get_hbp_len(u8 hbp_len) +{ + unsigned int len_in_bytes = 0; + + switch (hbp_len) { + case HW_BREAKPOINT_LEN_1: + len_in_bytes = 1; + break; + case HW_BREAKPOINT_LEN_2: + len_in_bytes = 2; + break; + case HW_BREAKPOINT_LEN_4: + len_in_bytes = 4; + break; +#ifdef CONFIG_X86_64 + case HW_BREAKPOINT_LEN_8: + len_in_bytes = 8; + break; +#endif + } + return len_in_bytes; +} + +/* + * Check for virtual address in user space. + */ +int arch_check_va_in_userspace(unsigned long va, u8 hbp_len) +{ + unsigned int len; + + len = get_hbp_len(hbp_len); + + return (va <= TASK_SIZE - len); +} + +/* + * Check for virtual address in kernel space. + */ +int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len) +{ + unsigned int len; + + len = get_hbp_len(hbp_len); + + return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); +} + +/* + * Store a breakpoint's encoded address, length, and type. + */ +static int arch_store_info(struct hw_breakpoint *bp, struct task_struct *tsk) +{ + /* + * User-space requests will always have the address field populated + * Symbol names from user-space are rejected + */ + if (tsk && bp->info.name) + return -EINVAL; + /* + * For kernel-addresses, either the address or symbol name can be + * specified. 
+ */ + if (bp->info.name) + bp->info.address = (unsigned long) + kallsyms_lookup_name(bp->info.name); + if (bp->info.address) + return 0; + return -EINVAL; +} + +/* + * Validate the arch-specific HW Breakpoint register settings + */ +int arch_validate_hwbkpt_settings(struct hw_breakpoint *bp, + struct task_struct *tsk) +{ + unsigned int align; + int ret = -EINVAL; + + switch (bp->info.type) { + /* + * Ptrace-refactoring code + * For now, we'll allow instruction breakpoint only for user-space + * addresses + */ + case HW_BREAKPOINT_EXECUTE: + if ((!arch_check_va_in_userspace(bp->info.address, + bp->info.len)) && + bp->info.len != HW_BREAKPOINT_LEN_EXECUTE) + return ret; + break; + case HW_BREAKPOINT_WRITE: + break; + case HW_BREAKPOINT_RW: + break; + default: + return ret; + } + + switch (bp->info.len) { + case HW_BREAKPOINT_LEN_1: + align = 0; + break; + case HW_BREAKPOINT_LEN_2: + align = 1; + break; + case HW_BREAKPOINT_LEN_4: + align = 3; + break; +#ifdef CONFIG_X86_64 + case HW_BREAKPOINT_LEN_8: + align = 7; + break; +#endif + default: + return ret; + } + + if (bp->triggered) + ret = arch_store_info(bp, tsk); + + if (ret < 0) + return ret; + /* + * Check that the low-order bits of the address are appropriate + * for the alignment implied by len. + */ + if (bp->info.address & align) + return -EINVAL; + + /* Check that the virtual address is in the proper range */ + if (tsk) { + if (!arch_check_va_in_userspace(bp->info.address, bp->info.len)) + return -EFAULT; + } else { + if (!arch_check_va_in_kernelspace(bp->info.address, + bp->info.len)) + return -EFAULT; + } + return 0; +} + +void arch_update_user_hw_breakpoint(int pos, struct task_struct *tsk) +{ + struct thread_struct *thread = &(tsk->thread); + struct hw_breakpoint *bp = thread->hbp[pos]; + + thread->debugreg7 &= ~dr7_masks[pos]; + if (bp) { + thread->debugreg[pos] = bp->info.address; + thread->debugreg7 |= encode_dr7(pos, bp->info.len, + bp->info.type); + } else + thread->debugreg[pos] = 0; +} + +void arch_flush_thread_hw_breakpoint(struct task_struct *tsk) +{ + int i; + struct thread_struct *thread = &(tsk->thread); + + thread->debugreg7 = 0; + for (i = 0; i < HBP_NUM; i++) + thread->debugreg[i] = 0; +} + +/* + * Handle debug exception notifications. + * + * Return value is either NOTIFY_STOP or NOTIFY_DONE as explained below. + * + * NOTIFY_DONE returned if one of the following conditions is true. + * i) When the causative address is from user-space and the exception + * is a valid one, i.e. not triggered as a result of lazy debug register + * switching + * ii) When there are more bits than trap set in DR6 register (such + * as BD, BS or BT) indicating that more than one debug condition is + * met and requires some more action in do_debug(). + * + * NOTIFY_STOP returned for all other cases + * + */ +int __kprobes hw_breakpoint_handler(struct die_args *args) +{ + int i, cpu, rc = NOTIFY_STOP; + struct hw_breakpoint *bp; + /* The DR6 value is stored in args->err */ + unsigned long dr7, dr6 = args->err; + + /* Do an early return if no trap bits are set in DR6 */ + if ((dr6 & DR_TRAP_BITS) == 0) + return NOTIFY_DONE; + + /* Lazy debug register switching */ + if (!test_tsk_thread_flag(current, TIF_DEBUG)) + arch_uninstall_thread_hw_breakpoint(); + + get_debugreg(dr7, 7); + /* Disable breakpoints during exception handling */ + set_debugreg(0UL, 7); + /* + * Assert that local interrupts are disabled + * Reset the DRn bits in the virtualized register value. + * The ptrace trigger routine will add in whatever is needed. 
+ */ + current->thread.debugreg6 &= ~DR_TRAP_BITS; + cpu = get_cpu(); + + /* Handle all the breakpoints that were triggered */ + for (i = 0; i < HBP_NUM; ++i) { + if (likely(!(dr6 & (DR_TRAP0 << i)))) + continue; + /* + * Find the corresponding hw_breakpoint structure and + * invoke its triggered callback. + */ + if (i >= hbp_kernel_pos) + bp = per_cpu(this_hbp_kernel[i], cpu); + else { + bp = current->thread.hbp[i]; + if (bp) + rc = NOTIFY_DONE; + } + /* + * bp can be NULL due to lazy debug register switching + * or due to the delay between updates of hbp_kernel_pos + * and this_hbp_kernel. + */ + if (!bp) + continue; + + (bp->triggered)(bp, args->regs); + } + if (dr6 & (~DR_TRAP_BITS)) + rc = NOTIFY_DONE; + + set_debugreg(dr7, 7); + put_cpu_no_resched(); + return rc; +} + +/* + * Handle debug exception notifications. + */ +int __kprobes hw_breakpoint_exceptions_notify( + struct notifier_block *unused, unsigned long val, void *data) +{ + if (val != DIE_DEBUG) + return NOTIFY_DONE; + + return hw_breakpoint_handler(data); +} From 08d68323d1f0c34452e614263b212ca556dae47f Mon Sep 17 00:00:00 2001 From: "K.Prasad" Date: Mon, 1 Jun 2009 23:44:08 +0530 Subject: [PATCH 004/470] hw-breakpoints: modifying generic debug exception to use thread-specific debug registers This patch modifies the breakpoint exception handler code to use the new abstract debug register names. [ fweisbec@gmail.com: fix conflict against kmemcheck ] [ Impact: refactor and cleanup x86 debug exception handler ] Original-patch-by: Alan Stern Signed-off-by: K.Prasad Reviewed-by: Alan Stern Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/traps.c | 69 ++++++++++++++--------------------------- 1 file changed, 24 insertions(+), 45 deletions(-) diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index a1d288327ff..de9913247dd 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -529,73 +529,52 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) { struct task_struct *tsk = current; - unsigned long condition; + unsigned long dr6; int si_code; - get_debugreg(condition, 6); + get_debugreg(dr6, 6); + /* DR6 may or may not be cleared by the CPU */ + set_debugreg(0, 6); /* * The processor cleared BTF, so don't mark that we need it set. */ clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR); tsk->thread.debugctlmsr = 0; - if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code, + /* Store the virtualized DR6 value */ + tsk->thread.debugreg6 = dr6; + + if (notify_die(DIE_DEBUG, "debug", regs, dr6, error_code, SIGTRAP) == NOTIFY_STOP) return; /* It's safe to allow irq's after DR6 has been saved */ preempt_conditional_sti(regs); - /* Mask out spurious debug traps due to lazy DR7 setting */ - if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) { - if (!tsk->thread.debugreg7) - goto clear_dr7; + if (regs->flags & X86_VM_MASK) { + handle_vm86_trap((struct kernel_vm86_regs *) regs, + error_code, 1); + return; } -#ifdef CONFIG_X86_32 - if (regs->flags & X86_VM_MASK) - goto debug_vm86; -#endif - - /* Save debug status register where ptrace can see it */ - tsk->thread.debugreg6 = condition; - /* - * Single-stepping through TF: make sure we ignore any events in - * kernel space (but re-enable TF when returning to user mode). + * Single-stepping through system calls: ignore any exceptions in + * kernel space, but re-enable TF when returning to user mode. 
+ * + * We already checked v86 mode above, so we can check for kernel mode + * by just checking the CPL of CS. */ - if (condition & DR_STEP) { - if (!user_mode(regs)) - goto clear_TF_reenable; + if ((dr6 & DR_STEP) && !user_mode(regs)) { + tsk->thread.debugreg6 &= ~DR_STEP; + set_tsk_thread_flag(tsk, TIF_SINGLESTEP); + regs->flags &= ~X86_EFLAGS_TF; } - - si_code = get_si_code(condition); - /* Ok, finally something we can handle */ - send_sigtrap(tsk, regs, error_code, si_code); - - /* - * Disable additional traps. They'll be re-enabled when - * the signal is delivered. - */ -clear_dr7: - set_debugreg(0, 7); + si_code = get_si_code(tsk->thread.debugreg6); + if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS)) + send_sigtrap(tsk, regs, error_code, si_code); preempt_conditional_cli(regs); - return; -#ifdef CONFIG_X86_32 -debug_vm86: - /* reenable preemption: handle_vm86_trap() might sleep */ - dec_preempt_count(); - handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1); - conditional_cli(regs); - return; -#endif - -clear_TF_reenable: - set_tsk_thread_flag(tsk, TIF_SINGLESTEP); - regs->flags &= ~X86_EFLAGS_TF; - preempt_conditional_cli(regs); return; } From 1e3500666f7c5daaadadb8431a2927cdbbdb7dd4 Mon Sep 17 00:00:00 2001 From: "K.Prasad" Date: Mon, 1 Jun 2009 23:44:26 +0530 Subject: [PATCH 005/470] hw-breakpoints: use wrapper routines around debug registers in processor related functions This patch enables the use of wrapper routines to access the debug/breakpoint registers on cpu management. The hardcoded debug registers save and restore operations for threads breakpoints are replaced by wrappers. And now that we handle the kernel breakpoints too, we also need to handle them on cpu hotplug operations. [ Impact: adapt new hardware breakpoint api to cpu hotplug ] Original-patch-by: Alan Stern Signed-off-by: K.Prasad Reviewed-by: Alan Stern Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/smpboot.c | 3 +++ arch/x86/power/cpu_32.c | 13 +++---------- arch/x86/power/cpu_64.c | 12 +++--------- 3 files changed, 9 insertions(+), 19 deletions(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 58d24ef917d..2b2652d205c 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -63,6 +63,7 @@ #include #include #include +#include #include #include @@ -326,6 +327,7 @@ notrace static void __cpuinit start_secondary(void *unused) setup_secondary_clock(); wmb(); + load_debug_registers(); cpu_idle(); } @@ -1250,6 +1252,7 @@ void cpu_disable_common(void) remove_cpu_from_maps(cpu); unlock_vector_lock(); fixup_irqs(); + hw_breakpoint_disable(); } int native_cpu_disable(void) diff --git a/arch/x86/power/cpu_32.c b/arch/x86/power/cpu_32.c index 51991394800..2bc3b016de9 100644 --- a/arch/x86/power/cpu_32.c +++ b/arch/x86/power/cpu_32.c @@ -13,6 +13,7 @@ #include #include #include +#include static struct saved_context saved_context; @@ -48,6 +49,7 @@ static void __save_processor_state(struct saved_context *ctxt) ctxt->cr2 = read_cr2(); ctxt->cr3 = read_cr3(); ctxt->cr4 = read_cr4_safe(); + hw_breakpoint_disable(); } /* Needed by apm.c */ @@ -83,16 +85,7 @@ static void fix_processor_context(void) /* * Now maybe reload the debug registers */ - if (current->thread.debugreg7) { - set_debugreg(current->thread.debugreg[0], 0); - set_debugreg(current->thread.debugreg[1], 1); - set_debugreg(current->thread.debugreg[2], 2); - set_debugreg(current->thread.debugreg[3], 3); - /* no 4 and 5 */ - set_debugreg(current->thread.debugreg6, 6); - 
set_debugreg(current->thread.debugreg7, 7); - } - + load_debug_registers(); } static void __restore_processor_state(struct saved_context *ctxt) diff --git a/arch/x86/power/cpu_64.c b/arch/x86/power/cpu_64.c index 1e3bdcc959f..46866a13a93 100644 --- a/arch/x86/power/cpu_64.c +++ b/arch/x86/power/cpu_64.c @@ -16,6 +16,7 @@ #include #include #include +#include static void fix_processor_context(void); @@ -71,6 +72,7 @@ static void __save_processor_state(struct saved_context *ctxt) ctxt->cr3 = read_cr3(); ctxt->cr4 = read_cr4(); ctxt->cr8 = read_cr8(); + hw_breakpoint_disable(); } void save_processor_state(void) @@ -162,13 +164,5 @@ static void fix_processor_context(void) /* * Now maybe reload the debug registers */ - if (current->thread.debugreg7){ - set_debugreg(current->thread.debugreg[0], 0); - set_debugreg(current->thread.debugreg[1], 1); - set_debugreg(current->thread.debugreg[2], 2); - set_debugreg(current->thread.debugreg[3], 3); - /* no 4 and 5 */ - loaddebug(¤t->thread, 6); - loaddebug(¤t->thread, 7); - } + load_debug_registers(); } From 66cb5917295958652ff6ba36d83f98f2379c46b4 Mon Sep 17 00:00:00 2001 From: "K.Prasad" Date: Mon, 1 Jun 2009 23:44:55 +0530 Subject: [PATCH 006/470] hw-breakpoints: use the new wrapper routines to access debug registers in process/thread code This patch enables the use of abstract debug registers in process-handling routines, according to the new hardware breakpoint Api. [ Impact: adapt thread breakpoints handling code to the new breakpoint Api ] Original-patch-by: Alan Stern Signed-off-by: K.Prasad Reviewed-by: Alan Stern Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/process.c | 22 ++++++---------------- arch/x86/kernel/process_32.c | 28 ++++++++++++++++++++++++++++ arch/x86/kernel/process_64.c | 31 +++++++++++++++++++++++++++++++ 3 files changed, 65 insertions(+), 16 deletions(-) diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 291527cb438..19a686c401b 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -15,6 +15,8 @@ #include #include #include +#include +#include unsigned long idle_halt; EXPORT_SYMBOL(idle_halt); @@ -46,6 +48,8 @@ void free_thread_xstate(struct task_struct *tsk) kmem_cache_free(task_xstate_cachep, tsk->thread.xstate); tsk->thread.xstate = NULL; } + if (unlikely(test_tsk_thread_flag(tsk, TIF_DEBUG))) + flush_thread_hw_breakpoint(tsk); WARN(tsk->thread.ds_ctx, "leaking DS context\n"); } @@ -106,12 +110,8 @@ void flush_thread(void) clear_tsk_thread_flag(tsk, TIF_DEBUG); - tsk->thread.debugreg[0] = 0; - tsk->thread.debugreg[1] = 0; - tsk->thread.debugreg[2] = 0; - tsk->thread.debugreg[3] = 0; - tsk->thread.debugreg6 = 0; - tsk->thread.debugreg7 = 0; + if (unlikely(test_tsk_thread_flag(tsk, TIF_DEBUG))) + flush_thread_hw_breakpoint(tsk); memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); /* * Forget coprocessor state.. 
@@ -193,16 +193,6 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, else if (next->debugctlmsr != prev->debugctlmsr) update_debugctlmsr(next->debugctlmsr); - if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { - set_debugreg(next->debugreg[0], 0); - set_debugreg(next->debugreg[1], 1); - set_debugreg(next->debugreg[2], 2); - set_debugreg(next->debugreg[3], 3); - /* no 4 and 5 */ - set_debugreg(next->debugreg6, 6); - set_debugreg(next->debugreg7, 7); - } - if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^ test_tsk_thread_flag(next_p, TIF_NOTSC)) { /* prev and next are different */ diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index b5e4bfef447..297ffff2ffc 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -61,6 +61,8 @@ #include #include #include +#include +#include asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); @@ -265,7 +267,13 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, task_user_gs(p) = get_user_gs(regs); + p->thread.io_bitmap_ptr = NULL; tsk = current; + err = -ENOMEM; + if (unlikely(test_tsk_thread_flag(tsk, TIF_DEBUG))) + if (copy_thread_hw_breakpoint(tsk, p, clone_flags)) + goto out; + if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) { p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr, IO_BITMAP_BYTES, GFP_KERNEL); @@ -285,10 +293,13 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, err = do_set_thread_area(p, -1, (struct user_desc __user *)childregs->si, 0); +out: if (err && p->thread.io_bitmap_ptr) { kfree(p->thread.io_bitmap_ptr); p->thread.io_bitmap_max = 0; } + if (err) + flush_thread_hw_breakpoint(p); clear_tsk_thread_flag(p, TIF_DS_AREA_MSR); p->thread.ds_ctx = NULL; @@ -427,6 +438,23 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) lazy_load_gs(next->gs); percpu_write(current_task, next_p); + /* + * There's a problem with moving the arch_install_thread_hw_breakpoint() + * call before current is updated. Suppose a kernel breakpoint is + * triggered in between the two, the hw-breakpoint handler will see that + * the 'current' task does not have TIF_DEBUG flag set and will think it + * is leftover from an old task (lazy switching) and will erase it. Then + * until the next context switch, no user-breakpoints will be installed. + * + * The real problem is that it's impossible to update both current and + * physical debug registers at the same instant, so there will always be + * a window in which they disagree and a breakpoint might get triggered. + * Since we use lazy switching, we are forced to assume that a + * disagreement means that current is correct and the exception is due + * to lazy debug register switching. 
+ */ + if (unlikely(test_tsk_thread_flag(next_p, TIF_DEBUG))) + arch_install_thread_hw_breakpoint(next_p); return prev_p; } diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 5a1a1de292e..f7b276d4b3f 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -55,6 +55,8 @@ #include #include #include +#include +#include asmlinkage extern void ret_from_fork(void); @@ -248,6 +250,8 @@ void release_thread(struct task_struct *dead_task) BUG(); } } + if (unlikely(dead_task->thread.debugreg7)) + flush_thread_hw_breakpoint(dead_task); } static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr) @@ -303,12 +307,18 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, p->thread.fs = me->thread.fs; p->thread.gs = me->thread.gs; + p->thread.io_bitmap_ptr = NULL; savesegment(gs, p->thread.gsindex); savesegment(fs, p->thread.fsindex); savesegment(es, p->thread.es); savesegment(ds, p->thread.ds); + err = -ENOMEM; + if (unlikely(test_tsk_thread_flag(me, TIF_DEBUG))) + if (copy_thread_hw_breakpoint(me, p, clone_flags)) + goto out; + if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) { p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL); if (!p->thread.io_bitmap_ptr) { @@ -347,6 +357,9 @@ out: kfree(p->thread.io_bitmap_ptr); p->thread.io_bitmap_max = 0; } + if (err) + flush_thread_hw_breakpoint(p); + return err; } @@ -492,6 +505,24 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) */ if (tsk_used_math(next_p) && next_p->fpu_counter > 5) math_state_restore(); + /* + * There's a problem with moving the arch_install_thread_hw_breakpoint() + * call before current is updated. Suppose a kernel breakpoint is + * triggered in between the two, the hw-breakpoint handler will see that + * the 'current' task does not have TIF_DEBUG flag set and will think it + * is leftover from an old task (lazy switching) and will erase it. Then + * until the next context switch, no user-breakpoints will be installed. + * + * The real problem is that it's impossible to update both current and + * physical debug registers at the same instant, so there will always be + * a window in which they disagree and a breakpoint might get triggered. + * Since we use lazy switching, we are forced to assume that a + * disagreement means that current is correct and the exception is due + * to lazy debug register switching. + */ + if (unlikely(test_tsk_thread_flag(next_p, TIF_DEBUG))) + arch_install_thread_hw_breakpoint(next_p); + return prev_p; } From da0cdc14f5f7e0faee6b2393fefed056cdb17146 Mon Sep 17 00:00:00 2001 From: "K.Prasad" Date: Mon, 1 Jun 2009 23:45:03 +0530 Subject: [PATCH 007/470] hw-breakpoints: modify signal handling code to refrain from re-enabling HW Breakpoints This patch disables re-enabling of Hardware Breakpoint registers through the signal handling code. This is now done during from hw_breakpoint_handler(). Original-patch-by: Alan Stern Signed-off-by: K.Prasad Reviewed-by: Alan Stern Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/signal.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 14425166b8e..f33d2e0ef09 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -800,15 +800,6 @@ static void do_signal(struct pt_regs *regs) signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { - /* - * Re-enable any watchpoints before delivering the - * signal to user space. 
The processor register will - * have been cleared if the watchpoint triggered - * inside the kernel. - */ - if (current->thread.debugreg7) - set_debugreg(current->thread.debugreg7, 7); - /* Whee! Actually deliver the signal. */ if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { /* From 72f674d203cd230426437cdcf7dd6f681dad8b0d Mon Sep 17 00:00:00 2001 From: "K.Prasad" Date: Mon, 1 Jun 2009 23:45:48 +0530 Subject: [PATCH 008/470] hw-breakpoints: modify Ptrace routines to access breakpoint registers This patch modifies the ptrace code to use the new wrapper routines around the debug/breakpoint registers. [ Impact: adapt x86 ptrace to the new breakpoint Api ] Original-patch-by: Alan Stern Signed-off-by: K.Prasad Signed-off-by: Maneesh Soni Reviewed-by: Alan Stern Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/ptrace.c | 237 ++++++++++++++++++++++++--------------- 1 file changed, 144 insertions(+), 93 deletions(-) diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 313be40be55..b457f78b7db 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -34,6 +34,7 @@ #include #include #include +#include #include @@ -136,11 +137,6 @@ static int set_segment_reg(struct task_struct *task, return 0; } -static unsigned long debugreg_addr_limit(struct task_struct *task) -{ - return TASK_SIZE - 3; -} - #else /* CONFIG_X86_64 */ #define FLAG_MASK (FLAG_MASK_32 | X86_EFLAGS_NT) @@ -265,15 +261,6 @@ static int set_segment_reg(struct task_struct *task, return 0; } -static unsigned long debugreg_addr_limit(struct task_struct *task) -{ -#ifdef CONFIG_IA32_EMULATION - if (test_tsk_thread_flag(task, TIF_IA32)) - return IA32_PAGE_OFFSET - 3; -#endif - return TASK_SIZE_MAX - 7; -} - #endif /* CONFIG_X86_32 */ static unsigned long get_flags(struct task_struct *task) @@ -464,95 +451,159 @@ static int genregs_set(struct task_struct *target, } /* - * This function is trivial and will be inlined by the compiler. - * Having it separates the implementation details of debug - * registers from the interface details of ptrace. + * Decode the length and type bits for a particular breakpoint as + * stored in debug register 7. Return the "enabled" status. 
*/ -static unsigned long ptrace_get_debugreg(struct task_struct *child, int n) +static int decode_dr7(unsigned long dr7, int bpnum, unsigned *len, + unsigned *type) { - switch (n) { - case 0: return child->thread.debugreg[0]; - case 1: return child->thread.debugreg[1]; - case 2: return child->thread.debugreg[2]; - case 3: return child->thread.debugreg[3]; - case 6: return child->thread.debugreg6; - case 7: return child->thread.debugreg7; - } - return 0; + int bp_info = dr7 >> (DR_CONTROL_SHIFT + bpnum * DR_CONTROL_SIZE); + + *len = (bp_info & 0xc) | 0x40; + *type = (bp_info & 0x3) | 0x80; + return (dr7 >> (bpnum * DR_ENABLE_SIZE)) & 0x3; } -static int ptrace_set_debugreg(struct task_struct *child, - int n, unsigned long data) +static void ptrace_triggered(struct hw_breakpoint *bp, struct pt_regs *regs) { + struct thread_struct *thread = &(current->thread); int i; - if (unlikely(n == 4 || n == 5)) - return -EIO; - - if (n < 4 && unlikely(data >= debugreg_addr_limit(child))) - return -EIO; - - switch (n) { - case 0: child->thread.debugreg[0] = data; break; - case 1: child->thread.debugreg[1] = data; break; - case 2: child->thread.debugreg[2] = data; break; - case 3: child->thread.debugreg[3] = data; break; - - case 6: - if ((data & ~0xffffffffUL) != 0) - return -EIO; - child->thread.debugreg6 = data; - break; - - case 7: + /* + * Store in the virtual DR6 register the fact that the breakpoint + * was hit so the thread's debugger will see it. + */ + for (i = 0; i < hbp_kernel_pos; i++) /* - * Sanity-check data. Take one half-byte at once with - * check = (val >> (16 + 4*i)) & 0xf. It contains the - * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits - * 2 and 3 are LENi. Given a list of invalid values, - * we do mask |= 1 << invalid_value, so that - * (mask >> check) & 1 is a correct test for invalid - * values. - * - * R/Wi contains the type of the breakpoint / - * watchpoint, LENi contains the length of the watched - * data in the watchpoint case. - * - * The invalid values are: - * - LENi == 0x10 (undefined), so mask |= 0x0f00. [32-bit] - * - R/Wi == 0x10 (break on I/O reads or writes), so - * mask |= 0x4444. - * - R/Wi == 0x00 && LENi != 0x00, so we have mask |= - * 0x1110. - * - * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54. - * - * See the Intel Manual "System Programming Guide", - * 15.2.4 - * - * Note that LENi == 0x10 is defined on x86_64 in long - * mode (i.e. even for 32-bit userspace software, but - * 64-bit kernel), so the x86_64 mask value is 0x5454. - * See the AMD manual no. 24593 (AMD64 System Programming) + * We will check bp->info.address against the address stored in + * thread's hbp structure and not debugreg[i]. This is to ensure + * that the corresponding bit for 'i' in DR7 register is enabled */ -#ifdef CONFIG_X86_32 -#define DR7_MASK 0x5f54 -#else -#define DR7_MASK 0x5554 -#endif - data &= ~DR_CONTROL_RESERVED; - for (i = 0; i < 4; i++) - if ((DR7_MASK >> ((data >> (16 + 4*i)) & 0xf)) & 1) - return -EIO; - child->thread.debugreg7 = data; - if (data) - set_tsk_thread_flag(child, TIF_DEBUG); - else - clear_tsk_thread_flag(child, TIF_DEBUG); - break; - } + if (bp->info.address == thread->hbp[i]->info.address) + break; - return 0; + thread->debugreg6 |= (DR_TRAP0 << i); +} + +/* + * Handle ptrace writes to debug register 7. 
+ */ +static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data) +{ + struct thread_struct *thread = &(tsk->thread); + unsigned long old_dr7 = thread->debugreg7; + int i, orig_ret = 0, rc = 0; + int enabled, second_pass = 0; + unsigned len, type; + struct hw_breakpoint *bp; + + data &= ~DR_CONTROL_RESERVED; +restore: + /* + * Loop through all the hardware breakpoints, making the + * appropriate changes to each. + */ + for (i = 0; i < HBP_NUM; i++) { + enabled = decode_dr7(data, i, &len, &type); + bp = thread->hbp[i]; + + if (!enabled) { + if (bp) { + /* Don't unregister the breakpoints right-away, + * unless all register_user_hw_breakpoint() + * requests have succeeded. This prevents + * any window of opportunity for debug + * register grabbing by other users. + */ + if (!second_pass) + continue; + unregister_user_hw_breakpoint(tsk, bp); + kfree(bp); + } + continue; + } + if (!bp) { + rc = -ENOMEM; + bp = kzalloc(sizeof(struct hw_breakpoint), GFP_KERNEL); + if (bp) { + bp->info.address = thread->debugreg[i]; + bp->triggered = ptrace_triggered; + bp->info.len = len; + bp->info.type = type; + rc = register_user_hw_breakpoint(tsk, bp); + if (rc) + kfree(bp); + } + } else + rc = modify_user_hw_breakpoint(tsk, bp); + if (rc) + break; + } + /* + * Make a second pass to free the remaining unused breakpoints + * or to restore the original breakpoints if an error occurred. + */ + if (!second_pass) { + second_pass = 1; + if (rc < 0) { + orig_ret = rc; + data = old_dr7; + } + goto restore; + } + return ((orig_ret < 0) ? orig_ret : rc); +} + +/* + * Handle PTRACE_PEEKUSR calls for the debug register area. + */ +unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) +{ + struct thread_struct *thread = &(tsk->thread); + unsigned long val = 0; + + if (n < HBP_NUM) + val = thread->debugreg[n]; + else if (n == 6) + val = thread->debugreg6; + else if (n == 7) + val = thread->debugreg7; + return val; +} + +/* + * Handle PTRACE_POKEUSR calls for the debug register area. + */ +int ptrace_set_debugreg(struct task_struct *tsk, int n, unsigned long val) +{ + struct thread_struct *thread = &(tsk->thread); + int rc = 0; + + /* There are no DR4 or DR5 registers */ + if (n == 4 || n == 5) + return -EIO; + + if (n == 6) { + tsk->thread.debugreg6 = val; + goto ret_path; + } + if (n < HBP_NUM) { + if (thread->hbp[n]) { + if (arch_check_va_in_userspace(val, + thread->hbp[n]->info.len) == 0) { + rc = -EIO; + goto ret_path; + } + thread->hbp[n]->info.address = val; + } + thread->debugreg[n] = val; + } + /* All that's left is DR7 */ + if (n == 7) + rc = ptrace_write_dr7(tsk, val); + +ret_path: + return rc; } /* From 17f557e5b5d43a2af66c969f6560ac7105020672 Mon Sep 17 00:00:00 2001 From: "K.Prasad" Date: Mon, 1 Jun 2009 23:46:03 +0530 Subject: [PATCH 009/470] hw-breakpoints: cleanup HW Breakpoint registers before kexec This patch disables Hardware breakpoints before doing a 'kexec' on the machine so that the cpu doesn't keep debug registers values which would be out of sync for the new image. 
Original-patch-by: Alan Stern Signed-off-by: K.Prasad Reviewed-by: Alan Stern Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/machine_kexec_32.c | 2 ++ arch/x86/kernel/machine_kexec_64.c | 2 ++ 2 files changed, 4 insertions(+) diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index c1c429d0013..c843f8406da 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c @@ -25,6 +25,7 @@ #include #include #include +#include static void set_idt(void *newidt, __u16 limit) { @@ -202,6 +203,7 @@ void machine_kexec(struct kimage *image) /* Interrupts aren't acceptable while we reboot */ local_irq_disable(); + hw_breakpoint_disable(); if (image->preserve_context) { #ifdef CONFIG_X86_IO_APIC diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 84c3bf209e9..4a8bb82248a 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -18,6 +18,7 @@ #include #include #include +#include static int init_one_level2_page(struct kimage *image, pgd_t *pgd, unsigned long addr) @@ -282,6 +283,7 @@ void machine_kexec(struct kimage *image) /* Interrupts aren't acceptable while we reboot */ local_irq_disable(); + hw_breakpoint_disable(); if (image->preserve_context) { #ifdef CONFIG_X86_IO_APIC From 432039933a16b8227b7b267f46ac1c1b9b3adf14 Mon Sep 17 00:00:00 2001 From: "K.Prasad" Date: Mon, 1 Jun 2009 23:46:20 +0530 Subject: [PATCH 010/470] hw-breakpoints: sample HW breakpoint over kernel data address This patch introduces a sample kernel module to demonstrate the use of Hardware Breakpoint feature. It places a breakpoint over the kernel variable 'pid_max' to monitor all write operations and emits a function-backtrace when done. Signed-off-by: K.Prasad Signed-off-by: Frederic Weisbecker --- samples/Kconfig | 6 ++ samples/Makefile | 3 +- samples/hw_breakpoint/Makefile | 1 + samples/hw_breakpoint/data_breakpoint.c | 83 +++++++++++++++++++++++++ 4 files changed, 92 insertions(+), 1 deletion(-) create mode 100644 samples/hw_breakpoint/Makefile create mode 100644 samples/hw_breakpoint/data_breakpoint.c diff --git a/samples/Kconfig b/samples/Kconfig index b75d28cba3f..8458516c693 100644 --- a/samples/Kconfig +++ b/samples/Kconfig @@ -45,5 +45,11 @@ config SAMPLE_KRETPROBES default m depends on SAMPLE_KPROBES && KRETPROBES +config SAMPLE_HW_BREAKPOINT + tristate "Build kernel hardware breakpoint examples -- loadable module only" + depends on HAVE_HW_BREAKPOINT && m + help + This builds kernel hardware breakpoint example modules. 
+ endif # SAMPLES diff --git a/samples/Makefile b/samples/Makefile index 13e4b470b53..42e17559877 100644 --- a/samples/Makefile +++ b/samples/Makefile @@ -1,3 +1,4 @@ # Makefile for Linux samples code -obj-$(CONFIG_SAMPLES) += markers/ kobject/ kprobes/ tracepoints/ trace_events/ +obj-$(CONFIG_SAMPLES) += markers/ kobject/ kprobes/ tracepoints/ \ + trace_events/ hw_breakpoint/ diff --git a/samples/hw_breakpoint/Makefile b/samples/hw_breakpoint/Makefile new file mode 100644 index 00000000000..0f5c31c2fc4 --- /dev/null +++ b/samples/hw_breakpoint/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_SAMPLE_HW_BREAKPOINT) += data_breakpoint.o diff --git a/samples/hw_breakpoint/data_breakpoint.c b/samples/hw_breakpoint/data_breakpoint.c new file mode 100644 index 00000000000..9cbdbb871b7 --- /dev/null +++ b/samples/hw_breakpoint/data_breakpoint.c @@ -0,0 +1,83 @@ +/* + * data_breakpoint.c - Sample HW Breakpoint file to watch kernel data address + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * usage: insmod data_breakpoint.ko ksym= + * + * This file is a kernel module that places a breakpoint over ksym_name kernel + * variable using Hardware Breakpoint register. The corresponding handler which + * prints a backtrace is invoked everytime a write operation is performed on + * that variable. 
+ * + * Copyright (C) IBM Corporation, 2009 + */ +#include /* Needed by all modules */ +#include /* Needed for KERN_INFO */ +#include /* Needed for the macros */ + +#include + +struct hw_breakpoint sample_hbp; + +static char ksym_name[KSYM_NAME_LEN] = "pid_max"; +module_param_string(ksym, ksym_name, KSYM_NAME_LEN, S_IRUGO); +MODULE_PARM_DESC(ksym, "Kernel symbol to monitor; this module will report any" + " write operations on the kernel symbol"); + +void sample_hbp_handler(struct hw_breakpoint *temp, struct pt_regs + *temp_regs) +{ + printk(KERN_INFO "%s value is changed\n", ksym_name); + dump_stack(); + printk(KERN_INFO "Dump stack from sample_hbp_handler\n"); +} + +static int __init hw_break_module_init(void) +{ + int ret; + +#ifdef CONFIG_X86 + sample_hbp.info.name = ksym_name; + sample_hbp.info.type = HW_BREAKPOINT_WRITE; + sample_hbp.info.len = HW_BREAKPOINT_LEN_4; +#endif /* CONFIG_X86 */ + + sample_hbp.triggered = (void *)sample_hbp_handler; + + ret = register_kernel_hw_breakpoint(&sample_hbp); + + if (ret < 0) { + printk(KERN_INFO "Breakpoint registration failed\n"); + return ret; + } else + printk(KERN_INFO "HW Breakpoint for %s write installed\n", + ksym_name); + + return 0; +} + +static void __exit hw_break_module_exit(void) +{ + unregister_kernel_hw_breakpoint(&sample_hbp); + printk(KERN_INFO "HW Breakpoint for %s write uninstalled\n", ksym_name); +} + +module_init(hw_break_module_init); +module_exit(hw_break_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("K.Prasad"); +MODULE_DESCRIPTION("ksym breakpoint"); From 0722db015c246204044299eae3b02d18d3ca4faf Mon Sep 17 00:00:00 2001 From: "K.Prasad" Date: Mon, 1 Jun 2009 23:46:40 +0530 Subject: [PATCH 011/470] hw-breakpoints: ftrace plugin for kernel symbol tracing using HW Breakpoint interfaces This patch adds an ftrace plugin to detect and profile memory access over kernel variables. It uses HW Breakpoint interfaces to 'watch memory addresses. Signed-off-by: K.Prasad Signed-off-by: Frederic Weisbecker --- kernel/trace/Kconfig | 21 ++ kernel/trace/Makefile | 1 + kernel/trace/trace.h | 23 ++ kernel/trace/trace_ksym.c | 525 ++++++++++++++++++++++++++++++++++ kernel/trace/trace_selftest.c | 53 ++++ 5 files changed, 623 insertions(+) create mode 100644 kernel/trace/trace_ksym.c diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index a508b9d2adb..d7f01e6e8ba 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -314,6 +314,27 @@ config POWER_TRACER power management decisions, specifically the C-state and P-state behavior. +config KSYM_TRACER + bool "Trace read and write access on kernel memory locations" + depends on HAVE_HW_BREAKPOINT + select TRACING + help + This tracer helps find read and write operations on any given kernel + symbol i.e. /proc/kallsyms. + +config PROFILE_KSYM_TRACER + bool "Profile all kernel memory accesses on 'watched' variables" + depends on KSYM_TRACER + help + This tracer profiles kernel accesses on variables watched through the + ksym tracer ftrace plugin. Depending upon the hardware, all read + and write operations on kernel variables can be monitored for + accesses. + + The results will be displayed in: + /debugfs/tracing/profile_ksym + + Say N if unsure. 
config STACK_TRACER bool "Trace max stack" diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 06b85850fab..658aace8c41 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -51,5 +51,6 @@ obj-$(CONFIG_EVENT_TRACING) += trace_export.o obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o +obj-$(CONFIG_KSYM_TRACER) += trace_ksym.o libftrace-y := ftrace.o diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 6e735d4771f..7d5cc37b8fc 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -15,6 +15,10 @@ #include #include +#ifdef CONFIG_KSYM_TRACER +#include +#endif + enum trace_type { __TRACE_FIRST_TYPE = 0, @@ -40,6 +44,7 @@ enum trace_type { TRACE_KMEM_FREE, TRACE_POWER, TRACE_BLK, + TRACE_KSYM, __TRACE_LAST_TYPE, }; @@ -207,6 +212,21 @@ struct syscall_trace_exit { unsigned long ret; }; +#define KSYM_SELFTEST_ENTRY "ksym_selftest_dummy" +extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr); + +struct trace_ksym { + struct trace_entry ent; + struct hw_breakpoint *ksym_hbp; + unsigned long ksym_addr; + unsigned long ip; +#ifdef CONFIG_PROFILE_KSYM_TRACER + unsigned long counter; +#endif + struct hlist_node ksym_hlist; + char ksym_name[KSYM_NAME_LEN]; + char p_name[TASK_COMM_LEN]; +}; /* * trace_flag_type is an enumeration that holds different @@ -323,6 +343,7 @@ extern void __ftrace_bad_type(void); TRACE_SYSCALL_ENTER); \ IF_ASSIGN(var, ent, struct syscall_trace_exit, \ TRACE_SYSCALL_EXIT); \ + IF_ASSIGN(var, ent, struct trace_ksym, TRACE_KSYM); \ __ftrace_bad_type(); \ } while (0) @@ -540,6 +561,8 @@ extern int trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr); extern int trace_selftest_startup_hw_branches(struct tracer *trace, struct trace_array *tr); +extern int trace_selftest_startup_ksym(struct tracer *trace, + struct trace_array *tr); #endif /* CONFIG_FTRACE_STARTUP_TEST */ extern void *head_page(struct trace_array_cpu *data); diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c new file mode 100644 index 00000000000..11c74f6404c --- /dev/null +++ b/kernel/trace/trace_ksym.c @@ -0,0 +1,525 @@ +/* + * trace_ksym.c - Kernel Symbol Tracer + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2009 + */ + +#include +#include +#include +#include +#include +#include + +#include "trace_output.h" +#include "trace_stat.h" +#include "trace.h" + +/* For now, let us restrict the no. of symbols traced simultaneously to number + * of available hardware breakpoint registers. 
+ */ +#define KSYM_TRACER_MAX HBP_NUM + +#define KSYM_TRACER_OP_LEN 3 /* rw- */ +#define KSYM_FILTER_ENTRY_LEN (KSYM_NAME_LEN + KSYM_TRACER_OP_LEN + 1) + +static struct trace_array *ksym_trace_array; + +static unsigned int ksym_filter_entry_count; +static unsigned int ksym_tracing_enabled; + +static HLIST_HEAD(ksym_filter_head); + +#ifdef CONFIG_PROFILE_KSYM_TRACER + +#define MAX_UL_INT 0xffffffff + +static DEFINE_MUTEX(ksym_tracer_mutex); + +void ksym_collect_stats(unsigned long hbp_hit_addr) +{ + struct hlist_node *node; + struct trace_ksym *entry; + + rcu_read_lock(); + hlist_for_each_entry_rcu(entry, node, &ksym_filter_head, ksym_hlist) { + if ((entry->ksym_addr == hbp_hit_addr) && + (entry->counter <= MAX_UL_INT)) { + entry->counter++; + break; + } + } + rcu_read_unlock(); +} +#endif /* CONFIG_PROFILE_KSYM_TRACER */ + +void ksym_hbp_handler(struct hw_breakpoint *hbp, struct pt_regs *regs) +{ + struct ring_buffer_event *event; + struct trace_array *tr; + struct trace_ksym *entry; + int pc; + + if (!ksym_tracing_enabled) + return; + + tr = ksym_trace_array; + pc = preempt_count(); + + event = trace_buffer_lock_reserve(tr, TRACE_KSYM, + sizeof(*entry), 0, pc); + if (!event) + return; + + entry = ring_buffer_event_data(event); + strlcpy(entry->ksym_name, hbp->info.name, KSYM_SYMBOL_LEN); + entry->ksym_hbp = hbp; + entry->ip = instruction_pointer(regs); + strlcpy(entry->p_name, current->comm, TASK_COMM_LEN); +#ifdef CONFIG_PROFILE_KSYM_TRACER + ksym_collect_stats(hbp->info.address); +#endif /* CONFIG_PROFILE_KSYM_TRACER */ + + trace_buffer_unlock_commit(tr, event, 0, pc); +} + +/* Valid access types are represented as + * + * rw- : Set Read/Write Access Breakpoint + * -w- : Set Write Access Breakpoint + * --- : Clear Breakpoints + * --x : Set Execution Break points (Not available yet) + * + */ +static int ksym_trace_get_access_type(char *access_str) +{ + int pos, access = 0; + + for (pos = 0; pos < KSYM_TRACER_OP_LEN; pos++) { + switch (access_str[pos]) { + case 'r': + access += (pos == 0) ? 4 : -1; + break; + case 'w': + access += (pos == 1) ? 2 : -1; + break; + case '-': + break; + default: + return -EINVAL; + } + } + + switch (access) { + case 6: + access = HW_BREAKPOINT_RW; + break; + case 2: + access = HW_BREAKPOINT_WRITE; + break; + case 0: + access = 0; + } + + return access; +} + +/* + * There can be several possible malformed requests and we attempt to capture + * all of them. We enumerate some of the rules + * 1. We will not allow kernel symbols with ':' since it is used as a delimiter. + * i.e. multiple ':' symbols disallowed. Possible uses are of the form + * ::. + * 2. No delimiter symbol ':' in the input string + * 3. Spurious operator symbols or symbols not in their respective positions + * 4. :--- i.e. clear breakpoint request when ksym_name not in file + * 5. Kernel symbol not a part of /proc/kallsyms + * 6. 
Duplicate requests + */ +static int parse_ksym_trace_str(char *input_string, char **ksymname, + unsigned long *addr) +{ + char *delimiter = ":"; + int ret; + + ret = -EINVAL; + *ksymname = strsep(&input_string, delimiter); + *addr = kallsyms_lookup_name(*ksymname); + + /* Check for malformed request: (2), (1) and (5) */ + if ((!input_string) || + (strlen(input_string) != (KSYM_TRACER_OP_LEN + 1)) || + (*addr == 0)) + goto return_code; + ret = ksym_trace_get_access_type(input_string); + +return_code: + return ret; +} + +int process_new_ksym_entry(char *ksymname, int op, unsigned long addr) +{ + struct trace_ksym *entry; + int ret; + + if (ksym_filter_entry_count >= KSYM_TRACER_MAX) { + printk(KERN_ERR "ksym_tracer: Maximum limit:(%d) reached. No" + " new requests for tracing can be accepted now.\n", + KSYM_TRACER_MAX); + return -ENOSPC; + } + + entry = kzalloc(sizeof(struct trace_ksym), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + entry->ksym_hbp = kzalloc(sizeof(struct hw_breakpoint), GFP_KERNEL); + if (!entry->ksym_hbp) { + kfree(entry); + return -ENOMEM; + } + + entry->ksym_hbp->info.name = ksymname; + entry->ksym_hbp->info.type = op; + entry->ksym_addr = entry->ksym_hbp->info.address = addr; +#ifdef CONFIG_X86 + entry->ksym_hbp->info.len = HW_BREAKPOINT_LEN_4; +#endif + entry->ksym_hbp->triggered = (void *)ksym_hbp_handler; + + ret = register_kernel_hw_breakpoint(entry->ksym_hbp); + if (ret < 0) { + printk(KERN_INFO "ksym_tracer request failed. Try again" + " later!!\n"); + kfree(entry->ksym_hbp); + kfree(entry); + return -EAGAIN; + } + hlist_add_head_rcu(&(entry->ksym_hlist), &ksym_filter_head); + ksym_filter_entry_count++; + + return 0; +} + +static ssize_t ksym_trace_filter_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct trace_ksym *entry; + struct hlist_node *node; + char buf[KSYM_FILTER_ENTRY_LEN * KSYM_TRACER_MAX]; + ssize_t ret, cnt = 0; + + mutex_lock(&ksym_tracer_mutex); + + hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) { + cnt += snprintf(&buf[cnt], KSYM_FILTER_ENTRY_LEN - cnt, "%s:", + entry->ksym_hbp->info.name); + if (entry->ksym_hbp->info.type == HW_BREAKPOINT_WRITE) + cnt += snprintf(&buf[cnt], KSYM_FILTER_ENTRY_LEN - cnt, + "-w-\n"); + else if (entry->ksym_hbp->info.type == HW_BREAKPOINT_RW) + cnt += snprintf(&buf[cnt], KSYM_FILTER_ENTRY_LEN - cnt, + "rw-\n"); + } + ret = simple_read_from_buffer(ubuf, count, ppos, buf, strlen(buf)); + mutex_unlock(&ksym_tracer_mutex); + + return ret; +} + +static ssize_t ksym_trace_filter_write(struct file *file, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct trace_ksym *entry; + struct hlist_node *node; + char *input_string, *ksymname = NULL; + unsigned long ksym_addr = 0; + int ret, op, changed = 0; + + /* Ignore echo "" > ksym_trace_filter */ + if (count == 0) + return 0; + + input_string = kzalloc(count, GFP_KERNEL); + if (!input_string) + return -ENOMEM; + + if (copy_from_user(input_string, buffer, count)) { + kfree(input_string); + return -EFAULT; + } + + ret = op = parse_ksym_trace_str(input_string, &ksymname, &ksym_addr); + if (ret < 0) { + kfree(input_string); + return ret; + } + + mutex_lock(&ksym_tracer_mutex); + + ret = -EINVAL; + hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) { + if (entry->ksym_addr == ksym_addr) { + /* Check for malformed request: (6) */ + if (entry->ksym_hbp->info.type != op) + changed = 1; + else + goto err_ret; + break; + } + } + if (changed) { + unregister_kernel_hw_breakpoint(entry->ksym_hbp); + 
entry->ksym_hbp->info.type = op; + if (op > 0) { + ret = register_kernel_hw_breakpoint(entry->ksym_hbp); + if (ret == 0) { + ret = count; + goto unlock_ret_path; + } + } + ksym_filter_entry_count--; + hlist_del_rcu(&(entry->ksym_hlist)); + synchronize_rcu(); + kfree(entry->ksym_hbp); + kfree(entry); + ret = count; + goto err_ret; + } else { + /* Check for malformed request: (4) */ + if (op == 0) + goto err_ret; + ret = process_new_ksym_entry(ksymname, op, ksym_addr); + if (ret) + goto err_ret; + } + ret = count; + goto unlock_ret_path; + +err_ret: + kfree(input_string); + +unlock_ret_path: + mutex_unlock(&ksym_tracer_mutex); + return ret; +} + +static const struct file_operations ksym_tracing_fops = { + .open = tracing_open_generic, + .read = ksym_trace_filter_read, + .write = ksym_trace_filter_write, +}; + +static void ksym_trace_reset(struct trace_array *tr) +{ + struct trace_ksym *entry; + struct hlist_node *node, *node1; + + ksym_tracing_enabled = 0; + + mutex_lock(&ksym_tracer_mutex); + hlist_for_each_entry_safe(entry, node, node1, &ksym_filter_head, + ksym_hlist) { + unregister_kernel_hw_breakpoint(entry->ksym_hbp); + ksym_filter_entry_count--; + hlist_del_rcu(&(entry->ksym_hlist)); + synchronize_rcu(); + /* Free the 'input_string' only if reset + * after startup self-test + */ +#ifdef CONFIG_FTRACE_SELFTEST + if (strncmp(entry->ksym_hbp->info.name, KSYM_SELFTEST_ENTRY, + strlen(KSYM_SELFTEST_ENTRY)) != 0) +#endif /* CONFIG_FTRACE_SELFTEST*/ + kfree(entry->ksym_hbp->info.name); + kfree(entry->ksym_hbp); + kfree(entry); + } + mutex_unlock(&ksym_tracer_mutex); +} + +static int ksym_trace_init(struct trace_array *tr) +{ + int cpu, ret = 0; + + for_each_online_cpu(cpu) + tracing_reset(tr, cpu); + ksym_tracing_enabled = 1; + ksym_trace_array = tr; + + return ret; +} + +static void ksym_trace_print_header(struct seq_file *m) +{ + + seq_puts(m, + "# TASK-PID CPU# Symbol Type " + "Function \n"); + seq_puts(m, + "# | | | | " + "| \n"); +} + +static enum print_line_t ksym_trace_output(struct trace_iterator *iter) +{ + struct trace_entry *entry = iter->ent; + struct trace_seq *s = &iter->seq; + struct trace_ksym *field; + char str[KSYM_SYMBOL_LEN]; + int ret; + + if (entry->type != TRACE_KSYM) + return TRACE_TYPE_UNHANDLED; + + trace_assign_type(field, entry); + + ret = trace_seq_printf(s, "%-15s %-5d %-3d %-20s ", field->p_name, + entry->pid, iter->cpu, field->ksym_name); + if (!ret) + return TRACE_TYPE_PARTIAL_LINE; + + switch (field->ksym_hbp->info.type) { + case HW_BREAKPOINT_WRITE: + ret = trace_seq_printf(s, " W "); + break; + case HW_BREAKPOINT_RW: + ret = trace_seq_printf(s, " RW "); + break; + default: + return TRACE_TYPE_PARTIAL_LINE; + } + + if (!ret) + return TRACE_TYPE_PARTIAL_LINE; + + sprint_symbol(str, field->ip); + ret = trace_seq_printf(s, "%-20s\n", str); + if (!ret) + return TRACE_TYPE_PARTIAL_LINE; + + return TRACE_TYPE_HANDLED; +} + +struct tracer ksym_tracer __read_mostly = +{ + .name = "ksym_tracer", + .init = ksym_trace_init, + .reset = ksym_trace_reset, +#ifdef CONFIG_FTRACE_SELFTEST + .selftest = trace_selftest_startup_ksym, +#endif + .print_header = ksym_trace_print_header, + .print_line = ksym_trace_output +}; + +__init static int init_ksym_trace(void) +{ + struct dentry *d_tracer; + struct dentry *entry; + + d_tracer = tracing_init_dentry(); + ksym_filter_entry_count = 0; + + entry = debugfs_create_file("ksym_trace_filter", 0644, d_tracer, + NULL, &ksym_tracing_fops); + if (!entry) + pr_warning("Could not create debugfs " + "'ksym_trace_filter' file\n"); + + return 
register_tracer(&ksym_tracer); +} +device_initcall(init_ksym_trace); + + +#ifdef CONFIG_PROFILE_KSYM_TRACER +static int ksym_tracer_stat_headers(struct seq_file *m) +{ + seq_printf(m, " Access type "); + seq_printf(m, " Symbol Counter \n"); + return 0; +} + +static int ksym_tracer_stat_show(struct seq_file *m, void *v) +{ + struct hlist_node *stat = v; + struct trace_ksym *entry; + int access_type = 0; + char fn_name[KSYM_NAME_LEN]; + + entry = hlist_entry(stat, struct trace_ksym, ksym_hlist); + + if (entry->ksym_hbp) + access_type = entry->ksym_hbp->info.type; + + switch (access_type) { + case HW_BREAKPOINT_WRITE: + seq_printf(m, " W "); + break; + case HW_BREAKPOINT_RW: + seq_printf(m, " RW "); + break; + default: + seq_printf(m, " NA "); + } + + if (lookup_symbol_name(entry->ksym_addr, fn_name) >= 0) + seq_printf(m, " %s ", fn_name); + else + seq_printf(m, " "); + + seq_printf(m, "%15lu\n", entry->counter); + return 0; +} + +static void *ksym_tracer_stat_start(struct tracer_stat *trace) +{ + return &(ksym_filter_head.first); +} + +static void * +ksym_tracer_stat_next(void *v, int idx) +{ + struct hlist_node *stat = v; + + return stat->next; +} + +static struct tracer_stat ksym_tracer_stats = { + .name = "ksym_tracer", + .stat_start = ksym_tracer_stat_start, + .stat_next = ksym_tracer_stat_next, + .stat_headers = ksym_tracer_stat_headers, + .stat_show = ksym_tracer_stat_show +}; + +__init static int ksym_tracer_stat_init(void) +{ + int ret; + + ret = register_stat_tracer(&ksym_tracer_stats); + if (ret) { + printk(KERN_WARNING "Warning: could not register " + "ksym tracer stats\n"); + return 1; + } + + return 0; +} +fs_initcall(ksym_tracer_stat_init); +#endif /* CONFIG_PROFILE_KSYM_TRACER */ diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 00dd6485bdd..71f2edb0fd8 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -17,6 +17,7 @@ static inline int trace_valid_entry(struct trace_entry *entry) case TRACE_GRAPH_ENT: case TRACE_GRAPH_RET: case TRACE_HW_BRANCHES: + case TRACE_KSYM: return 1; } return 0; @@ -807,3 +808,55 @@ trace_selftest_startup_hw_branches(struct tracer *trace, return ret; } #endif /* CONFIG_HW_BRANCH_TRACER */ + +#ifdef CONFIG_KSYM_TRACER +static int ksym_selftest_dummy; + +int +trace_selftest_startup_ksym(struct tracer *trace, struct trace_array *tr) +{ + unsigned long count; + int ret; + + /* start the tracing */ + ret = tracer_init(trace, tr); + if (ret) { + warn_failed_init_tracer(trace, ret); + return ret; + } + + ksym_selftest_dummy = 0; + /* Register the read-write tracing request */ + ret = process_new_ksym_entry(KSYM_SELFTEST_ENTRY, HW_BREAKPOINT_RW, + (unsigned long)(&ksym_selftest_dummy)); + + if (ret < 0) { + printk(KERN_CONT "ksym_trace read-write startup test failed\n"); + goto ret_path; + } + /* Perform a read and a write operation over the dummy variable to + * trigger the tracer + */ + if (ksym_selftest_dummy == 0) + ksym_selftest_dummy++; + + /* stop the tracing. 
*/ + tracing_stop(); + /* check the trace buffer */ + ret = trace_test_buffer(tr, &count); + trace->reset(tr); + tracing_start(); + + /* read & write operations - one each is performed on the dummy variable + * triggering two entries in the trace buffer + */ + if (!ret && count != 2) { + printk(KERN_CONT "Ksym tracer startup test failed"); + ret = -1; + } + +ret_path: + return ret; +} +#endif /* CONFIG_KSYM_TRACER */ + From 62edab9056a6cf0c9207339c8892c923a5217e45 Mon Sep 17 00:00:00 2001 From: "K.Prasad" Date: Mon, 1 Jun 2009 23:47:06 +0530 Subject: [PATCH 012/470] hw-breakpoints: reset bits in dr6 after the corresponding exception is handled This patch resets the bit in dr6 after the corresponding exception is handled in code, so that we keep a clean track of the current virtual debug status register. [ Impact: keep track of breakpoints triggering completion ] Signed-off-by: K.Prasad Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/hw_breakpoint.c | 13 +++++++++++-- arch/x86/kernel/kgdb.c | 6 ++++++ arch/x86/kernel/kprobes.c | 9 ++++++++- arch/x86/kernel/traps.c | 4 ++-- arch/x86/mm/kmmio.c | 8 +++++++- 5 files changed, 34 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index 4867c9f3b5f..69451473dbd 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -314,8 +314,12 @@ int __kprobes hw_breakpoint_handler(struct die_args *args) { int i, cpu, rc = NOTIFY_STOP; struct hw_breakpoint *bp; - /* The DR6 value is stored in args->err */ - unsigned long dr7, dr6 = args->err; + unsigned long dr7, dr6; + unsigned long *dr6_p; + + /* The DR6 value is pointed by args->err */ + dr6_p = (unsigned long *)ERR_PTR(args->err); + dr6 = *dr6_p; /* Do an early return if no trap bits are set in DR6 */ if ((dr6 & DR_TRAP_BITS) == 0) @@ -351,6 +355,11 @@ int __kprobes hw_breakpoint_handler(struct die_args *args) if (bp) rc = NOTIFY_DONE; } + /* + * Reset the 'i'th TRAP bit in dr6 to denote completion of + * exception handling + */ + (*dr6_p) &= ~(DR_TRAP0 << i); /* * bp can be NULL due to lazy debug register switching * or due to the delay between updates of hbp_kernel_pos diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index b1f4dffb919..f820b73c7f2 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -43,6 +43,7 @@ #include #include +#include #include #include @@ -434,6 +435,11 @@ single_step_cont(struct pt_regs *regs, struct die_args *args) "resuming...\n"); kgdb_arch_handle_exception(args->trapnr, args->signr, args->err, "c", "", regs); + /* + * Reset the BS bit in dr6 (pointed by args->err) to + * denote completion of processing + */ + (*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP; return NOTIFY_STOP; } diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 7b5169d2b00..b5b1848c533 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -54,6 +54,7 @@ #include #include #include +#include void jprobe_return_end(void); @@ -967,8 +968,14 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, ret = NOTIFY_STOP; break; case DIE_DEBUG: - if (post_kprobe_handler(args->regs)) + if (post_kprobe_handler(args->regs)) { + /* + * Reset the BS bit in dr6 (pointed by args->err) to + * denote completion of processing + */ + (*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP; ret = NOTIFY_STOP; + } break; case DIE_GPF: /* diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index de9913247dd..124a4d5a95b 100644 --- 
a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -545,8 +545,8 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) /* Store the virtualized DR6 value */ tsk->thread.debugreg6 = dr6; - if (notify_die(DIE_DEBUG, "debug", regs, dr6, error_code, - SIGTRAP) == NOTIFY_STOP) + if (notify_die(DIE_DEBUG, "debug", regs, PTR_ERR(&dr6), error_code, + SIGTRAP) == NOTIFY_STOP) return; /* It's safe to allow irq's after DR6 has been saved */ diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c index 16ccbd77917..11a4ad4d625 100644 --- a/arch/x86/mm/kmmio.c +++ b/arch/x86/mm/kmmio.c @@ -540,8 +540,14 @@ kmmio_die_notifier(struct notifier_block *nb, unsigned long val, void *args) struct die_args *arg = args; if (val == DIE_DEBUG && (arg->err & DR_STEP)) - if (post_kmmio_handler(arg->err, arg->regs) == 1) + if (post_kmmio_handler(arg->err, arg->regs) == 1) { + /* + * Reset the BS bit in dr6 (pointed by args->err) to + * denote completion of processing + */ + (*(unsigned long *)ERR_PTR(arg->err)) &= ~DR_STEP; return NOTIFY_STOP; + } return NOTIFY_DONE; } From 73874005cd8800440be4299bd095387fff4b90ac Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 3 Jun 2009 01:43:38 +0200 Subject: [PATCH 013/470] hw-breakpoints: fix undeclared ksym_tracer_mutex MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ksym_tracer_mutex is declared inside an #ifdef CONFIG_PROFILE_KSYM_TRACER section. This makes it unavailable for the hardware breakpoint tracer if it is configured without the breakpoint profiler. This patch fixes the following build error: kernel/trace/trace_ksym.c: In function ‘ksym_trace_filter_read’: kernel/trace/trace_ksym.c:226: erreur: ‘ksym_tracer_mutex’ undeclared (first use in this function) kernel/trace/trace_ksym.c:226: erreur: (Each undeclared identifier is reported only once kernel/trace/trace_ksym.c:226: erreur: for each function it appears in.) kernel/trace/trace_ksym.c: In function ‘ksym_trace_filter_write’: kernel/trace/trace_ksym.c:273: erreur: ‘ksym_tracer_mutex’ undeclared (first use in this function) kernel/trace/trace_ksym.c: In function ‘ksym_trace_reset’: kernel/trace/trace_ksym.c:335: erreur: ‘ksym_tracer_mutex’ undeclared (first use in this function) make[1]: *** [kernel/trace/trace_ksym.o] Erreur 1 [ Impact: fix a build error ] Reported-by: Ingo Molnar Signed-off-by: Frederic Weisbecker --- kernel/trace/trace_ksym.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c index 11c74f6404c..eef97e7c8db 100644 --- a/kernel/trace/trace_ksym.c +++ b/kernel/trace/trace_ksym.c @@ -44,12 +44,12 @@ static unsigned int ksym_tracing_enabled; static HLIST_HEAD(ksym_filter_head); +static DEFINE_MUTEX(ksym_tracer_mutex); + #ifdef CONFIG_PROFILE_KSYM_TRACER #define MAX_UL_INT 0xffffffff -static DEFINE_MUTEX(ksym_tracer_mutex); - void ksym_collect_stats(unsigned long hbp_hit_addr) { struct hlist_node *node; From 4555835b707d5c778ee1c9076670bc99b1eeaf61 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Wed, 17 Jun 2009 14:44:19 +0530 Subject: [PATCH 014/470] x86: hw_breakpoint.c arch_check_va_in_kernelspace and hw_breakpoint_handler should be static arch_check_va_in_kernelspace() and hw_breakpoint_handler() is used only by same file so it should be static. 
Also fixed non-ANSI function declaration of function 'arch_uninstall_thread_hw_breakpoint' Fixed following sparse warnings : arch/x86/kernel/hw_breakpoint.c:124:42: warning: non-ANSI function declaration of function 'arch_uninstall_thread_hw_breakpoint' arch/x86/kernel/hw_breakpoint.c:169:5: warning: symbol 'arch_check_va_in_kernelspace' was not declared. Should it be static? arch/x86/kernel/hw_breakpoint.c:313:15: warning: symbol 'hw_breakpoint_handler' was not declared. Should it be static? Signed-off-by: Jaswinder Singh Rajput Cc: Alan Stern Cc: "K.Prasad" Cc: Frederic Weisbecker LKML-Reference: <1245230059.2662.4.camel@ht.satnam> Signed-off-by: Ingo Molnar --- arch/x86/kernel/hw_breakpoint.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index 51d959528b1..9316a9de4de 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -121,7 +121,7 @@ void arch_install_thread_hw_breakpoint(struct task_struct *tsk) /* * Install the debug register values for just the kernel, no thread. */ -void arch_uninstall_thread_hw_breakpoint() +void arch_uninstall_thread_hw_breakpoint(void) { /* Clear the user-space portion of debugreg7 by setting only kdr7 */ set_debugreg(kdr7, 7); @@ -166,7 +166,7 @@ int arch_check_va_in_userspace(unsigned long va, u8 hbp_len) /* * Check for virtual address in kernel space. */ -int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len) +static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len) { unsigned int len; @@ -310,7 +310,7 @@ void arch_flush_thread_hw_breakpoint(struct task_struct *tsk) * NOTIFY_STOP returned for all other cases * */ -int __kprobes hw_breakpoint_handler(struct die_args *args) +static int __kprobes hw_breakpoint_handler(struct die_args *args) { int i, cpu, rc = NOTIFY_STOP; struct hw_breakpoint *bp; From 9d22b536609abf0d64648f99518676ea58245e3b Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Wed, 1 Jul 2009 19:52:30 +0530 Subject: [PATCH 015/470] x86: Mark ptrace_get_debugreg() as static This sparse warning: arch/x86/kernel/ptrace.c:560:15: warning: symbol 'ptrace_get_debugreg' was not declared. Should it be static? triggers because ptrace_get_debugreg() is global but is only used in a single .c file. change ptrace_get_debugreg() to static to fix that - this also addresses the sparse warning. Signed-off-by: Jaswinder Singh Rajput Cc: Steven Rostedt LKML-Reference: <1246458150.6940.19.camel@hpdv5.satnam> Signed-off-by: Ingo Molnar --- arch/x86/kernel/ptrace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index b457f78b7db..cabdabce3cb 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -557,7 +557,7 @@ restore: /* * Handle PTRACE_PEEKUSR calls for the debug register area. */ -unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) +static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) { struct thread_struct *thread = &(tsk->thread); unsigned long val = 0; From db59504d89db1462a5281fb55b1d962cb74a398f Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 7 Jul 2009 13:52:36 +0800 Subject: [PATCH 016/470] ksym_tracer: Extract trace entry from struct trace_ksym struct trace_ksym is used as an entry in hbp list, and is also used as trace_entry stored in ring buffer. This is not necessary and is a waste of memory in ring buffer. 
There is also a bug that dereferencing field->ksym_hbp in ksym_trace_output() can be invalid. Signed-off-by: Li Zefan Acked-by: Frederic Weisbecker Cc: "K.Prasad" Cc: Alan Stern Cc: Steven Rostedt LKML-Reference: <4A52E2A4.4050007@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace.h | 13 ++++--------- kernel/trace/trace_ksym.c | 26 ++++++++++++++++++-------- 2 files changed, 22 insertions(+), 17 deletions(-) diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 7d5cc37b8fc..ff1ef411a17 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -215,17 +215,12 @@ struct syscall_trace_exit { #define KSYM_SELFTEST_ENTRY "ksym_selftest_dummy" extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr); -struct trace_ksym { +struct ksym_trace_entry { struct trace_entry ent; - struct hw_breakpoint *ksym_hbp; - unsigned long ksym_addr; unsigned long ip; -#ifdef CONFIG_PROFILE_KSYM_TRACER - unsigned long counter; -#endif - struct hlist_node ksym_hlist; + unsigned char type; char ksym_name[KSYM_NAME_LEN]; - char p_name[TASK_COMM_LEN]; + char cmd[TASK_COMM_LEN]; }; /* @@ -343,7 +338,7 @@ extern void __ftrace_bad_type(void); TRACE_SYSCALL_ENTER); \ IF_ASSIGN(var, ent, struct syscall_trace_exit, \ TRACE_SYSCALL_EXIT); \ - IF_ASSIGN(var, ent, struct trace_ksym, TRACE_KSYM); \ + IF_ASSIGN(var, ent, struct ksym_trace_entry, TRACE_KSYM);\ __ftrace_bad_type(); \ } while (0) diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c index eef97e7c8db..085ff055fdf 100644 --- a/kernel/trace/trace_ksym.c +++ b/kernel/trace/trace_ksym.c @@ -37,6 +37,15 @@ #define KSYM_TRACER_OP_LEN 3 /* rw- */ #define KSYM_FILTER_ENTRY_LEN (KSYM_NAME_LEN + KSYM_TRACER_OP_LEN + 1) +struct trace_ksym { + struct hw_breakpoint *ksym_hbp; + unsigned long ksym_addr; +#ifdef CONFIG_PROFILE_KSYM_TRACER + unsigned long counter; +#endif + struct hlist_node ksym_hlist; +}; + static struct trace_array *ksym_trace_array; static unsigned int ksym_filter_entry_count; @@ -71,7 +80,7 @@ void ksym_hbp_handler(struct hw_breakpoint *hbp, struct pt_regs *regs) { struct ring_buffer_event *event; struct trace_array *tr; - struct trace_ksym *entry; + struct ksym_trace_entry *entry; int pc; if (!ksym_tracing_enabled) @@ -85,11 +94,12 @@ void ksym_hbp_handler(struct hw_breakpoint *hbp, struct pt_regs *regs) if (!event) return; - entry = ring_buffer_event_data(event); + entry = ring_buffer_event_data(event); + entry->ip = instruction_pointer(regs); + entry->type = hbp->info.type; strlcpy(entry->ksym_name, hbp->info.name, KSYM_SYMBOL_LEN); - entry->ksym_hbp = hbp; - entry->ip = instruction_pointer(regs); - strlcpy(entry->p_name, current->comm, TASK_COMM_LEN); + strlcpy(entry->cmd, current->comm, TASK_COMM_LEN); + #ifdef CONFIG_PROFILE_KSYM_TRACER ksym_collect_stats(hbp->info.address); #endif /* CONFIG_PROFILE_KSYM_TRACER */ @@ -380,7 +390,7 @@ static enum print_line_t ksym_trace_output(struct trace_iterator *iter) { struct trace_entry *entry = iter->ent; struct trace_seq *s = &iter->seq; - struct trace_ksym *field; + struct ksym_trace_entry *field; char str[KSYM_SYMBOL_LEN]; int ret; @@ -389,12 +399,12 @@ static enum print_line_t ksym_trace_output(struct trace_iterator *iter) trace_assign_type(field, entry); - ret = trace_seq_printf(s, "%-15s %-5d %-3d %-20s ", field->p_name, + ret = trace_seq_printf(s, "%-15s %-5d %-3d %-20s ", field->cmd, entry->pid, iter->cpu, field->ksym_name); if (!ret) return TRACE_TYPE_PARTIAL_LINE; - switch (field->ksym_hbp->info.type) { + switch (field->type) { case 
HW_BREAKPOINT_WRITE: ret = trace_seq_printf(s, " W "); break; From be9742e6cb107fe1d77db7a081ea4eb25e79e1ad Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 7 Jul 2009 13:52:52 +0800 Subject: [PATCH 017/470] ksym_tracer: Rewrite ksym_trace_filter_read() Reading ksym_trace_filter gave me some arbitrary characters, when it should show nothing. It's because buf is not initialized when there's no filter. Also reduce stack usage by about 512 bytes. Signed-off-by: Li Zefan Acked-by: Frederic Weisbecker Cc: "K.Prasad" Cc: Alan Stern Cc: Steven Rostedt LKML-Reference: <4A52E2B4.6030706@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_ksym.c | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c index 085ff055fdf..b6710d31bdf 100644 --- a/kernel/trace/trace_ksym.c +++ b/kernel/trace/trace_ksym.c @@ -35,7 +35,6 @@ #define KSYM_TRACER_MAX HBP_NUM #define KSYM_TRACER_OP_LEN 3 /* rw- */ -#define KSYM_FILTER_ENTRY_LEN (KSYM_NAME_LEN + KSYM_TRACER_OP_LEN + 1) struct trace_ksym { struct hw_breakpoint *ksym_hbp; @@ -230,25 +229,33 @@ static ssize_t ksym_trace_filter_read(struct file *filp, char __user *ubuf, { struct trace_ksym *entry; struct hlist_node *node; - char buf[KSYM_FILTER_ENTRY_LEN * KSYM_TRACER_MAX]; - ssize_t ret, cnt = 0; + struct trace_seq *s; + ssize_t cnt = 0; + int ret; + + s = kmalloc(sizeof(*s), GFP_KERNEL); + if (!s) + return -ENOMEM; + trace_seq_init(s); mutex_lock(&ksym_tracer_mutex); hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) { - cnt += snprintf(&buf[cnt], KSYM_FILTER_ENTRY_LEN - cnt, "%s:", - entry->ksym_hbp->info.name); + ret = trace_seq_printf(s, "%s:", entry->ksym_hbp->info.name); if (entry->ksym_hbp->info.type == HW_BREAKPOINT_WRITE) - cnt += snprintf(&buf[cnt], KSYM_FILTER_ENTRY_LEN - cnt, - "-w-\n"); + ret = trace_seq_puts(s, "-w-\n"); else if (entry->ksym_hbp->info.type == HW_BREAKPOINT_RW) - cnt += snprintf(&buf[cnt], KSYM_FILTER_ENTRY_LEN - cnt, - "rw-\n"); + ret = trace_seq_puts(s, "rw-\n"); + WARN_ON_ONCE(!ret); } - ret = simple_read_from_buffer(ubuf, count, ppos, buf, strlen(buf)); + + cnt = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len); + mutex_unlock(&ksym_tracer_mutex); - return ret; + kfree(s); + + return cnt; } static ssize_t ksym_trace_filter_write(struct file *file, From f088e5471297cc78d7465e1fd997cb1a91a48019 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 7 Jul 2009 13:53:18 +0800 Subject: [PATCH 018/470] ksym_tracer: Fix validation of access type # echo 'pid_max:rw-' > ksym_trace_filter # cat ksym_trace_filter pid_max:rw- # echo 'pid_max:ww-' > ksym_trace_filter (should return -EINVAL) # cat ksym_trace_filter (but it ended up removing filter entry) Signed-off-by: Li Zefan Acked-by: Frederic Weisbecker Cc: "K.Prasad" Cc: Alan Stern Cc: Steven Rostedt LKML-Reference: <4A52E2CE.6080409@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_ksym.c | 32 ++++++++++++++------------------ 1 file changed, 14 insertions(+), 18 deletions(-) diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c index b6710d31bdf..95560092990 100644 --- a/kernel/trace/trace_ksym.c +++ b/kernel/trace/trace_ksym.c @@ -114,24 +114,22 @@ void ksym_hbp_handler(struct hw_breakpoint *hbp, struct pt_regs *regs) * --x : Set Execution Break points (Not available yet) * */ -static int ksym_trace_get_access_type(char *access_str) +static int ksym_trace_get_access_type(char *str) { - int pos, access = 0; + int access = 0; - 
for (pos = 0; pos < KSYM_TRACER_OP_LEN; pos++) { - switch (access_str[pos]) { - case 'r': - access += (pos == 0) ? 4 : -1; - break; - case 'w': - access += (pos == 1) ? 2 : -1; - break; - case '-': - break; - default: - return -EINVAL; - } - } + if (str[0] == 'r') + access += 4; + else if (str[0] != '-') + return -EINVAL; + + if (str[1] == 'w') + access += 2; + else if (str[1] != '-') + return -EINVAL; + + if (str[2] != '-') + return -EINVAL; switch (access) { case 6: @@ -140,8 +138,6 @@ static int ksym_trace_get_access_type(char *access_str) case 2: access = HW_BREAKPOINT_WRITE; break; - case 0: - access = 0; } return access; From 92cf9f8f7e89c6bdbb1a724f879b8b18fc0dfe0f Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 7 Jul 2009 13:53:47 +0800 Subject: [PATCH 019/470] ksym_tracer: Fix validation of length of access type Don't take newline into account, otherwise: # echo 'pid_max:-w-' > ksym_trace_filter # echo -n 'pid_max:rw-' > ksym_trace_filter bash: echo: write error: Invalid argument Signed-off-by: Li Zefan Acked-by: Frederic Weisbecker Cc: "K.Prasad" Cc: Alan Stern Cc: Steven Rostedt LKML-Reference: <4A52E2EB.9070503@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_ksym.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c index 95560092990..72fcb46c39c 100644 --- a/kernel/trace/trace_ksym.c +++ b/kernel/trace/trace_ksym.c @@ -158,21 +158,21 @@ static int ksym_trace_get_access_type(char *str) static int parse_ksym_trace_str(char *input_string, char **ksymname, unsigned long *addr) { - char *delimiter = ":"; int ret; - ret = -EINVAL; - *ksymname = strsep(&input_string, delimiter); + strstrip(input_string); + + *ksymname = strsep(&input_string, ":"); *addr = kallsyms_lookup_name(*ksymname); /* Check for malformed request: (2), (1) and (5) */ if ((!input_string) || - (strlen(input_string) != (KSYM_TRACER_OP_LEN + 1)) || - (*addr == 0)) - goto return_code; + (strlen(input_string) != KSYM_TRACER_OP_LEN) || + (*addr == 0)) + return -EINVAL;; + ret = ksym_trace_get_access_type(input_string); -return_code: return ret; } From 011ed56853e07e30653d6f1bfddc56b396218664 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 7 Jul 2009 13:54:08 +0800 Subject: [PATCH 020/470] ksym_tracer: NIL-terminate user input filter Make sure the user input string is NULL-terminated. 
Signed-off-by: Li Zefan Acked-by: Frederic Weisbecker Cc: "K.Prasad" Cc: Alan Stern Cc: Steven Rostedt LKML-Reference: <4A52E300.7020601@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_ksym.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c index 72fcb46c39c..8cbed5a6286 100644 --- a/kernel/trace/trace_ksym.c +++ b/kernel/trace/trace_ksym.c @@ -264,11 +264,7 @@ static ssize_t ksym_trace_filter_write(struct file *file, unsigned long ksym_addr = 0; int ret, op, changed = 0; - /* Ignore echo "" > ksym_trace_filter */ - if (count == 0) - return 0; - - input_string = kzalloc(count, GFP_KERNEL); + input_string = kzalloc(count + 1, GFP_KERNEL); if (!input_string) return -ENOMEM; @@ -276,6 +272,7 @@ static ssize_t ksym_trace_filter_write(struct file *file, kfree(input_string); return -EFAULT; } + input_string[count] = '\0'; ret = op = parse_ksym_trace_str(input_string, &ksymname, &ksym_addr); if (ret < 0) { From 0d109c8f70eab8b9f693bd5caea23012394e4876 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 7 Jul 2009 13:54:28 +0800 Subject: [PATCH 021/470] ksym_tracer: Report error when failed to re-register hbp When access type is changed, the hw break point will be unregistered and then be registered again with new access type. But the registration may fail, in this case, -errno should be returned. Signed-off-by: Li Zefan Acked-by: Frederic Weisbecker Cc: "K.Prasad" Cc: Alan Stern Cc: Steven Rostedt LKML-Reference: <4A52E314.7070004@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_ksym.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c index 8cbed5a6286..891e3b86b3f 100644 --- a/kernel/trace/trace_ksym.c +++ b/kernel/trace/trace_ksym.c @@ -302,13 +302,13 @@ static ssize_t ksym_trace_filter_write(struct file *file, ret = count; goto unlock_ret_path; } - } + } else + ret = count; ksym_filter_entry_count--; hlist_del_rcu(&(entry->ksym_hlist)); synchronize_rcu(); kfree(entry->ksym_hbp); kfree(entry); - ret = count; goto err_ret; } else { /* Check for malformed request: (4) */ From 558df6c8f74ac4a0b9026ef85b0028280f364d96 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 7 Jul 2009 13:54:48 +0800 Subject: [PATCH 022/470] ksym_tracer: Fix memory leak - When remove a filter, we leak entry->ksym_hbp->info.name. - With CONFIG_FTRAC_SELFTEST enabled, we leak ->info.name: # echo ksym_tracer > current_tracer # echo 'ksym_selftest_dummy:rw-' > ksym_trace_filter # echo nop > current_tracer Signed-off-by: Li Zefan Acked-by: Frederic Weisbecker Cc: "K.Prasad" Cc: Alan Stern Cc: Steven Rostedt LKML-Reference: <4A52E328.8010200@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_ksym.c | 61 +++++++++++++++++---------------------- 1 file changed, 27 insertions(+), 34 deletions(-) diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c index 891e3b86b3f..7d349d34a0d 100644 --- a/kernel/trace/trace_ksym.c +++ b/kernel/trace/trace_ksym.c @@ -179,7 +179,7 @@ static int parse_ksym_trace_str(char *input_string, char **ksymname, int process_new_ksym_entry(char *ksymname, int op, unsigned long addr) { struct trace_ksym *entry; - int ret; + int ret = -ENOMEM; if (ksym_filter_entry_count >= KSYM_TRACER_MAX) { printk(KERN_ERR "ksym_tracer: Maximum limit:(%d) reached. 
No" @@ -193,12 +193,13 @@ int process_new_ksym_entry(char *ksymname, int op, unsigned long addr) return -ENOMEM; entry->ksym_hbp = kzalloc(sizeof(struct hw_breakpoint), GFP_KERNEL); - if (!entry->ksym_hbp) { - kfree(entry); - return -ENOMEM; - } + if (!entry->ksym_hbp) + goto err; + + entry->ksym_hbp->info.name = kstrdup(ksymname, GFP_KERNEL); + if (!entry->ksym_hbp->info.name) + goto err; - entry->ksym_hbp->info.name = ksymname; entry->ksym_hbp->info.type = op; entry->ksym_addr = entry->ksym_hbp->info.address = addr; #ifdef CONFIG_X86 @@ -210,14 +211,18 @@ int process_new_ksym_entry(char *ksymname, int op, unsigned long addr) if (ret < 0) { printk(KERN_INFO "ksym_tracer request failed. Try again" " later!!\n"); - kfree(entry->ksym_hbp); - kfree(entry); - return -EAGAIN; + ret = -EAGAIN; + goto err; } hlist_add_head_rcu(&(entry->ksym_hlist), &ksym_filter_head); ksym_filter_entry_count++; - return 0; +err: + if (entry->ksym_hbp) + kfree(entry->ksym_hbp->info.name); + kfree(entry->ksym_hbp); + kfree(entry); + return ret; } static ssize_t ksym_trace_filter_read(struct file *filp, char __user *ubuf, @@ -289,7 +294,7 @@ static ssize_t ksym_trace_filter_write(struct file *file, if (entry->ksym_hbp->info.type != op) changed = 1; else - goto err_ret; + goto out; break; } } @@ -298,34 +303,29 @@ static ssize_t ksym_trace_filter_write(struct file *file, entry->ksym_hbp->info.type = op; if (op > 0) { ret = register_kernel_hw_breakpoint(entry->ksym_hbp); - if (ret == 0) { - ret = count; - goto unlock_ret_path; - } - } else - ret = count; + if (ret == 0) + goto out; + } ksym_filter_entry_count--; hlist_del_rcu(&(entry->ksym_hlist)); synchronize_rcu(); + kfree(entry->ksym_hbp->info.name); kfree(entry->ksym_hbp); kfree(entry); - goto err_ret; + goto out; } else { /* Check for malformed request: (4) */ if (op == 0) - goto err_ret; + goto out; ret = process_new_ksym_entry(ksymname, op, ksym_addr); - if (ret) - goto err_ret; } - ret = count; - goto unlock_ret_path; +out: + mutex_unlock(&ksym_tracer_mutex); -err_ret: kfree(input_string); -unlock_ret_path: - mutex_unlock(&ksym_tracer_mutex); + if (!ret) + ret = count; return ret; } @@ -349,14 +349,7 @@ static void ksym_trace_reset(struct trace_array *tr) ksym_filter_entry_count--; hlist_del_rcu(&(entry->ksym_hlist)); synchronize_rcu(); - /* Free the 'input_string' only if reset - * after startup self-test - */ -#ifdef CONFIG_FTRACE_SELFTEST - if (strncmp(entry->ksym_hbp->info.name, KSYM_SELFTEST_ENTRY, - strlen(KSYM_SELFTEST_ENTRY)) != 0) -#endif /* CONFIG_FTRACE_SELFTEST*/ - kfree(entry->ksym_hbp->info.name); + kfree(entry->ksym_hbp->info.name); kfree(entry->ksym_hbp); kfree(entry); } From 9d7e934408b52cd53dd85270eb36941a6a318cc5 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 7 Jul 2009 13:55:18 +0800 Subject: [PATCH 023/470] ksym_tracer: Fix the output of stat tracing - make ksym_tracer_stat_start() return head->first instead of &head->first - make the output properly aligned Before: Access type Symbol Counter NA 0 RW pid_max 0 After: Access Type Symbol Counter ----------- ------ ------- RW pid_max 0 Signed-off-by: Li Zefan Acked-by: Frederic Weisbecker Cc: "K.Prasad" Cc: Alan Stern Cc: Steven Rostedt LKML-Reference: <4A52E346.5050608@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_ksym.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c index 7d349d34a0d..1256a6e8ee2 100644 --- a/kernel/trace/trace_ksym.c +++ b/kernel/trace/trace_ksym.c @@ 
-453,8 +453,10 @@ device_initcall(init_ksym_trace); #ifdef CONFIG_PROFILE_KSYM_TRACER static int ksym_tracer_stat_headers(struct seq_file *m) { - seq_printf(m, " Access type "); - seq_printf(m, " Symbol Counter \n"); + seq_puts(m, " Access Type "); + seq_puts(m, " Symbol Counter\n"); + seq_puts(m, " ----------- "); + seq_puts(m, " ------ -------\n"); return 0; } @@ -472,27 +474,27 @@ static int ksym_tracer_stat_show(struct seq_file *m, void *v) switch (access_type) { case HW_BREAKPOINT_WRITE: - seq_printf(m, " W "); + seq_puts(m, " W "); break; case HW_BREAKPOINT_RW: - seq_printf(m, " RW "); + seq_puts(m, " RW "); break; default: - seq_printf(m, " NA "); + seq_puts(m, " NA "); } if (lookup_symbol_name(entry->ksym_addr, fn_name) >= 0) - seq_printf(m, " %s ", fn_name); + seq_printf(m, " %-36s", fn_name); else - seq_printf(m, " "); + seq_printf(m, " %-36s", ""); + seq_printf(m, " %15lu\n", entry->counter); - seq_printf(m, "%15lu\n", entry->counter); return 0; } static void *ksym_tracer_stat_start(struct tracer_stat *trace) { - return &(ksym_filter_head.first); + return ksym_filter_head.first; } static void * From d857ace143df3884954887e1899a65831ca72ece Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 22 Jul 2009 11:21:31 +0800 Subject: [PATCH 024/470] tracing/ksym_tracer: fix the output of ksym tracer Fix the output format of ksym tracer, make it properly aligned Befor patch: # tracer: ksym_tracer # # TASK-PID CPU# Symbol Type Function # | | | | | bash 1378 1 ksym_tracer_mutex W mutex_lock+0x11/0x27 bash 1378 1 ksym_filter_head W process_new_ksym_entry+0xd2/0x10c bash 1378 1 ksym_tracer_mutex W mutex_unlock+0x12/0x1b cat 1429 0 ksym_tracer_mutex W mutex_lock+0x11/0x27 After patch: # tracer: ksym_tracer # # TASK-PID CPU# Symbol Type Function # | | | | | cat-1423 [000] ksym_tracer_mutex RW mutex_lock+0x11/0x27 cat-1423 [000] ksym_filter_head RW ksym_trace_filter_read+0x6e/0x10d cat-1423 [000] ksym_tracer_mutex RW mutex_unlock+0x12/0x1b cat-1423 [000] ksym_tracer_mutex RW mutex_lock+0x11/0x27 cat-1423 [000] ksym_filter_head RW ksym_trace_filter_read+0x6e/0x10d cat-1423 [000] ksym_tracer_mutex RW mutex_unlock+0x12/0x1b Signed-off-by: Xiao Guangrong LKML-Reference: <4A6685BB.2090809@cn.fujitsu.com> Signed-off-by: Steven Rostedt --- kernel/trace/trace_ksym.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c index 1256a6e8ee2..fbf3a8e13bc 100644 --- a/kernel/trace/trace_ksym.c +++ b/kernel/trace/trace_ksym.c @@ -370,13 +370,12 @@ static int ksym_trace_init(struct trace_array *tr) static void ksym_trace_print_header(struct seq_file *m) { - seq_puts(m, - "# TASK-PID CPU# Symbol Type " - "Function \n"); + "# TASK-PID CPU# Symbol " + "Type Function\n"); seq_puts(m, - "# | | | | " - "| \n"); + "# | | | " + " | |\n"); } static enum print_line_t ksym_trace_output(struct trace_iterator *iter) @@ -392,7 +391,7 @@ static enum print_line_t ksym_trace_output(struct trace_iterator *iter) trace_assign_type(field, entry); - ret = trace_seq_printf(s, "%-15s %-5d %-3d %-20s ", field->cmd, + ret = trace_seq_printf(s, "%11s-%-5d [%03d] %-30s ", field->cmd, entry->pid, iter->cpu, field->ksym_name); if (!ret) return TRACE_TYPE_PARTIAL_LINE; @@ -412,7 +411,7 @@ static enum print_line_t ksym_trace_output(struct trace_iterator *iter) return TRACE_TYPE_PARTIAL_LINE; sprint_symbol(str, field->ip); - ret = trace_seq_printf(s, "%-20s\n", str); + ret = trace_seq_printf(s, "%s\n", str); if (!ret) return TRACE_TYPE_PARTIAL_LINE; From 
8e068542a8d9efec55126284d2f5cb32f003d507 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 22 Jul 2009 11:23:41 +0800 Subject: [PATCH 025/470] tracing/ksym_tracer: fix write operation of ksym_trace_filter This patch fix 2 bugs: - fix the return value of ksym_trace_filter_write() when we want to clear symbol in ksym_trace_filter file for example: # echo global_trace:rw- > /debug/tracing/ksym_trace_filter # echo global_trace:--- > /debug/tracing/ksym_trace_filter -bash: echo: write error: Invalid argument # cat /debug/tracing/ksym_trace_filter # We want to clear 'global_trace' in ksym_trace_filter, it complain with "Invalid argument", but the operation is successful - the "r--" access types is not allowed, but ksym_trace_filter file think it OK for example: # echo ksym_tracer_mutex:r-- > ksym_trace_filter -bash: echo: write error: Resource temporarily unavailable # dmesg ksym_tracer request failed. Try again later!! The error occur at register_kernel_hw_breakpoint(), but It's should at access types parser Signed-off-by: Xiao Guangrong LKML-Reference: <4A66863D.5090802@cn.fujitsu.com> Signed-off-by: Steven Rostedt --- kernel/trace/trace_ksym.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c index fbf3a8e13bc..cd5cb656c3d 100644 --- a/kernel/trace/trace_ksym.c +++ b/kernel/trace/trace_ksym.c @@ -135,6 +135,9 @@ static int ksym_trace_get_access_type(char *str) case 6: access = HW_BREAKPOINT_RW; break; + case 4: + access = -EINVAL; + break; case 2: access = HW_BREAKPOINT_WRITE; break; @@ -312,6 +315,7 @@ static ssize_t ksym_trace_filter_write(struct file *file, kfree(entry->ksym_hbp->info.name); kfree(entry->ksym_hbp); kfree(entry); + ret = 0; goto out; } else { /* Check for malformed request: (4) */ From 75e33751ca8bbb72dd6f1a74d2810ddc8cbe4bdf Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Thu, 23 Jul 2009 12:01:22 +0800 Subject: [PATCH 026/470] tracing/ksym_tracer: support quick clear for ksym_trace_filter -- v2 It's rather boring to clear symbol one by one in ksym_trace_filter file, so, this patch will let ksym_trace_filter file support quickly clear all break points. 
We can write "0" to this file and it will clear all symbols for example: # cat ksym_trace_filter ksym_filter_head:rw- global_trace:rw- # echo 0 > ksym_trace_filter # cat ksym_trace_filter # Changelog v1->v2: Add other ways to clear all breakpoints by writing NULL or "*:---" to ksym_trace_filter file base on K.Prasad's suggestion Signed-off-by: Xiao Guangrong LKML-Reference: <4A67E092.3080202@cn.fujitsu.com> Signed-off-by: Steven Rostedt --- kernel/trace/trace_ksym.c | 53 ++++++++++++++++++++++++++------------- 1 file changed, 35 insertions(+), 18 deletions(-) diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c index cd5cb656c3d..2fde875ead4 100644 --- a/kernel/trace/trace_ksym.c +++ b/kernel/trace/trace_ksym.c @@ -163,8 +163,6 @@ static int parse_ksym_trace_str(char *input_string, char **ksymname, { int ret; - strstrip(input_string); - *ksymname = strsep(&input_string, ":"); *addr = kallsyms_lookup_name(*ksymname); @@ -262,6 +260,25 @@ static ssize_t ksym_trace_filter_read(struct file *filp, char __user *ubuf, return cnt; } +static void __ksym_trace_reset(void) +{ + struct trace_ksym *entry; + struct hlist_node *node, *node1; + + mutex_lock(&ksym_tracer_mutex); + hlist_for_each_entry_safe(entry, node, node1, &ksym_filter_head, + ksym_hlist) { + unregister_kernel_hw_breakpoint(entry->ksym_hbp); + ksym_filter_entry_count--; + hlist_del_rcu(&(entry->ksym_hlist)); + synchronize_rcu(); + kfree(entry->ksym_hbp->info.name); + kfree(entry->ksym_hbp); + kfree(entry); + } + mutex_unlock(&ksym_tracer_mutex); +} + static ssize_t ksym_trace_filter_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) @@ -282,6 +299,21 @@ static ssize_t ksym_trace_filter_write(struct file *file, } input_string[count] = '\0'; + strstrip(input_string); + + /* + * Clear all breakpoints if: + * 1: echo > ksym_trace_filter + * 2: echo 0 > ksym_trace_filter + * 3: echo "*:---" > ksym_trace_filter + */ + if (!input_string[0] || !strcmp(input_string, "0") || + !strcmp(input_string, "*:---")) { + __ksym_trace_reset(); + kfree(input_string); + return count; + } + ret = op = parse_ksym_trace_str(input_string, &ksymname, &ksym_addr); if (ret < 0) { kfree(input_string); @@ -341,23 +373,8 @@ static const struct file_operations ksym_tracing_fops = { static void ksym_trace_reset(struct trace_array *tr) { - struct trace_ksym *entry; - struct hlist_node *node, *node1; - ksym_tracing_enabled = 0; - - mutex_lock(&ksym_tracer_mutex); - hlist_for_each_entry_safe(entry, node, node1, &ksym_filter_head, - ksym_hlist) { - unregister_kernel_hw_breakpoint(entry->ksym_hbp); - ksym_filter_entry_count--; - hlist_del_rcu(&(entry->ksym_hlist)); - synchronize_rcu(); - kfree(entry->ksym_hbp->info.name); - kfree(entry->ksym_hbp); - kfree(entry); - } - mutex_unlock(&ksym_tracer_mutex); + __ksym_trace_reset(); } static int ksym_trace_init(struct trace_array *tr) From eb13296cfaf6c699566473669a96a38a90562384 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 13 Aug 2009 16:34:13 -0400 Subject: [PATCH 027/470] x86: Instruction decoder API MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add x86 instruction decoder to arch-specific libraries. This decoder can decode x86 instructions used in kernel into prefix, opcode, modrm, sib, displacement and immediates. This can also show the length of instructions. This version introduces instruction attributes for decoding instructions. 
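As a rough sketch of how a caller is expected to drive the decoder (using only the functions this patch introduces; the wrapper function below is illustrative, not part of the patch):

	#include <asm/insn.h>

	static int decode_one_insn(const void *kaddr)
	{
		struct insn insn;

		/* Set up decoder state for an instruction in kernel text */
		kernel_insn_init(&insn, kaddr);

		/* Decode everything up to and including the immediates */
		insn_get_length(&insn);

		/* Total length in bytes of the decoded instruction */
		return insn.length;
	}
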
The instruction attribute tables are generated from the opcode map file (x86-opcode-map.txt) by the generator script(gen-insn-attr-x86.awk). Currently, the opcode maps are based on opcode maps in Intel(R) 64 and IA-32 Architectures Software Developers Manual Vol.2: Appendix.A, and consist of below two types of opcode tables. 1-byte/2-bytes/3-bytes opcodes, which has 256 elements, are written as below; Table: table-name Referrer: escaped-name opcode: mnemonic|GrpXXX [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...] (or) opcode: escape # escaped-name EndTable Group opcodes, which has 8 elements, are written as below; GrpTable: GrpXXX reg: mnemonic [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...] EndTable These opcode maps include a few SSE and FP opcodes (for setup), because those opcodes are used in the kernel. Signed-off-by: Masami Hiramatsu Signed-off-by: Jim Keniston Acked-by: H. Peter Anvin Cc: Ananth N Mavinakayanahalli Cc: Avi Kivity Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Ingo Molnar Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: PrzemysÅ‚aw PaweÅ‚czyk Cc: Roland McGrath Cc: Sam Ravnborg Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi Cc: Vegard Nossum LKML-Reference: <20090813203413.31965.49709.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- arch/x86/include/asm/inat.h | 188 +++++++ arch/x86/include/asm/inat_types.h | 29 ++ arch/x86/include/asm/insn.h | 143 ++++++ arch/x86/lib/Makefile | 13 + arch/x86/lib/inat.c | 78 +++ arch/x86/lib/insn.c | 464 +++++++++++++++++ arch/x86/lib/x86-opcode-map.txt | 719 +++++++++++++++++++++++++++ arch/x86/tools/gen-insn-attr-x86.awk | 314 ++++++++++++ 8 files changed, 1948 insertions(+) create mode 100644 arch/x86/include/asm/inat.h create mode 100644 arch/x86/include/asm/inat_types.h create mode 100644 arch/x86/include/asm/insn.h create mode 100644 arch/x86/lib/inat.c create mode 100644 arch/x86/lib/insn.c create mode 100644 arch/x86/lib/x86-opcode-map.txt create mode 100644 arch/x86/tools/gen-insn-attr-x86.awk diff --git a/arch/x86/include/asm/inat.h b/arch/x86/include/asm/inat.h new file mode 100644 index 00000000000..2866fddd184 --- /dev/null +++ b/arch/x86/include/asm/inat.h @@ -0,0 +1,188 @@ +#ifndef _ASM_X86_INAT_H +#define _ASM_X86_INAT_H +/* + * x86 instruction attributes + * + * Written by Masami Hiramatsu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ +#include + +/* + * Internal bits. Don't use bitmasks directly, because these bits are + * unstable. You should use checking functions. 
+ */ + +#define INAT_OPCODE_TABLE_SIZE 256 +#define INAT_GROUP_TABLE_SIZE 8 + +/* Legacy instruction prefixes */ +#define INAT_PFX_OPNDSZ 1 /* 0x66 */ /* LPFX1 */ +#define INAT_PFX_REPNE 2 /* 0xF2 */ /* LPFX2 */ +#define INAT_PFX_REPE 3 /* 0xF3 */ /* LPFX3 */ +#define INAT_PFX_LOCK 4 /* 0xF0 */ +#define INAT_PFX_CS 5 /* 0x2E */ +#define INAT_PFX_DS 6 /* 0x3E */ +#define INAT_PFX_ES 7 /* 0x26 */ +#define INAT_PFX_FS 8 /* 0x64 */ +#define INAT_PFX_GS 9 /* 0x65 */ +#define INAT_PFX_SS 10 /* 0x36 */ +#define INAT_PFX_ADDRSZ 11 /* 0x67 */ + +#define INAT_LPREFIX_MAX 3 + +/* Immediate size */ +#define INAT_IMM_BYTE 1 +#define INAT_IMM_WORD 2 +#define INAT_IMM_DWORD 3 +#define INAT_IMM_QWORD 4 +#define INAT_IMM_PTR 5 +#define INAT_IMM_VWORD32 6 +#define INAT_IMM_VWORD 7 + +/* Legacy prefix */ +#define INAT_PFX_OFFS 0 +#define INAT_PFX_BITS 4 +#define INAT_PFX_MAX ((1 << INAT_PFX_BITS) - 1) +#define INAT_PFX_MASK (INAT_PFX_MAX << INAT_PFX_OFFS) +/* Escape opcodes */ +#define INAT_ESC_OFFS (INAT_PFX_OFFS + INAT_PFX_BITS) +#define INAT_ESC_BITS 2 +#define INAT_ESC_MAX ((1 << INAT_ESC_BITS) - 1) +#define INAT_ESC_MASK (INAT_ESC_MAX << INAT_ESC_OFFS) +/* Group opcodes (1-16) */ +#define INAT_GRP_OFFS (INAT_ESC_OFFS + INAT_ESC_BITS) +#define INAT_GRP_BITS 5 +#define INAT_GRP_MAX ((1 << INAT_GRP_BITS) - 1) +#define INAT_GRP_MASK (INAT_GRP_MAX << INAT_GRP_OFFS) +/* Immediates */ +#define INAT_IMM_OFFS (INAT_GRP_OFFS + INAT_GRP_BITS) +#define INAT_IMM_BITS 3 +#define INAT_IMM_MASK (((1 << INAT_IMM_BITS) - 1) << INAT_IMM_OFFS) +/* Flags */ +#define INAT_FLAG_OFFS (INAT_IMM_OFFS + INAT_IMM_BITS) +#define INAT_REXPFX (1 << INAT_FLAG_OFFS) +#define INAT_MODRM (1 << (INAT_FLAG_OFFS + 1)) +#define INAT_FORCE64 (1 << (INAT_FLAG_OFFS + 2)) +#define INAT_SCNDIMM (1 << (INAT_FLAG_OFFS + 3)) +#define INAT_MOFFSET (1 << (INAT_FLAG_OFFS + 4)) +#define INAT_VARIANT (1 << (INAT_FLAG_OFFS + 5)) +/* Attribute making macros for attribute tables */ +#define INAT_MAKE_PREFIX(pfx) (pfx << INAT_PFX_OFFS) +#define INAT_MAKE_ESCAPE(esc) (esc << INAT_ESC_OFFS) +#define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM) +#define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS) + +/* Attribute search APIs */ +extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode); +extern insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, + insn_byte_t last_pfx, + insn_attr_t esc_attr); +extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm, + insn_byte_t last_pfx, + insn_attr_t esc_attr); + +/* Attribute checking functions */ +static inline int inat_is_prefix(insn_attr_t attr) +{ + return attr & INAT_PFX_MASK; +} + +static inline int inat_is_address_size_prefix(insn_attr_t attr) +{ + return (attr & INAT_PFX_MASK) == INAT_PFX_ADDRSZ; +} + +static inline int inat_is_operand_size_prefix(insn_attr_t attr) +{ + return (attr & INAT_PFX_MASK) == INAT_PFX_OPNDSZ; +} + +static inline int inat_last_prefix_id(insn_attr_t attr) +{ + if ((attr & INAT_PFX_MASK) > INAT_LPREFIX_MAX) + return 0; + else + return attr & INAT_PFX_MASK; +} + +static inline int inat_is_escape(insn_attr_t attr) +{ + return attr & INAT_ESC_MASK; +} + +static inline int inat_escape_id(insn_attr_t attr) +{ + return (attr & INAT_ESC_MASK) >> INAT_ESC_OFFS; +} + +static inline int inat_is_group(insn_attr_t attr) +{ + return attr & INAT_GRP_MASK; +} + +static inline int inat_group_id(insn_attr_t attr) +{ + return (attr & INAT_GRP_MASK) >> INAT_GRP_OFFS; +} + +static inline int inat_group_common_attribute(insn_attr_t attr) +{ + return attr & ~INAT_GRP_MASK; +} + 
+static inline int inat_has_immediate(insn_attr_t attr) +{ + return attr & INAT_IMM_MASK; +} + +static inline int inat_immediate_size(insn_attr_t attr) +{ + return (attr & INAT_IMM_MASK) >> INAT_IMM_OFFS; +} + +static inline int inat_is_rex_prefix(insn_attr_t attr) +{ + return attr & INAT_REXPFX; +} + +static inline int inat_has_modrm(insn_attr_t attr) +{ + return attr & INAT_MODRM; +} + +static inline int inat_is_force64(insn_attr_t attr) +{ + return attr & INAT_FORCE64; +} + +static inline int inat_has_second_immediate(insn_attr_t attr) +{ + return attr & INAT_SCNDIMM; +} + +static inline int inat_has_moffset(insn_attr_t attr) +{ + return attr & INAT_MOFFSET; +} + +static inline int inat_has_variant(insn_attr_t attr) +{ + return attr & INAT_VARIANT; +} + +#endif diff --git a/arch/x86/include/asm/inat_types.h b/arch/x86/include/asm/inat_types.h new file mode 100644 index 00000000000..cb3c20ce39c --- /dev/null +++ b/arch/x86/include/asm/inat_types.h @@ -0,0 +1,29 @@ +#ifndef _ASM_X86_INAT_TYPES_H +#define _ASM_X86_INAT_TYPES_H +/* + * x86 instruction attributes + * + * Written by Masami Hiramatsu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +/* Instruction attributes */ +typedef unsigned int insn_attr_t; +typedef unsigned char insn_byte_t; +typedef signed int insn_value_t; + +#endif diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h new file mode 100644 index 00000000000..12b4e3751d3 --- /dev/null +++ b/arch/x86/include/asm/insn.h @@ -0,0 +1,143 @@ +#ifndef _ASM_X86_INSN_H +#define _ASM_X86_INSN_H +/* + * x86 instruction analysis + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + * Copyright (C) IBM Corporation, 2009 + */ + +/* insn_attr_t is defined in inat.h */ +#include + +struct insn_field { + union { + insn_value_t value; + insn_byte_t bytes[4]; + }; + /* !0 if we've run insn_get_xxx() for this field */ + unsigned char got; + unsigned char nbytes; +}; + +struct insn { + struct insn_field prefixes; /* + * Prefixes + * prefixes.bytes[3]: last prefix + */ + struct insn_field rex_prefix; /* REX prefix */ + struct insn_field opcode; /* + * opcode.bytes[0]: opcode1 + * opcode.bytes[1]: opcode2 + * opcode.bytes[2]: opcode3 + */ + struct insn_field modrm; + struct insn_field sib; + struct insn_field displacement; + union { + struct insn_field immediate; + struct insn_field moffset1; /* for 64bit MOV */ + struct insn_field immediate1; /* for 64bit imm or off16/32 */ + }; + union { + struct insn_field moffset2; /* for 64bit MOV */ + struct insn_field immediate2; /* for 64bit imm or seg16 */ + }; + + insn_attr_t attr; + unsigned char opnd_bytes; + unsigned char addr_bytes; + unsigned char length; + unsigned char x86_64; + + const insn_byte_t *kaddr; /* kernel address of insn to analyze */ + const insn_byte_t *next_byte; +}; + +#define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6) +#define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3) +#define X86_MODRM_RM(modrm) ((modrm) & 0x07) + +#define X86_SIB_SCALE(sib) (((sib) & 0xc0) >> 6) +#define X86_SIB_INDEX(sib) (((sib) & 0x38) >> 3) +#define X86_SIB_BASE(sib) ((sib) & 0x07) + +#define X86_REX_W(rex) ((rex) & 8) +#define X86_REX_R(rex) ((rex) & 4) +#define X86_REX_X(rex) ((rex) & 2) +#define X86_REX_B(rex) ((rex) & 1) + +/* The last prefix is needed for two-byte and three-byte opcodes */ +static inline insn_byte_t insn_last_prefix(struct insn *insn) +{ + return insn->prefixes.bytes[3]; +} + +extern void insn_init(struct insn *insn, const void *kaddr, int x86_64); +extern void insn_get_prefixes(struct insn *insn); +extern void insn_get_opcode(struct insn *insn); +extern void insn_get_modrm(struct insn *insn); +extern void insn_get_sib(struct insn *insn); +extern void insn_get_displacement(struct insn *insn); +extern void insn_get_immediate(struct insn *insn); +extern void insn_get_length(struct insn *insn); + +/* Attribute will be determined after getting ModRM (for opcode groups) */ +static inline void insn_get_attribute(struct insn *insn) +{ + insn_get_modrm(insn); +} + +/* Instruction uses RIP-relative addressing */ +extern int insn_rip_relative(struct insn *insn); + +/* Init insn for kernel text */ +static inline void kernel_insn_init(struct insn *insn, const void *kaddr) +{ +#ifdef CONFIG_X86_64 + insn_init(insn, kaddr, 1); +#else /* CONFIG_X86_32 */ + insn_init(insn, kaddr, 0); +#endif +} + +/* Offset of each field from kaddr */ +static inline int insn_offset_rex_prefix(struct insn *insn) +{ + return insn->prefixes.nbytes; +} +static inline int insn_offset_opcode(struct insn *insn) +{ + return insn_offset_rex_prefix(insn) + insn->rex_prefix.nbytes; +} +static inline int insn_offset_modrm(struct insn *insn) +{ + return insn_offset_opcode(insn) + insn->opcode.nbytes; +} +static inline int insn_offset_sib(struct insn *insn) +{ + return insn_offset_modrm(insn) + insn->modrm.nbytes; +} +static inline int insn_offset_displacement(struct insn *insn) +{ + return insn_offset_sib(insn) + insn->sib.nbytes; +} +static inline int insn_offset_immediate(struct insn *insn) +{ + return insn_offset_displacement(insn) + insn->displacement.nbytes; +} + +#endif /* _ASM_X86_INSN_H */ diff --git a/arch/x86/lib/Makefile 
b/arch/x86/lib/Makefile index 07c31899c9c..c77f8a7c531 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -2,12 +2,25 @@ # Makefile for x86 specific library files. # +inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk +inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt +quiet_cmd_inat_tables = GEN $@ + cmd_inat_tables = $(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@ + +$(obj)/inat-tables.c: $(inat_tables_script) $(inat_tables_maps) + $(call cmd,inat_tables) + +$(obj)/inat.o: $(obj)/inat-tables.c + +clean-files := inat-tables.c + obj-$(CONFIG_SMP) := msr.o lib-y := delay.o lib-y += thunk_$(BITS).o lib-y += usercopy_$(BITS).o getuser.o putuser.o lib-y += memcpy_$(BITS).o +lib-y += insn.o inat.o ifeq ($(CONFIG_X86_32),y) obj-y += atomic64_32.o diff --git a/arch/x86/lib/inat.c b/arch/x86/lib/inat.c new file mode 100644 index 00000000000..054656a01df --- /dev/null +++ b/arch/x86/lib/inat.c @@ -0,0 +1,78 @@ +/* + * x86 instruction attribute tables + * + * Written by Masami Hiramatsu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ +#include + +/* Attribute tables are generated from opcode map */ +#include "inat-tables.c" + +/* Attribute search APIs */ +insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode) +{ + return inat_primary_table[opcode]; +} + +insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, insn_byte_t last_pfx, + insn_attr_t esc_attr) +{ + const insn_attr_t *table; + insn_attr_t lpfx_attr; + int n, m = 0; + + n = inat_escape_id(esc_attr); + if (last_pfx) { + lpfx_attr = inat_get_opcode_attribute(last_pfx); + m = inat_last_prefix_id(lpfx_attr); + } + table = inat_escape_tables[n][0]; + if (!table) + return 0; + if (inat_has_variant(table[opcode]) && m) { + table = inat_escape_tables[n][m]; + if (!table) + return 0; + } + return table[opcode]; +} + +insn_attr_t inat_get_group_attribute(insn_byte_t modrm, insn_byte_t last_pfx, + insn_attr_t grp_attr) +{ + const insn_attr_t *table; + insn_attr_t lpfx_attr; + int n, m = 0; + + n = inat_group_id(grp_attr); + if (last_pfx) { + lpfx_attr = inat_get_opcode_attribute(last_pfx); + m = inat_last_prefix_id(lpfx_attr); + } + table = inat_group_tables[n][0]; + if (!table) + return inat_group_common_attribute(grp_attr); + if (inat_has_variant(table[X86_MODRM_REG(modrm)]) && m) { + table = inat_escape_tables[n][m]; + if (!table) + return inat_group_common_attribute(grp_attr); + } + return table[X86_MODRM_REG(modrm)] | + inat_group_common_attribute(grp_attr); +} + diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c new file mode 100644 index 00000000000..dfd56a30053 --- /dev/null +++ b/arch/x86/lib/insn.c @@ -0,0 +1,464 @@ +/* + * x86 instruction analysis + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * 
the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2002, 2004, 2009 + */ + +#include +#include +#include + +#define get_next(t, insn) \ + ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; }) + +#define peek_next(t, insn) \ + ({t r; r = *(t*)insn->next_byte; r; }) + +/** + * insn_init() - initialize struct insn + * @insn: &struct insn to be initialized + * @kaddr: address (in kernel memory) of instruction (or copy thereof) + * @x86_64: !0 for 64-bit kernel or 64-bit app + */ +void insn_init(struct insn *insn, const void *kaddr, int x86_64) +{ + memset(insn, 0, sizeof(*insn)); + insn->kaddr = kaddr; + insn->next_byte = kaddr; + insn->x86_64 = x86_64 ? 1 : 0; + insn->opnd_bytes = 4; + if (x86_64) + insn->addr_bytes = 8; + else + insn->addr_bytes = 4; +} + +/** + * insn_get_prefixes - scan x86 instruction prefix bytes + * @insn: &struct insn containing instruction + * + * Populates the @insn->prefixes bitmap, and updates @insn->next_byte + * to point to the (first) opcode. No effect if @insn->prefixes.got + * is already set. + */ +void insn_get_prefixes(struct insn *insn) +{ + struct insn_field *prefixes = &insn->prefixes; + insn_attr_t attr; + insn_byte_t b, lb; + int i, nb; + + if (prefixes->got) + return; + + nb = 0; + lb = 0; + b = peek_next(insn_byte_t, insn); + attr = inat_get_opcode_attribute(b); + while (inat_is_prefix(attr)) { + /* Skip if same prefix */ + for (i = 0; i < nb; i++) + if (prefixes->bytes[i] == b) + goto found; + if (nb == 4) + /* Invalid instruction */ + break; + prefixes->bytes[nb++] = b; + if (inat_is_address_size_prefix(attr)) { + /* address size switches 2/4 or 4/8 */ + if (insn->x86_64) + insn->addr_bytes ^= 12; + else + insn->addr_bytes ^= 6; + } else if (inat_is_operand_size_prefix(attr)) { + /* oprand size switches 2/4 */ + insn->opnd_bytes ^= 6; + } +found: + prefixes->nbytes++; + insn->next_byte++; + lb = b; + b = peek_next(insn_byte_t, insn); + attr = inat_get_opcode_attribute(b); + } + /* Set the last prefix */ + if (lb && lb != insn->prefixes.bytes[3]) { + if (unlikely(insn->prefixes.bytes[3])) { + /* Swap the last prefix */ + b = insn->prefixes.bytes[3]; + for (i = 0; i < nb; i++) + if (prefixes->bytes[i] == lb) + prefixes->bytes[i] = b; + } + insn->prefixes.bytes[3] = lb; + } + + if (insn->x86_64) { + b = peek_next(insn_byte_t, insn); + attr = inat_get_opcode_attribute(b); + if (inat_is_rex_prefix(attr)) { + insn->rex_prefix.value = b; + insn->rex_prefix.nbytes = 1; + insn->next_byte++; + if (X86_REX_W(b)) + /* REX.W overrides opnd_size */ + insn->opnd_bytes = 8; + } + } + insn->rex_prefix.got = 1; + prefixes->got = 1; + return; +} + +/** + * insn_get_opcode - collect opcode(s) + * @insn: &struct insn containing instruction + * + * Populates @insn->opcode, updates @insn->next_byte to point past the + * opcode byte(s), and set @insn->attr (except for groups). + * If necessary, first collects any preceding (prefix) bytes. + * Sets @insn->opcode.value = opcode1. 
No effect if @insn->opcode.got + * is already 1. + */ +void insn_get_opcode(struct insn *insn) +{ + struct insn_field *opcode = &insn->opcode; + insn_byte_t op, pfx; + if (opcode->got) + return; + if (!insn->prefixes.got) + insn_get_prefixes(insn); + + /* Get first opcode */ + op = get_next(insn_byte_t, insn); + opcode->bytes[0] = op; + opcode->nbytes = 1; + insn->attr = inat_get_opcode_attribute(op); + while (inat_is_escape(insn->attr)) { + /* Get escaped opcode */ + op = get_next(insn_byte_t, insn); + opcode->bytes[opcode->nbytes++] = op; + pfx = insn_last_prefix(insn); + insn->attr = inat_get_escape_attribute(op, pfx, insn->attr); + } + opcode->got = 1; +} + +/** + * insn_get_modrm - collect ModRM byte, if any + * @insn: &struct insn containing instruction + * + * Populates @insn->modrm and updates @insn->next_byte to point past the + * ModRM byte, if any. If necessary, first collects the preceding bytes + * (prefixes and opcode(s)). No effect if @insn->modrm.got is already 1. + */ +void insn_get_modrm(struct insn *insn) +{ + struct insn_field *modrm = &insn->modrm; + insn_byte_t pfx, mod; + if (modrm->got) + return; + if (!insn->opcode.got) + insn_get_opcode(insn); + + if (inat_has_modrm(insn->attr)) { + mod = get_next(insn_byte_t, insn); + modrm->value = mod; + modrm->nbytes = 1; + if (inat_is_group(insn->attr)) { + pfx = insn_last_prefix(insn); + insn->attr = inat_get_group_attribute(mod, pfx, + insn->attr); + } + } + + if (insn->x86_64 && inat_is_force64(insn->attr)) + insn->opnd_bytes = 8; + modrm->got = 1; +} + + +/** + * insn_rip_relative() - Does instruction use RIP-relative addressing mode? + * @insn: &struct insn containing instruction + * + * If necessary, first collects the instruction up to and including the + * ModRM byte. No effect if @insn->x86_64 is 0. + */ +int insn_rip_relative(struct insn *insn) +{ + struct insn_field *modrm = &insn->modrm; + + if (!insn->x86_64) + return 0; + if (!modrm->got) + insn_get_modrm(insn); + /* + * For rip-relative instructions, the mod field (top 2 bits) + * is zero and the r/m field (bottom 3 bits) is 0x5. + */ + return (modrm->nbytes && (modrm->value & 0xc7) == 0x5); +} + +/** + * insn_get_sib() - Get the SIB byte of instruction + * @insn: &struct insn containing instruction + * + * If necessary, first collects the instruction up to and including the + * ModRM byte. + */ +void insn_get_sib(struct insn *insn) +{ + insn_byte_t modrm; + + if (insn->sib.got) + return; + if (!insn->modrm.got) + insn_get_modrm(insn); + if (insn->modrm.nbytes) { + modrm = (insn_byte_t)insn->modrm.value; + if (insn->addr_bytes != 2 && + X86_MODRM_MOD(modrm) != 3 && X86_MODRM_RM(modrm) == 4) { + insn->sib.value = get_next(insn_byte_t, insn); + insn->sib.nbytes = 1; + } + } + insn->sib.got = 1; +} + + +/** + * insn_get_displacement() - Get the displacement of instruction + * @insn: &struct insn containing instruction + * + * If necessary, first collects the instruction up to and including the + * SIB byte. + * Displacement value is sign-expanded. + */ +void insn_get_displacement(struct insn *insn) +{ + insn_byte_t mod, rm, base; + + if (insn->displacement.got) + return; + if (!insn->sib.got) + insn_get_sib(insn); + if (insn->modrm.nbytes) { + /* + * Interpreting the modrm byte: + * mod = 00 - no displacement fields (exceptions below) + * mod = 01 - 1-byte displacement field + * mod = 10 - displacement field is 4 bytes, or 2 bytes if + * address size = 2 (0x67 prefix in 32-bit mode) + * mod = 11 - no memory operand + * + * If address size = 2... 
+ * mod = 00, r/m = 110 - displacement field is 2 bytes + * + * If address size != 2... + * mod != 11, r/m = 100 - SIB byte exists + * mod = 00, SIB base = 101 - displacement field is 4 bytes + * mod = 00, r/m = 101 - rip-relative addressing, displacement + * field is 4 bytes + */ + mod = X86_MODRM_MOD(insn->modrm.value); + rm = X86_MODRM_RM(insn->modrm.value); + base = X86_SIB_BASE(insn->sib.value); + if (mod == 3) + goto out; + if (mod == 1) { + insn->displacement.value = get_next(char, insn); + insn->displacement.nbytes = 1; + } else if (insn->addr_bytes == 2) { + if ((mod == 0 && rm == 6) || mod == 2) { + insn->displacement.value = + get_next(short, insn); + insn->displacement.nbytes = 2; + } + } else { + if ((mod == 0 && rm == 5) || mod == 2 || + (mod == 0 && base == 5)) { + insn->displacement.value = get_next(int, insn); + insn->displacement.nbytes = 4; + } + } + } +out: + insn->displacement.got = 1; +} + +/* Decode moffset16/32/64 */ +static void __get_moffset(struct insn *insn) +{ + switch (insn->addr_bytes) { + case 2: + insn->moffset1.value = get_next(short, insn); + insn->moffset1.nbytes = 2; + break; + case 4: + insn->moffset1.value = get_next(int, insn); + insn->moffset1.nbytes = 4; + break; + case 8: + insn->moffset1.value = get_next(int, insn); + insn->moffset1.nbytes = 4; + insn->moffset2.value = get_next(int, insn); + insn->moffset2.nbytes = 4; + break; + } + insn->moffset1.got = insn->moffset2.got = 1; +} + +/* Decode imm v32(Iz) */ +static void __get_immv32(struct insn *insn) +{ + switch (insn->opnd_bytes) { + case 2: + insn->immediate.value = get_next(short, insn); + insn->immediate.nbytes = 2; + break; + case 4: + case 8: + insn->immediate.value = get_next(int, insn); + insn->immediate.nbytes = 4; + break; + } +} + +/* Decode imm v64(Iv/Ov) */ +static void __get_immv(struct insn *insn) +{ + switch (insn->opnd_bytes) { + case 2: + insn->immediate1.value = get_next(short, insn); + insn->immediate1.nbytes = 2; + break; + case 4: + insn->immediate1.value = get_next(int, insn); + insn->immediate1.nbytes = 4; + break; + case 8: + insn->immediate1.value = get_next(int, insn); + insn->immediate1.nbytes = 4; + insn->immediate2.value = get_next(int, insn); + insn->immediate2.nbytes = 4; + break; + } + insn->immediate1.got = insn->immediate2.got = 1; +} + +/* Decode ptr16:16/32(Ap) */ +static void __get_immptr(struct insn *insn) +{ + switch (insn->opnd_bytes) { + case 2: + insn->immediate1.value = get_next(short, insn); + insn->immediate1.nbytes = 2; + break; + case 4: + insn->immediate1.value = get_next(int, insn); + insn->immediate1.nbytes = 4; + break; + case 8: + /* ptr16:64 is not exist (no segment) */ + return; + } + insn->immediate2.value = get_next(unsigned short, insn); + insn->immediate2.nbytes = 2; + insn->immediate1.got = insn->immediate2.got = 1; +} + +/** + * insn_get_immediate() - Get the immediates of instruction + * @insn: &struct insn containing instruction + * + * If necessary, first collects the instruction up to and including the + * displacement bytes. + * Basically, most of immediates are sign-expanded. 
Unsigned-value can be + * get by bit masking with ((1 << (nbytes * 8)) - 1) + */ +void insn_get_immediate(struct insn *insn) +{ + if (insn->immediate.got) + return; + if (!insn->displacement.got) + insn_get_displacement(insn); + + if (inat_has_moffset(insn->attr)) { + __get_moffset(insn); + goto done; + } + + if (!inat_has_immediate(insn->attr)) + /* no immediates */ + goto done; + + switch (inat_immediate_size(insn->attr)) { + case INAT_IMM_BYTE: + insn->immediate.value = get_next(char, insn); + insn->immediate.nbytes = 1; + break; + case INAT_IMM_WORD: + insn->immediate.value = get_next(short, insn); + insn->immediate.nbytes = 2; + break; + case INAT_IMM_DWORD: + insn->immediate.value = get_next(int, insn); + insn->immediate.nbytes = 4; + break; + case INAT_IMM_QWORD: + insn->immediate1.value = get_next(int, insn); + insn->immediate1.nbytes = 4; + insn->immediate2.value = get_next(int, insn); + insn->immediate2.nbytes = 4; + break; + case INAT_IMM_PTR: + __get_immptr(insn); + break; + case INAT_IMM_VWORD32: + __get_immv32(insn); + break; + case INAT_IMM_VWORD: + __get_immv(insn); + break; + default: + break; + } + if (inat_has_second_immediate(insn->attr)) { + insn->immediate2.value = get_next(char, insn); + insn->immediate2.nbytes = 1; + } +done: + insn->immediate.got = 1; +} + +/** + * insn_get_length() - Get the length of instruction + * @insn: &struct insn containing instruction + * + * If necessary, first collects the instruction up to and including the + * immediates bytes. + */ +void insn_get_length(struct insn *insn) +{ + if (insn->length) + return; + if (!insn->immediate.got) + insn_get_immediate(insn); + insn->length = (unsigned char)((unsigned long)insn->next_byte + - (unsigned long)insn->kaddr); +} diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt new file mode 100644 index 00000000000..083dd59dd74 --- /dev/null +++ b/arch/x86/lib/x86-opcode-map.txt @@ -0,0 +1,719 @@ +# x86 Opcode Maps +# +# +# Table: table-name +# Referrer: escaped-name +# opcode: mnemonic|GrpXXX [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...] +# (or) +# opcode: escape # escaped-name +# EndTable +# +# +# GrpTable: GrpXXX +# reg: mnemonic [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...] 
+# EndTable +# + +Table: one byte opcode +Referrer: +# 0x00 - 0x0f +00: ADD Eb,Gb +01: ADD Ev,Gv +02: ADD Gb,Eb +03: ADD Gv,Ev +04: ADD AL,Ib +05: ADD rAX,Iz +06: PUSH ES (i64) +07: POP ES (i64) +08: OR Eb,Gb +09: OR Ev,Gv +0a: OR Gb,Eb +0b: OR Gv,Ev +0c: OR AL,Ib +0d: OR rAX,Iz +0e: PUSH CS (i64) +0f: escape # 2-byte escape +# 0x10 - 0x1f +10: ADC Eb,Gb +11: ADC Ev,Gv +12: ADC Gb,Eb +13: ADC Gv,Ev +14: ADC AL,Ib +15: ADC rAX,Iz +16: PUSH SS (i64) +17: POP SS (i64) +18: SBB Eb,Gb +19: SBB Ev,Gv +1a: SBB Gb,Eb +1b: SBB Gv,Ev +1c: SBB AL,Ib +1d: SBB rAX,Iz +1e: PUSH DS (i64) +1f: POP DS (i64) +# 0x20 - 0x2f +20: AND Eb,Gb +21: AND Ev,Gv +22: AND Gb,Eb +23: AND Gv,Ev +24: AND AL,Ib +25: AND rAx,Iz +26: SEG=ES (Prefix) +27: DAA (i64) +28: SUB Eb,Gb +29: SUB Ev,Gv +2a: SUB Gb,Eb +2b: SUB Gv,Ev +2c: SUB AL,Ib +2d: SUB rAX,Iz +2e: SEG=CS (Prefix) +2f: DAS (i64) +# 0x30 - 0x3f +30: XOR Eb,Gb +31: XOR Ev,Gv +32: XOR Gb,Eb +33: XOR Gv,Ev +34: XOR AL,Ib +35: XOR rAX,Iz +36: SEG=SS (Prefix) +37: AAA (i64) +38: CMP Eb,Gb +39: CMP Ev,Gv +3a: CMP Gb,Eb +3b: CMP Gv,Ev +3c: CMP AL,Ib +3d: CMP rAX,Iz +3e: SEG=DS (Prefix) +3f: AAS (i64) +# 0x40 - 0x4f +40: INC eAX (i64) | REX (o64) +41: INC eCX (i64) | REX.B (o64) +42: INC eDX (i64) | REX.X (o64) +43: INC eBX (i64) | REX.XB (o64) +44: INC eSP (i64) | REX.R (o64) +45: INC eBP (i64) | REX.RB (o64) +46: INC eSI (i64) | REX.RX (o64) +47: INC eDI (i64) | REX.RXB (o64) +48: DEC eAX (i64) | REX.W (o64) +49: DEC eCX (i64) | REX.WB (o64) +4a: DEC eDX (i64) | REX.WX (o64) +4b: DEC eBX (i64) | REX.WXB (o64) +4c: DEC eSP (i64) | REX.WR (o64) +4d: DEC eBP (i64) | REX.WRB (o64) +4e: DEC eSI (i64) | REX.WRX (o64) +4f: DEC eDI (i64) | REX.WRXB (o64) +# 0x50 - 0x5f +50: PUSH rAX/r8 (d64) +51: PUSH rCX/r9 (d64) +52: PUSH rDX/r10 (d64) +53: PUSH rBX/r11 (d64) +54: PUSH rSP/r12 (d64) +55: PUSH rBP/r13 (d64) +56: PUSH rSI/r14 (d64) +57: PUSH rDI/r15 (d64) +58: POP rAX/r8 (d64) +59: POP rCX/r9 (d64) +5a: POP rDX/r10 (d64) +5b: POP rBX/r11 (d64) +5c: POP rSP/r12 (d64) +5d: POP rBP/r13 (d64) +5e: POP rSI/r14 (d64) +5f: POP rDI/r15 (d64) +# 0x60 - 0x6f +60: PUSHA/PUSHAD (i64) +61: POPA/POPAD (i64) +62: BOUND Gv,Ma (i64) +63: ARPL Ew,Gw (i64) | MOVSXD Gv,Ev (o64) +64: SEG=FS (Prefix) +65: SEG=GS (Prefix) +66: Operand-Size (Prefix) +67: Address-Size (Prefix) +68: PUSH Iz (d64) +69: IMUL Gv,Ev,Iz +6a: PUSH Ib (d64) +6b: IMUL Gv,Ev,Ib +6c: INS/INSB Yb,DX +6d: INS/INSW/INSD Yz,DX +6e: OUTS/OUTSB DX,Xb +6f: OUTS/OUTSW/OUTSD DX,Xz +# 0x70 - 0x7f +70: JO Jb +71: JNO Jb +72: JB/JNAE/JC Jb +73: JNB/JAE/JNC Jb +74: JZ/JE Jb +75: JNZ/JNE Jb +76: JBE/JNA Jb +77: JNBE/JA Jb +78: JS Jb +79: JNS Jb +7a: JP/JPE Jb +7b: JNP/JPO Jb +7c: JL/JNGE Jb +7d: JNL/JGE Jb +7e: JLE/JNG Jb +7f: JNLE/JG Jb +# 0x80 - 0x8f +80: Grp1 Eb,Ib (1A) +81: Grp1 Ev,Iz (1A) +82: Grp1 Eb,Ib (1A),(i64) +83: Grp1 Ev,Ib (1A) +84: TEST Eb,Gb +85: TEST Ev,Gv +86: XCHG Eb,Gb +87: XCHG Ev,Gv +88: MOV Eb,Gb +89: MOV Ev,Gv +8a: MOV Gb,Eb +8b: MOV Gv,Ev +8c: MOV Ev,Sw +8d: LEA Gv,M +8e: MOV Sw,Ew +8f: Grp1A (1A) | POP Ev (d64) +# 0x90 - 0x9f +90: NOP | PAUSE (F3) | XCHG r8,rAX +91: XCHG rCX/r9,rAX +92: XCHG rDX/r10,rAX +93: XCHG rBX/r11,rAX +94: XCHG rSP/r12,rAX +95: XCHG rBP/r13,rAX +96: XCHG rSI/r14,rAX +97: XCHG rDI/r15,rAX +98: CBW/CWDE/CDQE +99: CWD/CDQ/CQO +9a: CALLF Ap (i64) +9b: FWAIT/WAIT +9c: PUSHF/D/Q Fv (d64) +9d: POPF/D/Q Fv (d64) +9e: SAHF +9f: LAHF +# 0xa0 - 0xaf +a0: MOV AL,Ob +a1: MOV rAX,Ov +a2: MOV Ob,AL +a3: MOV Ov,rAX +a4: MOVS/B Xb,Yb +a5: MOVS/W/D/Q Xv,Yv +a6: CMPS/B Xb,Yb +a7: CMPS/W/D Xv,Yv +a8: TEST AL,Ib +a9: 
TEST rAX,Iz +aa: STOS/B Yb,AL +ab: STOS/W/D/Q Yv,rAX +ac: LODS/B AL,Xb +ad: LODS/W/D/Q rAX,Xv +ae: SCAS/B AL,Yb +af: SCAS/W/D/Q rAX,Xv +# 0xb0 - 0xbf +b0: MOV AL/R8L,Ib +b1: MOV CL/R9L,Ib +b2: MOV DL/R10L,Ib +b3: MOV BL/R11L,Ib +b4: MOV AH/R12L,Ib +b5: MOV CH/R13L,Ib +b6: MOV DH/R14L,Ib +b7: MOV BH/R15L,Ib +b8: MOV rAX/r8,Iv +b9: MOV rCX/r9,Iv +ba: MOV rDX/r10,Iv +bb: MOV rBX/r11,Iv +bc: MOV rSP/r12,Iv +bd: MOV rBP/r13,Iv +be: MOV rSI/r14,Iv +bf: MOV rDI/r15,Iv +# 0xc0 - 0xcf +c0: Grp2 Eb,Ib (1A) +c1: Grp2 Ev,Ib (1A) +c2: RETN Iw (f64) +c3: RETN +c4: LES Gz,Mp (i64) +c5: LDS Gz,Mp (i64) +c6: Grp11 Eb,Ib (1A) +c7: Grp11 Ev,Iz (1A) +c8: ENTER Iw,Ib +c9: LEAVE (d64) +ca: RETF Iw +cb: RETF +cc: INT3 +cd: INT Ib +ce: INTO (i64) +cf: IRET/D/Q +# 0xd0 - 0xdf +d0: Grp2 Eb,1 (1A) +d1: Grp2 Ev,1 (1A) +d2: Grp2 Eb,CL (1A) +d3: Grp2 Ev,CL (1A) +d4: AAM Ib (i64) +d5: AAD Ib (i64) +d6: +d7: XLAT/XLATB +d8: ESC +d9: ESC +da: ESC +db: ESC +dc: ESC +dd: ESC +de: ESC +df: ESC +# 0xe0 - 0xef +e0: LOOPNE/LOOPNZ Jb (f64) +e1: LOOPE/LOOPZ Jb (f64) +e2: LOOP Jb (f64) +e3: JrCXZ Jb (f64) +e4: IN AL,Ib +e5: IN eAX,Ib +e6: OUT Ib,AL +e7: OUT Ib,eAX +e8: CALL Jz (f64) +e9: JMP-near Jz (f64) +ea: JMP-far Ap (i64) +eb: JMP-short Jb (f64) +ec: IN AL,DX +ed: IN eAX,DX +ee: OUT DX,AL +ef: OUT DX,eAX +# 0xf0 - 0xff +f0: LOCK (Prefix) +f1: +f2: REPNE (Prefix) +f3: REP/REPE (Prefix) +f4: HLT +f5: CMC +f6: Grp3_1 Eb (1A) +f7: Grp3_2 Ev (1A) +f8: CLC +f9: STC +fa: CLI +fb: STI +fc: CLD +fd: STD +fe: Grp4 (1A) +ff: Grp5 (1A) +EndTable + +Table: 2-byte opcode # First Byte is 0x0f +Referrer: 2-byte escape +# 0x0f 0x00-0x0f +00: Grp6 (1A) +01: Grp7 (1A) +02: LAR Gv,Ew +03: LSL Gv,Ew +04: +05: SYSCALL (o64) +06: CLTS +07: SYSRET (o64) +08: INVD +09: WBINVD +0a: +0b: UD2 (1B) +0c: +0d: NOP Ev +0e: +0f: +# 0x0f 0x10-0x1f +10: +11: +12: +13: +14: +15: +16: +17: +18: Grp16 (1A) +19: +1a: +1b: +1c: +1d: +1e: +1f: NOP Ev +# 0x0f 0x20-0x2f +20: MOV Rd,Cd +21: MOV Rd,Dd +22: MOV Cd,Rd +23: MOV Dd,Rd +24: +25: +26: +27: +28: movaps Vps,Wps | movapd Vpd,Wpd (66) +29: movaps Wps,Vps | movapd Wpd,Vpd (66) +2a: +2b: +2c: +2d: +2e: +2f: +# 0x0f 0x30-0x3f +30: WRMSR +31: RDTSC +32: RDMSR +33: RDPMC +34: SYSENTER +35: SYSEXIT +36: +37: GETSEC +38: escape # 3-byte escape 1 +39: +3a: escape # 3-byte escape 2 +3b: +3c: +3d: +3e: +3f: +# 0x0f 0x40-0x4f +40: CMOVO Gv,Ev +41: CMOVNO Gv,Ev +42: CMOVB/C/NAE Gv,Ev +43: CMOVAE/NB/NC Gv,Ev +44: CMOVE/Z Gv,Ev +45: CMOVNE/NZ Gv,Ev +46: CMOVBE/NA Gv,Ev +47: CMOVA/NBE Gv,Ev +48: CMOVS Gv,Ev +49: CMOVNS Gv,Ev +4a: CMOVP/PE Gv,Ev +4b: CMOVNP/PO Gv,Ev +4c: CMOVL/NGE Gv,Ev +4d: CMOVNL/GE Gv,Ev +4e: CMOVLE/NG Gv,Ev +4f: CMOVNLE/G Gv,Ev +# 0x0f 0x50-0x5f +50: +51: +52: +53: +54: +55: +56: +57: +58: +59: +5a: +5b: +5c: +5d: +5e: +5f: +# 0x0f 0x60-0x6f +60: +61: +62: +63: +64: +65: +66: +67: +68: +69: +6a: +6b: +6c: +6d: +6e: +6f: +# 0x0f 0x70-0x7f +70: +71: Grp12 (1A) +72: Grp13 (1A) +73: Grp14 (1A) +74: +75: +76: +77: +78: VMREAD Ed/q,Gd/q +79: VMWRITE Gd/q,Ed/q +7a: +7b: +7c: +7d: +7e: +7f: +# 0x0f 0x80-0x8f +80: JO Jz (f64) +81: JNO Jz (f64) +82: JB/JNAE/JC Jz (f64) +83: JNB/JAE/JNC Jz (f64) +84: JZ/JE Jz (f64) +85: JNZ/JNE Jz (f64) +86: JBE/JNA Jz (f64) +87: JNBE/JA Jz (f64) +88: JS Jz (f64) +89: JNS Jz (f64) +8a: JP/JPE Jz (f64) +8b: JNP/JPO Jz (f64) +8c: JL/JNGE Jz (f64) +8d: JNL/JGE Jz (f64) +8e: JLE/JNG Jz (f64) +8f: JNLE/JG Jz (f64) +# 0x0f 0x90-0x9f +90: SETO Eb +91: SETNO Eb +92: SETB/C/NAE Eb +93: SETAE/NB/NC Eb +94: SETE/Z Eb +95: SETNE/NZ Eb +96: SETBE/NA Eb +97: SETA/NBE Eb +98: SETS Eb +99: SETNS Eb 
+9a: SETP/PE Eb +9b: SETNP/PO Eb +9c: SETL/NGE Eb +9d: SETNL/GE Eb +9e: SETLE/NG Eb +9f: SETNLE/G Eb +# 0x0f 0xa0-0xaf +a0: PUSH FS (d64) +a1: POP FS (d64) +a2: CPUID +a3: BT Ev,Gv +a4: SHLD Ev,Gv,Ib +a5: SHLD Ev,Gv,CL +a6: +a7: GrpRNG +a8: PUSH GS (d64) +a9: POP GS (d64) +aa: RSM +ab: BTS Ev,Gv +ac: SHRD Ev,Gv,Ib +ad: SHRD Ev,Gv,CL +ae: Grp15 (1A),(1C) +af: IMUL Gv,Ev +# 0x0f 0xb0-0xbf +b0: CMPXCHG Eb,Gb +b1: CMPXCHG Ev,Gv +b2: LSS Gv,Mp +b3: BTR Ev,Gv +b4: LFS Gv,Mp +b5: LGS Gv,Mp +b6: MOVZX Gv,Eb +b7: MOVZX Gv,Ew +b8: JMPE | POPCNT Gv,Ev (F3) +b9: Grp10 (1A) +ba: Grp8 Ev,Ib (1A) +bb: BTC Ev,Gv +bc: BSF Gv,Ev +bd: BSR Gv,Ev +be: MOVSX Gv,Eb +bf: MOVSX Gv,Ew +# 0x0f 0xc0-0xcf +c0: XADD Eb,Gb +c1: XADD Ev,Gv +c2: +c3: movnti Md/q,Gd/q +c4: +c5: +c6: +c7: Grp9 (1A) +c8: BSWAP RAX/EAX/R8/R8D +c9: BSWAP RCX/ECX/R9/R9D +ca: BSWAP RDX/EDX/R10/R10D +cb: BSWAP RBX/EBX/R11/R11D +cc: BSWAP RSP/ESP/R12/R12D +cd: BSWAP RBP/EBP/R13/R13D +ce: BSWAP RSI/ESI/R14/R14D +cf: BSWAP RDI/EDI/R15/R15D +# 0x0f 0xd0-0xdf +d0: +d1: +d2: +d3: +d4: +d5: +d6: +d7: +d8: +d9: +da: +db: +dc: +dd: +de: +df: +# 0x0f 0xe0-0xef +e0: +e1: +e2: +e3: +e4: +e5: +e6: +e7: +e8: +e9: +ea: +eb: +ec: +ed: +ee: +ef: +# 0x0f 0xf0-0xff +f0: +f1: +f2: +f3: +f4: +f5: +f6: +f7: +f8: +f9: +fa: +fb: +fc: +fd: +fe: +ff: +EndTable + +Table: 3-byte opcode 1 +Referrer: 3-byte escape 1 +80: INVEPT Gd/q,Mdq (66) +81: INVPID Gd/q,Mdq (66) +f0: MOVBE Gv,Mv | CRC32 Gd,Eb (F2) +f1: MOVBE Mv,Gv | CRC32 Gd,Ev (F2) +EndTable + +Table: 3-byte opcode 2 +Referrer: 3-byte escape 2 +# all opcode is for SSE +EndTable + +GrpTable: Grp1 +0: ADD +1: OR +2: ADC +3: SBB +4: AND +5: SUB +6: XOR +7: CMP +EndTable + +GrpTable: Grp1A +0: POP +EndTable + +GrpTable: Grp2 +0: ROL +1: ROR +2: RCL +3: RCR +4: SHL/SAL +5: SHR +6: +7: SAR +EndTable + +GrpTable: Grp3_1 +0: TEST Eb,Ib +1: +2: NOT Eb +3: NEG Eb +4: MUL AL,Eb +5: IMUL AL,Eb +6: DIV AL,Eb +7: IDIV AL,Eb +EndTable + +GrpTable: Grp3_2 +0: TEST Ev,Iz +1: +2: NOT Ev +3: NEG Ev +4: MUL rAX,Ev +5: IMUL rAX,Ev +6: DIV rAX,Ev +7: IDIV rAX,Ev +EndTable + +GrpTable: Grp4 +0: INC Eb +1: DEC Eb +EndTable + +GrpTable: Grp5 +0: INC Ev +1: DEC Ev +2: CALLN Ev (f64) +3: CALLF Ep +4: JMPN Ev (f64) +5: JMPF Ep +6: PUSH Ev (d64) +7: +EndTable + +GrpTable: Grp6 +0: SLDT Rv/Mw +1: STR Rv/Mw +2: LLDT Ew +3: LTR Ew +4: VERR Ew +5: VERW Ew +EndTable + +GrpTable: Grp7 +0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) +1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001) +2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) +3: LIDT Ms +4: SMSW Mw/Rv +5: +6: LMSW Ew +7: INVLPG Mb | SWAPGS (o64),(000),(11B) | RDTSCP (001),(11B) +EndTable + +GrpTable: Grp8 +4: BT +5: BTS +6: BTR +7: BTC +EndTable + +GrpTable: Grp9 +1: CMPXCHG8B/16B Mq/Mdq +6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) +7: VMPTRST Mq +EndTable + +GrpTable: Grp10 +EndTable + +GrpTable: Grp11 +0: MOV +EndTable + +GrpTable: Grp12 +EndTable + +GrpTable: Grp13 +EndTable + +GrpTable: Grp14 +EndTable + +GrpTable: Grp15 +0: fxsave +1: fxstor +2: ldmxcsr +3: stmxcsr +4: XSAVE +5: XRSTOR | lfence (11B) +6: mfence (11B) +7: clflush | sfence (11B) +EndTable + +GrpTable: Grp16 +0: prefetch NTA +1: prefetch T0 +2: prefetch T1 +3: prefetch T2 +EndTable + +GrpTable: GrpRNG +0: xstore-rng +1: xcrypt-ecb +2: xcrypt-cbc +4: xcrypt-cfb +5: xcrypt-ofb +EndTable diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk new file mode 100644 index 00000000000..93b62c92d04 --- /dev/null +++ 
b/arch/x86/tools/gen-insn-attr-x86.awk @@ -0,0 +1,314 @@ +#!/bin/awk -f +# gen-insn-attr-x86.awk: Instruction attribute table generator +# Written by Masami Hiramatsu +# +# Usage: awk -f gen-insn-attr-x86.awk x86-opcode-map.txt > inat-tables.c + +BEGIN { + print "/* x86 opcode map generated from x86-opcode-map.txt */" + print "/* Do not change this code. */" + ggid = 1 + geid = 1 + + opnd_expr = "^[[:alpha:]]" + ext_expr = "^\\(" + sep_expr = "^\\|$" + group_expr = "^Grp[[:alnum:]]+" + + imm_expr = "^[IJAO][[:lower:]]" + imm_flag["Ib"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)" + imm_flag["Jb"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)" + imm_flag["Iw"] = "INAT_MAKE_IMM(INAT_IMM_WORD)" + imm_flag["Id"] = "INAT_MAKE_IMM(INAT_IMM_DWORD)" + imm_flag["Iq"] = "INAT_MAKE_IMM(INAT_IMM_QWORD)" + imm_flag["Ap"] = "INAT_MAKE_IMM(INAT_IMM_PTR)" + imm_flag["Iz"] = "INAT_MAKE_IMM(INAT_IMM_VWORD32)" + imm_flag["Jz"] = "INAT_MAKE_IMM(INAT_IMM_VWORD32)" + imm_flag["Iv"] = "INAT_MAKE_IMM(INAT_IMM_VWORD)" + imm_flag["Ob"] = "INAT_MOFFSET" + imm_flag["Ov"] = "INAT_MOFFSET" + + modrm_expr = "^([CDEGMNPQRSUVW][[:lower:]]+|NTA|T[012])" + force64_expr = "\\([df]64\\)" + rex_expr = "^REX(\\.[XRWB]+)*" + fpu_expr = "^ESC" # TODO + + lprefix1_expr = "\\(66\\)" + delete lptable1 + lprefix2_expr = "\\(F2\\)" + delete lptable2 + lprefix3_expr = "\\(F3\\)" + delete lptable3 + max_lprefix = 4 + + prefix_expr = "\\(Prefix\\)" + prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ" + prefix_num["REPNE"] = "INAT_PFX_REPNE" + prefix_num["REP/REPE"] = "INAT_PFX_REPE" + prefix_num["LOCK"] = "INAT_PFX_LOCK" + prefix_num["SEG=CS"] = "INAT_PFX_CS" + prefix_num["SEG=DS"] = "INAT_PFX_DS" + prefix_num["SEG=ES"] = "INAT_PFX_ES" + prefix_num["SEG=FS"] = "INAT_PFX_FS" + prefix_num["SEG=GS"] = "INAT_PFX_GS" + prefix_num["SEG=SS"] = "INAT_PFX_SS" + prefix_num["Address-Size"] = "INAT_PFX_ADDRSZ" + + delete table + delete etable + delete gtable + eid = -1 + gid = -1 +} + +function semantic_error(msg) { + print "Semantic error at " NR ": " msg > "/dev/stderr" + exit 1 +} + +function debug(msg) { + print "DEBUG: " msg +} + +function array_size(arr, i,c) { + c = 0 + for (i in arr) + c++ + return c +} + +/^Table:/ { + print "/* " $0 " */" +} + +/^Referrer:/ { + if (NF == 1) { + # primary opcode table + tname = "inat_primary_table" + eid = -1 + } else { + # escape opcode table + ref = "" + for (i = 2; i <= NF; i++) + ref = ref $i + eid = escape[ref] + tname = sprintf("inat_escape_table_%d", eid) + } +} + +/^GrpTable:/ { + print "/* " $0 " */" + if (!($2 in group)) + semantic_error("No group: " $2 ) + gid = group[$2] + tname = "inat_group_table_" gid +} + +function print_table(tbl,name,fmt,n) +{ + print "const insn_attr_t " name " = {" + for (i = 0; i < n; i++) { + id = sprintf(fmt, i) + if (tbl[id]) + print " [" id "] = " tbl[id] "," + } + print "};" +} + +/^EndTable/ { + if (gid != -1) { + # print group tables + if (array_size(table) != 0) { + print_table(table, tname "[INAT_GROUP_TABLE_SIZE]", + "0x%x", 8) + gtable[gid,0] = tname + } + if (array_size(lptable1) != 0) { + print_table(lptable1, tname "_1[INAT_GROUP_TABLE_SIZE]", + "0x%x", 8) + gtable[gid,1] = tname "_1" + } + if (array_size(lptable2) != 0) { + print_table(lptable2, tname "_2[INAT_GROUP_TABLE_SIZE]", + "0x%x", 8) + gtable[gid,2] = tname "_2" + } + if (array_size(lptable3) != 0) { + print_table(lptable3, tname "_3[INAT_GROUP_TABLE_SIZE]", + "0x%x", 8) + gtable[gid,3] = tname "_3" + } + } else { + # print primary/escaped tables + if (array_size(table) != 0) { + print_table(table, tname 
"[INAT_OPCODE_TABLE_SIZE]", + "0x%02x", 256) + etable[eid,0] = tname + } + if (array_size(lptable1) != 0) { + print_table(lptable1,tname "_1[INAT_OPCODE_TABLE_SIZE]", + "0x%02x", 256) + etable[eid,1] = tname "_1" + } + if (array_size(lptable2) != 0) { + print_table(lptable2,tname "_2[INAT_OPCODE_TABLE_SIZE]", + "0x%02x", 256) + etable[eid,2] = tname "_2" + } + if (array_size(lptable3) != 0) { + print_table(lptable3,tname "_3[INAT_OPCODE_TABLE_SIZE]", + "0x%02x", 256) + etable[eid,3] = tname "_3" + } + } + print "" + delete table + delete lptable1 + delete lptable2 + delete lptable3 + gid = -1 + eid = -1 +} + +function add_flags(old,new) { + if (old && new) + return old " | " new + else if (old) + return old + else + return new +} + +# convert operands to flags. +function convert_operands(opnd, i,imm,mod) +{ + imm = null + mod = null + for (i in opnd) { + i = opnd[i] + if (match(i, imm_expr) == 1) { + if (!imm_flag[i]) + semantic_error("Unknown imm opnd: " i) + if (imm) { + if (i != "Ib") + semantic_error("Second IMM error") + imm = add_flags(imm, "INAT_SCNDIMM") + } else + imm = imm_flag[i] + } else if (match(i, modrm_expr)) + mod = "INAT_MODRM" + } + return add_flags(imm, mod) +} + +/^[0-9a-f]+\:/ { + if (NR == 1) + next + # get index + idx = "0x" substr($1, 1, index($1,":") - 1) + if (idx in table) + semantic_error("Redefine " idx " in " tname) + + # check if escaped opcode + if ("escape" == $2) { + if ($3 != "#") + semantic_error("No escaped name") + ref = "" + for (i = 4; i <= NF; i++) + ref = ref $i + if (ref in escape) + semantic_error("Redefine escape (" ref ")") + escape[ref] = geid + geid++ + table[idx] = "INAT_MAKE_ESCAPE(" escape[ref] ")" + next + } + + variant = null + # converts + i = 2 + while (i <= NF) { + opcode = $(i++) + delete opnds + ext = null + flags = null + opnd = null + # parse one opcode + if (match($i, opnd_expr)) { + opnd = $i + split($(i++), opnds, ",") + flags = convert_operands(opnds) + } + if (match($i, ext_expr)) + ext = $(i++) + if (match($i, sep_expr)) + i++ + else if (i < NF) + semantic_error($i " is not a separator") + + # check if group opcode + if (match(opcode, group_expr)) { + if (!(opcode in group)) { + group[opcode] = ggid + ggid++ + } + flags = add_flags(flags, "INAT_MAKE_GROUP(" group[opcode] ")") + } + # check force(or default) 64bit + if (match(ext, force64_expr)) + flags = add_flags(flags, "INAT_FORCE64") + + # check REX prefix + if (match(opcode, rex_expr)) + flags = add_flags(flags, "INAT_REXPFX") + + # check coprocessor escape : TODO + if (match(opcode, fpu_expr)) + flags = add_flags(flags, "INAT_MODRM") + + # check prefixes + if (match(ext, prefix_expr)) { + if (!prefix_num[opcode]) + semantic_error("Unknown prefix: " opcode) + flags = add_flags(flags, "INAT_MAKE_PREFIX(" prefix_num[opcode] ")") + } + if (length(flags) == 0) + continue + # check if last prefix + if (match(ext, lprefix1_expr)) { + lptable1[idx] = add_flags(lptable1[idx],flags) + variant = "INAT_VARIANT" + } else if (match(ext, lprefix2_expr)) { + lptable2[idx] = add_flags(lptable2[idx],flags) + variant = "INAT_VARIANT" + } else if (match(ext, lprefix3_expr)) { + lptable3[idx] = add_flags(lptable3[idx],flags) + variant = "INAT_VARIANT" + } else { + table[idx] = add_flags(table[idx],flags) + } + } + if (variant) + table[idx] = add_flags(table[idx],variant) +} + +END { + # print escape opcode map's array + print "/* Escape opcode map array */" + print "const insn_attr_t const *inat_escape_tables[INAT_ESC_MAX + 1]" \ + "[INAT_LPREFIX_MAX + 1] = {" + for (i = 0; i < geid; i++) 
+ for (j = 0; j < max_lprefix; j++) + if (etable[i,j]) + print " ["i"]["j"] = "etable[i,j]"," + print "};\n" + # print group opcode map's array + print "/* Group opcode map array */" + print "const insn_attr_t const *inat_group_tables[INAT_GRP_MAX + 1]"\ + "[INAT_LPREFIX_MAX + 1] = {" + for (i = 0; i < ggid; i++) + for (j = 0; j < max_lprefix; j++) + if (gtable[i,j]) + print " ["i"]["j"] = "gtable[i,j]"," + print "};" +} From ca0e9badd1a39fecdd235f4bf1481b9da756e27b Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 13 Aug 2009 16:34:21 -0400 Subject: [PATCH 028/470] x86: X86 instruction decoder build-time selftest MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a user-space selftest of x86 instruction decoder at kernel build time. When CONFIG_X86_DECODER_SELFTEST=y, Kbuild builds a test harness of x86 instruction decoder and performs it after building vmlinux. The test compares the results of objdump and x86 instruction decoder code and check there are no differences. Signed-off-by: Masami Hiramatsu Signed-off-by: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Avi Kivity Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: H. Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: PrzemysÅ‚aw PaweÅ‚czyk Cc: Roland McGrath Cc: Sam Ravnborg Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi Cc: Vegard Nossum LKML-Reference: <20090813203421.31965.29006.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- arch/x86/Kconfig.debug | 9 +++ arch/x86/Makefile | 3 + arch/x86/tools/Makefile | 15 +++++ arch/x86/tools/distill.awk | 42 +++++++++++++ arch/x86/tools/test_get_len.c | 113 ++++++++++++++++++++++++++++++++++ 5 files changed, 182 insertions(+) create mode 100644 arch/x86/tools/Makefile create mode 100644 arch/x86/tools/distill.awk create mode 100644 arch/x86/tools/test_get_len.c diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index d105f29bb6b..7d0b681a132 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -186,6 +186,15 @@ config X86_DS_SELFTEST config HAVE_MMIOTRACE_SUPPORT def_bool y +config X86_DECODER_SELFTEST + bool "x86 instruction decoder selftest" + depends on DEBUG_KERNEL + ---help--- + Perform x86 instruction decoder selftests at build time. + This option is useful for checking the sanity of x86 instruction + decoder code. + If unsure, say "N". + # # IO delay types: # diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 1b68659c41b..5fe16bfd15a 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -154,6 +154,9 @@ all: bzImage KBUILD_IMAGE := $(boot)/bzImage bzImage: vmlinux +ifeq ($(CONFIG_X86_DECODER_SELFTEST),y) + $(Q)$(MAKE) $(build)=arch/x86/tools posttest +endif $(Q)$(MAKE) $(build)=$(boot) $(KBUILD_IMAGE) $(Q)mkdir -p $(objtree)/arch/$(UTS_MACHINE)/boot $(Q)ln -fsn ../../x86/boot/bzImage $(objtree)/arch/$(UTS_MACHINE)/boot/$@ diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile new file mode 100644 index 00000000000..3dd626b99dc --- /dev/null +++ b/arch/x86/tools/Makefile @@ -0,0 +1,15 @@ +PHONY += posttest +quiet_cmd_posttest = TEST $@ + cmd_posttest = $(OBJDUMP) -d $(objtree)/vmlinux | awk -f $(srctree)/arch/x86/tools/distill.awk | $(obj)/test_get_len + +posttest: $(obj)/test_get_len vmlinux + $(call cmd,posttest) + +hostprogs-y := test_get_len + +# -I needed for generated C source and C source which in the kernel tree. 
+HOSTCFLAGS_test_get_len.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ + +# Dependancies are also needed. +$(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c + diff --git a/arch/x86/tools/distill.awk b/arch/x86/tools/distill.awk new file mode 100644 index 00000000000..d433619bb86 --- /dev/null +++ b/arch/x86/tools/distill.awk @@ -0,0 +1,42 @@ +#!/bin/awk -f +# Usage: objdump -d a.out | awk -f distill.awk | ./test_get_len +# Distills the disassembly as follows: +# - Removes all lines except the disassembled instructions. +# - For instructions that exceed 1 line (7 bytes), crams all the hex bytes +# into a single line. +# - Remove bad(or prefix only) instructions + +BEGIN { + prev_addr = "" + prev_hex = "" + prev_mnemonic = "" + bad_expr = "(\\(bad\\)|^rex|^.byte|^rep(z|nz)$|^lock$|^es$|^cs$|^ss$|^ds$|^fs$|^gs$|^data(16|32)$|^addr(16|32|64))" + fwait_expr = "^9b " + fwait_str="9b\tfwait" +} + +/^ *[0-9a-f]+:/ { + if (split($0, field, "\t") < 3) { + # This is a continuation of the same insn. + prev_hex = prev_hex field[2] + } else { + # Skip bad instructions + if (match(prev_mnemonic, bad_expr)) + prev_addr = "" + # Split fwait from other f* instructions + if (match(prev_hex, fwait_expr) && prev_mnemonic != "fwait") { + printf "%s\t%s\n", prev_addr, fwait_str + sub(fwait_expr, "", prev_hex) + } + if (prev_addr != "") + printf "%s\t%s\t%s\n", prev_addr, prev_hex, prev_mnemonic + prev_addr = field[1] + prev_hex = field[2] + prev_mnemonic = field[3] + } +} + +END { + if (prev_addr != "") + printf "%s\t%s\t%s\n", prev_addr, prev_hex, prev_mnemonic +} diff --git a/arch/x86/tools/test_get_len.c b/arch/x86/tools/test_get_len.c new file mode 100644 index 00000000000..1e81adb2d8a --- /dev/null +++ b/arch/x86/tools/test_get_len.c @@ -0,0 +1,113 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2009 + */ + +#include +#include +#include +#include + +#ifdef __x86_64__ +#define CONFIG_X86_64 +#else +#define CONFIG_X86_32 +#endif +#define unlikely(cond) (cond) + +#include +#include +#include + +/* + * Test of instruction analysis in general and insn_get_length() in + * particular. See if insn_get_length() and the disassembler agree + * on the length of each instruction in an elf disassembly. 
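 * (Each line fed to this program is one distilled instruction in the
 *  tab-separated form emitted by distill.awk, roughly
 *  "<address>:<tab><hex bytes><tab><mnemonic>"; only the address and
 *  hex-byte fields are examined, the mnemonic reappears only in error
 *  output.)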
+ * + * Usage: objdump -d a.out | awk -f distill.awk | ./test_get_len + */ + +const char *prog; + +static void usage(void) +{ + fprintf(stderr, "Usage: objdump -d a.out | awk -f distill.awk |" + " ./test_get_len\n"); + exit(1); +} + +static void malformed_line(const char *line, int line_nr) +{ + fprintf(stderr, "%s: malformed line %d:\n%s", prog, line_nr, line); + exit(3); +} + +#define BUFSIZE 256 + +int main(int argc, char **argv) +{ + char line[BUFSIZE]; + unsigned char insn_buf[16]; + struct insn insn; + int insns = 0; + + prog = argv[0]; + if (argc > 1) + usage(); + + while (fgets(line, BUFSIZE, stdin)) { + char copy[BUFSIZE], *s, *tab1, *tab2; + int nb = 0; + unsigned int b; + + insns++; + memset(insn_buf, 0, 16); + strcpy(copy, line); + tab1 = strchr(copy, '\t'); + if (!tab1) + malformed_line(line, insns); + s = tab1 + 1; + s += strspn(s, " "); + tab2 = strchr(s, '\t'); + if (!tab2) + malformed_line(line, insns); + *tab2 = '\0'; /* Characters beyond tab2 aren't examined */ + while (s < tab2) { + if (sscanf(s, "%x", &b) == 1) { + insn_buf[nb++] = (unsigned char) b; + s += 3; + } else + break; + } + /* Decode an instruction */ +#ifdef __x86_64__ + insn_init(&insn, insn_buf, 1); +#else + insn_init(&insn, insn_buf, 0); +#endif + insn_get_length(&insn); + if (insn.length != nb) { + fprintf(stderr, "Error: %s", line); + fprintf(stderr, "Error: objdump says %d bytes, but " + "insn_get_length() says %d (attr:%x)\n", nb, + insn.length, insn.attr); + exit(2); + } + } + fprintf(stderr, "Succeed: decoded and checked %d instructions\n", + insns); + return 0; +} From b46b3d70c9c017d7c4ec49f7f3ffd0af5a622277 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 13 Aug 2009 16:34:28 -0400 Subject: [PATCH 029/470] kprobes: Checks probe address is instruction boudary on x86 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Ensure safeness of inserting kprobes by checking whether the specified address is at the first byte of an instruction on x86. This is done by decoding probed function from its head to the probe point. Signed-off-by: Masami Hiramatsu Acked-by: Ananth N Mavinakayanahalli Cc: Avi Kivity Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: H. Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: Jim Keniston Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: PrzemysÅ‚aw PaweÅ‚czyk Cc: Roland McGrath Cc: Sam Ravnborg Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi Cc: Vegard Nossum LKML-Reference: <20090813203428.31965.21939.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/kprobes.c | 73 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 7b5169d2b00..aa15f3e1f64 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -48,12 +48,14 @@ #include #include #include +#include #include #include #include #include #include +#include void jprobe_return_end(void); @@ -244,6 +246,75 @@ retry: } } +/* Recover the probed instruction at addr for further analysis. */ +static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr) +{ + struct kprobe *kp; + kp = get_kprobe((void *)addr); + if (!kp) + return -EINVAL; + + /* + * Basically, kp->ainsn.insn has an original instruction. + * However, RIP-relative instruction can not do single-stepping + * at different place, fix_riprel() tweaks the displacement of + * that instruction. 
In that case, we can't recover the instruction + * from the kp->ainsn.insn. + * + * On the other hand, kp->opcode has a copy of the first byte of + * the probed instruction, which is overwritten by int3. And + * the instruction at kp->addr is not modified by kprobes except + * for the first byte, we can recover the original instruction + * from it and kp->opcode. + */ + memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); + buf[0] = kp->opcode; + return 0; +} + +/* Dummy buffers for kallsyms_lookup */ +static char __dummy_buf[KSYM_NAME_LEN]; + +/* Check if paddr is at an instruction boundary */ +static int __kprobes can_probe(unsigned long paddr) +{ + int ret; + unsigned long addr, offset = 0; + struct insn insn; + kprobe_opcode_t buf[MAX_INSN_SIZE]; + + if (!kallsyms_lookup(paddr, NULL, &offset, NULL, __dummy_buf)) + return 0; + + /* Decode instructions */ + addr = paddr - offset; + while (addr < paddr) { + kernel_insn_init(&insn, (void *)addr); + insn_get_opcode(&insn); + + /* + * Check if the instruction has been modified by another + * kprobe, in which case we replace the breakpoint by the + * original instruction in our buffer. + */ + if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) { + ret = recover_probed_instruction(buf, addr); + if (ret) + /* + * Another debugging subsystem might insert + * this breakpoint. In that case, we can't + * recover it. + */ + return 0; + kernel_insn_init(&insn, buf); + } + insn_get_length(&insn); + addr += insn.length; + } + + return (addr == paddr); +} + /* * Returns non-zero if opcode modifies the interrupt flag. */ @@ -359,6 +430,8 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p) int __kprobes arch_prepare_kprobe(struct kprobe *p) { + if (!can_probe((unsigned long)p->addr)) + return -EILSEQ; /* insn: must be on special executable page on x86. */ p->ainsn.insn = get_insn_slot(); if (!p->ainsn.insn) From 89ae465b0ee470f7d3f8a1c61353445c3acbbe2a Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 13 Aug 2009 16:34:36 -0400 Subject: [PATCH 030/470] kprobes: Cleanup fix_riprel() using insn decoder on x86 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Cleanup fix_riprel() in arch/x86/kernel/kprobes.c by using the new x86 instruction decoder instead of using comparisons with raw ad hoc numeric opcodes. Signed-off-by: Masami Hiramatsu Cc: Ananth N Mavinakayanahalli Cc: Avi Kivity Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: H. 
Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: Jim Keniston Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: PrzemysÅ‚aw PaweÅ‚czyk Cc: Roland McGrath Cc: Sam Ravnborg Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi Cc: Vegard Nossum LKML-Reference: <20090813203436.31965.34374.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/kprobes.c | 128 +++++++------------------------------- 1 file changed, 23 insertions(+), 105 deletions(-) diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index aa15f3e1f64..16ae9610f6f 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -108,50 +108,6 @@ static const u32 twobyte_is_boostable[256 / 32] = { /* ----------------------------------------------- */ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ }; -static const u32 onebyte_has_modrm[256 / 32] = { - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ - /* ----------------------------------------------- */ - W(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 00 */ - W(0x10, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) , /* 10 */ - W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 20 */ - W(0x30, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) , /* 30 */ - W(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 40 */ - W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */ - W(0x60, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0) | /* 60 */ - W(0x70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 70 */ - W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */ - W(0x90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 90 */ - W(0xa0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* a0 */ - W(0xb0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* b0 */ - W(0xc0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) | /* c0 */ - W(0xd0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */ - W(0xe0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* e0 */ - W(0xf0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) /* f0 */ - /* ----------------------------------------------- */ - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ -}; -static const u32 twobyte_has_modrm[256 / 32] = { - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ - /* ----------------------------------------------- */ - W(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1) | /* 0f */ - W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0) , /* 1f */ - W(0x20, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 2f */ - W(0x30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 3f */ - W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 4f */ - W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 5f */ - W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 6f */ - W(0x70, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1) , /* 7f */ - W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 8f */ - W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 9f */ - W(0xa0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) | /* af */ - W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* bf */ - W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) | /* cf */ - W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* df */ - W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* ef */ - W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0) /* ff */ - /* ----------------------------------------------- */ - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ -}; #undef W struct kretprobe_blackpoint 
kretprobe_blacklist[] = { @@ -348,68 +304,30 @@ static int __kprobes is_IF_modifier(kprobe_opcode_t *insn) static void __kprobes fix_riprel(struct kprobe *p) { #ifdef CONFIG_X86_64 - u8 *insn = p->ainsn.insn; - s64 disp; - int need_modrm; + struct insn insn; + kernel_insn_init(&insn, p->ainsn.insn); - /* Skip legacy instruction prefixes. */ - while (1) { - switch (*insn) { - case 0x66: - case 0x67: - case 0x2e: - case 0x3e: - case 0x26: - case 0x64: - case 0x65: - case 0x36: - case 0xf0: - case 0xf3: - case 0xf2: - ++insn; - continue; - } - break; - } - - /* Skip REX instruction prefix. */ - if (is_REX_prefix(insn)) - ++insn; - - if (*insn == 0x0f) { - /* Two-byte opcode. */ - ++insn; - need_modrm = test_bit(*insn, - (unsigned long *)twobyte_has_modrm); - } else - /* One-byte opcode. */ - need_modrm = test_bit(*insn, - (unsigned long *)onebyte_has_modrm); - - if (need_modrm) { - u8 modrm = *++insn; - if ((modrm & 0xc7) == 0x05) { - /* %rip+disp32 addressing mode */ - /* Displacement follows ModRM byte. */ - ++insn; - /* - * The copied instruction uses the %rip-relative - * addressing mode. Adjust the displacement for the - * difference between the original location of this - * instruction and the location of the copy that will - * actually be run. The tricky bit here is making sure - * that the sign extension happens correctly in this - * calculation, since we need a signed 32-bit result to - * be sign-extended to 64 bits when it's added to the - * %rip value and yield the same 64-bit result that the - * sign-extension of the original signed 32-bit - * displacement would have given. - */ - disp = (u8 *) p->addr + *((s32 *) insn) - - (u8 *) p->ainsn.insn; - BUG_ON((s64) (s32) disp != disp); /* Sanity check. */ - *(s32 *)insn = (s32) disp; - } + if (insn_rip_relative(&insn)) { + s64 newdisp; + u8 *disp; + insn_get_displacement(&insn); + /* + * The copied instruction uses the %rip-relative addressing + * mode. Adjust the displacement for the difference between + * the original location of this instruction and the location + * of the copy that will actually be run. The tricky bit here + * is making sure that the sign extension happens correctly in + * this calculation, since we need a signed 32-bit result to + * be sign-extended to 64 bits when it's added to the %rip + * value and yield the same 64-bit result that the sign- + * extension of the original signed 32-bit displacement would + * have given. + */ + newdisp = (u8 *) p->addr + (s64) insn.displacement.value - + (u8 *) p->ainsn.insn; + BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */ + disp = (u8 *) p->ainsn.insn + insn_offset_displacement(&insn); + *(s32 *) disp = (s32) newdisp; } #endif } From b1cf540f0e5278ecfe8532557e547d833ed269d7 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 13 Aug 2009 16:34:44 -0400 Subject: [PATCH 031/470] x86: Add pt_regs register and stack access APIs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add following APIs for accessing registers and stack entries from pt_regs. These APIs are required by kprobes-based event tracer on ftrace. Some other debugging tools might be able to use it too. - regs_query_register_offset(const char *name) Query the offset of "name" register. - regs_query_register_name(unsigned int offset) Query the name of register by its offset. - regs_get_register(struct pt_regs *regs, unsigned int offset) Get the value of a register by its offset. 
- regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) Check the address is in the kernel stack. - regs_get_kernel_stack_nth(struct pt_regs *reg, unsigned int nth) Get Nth entry of the kernel stack. (N >= 0) - regs_get_argument_nth(struct pt_regs *reg, unsigned int nth) Get Nth argument at function call. (N >= 0) Signed-off-by: Masami Hiramatsu Cc: linux-arch@vger.kernel.org Cc: Ananth N Mavinakayanahalli Cc: Avi Kivity Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: H. Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: Jim Keniston Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: PrzemysÅ‚aw PaweÅ‚czyk Cc: Roland McGrath Cc: Sam Ravnborg Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi Cc: Vegard Nossum LKML-Reference: <20090813203444.31965.26374.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- arch/x86/include/asm/ptrace.h | 62 +++++++++++++++++++ arch/x86/kernel/ptrace.c | 112 ++++++++++++++++++++++++++++++++++ 2 files changed, 174 insertions(+) diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 0f0d908349a..a3d49dd7d26 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -7,6 +7,7 @@ #ifdef __KERNEL__ #include +#include #endif #ifndef __ASSEMBLY__ @@ -216,6 +217,67 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs) return regs->sp; } +/* Query offset/name of register from its name/offset */ +extern int regs_query_register_offset(const char *name); +extern const char *regs_query_register_name(unsigned int offset); +#define MAX_REG_OFFSET (offsetof(struct pt_regs, ss)) + +/** + * regs_get_register() - get register value from its offset + * @regs: pt_regs from which register value is gotten. + * @offset: offset number of the register. + * + * regs_get_register returns the value of a register whose offset from @regs + * is @offset. The @offset is the offset of the register in struct pt_regs. + * If @offset is bigger than MAX_REG_OFFSET, this returns 0. + */ +static inline unsigned long regs_get_register(struct pt_regs *regs, + unsigned int offset) +{ + if (unlikely(offset > MAX_REG_OFFSET)) + return 0; + return *(unsigned long *)((unsigned long)regs + offset); +} + +/** + * regs_within_kernel_stack() - check the address in the stack + * @regs: pt_regs which contains kernel stack pointer. + * @addr: address which is checked. + * + * regs_within_kenel_stack() checks @addr is within the kernel stack page(s). + * If @addr is within the kernel stack, it returns true. If not, returns false. + */ +static inline int regs_within_kernel_stack(struct pt_regs *regs, + unsigned long addr) +{ + return ((addr & ~(THREAD_SIZE - 1)) == + (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))); +} + +/** + * regs_get_kernel_stack_nth() - get Nth entry of the stack + * @regs: pt_regs which contains kernel stack pointer. + * @n: stack entry number. + * + * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which + * is specifined by @regs. If the @n th entry is NOT in the kernel stack, + * this returns 0. + */ +static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, + unsigned int n) +{ + unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); + addr += n; + if (regs_within_kernel_stack(regs, (unsigned long)addr)) + return *addr; + else + return 0; +} + +/* Get Nth argument at function call */ +extern unsigned long regs_get_argument_nth(struct pt_regs *regs, + unsigned int n); + /* * These are defined as per linux/ptrace.h, which see. 
*/ diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 8d7d5c9c1be..a33a17d5d5c 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -49,6 +49,118 @@ enum x86_regset { REGSET_IOPERM32, }; +struct pt_regs_offset { + const char *name; + int offset; +}; + +#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} +#define REG_OFFSET_END {.name = NULL, .offset = 0} + +static const struct pt_regs_offset regoffset_table[] = { +#ifdef CONFIG_X86_64 + REG_OFFSET_NAME(r15), + REG_OFFSET_NAME(r14), + REG_OFFSET_NAME(r13), + REG_OFFSET_NAME(r12), + REG_OFFSET_NAME(r11), + REG_OFFSET_NAME(r10), + REG_OFFSET_NAME(r9), + REG_OFFSET_NAME(r8), +#endif + REG_OFFSET_NAME(bx), + REG_OFFSET_NAME(cx), + REG_OFFSET_NAME(dx), + REG_OFFSET_NAME(si), + REG_OFFSET_NAME(di), + REG_OFFSET_NAME(bp), + REG_OFFSET_NAME(ax), +#ifdef CONFIG_X86_32 + REG_OFFSET_NAME(ds), + REG_OFFSET_NAME(es), + REG_OFFSET_NAME(fs), + REG_OFFSET_NAME(gs), +#endif + REG_OFFSET_NAME(orig_ax), + REG_OFFSET_NAME(ip), + REG_OFFSET_NAME(cs), + REG_OFFSET_NAME(flags), + REG_OFFSET_NAME(sp), + REG_OFFSET_NAME(ss), + REG_OFFSET_END, +}; + +/** + * regs_query_register_offset() - query register offset from its name + * @name: the name of a register + * + * regs_query_register_offset() returns the offset of a register in struct + * pt_regs from its name. If the name is invalid, this returns -EINVAL; + */ +int regs_query_register_offset(const char *name) +{ + const struct pt_regs_offset *roff; + for (roff = regoffset_table; roff->name != NULL; roff++) + if (!strcmp(roff->name, name)) + return roff->offset; + return -EINVAL; +} + +/** + * regs_query_register_name() - query register name from its offset + * @offset: the offset of a register in struct pt_regs. + * + * regs_query_register_name() returns the name of a register from its + * offset in struct pt_regs. If the @offset is invalid, this returns NULL; + */ +const char *regs_query_register_name(unsigned int offset) +{ + const struct pt_regs_offset *roff; + for (roff = regoffset_table; roff->name != NULL; roff++) + if (roff->offset == offset) + return roff->name; + return NULL; +} + +static const int arg_offs_table[] = { +#ifdef CONFIG_X86_32 + [0] = offsetof(struct pt_regs, ax), + [1] = offsetof(struct pt_regs, dx), + [2] = offsetof(struct pt_regs, cx) +#else /* CONFIG_X86_64 */ + [0] = offsetof(struct pt_regs, di), + [1] = offsetof(struct pt_regs, si), + [2] = offsetof(struct pt_regs, dx), + [3] = offsetof(struct pt_regs, cx), + [4] = offsetof(struct pt_regs, r8), + [5] = offsetof(struct pt_regs, r9) +#endif +}; + +/** + * regs_get_argument_nth() - get Nth argument at function call + * @regs: pt_regs which contains registers at function entry. + * @n: argument number. + * + * regs_get_argument_nth() returns @n th argument of a function call. + * Since usually the kernel stack will be changed right after function entry, + * you must use this at function entry. If the @n th entry is NOT in the + * kernel stack or pt_regs, this returns 0. + */ +unsigned long regs_get_argument_nth(struct pt_regs *regs, unsigned int n) +{ + if (n < ARRAY_SIZE(arg_offs_table)) + return *((unsigned long *)regs + arg_offs_table[n]); + else { + /* + * The typical case: arg n is on the stack. + * (Note: stack[0] = return address, so skip it) + */ + n -= ARRAY_SIZE(arg_offs_table); + return regs_get_kernel_stack_nth(regs, 1 + n); + } +} + /* * does not yet catch signals sent when the child dies. * in exit.c or in signal.c. 
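To make the intended use of these accessors concrete, here is a minimal sketch (not part of the patch; the helper function and the chosen register name are illustrative) of how a debugging handler might combine them:

	#include <linux/kernel.h>
	#include <linux/ptrace.h>	/* regs_query_register_offset() and friends */

	/* Illustrative only: dump a named register, a stack slot and arg 0. */
	static void sketch_dump_context(struct pt_regs *regs)
	{
		int off = regs_query_register_offset("ax");

		if (off >= 0)
			pr_info("ax       = %lx\n", regs_get_register(regs, off));
		pr_info("stack[0] = %lx\n", regs_get_kernel_stack_nth(regs, 0));
		pr_info("arg0     = %lx\n", regs_get_argument_nth(regs, 0));
	}

Note that regs_get_argument_nth() is only meaningful at function entry, since the stack is usually modified right after the call, as the kernel-doc above points out.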
From bd1a5c849bdcc5c89e4a6a18216cd2b9a7a8a78f Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 13 Aug 2009 16:34:53 -0400 Subject: [PATCH 032/470] tracing: Ftrace dynamic ftrace_event_call support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add dynamic ftrace_event_call support to ftrace. Trace engines can add new ftrace_event_call to ftrace on the fly. Each operator function of the call takes an ftrace_event_call data structure as an argument, because these functions may be shared among several ftrace_event_calls. Changes from v13: - Define remove_subsystem_dir() always (revirt a2ca5e03), because trace_remove_event_call() uses it. - Modify syscall tracer because of ftrace_event_call change. [fweisbec@gmail.com: Fixed conflict against latest tracing/core] Signed-off-by: Masami Hiramatsu Cc: Ananth N Mavinakayanahalli Cc: Avi Kivity Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: H. Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: Jim Keniston Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: PrzemysÅ‚aw PaweÅ‚czyk Cc: Roland McGrath Cc: Sam Ravnborg Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi Cc: Vegard Nossum LKML-Reference: <20090813203453.31965.71901.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- include/linux/ftrace_event.h | 19 +++--- include/linux/syscalls.h | 4 +- include/trace/ftrace.h | 16 ++--- include/trace/syscall.h | 11 ++-- kernel/trace/trace_events.c | 121 +++++++++++++++++++++++----------- kernel/trace/trace_export.c | 18 ++--- kernel/trace/trace_syscalls.c | 20 +++--- 7 files changed, 130 insertions(+), 79 deletions(-) diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index ace2da9e0a0..1ab3089b5c5 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -112,12 +112,12 @@ struct ftrace_event_call { struct dentry *dir; struct trace_event *event; int enabled; - int (*regfunc)(void *); - void (*unregfunc)(void *); + int (*regfunc)(struct ftrace_event_call *); + void (*unregfunc)(struct ftrace_event_call *); int id; - int (*raw_init)(void); - int (*show_format)(struct ftrace_event_call *call, - struct trace_seq *s); + int (*raw_init)(struct ftrace_event_call *); + int (*show_format)(struct ftrace_event_call *, + struct trace_seq *); int (*define_fields)(struct ftrace_event_call *); struct list_head fields; int filter_active; @@ -147,11 +147,12 @@ enum { FILTER_PTR_STRING, }; -extern int trace_define_field(struct ftrace_event_call *call, - const char *type, const char *name, - int offset, int size, int is_signed, - int filter_type); extern int trace_define_common_fields(struct ftrace_event_call *call); +extern int trace_define_field(struct ftrace_event_call *call, char *type, + char *name, int offset, int size, int is_signed, + int filter_type); +extern int trace_add_event_call(struct ftrace_event_call *call); +extern void trace_remove_event_call(struct ftrace_event_call *call); #define is_signed_type(type) (((type)(-1)) < 0) diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index f124c899555..646102eeff9 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -165,7 +165,7 @@ static void prof_sysexit_disable_##sname(struct ftrace_event_call *event_call) \ struct trace_event enter_syscall_print_##sname = { \ .trace = print_syscall_enter, \ }; \ - static int init_enter_##sname(void) \ + static int init_enter_##sname(struct ftrace_event_call *call) \ { \ int num, id; \ num = 
syscall_name_to_nr("sys"#sname); \ @@ -202,7 +202,7 @@ static void prof_sysexit_disable_##sname(struct ftrace_event_call *event_call) \ struct trace_event exit_syscall_print_##sname = { \ .trace = print_syscall_exit, \ }; \ - static int init_exit_##sname(void) \ + static int init_exit_##sname(struct ftrace_event_call *call) \ { \ int num, id; \ num = syscall_name_to_nr("sys"#sname); \ diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 360a77ad79e..f2bd7a8f8e8 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -434,7 +434,7 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\ * event_trace_printk(_RET_IP_, ": " ); * } * - * static int ftrace_reg_event_(void) + * static int ftrace_reg_event_(struct ftrace_event_call *unused) * { * int ret; * @@ -445,7 +445,7 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\ * return ret; * } * - * static void ftrace_unreg_event_(void) + * static void ftrace_unreg_event_(struct ftrace_event_call *unused) * { * unregister_trace_(ftrace_event_); * } @@ -478,7 +478,7 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\ * trace_current_buffer_unlock_commit(event, irq_flags, pc); * } * - * static int ftrace_raw_reg_event_(void) + * static int ftrace_raw_reg_event_(struct ftrace_event_call *unused) * { * int ret; * @@ -489,7 +489,7 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\ * return ret; * } * - * static void ftrace_unreg_event_(void) + * static void ftrace_unreg_event_(struct ftrace_event_call *unused) * { * unregister_trace_(ftrace_raw_event_); * } @@ -498,7 +498,7 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\ * .trace = ftrace_raw_output_, <-- stage 2 * }; * - * static int ftrace_raw_init_event_(void) + * static int ftrace_raw_init_event_(struct ftrace_event_call *unused) * { * int id; * @@ -592,7 +592,7 @@ static void ftrace_raw_event_##call(proto) \ trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \ } \ \ -static int ftrace_raw_reg_event_##call(void *ptr) \ +static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\ { \ int ret; \ \ @@ -603,7 +603,7 @@ static int ftrace_raw_reg_event_##call(void *ptr) \ return ret; \ } \ \ -static void ftrace_raw_unreg_event_##call(void *ptr) \ +static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)\ { \ unregister_trace_##call(ftrace_raw_event_##call); \ } \ @@ -612,7 +612,7 @@ static struct trace_event ftrace_event_type_##call = { \ .trace = ftrace_raw_output_##call, \ }; \ \ -static int ftrace_raw_init_event_##call(void) \ +static int ftrace_raw_init_event_##call(struct ftrace_event_call *unused)\ { \ int id; \ \ diff --git a/include/trace/syscall.h b/include/trace/syscall.h index 5dc283ba5ae..e290b86f616 100644 --- a/include/trace/syscall.h +++ b/include/trace/syscall.h @@ -39,16 +39,19 @@ void set_syscall_enter_id(int num, int id); void set_syscall_exit_id(int num, int id); extern struct trace_event event_syscall_enter; extern struct trace_event event_syscall_exit; -extern int reg_event_syscall_enter(void *ptr); -extern void unreg_event_syscall_enter(void *ptr); -extern int reg_event_syscall_exit(void *ptr); -extern void unreg_event_syscall_exit(void *ptr); + extern int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s); extern int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s); extern int syscall_enter_define_fields(struct 
ftrace_event_call *call); extern int syscall_exit_define_fields(struct ftrace_event_call *call); +extern int reg_event_syscall_enter(struct ftrace_event_call *call); +extern void unreg_event_syscall_enter(struct ftrace_event_call *call); +extern int reg_event_syscall_exit(struct ftrace_event_call *call); +extern void unreg_event_syscall_exit(struct ftrace_event_call *call); +extern int +ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s); enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags); enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags); #endif diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index d33bcdeffe6..8079bb511c4 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -27,8 +27,8 @@ DEFINE_MUTEX(event_mutex); LIST_HEAD(ftrace_events); -int trace_define_field(struct ftrace_event_call *call, const char *type, - const char *name, int offset, int size, int is_signed, +int trace_define_field(struct ftrace_event_call *call, char *type, + char *name, int offset, int size, int is_signed, int filter_type) { struct ftrace_event_field *field; @@ -92,9 +92,7 @@ int trace_define_common_fields(struct ftrace_event_call *call) } EXPORT_SYMBOL_GPL(trace_define_common_fields); -#ifdef CONFIG_MODULES - -static void trace_destroy_fields(struct ftrace_event_call *call) +void trace_destroy_fields(struct ftrace_event_call *call) { struct ftrace_event_field *field, *next; @@ -106,8 +104,6 @@ static void trace_destroy_fields(struct ftrace_event_call *call) } } -#endif /* CONFIG_MODULES */ - static void ftrace_event_enable_disable(struct ftrace_event_call *call, int enable) { @@ -116,14 +112,14 @@ static void ftrace_event_enable_disable(struct ftrace_event_call *call, if (call->enabled) { call->enabled = 0; tracing_stop_cmdline_record(); - call->unregfunc(call->data); + call->unregfunc(call); } break; case 1: if (!call->enabled) { call->enabled = 1; tracing_start_cmdline_record(); - call->regfunc(call->data); + call->regfunc(call); } break; } @@ -991,27 +987,43 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, return 0; } -#define for_each_event(event, start, end) \ - for (event = start; \ - (unsigned long)event < (unsigned long)end; \ - event++) +static int __trace_add_event_call(struct ftrace_event_call *call) +{ + struct dentry *d_events; + int ret; -#ifdef CONFIG_MODULES + if (!call->name) + return -EINVAL; -static LIST_HEAD(ftrace_module_file_list); + if (call->raw_init) { + ret = call->raw_init(call); + if (ret < 0) { + if (ret != -ENOSYS) + pr_warning("Could not initialize trace " + "events/%s\n", call->name); + return ret; + } + } -/* - * Modules must own their file_operations to keep up with - * reference counting. 
- */ -struct ftrace_module_file_ops { - struct list_head list; - struct module *mod; - struct file_operations id; - struct file_operations enable; - struct file_operations format; - struct file_operations filter; -}; + d_events = event_trace_events_dir(); + if (!d_events) + return -ENOENT; + + list_add(&call->list, &ftrace_events); + return event_create_dir(call, d_events, &ftrace_event_id_fops, + &ftrace_enable_fops, &ftrace_event_filter_fops, + &ftrace_event_format_fops); +} + +/* Add an additional event_call dynamically */ +int trace_add_event_call(struct ftrace_event_call *call) +{ + int ret; + mutex_lock(&event_mutex); + ret = __trace_add_event_call(call); + mutex_unlock(&event_mutex); + return ret; +} static void remove_subsystem_dir(const char *name) { @@ -1039,6 +1051,48 @@ static void remove_subsystem_dir(const char *name) } } +static void __trace_remove_event_call(struct ftrace_event_call *call) +{ + ftrace_event_enable_disable(call, 0); + if (call->event) + __unregister_ftrace_event(call->event); + debugfs_remove_recursive(call->dir); + list_del(&call->list); + trace_destroy_fields(call); + destroy_preds(call); + remove_subsystem_dir(call->system); +} + +/* Remove an event_call */ +void trace_remove_event_call(struct ftrace_event_call *call) +{ + mutex_lock(&event_mutex); + __trace_remove_event_call(call); + mutex_unlock(&event_mutex); +} + +#define for_each_event(event, start, end) \ + for (event = start; \ + (unsigned long)event < (unsigned long)end; \ + event++) + +#ifdef CONFIG_MODULES + +static LIST_HEAD(ftrace_module_file_list); + +/* + * Modules must own their file_operations to keep up with + * reference counting. + */ +struct ftrace_module_file_ops { + struct list_head list; + struct module *mod; + struct file_operations id; + struct file_operations enable; + struct file_operations format; + struct file_operations filter; +}; + static struct ftrace_module_file_ops * trace_create_file_ops(struct module *mod) { @@ -1096,7 +1150,7 @@ static void trace_module_add_events(struct module *mod) if (!call->name) continue; if (call->raw_init) { - ret = call->raw_init(); + ret = call->raw_init(call); if (ret < 0) { if (ret != -ENOSYS) pr_warning("Could not initialize trace " @@ -1131,14 +1185,7 @@ static void trace_module_remove_events(struct module *mod) list_for_each_entry_safe(call, p, &ftrace_events, list) { if (call->mod == mod) { found = true; - ftrace_event_enable_disable(call, 0); - if (call->event) - __unregister_ftrace_event(call->event); - debugfs_remove_recursive(call->dir); - list_del(&call->list); - trace_destroy_fields(call); - destroy_preds(call); - remove_subsystem_dir(call->system); + __trace_remove_event_call(call); } } @@ -1256,7 +1303,7 @@ static __init int event_trace_init(void) if (!call->name) continue; if (call->raw_init) { - ret = call->raw_init(); + ret = call->raw_init(call); if (ret < 0) { if (ret != -ENOSYS) pr_warning("Could not initialize trace " diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index 029a91f4228..9cbe7f1930e 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c @@ -117,10 +117,16 @@ ftrace_format_##call(struct ftrace_event_call *unused, \ #define TRACE_FIELD_SPECIAL(type_item, item, len, cmd) \ cmd; +static int ftrace_raw_init_event(struct ftrace_event_call *event_call) +{ + INIT_LIST_HEAD(&event_call->fields); + init_preds(event_call); + return 0; +} + #undef TRACE_EVENT_FORMAT #define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \ int ftrace_define_fields_##call(struct 
ftrace_event_call *event_call); \ -static int ftrace_raw_init_event_##call(void); \ \ struct ftrace_event_call __used \ __attribute__((__aligned__(4))) \ @@ -128,16 +134,10 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ .name = #call, \ .id = proto, \ .system = __stringify(TRACE_SYSTEM), \ - .raw_init = ftrace_raw_init_event_##call, \ + .raw_init = ftrace_raw_init_event, \ .show_format = ftrace_format_##call, \ .define_fields = ftrace_define_fields_##call, \ -}; \ -static int ftrace_raw_init_event_##call(void) \ -{ \ - INIT_LIST_HEAD(&event_##call.fields); \ - init_preds(&event_##call); \ - return 0; \ -} \ +}; #undef TRACE_EVENT_FORMAT_NOFILTER #define TRACE_EVENT_FORMAT_NOFILTER(call, proto, args, fmt, tstruct, \ diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 85291c4de40..5931933587e 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -193,8 +193,8 @@ int syscall_enter_define_fields(struct ftrace_event_call *call) return ret; for (i = 0; i < meta->nb_args; i++) { - ret = trace_define_field(call, meta->types[i], - meta->args[i], offset, + ret = trace_define_field(call, (char *)meta->types[i], + (char *)meta->args[i], offset, sizeof(unsigned long), 0, FILTER_OTHER); offset += sizeof(unsigned long); @@ -277,13 +277,13 @@ void ftrace_syscall_exit(struct pt_regs *regs, long ret) trace_current_buffer_unlock_commit(event, 0, 0); } -int reg_event_syscall_enter(void *ptr) +int reg_event_syscall_enter(struct ftrace_event_call *call) { int ret = 0; int num; char *name; - name = (char *)ptr; + name = (char *)call->data; num = syscall_name_to_nr(name); if (num < 0 || num >= FTRACE_SYSCALL_MAX) return -ENOSYS; @@ -301,12 +301,12 @@ int reg_event_syscall_enter(void *ptr) return ret; } -void unreg_event_syscall_enter(void *ptr) +void unreg_event_syscall_enter(struct ftrace_event_call *call) { int num; char *name; - name = (char *)ptr; + name = (char *)call->data; num = syscall_name_to_nr(name); if (num < 0 || num >= FTRACE_SYSCALL_MAX) return; @@ -318,13 +318,13 @@ void unreg_event_syscall_enter(void *ptr) mutex_unlock(&syscall_trace_lock); } -int reg_event_syscall_exit(void *ptr) +int reg_event_syscall_exit(struct ftrace_event_call *call) { int ret = 0; int num; char *name; - name = (char *)ptr; + name = (char *)call->data; num = syscall_name_to_nr(name); if (num < 0 || num >= FTRACE_SYSCALL_MAX) return -ENOSYS; @@ -342,12 +342,12 @@ int reg_event_syscall_exit(void *ptr) return ret; } -void unreg_event_syscall_exit(void *ptr) +void unreg_event_syscall_exit(struct ftrace_event_call *call) { int num; char *name; - name = (char *)ptr; + name = (char *)call->data; num = syscall_name_to_nr(name); if (num < 0 || num >= FTRACE_SYSCALL_MAX) return; From d93f12f3f417e49a175800da85c6fcb2a5096e03 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 13 Aug 2009 16:35:01 -0400 Subject: [PATCH 033/470] tracing: Introduce TRACE_FIELD_ZERO() macro MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use TRACE_FIELD_ZERO(type, item) instead of TRACE_FIELD_ZERO_CHAR(item). This also includes a typo fix of TRACE_ZERO_CHAR() macro. Signed-off-by: Masami Hiramatsu Cc: Ananth N Mavinakayanahalli Cc: Avi Kivity Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: H. 
Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: Jim Keniston Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: PrzemysÅ‚aw PaweÅ‚czyk Cc: Roland McGrath Cc: Sam Ravnborg Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi Cc: Vegard Nossum LKML-Reference: <20090813203501.31965.30172.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- kernel/trace/trace_event_types.h | 4 ++-- kernel/trace/trace_export.c | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/kernel/trace/trace_event_types.h b/kernel/trace/trace_event_types.h index 6db005e1248..e74f0906ab1 100644 --- a/kernel/trace/trace_event_types.h +++ b/kernel/trace/trace_event_types.h @@ -109,7 +109,7 @@ TRACE_EVENT_FORMAT(bprint, TRACE_BPRINT, bprint_entry, ignore, TRACE_STRUCT( TRACE_FIELD(unsigned long, ip, ip) TRACE_FIELD(char *, fmt, fmt) - TRACE_FIELD_ZERO_CHAR(buf) + TRACE_FIELD_ZERO(char, buf) ), TP_RAW_FMT("%08lx (%d) fmt:%p %s") ); @@ -117,7 +117,7 @@ TRACE_EVENT_FORMAT(bprint, TRACE_BPRINT, bprint_entry, ignore, TRACE_EVENT_FORMAT(print, TRACE_PRINT, print_entry, ignore, TRACE_STRUCT( TRACE_FIELD(unsigned long, ip, ip) - TRACE_FIELD_ZERO_CHAR(buf) + TRACE_FIELD_ZERO(char, buf) ), TP_RAW_FMT("%08lx (%d) fmt:%p %s") ); diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index 9cbe7f1930e..f75faeccf68 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c @@ -42,9 +42,9 @@ extern void __bad_type_size(void); if (!ret) \ return 0; -#undef TRACE_FIELD_ZERO_CHAR -#define TRACE_FIELD_ZERO_CHAR(item) \ - ret = trace_seq_printf(s, "\tfield:char " #item ";\t" \ +#undef TRACE_FIELD_ZERO +#define TRACE_FIELD_ZERO(type, item) \ + ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ "offset:%u;\tsize:0;\n", \ (unsigned int)offsetof(typeof(field), item)); \ if (!ret) \ @@ -92,9 +92,6 @@ ftrace_format_##call(struct ftrace_event_call *unused, \ #include "trace_event_types.h" -#undef TRACE_ZERO_CHAR -#define TRACE_ZERO_CHAR(arg) - #undef TRACE_FIELD #define TRACE_FIELD(type, item, assign)\ entry->item = assign; @@ -107,6 +104,9 @@ ftrace_format_##call(struct ftrace_event_call *unused, \ #define TRACE_FIELD_SIGN(type, item, assign, is_signed) \ TRACE_FIELD(type, item, assign) +#undef TRACE_FIELD_ZERO +#define TRACE_FIELD_ZERO(type, item) + #undef TP_CMD #define TP_CMD(cmd...) cmd @@ -180,8 +180,8 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ if (ret) \ return ret; -#undef TRACE_FIELD_ZERO_CHAR -#define TRACE_FIELD_ZERO_CHAR(item) +#undef TRACE_FIELD_ZERO +#define TRACE_FIELD_ZERO(type, item) #undef TRACE_EVENT_FORMAT #define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \ From 413d37d1eb69c1765b9ace0a612dac9b6c990e66 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 13 Aug 2009 16:35:11 -0400 Subject: [PATCH 034/470] tracing: Add kprobe-based event tracer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add kprobes-based event tracer on ftrace. This tracer is similar to the events tracer which is based on Tracepoint infrastructure. Instead of Tracepoint, this tracer is based on kprobes (kprobe and kretprobe). It probes anywhere where kprobes can probe(this means, all functions body except for __kprobes functions). Similar to the events tracer, this tracer doesn't need to be activated via current_tracer, instead of that, just set probe points via /sys/kernel/debug/tracing/kprobe_events. 
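For example, a probe on do_sys_open recording its first two arguments, plus a return probe recording the return value, could be defined by writing entries such as the following (the event names are only an illustration of the syntax described below):

	p:myopen do_sys_open a0 a1
	r:myopen_ret do_sys_open rv
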
And you can set filters on each probe events via /sys/kernel/debug/tracing/events/kprobes//filter. This tracer supports following probe arguments for each probe. %REG : Fetch register REG sN : Fetch Nth entry of stack (N >= 0) sa : Fetch stack address. @ADDR : Fetch memory at ADDR (ADDR should be in kernel) @SYM[+|-offs] : Fetch memory at SYM +|- offs (SYM should be a data symbol) aN : Fetch function argument. (N >= 0) rv : Fetch return value. ra : Fetch return address. +|-offs(FETCHARG) : fetch memory at FETCHARG +|- offs address. See Documentation/trace/kprobetrace.txt in the next patch for details. Changes from v13: - Support 'sa' for stack address. - Use call->data instead of container_of() macro. [fweisbec@gmail.com: Fixed conflict against latest tracing/core] Signed-off-by: Masami Hiramatsu Acked-by: Ananth N Mavinakayanahalli Cc: Avi Kivity Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: H. Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: Jim Keniston Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: PrzemysÅ‚aw PaweÅ‚czyk Cc: Roland McGrath Cc: Sam Ravnborg Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi Cc: Vegard Nossum LKML-Reference: <20090813203510.31965.29123.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- kernel/trace/Kconfig | 12 + kernel/trace/Makefile | 1 + kernel/trace/trace.h | 30 + kernel/trace/trace_event_types.h | 18 + kernel/trace/trace_kprobe.c | 1202 ++++++++++++++++++++++++++++++ 5 files changed, 1263 insertions(+) create mode 100644 kernel/trace/trace_kprobe.c diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 06be85a7ef8..fb5fbf75f27 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -411,6 +411,18 @@ config BLK_DEV_IO_TRACE If unsure, say N. +config KPROBE_TRACER + depends on KPROBES + depends on X86 + bool "Trace kprobes" + select TRACING + select GENERIC_TRACER + help + This tracer probes everywhere where kprobes can probe it, and + records various registers and memories specified by user. + This also allows you to trace kprobe probe points as a dynamic + defined events. It provides per-probe event filtering interface. 
+ config DYNAMIC_FTRACE bool "enable/disable ftrace tracepoints dynamically" depends on FUNCTION_TRACER diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 844164dca90..7c00a1ec149 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -54,5 +54,6 @@ obj-$(CONFIG_EVENT_TRACING) += trace_export.o obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o +obj-$(CONFIG_KPROBE_TRACER) += trace_kprobe.o libftrace-y := ftrace.o diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 654fd657bd0..667f832d16b 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -38,6 +38,8 @@ enum trace_type { TRACE_KMEM_FREE, TRACE_POWER, TRACE_BLK, + TRACE_KPROBE, + TRACE_KRETPROBE, __TRACE_LAST_TYPE, }; @@ -205,6 +207,30 @@ struct syscall_trace_exit { unsigned long ret; }; +struct kprobe_trace_entry { + struct trace_entry ent; + unsigned long ip; + int nargs; + unsigned long args[]; +}; + +#define SIZEOF_KPROBE_TRACE_ENTRY(n) \ + (offsetof(struct kprobe_trace_entry, args) + \ + (sizeof(unsigned long) * (n))) + +struct kretprobe_trace_entry { + struct trace_entry ent; + unsigned long func; + unsigned long ret_ip; + int nargs; + unsigned long args[]; +}; + +#define SIZEOF_KRETPROBE_TRACE_ENTRY(n) \ + (offsetof(struct kretprobe_trace_entry, args) + \ + (sizeof(unsigned long) * (n))) + + /* * trace_flag_type is an enumeration that holds different @@ -317,6 +343,10 @@ extern void __ftrace_bad_type(void); TRACE_KMEM_ALLOC); \ IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \ TRACE_KMEM_FREE); \ + IF_ASSIGN(var, ent, struct kprobe_trace_entry, \ + TRACE_KPROBE); \ + IF_ASSIGN(var, ent, struct kretprobe_trace_entry, \ + TRACE_KRETPROBE); \ __ftrace_bad_type(); \ } while (0) diff --git a/kernel/trace/trace_event_types.h b/kernel/trace/trace_event_types.h index e74f0906ab1..186b598a1f1 100644 --- a/kernel/trace/trace_event_types.h +++ b/kernel/trace/trace_event_types.h @@ -175,4 +175,22 @@ TRACE_EVENT_FORMAT(kmem_free, TRACE_KMEM_FREE, kmemtrace_free_entry, ignore, TP_RAW_FMT("type:%u call_site:%lx ptr:%p") ); +TRACE_EVENT_FORMAT(kprobe, TRACE_KPROBE, kprobe_trace_entry, ignore, + TRACE_STRUCT( + TRACE_FIELD(unsigned long, ip, ip) + TRACE_FIELD(int, nargs, nargs) + TRACE_FIELD_ZERO(unsigned long, args) + ), + TP_RAW_FMT("%08lx: args:0x%lx ...") +); + +TRACE_EVENT_FORMAT(kretprobe, TRACE_KRETPROBE, kretprobe_trace_entry, ignore, + TRACE_STRUCT( + TRACE_FIELD(unsigned long, func, func) + TRACE_FIELD(unsigned long, ret_ip, ret_ip) + TRACE_FIELD(int, nargs, nargs) + TRACE_FIELD_ZERO(unsigned long, args) + ), + TP_RAW_FMT("%08lx <- %08lx: args:0x%lx ...") +); #undef TRACE_SYSTEM diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c new file mode 100644 index 00000000000..0c4f00aafb9 --- /dev/null +++ b/kernel/trace/trace_kprobe.c @@ -0,0 +1,1202 @@ +/* + * kprobe based kernel tracer + * + * Created by Masami Hiramatsu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "trace.h" +#include "trace_output.h" + +#define TRACE_KPROBE_ARGS 6 +#define MAX_ARGSTR_LEN 63 + +/* currently, trace_kprobe only supports X86. */ + +struct fetch_func { + unsigned long (*func)(struct pt_regs *, void *); + void *data; +}; + +static __kprobes unsigned long call_fetch(struct fetch_func *f, + struct pt_regs *regs) +{ + return f->func(regs, f->data); +} + +/* fetch handlers */ +static __kprobes unsigned long fetch_register(struct pt_regs *regs, + void *offset) +{ + return regs_get_register(regs, (unsigned int)((unsigned long)offset)); +} + +static __kprobes unsigned long fetch_stack(struct pt_regs *regs, + void *num) +{ + return regs_get_kernel_stack_nth(regs, + (unsigned int)((unsigned long)num)); +} + +static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr) +{ + unsigned long retval; + + if (probe_kernel_address(addr, retval)) + return 0; + return retval; +} + +static __kprobes unsigned long fetch_argument(struct pt_regs *regs, void *num) +{ + return regs_get_argument_nth(regs, (unsigned int)((unsigned long)num)); +} + +static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs, + void *dummy) +{ + return regs_return_value(regs); +} + +static __kprobes unsigned long fetch_ip(struct pt_regs *regs, void *dummy) +{ + return instruction_pointer(regs); +} + +static __kprobes unsigned long fetch_stack_address(struct pt_regs *regs, + void *dummy) +{ + return kernel_stack_pointer(regs); +} + +/* Memory fetching by symbol */ +struct symbol_cache { + char *symbol; + long offset; + unsigned long addr; +}; + +static unsigned long update_symbol_cache(struct symbol_cache *sc) +{ + sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol); + if (sc->addr) + sc->addr += sc->offset; + return sc->addr; +} + +static void free_symbol_cache(struct symbol_cache *sc) +{ + kfree(sc->symbol); + kfree(sc); +} + +static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset) +{ + struct symbol_cache *sc; + + if (!sym || strlen(sym) == 0) + return NULL; + sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL); + if (!sc) + return NULL; + + sc->symbol = kstrdup(sym, GFP_KERNEL); + if (!sc->symbol) { + kfree(sc); + return NULL; + } + sc->offset = offset; + + update_symbol_cache(sc); + return sc; +} + +static __kprobes unsigned long fetch_symbol(struct pt_regs *regs, void *data) +{ + struct symbol_cache *sc = data; + + if (sc->addr) + return fetch_memory(regs, (void *)sc->addr); + else + return 0; +} + +/* Special indirect memory access interface */ +struct indirect_fetch_data { + struct fetch_func orig; + long offset; +}; + +static __kprobes unsigned long fetch_indirect(struct pt_regs *regs, void *data) +{ + struct indirect_fetch_data *ind = data; + unsigned long addr; + + addr = call_fetch(&ind->orig, regs); + if (addr) { + addr += ind->offset; + return fetch_memory(regs, (void *)addr); + } else + return 0; +} + +static __kprobes void free_indirect_fetch_data(struct indirect_fetch_data *data) +{ + if (data->orig.func == fetch_indirect) + free_indirect_fetch_data(data->orig.data); + else if (data->orig.func == fetch_symbol) + free_symbol_cache(data->orig.data); + kfree(data); +} + +/** + * kprobe_trace_core + */ + +struct 
trace_probe { + struct list_head list; + union { + struct kprobe kp; + struct kretprobe rp; + }; + const char *symbol; /* symbol name */ + unsigned int nr_args; + struct fetch_func args[TRACE_KPROBE_ARGS]; + struct ftrace_event_call call; +}; + +static int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs); +static int kretprobe_trace_func(struct kretprobe_instance *ri, + struct pt_regs *regs); + +static __kprobes int probe_is_return(struct trace_probe *tp) +{ + return (tp->rp.handler == kretprobe_trace_func); +} + +static __kprobes const char *probe_symbol(struct trace_probe *tp) +{ + return tp->symbol ? tp->symbol : "unknown"; +} + +static __kprobes long probe_offset(struct trace_probe *tp) +{ + return (probe_is_return(tp)) ? tp->rp.kp.offset : tp->kp.offset; +} + +static __kprobes void *probe_address(struct trace_probe *tp) +{ + return (probe_is_return(tp)) ? tp->rp.kp.addr : tp->kp.addr; +} + +static int trace_arg_string(char *buf, size_t n, struct fetch_func *ff) +{ + int ret = -EINVAL; + + if (ff->func == fetch_argument) + ret = snprintf(buf, n, "a%lu", (unsigned long)ff->data); + else if (ff->func == fetch_register) { + const char *name; + name = regs_query_register_name((unsigned int)((long)ff->data)); + ret = snprintf(buf, n, "%%%s", name); + } else if (ff->func == fetch_stack) + ret = snprintf(buf, n, "s%lu", (unsigned long)ff->data); + else if (ff->func == fetch_memory) + ret = snprintf(buf, n, "@0x%p", ff->data); + else if (ff->func == fetch_symbol) { + struct symbol_cache *sc = ff->data; + ret = snprintf(buf, n, "@%s%+ld", sc->symbol, sc->offset); + } else if (ff->func == fetch_retvalue) + ret = snprintf(buf, n, "rv"); + else if (ff->func == fetch_ip) + ret = snprintf(buf, n, "ra"); + else if (ff->func == fetch_stack_address) + ret = snprintf(buf, n, "sa"); + else if (ff->func == fetch_indirect) { + struct indirect_fetch_data *id = ff->data; + size_t l = 0; + ret = snprintf(buf, n, "%+ld(", id->offset); + if (ret >= n) + goto end; + l += ret; + ret = trace_arg_string(buf + l, n - l, &id->orig); + if (ret < 0) + goto end; + l += ret; + ret = snprintf(buf + l, n - l, ")"); + ret += l; + } +end: + if (ret >= n) + return -ENOSPC; + return ret; +} + +static int register_probe_event(struct trace_probe *tp); +static void unregister_probe_event(struct trace_probe *tp); + +static DEFINE_MUTEX(probe_lock); +static LIST_HEAD(probe_list); + +static struct trace_probe *alloc_trace_probe(const char *symbol, + const char *event) +{ + struct trace_probe *tp; + + tp = kzalloc(sizeof(struct trace_probe), GFP_KERNEL); + if (!tp) + return ERR_PTR(-ENOMEM); + + if (symbol) { + tp->symbol = kstrdup(symbol, GFP_KERNEL); + if (!tp->symbol) + goto error; + } + if (event) { + tp->call.name = kstrdup(event, GFP_KERNEL); + if (!tp->call.name) + goto error; + } + + INIT_LIST_HEAD(&tp->list); + return tp; +error: + kfree(tp->symbol); + kfree(tp); + return ERR_PTR(-ENOMEM); +} + +static void free_trace_probe(struct trace_probe *tp) +{ + int i; + + for (i = 0; i < tp->nr_args; i++) + if (tp->args[i].func == fetch_symbol) + free_symbol_cache(tp->args[i].data); + else if (tp->args[i].func == fetch_indirect) + free_indirect_fetch_data(tp->args[i].data); + + kfree(tp->call.name); + kfree(tp->symbol); + kfree(tp); +} + +static struct trace_probe *find_probe_event(const char *event) +{ + struct trace_probe *tp; + + list_for_each_entry(tp, &probe_list, list) + if (tp->call.name && !strcmp(tp->call.name, event)) + return tp; + return NULL; +} + +static void __unregister_trace_probe(struct trace_probe *tp) +{ 
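	/*
	 * Called under probe_lock.  A return probe was registered as a
	 * kretprobe, so it has to be removed with the matching unregister
	 * call; plain probes go through unregister_kprobe().
	 */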
+ if (probe_is_return(tp)) + unregister_kretprobe(&tp->rp); + else + unregister_kprobe(&tp->kp); +} + +/* Unregister a trace_probe and probe_event: call with locking probe_lock */ +static void unregister_trace_probe(struct trace_probe *tp) +{ + if (tp->call.name) + unregister_probe_event(tp); + __unregister_trace_probe(tp); + list_del(&tp->list); +} + +/* Register a trace_probe and probe_event */ +static int register_trace_probe(struct trace_probe *tp) +{ + struct trace_probe *old_tp; + int ret; + + mutex_lock(&probe_lock); + + if (probe_is_return(tp)) + ret = register_kretprobe(&tp->rp); + else + ret = register_kprobe(&tp->kp); + + if (ret) { + pr_warning("Could not insert probe(%d)\n", ret); + if (ret == -EILSEQ) { + pr_warning("Probing address(0x%p) is not an " + "instruction boundary.\n", + probe_address(tp)); + ret = -EINVAL; + } + goto end; + } + /* register as an event */ + if (tp->call.name) { + old_tp = find_probe_event(tp->call.name); + if (old_tp) { + /* delete old event */ + unregister_trace_probe(old_tp); + free_trace_probe(old_tp); + } + ret = register_probe_event(tp); + if (ret) { + pr_warning("Faild to register probe event(%d)\n", ret); + __unregister_trace_probe(tp); + } + } + list_add_tail(&tp->list, &probe_list); +end: + mutex_unlock(&probe_lock); + return ret; +} + +/* Split symbol and offset. */ +static int split_symbol_offset(char *symbol, long *offset) +{ + char *tmp; + int ret; + + if (!offset) + return -EINVAL; + + tmp = strchr(symbol, '+'); + if (!tmp) + tmp = strchr(symbol, '-'); + + if (tmp) { + /* skip sign because strict_strtol doesn't accept '+' */ + ret = strict_strtol(tmp + 1, 0, offset); + if (ret) + return ret; + if (*tmp == '-') + *offset = -(*offset); + *tmp = '\0'; + } else + *offset = 0; + return 0; +} + +#define PARAM_MAX_ARGS 16 +#define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long)) + +static int parse_trace_arg(char *arg, struct fetch_func *ff, int is_return) +{ + int ret = 0; + unsigned long param; + long offset; + char *tmp; + + switch (arg[0]) { + case 'a': /* argument */ + ret = strict_strtoul(arg + 1, 10, ¶m); + if (ret || param > PARAM_MAX_ARGS) + ret = -EINVAL; + else { + ff->func = fetch_argument; + ff->data = (void *)param; + } + break; + case 'r': /* retval or retaddr */ + if (is_return && arg[1] == 'v') { + ff->func = fetch_retvalue; + ff->data = NULL; + } else if (is_return && arg[1] == 'a') { + ff->func = fetch_ip; + ff->data = NULL; + } else + ret = -EINVAL; + break; + case '%': /* named register */ + ret = regs_query_register_offset(arg + 1); + if (ret >= 0) { + ff->func = fetch_register; + ff->data = (void *)(unsigned long)ret; + ret = 0; + } + break; + case 's': /* stack */ + if (arg[1] == 'a') { + ff->func = fetch_stack_address; + ff->data = NULL; + } else { + ret = strict_strtoul(arg + 1, 10, ¶m); + if (ret || param > PARAM_MAX_STACK) + ret = -EINVAL; + else { + ff->func = fetch_stack; + ff->data = (void *)param; + } + } + break; + case '@': /* memory or symbol */ + if (isdigit(arg[1])) { + ret = strict_strtoul(arg + 1, 0, ¶m); + if (ret) + break; + ff->func = fetch_memory; + ff->data = (void *)param; + } else { + ret = split_symbol_offset(arg + 1, &offset); + if (ret) + break; + ff->data = alloc_symbol_cache(arg + 1, + offset); + if (ff->data) + ff->func = fetch_symbol; + else + ret = -EINVAL; + } + break; + case '+': /* indirect memory */ + case '-': + tmp = strchr(arg, '('); + if (!tmp) { + ret = -EINVAL; + break; + } + *tmp = '\0'; + ret = strict_strtol(arg + 1, 0, &offset); + if (ret) + break; + if (arg[0] == '-') + 
offset = -offset; + arg = tmp + 1; + tmp = strrchr(arg, ')'); + if (tmp) { + struct indirect_fetch_data *id; + *tmp = '\0'; + id = kzalloc(sizeof(struct indirect_fetch_data), + GFP_KERNEL); + if (!id) + return -ENOMEM; + id->offset = offset; + ret = parse_trace_arg(arg, &id->orig, is_return); + if (ret) + kfree(id); + else { + ff->func = fetch_indirect; + ff->data = (void *)id; + } + } else + ret = -EINVAL; + break; + default: + /* TODO: support custom handler */ + ret = -EINVAL; + } + return ret; +} + +static int create_trace_probe(int argc, char **argv) +{ + /* + * Argument syntax: + * - Add kprobe: p[:EVENT] SYMBOL[+OFFS|-OFFS]|ADDRESS [FETCHARGS] + * - Add kretprobe: r[:EVENT] SYMBOL[+0] [FETCHARGS] + * Fetch args: + * aN : fetch Nth of function argument. (N:0-) + * rv : fetch return value + * ra : fetch return address + * sa : fetch stack address + * sN : fetch Nth of stack (N:0-) + * @ADDR : fetch memory at ADDR (ADDR should be in kernel) + * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol) + * %REG : fetch register REG + * Indirect memory fetch: + * +|-offs(ARG) : fetch memory at ARG +|- offs address. + */ + struct trace_probe *tp; + struct kprobe *kp; + int i, ret = 0; + int is_return = 0; + char *symbol = NULL, *event = NULL; + long offset = 0; + void *addr = NULL; + + if (argc < 2) + return -EINVAL; + + if (argv[0][0] == 'p') + is_return = 0; + else if (argv[0][0] == 'r') + is_return = 1; + else + return -EINVAL; + + if (argv[0][1] == ':') { + event = &argv[0][2]; + if (strlen(event) == 0) { + pr_info("Event name is not specifiled\n"); + return -EINVAL; + } + } + + if (isdigit(argv[1][0])) { + if (is_return) + return -EINVAL; + /* an address specified */ + ret = strict_strtoul(&argv[0][2], 0, (unsigned long *)&addr); + if (ret) + return ret; + } else { + /* a symbol specified */ + symbol = argv[1]; + /* TODO: support .init module functions */ + ret = split_symbol_offset(symbol, &offset); + if (ret) + return ret; + if (offset && is_return) + return -EINVAL; + } + + /* setup a probe */ + tp = alloc_trace_probe(symbol, event); + if (IS_ERR(tp)) + return PTR_ERR(tp); + + if (is_return) { + kp = &tp->rp.kp; + tp->rp.handler = kretprobe_trace_func; + } else { + kp = &tp->kp; + tp->kp.pre_handler = kprobe_trace_func; + } + + if (tp->symbol) { + kp->symbol_name = tp->symbol; + kp->offset = offset; + } else + kp->addr = addr; + + /* parse arguments */ + argc -= 2; argv += 2; ret = 0; + for (i = 0; i < argc && i < TRACE_KPROBE_ARGS; i++) { + if (strlen(argv[i]) > MAX_ARGSTR_LEN) { + pr_info("Argument%d(%s) is too long.\n", i, argv[i]); + ret = -ENOSPC; + goto error; + } + ret = parse_trace_arg(argv[i], &tp->args[i], is_return); + if (ret) + goto error; + } + tp->nr_args = i; + + ret = register_trace_probe(tp); + if (ret) + goto error; + return 0; + +error: + free_trace_probe(tp); + return ret; +} + +static void cleanup_all_probes(void) +{ + struct trace_probe *tp; + + mutex_lock(&probe_lock); + /* TODO: Use batch unregistration */ + while (!list_empty(&probe_list)) { + tp = list_entry(probe_list.next, struct trace_probe, list); + unregister_trace_probe(tp); + free_trace_probe(tp); + } + mutex_unlock(&probe_lock); +} + + +/* Probes listing interfaces */ +static void *probes_seq_start(struct seq_file *m, loff_t *pos) +{ + mutex_lock(&probe_lock); + return seq_list_start(&probe_list, *pos); +} + +static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos) +{ + return seq_list_next(v, &probe_list, pos); +} + +static void probes_seq_stop(struct seq_file *m, void 
*v) +{ + mutex_unlock(&probe_lock); +} + +static int probes_seq_show(struct seq_file *m, void *v) +{ + struct trace_probe *tp = v; + int i, ret; + char buf[MAX_ARGSTR_LEN + 1]; + + seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p'); + if (tp->call.name) + seq_printf(m, ":%s", tp->call.name); + + if (tp->symbol) + seq_printf(m, " %s%+ld", probe_symbol(tp), probe_offset(tp)); + else + seq_printf(m, " 0x%p", probe_address(tp)); + + for (i = 0; i < tp->nr_args; i++) { + ret = trace_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i]); + if (ret < 0) { + pr_warning("Argument%d decoding error(%d).\n", i, ret); + return ret; + } + seq_printf(m, " %s", buf); + } + seq_printf(m, "\n"); + return 0; +} + +static const struct seq_operations probes_seq_op = { + .start = probes_seq_start, + .next = probes_seq_next, + .stop = probes_seq_stop, + .show = probes_seq_show +}; + +static int probes_open(struct inode *inode, struct file *file) +{ + if ((file->f_mode & FMODE_WRITE) && + (file->f_flags & O_TRUNC)) + cleanup_all_probes(); + + return seq_open(file, &probes_seq_op); +} + +static int command_trace_probe(const char *buf) +{ + char **argv; + int argc = 0, ret = 0; + + argv = argv_split(GFP_KERNEL, buf, &argc); + if (!argv) + return -ENOMEM; + + if (argc) + ret = create_trace_probe(argc, argv); + + argv_free(argv); + return ret; +} + +#define WRITE_BUFSIZE 128 + +static ssize_t probes_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) +{ + char *kbuf, *tmp; + int ret; + size_t done; + size_t size; + + kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL); + if (!kbuf) + return -ENOMEM; + + ret = done = 0; + while (done < count) { + size = count - done; + if (size >= WRITE_BUFSIZE) + size = WRITE_BUFSIZE - 1; + if (copy_from_user(kbuf, buffer + done, size)) { + ret = -EFAULT; + goto out; + } + kbuf[size] = '\0'; + tmp = strchr(kbuf, '\n'); + if (tmp) { + *tmp = '\0'; + size = tmp - kbuf + 1; + } else if (done + size < count) { + pr_warning("Line length is too long: " + "Should be less than %d.", WRITE_BUFSIZE); + ret = -EINVAL; + goto out; + } + done += size; + /* Remove comments */ + tmp = strchr(kbuf, '#'); + if (tmp) + *tmp = '\0'; + + ret = command_trace_probe(kbuf); + if (ret) + goto out; + } + ret = done; +out: + kfree(kbuf); + return ret; +} + +static const struct file_operations kprobe_events_ops = { + .owner = THIS_MODULE, + .open = probes_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, + .write = probes_write, +}; + +/* Kprobe handler */ +static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) +{ + struct trace_probe *tp = container_of(kp, struct trace_probe, kp); + struct kprobe_trace_entry *entry; + struct ring_buffer_event *event; + int size, i, pc; + unsigned long irq_flags; + struct ftrace_event_call *call = &event_kprobe; + + if (&tp->call.name) + call = &tp->call; + + local_save_flags(irq_flags); + pc = preempt_count(); + + size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); + + event = trace_current_buffer_lock_reserve(TRACE_KPROBE, size, + irq_flags, pc); + if (!event) + return 0; + + entry = ring_buffer_event_data(event); + entry->nargs = tp->nr_args; + entry->ip = (unsigned long)kp->addr; + for (i = 0; i < tp->nr_args; i++) + entry->args[i] = call_fetch(&tp->args[i], regs); + + if (!filter_current_check_discard(call, entry, event)) + trace_nowake_buffer_unlock_commit(event, irq_flags, pc); + return 0; +} + +/* Kretprobe handler */ +static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri, + struct pt_regs 
*regs) +{ + struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); + struct kretprobe_trace_entry *entry; + struct ring_buffer_event *event; + int size, i, pc; + unsigned long irq_flags; + struct ftrace_event_call *call = &event_kretprobe; + + if (&tp->call.name) + call = &tp->call; + + local_save_flags(irq_flags); + pc = preempt_count(); + + size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args); + + event = trace_current_buffer_lock_reserve(TRACE_KRETPROBE, size, + irq_flags, pc); + if (!event) + return 0; + + entry = ring_buffer_event_data(event); + entry->nargs = tp->nr_args; + entry->func = (unsigned long)probe_address(tp); + entry->ret_ip = (unsigned long)ri->ret_addr; + for (i = 0; i < tp->nr_args; i++) + entry->args[i] = call_fetch(&tp->args[i], regs); + + if (!filter_current_check_discard(call, entry, event)) + trace_nowake_buffer_unlock_commit(event, irq_flags, pc); + + return 0; +} + +/* Event entry printers */ +enum print_line_t +print_kprobe_event(struct trace_iterator *iter, int flags) +{ + struct kprobe_trace_entry *field; + struct trace_seq *s = &iter->seq; + int i; + + trace_assign_type(field, iter->ent); + + if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) + goto partial; + + if (!trace_seq_puts(s, ":")) + goto partial; + + for (i = 0; i < field->nargs; i++) + if (!trace_seq_printf(s, " 0x%lx", field->args[i])) + goto partial; + + if (!trace_seq_puts(s, "\n")) + goto partial; + + return TRACE_TYPE_HANDLED; +partial: + return TRACE_TYPE_PARTIAL_LINE; +} + +enum print_line_t +print_kretprobe_event(struct trace_iterator *iter, int flags) +{ + struct kretprobe_trace_entry *field; + struct trace_seq *s = &iter->seq; + int i; + + trace_assign_type(field, iter->ent); + + if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) + goto partial; + + if (!trace_seq_puts(s, " <- ")) + goto partial; + + if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET)) + goto partial; + + if (!trace_seq_puts(s, ":")) + goto partial; + + for (i = 0; i < field->nargs; i++) + if (!trace_seq_printf(s, " 0x%lx", field->args[i])) + goto partial; + + if (!trace_seq_puts(s, "\n")) + goto partial; + + return TRACE_TYPE_HANDLED; +partial: + return TRACE_TYPE_PARTIAL_LINE; +} + +static struct trace_event kprobe_trace_event = { + .type = TRACE_KPROBE, + .trace = print_kprobe_event, +}; + +static struct trace_event kretprobe_trace_event = { + .type = TRACE_KRETPROBE, + .trace = print_kretprobe_event, +}; + +static int probe_event_enable(struct ftrace_event_call *call) +{ + struct trace_probe *tp = (struct trace_probe *)call->data; + + if (probe_is_return(tp)) + return enable_kretprobe(&tp->rp); + else + return enable_kprobe(&tp->kp); +} + +static void probe_event_disable(struct ftrace_event_call *call) +{ + struct trace_probe *tp = (struct trace_probe *)call->data; + + if (probe_is_return(tp)) + disable_kretprobe(&tp->rp); + else + disable_kprobe(&tp->kp); +} + +static int probe_event_raw_init(struct ftrace_event_call *event_call) +{ + INIT_LIST_HEAD(&event_call->fields); + init_preds(event_call); + return 0; +} + +#undef DEFINE_FIELD +#define DEFINE_FIELD(type, item, name, is_signed) \ + do { \ + ret = trace_define_field(event_call, #type, name, \ + offsetof(typeof(field), item), \ + sizeof(field.item), is_signed, \ + FILTER_OTHER); \ + if (ret) \ + return ret; \ + } while (0) + +static int kprobe_event_define_fields(struct ftrace_event_call *event_call) +{ + int ret, i; + struct kprobe_trace_entry field; + char buf[MAX_ARGSTR_LEN + 1]; + struct 
trace_probe *tp = (struct trace_probe *)event_call->data; + + ret = trace_define_common_fields(event_call); + if (!ret) + return ret; + + DEFINE_FIELD(unsigned long, ip, "ip", 0); + DEFINE_FIELD(int, nargs, "nargs", 1); + for (i = 0; i < tp->nr_args; i++) { + /* Set argN as a field */ + sprintf(buf, "arg%d", i); + DEFINE_FIELD(unsigned long, args[i], buf, 0); + /* Set argument string as an alias field */ + ret = trace_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i]); + if (ret < 0) + return ret; + DEFINE_FIELD(unsigned long, args[i], buf, 0); + } + return 0; +} + +static int kretprobe_event_define_fields(struct ftrace_event_call *event_call) +{ + int ret, i; + struct kretprobe_trace_entry field; + char buf[MAX_ARGSTR_LEN + 1]; + struct trace_probe *tp = (struct trace_probe *)event_call->data; + + ret = trace_define_common_fields(event_call); + if (!ret) + return ret; + + DEFINE_FIELD(unsigned long, func, "func", 0); + DEFINE_FIELD(unsigned long, ret_ip, "ret_ip", 0); + DEFINE_FIELD(int, nargs, "nargs", 1); + for (i = 0; i < tp->nr_args; i++) { + /* Set argN as a field */ + sprintf(buf, "arg%d", i); + DEFINE_FIELD(unsigned long, args[i], buf, 0); + /* Set argument string as an alias field */ + ret = trace_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i]); + if (ret < 0) + return ret; + DEFINE_FIELD(unsigned long, args[i], buf, 0); + } + return 0; +} + +static int __probe_event_show_format(struct trace_seq *s, + struct trace_probe *tp, const char *fmt, + const char *arg) +{ + int i, ret; + char buf[MAX_ARGSTR_LEN + 1]; + + /* Show aliases */ + for (i = 0; i < tp->nr_args; i++) { + ret = trace_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i]); + if (ret < 0) + return ret; + if (!trace_seq_printf(s, "\talias: %s;\toriginal: arg%d;\n", + buf, i)) + return 0; + } + /* Show format */ + if (!trace_seq_printf(s, "\nprint fmt: \"%s", fmt)) + return 0; + + for (i = 0; i < tp->nr_args; i++) + if (!trace_seq_puts(s, " 0x%lx")) + return 0; + + if (!trace_seq_printf(s, "\", %s", arg)) + return 0; + + for (i = 0; i < tp->nr_args; i++) + if (!trace_seq_printf(s, ", arg%d", i)) + return 0; + + return trace_seq_puts(s, "\n"); +} + +#undef SHOW_FIELD +#define SHOW_FIELD(type, item, name) \ + do { \ + ret = trace_seq_printf(s, "\tfield: " #type " %s;\t" \ + "offset:%u;tsize:%u;\n", name, \ + (unsigned int)offsetof(typeof(field), item),\ + (unsigned int)sizeof(type)); \ + if (!ret) \ + return 0; \ + } while (0) + +static int kprobe_event_show_format(struct ftrace_event_call *call, + struct trace_seq *s) +{ + struct kprobe_trace_entry field __attribute__((unused)); + int ret, i; + char buf[8]; + struct trace_probe *tp = (struct trace_probe *)call->data; + + SHOW_FIELD(unsigned long, ip, "ip"); + SHOW_FIELD(int, nargs, "nargs"); + + /* Show fields */ + for (i = 0; i < tp->nr_args; i++) { + sprintf(buf, "arg%d", i); + SHOW_FIELD(unsigned long, args[i], buf); + } + trace_seq_puts(s, "\n"); + + return __probe_event_show_format(s, tp, "%lx:", "ip"); +} + +static int kretprobe_event_show_format(struct ftrace_event_call *call, + struct trace_seq *s) +{ + struct kretprobe_trace_entry field __attribute__((unused)); + int ret, i; + char buf[8]; + struct trace_probe *tp = (struct trace_probe *)call->data; + + SHOW_FIELD(unsigned long, func, "func"); + SHOW_FIELD(unsigned long, ret_ip, "ret_ip"); + SHOW_FIELD(int, nargs, "nargs"); + + /* Show fields */ + for (i = 0; i < tp->nr_args; i++) { + sprintf(buf, "arg%d", i); + SHOW_FIELD(unsigned long, args[i], buf); + } + trace_seq_puts(s, "\n"); + + return __probe_event_show_format(s, tp, 
"%lx <- %lx:", + "func, ret_ip"); +} + +static int register_probe_event(struct trace_probe *tp) +{ + struct ftrace_event_call *call = &tp->call; + int ret; + + /* Initialize ftrace_event_call */ + call->system = "kprobes"; + if (probe_is_return(tp)) { + call->event = &kretprobe_trace_event; + call->id = TRACE_KRETPROBE; + call->raw_init = probe_event_raw_init; + call->show_format = kretprobe_event_show_format; + call->define_fields = kretprobe_event_define_fields; + } else { + call->event = &kprobe_trace_event; + call->id = TRACE_KPROBE; + call->raw_init = probe_event_raw_init; + call->show_format = kprobe_event_show_format; + call->define_fields = kprobe_event_define_fields; + } + call->enabled = 1; + call->regfunc = probe_event_enable; + call->unregfunc = probe_event_disable; + call->data = tp; + ret = trace_add_event_call(call); + if (ret) + pr_info("Failed to register kprobe event: %s\n", call->name); + return ret; +} + +static void unregister_probe_event(struct trace_probe *tp) +{ + /* + * Prevent to unregister event itself because the event is shared + * among other probes. + */ + tp->call.event = NULL; + trace_remove_event_call(&tp->call); +} + +/* Make a debugfs interface for controling probe points */ +static __init int init_kprobe_trace(void) +{ + struct dentry *d_tracer; + struct dentry *entry; + int ret; + + ret = register_ftrace_event(&kprobe_trace_event); + if (!ret) { + pr_warning("Could not register kprobe_trace_event type.\n"); + return 0; + } + ret = register_ftrace_event(&kretprobe_trace_event); + if (!ret) { + pr_warning("Could not register kretprobe_trace_event type.\n"); + return 0; + } + + d_tracer = tracing_init_dentry(); + if (!d_tracer) + return 0; + + entry = debugfs_create_file("kprobe_events", 0644, d_tracer, + NULL, &kprobe_events_ops); + + if (!entry) + pr_warning("Could not create debugfs " + "'kprobe_events' entry\n"); + return 0; +} +fs_initcall(init_kprobe_trace); + + +#ifdef CONFIG_FTRACE_STARTUP_TEST + +static int kprobe_trace_selftest_target(int a1, int a2, int a3, + int a4, int a5, int a6) +{ + return a1 + a2 + a3 + a4 + a5 + a6; +} + +static __init int kprobe_trace_self_tests_init(void) +{ + int ret; + int (*target)(int, int, int, int, int, int); + + target = kprobe_trace_selftest_target; + + pr_info("Testing kprobe tracing: "); + + ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target " + "a1 a2 a3 a4 a5 a6"); + if (WARN_ON_ONCE(ret)) + pr_warning("error enabling function entry\n"); + + ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target " + "ra rv"); + if (WARN_ON_ONCE(ret)) + pr_warning("error enabling function return\n"); + + ret = target(1, 2, 3, 4, 5, 6); + + cleanup_all_probes(); + + pr_cont("OK\n"); + return 0; +} + +late_initcall(kprobe_trace_self_tests_init); + +#endif From d8ec91850efaf6cee9234c80260fe03881242374 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 19 Aug 2009 21:13:57 +0200 Subject: [PATCH 035/470] tracing: Add kprobe-based event tracer documentation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add the documentation to use the kprobe based event tracer. [fweisbec@gmail.com: Split tracer and its Documentation in two patchs] Signed-off-by: Masami Hiramatsu Acked-by: Ananth N Mavinakayanahalli Cc: Avi Kivity Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: H. 
Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: Jim Keniston Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: PrzemysÅ‚aw PaweÅ‚czyk Cc: Roland McGrath Cc: Sam Ravnborg Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi Cc: Vegard Nossum LKML-Reference: <20090813203510.31965.29123.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- Documentation/trace/kprobetrace.txt | 139 ++++++++++++++++++++++++++++ 1 file changed, 139 insertions(+) create mode 100644 Documentation/trace/kprobetrace.txt diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt new file mode 100644 index 00000000000..efff6eb1b3d --- /dev/null +++ b/Documentation/trace/kprobetrace.txt @@ -0,0 +1,139 @@ + Kprobe-based Event Tracer + ========================= + + Documentation is written by Masami Hiramatsu + + +Overview +-------- +This tracer is similar to the events tracer which is based on Tracepoint +infrastructure. Instead of Tracepoint, this tracer is based on kprobes(kprobe +and kretprobe). It probes anywhere where kprobes can probe(this means, all +functions body except for __kprobes functions). + +Unlike the function tracer, this tracer can probe instructions inside of +kernel functions. It allows you to check which instruction has been executed. + +Unlike the Tracepoint based events tracer, this tracer can add and remove +probe points on the fly. + +Similar to the events tracer, this tracer doesn't need to be activated via +current_tracer, instead of that, just set probe points via +/sys/kernel/debug/tracing/kprobe_events. And you can set filters on each +probe events via /sys/kernel/debug/tracing/events/kprobes//filter. + + +Synopsis of kprobe_events +------------------------- + p[:EVENT] SYMBOL[+offs|-offs]|MEMADDR [FETCHARGS] : Set a probe + r[:EVENT] SYMBOL[+0] [FETCHARGS] : Set a return probe + + EVENT : Event name. + SYMBOL[+offs|-offs] : Symbol+offset where the probe is inserted. + MEMADDR : Address where the probe is inserted. + + FETCHARGS : Arguments. + %REG : Fetch register REG + sN : Fetch Nth entry of stack (N >= 0) + sa : Fetch stack address. + @ADDR : Fetch memory at ADDR (ADDR should be in kernel) + @SYM[+|-offs] : Fetch memory at SYM +|- offs (SYM should be a data symbol) + aN : Fetch function argument. (N >= 0)(*) + rv : Fetch return value.(**) + ra : Fetch return address.(**) + +|-offs(FETCHARG) : fetch memory at FETCHARG +|- offs address.(***) + + (*) aN may not correct on asmlinkaged functions and at the middle of + function body. + (**) only for return probe. + (***) this is useful for fetching a field of data structures. + + +Per-Probe Event Filtering +------------------------- + Per-probe event filtering feature allows you to set different filter on each +probe and gives you what arguments will be shown in trace buffer. If an event +name is specified right after 'p:' or 'r:' in kprobe_events, the tracer adds +an event under tracing/events/kprobes/, at the directory you can see +'id', 'enabled', 'format' and 'filter'. + +enabled: + You can enable/disable the probe by writing 1 or 0 on it. + +format: + It shows the format of this probe event. It also shows aliases of arguments + which you specified to kprobe_events. + +filter: + You can write filtering rules of this event. And you can use both of aliase + names and field names for describing filters. + + +Usage examples +-------------- +To add a probe as a new event, write a new definition to kprobe_events +as below. 
+ + echo p:myprobe do_sys_open a0 a1 a2 a3 > /sys/kernel/debug/tracing/kprobe_events + + This sets a kprobe on the top of do_sys_open() function with recording +1st to 4th arguments as "myprobe" event. + + echo r:myretprobe do_sys_open rv ra >> /sys/kernel/debug/tracing/kprobe_events + + This sets a kretprobe on the return point of do_sys_open() function with +recording return value and return address as "myretprobe" event. + You can see the format of these events via +/sys/kernel/debug/tracing/events/kprobes//format. + + cat /sys/kernel/debug/tracing/events/kprobes/myprobe/format +name: myprobe +ID: 23 +format: + field:unsigned short common_type; offset:0; size:2; + field:unsigned char common_flags; offset:2; size:1; + field:unsigned char common_preempt_count; offset:3; size:1; + field:int common_pid; offset:4; size:4; + field:int common_tgid; offset:8; size:4; + + field: unsigned long ip; offset:16;tsize:8; + field: int nargs; offset:24;tsize:4; + field: unsigned long arg0; offset:32;tsize:8; + field: unsigned long arg1; offset:40;tsize:8; + field: unsigned long arg2; offset:48;tsize:8; + field: unsigned long arg3; offset:56;tsize:8; + + alias: a0; original: arg0; + alias: a1; original: arg1; + alias: a2; original: arg2; + alias: a3; original: arg3; + +print fmt: "%lx: 0x%lx 0x%lx 0x%lx 0x%lx", ip, arg0, arg1, arg2, arg3 + + + You can see that the event has 4 arguments and alias expressions +corresponding to it. + + echo > /sys/kernel/debug/tracing/kprobe_events + + This clears all probe points. and you can see the traced information via +/sys/kernel/debug/tracing/trace. + + cat /sys/kernel/debug/tracing/trace +# tracer: nop +# +# TASK-PID CPU# TIMESTAMP FUNCTION +# | | | | | + <...>-1447 [001] 1038282.286875: do_sys_open+0x0/0xd6: 0x3 0x7fffd1ec4440 0x8000 0x0 + <...>-1447 [001] 1038282.286878: sys_openat+0xc/0xe <- do_sys_open: 0xfffffffffffffffe 0xffffffff81367a3a + <...>-1447 [001] 1038282.286885: do_sys_open+0x0/0xd6: 0xffffff9c 0x40413c 0x8000 0x1b6 + <...>-1447 [001] 1038282.286915: sys_open+0x1b/0x1d <- do_sys_open: 0x3 0xffffffff81367a3a + <...>-1447 [001] 1038282.286969: do_sys_open+0x0/0xd6: 0xffffff9c 0x4041c6 0x98800 0x10 + <...>-1447 [001] 1038282.286976: sys_open+0x1b/0x1d <- do_sys_open: 0x3 0xffffffff81367a3a + + + Each line shows when the kernel hits a probe, and <- SYMBOL means kernel +returns from SYMBOL(e.g. "sys_open+0x1b/0x1d <- do_sys_open" means kernel +returns from do_sys_open to sys_open+0x1b). + + From a82378d8802717b9776a7d9b54422f65c414d6cc Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 13 Aug 2009 16:35:18 -0400 Subject: [PATCH 036/470] tracing: Kprobe-tracer supports more than 6 arguments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Support up to 128 arguments to fetch for each kprobes event. Signed-off-by: Masami Hiramatsu Cc: Ananth N Mavinakayanahalli Cc: Avi Kivity Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: H. 
Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: Jim Keniston Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: PrzemysÅ‚aw PaweÅ‚czyk Cc: Roland McGrath Cc: Sam Ravnborg Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi Cc: Vegard Nossum LKML-Reference: <20090813203518.31965.96979.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- Documentation/trace/kprobetrace.txt | 2 +- kernel/trace/trace_kprobe.c | 21 +++++++++++++-------- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt index efff6eb1b3d..c9c09b45038 100644 --- a/Documentation/trace/kprobetrace.txt +++ b/Documentation/trace/kprobetrace.txt @@ -32,7 +32,7 @@ Synopsis of kprobe_events SYMBOL[+offs|-offs] : Symbol+offset where the probe is inserted. MEMADDR : Address where the probe is inserted. - FETCHARGS : Arguments. + FETCHARGS : Arguments. Each probe can have up to 128 args. %REG : Fetch register REG sN : Fetch Nth entry of stack (N >= 0) sa : Fetch stack address. diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 0c4f00aafb9..6d488efd16b 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -32,7 +32,7 @@ #include "trace.h" #include "trace_output.h" -#define TRACE_KPROBE_ARGS 6 +#define MAX_TRACE_ARGS 128 #define MAX_ARGSTR_LEN 63 /* currently, trace_kprobe only supports X86. */ @@ -184,11 +184,15 @@ struct trace_probe { struct kretprobe rp; }; const char *symbol; /* symbol name */ - unsigned int nr_args; - struct fetch_func args[TRACE_KPROBE_ARGS]; struct ftrace_event_call call; + unsigned int nr_args; + struct fetch_func args[]; }; +#define SIZEOF_TRACE_PROBE(n) \ + (offsetof(struct trace_probe, args) + \ + (sizeof(struct fetch_func) * (n))) + static int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs); static int kretprobe_trace_func(struct kretprobe_instance *ri, struct pt_regs *regs); @@ -263,11 +267,11 @@ static DEFINE_MUTEX(probe_lock); static LIST_HEAD(probe_list); static struct trace_probe *alloc_trace_probe(const char *symbol, - const char *event) + const char *event, int nargs) { struct trace_probe *tp; - tp = kzalloc(sizeof(struct trace_probe), GFP_KERNEL); + tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL); if (!tp) return ERR_PTR(-ENOMEM); @@ -573,9 +577,10 @@ static int create_trace_probe(int argc, char **argv) if (offset && is_return) return -EINVAL; } + argc -= 2; argv += 2; /* setup a probe */ - tp = alloc_trace_probe(symbol, event); + tp = alloc_trace_probe(symbol, event, argc); if (IS_ERR(tp)) return PTR_ERR(tp); @@ -594,8 +599,8 @@ static int create_trace_probe(int argc, char **argv) kp->addr = addr; /* parse arguments */ - argc -= 2; argv += 2; ret = 0; - for (i = 0; i < argc && i < TRACE_KPROBE_ARGS; i++) { + ret = 0; + for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { if (strlen(argv[i]) > MAX_ARGSTR_LEN) { pr_info("Argument%d(%s) is too long.\n", i, argv[i]); ret = -ENOSPC; From 4263565d491145b57621a761714f2ca6f1293a45 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 13 Aug 2009 16:35:26 -0400 Subject: [PATCH 037/470] tracing: Generate names for each kprobe event automatically MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Generate names for each kprobe event based on the probe point. (SYMBOL+offs or MEMADDR). Also remove generic k*probe event types because there is no user of those types. 
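For illustration (a sketch based on the name format this patch introduces; the exact string depends on the probe type, symbol and offset actually given), a probe added without an explicit EVENT name would show up roughly as:

  echo p do_sys_open > /sys/kernel/debug/tracing/kprobe_events
  ls /sys/kernel/debug/tracing/events/kprobes/
  p@do_sys_open+0

i.e. generated names follow "p@SYMBOL+offs" for kprobes and "r@SYMBOL+offs" for kretprobes, or "p@0xADDRESS" when a raw memory address is used instead of a symbol.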
Signed-off-by: Masami Hiramatsu Cc: Ananth N Mavinakayanahalli Cc: Avi Kivity Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: H. Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: Jim Keniston Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: PrzemysÅ‚aw PaweÅ‚czyk Cc: Roland McGrath Cc: Sam Ravnborg Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi Cc: Vegard Nossum LKML-Reference: <20090813203526.31965.56672.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- Documentation/trace/kprobetrace.txt | 3 +- kernel/trace/trace_event_types.h | 18 -------- kernel/trace/trace_kprobe.c | 64 +++++++++++++++-------------- 3 files changed, 35 insertions(+), 50 deletions(-) diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt index c9c09b45038..5e59e854e71 100644 --- a/Documentation/trace/kprobetrace.txt +++ b/Documentation/trace/kprobetrace.txt @@ -28,7 +28,8 @@ Synopsis of kprobe_events p[:EVENT] SYMBOL[+offs|-offs]|MEMADDR [FETCHARGS] : Set a probe r[:EVENT] SYMBOL[+0] [FETCHARGS] : Set a return probe - EVENT : Event name. + EVENT : Event name. If omitted, the event name is generated + based on SYMBOL+offs or MEMADDR. SYMBOL[+offs|-offs] : Symbol+offset where the probe is inserted. MEMADDR : Address where the probe is inserted. diff --git a/kernel/trace/trace_event_types.h b/kernel/trace/trace_event_types.h index 186b598a1f1..e74f0906ab1 100644 --- a/kernel/trace/trace_event_types.h +++ b/kernel/trace/trace_event_types.h @@ -175,22 +175,4 @@ TRACE_EVENT_FORMAT(kmem_free, TRACE_KMEM_FREE, kmemtrace_free_entry, ignore, TP_RAW_FMT("type:%u call_site:%lx ptr:%p") ); -TRACE_EVENT_FORMAT(kprobe, TRACE_KPROBE, kprobe_trace_entry, ignore, - TRACE_STRUCT( - TRACE_FIELD(unsigned long, ip, ip) - TRACE_FIELD(int, nargs, nargs) - TRACE_FIELD_ZERO(unsigned long, args) - ), - TP_RAW_FMT("%08lx: args:0x%lx ...") -); - -TRACE_EVENT_FORMAT(kretprobe, TRACE_KRETPROBE, kretprobe_trace_entry, ignore, - TRACE_STRUCT( - TRACE_FIELD(unsigned long, func, func) - TRACE_FIELD(unsigned long, ret_ip, ret_ip) - TRACE_FIELD(int, nargs, nargs) - TRACE_FIELD_ZERO(unsigned long, args) - ), - TP_RAW_FMT("%08lx <- %08lx: args:0x%lx ...") -); #undef TRACE_SYSTEM diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 6d488efd16b..8aeb24cc295 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -34,6 +34,7 @@ #define MAX_TRACE_ARGS 128 #define MAX_ARGSTR_LEN 63 +#define MAX_EVENT_NAME_LEN 64 /* currently, trace_kprobe only supports X86. 
*/ @@ -280,11 +281,11 @@ static struct trace_probe *alloc_trace_probe(const char *symbol, if (!tp->symbol) goto error; } - if (event) { - tp->call.name = kstrdup(event, GFP_KERNEL); - if (!tp->call.name) - goto error; - } + if (!event) + goto error; + tp->call.name = kstrdup(event, GFP_KERNEL); + if (!tp->call.name) + goto error; INIT_LIST_HEAD(&tp->list); return tp; @@ -314,7 +315,7 @@ static struct trace_probe *find_probe_event(const char *event) struct trace_probe *tp; list_for_each_entry(tp, &probe_list, list) - if (tp->call.name && !strcmp(tp->call.name, event)) + if (!strcmp(tp->call.name, event)) return tp; return NULL; } @@ -330,8 +331,7 @@ static void __unregister_trace_probe(struct trace_probe *tp) /* Unregister a trace_probe and probe_event: call with locking probe_lock */ static void unregister_trace_probe(struct trace_probe *tp) { - if (tp->call.name) - unregister_probe_event(tp); + unregister_probe_event(tp); __unregister_trace_probe(tp); list_del(&tp->list); } @@ -360,18 +360,16 @@ static int register_trace_probe(struct trace_probe *tp) goto end; } /* register as an event */ - if (tp->call.name) { - old_tp = find_probe_event(tp->call.name); - if (old_tp) { - /* delete old event */ - unregister_trace_probe(old_tp); - free_trace_probe(old_tp); - } - ret = register_probe_event(tp); - if (ret) { - pr_warning("Faild to register probe event(%d)\n", ret); - __unregister_trace_probe(tp); - } + old_tp = find_probe_event(tp->call.name); + if (old_tp) { + /* delete old event */ + unregister_trace_probe(old_tp); + free_trace_probe(old_tp); + } + ret = register_probe_event(tp); + if (ret) { + pr_warning("Faild to register probe event(%d)\n", ret); + __unregister_trace_probe(tp); } list_add_tail(&tp->list, &probe_list); end: @@ -580,7 +578,18 @@ static int create_trace_probe(int argc, char **argv) argc -= 2; argv += 2; /* setup a probe */ - tp = alloc_trace_probe(symbol, event, argc); + if (!event) { + /* Make a new event name */ + char buf[MAX_EVENT_NAME_LEN]; + if (symbol) + snprintf(buf, MAX_EVENT_NAME_LEN, "%c@%s%+ld", + is_return ? 'r' : 'p', symbol, offset); + else + snprintf(buf, MAX_EVENT_NAME_LEN, "%c@0x%p", + is_return ? 'r' : 'p', addr); + tp = alloc_trace_probe(symbol, buf, argc); + } else + tp = alloc_trace_probe(symbol, event, argc); if (IS_ERR(tp)) return PTR_ERR(tp); @@ -661,8 +670,7 @@ static int probes_seq_show(struct seq_file *m, void *v) char buf[MAX_ARGSTR_LEN + 1]; seq_printf(m, "%c", probe_is_return(tp) ? 
'r' : 'p'); - if (tp->call.name) - seq_printf(m, ":%s", tp->call.name); + seq_printf(m, ":%s", tp->call.name); if (tp->symbol) seq_printf(m, " %s%+ld", probe_symbol(tp), probe_offset(tp)); @@ -780,10 +788,7 @@ static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) struct ring_buffer_event *event; int size, i, pc; unsigned long irq_flags; - struct ftrace_event_call *call = &event_kprobe; - - if (&tp->call.name) - call = &tp->call; + struct ftrace_event_call *call = &tp->call; local_save_flags(irq_flags); pc = preempt_count(); @@ -815,10 +820,7 @@ static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri, struct ring_buffer_event *event; int size, i, pc; unsigned long irq_flags; - struct ftrace_event_call *call = &event_kretprobe; - - if (&tp->call.name) - call = &tp->call; + struct ftrace_event_call *call = &tp->call; local_save_flags(irq_flags); pc = preempt_count(); From ff50d99136c3315513ef3b2921e77f35ab04d081 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 13 Aug 2009 16:35:34 -0400 Subject: [PATCH 038/470] tracing: Kprobe tracer assigns new event ids for each event MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Assign new event ids for each kprobes event. This doesn't clear ring_buffer when unregistering each kprobe event. Thus, if you mind 'Unknown event' messages, clear the buffer manually after changing kprobe events. Signed-off-by: Masami Hiramatsu Cc: Ananth N Mavinakayanahalli Cc: Avi Kivity Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: H. Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: Jim Keniston Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: PrzemysÅ‚aw PaweÅ‚czyk Cc: Roland McGrath Cc: Sam Ravnborg Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi Cc: Vegard Nossum LKML-Reference: <20090813203534.31965.49105.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- kernel/trace/trace.h | 6 ----- kernel/trace/trace_kprobe.c | 51 +++++++++++-------------------------- 2 files changed, 15 insertions(+), 42 deletions(-) diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 667f832d16b..f5362a0529e 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -38,8 +38,6 @@ enum trace_type { TRACE_KMEM_FREE, TRACE_POWER, TRACE_BLK, - TRACE_KPROBE, - TRACE_KRETPROBE, __TRACE_LAST_TYPE, }; @@ -343,10 +341,6 @@ extern void __ftrace_bad_type(void); TRACE_KMEM_ALLOC); \ IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \ TRACE_KMEM_FREE); \ - IF_ASSIGN(var, ent, struct kprobe_trace_entry, \ - TRACE_KPROBE); \ - IF_ASSIGN(var, ent, struct kretprobe_trace_entry, \ - TRACE_KRETPROBE); \ __ftrace_bad_type(); \ } while (0) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 8aeb24cc295..9c067bf47d5 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -186,6 +186,7 @@ struct trace_probe { }; const char *symbol; /* symbol name */ struct ftrace_event_call call; + struct trace_event event; unsigned int nr_args; struct fetch_func args[]; }; @@ -795,7 +796,7 @@ static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); - event = trace_current_buffer_lock_reserve(TRACE_KPROBE, size, + event = trace_current_buffer_lock_reserve(call->id, size, irq_flags, pc); if (!event) return 0; @@ -827,7 +828,7 @@ static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri, size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args); - event = 
trace_current_buffer_lock_reserve(TRACE_KRETPROBE, size, + event = trace_current_buffer_lock_reserve(call->id, size, irq_flags, pc); if (!event) return 0; @@ -853,7 +854,7 @@ print_kprobe_event(struct trace_iterator *iter, int flags) struct trace_seq *s = &iter->seq; int i; - trace_assign_type(field, iter->ent); + field = (struct kprobe_trace_entry *)iter->ent; if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) goto partial; @@ -880,7 +881,7 @@ print_kretprobe_event(struct trace_iterator *iter, int flags) struct trace_seq *s = &iter->seq; int i; - trace_assign_type(field, iter->ent); + field = (struct kretprobe_trace_entry *)iter->ent; if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) goto partial; @@ -906,16 +907,6 @@ partial: return TRACE_TYPE_PARTIAL_LINE; } -static struct trace_event kprobe_trace_event = { - .type = TRACE_KPROBE, - .trace = print_kprobe_event, -}; - -static struct trace_event kretprobe_trace_event = { - .type = TRACE_KRETPROBE, - .trace = print_kretprobe_event, -}; - static int probe_event_enable(struct ftrace_event_call *call) { struct trace_probe *tp = (struct trace_probe *)call->data; @@ -1104,35 +1095,35 @@ static int register_probe_event(struct trace_probe *tp) /* Initialize ftrace_event_call */ call->system = "kprobes"; if (probe_is_return(tp)) { - call->event = &kretprobe_trace_event; - call->id = TRACE_KRETPROBE; + tp->event.trace = print_kretprobe_event; call->raw_init = probe_event_raw_init; call->show_format = kretprobe_event_show_format; call->define_fields = kretprobe_event_define_fields; } else { - call->event = &kprobe_trace_event; - call->id = TRACE_KPROBE; + tp->event.trace = print_kprobe_event; call->raw_init = probe_event_raw_init; call->show_format = kprobe_event_show_format; call->define_fields = kprobe_event_define_fields; } + call->event = &tp->event; + call->id = register_ftrace_event(&tp->event); + if (!call->id) + return -ENODEV; call->enabled = 1; call->regfunc = probe_event_enable; call->unregfunc = probe_event_disable; call->data = tp; ret = trace_add_event_call(call); - if (ret) + if (ret) { pr_info("Failed to register kprobe event: %s\n", call->name); + unregister_ftrace_event(&tp->event); + } return ret; } static void unregister_probe_event(struct trace_probe *tp) { - /* - * Prevent to unregister event itself because the event is shared - * among other probes. - */ - tp->call.event = NULL; + /* tp->event is unregistered in trace_remove_event_call() */ trace_remove_event_call(&tp->call); } @@ -1141,18 +1132,6 @@ static __init int init_kprobe_trace(void) { struct dentry *d_tracer; struct dentry *entry; - int ret; - - ret = register_ftrace_event(&kprobe_trace_event); - if (!ret) { - pr_warning("Could not register kprobe_trace_event type.\n"); - return 0; - } - ret = register_ftrace_event(&kretprobe_trace_event); - if (!ret) { - pr_warning("Could not register kretprobe_trace_event type.\n"); - return 0; - } d_tracer = tracing_init_dentry(); if (!d_tracer) From cd7e7bd5e44718c7625ce1e1f0fda53d77cd3797 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 13 Aug 2009 16:35:42 -0400 Subject: [PATCH 039/470] tracing: Add kprobes event profiling interface MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add profiling interfaces for each kprobes event. This interface provides how many times each probe hit or missed. Signed-off-by: Masami Hiramatsu Cc: Ananth N Mavinakayanahalli Cc: Avi Kivity Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: H. 
Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: Jim Keniston Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: PrzemysÅ‚aw PaweÅ‚czyk Cc: Roland McGrath Cc: Sam Ravnborg Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi Cc: Vegard Nossum LKML-Reference: <20090813203541.31965.8452.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- Documentation/trace/kprobetrace.txt | 8 ++++++ kernel/trace/trace_kprobe.c | 43 +++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+) diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt index 5e59e854e71..3de75174716 100644 --- a/Documentation/trace/kprobetrace.txt +++ b/Documentation/trace/kprobetrace.txt @@ -70,6 +70,14 @@ filter: names and field names for describing filters. +Event Profiling +--------------- + You can check the total number of probe hits and probe miss-hits via +/sys/kernel/debug/tracing/kprobe_profile. + The first column is event name, the second is the number of probe hits, +the third is the number of probe miss-hits. + + Usage examples -------------- To add a probe as a new event, write a new definition to kprobe_events diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 9c067bf47d5..ce68197767d 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -184,6 +184,7 @@ struct trace_probe { struct kprobe kp; struct kretprobe rp; }; + unsigned long nhit; const char *symbol; /* symbol name */ struct ftrace_event_call call; struct trace_event event; @@ -781,6 +782,37 @@ static const struct file_operations kprobe_events_ops = { .write = probes_write, }; +/* Probes profiling interfaces */ +static int probes_profile_seq_show(struct seq_file *m, void *v) +{ + struct trace_probe *tp = v; + + seq_printf(m, " %-44s %15lu %15lu\n", tp->call.name, tp->nhit, + probe_is_return(tp) ? 
tp->rp.kp.nmissed : tp->kp.nmissed); + + return 0; +} + +static const struct seq_operations profile_seq_op = { + .start = probes_seq_start, + .next = probes_seq_next, + .stop = probes_seq_stop, + .show = probes_profile_seq_show +}; + +static int profile_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &profile_seq_op); +} + +static const struct file_operations kprobe_profile_ops = { + .owner = THIS_MODULE, + .open = profile_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + /* Kprobe handler */ static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) { @@ -791,6 +823,8 @@ static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) unsigned long irq_flags; struct ftrace_event_call *call = &tp->call; + tp->nhit++; + local_save_flags(irq_flags); pc = preempt_count(); @@ -1140,9 +1174,18 @@ static __init int init_kprobe_trace(void) entry = debugfs_create_file("kprobe_events", 0644, d_tracer, NULL, &kprobe_events_ops); + /* Event list interface */ if (!entry) pr_warning("Could not create debugfs " "'kprobe_events' entry\n"); + + /* Profile interface */ + entry = debugfs_create_file("kprobe_profile", 0444, d_tracer, + NULL, &kprobe_profile_ops); + + if (!entry) + pr_warning("Could not create debugfs " + "'kprobe_profile' entry\n"); return 0; } fs_initcall(init_kprobe_trace); From 8d7d14fb27818eb08ebedf9f4a6e286970fe9977 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 21 Aug 2009 15:43:07 -0400 Subject: [PATCH 040/470] x86: Fix x86 instruction decoder selftest to check only .text MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix x86 instruction decoder selftest to check only .text because other sections (e.g. .notes) will have random bytes which don't need to be checked. Signed-off-by: Masami Hiramatsu Cc: Jim Keniston Cc: H. Peter Anvin Cc: Ananth N Mavinakayanahalli Cc: Avi Kivity Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Ingo Molnar Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: PrzemysÅ‚aw PaweÅ‚czyk Cc: Roland McGrath Cc: Sam Ravnborg Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi Cc: Vegard Nossum LKML-Reference: <20090821194307.12478.76938.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- arch/x86/tools/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile index 3dd626b99dc..95e9cc4bcd9 100644 --- a/arch/x86/tools/Makefile +++ b/arch/x86/tools/Makefile @@ -1,6 +1,6 @@ PHONY += posttest quiet_cmd_posttest = TEST $@ - cmd_posttest = $(OBJDUMP) -d $(objtree)/vmlinux | awk -f $(srctree)/arch/x86/tools/distill.awk | $(obj)/test_get_len + cmd_posttest = $(OBJDUMP) -d -j .text $(objtree)/vmlinux | awk -f $(srctree)/arch/x86/tools/distill.awk | $(obj)/test_get_len posttest: $(obj)/test_get_len vmlinux $(call cmd,posttest) From 69d991f32152283cbc373136fa45bbb152b32048 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 21 Aug 2009 15:43:16 -0400 Subject: [PATCH 041/470] x86: Check awk features before generating inat-tables.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Check some awk mandatory features to generate inat-tables.c that old mawk doesn't support. Signed-off-by: Masami Hiramatsu Cc: Jim Keniston Cc: H. Peter Anvin Cc: Ananth N Mavinakayanahalli Cc: Avi Kivity Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. 
Eigler Cc: Ingo Molnar Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: PrzemysÅ‚aw PaweÅ‚czyk Cc: Roland McGrath Cc: Sam Ravnborg Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi Cc: Vegard Nossum LKML-Reference: <20090821194316.12478.57394.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- arch/x86/tools/gen-insn-attr-x86.awk | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk index 93b62c92d04..19ba096b7dd 100644 --- a/arch/x86/tools/gen-insn-attr-x86.awk +++ b/arch/x86/tools/gen-insn-attr-x86.awk @@ -4,7 +4,25 @@ # # Usage: awk -f gen-insn-attr-x86.awk x86-opcode-map.txt > inat-tables.c +# Awk implementation sanity check +function check_awk_implement() { + if (!match("abc", "[[:lower:]]+")) + return "Your awk doesn't support charactor-class." + if (sprintf("%x", 0) != "0") + return "Your awk has a printf-format problem." + return "" +} + BEGIN { + # Implementation error checking + awkchecked = check_awk_implement() + if (awkchecked != "") { + print "Error: " awkchecked > "/dev/stderr" + print "Please try to use gawk." > "/dev/stderr" + exit 1 + } + + # Setup generating tables print "/* x86 opcode map generated from x86-opcode-map.txt */" print "/* Do not change this code. */" ggid = 1 @@ -293,6 +311,8 @@ function convert_operands(opnd, i,imm,mod) } END { + if (awkchecked != "") + exit 1 # print escape opcode map's array print "/* Escape opcode map array */" print "const insn_attr_t const *inat_escape_tables[INAT_ESC_MAX + 1]" \ From 38a47497d9e34632abbeb484603cedf10c4b05e4 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 21 Aug 2009 15:43:43 -0400 Subject: [PATCH 042/470] tracing/kprobes: Fix format typo in trace_kprobes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix a format typo in kprobe-tracer. Currently, it shows 'tsize' in format; $ cat /debug/tracing/events/kprobes/event/format ... field: unsigned long ip; offset:16;tsize:8; field: int nargs; offset:24;tsize:4; ... This should be '\tsize'; $ cat /debug/tracing/events/kprobes/event/format ... field: unsigned long ip; offset:16; size:8; field: int nargs; offset:24; size:4; ... Signed-off-by: Masami Hiramatsu Cc: Jim Keniston Cc: H. Peter Anvin Cc: Ananth N Mavinakayanahalli Cc: Avi Kivity Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. 
Eigler Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: PrzemysÅ‚aw PaweÅ‚czyk Cc: Roland McGrath Cc: Sam Ravnborg Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi Cc: Vegard Nossum LKML-Reference: <20090821194343.12478.37618.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- kernel/trace/trace_kprobe.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index ce68197767d..1a9ca79fe64 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1070,7 +1070,7 @@ static int __probe_event_show_format(struct trace_seq *s, #define SHOW_FIELD(type, item, name) \ do { \ ret = trace_seq_printf(s, "\tfield: " #type " %s;\t" \ - "offset:%u;tsize:%u;\n", name, \ + "offset:%u;\tsize:%u;\n", name, \ (unsigned int)offsetof(typeof(field), item),\ (unsigned int)sizeof(type)); \ if (!ret) \ From 30a7e073b590ebd1829a906164b0a637e77cc967 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 21 Aug 2009 15:43:51 -0400 Subject: [PATCH 043/470] tracing/kprobes: Change trace_arg to probe_arg MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change trace_arg_string() and parse_trace_arg() to probe_arg_string() and parse_probe_arg(), since those are kprobe-tracer local functions. Signed-off-by: Masami Hiramatsu Cc: Jim Keniston Cc: H. Peter Anvin Cc: Ananth N Mavinakayanahalli Cc: Avi Kivity Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Ingo Molnar Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: PrzemysÅ‚aw PaweÅ‚czyk Cc: Roland McGrath Cc: Sam Ravnborg Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi Cc: Vegard Nossum LKML-Reference: <20090821194351.12478.15247.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- kernel/trace/trace_kprobe.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 1a9ca79fe64..f4ec3fc87b2 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -220,7 +220,7 @@ static __kprobes void *probe_address(struct trace_probe *tp) return (probe_is_return(tp)) ? 
tp->rp.kp.addr : tp->kp.addr; } -static int trace_arg_string(char *buf, size_t n, struct fetch_func *ff) +static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff) { int ret = -EINVAL; @@ -250,7 +250,7 @@ static int trace_arg_string(char *buf, size_t n, struct fetch_func *ff) if (ret >= n) goto end; l += ret; - ret = trace_arg_string(buf + l, n - l, &id->orig); + ret = probe_arg_string(buf + l, n - l, &id->orig); if (ret < 0) goto end; l += ret; @@ -408,7 +408,7 @@ static int split_symbol_offset(char *symbol, long *offset) #define PARAM_MAX_ARGS 16 #define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long)) -static int parse_trace_arg(char *arg, struct fetch_func *ff, int is_return) +static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return) { int ret = 0; unsigned long param; @@ -499,7 +499,7 @@ static int parse_trace_arg(char *arg, struct fetch_func *ff, int is_return) if (!id) return -ENOMEM; id->offset = offset; - ret = parse_trace_arg(arg, &id->orig, is_return); + ret = parse_probe_arg(arg, &id->orig, is_return); if (ret) kfree(id); else { @@ -617,7 +617,7 @@ static int create_trace_probe(int argc, char **argv) ret = -ENOSPC; goto error; } - ret = parse_trace_arg(argv[i], &tp->args[i], is_return); + ret = parse_probe_arg(argv[i], &tp->args[i], is_return); if (ret) goto error; } @@ -680,7 +680,7 @@ static int probes_seq_show(struct seq_file *m, void *v) seq_printf(m, " 0x%p", probe_address(tp)); for (i = 0; i < tp->nr_args; i++) { - ret = trace_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i]); + ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i]); if (ret < 0) { pr_warning("Argument%d decoding error(%d).\n", i, ret); return ret; @@ -997,7 +997,7 @@ static int kprobe_event_define_fields(struct ftrace_event_call *event_call) sprintf(buf, "arg%d", i); DEFINE_FIELD(unsigned long, args[i], buf, 0); /* Set argument string as an alias field */ - ret = trace_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i]); + ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i]); if (ret < 0) return ret; DEFINE_FIELD(unsigned long, args[i], buf, 0); @@ -1024,7 +1024,7 @@ static int kretprobe_event_define_fields(struct ftrace_event_call *event_call) sprintf(buf, "arg%d", i); DEFINE_FIELD(unsigned long, args[i], buf, 0); /* Set argument string as an alias field */ - ret = trace_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i]); + ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i]); if (ret < 0) return ret; DEFINE_FIELD(unsigned long, args[i], buf, 0); @@ -1041,7 +1041,7 @@ static int __probe_event_show_format(struct trace_seq *s, /* Show aliases */ for (i = 0; i < tp->nr_args; i++) { - ret = trace_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i]); + ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i]); if (ret < 0) return ret; if (!trace_seq_printf(s, "\talias: %s;\toriginal: arg%d;\n", From 24851d2447830e6cba4c4b641cb73e713f312373 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 26 Aug 2009 23:38:30 +0200 Subject: [PATCH 044/470] tracing/kprobes: Dump the culprit kprobe in case of kprobe recursion Kprobes can enter into a probing recursion, ie: a kprobe that does an endless loop because one of its core mechanism function used during probing is also probed itself. This patch helps pinpointing the kprobe that raised such recursion by dumping it and raising a BUG instead of a warning (we also disarm the kprobe to try avoiding recursion in BUG itself). 
Having a BUG instead of a warning stops the stacktrace in the right place and doesn't pollute the logs with hundreds of traces that eventually end up in a stack overflow. Signed-off-by: Frederic Weisbecker Cc: Masami Hiramatsu Cc: Ananth N Mavinakayanahalli --- arch/x86/kernel/kprobes.c | 8 ++++++-- include/linux/kprobes.h | 2 ++ kernel/kprobes.c | 7 +++++++ 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 16ae9610f6f..ecee3d23fef 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -490,9 +490,13 @@ static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs, /* A probe has been hit in the codepath leading up * to, or just after, single-stepping of a probed * instruction. This entire codepath should strictly - * reside in .kprobes.text section. Raise a warning - * to highlight this peculiar case. + * reside in .kprobes.text section. + * Raise a BUG or we'll continue in an endless + * reentering loop and eventually a stack overflow. */ + arch_disarm_kprobe(p); + dump_kprobe(p); + BUG(); } default: /* impossible cases */ diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index bcd9c07848b..87eb79c9dd6 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -296,6 +296,8 @@ void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); int disable_kprobe(struct kprobe *kp); int enable_kprobe(struct kprobe *kp); +void dump_kprobe(struct kprobe *kp); + #else /* !CONFIG_KPROBES: */ static inline int kprobes_built_in(void) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index ef177d653b2..f72e96c25a3 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1141,6 +1141,13 @@ static void __kprobes kill_kprobe(struct kprobe *p) arch_remove_kprobe(p); } +void __kprobes dump_kprobe(struct kprobe *kp) +{ + printk(KERN_WARNING "Dumping kprobe:\n"); + printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n", + kp->symbol_name, kp->addr, kp->offset); +} + /* Module notifier call back, checking kprobes on the module */ static int __kprobes kprobes_module_callback(struct notifier_block *nb, unsigned long val, void *data) From aeaeae1187d7520f1c5559623f0a149da6a1c96e Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 27 Aug 2009 05:09:51 +0200 Subject: [PATCH 045/470] tracing: Restore the const qualifier for field names and types definition Restore the const qualifier in field's name and type parameters of trace_define_field that was lost while solving a conflict. Fields names and types are defined as builtin constant strings in static TRACE_EVENTs. But kprobes allocates these dynamically. That said, we still want to always pass these strings as const char * in trace_define_fields() to avoid any further accidental writes on the pointed strings. 
Reported-by: Li Zefan Signed-off-by: Frederic Weisbecker Cc: Steven Rostedt --- include/linux/ftrace_event.h | 6 +++--- kernel/trace/trace_events.c | 4 ++-- kernel/trace/trace_syscalls.c | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 1ab3089b5c5..73edf5a52e3 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -148,9 +148,9 @@ enum { }; extern int trace_define_common_fields(struct ftrace_event_call *call); -extern int trace_define_field(struct ftrace_event_call *call, char *type, - char *name, int offset, int size, int is_signed, - int filter_type); +extern int trace_define_field(struct ftrace_event_call *call, const char *type, + const char *name, int offset, int size, + int is_signed, int filter_type); extern int trace_add_event_call(struct ftrace_event_call *call); extern void trace_remove_event_call(struct ftrace_event_call *call); diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 8079bb511c4..197cdaa96c4 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -27,8 +27,8 @@ DEFINE_MUTEX(event_mutex); LIST_HEAD(ftrace_events); -int trace_define_field(struct ftrace_event_call *call, char *type, - char *name, int offset, int size, int is_signed, +int trace_define_field(struct ftrace_event_call *call, const char *type, + const char *name, int offset, int size, int is_signed, int filter_type) { struct ftrace_event_field *field; diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 5931933587e..a928dd00453 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -193,8 +193,8 @@ int syscall_enter_define_fields(struct ftrace_event_call *call) return ret; for (i = 0; i < meta->nb_args; i++) { - ret = trace_define_field(call, (char *)meta->types[i], - (char *)meta->args[i], offset, + ret = trace_define_field(call, meta->types[i], + meta->args[i], offset, sizeof(unsigned long), 0, FILTER_OTHER); offset += sizeof(unsigned long); From f8468f3695209735c1595342f6bd95f7bdab66e1 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 27 Aug 2009 05:23:29 +0200 Subject: [PATCH 046/470] tracing: Remove unneeded pointer casts Cleaup uneeded casts from void * to char * in syscalls tracing file. 
Reported-by: Li Zefan Signed-off-by: Frederic Weisbecker --- kernel/trace/trace_syscalls.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index a928dd00453..e7c676e50a7 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -324,7 +324,7 @@ int reg_event_syscall_exit(struct ftrace_event_call *call) int num; char *name; - name = (char *)call->data; + name = call->data; num = syscall_name_to_nr(name); if (num < 0 || num >= FTRACE_SYSCALL_MAX) return -ENOSYS; @@ -347,7 +347,7 @@ void unreg_event_syscall_exit(struct ftrace_event_call *call) int num; char *name; - name = (char *)call->data; + name = call->data; num = syscall_name_to_nr(name); if (num < 0 || num >= FTRACE_SYSCALL_MAX) return; From e9afe9e1b3fdbd56cca53959a2519e70db9c8095 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 27 Aug 2009 13:22:58 -0400 Subject: [PATCH 047/470] kprobes/x86: Call BUG() when reentering probe into KPROBES_HIT_SS Call BUG() when a probe have been hit on the way of kprobe processing path, because that kind of probes are currently unrecoverable (recovering it will cause an infinite loop and stack overflow). The original code seems to assume that it's caused by an int3 which another subsystem inserted on out-of-line singlestep buffer if the hitting probe is same as current probe. However, in that case, int3-hitting-address is on the out-of-line buffer and should be different from first (current) int3 address. Thus, I decided to remove the code. I also removes arch_disarm_kprobe() because it will involve other stuffs in text_poke(). Signed-off-by: Masami Hiramatsu Acked-by: Ananth N Mavinakayanahalli Cc: Ingo Molnar LKML-Reference: <20090827172258.8246.61889.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/kprobes.c | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index ecee3d23fef..e0fb615ba1e 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -482,22 +482,16 @@ static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs, kcb->kprobe_status = KPROBE_REENTER; break; case KPROBE_HIT_SS: - if (p == kprobe_running()) { - regs->flags &= ~X86_EFLAGS_TF; - regs->flags |= kcb->kprobe_saved_flags; - return 0; - } else { - /* A probe has been hit in the codepath leading up - * to, or just after, single-stepping of a probed - * instruction. This entire codepath should strictly - * reside in .kprobes.text section. - * Raise a BUG or we'll continue in an endless - * reentering loop and eventually a stack overflow. - */ - arch_disarm_kprobe(p); - dump_kprobe(p); - BUG(); - } + /* A probe has been hit in the codepath leading up to, or just + * after, single-stepping of a probed instruction. This entire + * codepath should strictly reside in .kprobes.text section. + * Raise a BUG or we'll continue in an endless reentering loop + * and eventually a stack overflow. + */ + printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n", + p->addr); + dump_kprobe(p); + BUG(); default: /* impossible cases */ WARN_ON(1); From f5ad31158d60946b9fd18c8a79c283a6bc432430 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 27 Aug 2009 13:23:04 -0400 Subject: [PATCH 048/470] kprobes/x86-64: Allow to reenter probe on post_handler Allow to reenter probe on the post_handler of another probe on x86-64, because x86-64 already allows reentering int3. 
In that case, reentered probe just increases kp.nmissed and returns. Signed-off-by: Masami Hiramatsu Acked-by: Ananth N Mavinakayanahalli Cc: Ingo Molnar LKML-Reference: <20090827172304.8246.4822.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/kprobes.c | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index e0fb615ba1e..c5f1f117e0c 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -463,17 +463,6 @@ static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs, { switch (kcb->kprobe_status) { case KPROBE_HIT_SSDONE: -#ifdef CONFIG_X86_64 - /* TODO: Provide re-entrancy from post_kprobes_handler() and - * avoid exception stack corruption while single-stepping on - * the instruction of the new probe. - */ - arch_disarm_kprobe(p); - regs->ip = (unsigned long)p->addr; - reset_current_kprobe(); - preempt_enable_no_resched(); - break; -#endif case KPROBE_HIT_ACTIVE: save_previous_kprobe(kcb); set_current_kprobe(p, regs, kcb); From 62c9295f9dd250ea1bb2c8078642a275a9ce82f8 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 27 Aug 2009 13:23:11 -0400 Subject: [PATCH 049/470] kprobes/x86: Fix to add __kprobes to in-kernel fault handing functions Add __kprobes to the functions which handle in-kernel fixable page faults. Since kprobes can cause those in-kernel page faults by accessing kprobe data structures, probing those fault functions will cause fault-int3-loop (do_page_fault has already been marked as __kprobes). Signed-off-by: Masami Hiramatsu Acked-by: Ananth N Mavinakayanahalli Cc: Ingo Molnar LKML-Reference: <20090827172311.8246.92725.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- arch/x86/mm/fault.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index bfae139182f..c322e59f2d1 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -38,7 +38,8 @@ enum x86_pf_error_code { * Returns 0 if mmiotrace is disabled, or if the fault is not * handled by mmiotrace: */ -static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr) +static inline int __kprobes +kmmio_fault(struct pt_regs *regs, unsigned long addr) { if (unlikely(is_kmmio_active())) if (kmmio_handler(regs, addr) == 1) @@ -46,7 +47,7 @@ static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr) return 0; } -static inline int notify_page_fault(struct pt_regs *regs) +static inline int __kprobes notify_page_fault(struct pt_regs *regs) { int ret = 0; @@ -239,7 +240,7 @@ void vmalloc_sync_all(void) * * Handle a fault on the vmalloc or module mapping area */ -static noinline int vmalloc_fault(unsigned long address) +static noinline __kprobes int vmalloc_fault(unsigned long address) { unsigned long pgd_paddr; pmd_t *pmd_k; @@ -361,7 +362,7 @@ void vmalloc_sync_all(void) * * This assumes no large pages in there. */ -static noinline int vmalloc_fault(unsigned long address) +static noinline __kprobes int vmalloc_fault(unsigned long address) { pgd_t *pgd, *pgd_ref; pud_t *pud, *pud_ref; @@ -858,7 +859,7 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte) * There are no security implications to leaving a stale TLB when * increasing the permissions on a page. 
*/ -static noinline int +static noinline __kprobes int spurious_fault(unsigned long error_code, unsigned long address) { pgd_t *pgd; From 8f270083587a4cb70fa14f0e2fd698eb08a4dd07 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 27 Aug 2009 13:23:18 -0400 Subject: [PATCH 050/470] kprobes: Fix to add __kprobes to notify_die Add __kprobes to notify_die() because do_int3() calls notify_die() instead of atomic_notify_call_chain() which is already marked as __kprobes. Signed-off-by: Masami Hiramatsu Acked-by: Ananth N Mavinakayanahalli Cc: Ingo Molnar LKML-Reference: <20090827172318.8246.53702.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- kernel/notifier.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/notifier.c b/kernel/notifier.c index 61d5aa5eced..acd24e7643e 100644 --- a/kernel/notifier.c +++ b/kernel/notifier.c @@ -558,7 +558,7 @@ EXPORT_SYMBOL(unregister_reboot_notifier); static ATOMIC_NOTIFIER_HEAD(die_chain); -int notrace notify_die(enum die_val val, const char *str, +int notrace __kprobes notify_die(enum die_val val, const char *str, struct pt_regs *regs, long err, int trap, int sig) { struct die_args args = { From 8222d718b3ad3ae49c48f69ae4b6a1128c9a92cf Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 27 Aug 2009 13:23:25 -0400 Subject: [PATCH 051/470] kprobes/x86-64: Fix to move common_interrupt to .kprobes.text Since nmi, debug and int3 returns to irq_return inside common_interrupt, probing this function will cause int3-loop, so it should be marked as __kprobes. Signed-off-by: Masami Hiramatsu Acked-by: Ananth N Mavinakayanahalli Cc: Ingo Molnar LKML-Reference: <20090827172325.8246.40000.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/entry_64.S | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index c251be74510..36e2ef5cc83 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -809,6 +809,10 @@ END(interrupt) call \func .endm +/* + * Interrupt entry/exit should be protected against kprobes + */ + .pushsection .kprobes.text, "ax" /* * The interrupt stubs push (~vector+0x80) onto the stack and * then jump to common_interrupt. @@ -947,6 +951,10 @@ ENTRY(retint_kernel) CFI_ENDPROC END(common_interrupt) +/* + * End of kprobes section + */ + .popsection /* * APIC interrupts. From 65e234ec2c4a0659ca22531dc1372a185f088517 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 27 Aug 2009 13:23:32 -0400 Subject: [PATCH 052/470] kprobes: Prohibit to probe native_get_debugreg Since do_debug() calls get_debugreg(), native_get_debugreg() will be called from singlestepping. This can cause an int3 infinite loop. We can't put it in the .text.kprobes section because it is inlined, then we blacklist its name. 
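A rough userspace sketch of the idea (illustrative only; the real table lives in kernel/kprobes.c): because the blacklist matches on symbol name rather than on section, it can still cover a function whose body is inlined into its callers and therefore never appears in .kprobes.text.

#include <stdio.h>
#include <string.h>

struct blackpoint { const char *name; };

/* Mirrors the shape of kprobe_blacklist[], terminated by a NULL name */
static const struct blackpoint blacklist[] = {
	{ "preempt_schedule" },
	{ "native_get_debugreg" },
	{ NULL }
};

static int is_blacklisted(const char *sym)
{
	const struct blackpoint *bp;

	for (bp = blacklist; bp->name; bp++)
		if (!strcmp(bp->name, sym))
			return 1;
	return 0;
}

int main(void)
{
	printf("%d\n", is_blacklisted("native_get_debugreg"));	/* 1 */
	printf("%d\n", is_blacklisted("do_sys_open"));		/* 0 */
	return 0;
}
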
Signed-off-by: Masami Hiramatsu Acked-by: Ananth N Mavinakayanahalli Cc: Ingo Molnar LKML-Reference: <20090827172332.8246.34194.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- kernel/kprobes.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index f72e96c25a3..3267d90bc9d 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -90,6 +90,7 @@ static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash) */ static struct kprobe_blackpoint kprobe_blacklist[] = { {"preempt_schedule",}, + {"native_get_debugreg",}, {NULL} /* Terminator */ }; From 50a482fbd96943516b7a2783900e8fe61a6425e7 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 28 Aug 2009 18:13:19 -0400 Subject: [PATCH 053/470] x86: Allow x86-32 instruction decoder selftest on x86-64 Pass $(CONFIG_64BIT) to the x86 insn decoder selftest in case we are decoding 32bit code on x86-64, which will happen when building kernel with ARCH=i386 on x86-64. Signed-off-by: Masami Hiramatsu Cc: Jim Keniston Cc: Ingo Molnar LKML-Reference: <20090828221319.8778.88508.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- arch/x86/tools/Makefile | 2 +- arch/x86/tools/test_get_len.c | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile index 95e9cc4bcd9..1bd006c8156 100644 --- a/arch/x86/tools/Makefile +++ b/arch/x86/tools/Makefile @@ -1,6 +1,6 @@ PHONY += posttest quiet_cmd_posttest = TEST $@ - cmd_posttest = $(OBJDUMP) -d -j .text $(objtree)/vmlinux | awk -f $(srctree)/arch/x86/tools/distill.awk | $(obj)/test_get_len + cmd_posttest = $(OBJDUMP) -d -j .text $(objtree)/vmlinux | awk -f $(srctree)/arch/x86/tools/distill.awk | $(obj)/test_get_len $(CONFIG_64BIT) posttest: $(obj)/test_get_len vmlinux $(call cmd,posttest) diff --git a/arch/x86/tools/test_get_len.c b/arch/x86/tools/test_get_len.c index 1e81adb2d8a..a3273f4244d 100644 --- a/arch/x86/tools/test_get_len.c +++ b/arch/x86/tools/test_get_len.c @@ -45,7 +45,7 @@ const char *prog; static void usage(void) { fprintf(stderr, "Usage: objdump -d a.out | awk -f distill.awk |" - " ./test_get_len\n"); + " %s [y|n](64bit flag)\n", prog); exit(1); } @@ -63,11 +63,15 @@ int main(int argc, char **argv) unsigned char insn_buf[16]; struct insn insn; int insns = 0; + int x86_64 = 0; prog = argv[0]; - if (argc > 1) + if (argc > 2) usage(); + if (argc == 2 && argv[1][0] == 'y') + x86_64 = 1; + while (fgets(line, BUFSIZE, stdin)) { char copy[BUFSIZE], *s, *tab1, *tab2; int nb = 0; @@ -93,11 +97,7 @@ int main(int argc, char **argv) break; } /* Decode an instruction */ -#ifdef __x86_64__ - insn_init(&insn, insn_buf, 1); -#else - insn_init(&insn, insn_buf, 0); -#endif + insn_init(&insn, insn_buf, x86_64); insn_get_length(&insn); if (insn.length != nb) { fprintf(stderr, "Error: %s", line); From 70069577323e6f72b845166724f34b9858134437 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 28 Aug 2009 18:13:26 -0400 Subject: [PATCH 054/470] x86: Remove unused config macros from instruction decoder selftest Remove dummy definitions of CONFIG_X86_64 and CONFIG_X86_32 because those macros are not used in the instruction decoder anymore. 
Signed-off-by: Masami Hiramatsu Cc: Jim Keniston Cc: Ingo Molnar LKML-Reference: <20090828221326.8778.70723.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- arch/x86/tools/test_get_len.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/arch/x86/tools/test_get_len.c b/arch/x86/tools/test_get_len.c index a3273f4244d..376d3385219 100644 --- a/arch/x86/tools/test_get_len.c +++ b/arch/x86/tools/test_get_len.c @@ -21,11 +21,6 @@ #include #include -#ifdef __x86_64__ -#define CONFIG_X86_64 -#else -#define CONFIG_X86_32 -#endif #define unlikely(cond) (cond) #include From d6a65dffb30d8636b1e5d4c201564ef401a246cf Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Mon, 7 Sep 2009 03:23:20 +0200 Subject: [PATCH 055/470] tracing: Fix ring-buffer and ksym tracer merge interaction The compiler warns us about: kernel/trace/trace_ksym.c: In function ksym_hbp_handler: kernel/trace/trace_ksym.c:92: attention : passing argument 1 of trace_buffer_lock_reserve from incompatible pointer type kernel/trace/trace_ksym.c:106: attention : passing argument 1 of trace_buffer_unlock_commit from incompatible pointer type Commit "e77405ad" (tracing: pass around ring buffer instead of tracer) has changed the central tracing APIs. And this change has updated every callsites of these APIs except those that aren't in tracing/core, such as the ksym tracer. Cc: Steven Rostedt Signed-off-by: Ingo Molnar --- kernel/trace/trace_ksym.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c index 2fde875ead4..6d5609c6737 100644 --- a/kernel/trace/trace_ksym.c +++ b/kernel/trace/trace_ksym.c @@ -78,17 +78,18 @@ void ksym_collect_stats(unsigned long hbp_hit_addr) void ksym_hbp_handler(struct hw_breakpoint *hbp, struct pt_regs *regs) { struct ring_buffer_event *event; - struct trace_array *tr; struct ksym_trace_entry *entry; + struct ring_buffer *buffer; int pc; if (!ksym_tracing_enabled) return; - tr = ksym_trace_array; + buffer = ksym_trace_array->buffer; + pc = preempt_count(); - event = trace_buffer_lock_reserve(tr, TRACE_KSYM, + event = trace_buffer_lock_reserve(buffer, TRACE_KSYM, sizeof(*entry), 0, pc); if (!event) return; @@ -103,7 +104,7 @@ void ksym_hbp_handler(struct hw_breakpoint *hbp, struct pt_regs *regs) ksym_collect_stats(hbp->info.address); #endif /* CONFIG_PROFILE_KSYM_TRACER */ - trace_buffer_unlock_commit(tr, event, 0, pc); + trace_buffer_unlock_commit(buffer, event, 0, pc); } /* Valid access types are represented as From f12b4f546b4e327d5620a544a2bddab68de66027 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 8 Sep 2009 12:32:46 -0400 Subject: [PATCH 056/470] x86: Add MMX support for instruction decoder Add MMX/SSE instructions to x86 opcode maps, since some of those instructions are used in the kernel. This also fixes failures in the x86 instruction decoder seftest. Signed-off-by: Masami Hiramatsu Cc: Jim Keniston Cc: H. 
Peter Anvin Cc: Sam Ravnborg Cc: Frederic Weisbecker Cc: Ingo Molnar LKML-Reference: <20090908163246.23516.78835.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Frederic Weisbecker --- arch/x86/lib/x86-opcode-map.txt | 307 +++++++++++++++++++++----------- 1 file changed, 200 insertions(+), 107 deletions(-) diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index 083dd59dd74..59e20d5c2a5 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt @@ -310,14 +310,14 @@ Referrer: 2-byte escape 0e: 0f: # 0x0f 0x10-0x1f -10: -11: -12: -13: -14: -15: -16: -17: +10: movups Vps,Wps | movss Vss,Wss (F3) | movupd Vpd,Wpd (66) | movsd Vsd,Wsd (F2) +11: movups Wps,Vps | movss Wss,Vss (F3) | movupd Wpd,Vpd (66) | movsd Wsd,Vsd (F2) +12: movlps Vq,Mq | movlpd Vq,Mq (66) | movhlps Vq,Uq | movddup Vq,Wq (F2) | movsldup Vq,Wq (F3) +13: mpvlps Mq,Vq | movlpd Mq,Vq (66) +14: unpcklps Vps,Wq | unpcklpd Vpd,Wq (66) +15: unpckhps Vps,Wq | unpckhpd Vpd,Wq (66) +16: movhps Vq,Mq | movhpd Vq,Mq (66) | movlsps Vq,Uq | movshdup Vq,Wq (F3) +17: movhps Mq,Vq | movhpd Mq,Vq (66) 18: Grp16 (1A) 19: 1a: @@ -337,12 +337,12 @@ Referrer: 2-byte escape 27: 28: movaps Vps,Wps | movapd Vpd,Wpd (66) 29: movaps Wps,Vps | movapd Wpd,Vpd (66) -2a: -2b: -2c: -2d: -2e: -2f: +2a: cvtpi2ps Vps,Qpi | cvtsi2ss Vss,Ed/q (F3) | cvtpi2pd Vpd,Qpi (66) | cvtsi2sd Vsd,Ed/q (F2) +2b: movntps Mps,Vps | movntpd Mpd,Vpd (66) +2c: cvttps2pi Ppi,Wps | cvttss2si Gd/q,Wss (F3) | cvttpd2pi Ppi,Wpd (66) | cvttsd2si Gd/q,Wsd (F2) +2d: cvtps2pi Ppi,Wps | cvtss2si Gd/q,Wss (F3) | cvtpd2pi Qpi,Wpd (66) | cvtsd2si Gd/q,Wsd (F2) +2e: ucomiss Vss,Wss | ucomisd Vsd,Wsd (66) +2f: comiss Vss,Wss | comisd Vsd,Wsd (66) # 0x0f 0x30-0x3f 30: WRMSR 31: RDTSC @@ -378,56 +378,56 @@ Referrer: 2-byte escape 4e: CMOVLE/NG Gv,Ev 4f: CMOVNLE/G Gv,Ev # 0x0f 0x50-0x5f -50: -51: -52: -53: -54: -55: -56: -57: -58: -59: -5a: -5b: -5c: -5d: -5e: -5f: +50: movmskps Gd/q,Ups | movmskpd Gd/q,Upd (66) +51: sqrtps Vps,Wps | sqrtss Vss,Wss (F3) | sqrtpd Vpd,Wpd (66) | sqrtsd Vsd,Wsd (F2) +52: rsqrtps Vps,Wps | rsqrtss Vss,Wss (F3) +53: rcpps Vps,Wps | rcpss Vss,Wss (F3) +54: andps Vps,Wps | andpd Vpd,Wpd (66) +55: andnps Vps,Wps | andnpd Vpd,Wpd (66) +56: orps Vps,Wps | orpd Vpd,Wpd (66) +57: xorps Vps,Wps | xorpd Vpd,Wpd (66) +58: addps Vps,Wps | addss Vss,Wss (F3) | addpd Vpd,Wpd (66) | addsd Vsd,Wsd (F2) +59: mulps Vps,Wps | mulss Vss,Wss (F3) | mulpd Vpd,Wpd (66) | mulsd Vsd,Wsd (F2) +5a: cvtps2pd Vpd,Wps | cvtss2sd Vsd,Wss (F3) | cvtpd2ps Vps,Wpd (66) | cvtsd2ss Vsd,Wsd (F2) +5b: cvtdq2ps Vps,Wdq | cvtps2dq Vdq,Wps (66) | cvttps2dq Vdq,Wps (F3) +5c: subps Vps,Wps | subss Vss,Wss (F3) | subpd Vpd,Wpd (66) | subsd Vsd,Wsd (F2) +5d: minps Vps,Wps | minss Vss,Wss (F3) | minpd Vpd,Wpd (66) | minsd Vsd,Wsd (F2) +5e: divps Vps,Wps | divss Vss,Wss (F3) | divpd Vpd,Wpd (66) | divsd Vsd,Wsd (F2) +5f: maxps Vps,Wps | maxss Vss,Wss (F3) | maxpd Vpd,Wpd (66) | maxsd Vsd,Wsd (F2) # 0x0f 0x60-0x6f -60: -61: -62: -63: -64: -65: -66: -67: -68: -69: -6a: -6b: -6c: -6d: -6e: -6f: +60: punpcklbw Pq,Qd | punpcklbw Vdq,Wdq (66) +61: punpcklwd Pq,Qd | punpcklwd Vdq,Wdq (66) +62: punpckldq Pq,Qd | punpckldq Vdq,Wdq (66) +63: packsswb Pq,Qq | packsswb Vdq,Wdq (66) +64: pcmpgtb Pq,Qq | pcmpgtb Vdq,Wdq (66) +65: pcmpgtw Pq,Qq | pcmpgtw(66) Vdq,Wdq +66: pcmpgtd Pq,Qq | pcmpgtd Vdq,Wdq (66) +67: packuswb Pq,Qq | packuswb(66) Vdq,Wdq +68: punpckhbw Pq,Qd | punpckhbw Vdq,Wdq (66) +69: punpckhwd Pq,Qd | punpckhwd Vdq,Wdq (66) +6a: punpckhdq Pq,Qd | punpckhdq 
Vdq,Wdq (66) +6b: packssdw Pq,Qd | packssdw Vdq,Wdq (66) +6c: punpcklqdq Vdq,Wdq (66) +6d: punpckhqdq Vdq,Wdq (66) +6e: movd/q/ Pd,Ed/q | movd/q Vdq,Ed/q (66) +6f: movq Pq,Qq | movdqa Vdq,Wdq (66) | movdqu Vdq,Wdq (F3) # 0x0f 0x70-0x7f -70: +70: pshufw Pq,Qq,Ib | pshufd Vdq,Wdq,Ib (66) | pshufhw Vdq,Wdq,Ib (F3) | pshuflw VdqWdq,Ib (F2) 71: Grp12 (1A) 72: Grp13 (1A) 73: Grp14 (1A) -74: -75: -76: -77: +74: pcmpeqb Pq,Qq | pcmpeqb Vdq,Wdq (66) +75: pcmpeqw Pq,Qq | pcmpeqw Vdq,Wdq (66) +76: pcmpeqd Pq,Qq | pcmpeqd Vdq,Wdq (66) +77: emms 78: VMREAD Ed/q,Gd/q 79: VMWRITE Gd/q,Ed/q 7a: 7b: -7c: -7d: -7e: -7f: +7c: haddps(F2) Vps,Wps | haddpd(66) Vpd,Wpd +7d: hsubps(F2) Vps,Wps | hsubpd(66) Vpd,Wpd +7e: movd/q Ed/q,Pd | movd/q Ed/q,Vdq (66) | movq Vq,Wq (F3) +7f: movq Qq,Pq | movdqa Wdq,Vdq (66) | movdqu Wdq,Vdq (F3) # 0x0f 0x80-0x8f 80: JO Jz (f64) 81: JNO Jz (f64) @@ -499,11 +499,11 @@ bf: MOVSX Gv,Ew # 0x0f 0xc0-0xcf c0: XADD Eb,Gb c1: XADD Ev,Gv -c2: +c2: cmpps Vps,Wps,Ib | cmpss Vss,Wss,Ib (F3) | cmppd Vpd,Wpd,Ib (66) | cmpsd Vsd,Wsd,Ib (F2) c3: movnti Md/q,Gd/q -c4: -c5: -c6: +c4: pinsrw Pq,Rd/q/Mw,Ib | pinsrw Vdq,Rd/q/Mw,Ib (66) +c5: pextrw Gd,Nq,Ib | pextrw Gd,Udq,Ib (66) +c6: shufps Vps,Wps,Ib | shufpd Vpd,Wpd,Ib (66) c7: Grp9 (1A) c8: BSWAP RAX/EAX/R8/R8D c9: BSWAP RCX/ECX/R9/R9D @@ -514,60 +514,131 @@ cd: BSWAP RBP/EBP/R13/R13D ce: BSWAP RSI/ESI/R14/R14D cf: BSWAP RDI/EDI/R15/R15D # 0x0f 0xd0-0xdf -d0: -d1: -d2: -d3: -d4: -d5: -d6: -d7: -d8: -d9: -da: -db: -dc: -dd: -de: -df: +d0: addsubps Vps,Wps (F2) | addsubpd Vpd,Wpd (66) +d1: psrlw Pq,Qq | psrlw Vdq,Wdq (66) +d2: psrld Pq,Qq | psrld Vdq,Wdq (66) +d3: psrlq Pq,Qq | psrlq Vdq,Wdq (66) +d4: paddq Pq,Qq | paddq Vdq,Wdq (66) +d5: pmullw Pq,Qq | pmullw Vdq,Wdq (66) +d6: movq Wq,Vq (66) | movq2dq Vdq,Nq (F3) | movdq2q Pq,Uq (F2) +d7: pmovmskb Gd,Nq | pmovmskb Gd,Udq (66) +d8: psubusb Pq,Qq | psubusb Vdq,Wdq (66) +d9: psubusw Pq,Qq | psubusw Vdq,Wdq (66) +da: pminub Pq,Qq | pminub Vdq,Wdq (66) +db: pand Pq,Qq | pand Vdq,Wdq (66) +dc: paddusb Pq,Qq | paddusb Vdq,Wdq (66) +dd: paddusw Pq,Qq | paddusw Vdq,Wdq (66) +de: pmaxub Pq,Qq | pmaxub Vdq,Wdq (66) +df: pandn Pq,Qq | pandn Vdq,Wdq (66) # 0x0f 0xe0-0xef -e0: -e1: -e2: -e3: -e4: -e5: -e6: -e7: -e8: -e9: -ea: -eb: -ec: -ed: -ee: -ef: +e0: pavgb Pq,Qq | pavgb Vdq,Wdq (66) +e1: psraw Pq,Qq | psraw Vdq,Wdq (66) +e2: psrad Pq,Qq | psrad Vdq,Wdq (66) +e3: pavgw Pq,Qq | pavgw Vdq,Wdq (66) +e4: pmulhuw Pq,Qq | pmulhuw Vdq,Wdq (66) +e5: pmulhw Pq,Qq | pmulhw Vdq,Wdq (66) +e6: cvtpd2dq Vdq,Wpd (F2) | cvttpd2dq Vdq,Wpd (66) | cvtdq2pd Vpd,Wdq (F3) +e7: movntq Mq,Pq | movntdq Mdq,Vdq (66) +e8: psubsb Pq,Qq | psubsb Vdq,Wdq (66) +e9: psubsw Pq,Qq | psubsw Vdq,Wdq (66) +ea: pminsw Pq,Qq | pminsw Vdq,Wdq (66) +eb: por Pq,Qq | por Vdq,Wdq (66) +ec: paddsb Pq,Qq | paddsb Vdq,Wdq (66) +ed: paddsw Pq,Qq | paddsw Vdq,Wdq (66) +ee: pmaxsw Pq,Qq | pmaxsw Vdq,Wdq (66) +ef: pxor Pq,Qq | pxor Vdq,Wdq (66) # 0x0f 0xf0-0xff -f0: -f1: -f2: -f3: -f4: -f5: -f6: -f7: -f8: -f9: -fa: -fb: -fc: -fd: -fe: +f0: lddqu Vdq,Mdq (F2) +f1: psllw Pq,Qq | psllw Vdq,Wdq (66) +f2: pslld Pq,Qq | pslld Vdq,Wdq (66) +f3: psllq Pq,Qq | psllq Vdq,Wdq (66) +f4: pmuludq Pq,Qq | pmuludq Vdq,Wdq (66) +f5: pmaddwd Pq,Qq | pmaddwd Vdq,Wdq (66) +f6: psadbw Pq,Qq | psadbw Vdq,Wdq (66) +f7: maskmovq Pq,Nq | maskmovdqu Vdq,Udq (66) +f8: psubb Pq,Qq | psubb Vdq,Wdq (66) +f9: psubw Pq,Qq | psubw Vdq,Wdq (66) +fa: psubd Pq,Qq | psubd Vdq,Wdq (66) +fb: psubq Pq,Qq | psubq Vdq,Wdq (66) +fc: paddb Pq,Qq | paddb Vdq,Wdq (66) +fd: paddw Pq,Qq | paddw 
Vdq,Wdq (66) +fe: paddd Pq,Qq | paddd Vdq,Wdq (66) ff: EndTable Table: 3-byte opcode 1 Referrer: 3-byte escape 1 +# 0x0f 0x38 0x00-0x0f +00: pshufb Pq,Qq | pshufb Vdq,Wdq (66) +01: phaddw Pq,Qq | phaddw Vdq,Wdq (66) +02: phaddd Pq,Qq | phaddd Vdq,Wdq (66) +03: phaddsw Pq,Qq | phaddsw Vdq,Wdq (66) +04: pmaddubsw Pq,Qq | pmaddubsw (66)Vdq,Wdq +05: phsubw Pq,Qq | phsubw Vdq,Wdq (66) +06: phsubd Pq,Qq | phsubd Vdq,Wdq (66) +07: phsubsw Pq,Qq | phsubsw Vdq,Wdq (66) +08: psignb Pq,Qq | psignb Vdq,Wdq (66) +09: psignw Pq,Qq | psignw Vdq,Wdq (66) +0a: psignd Pq,Qq | psignd Vdq,Wdq (66) +0b: pmulhrsw Pq,Qq | pmulhrsw Vdq,Wdq (66) +0c: +0d: +0e: +0f: +# 0x0f 0x38 0x10-0x1f +10: pblendvb Vdq,Wdq (66) +11: +12: +13: +14: blendvps Vdq,Wdq (66) +15: blendvpd Vdq,Wdq (66) +16: +17: ptest Vdq,Wdq (66) +18: +19: +1a: +1b: +1c: pabsb Pq,Qq | pabsb Vdq,Wdq (66) +1d: pabsw Pq,Qq | pabsw Vdq,Wdq (66) +1e: pabsd Pq,Qq | pabsd Vdq,Wdq (66) +1f: +# 0x0f 0x38 0x20-0x2f +20: pmovsxbw Vdq,Udq/Mq (66) +21: pmovsxbd Vdq,Udq/Md (66) +22: pmovsxbq Vdq,Udq/Mw (66) +23: pmovsxwd Vdq,Udq/Mq (66) +24: pmovsxwq Vdq,Udq/Md (66) +25: pmovsxdq Vdq,Udq/Mq (66) +26: +27: +28: pmuldq Vdq,Wdq (66) +29: pcmpeqq Vdq,Wdq (66) +2a: movntdqa Vdq,Mdq (66) +2b: packusdw Vdq,Wdq (66) +2c: +2d: +2e: +2f: +# 0x0f 0x38 0x30-0x3f +30: pmovzxbw Vdq,Udq/Mq (66) +31: pmovzxbd Vdq,Udq/Md (66) +32: pmovzxbq Vdq,Udq/Mw (66) +33: pmovzxwd Vdq,Udq/Mq (66) +34: pmovzxwq Vdq,Udq/Md (66) +35: pmovzxdq Vdq,Udq/Mq (66) +36: +37: pcmpgtq Vdq,Wdq (66) +38: pminsb Vdq,Wdq (66) +39: pminsd Vdq,Wdq (66) +3a: pminuw Vdq,Wdq (66) +3b: pminud Vdq,Wdq (66) +3c: pmaxsb Vdq,Wdq (66) +3d: pmaxsd Vdq,Wdq (66) +3e: pmaxuw Vdq,Wdq (66) +3f: pmaxud Vdq,Wdq (66) +# 0x0f 0x38 0x4f-0xff +40: pmulld Vdq,Wdq (66) +41: phminposuw Vdq,Wdq (66) 80: INVEPT Gd/q,Mdq (66) 81: INVPID Gd/q,Mdq (66) f0: MOVBE Gv,Mv | CRC32 Gd,Eb (F2) @@ -576,7 +647,29 @@ EndTable Table: 3-byte opcode 2 Referrer: 3-byte escape 2 -# all opcode is for SSE +# 0x0f 0x3a 0x00-0xff +08: roundps Vdq,Wdq,Ib (66) +09: roundpd Vdq,Wdq,Ib (66) +0a: roundss Vss,Wss,Ib (66) +0b: roundsd Vsd,Wsd,Ib (66) +0c: blendps Vdq,Wdq,Ib (66) +0d: blendpd Vdq,Wdq,Ib (66) +0e: pblendw Vdq,Wdq,Ib (66) +0f: palignr Pq,Qq,Ib | palignr Vdq,Wdq,Ib (66) +14: pextrb Rd/Mb,Vdq,Ib (66) +15: pextrw Rd/Mw,Vdq,Ib (66) +16: pextrd/pextrq Ed/q,Vdq,Ib (66) +17: extractps Ed,Vdq,Ib (66) +20: pinsrb Vdq,Rd/q/Mb,Ib (66) +21: insertps Vdq,Udq/Md,Ib (66) +22: pinsrd/pinsrq Vdq,Ed/q,Ib (66) +40: dpps Vdq,Wdq,Ib (66) +41: dppd Vdq,Wdq,Ib (66) +42: mpsadbw Vdq,Wdq,Ib (66) +60: pcmpestrm Vdq,Wdq,Ib (66) +61: pcmpestri Vdq,Wdq,Ib (66) +62: pcmpistrm Vdq,Wdq,Ib (66) +63: pcmpistri Vdq,Wdq,Ib (66) EndTable GrpTable: Grp1 From a00e817f42663941ea0aa5f85a9d1c4f8b212839 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 8 Sep 2009 12:47:55 -0400 Subject: [PATCH 057/470] kprobes/x86-32: Move irq-exit functions to kprobes section Move irq-exit functions to .kprobes.text section to protect against kprobes recursion. When I ran kprobe stress test on x86-32, I found below symbols cause unrecoverable recursive probing: ret_from_exception ret_from_intr check_userspace restore_all restore_all_notrace restore_nocheck irq_return And also, I found some interrupt/exception entry points that cause similar problems. This patch moves those symbols (including their container functions) to .kprobes.text section to prevent any kprobes probing. 
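For comparison, C code gets the same protection through the __kprobes marker, which expands to a section attribute; the .pushsection/.popsection pairs added to the assembly in the patch below achieve the same placement by hand. A compilable userspace sketch (illustrative only; the macro is simplified from include/linux/kprobes.h):

#include <stdio.h>

/* Simplified: the kernel's __kprobes marker is a section attribute */
#define __kprobes __attribute__((__section__(".kprobes.text")))

/* Hypothetical handler that must never itself be probed */
static int __kprobes handler(int val)
{
	return val + 1;
}

int main(void)
{
	printf("%d\n", handler(41));
	return 0;
}
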
Signed-off-by: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Ananth N Mavinakayanahalli Cc: Jim Keniston Cc: Ingo Molnar LKML-Reference: <20090908164755.24050.81182.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/entry_32.S | 24 ++++++++++++++++++++++++ kernel/kprobes.c | 2 ++ 2 files changed, 26 insertions(+) diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index c097e7d607c..beb30da203d 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -333,6 +333,10 @@ ENTRY(ret_from_fork) CFI_ENDPROC END(ret_from_fork) +/* + * Interrupt exit functions should be protected against kprobes + */ + .pushsection .kprobes.text, "ax" /* * Return to user mode is not as complex as all this looks, * but we want the default path for a system call return to @@ -383,6 +387,10 @@ need_resched: END(resume_kernel) #endif CFI_ENDPROC +/* + * End of kprobes section + */ + .popsection /* SYSENTER_RETURN points to after the "sysenter" instruction in the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */ @@ -513,6 +521,10 @@ sysexit_audit: PTGS_TO_GS_EX ENDPROC(ia32_sysenter_target) +/* + * syscall stub including irq exit should be protected against kprobes + */ + .pushsection .kprobes.text, "ax" # system call handler stub ENTRY(system_call) RING0_INT_FRAME # can't unwind into user space anyway @@ -705,6 +717,10 @@ syscall_badsys: jmp resume_userspace END(syscall_badsys) CFI_ENDPROC +/* + * End of kprobes section + */ + .popsection /* * System calls that need a pt_regs pointer. @@ -814,6 +830,10 @@ common_interrupt: ENDPROC(common_interrupt) CFI_ENDPROC +/* + * Irq entries should be protected against kprobes + */ + .pushsection .kprobes.text, "ax" #define BUILD_INTERRUPT3(name, nr, fn) \ ENTRY(name) \ RING0_INT_FRAME; \ @@ -980,6 +1000,10 @@ ENTRY(spurious_interrupt_bug) jmp error_code CFI_ENDPROC END(spurious_interrupt_bug) +/* + * End of kprobes section + */ + .popsection ENTRY(kernel_thread_helper) pushl $0 # fake return address for unwinder diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 3267d90bc9d..00d01b0f9fe 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -91,6 +91,8 @@ static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash) static struct kprobe_blackpoint kprobe_blacklist[] = { {"preempt_schedule",}, {"native_get_debugreg",}, + {"irq_entries_start",}, + {"common_interrupt",}, {NULL} /* Terminator */ }; From ad5cafcdb09c57008c990edd309c0a563b09f238 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 10 Sep 2009 19:53:06 -0400 Subject: [PATCH 058/470] x86/ptrace: Fix regs_get_argument_nth() to add correct offset Fix regs_get_argument_nth() to add correct offset bytes. Because offset_of() returns offset in byte, the offset should be added to char * instead of unsigned long *. Signed-off-by: Masami Hiramatsu Acked-by: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Frederic Weisbecker Cc: H. 
Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: Peter Zijlstra Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi LKML-Reference: <20090910235306.22412.31613.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/ptrace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index a33a17d5d5c..caffb680945 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -150,7 +150,7 @@ static const int arg_offs_table[] = { unsigned long regs_get_argument_nth(struct pt_regs *regs, unsigned int n) { if (n < ARRAY_SIZE(arg_offs_table)) - return *((unsigned long *)regs + arg_offs_table[n]); + return *(unsigned long *)((char *)regs + arg_offs_table[n]); else { /* * The typical case: arg n is on the stack. From 2fba0c8867af47f6455490e7b59e512dd180c027 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 10 Sep 2009 19:53:14 -0400 Subject: [PATCH 059/470] tracing/kprobes: Fix probe offset to be unsigned Prohibit user to specify negative offset from symbols. Since kprobe.offset is unsigned int, the offset must be always positive value. Signed-off-by: Masami Hiramatsu Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Frederic Weisbecker Cc: H. Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: Peter Zijlstra Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi LKML-Reference: <20090910235314.22412.64631.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Frederic Weisbecker --- Documentation/trace/kprobetrace.txt | 14 +++++++------- kernel/trace/trace_kprobe.c | 19 +++++++------------ 2 files changed, 14 insertions(+), 19 deletions(-) diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt index 3de75174716..db553186564 100644 --- a/Documentation/trace/kprobetrace.txt +++ b/Documentation/trace/kprobetrace.txt @@ -25,15 +25,15 @@ probe events via /sys/kernel/debug/tracing/events/kprobes//filter. Synopsis of kprobe_events ------------------------- - p[:EVENT] SYMBOL[+offs|-offs]|MEMADDR [FETCHARGS] : Set a probe - r[:EVENT] SYMBOL[+0] [FETCHARGS] : Set a return probe + p[:EVENT] SYMBOL[+offs]|MEMADDR [FETCHARGS] : Set a probe + r[:EVENT] SYMBOL[+0] [FETCHARGS] : Set a return probe - EVENT : Event name. If omitted, the event name is generated - based on SYMBOL+offs or MEMADDR. - SYMBOL[+offs|-offs] : Symbol+offset where the probe is inserted. - MEMADDR : Address where the probe is inserted. + EVENT : Event name. If omitted, the event name is generated + based on SYMBOL+offs or MEMADDR. + SYMBOL[+offs] : Symbol+offset where the probe is inserted. + MEMADDR : Address where the probe is inserted. - FETCHARGS : Arguments. Each probe can have up to 128 args. + FETCHARGS : Arguments. Each probe can have up to 128 args. %REG : Fetch register REG sN : Fetch Nth entry of stack (N >= 0) sa : Fetch stack address. diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 19a6de63b44..c24b7e9d97c 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -210,7 +210,7 @@ static __kprobes const char *probe_symbol(struct trace_probe *tp) return tp->symbol ? tp->symbol : "unknown"; } -static __kprobes long probe_offset(struct trace_probe *tp) +static __kprobes unsigned int probe_offset(struct trace_probe *tp) { return (probe_is_return(tp)) ? 
tp->rp.kp.offset : tp->kp.offset; } @@ -380,7 +380,7 @@ end: } /* Split symbol and offset. */ -static int split_symbol_offset(char *symbol, long *offset) +static int split_symbol_offset(char *symbol, unsigned long *offset) { char *tmp; int ret; @@ -389,16 +389,11 @@ static int split_symbol_offset(char *symbol, long *offset) return -EINVAL; tmp = strchr(symbol, '+'); - if (!tmp) - tmp = strchr(symbol, '-'); - if (tmp) { /* skip sign because strict_strtol doesn't accept '+' */ - ret = strict_strtol(tmp + 1, 0, offset); + ret = strict_strtoul(tmp + 1, 0, offset); if (ret) return ret; - if (*tmp == '-') - *offset = -(*offset); *tmp = '\0'; } else *offset = 0; @@ -520,7 +515,7 @@ static int create_trace_probe(int argc, char **argv) { /* * Argument syntax: - * - Add kprobe: p[:EVENT] SYMBOL[+OFFS|-OFFS]|ADDRESS [FETCHARGS] + * - Add kprobe: p[:EVENT] SYMBOL[+OFFS]|ADDRESS [FETCHARGS] * - Add kretprobe: r[:EVENT] SYMBOL[+0] [FETCHARGS] * Fetch args: * aN : fetch Nth of function argument. (N:0-) @@ -539,7 +534,7 @@ static int create_trace_probe(int argc, char **argv) int i, ret = 0; int is_return = 0; char *symbol = NULL, *event = NULL; - long offset = 0; + unsigned long offset = 0; void *addr = NULL; if (argc < 2) @@ -605,7 +600,7 @@ static int create_trace_probe(int argc, char **argv) if (tp->symbol) { kp->symbol_name = tp->symbol; - kp->offset = offset; + kp->offset = (unsigned int)offset; } else kp->addr = addr; @@ -675,7 +670,7 @@ static int probes_seq_show(struct seq_file *m, void *v) seq_printf(m, ":%s", tp->call.name); if (tp->symbol) - seq_printf(m, " %s%+ld", probe_symbol(tp), probe_offset(tp)); + seq_printf(m, " %s+%u", probe_symbol(tp), probe_offset(tp)); else seq_printf(m, " 0x%p", probe_address(tp)); From 4a846b443b4e8633057946a2234e23559a67ce42 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 11 Sep 2009 05:31:21 +0200 Subject: [PATCH 060/470] tracing/kprobes: Cleanup kprobe tracer code. Simplify trace_probe to remove a union, and remove some redundant wrappers. And also, cleanup create_trace_probe() function. Signed-off-by: Masami Hiramatsu Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Frederic Weisbecker Cc: H. 
Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: Peter Zijlstra Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi LKML-Reference: <20090910235322.22412.52525.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Frederic Weisbecker --- kernel/trace/trace_kprobe.c | 81 ++++++++++++++++--------------------- 1 file changed, 34 insertions(+), 47 deletions(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index c24b7e9d97c..4ce728ca1b1 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -180,10 +180,7 @@ static __kprobes void free_indirect_fetch_data(struct indirect_fetch_data *data) struct trace_probe { struct list_head list; - union { - struct kprobe kp; - struct kretprobe rp; - }; + struct kretprobe rp; /* Use rp.kp for kprobe use */ unsigned long nhit; const char *symbol; /* symbol name */ struct ftrace_event_call call; @@ -202,7 +199,7 @@ static int kretprobe_trace_func(struct kretprobe_instance *ri, static __kprobes int probe_is_return(struct trace_probe *tp) { - return (tp->rp.handler == kretprobe_trace_func); + return tp->rp.handler != NULL; } static __kprobes const char *probe_symbol(struct trace_probe *tp) @@ -210,16 +207,6 @@ static __kprobes const char *probe_symbol(struct trace_probe *tp) return tp->symbol ? tp->symbol : "unknown"; } -static __kprobes unsigned int probe_offset(struct trace_probe *tp) -{ - return (probe_is_return(tp)) ? tp->rp.kp.offset : tp->kp.offset; -} - -static __kprobes void *probe_address(struct trace_probe *tp) -{ - return (probe_is_return(tp)) ? tp->rp.kp.addr : tp->kp.addr; -} - static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff) { int ret = -EINVAL; @@ -269,8 +256,14 @@ static void unregister_probe_event(struct trace_probe *tp); static DEFINE_MUTEX(probe_lock); static LIST_HEAD(probe_list); -static struct trace_probe *alloc_trace_probe(const char *symbol, - const char *event, int nargs) +/* + * Allocate new trace_probe and initialize it (including kprobes). + */ +static struct trace_probe *alloc_trace_probe(const char *event, + void *addr, + const char *symbol, + unsigned long offs, + int nargs, int is_return) { struct trace_probe *tp; @@ -282,7 +275,16 @@ static struct trace_probe *alloc_trace_probe(const char *symbol, tp->symbol = kstrdup(symbol, GFP_KERNEL); if (!tp->symbol) goto error; - } + tp->rp.kp.symbol_name = tp->symbol; + tp->rp.kp.offset = offs; + } else + tp->rp.kp.addr = addr; + + if (is_return) + tp->rp.handler = kretprobe_trace_func; + else + tp->rp.kp.pre_handler = kprobe_trace_func; + if (!event) goto error; tp->call.name = kstrdup(event, GFP_KERNEL); @@ -327,7 +329,7 @@ static void __unregister_trace_probe(struct trace_probe *tp) if (probe_is_return(tp)) unregister_kretprobe(&tp->rp); else - unregister_kprobe(&tp->kp); + unregister_kprobe(&tp->rp.kp); } /* Unregister a trace_probe and probe_event: call with locking probe_lock */ @@ -349,14 +351,14 @@ static int register_trace_probe(struct trace_probe *tp) if (probe_is_return(tp)) ret = register_kretprobe(&tp->rp); else - ret = register_kprobe(&tp->kp); + ret = register_kprobe(&tp->rp.kp); if (ret) { pr_warning("Could not insert probe(%d)\n", ret); if (ret == -EILSEQ) { pr_warning("Probing address(0x%p) is not an " "instruction boundary.\n", - probe_address(tp)); + tp->rp.kp.addr); ret = -EINVAL; } goto end; @@ -530,12 +532,12 @@ static int create_trace_probe(int argc, char **argv) * +|-offs(ARG) : fetch memory at ARG +|- offs address. 
*/ struct trace_probe *tp; - struct kprobe *kp; int i, ret = 0; int is_return = 0; char *symbol = NULL, *event = NULL; unsigned long offset = 0; void *addr = NULL; + char buf[MAX_EVENT_NAME_LEN]; if (argc < 2) return -EINVAL; @@ -577,33 +579,18 @@ static int create_trace_probe(int argc, char **argv) /* setup a probe */ if (!event) { /* Make a new event name */ - char buf[MAX_EVENT_NAME_LEN]; if (symbol) snprintf(buf, MAX_EVENT_NAME_LEN, "%c@%s%+ld", is_return ? 'r' : 'p', symbol, offset); else snprintf(buf, MAX_EVENT_NAME_LEN, "%c@0x%p", is_return ? 'r' : 'p', addr); - tp = alloc_trace_probe(symbol, buf, argc); - } else - tp = alloc_trace_probe(symbol, event, argc); + event = buf; + } + tp = alloc_trace_probe(event, addr, symbol, offset, argc, is_return); if (IS_ERR(tp)) return PTR_ERR(tp); - if (is_return) { - kp = &tp->rp.kp; - tp->rp.handler = kretprobe_trace_func; - } else { - kp = &tp->kp; - tp->kp.pre_handler = kprobe_trace_func; - } - - if (tp->symbol) { - kp->symbol_name = tp->symbol; - kp->offset = (unsigned int)offset; - } else - kp->addr = addr; - /* parse arguments */ ret = 0; for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { @@ -670,9 +657,9 @@ static int probes_seq_show(struct seq_file *m, void *v) seq_printf(m, ":%s", tp->call.name); if (tp->symbol) - seq_printf(m, " %s+%u", probe_symbol(tp), probe_offset(tp)); + seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset); else - seq_printf(m, " 0x%p", probe_address(tp)); + seq_printf(m, " 0x%p", tp->rp.kp.addr); for (i = 0; i < tp->nr_args; i++) { ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i]); @@ -783,7 +770,7 @@ static int probes_profile_seq_show(struct seq_file *m, void *v) struct trace_probe *tp = v; seq_printf(m, " %-44s %15lu %15lu\n", tp->call.name, tp->nhit, - probe_is_return(tp) ? 
tp->rp.kp.nmissed : tp->kp.nmissed); + tp->rp.kp.nmissed); return 0; } @@ -811,7 +798,7 @@ static const struct file_operations kprobe_profile_ops = { /* Kprobe handler */ static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) { - struct trace_probe *tp = container_of(kp, struct trace_probe, kp); + struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); struct kprobe_trace_entry *entry; struct ring_buffer_event *event; struct ring_buffer *buffer; @@ -866,7 +853,7 @@ static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri, entry = ring_buffer_event_data(event); entry->nargs = tp->nr_args; - entry->func = (unsigned long)probe_address(tp); + entry->func = (unsigned long)tp->rp.kp.addr; entry->ret_ip = (unsigned long)ri->ret_addr; for (i = 0; i < tp->nr_args; i++) entry->args[i] = call_fetch(&tp->args[i], regs); @@ -945,7 +932,7 @@ static int probe_event_enable(struct ftrace_event_call *call) if (probe_is_return(tp)) return enable_kretprobe(&tp->rp); else - return enable_kprobe(&tp->kp); + return enable_kprobe(&tp->rp.kp); } static void probe_event_disable(struct ftrace_event_call *call) @@ -955,7 +942,7 @@ static void probe_event_disable(struct ftrace_event_call *call) if (probe_is_return(tp)) disable_kretprobe(&tp->rp); else - disable_kprobe(&tp->kp); + disable_kprobe(&tp->rp.kp); } static int probe_event_raw_init(struct ftrace_event_call *event_call) From e08d1c657f70bcaca11401cd6ac5c8fe59bd2bb7 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 10 Sep 2009 19:53:30 -0400 Subject: [PATCH 061/470] tracing/kprobes: Add event profiling support Add *probe_profile_enable/disable to support kprobes raw events sampling from perf counters, like other ftrace events, when CONFIG_PROFILE_EVENT=y. Signed-off-by: Masami Hiramatsu Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Frederic Weisbecker Cc: H. Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: Peter Zijlstra Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi LKML-Reference: <20090910235329.22412.94731.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Frederic Weisbecker --- Documentation/trace/kprobetrace.txt | 4 +- kernel/trace/trace_kprobe.c | 110 +++++++++++++++++++++++++++- 2 files changed, 111 insertions(+), 3 deletions(-) diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt index db553186564..8f882ebd136 100644 --- a/Documentation/trace/kprobetrace.txt +++ b/Documentation/trace/kprobetrace.txt @@ -62,13 +62,15 @@ enabled: You can enable/disable the probe by writing 1 or 0 on it. format: - It shows the format of this probe event. It also shows aliases of arguments + This shows the format of this probe event. It also shows aliases of arguments which you specified to kprobe_events. filter: You can write filtering rules of this event. And you can use both of aliase names and field names for describing filters. +id: + This shows the id of this probe event. 
Event Profiling --------------- diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 4ce728ca1b1..730e992d28d 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "trace.h" #include "trace_output.h" @@ -280,6 +281,7 @@ static struct trace_probe *alloc_trace_probe(const char *event, } else tp->rp.kp.addr = addr; + /* Set handler here for checking whether this probe is return or not. */ if (is_return) tp->rp.handler = kretprobe_trace_func; else @@ -929,10 +931,13 @@ static int probe_event_enable(struct ftrace_event_call *call) { struct trace_probe *tp = (struct trace_probe *)call->data; - if (probe_is_return(tp)) + if (probe_is_return(tp)) { + tp->rp.handler = kretprobe_trace_func; return enable_kretprobe(&tp->rp); - else + } else { + tp->rp.kp.pre_handler = kprobe_trace_func; return enable_kprobe(&tp->rp.kp); + } } static void probe_event_disable(struct ftrace_event_call *call) @@ -1105,6 +1110,101 @@ static int kretprobe_event_show_format(struct ftrace_event_call *call, "func, ret_ip"); } +#ifdef CONFIG_EVENT_PROFILE + +/* Kprobe profile handler */ +static __kprobes int kprobe_profile_func(struct kprobe *kp, + struct pt_regs *regs) +{ + struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); + struct ftrace_event_call *call = &tp->call; + struct kprobe_trace_entry *entry; + int size, i, pc; + unsigned long irq_flags; + + local_save_flags(irq_flags); + pc = preempt_count(); + + size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); + + do { + char raw_data[size]; + struct trace_entry *ent; + + *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; + entry = (struct kprobe_trace_entry *)raw_data; + ent = &entry->ent; + + tracing_generic_entry_update(ent, irq_flags, pc); + ent->type = call->id; + entry->nargs = tp->nr_args; + entry->ip = (unsigned long)kp->addr; + for (i = 0; i < tp->nr_args; i++) + entry->args[i] = call_fetch(&tp->args[i], regs); + perf_tpcounter_event(call->id, entry->ip, 1, entry, size); + } while (0); + return 0; +} + +/* Kretprobe profile handler */ +static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri, + struct pt_regs *regs) +{ + struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); + struct ftrace_event_call *call = &tp->call; + struct kretprobe_trace_entry *entry; + int size, i, pc; + unsigned long irq_flags; + + local_save_flags(irq_flags); + pc = preempt_count(); + + size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args); + + do { + char raw_data[size]; + struct trace_entry *ent; + + *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; + entry = (struct kretprobe_trace_entry *)raw_data; + ent = &entry->ent; + + tracing_generic_entry_update(ent, irq_flags, pc); + ent->type = call->id; + entry->nargs = tp->nr_args; + entry->func = (unsigned long)tp->rp.kp.addr; + entry->ret_ip = (unsigned long)ri->ret_addr; + for (i = 0; i < tp->nr_args; i++) + entry->args[i] = call_fetch(&tp->args[i], regs); + perf_tpcounter_event(call->id, entry->ret_ip, 1, entry, size); + } while (0); + return 0; +} + +static int probe_profile_enable(struct ftrace_event_call *call) +{ + struct trace_probe *tp = (struct trace_probe *)call->data; + + if (atomic_inc_return(&call->profile_count)) + return 0; + + if (probe_is_return(tp)) { + tp->rp.handler = kretprobe_profile_func; + return enable_kretprobe(&tp->rp); + } else { + tp->rp.kp.pre_handler = kprobe_profile_func; + return enable_kprobe(&tp->rp.kp); + } +} + +static void probe_profile_disable(struct 
ftrace_event_call *call) +{ + if (atomic_add_negative(-1, &call->profile_count)) + probe_event_disable(call); +} + +#endif /* CONFIG_EVENT_PROFILE */ + static int register_probe_event(struct trace_probe *tp) { struct ftrace_event_call *call = &tp->call; @@ -1130,6 +1230,12 @@ static int register_probe_event(struct trace_probe *tp) call->enabled = 1; call->regfunc = probe_event_enable; call->unregfunc = probe_event_disable; + +#ifdef CONFIG_EVENT_PROFILE + atomic_set(&call->profile_count, -1); + call->profile_enable = probe_profile_enable; + call->profile_disable = probe_profile_disable; +#endif call->data = tp; ret = trace_add_event_call(call); if (ret) { From eca0d916f6429785bbc88db3ff66631cde62b432 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 10 Sep 2009 19:53:38 -0400 Subject: [PATCH 062/470] tracing/kprobes: Add argument name support Add argument name assignment support and remove "alias" lines from format. This allows user to assign unique name to each argument. For example, $ echo p do_sys_open dfd=a0 filename=a1 flags=a2 mode=a3 > kprobe_events This assigns dfd, filename, flags, and mode to 1st - 4th arguments respectively. Trace buffer shows those names too. <...>-1439 [000] 1200885.933147: do_sys_open+0x0/0xdf: dfd=ffffff9c filename=bfa898ac flags=8000 mode=0 This helps users to know what each value means. Users can filter each events by these names too. Note that you can not filter by argN anymore. Signed-off-by: Masami Hiramatsu Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Frederic Weisbecker Cc: H. Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: Peter Zijlstra Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi LKML-Reference: <20090910235337.22412.77383.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Frederic Weisbecker --- Documentation/trace/kprobetrace.txt | 46 +++++----- kernel/trace/trace_kprobe.c | 128 ++++++++++++++-------------- 2 files changed, 84 insertions(+), 90 deletions(-) diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt index 8f882ebd136..aaa6c1067c7 100644 --- a/Documentation/trace/kprobetrace.txt +++ b/Documentation/trace/kprobetrace.txt @@ -42,7 +42,8 @@ Synopsis of kprobe_events aN : Fetch function argument. (N >= 0)(*) rv : Fetch return value.(**) ra : Fetch return address.(**) - +|-offs(FETCHARG) : fetch memory at FETCHARG +|- offs address.(***) + +|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(***) + NAME=FETCHARG: Set NAME as the argument name of FETCHARG. (*) aN may not correct on asmlinkaged functions and at the middle of function body. @@ -62,12 +63,10 @@ enabled: You can enable/disable the probe by writing 1 or 0 on it. format: - This shows the format of this probe event. It also shows aliases of arguments - which you specified to kprobe_events. + This shows the format of this probe event. filter: - You can write filtering rules of this event. And you can use both of aliase - names and field names for describing filters. + You can write filtering rules of this event. id: This shows the id of this probe event. @@ -85,10 +84,11 @@ Usage examples To add a probe as a new event, write a new definition to kprobe_events as below. 
- echo p:myprobe do_sys_open a0 a1 a2 a3 > /sys/kernel/debug/tracing/kprobe_events + echo p:myprobe do_sys_open dfd=a0 filename=a1 flags=a2 mode=a3 > /sys/kernel/debug/tracing/kprobe_events This sets a kprobe on the top of do_sys_open() function with recording -1st to 4th arguments as "myprobe" event. +1st to 4th arguments as "myprobe" event. As this example shows, users can +choose more familiar names for each arguments. echo r:myretprobe do_sys_open rv ra >> /sys/kernel/debug/tracing/kprobe_events @@ -99,7 +99,7 @@ recording return value and return address as "myretprobe" event. cat /sys/kernel/debug/tracing/events/kprobes/myprobe/format name: myprobe -ID: 23 +ID: 75 format: field:unsigned short common_type; offset:0; size:2; field:unsigned char common_flags; offset:2; size:1; @@ -109,21 +109,15 @@ format: field: unsigned long ip; offset:16;tsize:8; field: int nargs; offset:24;tsize:4; - field: unsigned long arg0; offset:32;tsize:8; - field: unsigned long arg1; offset:40;tsize:8; - field: unsigned long arg2; offset:48;tsize:8; - field: unsigned long arg3; offset:56;tsize:8; + field: unsigned long dfd; offset:32;tsize:8; + field: unsigned long filename; offset:40;tsize:8; + field: unsigned long flags; offset:48;tsize:8; + field: unsigned long mode; offset:56;tsize:8; - alias: a0; original: arg0; - alias: a1; original: arg1; - alias: a2; original: arg2; - alias: a3; original: arg3; - -print fmt: "%lx: 0x%lx 0x%lx 0x%lx 0x%lx", ip, arg0, arg1, arg2, arg3 +print fmt: "%lx: dfd=%lx filename=%lx flags=%lx mode=%lx", ip, REC->dfd, REC->filename, REC->flags, REC->mode - You can see that the event has 4 arguments and alias expressions -corresponding to it. + You can see that the event has 4 arguments as in the expressions you specified. echo > /sys/kernel/debug/tracing/kprobe_events @@ -135,12 +129,12 @@ corresponding to it. 
# # TASK-PID CPU# TIMESTAMP FUNCTION # | | | | | - <...>-1447 [001] 1038282.286875: do_sys_open+0x0/0xd6: 0x3 0x7fffd1ec4440 0x8000 0x0 - <...>-1447 [001] 1038282.286878: sys_openat+0xc/0xe <- do_sys_open: 0xfffffffffffffffe 0xffffffff81367a3a - <...>-1447 [001] 1038282.286885: do_sys_open+0x0/0xd6: 0xffffff9c 0x40413c 0x8000 0x1b6 - <...>-1447 [001] 1038282.286915: sys_open+0x1b/0x1d <- do_sys_open: 0x3 0xffffffff81367a3a - <...>-1447 [001] 1038282.286969: do_sys_open+0x0/0xd6: 0xffffff9c 0x4041c6 0x98800 0x10 - <...>-1447 [001] 1038282.286976: sys_open+0x1b/0x1d <- do_sys_open: 0x3 0xffffffff81367a3a + <...>-1447 [001] 1038282.286875: do_sys_open+0x0/0xd6: dfd=3 filename=7fffd1ec4440 flags=8000 mode=0 + <...>-1447 [001] 1038282.286878: sys_openat+0xc/0xe <- do_sys_open: rv=fffffffffffffffe ra=ffffffff81367a3a + <...>-1447 [001] 1038282.286885: do_sys_open+0x0/0xd6: dfd=ffffff9c filename=40413c flags=8000 mode=1b6 + <...>-1447 [001] 1038282.286915: sys_open+0x1b/0x1d <- do_sys_open: rv=3 ra=ffffffff81367a3a + <...>-1447 [001] 1038282.286969: do_sys_open+0x0/0xd6: dfd=ffffff9c filename=4041c6 flags=98800 mode=10 + <...>-1447 [001] 1038282.286976: sys_open+0x1b/0x1d <- do_sys_open: rv=3 ra=ffffffff81367a3a Each line shows when the kernel hits a probe, and <- SYMBOL means kernel diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 730e992d28d..44dad1aa95d 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -176,9 +176,14 @@ static __kprobes void free_indirect_fetch_data(struct indirect_fetch_data *data) } /** - * kprobe_trace_core + * Kprobe tracer core functions */ +struct probe_arg { + struct fetch_func fetch; + const char *name; +}; + struct trace_probe { struct list_head list; struct kretprobe rp; /* Use rp.kp for kprobe use */ @@ -187,12 +192,12 @@ struct trace_probe { struct ftrace_event_call call; struct trace_event event; unsigned int nr_args; - struct fetch_func args[]; + struct probe_arg args[]; }; #define SIZEOF_TRACE_PROBE(n) \ (offsetof(struct trace_probe, args) + \ - (sizeof(struct fetch_func) * (n))) + (sizeof(struct probe_arg) * (n))) static int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs); static int kretprobe_trace_func(struct kretprobe_instance *ri, @@ -301,15 +306,21 @@ error: return ERR_PTR(-ENOMEM); } +static void free_probe_arg(struct probe_arg *arg) +{ + if (arg->fetch.func == fetch_symbol) + free_symbol_cache(arg->fetch.data); + else if (arg->fetch.func == fetch_indirect) + free_indirect_fetch_data(arg->fetch.data); + kfree(arg->name); +} + static void free_trace_probe(struct trace_probe *tp) { int i; for (i = 0; i < tp->nr_args; i++) - if (tp->args[i].func == fetch_symbol) - free_symbol_cache(tp->args[i].data); - else if (tp->args[i].func == fetch_indirect) - free_indirect_fetch_data(tp->args[i].data); + free_probe_arg(&tp->args[i]); kfree(tp->call.name); kfree(tp->symbol); @@ -532,11 +543,13 @@ static int create_trace_probe(int argc, char **argv) * %REG : fetch register REG * Indirect memory fetch: * +|-offs(ARG) : fetch memory at ARG +|- offs address. + * Alias name of args: + * NAME=FETCHARG : set NAME as alias of FETCHARG. 
*/ struct trace_probe *tp; int i, ret = 0; int is_return = 0; - char *symbol = NULL, *event = NULL; + char *symbol = NULL, *event = NULL, *arg = NULL; unsigned long offset = 0; void *addr = NULL; char buf[MAX_EVENT_NAME_LEN]; @@ -596,12 +609,21 @@ static int create_trace_probe(int argc, char **argv) /* parse arguments */ ret = 0; for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { - if (strlen(argv[i]) > MAX_ARGSTR_LEN) { - pr_info("Argument%d(%s) is too long.\n", i, argv[i]); + /* Parse argument name */ + arg = strchr(argv[i], '='); + if (arg) + *arg++ = '\0'; + else + arg = argv[i]; + tp->args[i].name = kstrdup(argv[i], GFP_KERNEL); + + /* Parse fetch argument */ + if (strlen(arg) > MAX_ARGSTR_LEN) { + pr_info("Argument%d(%s) is too long.\n", i, arg); ret = -ENOSPC; goto error; } - ret = parse_probe_arg(argv[i], &tp->args[i], is_return); + ret = parse_probe_arg(arg, &tp->args[i].fetch, is_return); if (ret) goto error; } @@ -664,12 +686,12 @@ static int probes_seq_show(struct seq_file *m, void *v) seq_printf(m, " 0x%p", tp->rp.kp.addr); for (i = 0; i < tp->nr_args; i++) { - ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i]); + ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i].fetch); if (ret < 0) { pr_warning("Argument%d decoding error(%d).\n", i, ret); return ret; } - seq_printf(m, " %s", buf); + seq_printf(m, " %s=%s", tp->args[i].name, buf); } seq_printf(m, "\n"); return 0; @@ -824,7 +846,7 @@ static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) entry->nargs = tp->nr_args; entry->ip = (unsigned long)kp->addr; for (i = 0; i < tp->nr_args; i++) - entry->args[i] = call_fetch(&tp->args[i], regs); + entry->args[i] = call_fetch(&tp->args[i].fetch, regs); if (!filter_current_check_discard(buffer, call, entry, event)) trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); @@ -858,7 +880,7 @@ static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri, entry->func = (unsigned long)tp->rp.kp.addr; entry->ret_ip = (unsigned long)ri->ret_addr; for (i = 0; i < tp->nr_args; i++) - entry->args[i] = call_fetch(&tp->args[i], regs); + entry->args[i] = call_fetch(&tp->args[i].fetch, regs); if (!filter_current_check_discard(buffer, call, entry, event)) trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); @@ -872,9 +894,13 @@ print_kprobe_event(struct trace_iterator *iter, int flags) { struct kprobe_trace_entry *field; struct trace_seq *s = &iter->seq; + struct trace_event *event; + struct trace_probe *tp; int i; field = (struct kprobe_trace_entry *)iter->ent; + event = ftrace_find_event(field->ent.type); + tp = container_of(event, struct trace_probe, event); if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) goto partial; @@ -883,7 +909,8 @@ print_kprobe_event(struct trace_iterator *iter, int flags) goto partial; for (i = 0; i < field->nargs; i++) - if (!trace_seq_printf(s, " 0x%lx", field->args[i])) + if (!trace_seq_printf(s, " %s=%lx", + tp->args[i].name, field->args[i])) goto partial; if (!trace_seq_puts(s, "\n")) @@ -899,9 +926,13 @@ print_kretprobe_event(struct trace_iterator *iter, int flags) { struct kretprobe_trace_entry *field; struct trace_seq *s = &iter->seq; + struct trace_event *event; + struct trace_probe *tp; int i; field = (struct kretprobe_trace_entry *)iter->ent; + event = ftrace_find_event(field->ent.type); + tp = container_of(event, struct trace_probe, event); if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) goto partial; @@ -916,7 +947,8 @@ print_kretprobe_event(struct 
trace_iterator *iter, int flags) goto partial; for (i = 0; i < field->nargs; i++) - if (!trace_seq_printf(s, " 0x%lx", field->args[i])) + if (!trace_seq_printf(s, " %s=%lx", + tp->args[i].name, field->args[i])) goto partial; if (!trace_seq_puts(s, "\n")) @@ -972,7 +1004,6 @@ static int kprobe_event_define_fields(struct ftrace_event_call *event_call) { int ret, i; struct kprobe_trace_entry field; - char buf[MAX_ARGSTR_LEN + 1]; struct trace_probe *tp = (struct trace_probe *)event_call->data; ret = trace_define_common_fields(event_call); @@ -981,16 +1012,9 @@ static int kprobe_event_define_fields(struct ftrace_event_call *event_call) DEFINE_FIELD(unsigned long, ip, "ip", 0); DEFINE_FIELD(int, nargs, "nargs", 1); - for (i = 0; i < tp->nr_args; i++) { - /* Set argN as a field */ - sprintf(buf, "arg%d", i); - DEFINE_FIELD(unsigned long, args[i], buf, 0); - /* Set argument string as an alias field */ - ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i]); - if (ret < 0) - return ret; - DEFINE_FIELD(unsigned long, args[i], buf, 0); - } + /* Set argument names as fields */ + for (i = 0; i < tp->nr_args; i++) + DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0); return 0; } @@ -998,7 +1022,6 @@ static int kretprobe_event_define_fields(struct ftrace_event_call *event_call) { int ret, i; struct kretprobe_trace_entry field; - char buf[MAX_ARGSTR_LEN + 1]; struct trace_probe *tp = (struct trace_probe *)event_call->data; ret = trace_define_common_fields(event_call); @@ -1008,16 +1031,9 @@ static int kretprobe_event_define_fields(struct ftrace_event_call *event_call) DEFINE_FIELD(unsigned long, func, "func", 0); DEFINE_FIELD(unsigned long, ret_ip, "ret_ip", 0); DEFINE_FIELD(int, nargs, "nargs", 1); - for (i = 0; i < tp->nr_args; i++) { - /* Set argN as a field */ - sprintf(buf, "arg%d", i); - DEFINE_FIELD(unsigned long, args[i], buf, 0); - /* Set argument string as an alias field */ - ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i]); - if (ret < 0) - return ret; - DEFINE_FIELD(unsigned long, args[i], buf, 0); - } + /* Set argument names as fields */ + for (i = 0; i < tp->nr_args; i++) + DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0); return 0; } @@ -1025,31 +1041,21 @@ static int __probe_event_show_format(struct trace_seq *s, struct trace_probe *tp, const char *fmt, const char *arg) { - int i, ret; - char buf[MAX_ARGSTR_LEN + 1]; + int i; - /* Show aliases */ - for (i = 0; i < tp->nr_args; i++) { - ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i]); - if (ret < 0) - return ret; - if (!trace_seq_printf(s, "\talias: %s;\toriginal: arg%d;\n", - buf, i)) - return 0; - } /* Show format */ if (!trace_seq_printf(s, "\nprint fmt: \"%s", fmt)) return 0; for (i = 0; i < tp->nr_args; i++) - if (!trace_seq_puts(s, " 0x%lx")) + if (!trace_seq_printf(s, " %s=%%lx", tp->args[i].name)) return 0; if (!trace_seq_printf(s, "\", %s", arg)) return 0; for (i = 0; i < tp->nr_args; i++) - if (!trace_seq_printf(s, ", arg%d", i)) + if (!trace_seq_printf(s, ", REC->%s", tp->args[i].name)) return 0; return trace_seq_puts(s, "\n"); @@ -1071,17 +1077,14 @@ static int kprobe_event_show_format(struct ftrace_event_call *call, { struct kprobe_trace_entry field __attribute__((unused)); int ret, i; - char buf[8]; struct trace_probe *tp = (struct trace_probe *)call->data; SHOW_FIELD(unsigned long, ip, "ip"); SHOW_FIELD(int, nargs, "nargs"); /* Show fields */ - for (i = 0; i < tp->nr_args; i++) { - sprintf(buf, "arg%d", i); - SHOW_FIELD(unsigned long, args[i], buf); - } + for (i = 0; i < tp->nr_args; 
i++) + SHOW_FIELD(unsigned long, args[i], tp->args[i].name); trace_seq_puts(s, "\n"); return __probe_event_show_format(s, tp, "%lx:", "ip"); @@ -1092,7 +1095,6 @@ static int kretprobe_event_show_format(struct ftrace_event_call *call, { struct kretprobe_trace_entry field __attribute__((unused)); int ret, i; - char buf[8]; struct trace_probe *tp = (struct trace_probe *)call->data; SHOW_FIELD(unsigned long, func, "func"); @@ -1100,10 +1102,8 @@ static int kretprobe_event_show_format(struct ftrace_event_call *call, SHOW_FIELD(int, nargs, "nargs"); /* Show fields */ - for (i = 0; i < tp->nr_args; i++) { - sprintf(buf, "arg%d", i); - SHOW_FIELD(unsigned long, args[i], buf); - } + for (i = 0; i < tp->nr_args; i++) + SHOW_FIELD(unsigned long, args[i], tp->args[i].name); trace_seq_puts(s, "\n"); return __probe_event_show_format(s, tp, "%lx <- %lx:", @@ -1140,7 +1140,7 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp, entry->nargs = tp->nr_args; entry->ip = (unsigned long)kp->addr; for (i = 0; i < tp->nr_args; i++) - entry->args[i] = call_fetch(&tp->args[i], regs); + entry->args[i] = call_fetch(&tp->args[i].fetch, regs); perf_tpcounter_event(call->id, entry->ip, 1, entry, size); } while (0); return 0; @@ -1175,7 +1175,7 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri, entry->func = (unsigned long)tp->rp.kp.addr; entry->ret_ip = (unsigned long)ri->ret_addr; for (i = 0; i < tp->nr_args; i++) - entry->args[i] = call_fetch(&tp->args[i], regs); + entry->args[i] = call_fetch(&tp->args[i].fetch, regs); perf_tpcounter_event(call->id, entry->ret_ip, 1, entry, size); } while (0); return 0; From 6e9f23d1619f7badaf9090dac09e86a22d6061d8 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 10 Sep 2009 19:53:45 -0400 Subject: [PATCH 063/470] tracing/kprobes: Show event name in trace output Show event name in tracing/trace output. This also fixes kprobes events format to comply with other tracepoint events formats. Before patching: <...>-1447 [001] 1038282.286875: do_sys_open+0x0/0xd6: ... <...>-1447 [001] 1038282.286878: sys_openat+0xc/0xe <- do_sys_open: ... After patching: <...>-1447 [001] 1038282.286875: myprobe: (do_sys_open+0x0/0xd6) ... <...>-1447 [001] 1038282.286878: myretprobe: (sys_openat+0xc/0xe <- do_sys_open) ... Signed-off-by: Masami Hiramatsu Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Frederic Weisbecker Cc: H. Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: Peter Zijlstra Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi LKML-Reference: <20090910235345.22412.76527.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Frederic Weisbecker --- Documentation/trace/kprobetrace.txt | 16 ++++++++-------- kernel/trace/trace_kprobe.c | 16 +++++++++++----- 2 files changed, 19 insertions(+), 13 deletions(-) diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt index aaa6c1067c7..a849889e609 100644 --- a/Documentation/trace/kprobetrace.txt +++ b/Documentation/trace/kprobetrace.txt @@ -114,7 +114,7 @@ format: field: unsigned long flags; offset:48;tsize:8; field: unsigned long mode; offset:56;tsize:8; -print fmt: "%lx: dfd=%lx filename=%lx flags=%lx mode=%lx", ip, REC->dfd, REC->filename, REC->flags, REC->mode +print fmt: "(%lx) dfd=%lx filename=%lx flags=%lx mode=%lx", REC->ip, REC->dfd, REC->filename, REC->flags, REC->mode You can see that the event has 4 arguments as in the expressions you specified. 
@@ -129,15 +129,15 @@ print fmt: "%lx: dfd=%lx filename=%lx flags=%lx mode=%lx", ip, REC->dfd, REC->fi # # TASK-PID CPU# TIMESTAMP FUNCTION # | | | | | - <...>-1447 [001] 1038282.286875: do_sys_open+0x0/0xd6: dfd=3 filename=7fffd1ec4440 flags=8000 mode=0 - <...>-1447 [001] 1038282.286878: sys_openat+0xc/0xe <- do_sys_open: rv=fffffffffffffffe ra=ffffffff81367a3a - <...>-1447 [001] 1038282.286885: do_sys_open+0x0/0xd6: dfd=ffffff9c filename=40413c flags=8000 mode=1b6 - <...>-1447 [001] 1038282.286915: sys_open+0x1b/0x1d <- do_sys_open: rv=3 ra=ffffffff81367a3a - <...>-1447 [001] 1038282.286969: do_sys_open+0x0/0xd6: dfd=ffffff9c filename=4041c6 flags=98800 mode=10 - <...>-1447 [001] 1038282.286976: sys_open+0x1b/0x1d <- do_sys_open: rv=3 ra=ffffffff81367a3a + <...>-1447 [001] 1038282.286875: myprobe: (do_sys_open+0x0/0xd6) dfd=3 filename=7fffd1ec4440 flags=8000 mode=0 + <...>-1447 [001] 1038282.286878: myretprobe: (sys_openat+0xc/0xe <- do_sys_open) rv=fffffffffffffffe ra=ffffffff81367a3a + <...>-1447 [001] 1038282.286885: myprobe: (do_sys_open+0x0/0xd6) dfd=ffffff9c filename=40413c flags=8000 mode=1b6 + <...>-1447 [001] 1038282.286915: myretprobe: (sys_open+0x1b/0x1d <- do_sys_open) rv=3 ra=ffffffff81367a3a + <...>-1447 [001] 1038282.286969: myprobe: (do_sys_open+0x0/0xd6) dfd=ffffff9c filename=4041c6 flags=98800 mode=10 + <...>-1447 [001] 1038282.286976: myretprobe: (sys_open+0x1b/0x1d <- do_sys_open) rv=3 ra=ffffffff81367a3a - Each line shows when the kernel hits a probe, and <- SYMBOL means kernel + Each line shows when the kernel hits an event, and <- SYMBOL means kernel returns from SYMBOL(e.g. "sys_open+0x1b/0x1d <- do_sys_open" means kernel returns from do_sys_open to sys_open+0x1b). diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 44dad1aa95d..1746afeaabf 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -902,10 +902,13 @@ print_kprobe_event(struct trace_iterator *iter, int flags) event = ftrace_find_event(field->ent.type); tp = container_of(event, struct trace_probe, event); + if (!trace_seq_printf(s, "%s: (", tp->call.name)) + goto partial; + if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) goto partial; - if (!trace_seq_puts(s, ":")) + if (!trace_seq_puts(s, ")")) goto partial; for (i = 0; i < field->nargs; i++) @@ -934,6 +937,9 @@ print_kretprobe_event(struct trace_iterator *iter, int flags) event = ftrace_find_event(field->ent.type); tp = container_of(event, struct trace_probe, event); + if (!trace_seq_printf(s, "%s: (", tp->call.name)) + goto partial; + if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) goto partial; @@ -943,7 +949,7 @@ print_kretprobe_event(struct trace_iterator *iter, int flags) if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET)) goto partial; - if (!trace_seq_puts(s, ":")) + if (!trace_seq_puts(s, ")")) goto partial; for (i = 0; i < field->nargs; i++) @@ -1087,7 +1093,7 @@ static int kprobe_event_show_format(struct ftrace_event_call *call, SHOW_FIELD(unsigned long, args[i], tp->args[i].name); trace_seq_puts(s, "\n"); - return __probe_event_show_format(s, tp, "%lx:", "ip"); + return __probe_event_show_format(s, tp, "(%lx)", "REC->ip"); } static int kretprobe_event_show_format(struct ftrace_event_call *call, @@ -1106,8 +1112,8 @@ static int kretprobe_event_show_format(struct ftrace_event_call *call, SHOW_FIELD(unsigned long, args[i], tp->args[i].name); trace_seq_puts(s, "\n"); - return __probe_event_show_format(s, tp, "%lx <- %lx:", - "func, 
ret_ip"); + return __probe_event_show_format(s, tp, "(%lx <- %lx)", + "REC->func, REC->ret_ip"); } #ifdef CONFIG_EVENT_PROFILE From f52487e9c0041842eeb77c6c48774414b1cede08 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 10 Sep 2009 19:53:53 -0400 Subject: [PATCH 064/470] tracing/kprobes: Support custom subsystem for each kprobe event Support specifying a custom subsystem(group) for each kprobe event. This allows users to create new group to control several probes at once, or add events to existing groups as additional tracepoints. New synopsis: p[:[subsys/]event-name] KADDR|KSYM[+offs] [ARGS] Signed-off-by: Masami Hiramatsu Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Frederic Weisbecker Cc: H. Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: Peter Zijlstra Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi LKML-Reference: <20090910235353.22412.15149.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Frederic Weisbecker --- Documentation/trace/kprobetrace.txt | 5 +++-- kernel/trace/trace_kprobe.c | 33 +++++++++++++++++++++++------ 2 files changed, 30 insertions(+), 8 deletions(-) diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt index a849889e609..6521681e783 100644 --- a/Documentation/trace/kprobetrace.txt +++ b/Documentation/trace/kprobetrace.txt @@ -25,9 +25,10 @@ probe events via /sys/kernel/debug/tracing/events/kprobes//filter. Synopsis of kprobe_events ------------------------- - p[:EVENT] SYMBOL[+offs]|MEMADDR [FETCHARGS] : Set a probe - r[:EVENT] SYMBOL[+0] [FETCHARGS] : Set a return probe + p[:[GRP/]EVENT] SYMBOL[+offs]|MEMADDR [FETCHARGS] : Set a probe + r[:[GRP/]EVENT] SYMBOL[+0] [FETCHARGS] : Set a return probe + GRP : Group name. If omitted, use "kprobes" for it. EVENT : Event name. If omitted, the event name is generated based on SYMBOL+offs or MEMADDR. SYMBOL[+offs] : Symbol+offset where the probe is inserted. diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 1746afeaabf..cbc0870dcf5 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -36,6 +36,7 @@ #define MAX_TRACE_ARGS 128 #define MAX_ARGSTR_LEN 63 #define MAX_EVENT_NAME_LEN 64 +#define KPROBE_EVENT_SYSTEM "kprobes" /* currently, trace_kprobe only supports X86. */ @@ -265,7 +266,8 @@ static LIST_HEAD(probe_list); /* * Allocate new trace_probe and initialize it (including kprobes). 
*/ -static struct trace_probe *alloc_trace_probe(const char *event, +static struct trace_probe *alloc_trace_probe(const char *group, + const char *event, void *addr, const char *symbol, unsigned long offs, @@ -298,9 +300,16 @@ static struct trace_probe *alloc_trace_probe(const char *event, if (!tp->call.name) goto error; + if (!group) + goto error; + tp->call.system = kstrdup(group, GFP_KERNEL); + if (!tp->call.system) + goto error; + INIT_LIST_HEAD(&tp->list); return tp; error: + kfree(tp->call.name); kfree(tp->symbol); kfree(tp); return ERR_PTR(-ENOMEM); @@ -322,6 +331,7 @@ static void free_trace_probe(struct trace_probe *tp) for (i = 0; i < tp->nr_args; i++) free_probe_arg(&tp->args[i]); + kfree(tp->call.system); kfree(tp->call.name); kfree(tp->symbol); kfree(tp); @@ -530,8 +540,8 @@ static int create_trace_probe(int argc, char **argv) { /* * Argument syntax: - * - Add kprobe: p[:EVENT] SYMBOL[+OFFS]|ADDRESS [FETCHARGS] - * - Add kretprobe: r[:EVENT] SYMBOL[+0] [FETCHARGS] + * - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS] + * - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS] * Fetch args: * aN : fetch Nth of function argument. (N:0-) * rv : fetch return value @@ -549,7 +559,7 @@ static int create_trace_probe(int argc, char **argv) struct trace_probe *tp; int i, ret = 0; int is_return = 0; - char *symbol = NULL, *event = NULL, *arg = NULL; + char *symbol = NULL, *event = NULL, *arg = NULL, *group = NULL; unsigned long offset = 0; void *addr = NULL; char buf[MAX_EVENT_NAME_LEN]; @@ -566,6 +576,15 @@ static int create_trace_probe(int argc, char **argv) if (argv[0][1] == ':') { event = &argv[0][2]; + if (strchr(event, '/')) { + group = event; + event = strchr(group, '/') + 1; + event[-1] = '\0'; + if (strlen(group) == 0) { + pr_info("Group name is not specifiled\n"); + return -EINVAL; + } + } if (strlen(event) == 0) { pr_info("Event name is not specifiled\n"); return -EINVAL; @@ -592,6 +611,8 @@ static int create_trace_probe(int argc, char **argv) argc -= 2; argv += 2; /* setup a probe */ + if (!group) + group = KPROBE_EVENT_SYSTEM; if (!event) { /* Make a new event name */ if (symbol) @@ -602,7 +623,8 @@ static int create_trace_probe(int argc, char **argv) is_return ? 'r' : 'p', addr); event = buf; } - tp = alloc_trace_probe(event, addr, symbol, offset, argc, is_return); + tp = alloc_trace_probe(group, event, addr, symbol, offset, argc, + is_return); if (IS_ERR(tp)) return PTR_ERR(tp); @@ -1217,7 +1239,6 @@ static int register_probe_event(struct trace_probe *tp) int ret; /* Initialize ftrace_event_call */ - call->system = "kprobes"; if (probe_is_return(tp)) { tp->event.trace = print_kretprobe_event; call->raw_init = probe_event_raw_init; From 2d5e067edc4635ff7515bfa9ab3edb38bc344cab Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 14 Sep 2009 16:48:56 -0400 Subject: [PATCH 065/470] tracing/kprobes: Fix trace_probe registration order Fix trace_probe registration order. ftrace_event_call and ftrace_event must be registered before kprobe/kretprobe, because tracing/profiling handlers dereference the event-id. Signed-off-by: Masami Hiramatsu Acked-by: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: H. 
Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: Peter Zijlstra Cc: Srikar Dronamraju Cc: Tom Zanussi LKML-Reference: <20090914204856.18779.52961.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Frederic Weisbecker --- kernel/trace/trace_kprobe.c | 42 +++++++++++++++++-------------------- 1 file changed, 19 insertions(+), 23 deletions(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index cbc0870dcf5..ea0db8eee57 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -347,20 +347,15 @@ static struct trace_probe *find_probe_event(const char *event) return NULL; } -static void __unregister_trace_probe(struct trace_probe *tp) +/* Unregister a trace_probe and probe_event: call with locking probe_lock */ +static void unregister_trace_probe(struct trace_probe *tp) { if (probe_is_return(tp)) unregister_kretprobe(&tp->rp); else unregister_kprobe(&tp->rp.kp); -} - -/* Unregister a trace_probe and probe_event: call with locking probe_lock */ -static void unregister_trace_probe(struct trace_probe *tp) -{ - unregister_probe_event(tp); - __unregister_trace_probe(tp); list_del(&tp->list); + unregister_probe_event(tp); } /* Register a trace_probe and probe_event */ @@ -371,6 +366,19 @@ static int register_trace_probe(struct trace_probe *tp) mutex_lock(&probe_lock); + /* register as an event */ + old_tp = find_probe_event(tp->call.name); + if (old_tp) { + /* delete old event */ + unregister_trace_probe(old_tp); + free_trace_probe(old_tp); + } + ret = register_probe_event(tp); + if (ret) { + pr_warning("Faild to register probe event(%d)\n", ret); + goto end; + } + if (probe_is_return(tp)) ret = register_kretprobe(&tp->rp); else @@ -384,21 +392,9 @@ static int register_trace_probe(struct trace_probe *tp) tp->rp.kp.addr); ret = -EINVAL; } - goto end; - } - /* register as an event */ - old_tp = find_probe_event(tp->call.name); - if (old_tp) { - /* delete old event */ - unregister_trace_probe(old_tp); - free_trace_probe(old_tp); - } - ret = register_probe_event(tp); - if (ret) { - pr_warning("Faild to register probe event(%d)\n", ret); - __unregister_trace_probe(tp); - } - list_add_tail(&tp->list, &probe_list); + unregister_probe_event(tp); + } else + list_add_tail(&tp->list, &probe_list); end: mutex_unlock(&probe_lock); return ret; From 588bebb74fe87270f94c2810652bd683d63c4b54 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 16 Sep 2009 11:42:55 -0400 Subject: [PATCH 066/470] ftrace: Fix trace_add_event_call() to initialize list Handle failure path in trace_add_event_call() to fix the below bug which occurred when I tried to add invalid event twice. Could not create debugfs 'kmalloc' directory Failed to register kprobe event: kmalloc Faild to register probe event(-1) ------------[ cut here ]------------ WARNING: at /home/mhiramat/ksrc/random-tracing/lib/list_debug.c:26 __list_add+0x27/0x5c() Hardware name: list_add corruption. next->prev should be prev (c07d78cc), but was 00001000. (next=d854236c). Modules linked in: sunrpc uinput virtio_net virtio_balloon i2c_piix4 pcspkr i2c_core virtio_blk virtio_pci virtio_ring virtio [last unloaded: scsi_wait_scan] Pid: 1394, comm: tee Not tainted 2.6.31-rc9 #51 Call Trace: [] warn_slowpath_common+0x65/0x7c [] ? __list_add+0x27/0x5c [] warn_slowpath_fmt+0x24/0x27 [] __list_add+0x27/0x5c [] list_add+0xa/0xc [] trace_add_event_call+0x60/0x97 [] command_trace_probe+0x42c/0x51b [] ? remove_wait_queue+0x22/0x27 [] ? __wake_up+0x32/0x3b [] probes_write+0xd4/0x10a [] ? 
probes_write+0x0/0x10a [] vfs_write+0x80/0xdf [] sys_write+0x3b/0x5d [] syscall_call+0x7/0xb ---[ end trace 2b962b5dc1fdc07d ]--- Signed-off-by: Masami Hiramatsu Acked-by: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Frederic Weisbecker Cc: H. Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: Peter Zijlstra Cc: Srikar Dronamraju Cc: Tom Zanussi LKML-Reference: <4AB1077F.6020107@redhat.com> Signed-off-by: Frederic Weisbecker --- kernel/trace/trace_events.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index ba3492076ab..83cc2c01195 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -1010,9 +1010,12 @@ static int __trace_add_event_call(struct ftrace_event_call *call) return -ENOENT; list_add(&call->list, &ftrace_events); - return event_create_dir(call, d_events, &ftrace_event_id_fops, + ret = event_create_dir(call, d_events, &ftrace_event_id_fops, &ftrace_enable_fops, &ftrace_event_filter_fops, &ftrace_event_format_fops); + if (ret < 0) + list_del(&call->list); + return ret; } /* Add an additional event_call dynamically */ From 4fead8e46fded93cc0d432ced774d9a3a8d21bad Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 14 Sep 2009 16:49:12 -0400 Subject: [PATCH 067/470] ftrace: Fix trace_remove_event_call() to lock trace_event_mutex Lock not only event_mutex but also trace_event_mutex in trace_remove_event_call() to protect __unregister_ftrace_event(). Signed-off-by: Masami Hiramatsu Acked-by: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Frederic Weisbecker Cc: H. Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: Peter Zijlstra Cc: Srikar Dronamraju Cc: Tom Zanussi LKML-Reference: <20090914204912.18779.68734.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Frederic Weisbecker --- kernel/trace/trace_events.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 83cc2c01195..f85b0f1cb94 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -1054,6 +1054,9 @@ static void remove_subsystem_dir(const char *name) } } +/* + * Must be called under locking both of event_mutex and trace_event_mutex. + */ static void __trace_remove_event_call(struct ftrace_event_call *call) { ftrace_event_enable_disable(call, 0); @@ -1070,7 +1073,9 @@ static void __trace_remove_event_call(struct ftrace_event_call *call) void trace_remove_event_call(struct ftrace_event_call *call) { mutex_lock(&event_mutex); + down_write(&trace_event_mutex); __trace_remove_event_call(call); + up_write(&trace_event_mutex); mutex_unlock(&event_mutex); } From 50d780560785b068c358675c5f0bf6c83b5c373e Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 14 Sep 2009 16:49:20 -0400 Subject: [PATCH 068/470] tracing/kprobes: Add probe handler dispatcher to support perf and ftrace concurrent use Add kprobe_dispatcher and kretprobe_dispatcher to dispatch event in both profile and tracing handlers. This allows simultaneous kprobe uses by ftrace and perf. Signed-off-by: Masami Hiramatsu Acked-by: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: H. 
Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: Peter Zijlstra Cc: Srikar Dronamraju Cc: Tom Zanussi LKML-Reference: <20090914204920.18779.57555.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Frederic Weisbecker --- kernel/trace/trace_kprobe.c | 85 +++++++++++++++++++++++++++---------- 1 file changed, 63 insertions(+), 22 deletions(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index ea0db8eee57..70b632c3bd0 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -185,10 +185,15 @@ struct probe_arg { const char *name; }; +/* Flags for trace_probe */ +#define TP_FLAG_TRACE 1 +#define TP_FLAG_PROFILE 2 + struct trace_probe { struct list_head list; struct kretprobe rp; /* Use rp.kp for kprobe use */ unsigned long nhit; + unsigned int flags; /* For TP_FLAG_* */ const char *symbol; /* symbol name */ struct ftrace_event_call call; struct trace_event event; @@ -200,10 +205,6 @@ struct trace_probe { (offsetof(struct trace_probe, args) + \ (sizeof(struct probe_arg) * (n))) -static int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs); -static int kretprobe_trace_func(struct kretprobe_instance *ri, - struct pt_regs *regs); - static __kprobes int probe_is_return(struct trace_probe *tp) { return tp->rp.handler != NULL; @@ -263,6 +264,10 @@ static void unregister_probe_event(struct trace_probe *tp); static DEFINE_MUTEX(probe_lock); static LIST_HEAD(probe_list); +static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs); +static int kretprobe_dispatcher(struct kretprobe_instance *ri, + struct pt_regs *regs); + /* * Allocate new trace_probe and initialize it (including kprobes). */ @@ -288,11 +293,10 @@ static struct trace_probe *alloc_trace_probe(const char *group, } else tp->rp.kp.addr = addr; - /* Set handler here for checking whether this probe is return or not. 
*/ if (is_return) - tp->rp.handler = kretprobe_trace_func; + tp->rp.handler = kretprobe_dispatcher; else - tp->rp.kp.pre_handler = kprobe_trace_func; + tp->rp.kp.pre_handler = kprobe_dispatcher; if (!event) goto error; @@ -379,6 +383,7 @@ static int register_trace_probe(struct trace_probe *tp) goto end; } + tp->flags = TP_FLAG_TRACE; if (probe_is_return(tp)) ret = register_kretprobe(&tp->rp); else @@ -987,23 +992,24 @@ static int probe_event_enable(struct ftrace_event_call *call) { struct trace_probe *tp = (struct trace_probe *)call->data; - if (probe_is_return(tp)) { - tp->rp.handler = kretprobe_trace_func; + tp->flags |= TP_FLAG_TRACE; + if (probe_is_return(tp)) return enable_kretprobe(&tp->rp); - } else { - tp->rp.kp.pre_handler = kprobe_trace_func; + else return enable_kprobe(&tp->rp.kp); - } } static void probe_event_disable(struct ftrace_event_call *call) { struct trace_probe *tp = (struct trace_probe *)call->data; - if (probe_is_return(tp)) - disable_kretprobe(&tp->rp); - else - disable_kprobe(&tp->rp.kp); + tp->flags &= ~TP_FLAG_TRACE; + if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) { + if (probe_is_return(tp)) + disable_kretprobe(&tp->rp); + else + disable_kprobe(&tp->rp.kp); + } } static int probe_event_raw_init(struct ftrace_event_call *event_call) @@ -1212,22 +1218,57 @@ static int probe_profile_enable(struct ftrace_event_call *call) if (atomic_inc_return(&call->profile_count)) return 0; - if (probe_is_return(tp)) { - tp->rp.handler = kretprobe_profile_func; + tp->flags |= TP_FLAG_PROFILE; + if (probe_is_return(tp)) return enable_kretprobe(&tp->rp); - } else { - tp->rp.kp.pre_handler = kprobe_profile_func; + else return enable_kprobe(&tp->rp.kp); - } } static void probe_profile_disable(struct ftrace_event_call *call) { + struct trace_probe *tp = (struct trace_probe *)call->data; + if (atomic_add_negative(-1, &call->profile_count)) - probe_event_disable(call); + tp->flags &= ~TP_FLAG_PROFILE; + + if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) { + if (probe_is_return(tp)) + disable_kretprobe(&tp->rp); + else + disable_kprobe(&tp->rp.kp); + } +} +#endif /* CONFIG_EVENT_PROFILE */ + + +static __kprobes +int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) +{ + struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); + + if (tp->flags & TP_FLAG_TRACE) + kprobe_trace_func(kp, regs); +#ifdef CONFIG_EVENT_PROFILE + if (tp->flags & TP_FLAG_PROFILE) + kprobe_profile_func(kp, regs); +#endif /* CONFIG_EVENT_PROFILE */ + return 0; /* We don't tweek kernel, so just return 0 */ } +static __kprobes +int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) +{ + struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); + + if (tp->flags & TP_FLAG_TRACE) + kretprobe_trace_func(ri, regs); +#ifdef CONFIG_EVENT_PROFILE + if (tp->flags & TP_FLAG_PROFILE) + kretprobe_profile_func(ri, regs); #endif /* CONFIG_EVENT_PROFILE */ + return 0; /* We don't tweek kernel, so just return 0 */ +} static int register_probe_event(struct trace_probe *tp) { From 74ebb63e7cd25f6fb02a45fc2ea7735bce1217c9 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 14 Sep 2009 16:49:28 -0400 Subject: [PATCH 069/470] tracing/kprobes: Fix profiling alignment for perf_counter buffer Fix *probe_profile_func() to align buffer size, since perf_counter requires its buffer entries to be 8 bytes aligned. Signed-off-by: Masami Hiramatsu Acked-by: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. 
Eigler Cc: H. Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: Peter Zijlstra Cc: Srikar Dronamraju Cc: Tom Zanussi LKML-Reference: <20090914204928.18779.60029.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Frederic Weisbecker --- kernel/trace/trace_kprobe.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 70b632c3bd0..d8db9357489 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1149,18 +1149,23 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp, struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); struct ftrace_event_call *call = &tp->call; struct kprobe_trace_entry *entry; - int size, i, pc; + int size, __size, i, pc; unsigned long irq_flags; local_save_flags(irq_flags); pc = preempt_count(); - size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); + __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); + size = ALIGN(__size + sizeof(u32), sizeof(u64)); + size -= sizeof(u32); do { char raw_data[size]; struct trace_entry *ent; - + /* + * Zero dead bytes from alignment to avoid stack leak + * to userspace + */ *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; entry = (struct kprobe_trace_entry *)raw_data; ent = &entry->ent; @@ -1183,13 +1188,15 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri, struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); struct ftrace_event_call *call = &tp->call; struct kretprobe_trace_entry *entry; - int size, i, pc; + int size, __size, i, pc; unsigned long irq_flags; local_save_flags(irq_flags); pc = preempt_count(); - size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args); + __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args); + size = ALIGN(__size + sizeof(u32), sizeof(u64)); + size -= sizeof(u32); do { char raw_data[size]; From 5a0d9050db4d1147722b42afef9011251b2651ee Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 14 Sep 2009 16:49:37 -0400 Subject: [PATCH 070/470] tracing/kprobes: Disable kprobe events by default after creation Disable newly created kprobe events by default, not to disturb another user using ftrace. "Disturb" means when someone is using ftrace and another user tries to use perf-tools, (in near future) if he defines new kprobe event via perf-tools, then new events will mess up the frace buffer. Fix this to allow proper and transparent kprobes events concurrent usage between ftrace users and perf users. Signed-off-by: Masami Hiramatsu Acked-by: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Frederic Weisbecker Cc: H. Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: Peter Zijlstra Cc: Srikar Dronamraju Cc: Tom Zanussi LKML-Reference: <20090914204937.18779.59422.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Frederic Weisbecker --- Documentation/trace/kprobetrace.txt | 11 +++++++++-- kernel/trace/trace_kprobe.c | 4 ++-- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt index 6521681e783..9b8f7c6040a 100644 --- a/Documentation/trace/kprobetrace.txt +++ b/Documentation/trace/kprobetrace.txt @@ -122,8 +122,15 @@ print fmt: "(%lx) dfd=%lx filename=%lx flags=%lx mode=%lx", REC->ip, REC->dfd, R echo > /sys/kernel/debug/tracing/kprobe_events - This clears all probe points. 
and you can see the traced information via -/sys/kernel/debug/tracing/trace. + This clears all probe points. + + Right after definition, each event is disabled by default. For tracing these +events, you need to enable it. + + echo 1 > /sys/kernel/debug/tracing/events/kprobes/myprobe/enable + echo 1 > /sys/kernel/debug/tracing/events/kprobes/myretprobe/enable + + And you can see the traced information via /sys/kernel/debug/tracing/trace. cat /sys/kernel/debug/tracing/trace # tracer: nop diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index d8db9357489..f6821f16227 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -383,7 +383,7 @@ static int register_trace_probe(struct trace_probe *tp) goto end; } - tp->flags = TP_FLAG_TRACE; + tp->rp.kp.flags |= KPROBE_FLAG_DISABLED; if (probe_is_return(tp)) ret = register_kretprobe(&tp->rp); else @@ -1298,7 +1298,7 @@ static int register_probe_event(struct trace_probe *tp) call->id = register_ftrace_event(&tp->event); if (!call->id) return -ENODEV; - call->enabled = 1; + call->enabled = 0; call->regfunc = probe_event_enable; call->unregfunc = probe_event_disable; From 1f0ab40976460bc4673fa204ce917a725185d8f2 Mon Sep 17 00:00:00 2001 From: Ananth N Mavinakayanahalli Date: Tue, 15 Sep 2009 10:43:07 +0530 Subject: [PATCH 071/470] kprobes: Prevent re-registration of the same kprobe Prevent re-registration of the same kprobe. This situation, though unlikely, needs to be flagged since it can lead to a system crash if it's not handled. The core change itself is small, but the helper routine needed to be moved around a bit; hence the diffstat. Signed-off-by: Ananth N Mavinakayanahalli Acked-by: Masami Hiramatsu Cc: Jim Keniston Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Frederic Weisbecker Cc: H. Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: Peter Zijlstra Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi LKML-Reference: <20090915051307.GB26458@in.ibm.com> Signed-off-by: Frederic Weisbecker --- kernel/kprobes.c | 58 +++++++++++++++++++++++++++++++----------------- 1 file changed, 38 insertions(+), 20 deletions(-) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 00d01b0f9fe..b946761f84b 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -676,6 +676,40 @@ static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p) return (kprobe_opcode_t *)(((char *)addr) + p->offset); } +/* Check passed kprobe is valid and return kprobe in kprobe_table. 
*/ +static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p) +{ + struct kprobe *old_p, *list_p; + + old_p = get_kprobe(p->addr); + if (unlikely(!old_p)) + return NULL; + + if (p != old_p) { + list_for_each_entry_rcu(list_p, &old_p->list, list) + if (list_p == p) + /* kprobe p is a valid probe */ + goto valid; + return NULL; + } +valid: + return old_p; +} + +/* Return error if the kprobe is being re-registered */ +static inline int check_kprobe_rereg(struct kprobe *p) +{ + int ret = 0; + struct kprobe *old_p; + + mutex_lock(&kprobe_mutex); + old_p = __get_valid_kprobe(p); + if (old_p) + ret = -EINVAL; + mutex_unlock(&kprobe_mutex); + return ret; +} + int __kprobes register_kprobe(struct kprobe *p) { int ret = 0; @@ -688,6 +722,10 @@ int __kprobes register_kprobe(struct kprobe *p) return -EINVAL; p->addr = addr; + ret = check_kprobe_rereg(p); + if (ret) + return ret; + preempt_disable(); if (!kernel_text_address((unsigned long) p->addr) || in_kprobes_functions((unsigned long) p->addr)) { @@ -757,26 +795,6 @@ out: } EXPORT_SYMBOL_GPL(register_kprobe); -/* Check passed kprobe is valid and return kprobe in kprobe_table. */ -static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p) -{ - struct kprobe *old_p, *list_p; - - old_p = get_kprobe(p->addr); - if (unlikely(!old_p)) - return NULL; - - if (p != old_p) { - list_for_each_entry_rcu(list_p, &old_p->list, list) - if (list_p == p) - /* kprobe p is a valid probe */ - goto valid; - return NULL; - } -valid: - return old_p; -} - /* * Unregister a kprobe without a scheduler synchronization. */ From cbfeb267cb0ff632dbc8ff02685012bee2e87434 Mon Sep 17 00:00:00 2001 From: John Kacur Date: Thu, 24 Sep 2009 18:01:51 +0200 Subject: [PATCH 072/470] perf annotate: Add the cmp_null function and make use of it This function exists in builtin-report.c but not in builtin-annotate.c. Functions that use cmp_null are shorter and clearer. Synchronizing functions between these two files will also make it easier to potentially share code in the future.
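A standalone sketch of the resulting pattern (an editorial illustration, not part of the diff: cmp_null is as added by this patch, while cmp_name() and main() are assumed purely for demonstration):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* As added by the patch: 0 if both keys are NULL, -1 if only the left one is, 1 otherwise. */
static int64_t cmp_null(void *l, void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}

/* A comparator now only handles the non-NULL case itself, as sort__comm_collapse/sort__dso_cmp do below. */
static int64_t cmp_name(char *name_l, char *name_r)
{
	if (!name_l || !name_r)
		return cmp_null(name_l, name_r);

	return strcmp(name_l, name_r);
}

int main(void)
{
	char dso[] = "libc.so";

	printf("%d\n", (int)cmp_name(dso, NULL));	/* prints 1: the missing key sorts after */
	return 0;
}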
Signed-off-by: John Kacur Cc: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 1ec74161581..a33087328bd 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -82,6 +82,16 @@ struct sort_entry { size_t (*print)(FILE *fp, struct hist_entry *); }; +static int64_t cmp_null(void *l, void *r) +{ + if (!l && !r) + return 0; + else if (!l) + return -1; + else + return 1; +} + /* --sort pid */ static int64_t @@ -116,14 +126,8 @@ sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) char *comm_l = left->thread->comm; char *comm_r = right->thread->comm; - if (!comm_l || !comm_r) { - if (!comm_l && !comm_r) - return 0; - else if (!comm_l) - return -1; - else - return 1; - } + if (!comm_l || !comm_r) + return cmp_null(comm_l, comm_r); return strcmp(comm_l, comm_r); } @@ -149,14 +153,8 @@ sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) struct dso *dso_l = left->dso; struct dso *dso_r = right->dso; - if (!dso_l || !dso_r) { - if (!dso_l && !dso_r) - return 0; - else if (!dso_l) - return -1; - else - return 1; - } + if (!dso_l || !dso_r) + return cmp_null(dso_l, dso_r); return strcmp(dso_l->name, dso_r->name); } From 8b40f521cf1c9750eab0c04da9075e7484675e9c Mon Sep 17 00:00:00 2001 From: John Kacur Date: Thu, 24 Sep 2009 18:02:18 +0200 Subject: [PATCH 073/470] perf tools: Protect header files with a consistent style There was a colorful mix of header guards - standardize them. Signed-off-by: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/util/cache.h | 6 +++--- tools/perf/util/callchain.h | 2 +- tools/perf/util/color.h | 6 +++--- tools/perf/util/debug.h | 4 ++++ tools/perf/util/event.h | 3 ++- tools/perf/util/exec_cmd.h | 6 +++--- tools/perf/util/header.h | 6 +++--- tools/perf/util/help.h | 6 +++--- tools/perf/util/levenshtein.h | 6 +++--- tools/perf/util/module.h | 6 +++--- tools/perf/util/parse-events.h | 6 +++--- tools/perf/util/parse-options.h | 6 +++--- tools/perf/util/quote.h | 6 +++--- tools/perf/util/run-command.h | 6 +++--- tools/perf/util/sigchain.h | 6 +++--- tools/perf/util/strbuf.h | 6 +++--- tools/perf/util/string.h | 6 +++--- tools/perf/util/strlist.h | 6 +++--- tools/perf/util/svghelper.h | 6 +++--- tools/perf/util/symbol.h | 6 +++--- tools/perf/util/thread.h | 5 +++++ tools/perf/util/trace-event.h | 6 +++--- tools/perf/util/types.h | 6 +++--- tools/perf/util/values.h | 6 +++--- 24 files changed, 72 insertions(+), 62 deletions(-) diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h index 6f8ea9d210b..f26172c0c91 100644 --- a/tools/perf/util/cache.h +++ b/tools/perf/util/cache.h @@ -1,5 +1,5 @@ -#ifndef CACHE_H -#define CACHE_H +#ifndef __PERF_CACHE_H +#define __PERF_CACHE_H #include "util.h" #include "strbuf.h" @@ -117,4 +117,4 @@ extern char *perf_pathdup(const char *fmt, ...) 
extern size_t strlcpy(char *dest, const char *src, size_t size); -#endif /* CACHE_H */ +#endif /* __PERF_CACHE_H */ diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h index 43cf3ea9e08..ad4626de4c2 100644 --- a/tools/perf/util/callchain.h +++ b/tools/perf/util/callchain.h @@ -58,4 +58,4 @@ static inline u64 cumul_hits(struct callchain_node *node) int register_callchain_param(struct callchain_param *param); void append_chain(struct callchain_node *root, struct ip_callchain *chain, struct symbol **syms); -#endif +#endif /* __PERF_CALLCHAIN_H */ diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h index 58d597564b9..24e8809210b 100644 --- a/tools/perf/util/color.h +++ b/tools/perf/util/color.h @@ -1,5 +1,5 @@ -#ifndef COLOR_H -#define COLOR_H +#ifndef __PERF_COLOR_H +#define __PERF_COLOR_H /* "\033[1;38;5;2xx;48;5;2xxm\0" is 23 bytes */ #define COLOR_MAXLEN 24 @@ -39,4 +39,4 @@ int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *bu int percent_color_fprintf(FILE *fp, const char *fmt, double percent); const char *get_percent_color(double percent); -#endif /* COLOR_H */ +#endif /* __PERF_COLOR_H */ diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h index 437eea58ce4..02d1fa1c246 100644 --- a/tools/perf/util/debug.h +++ b/tools/perf/util/debug.h @@ -1,4 +1,6 @@ /* For debugging general purposes */ +#ifndef __PERF_DEBUG_H +#define __PERF_DEBUG_H extern int verbose; extern int dump_trace; @@ -6,3 +8,5 @@ extern int dump_trace; int eprintf(const char *fmt, ...) __attribute__((format(printf, 1, 2))); int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2))); void trace_event(event_t *event); + +#endif /* __PERF_DEBUG_H */ diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 2c9c26d6ded..c31a5da6458 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -1,5 +1,6 @@ #ifndef __PERF_RECORD_H #define __PERF_RECORD_H + #include "../perf.h" #include "util.h" #include @@ -101,4 +102,4 @@ struct map *map__clone(struct map *self); int map__overlap(struct map *l, struct map *r); size_t map__fprintf(struct map *self, FILE *fp); -#endif +#endif /* __PERF_RECORD_H */ diff --git a/tools/perf/util/exec_cmd.h b/tools/perf/util/exec_cmd.h index effe25eb154..31647ac92ed 100644 --- a/tools/perf/util/exec_cmd.h +++ b/tools/perf/util/exec_cmd.h @@ -1,5 +1,5 @@ -#ifndef PERF_EXEC_CMD_H -#define PERF_EXEC_CMD_H +#ifndef __PERF_EXEC_CMD_H +#define __PERF_EXEC_CMD_H extern void perf_set_argv_exec_path(const char *exec_path); extern const char *perf_extract_argv0_path(const char *path); @@ -10,4 +10,4 @@ extern int execv_perf_cmd(const char **argv); /* NULL terminated */ extern int execl_perf_cmd(const char *cmd, ...); extern const char *system_path(const char *path); -#endif /* PERF_EXEC_CMD_H */ +#endif /* __PERF_EXEC_CMD_H */ diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index a0761bc7863..a2916b652a1 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -1,5 +1,5 @@ -#ifndef _PERF_HEADER_H -#define _PERF_HEADER_H +#ifndef __PERF_HEADER_H +#define __PERF_HEADER_H #include "../../../include/linux/perf_event.h" #include @@ -44,4 +44,4 @@ perf_header__find_attr(u64 id, struct perf_header *header); struct perf_header *perf_header__new(void); -#endif /* _PERF_HEADER_H */ +#endif /* __PERF_HEADER_H */ diff --git a/tools/perf/util/help.h b/tools/perf/util/help.h index 7128783637b..7f5c6dedd71 100644 --- a/tools/perf/util/help.h +++ b/tools/perf/util/help.h @@ -1,5 +1,5 @@ 
-#ifndef HELP_H -#define HELP_H +#ifndef __PERF_HELP_H +#define __PERF_HELP_H struct cmdnames { size_t alloc; @@ -26,4 +26,4 @@ int is_in_cmdlist(struct cmdnames *c, const char *s); void list_commands(const char *title, struct cmdnames *main_cmds, struct cmdnames *other_cmds); -#endif /* HELP_H */ +#endif /* __PERF_HELP_H */ diff --git a/tools/perf/util/levenshtein.h b/tools/perf/util/levenshtein.h index 0173abeef52..b0fcb6d8a88 100644 --- a/tools/perf/util/levenshtein.h +++ b/tools/perf/util/levenshtein.h @@ -1,8 +1,8 @@ -#ifndef LEVENSHTEIN_H -#define LEVENSHTEIN_H +#ifndef __PERF_LEVENSHTEIN_H +#define __PERF_LEVENSHTEIN_H int levenshtein(const char *string1, const char *string2, int swap_penalty, int substition_penalty, int insertion_penalty, int deletion_penalty); -#endif +#endif /* __PERF_LEVENSHTEIN_H */ diff --git a/tools/perf/util/module.h b/tools/perf/util/module.h index 8a592ef641c..098e0412bc2 100644 --- a/tools/perf/util/module.h +++ b/tools/perf/util/module.h @@ -1,5 +1,5 @@ -#ifndef _PERF_MODULE_ -#define _PERF_MODULE_ 1 +#ifndef __PERF_MODULE_ +#define __PERF_MODULE_ 1 #include #include "../types.h" @@ -50,4 +50,4 @@ size_t mod_dso__fprintf(struct mod_dso *self, FILE *fp); struct module *mod_dso__find_module(struct mod_dso *self, const char *name); int mod_dso__load_modules(struct mod_dso *dso); -#endif /* _PERF_MODULE_ */ +#endif /* __PERF_MODULE_ */ diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index 30c60811284..8626a439033 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h @@ -1,5 +1,5 @@ -#ifndef _PARSE_EVENTS_H -#define _PARSE_EVENTS_H +#ifndef __PERF_PARSE_EVENTS_H +#define __PERF_PARSE_EVENTS_H /* * Parse symbolic events/counts passed in as options: */ @@ -31,4 +31,4 @@ extern char debugfs_path[]; extern int valid_debugfs_mount(const char *debugfs); -#endif /* _PARSE_EVENTS_H */ +#endif /* __PERF_PARSE_EVENTS_H */ diff --git a/tools/perf/util/parse-options.h b/tools/perf/util/parse-options.h index 2ee248ff27e..948805af43c 100644 --- a/tools/perf/util/parse-options.h +++ b/tools/perf/util/parse-options.h @@ -1,5 +1,5 @@ -#ifndef PARSE_OPTIONS_H -#define PARSE_OPTIONS_H +#ifndef __PERF_PARSE_OPTIONS_H +#define __PERF_PARSE_OPTIONS_H enum parse_opt_type { /* special types */ @@ -174,4 +174,4 @@ extern int parse_opt_verbosity_cb(const struct option *, const char *, int); extern const char *parse_options_fix_filename(const char *prefix, const char *file); -#endif +#endif /* __PERF_PARSE_OPTIONS_H */ diff --git a/tools/perf/util/quote.h b/tools/perf/util/quote.h index a5454a1d1c1..b6a01973391 100644 --- a/tools/perf/util/quote.h +++ b/tools/perf/util/quote.h @@ -1,5 +1,5 @@ -#ifndef QUOTE_H -#define QUOTE_H +#ifndef __PERF_QUOTE_H +#define __PERF_QUOTE_H #include #include @@ -65,4 +65,4 @@ extern void perl_quote_print(FILE *stream, const char *src); extern void python_quote_print(FILE *stream, const char *src); extern void tcl_quote_print(FILE *stream, const char *src); -#endif +#endif /* __PERF_QUOTE_H */ diff --git a/tools/perf/util/run-command.h b/tools/perf/util/run-command.h index cc1837deba8..d79028727ce 100644 --- a/tools/perf/util/run-command.h +++ b/tools/perf/util/run-command.h @@ -1,5 +1,5 @@ -#ifndef RUN_COMMAND_H -#define RUN_COMMAND_H +#ifndef __PERF_RUN_COMMAND_H +#define __PERF_RUN_COMMAND_H enum { ERR_RUN_COMMAND_FORK = 10000, @@ -85,4 +85,4 @@ struct async { int start_async(struct async *async); int finish_async(struct async *async); -#endif +#endif /* __PERF_RUN_COMMAND_H */ diff --git 
a/tools/perf/util/sigchain.h b/tools/perf/util/sigchain.h index 618083bce0c..1a53c11265f 100644 --- a/tools/perf/util/sigchain.h +++ b/tools/perf/util/sigchain.h @@ -1,5 +1,5 @@ -#ifndef SIGCHAIN_H -#define SIGCHAIN_H +#ifndef __PERF_SIGCHAIN_H +#define __PERF_SIGCHAIN_H typedef void (*sigchain_fun)(int); @@ -8,4 +8,4 @@ int sigchain_pop(int sig); void sigchain_push_common(sigchain_fun f); -#endif /* SIGCHAIN_H */ +#endif /* __PERF_SIGCHAIN_H */ diff --git a/tools/perf/util/strbuf.h b/tools/perf/util/strbuf.h index d2aa86c014c..a3d121d6c83 100644 --- a/tools/perf/util/strbuf.h +++ b/tools/perf/util/strbuf.h @@ -1,5 +1,5 @@ -#ifndef STRBUF_H -#define STRBUF_H +#ifndef __PERF_STRBUF_H +#define __PERF_STRBUF_H /* * Strbuf's can be use in many ways: as a byte array, or to store arbitrary @@ -134,4 +134,4 @@ extern int launch_editor(const char *path, struct strbuf *buffer, const char *co extern int strbuf_branchname(struct strbuf *sb, const char *name); extern int strbuf_check_branch_ref(struct strbuf *sb, const char *name); -#endif /* STRBUF_H */ +#endif /* __PERF_STRBUF_H */ diff --git a/tools/perf/util/string.h b/tools/perf/util/string.h index bf39dfadfd2..15c827475e7 100644 --- a/tools/perf/util/string.h +++ b/tools/perf/util/string.h @@ -1,5 +1,5 @@ -#ifndef _PERF_STRING_H_ -#define _PERF_STRING_H_ +#ifndef __PERF_STRING_H_ +#define __PERF_STRING_H_ #include "types.h" @@ -8,4 +8,4 @@ int hex2u64(const char *ptr, u64 *val); #define _STR(x) #x #define STR(x) _STR(x) -#endif +#endif /* __PERF_STRING_H */ diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h index 921818e44a5..cb4659306d7 100644 --- a/tools/perf/util/strlist.h +++ b/tools/perf/util/strlist.h @@ -1,5 +1,5 @@ -#ifndef STRLIST_H_ -#define STRLIST_H_ +#ifndef __PERF_STRLIST_H +#define __PERF_STRLIST_H #include #include @@ -36,4 +36,4 @@ static inline unsigned int strlist__nr_entries(const struct strlist *self) } int strlist__parse_list(struct strlist *self, const char *s); -#endif /* STRLIST_H_ */ +#endif /* __PERF_STRLIST_H */ diff --git a/tools/perf/util/svghelper.h b/tools/perf/util/svghelper.h index cd93195aedb..e0781989cc3 100644 --- a/tools/perf/util/svghelper.h +++ b/tools/perf/util/svghelper.h @@ -1,5 +1,5 @@ -#ifndef _INCLUDE_GUARD_SVG_HELPER_ -#define _INCLUDE_GUARD_SVG_HELPER_ +#ifndef __PERF_SVGHELPER_H +#define __PERF_SVGHELPER_H #include "types.h" @@ -25,4 +25,4 @@ extern void svg_close(void); extern int svg_page_width; -#endif +#endif /* __PERF_SVGHELPER_H */ diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 6e849071640..ee164f659ed 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -1,5 +1,5 @@ -#ifndef _PERF_SYMBOL_ -#define _PERF_SYMBOL_ 1 +#ifndef __PERF_SYMBOL +#define __PERF_SYMBOL 1 #include #include "types.h" @@ -89,4 +89,4 @@ extern struct dso *vdso; extern struct dso *hypervisor_dso; extern const char *vmlinux_name; extern int modules; -#endif /* _PERF_SYMBOL_ */ +#endif /* __PERF_SYMBOL */ diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 32aea3c1c2a..693ed1ea10b 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -1,3 +1,6 @@ +#ifndef __PERF_THREAD_H +#define __PERF_THREAD_H + #include #include #include @@ -20,3 +23,5 @@ void thread__insert_map(struct thread *self, struct map *map); int thread__fork(struct thread *self, struct thread *parent); struct map *thread__find_map(struct thread *self, u64 ip); size_t threads__fprintf(FILE *fp, struct rb_root *threads); + +#endif /* __PERF_THREAD_H */ diff --git 
a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index 693f815c942..162c3e6deb9 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -1,5 +1,5 @@ -#ifndef _TRACE_EVENTS_H -#define _TRACE_EVENTS_H +#ifndef __PERF_TRACE_EVENTS_H +#define __PERF_TRACE_EVENTS_H #include "parse-events.h" @@ -242,4 +242,4 @@ void *raw_field_ptr(struct event *event, const char *name, void *data); void read_tracing_data(struct perf_event_attr *pattrs, int nb_events); -#endif /* _TRACE_EVENTS_H */ +#endif /* __PERF_TRACE_EVENTS_H */ diff --git a/tools/perf/util/types.h b/tools/perf/util/types.h index 5e75f900594..7d6b8331f89 100644 --- a/tools/perf/util/types.h +++ b/tools/perf/util/types.h @@ -1,5 +1,5 @@ -#ifndef _PERF_TYPES_H -#define _PERF_TYPES_H +#ifndef __PERF_TYPES_H +#define __PERF_TYPES_H /* * We define u64 as unsigned long long for every architecture @@ -14,4 +14,4 @@ typedef signed short s16; typedef unsigned char u8; typedef signed char s8; -#endif /* _PERF_TYPES_H */ +#endif /* __PERF_TYPES_H */ diff --git a/tools/perf/util/values.h b/tools/perf/util/values.h index cadf8cf2a59..2fa967e1a88 100644 --- a/tools/perf/util/values.h +++ b/tools/perf/util/values.h @@ -1,5 +1,5 @@ -#ifndef _PERF_VALUES_H -#define _PERF_VALUES_H +#ifndef __PERF_VALUES_H +#define __PERF_VALUES_H #include "types.h" @@ -24,4 +24,4 @@ void perf_read_values_add_value(struct perf_read_values *values, void perf_read_values_display(FILE *fp, struct perf_read_values *values, int raw); -#endif /* _PERF_VALUES_H */ +#endif /* __PERF_VALUES_H */ From dd68ada2d417e57b848822a1407b5317a54136c5 Mon Sep 17 00:00:00 2001 From: John Kacur Date: Thu, 24 Sep 2009 18:02:49 +0200 Subject: [PATCH 074/470] perf tools: Create util/sort.[ch] and use it Create util/sort.[ch] and move common functionality for builtin-report.c and builtin-annotate.c there, and make use of it.
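To make the shared code's intent concrete: after the move, each builtin keeps feeding its --sort string, token by token, into sort_dimension__add(), which now lives in util/sort.c. A standalone sketch of that driving loop (the tokenizing caller and the stubbed sort_dimension__add() are assumptions for illustration, not code from this patch):

#include <stdio.h>
#include <string.h>

/* Stub standing in for the helper consolidated into util/sort.c by this patch. */
static int sort_dimension__add(const char *tok)
{
	printf("registered sort key: %s\n", tok);
	return 0;
}

/* Assumed caller side: split a --sort string and register each key in order. */
static void setup_sorting(char *sort_order)
{
	char *tok;

	for (tok = strtok(sort_order, ", "); tok; tok = strtok(NULL, ", "))
		if (sort_dimension__add(tok) < 0)
			fprintf(stderr, "Unknown --sort key: '%s'\n", tok);
}

int main(void)
{
	char order[] = "comm,dso,symbol";	/* builtin-report's default order */

	setup_sorting(order);
	return 0;
}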
Signed-off-by: John Kacur LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 2 + tools/perf/builtin-annotate.c | 211 +---------------------- tools/perf/builtin-report.c | 307 +--------------------------------- tools/perf/util/sort.c | 268 +++++++++++++++++++++++++++++ tools/perf/util/sort.h | 93 ++++++++++ 5 files changed, 373 insertions(+), 508 deletions(-) create mode 100644 tools/perf/util/sort.c create mode 100644 tools/perf/util/sort.h diff --git a/tools/perf/Makefile b/tools/perf/Makefile index b5f1953b614..0a9e5aede31 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -339,6 +339,7 @@ LIB_H += util/symbol.h LIB_H += util/module.h LIB_H += util/color.h LIB_H += util/values.h +LIB_H += util/sort.h LIB_OBJS += util/abspath.o LIB_OBJS += util/alias.o @@ -374,6 +375,7 @@ LIB_OBJS += util/trace-event-parse.o LIB_OBJS += util/trace-event-read.o LIB_OBJS += util/trace-event-info.o LIB_OBJS += util/svghelper.o +LIB_OBJS += util/sort.o BUILTIN_OBJS += builtin-annotate.o BUILTIN_OBJS += builtin-help.o diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index a33087328bd..059c565b31e 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -22,12 +22,10 @@ #include "util/parse-options.h" #include "util/parse-events.h" #include "util/thread.h" +#include "util/sort.h" static char const *input_name = "perf.data"; -static char default_sort_order[] = "comm,symbol"; -static char *sort_order = default_sort_order; - static int force; static int input; static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; @@ -55,207 +53,6 @@ struct sym_ext { static struct rb_root hist; -struct hist_entry { - struct rb_node rb_node; - - struct thread *thread; - struct map *map; - struct dso *dso; - struct symbol *sym; - u64 ip; - char level; - - uint32_t count; -}; - -/* - * configurable sorting bits - */ - -struct sort_entry { - struct list_head list; - - const char *header; - - int64_t (*cmp)(struct hist_entry *, struct hist_entry *); - int64_t (*collapse)(struct hist_entry *, struct hist_entry *); - size_t (*print)(FILE *fp, struct hist_entry *); -}; - -static int64_t cmp_null(void *l, void *r) -{ - if (!l && !r) - return 0; - else if (!l) - return -1; - else - return 1; -} - -/* --sort pid */ - -static int64_t -sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) -{ - return right->thread->pid - left->thread->pid; -} - -static size_t -sort__thread_print(FILE *fp, struct hist_entry *self) -{ - return fprintf(fp, "%16s:%5d", self->thread->comm ?: "", self->thread->pid); -} - -static struct sort_entry sort_thread = { - .header = " Command: Pid", - .cmp = sort__thread_cmp, - .print = sort__thread_print, -}; - -/* --sort comm */ - -static int64_t -sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) -{ - return right->thread->pid - left->thread->pid; -} - -static int64_t -sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) -{ - char *comm_l = left->thread->comm; - char *comm_r = right->thread->comm; - - if (!comm_l || !comm_r) - return cmp_null(comm_l, comm_r); - - return strcmp(comm_l, comm_r); -} - -static size_t -sort__comm_print(FILE *fp, struct hist_entry *self) -{ - return fprintf(fp, "%16s", self->thread->comm); -} - -static struct sort_entry sort_comm = { - .header = " Command", - .cmp = sort__comm_cmp, - .collapse = sort__comm_collapse, - .print = sort__comm_print, -}; - -/* --sort dso */ - -static int64_t -sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) -{ - struct dso 
*dso_l = left->dso; - struct dso *dso_r = right->dso; - - if (!dso_l || !dso_r) - return cmp_null(dso_l, dso_r); - - return strcmp(dso_l->name, dso_r->name); -} - -static size_t -sort__dso_print(FILE *fp, struct hist_entry *self) -{ - if (self->dso) - return fprintf(fp, "%-25s", self->dso->name); - - return fprintf(fp, "%016llx ", (u64)self->ip); -} - -static struct sort_entry sort_dso = { - .header = "Shared Object ", - .cmp = sort__dso_cmp, - .print = sort__dso_print, -}; - -/* --sort symbol */ - -static int64_t -sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) -{ - u64 ip_l, ip_r; - - if (left->sym == right->sym) - return 0; - - ip_l = left->sym ? left->sym->start : left->ip; - ip_r = right->sym ? right->sym->start : right->ip; - - return (int64_t)(ip_r - ip_l); -} - -static size_t -sort__sym_print(FILE *fp, struct hist_entry *self) -{ - size_t ret = 0; - - if (verbose) - ret += fprintf(fp, "%#018llx ", (u64)self->ip); - - if (self->sym) { - ret += fprintf(fp, "[%c] %s", - self->dso == kernel_dso ? 'k' : '.', self->sym->name); - } else { - ret += fprintf(fp, "%#016llx", (u64)self->ip); - } - - return ret; -} - -static struct sort_entry sort_sym = { - .header = "Symbol", - .cmp = sort__sym_cmp, - .print = sort__sym_print, -}; - -static int sort__need_collapse = 0; - -struct sort_dimension { - const char *name; - struct sort_entry *entry; - int taken; -}; - -static struct sort_dimension sort_dimensions[] = { - { .name = "pid", .entry = &sort_thread, }, - { .name = "comm", .entry = &sort_comm, }, - { .name = "dso", .entry = &sort_dso, }, - { .name = "symbol", .entry = &sort_sym, }, -}; - -static LIST_HEAD(hist_entry__sort_list); - -static int sort_dimension__add(char *tok) -{ - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) { - struct sort_dimension *sd = &sort_dimensions[i]; - - if (sd->taken) - continue; - - if (strncasecmp(tok, sd->name, strlen(tok))) - continue; - - if (sd->entry->collapse) - sort__need_collapse = 1; - - list_add_tail(&sd->entry->list, &hist_entry__sort_list); - sd->taken = 1; - - return 0; - } - - return -ESRCH; -} - static int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) { @@ -1137,5 +934,11 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used) setup_pager(); + if (field_sep && *field_sep == '.') { + fputs("'.' 
is the only non valid --field-separator argument\n", + stderr); + exit(129); + } + return __cmd_annotate(); } diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 19669c20088..7b43504900f 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -27,15 +27,13 @@ #include "util/parse-events.h" #include "util/thread.h" +#include "util/sort.h" static char const *input_name = "perf.data"; -static char default_sort_order[] = "comm,dso,symbol"; -static char *sort_order = default_sort_order; static char *dso_list_str, *comm_list_str, *sym_list_str, *col_width_list_str; static struct strlist *dso_list, *comm_list, *sym_list; -static char *field_sep; static int force; static int input; @@ -53,10 +51,6 @@ static char *pretty_printing_style = default_pretty_printing_style; static unsigned long page_size; static unsigned long mmap_window = 32; -static char default_parent_pattern[] = "^sys_|^do_page_fault"; -static char *parent_pattern = default_parent_pattern; -static regex_t parent_regex; - static int exclude_other = 1; static char callchain_default_opt[] = "fractal,0.5"; @@ -80,304 +74,8 @@ struct callchain_param callchain_param = { static u64 sample_type; -static int repsep_fprintf(FILE *fp, const char *fmt, ...) -{ - int n; - va_list ap; - - va_start(ap, fmt); - if (!field_sep) - n = vfprintf(fp, fmt, ap); - else { - char *bf = NULL; - n = vasprintf(&bf, fmt, ap); - if (n > 0) { - char *sep = bf; - - while (1) { - sep = strchr(sep, *field_sep); - if (sep == NULL) - break; - *sep = '.'; - } - } - fputs(bf, fp); - free(bf); - } - va_end(ap); - return n; -} - -static unsigned int dsos__col_width, - comms__col_width, - threads__col_width; - -/* - * histogram, sorted on item, collects counts - */ - static struct rb_root hist; -struct hist_entry { - struct rb_node rb_node; - - struct thread *thread; - struct map *map; - struct dso *dso; - struct symbol *sym; - struct symbol *parent; - u64 ip; - char level; - struct callchain_node callchain; - struct rb_root sorted_chain; - - u64 count; -}; - -/* - * configurable sorting bits - */ - -struct sort_entry { - struct list_head list; - - const char *header; - - int64_t (*cmp)(struct hist_entry *, struct hist_entry *); - int64_t (*collapse)(struct hist_entry *, struct hist_entry *); - size_t (*print)(FILE *fp, struct hist_entry *, unsigned int width); - unsigned int *width; - bool elide; -}; - -static int64_t cmp_null(void *l, void *r) -{ - if (!l && !r) - return 0; - else if (!l) - return -1; - else - return 1; -} - -/* --sort pid */ - -static int64_t -sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) -{ - return right->thread->pid - left->thread->pid; -} - -static size_t -sort__thread_print(FILE *fp, struct hist_entry *self, unsigned int width) -{ - return repsep_fprintf(fp, "%*s:%5d", width - 6, - self->thread->comm ?: "", self->thread->pid); -} - -static struct sort_entry sort_thread = { - .header = "Command: Pid", - .cmp = sort__thread_cmp, - .print = sort__thread_print, - .width = &threads__col_width, -}; - -/* --sort comm */ - -static int64_t -sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) -{ - return right->thread->pid - left->thread->pid; -} - -static int64_t -sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) -{ - char *comm_l = left->thread->comm; - char *comm_r = right->thread->comm; - - if (!comm_l || !comm_r) - return cmp_null(comm_l, comm_r); - - return strcmp(comm_l, comm_r); -} - -static size_t -sort__comm_print(FILE *fp, struct hist_entry 
*self, unsigned int width) -{ - return repsep_fprintf(fp, "%*s", width, self->thread->comm); -} - -static struct sort_entry sort_comm = { - .header = "Command", - .cmp = sort__comm_cmp, - .collapse = sort__comm_collapse, - .print = sort__comm_print, - .width = &comms__col_width, -}; - -/* --sort dso */ - -static int64_t -sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) -{ - struct dso *dso_l = left->dso; - struct dso *dso_r = right->dso; - - if (!dso_l || !dso_r) - return cmp_null(dso_l, dso_r); - - return strcmp(dso_l->name, dso_r->name); -} - -static size_t -sort__dso_print(FILE *fp, struct hist_entry *self, unsigned int width) -{ - if (self->dso) - return repsep_fprintf(fp, "%-*s", width, self->dso->name); - - return repsep_fprintf(fp, "%*llx", width, (u64)self->ip); -} - -static struct sort_entry sort_dso = { - .header = "Shared Object", - .cmp = sort__dso_cmp, - .print = sort__dso_print, - .width = &dsos__col_width, -}; - -/* --sort symbol */ - -static int64_t -sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) -{ - u64 ip_l, ip_r; - - if (left->sym == right->sym) - return 0; - - ip_l = left->sym ? left->sym->start : left->ip; - ip_r = right->sym ? right->sym->start : right->ip; - - return (int64_t)(ip_r - ip_l); -} - -static size_t -sort__sym_print(FILE *fp, struct hist_entry *self, unsigned int width __used) -{ - size_t ret = 0; - - if (verbose) - ret += repsep_fprintf(fp, "%#018llx %c ", (u64)self->ip, - dso__symtab_origin(self->dso)); - - ret += repsep_fprintf(fp, "[%c] ", self->level); - if (self->sym) { - ret += repsep_fprintf(fp, "%s", self->sym->name); - - if (self->sym->module) - ret += repsep_fprintf(fp, "\t[%s]", - self->sym->module->name); - } else { - ret += repsep_fprintf(fp, "%#016llx", (u64)self->ip); - } - - return ret; -} - -static struct sort_entry sort_sym = { - .header = "Symbol", - .cmp = sort__sym_cmp, - .print = sort__sym_print, -}; - -/* --sort parent */ - -static int64_t -sort__parent_cmp(struct hist_entry *left, struct hist_entry *right) -{ - struct symbol *sym_l = left->parent; - struct symbol *sym_r = right->parent; - - if (!sym_l || !sym_r) - return cmp_null(sym_l, sym_r); - - return strcmp(sym_l->name, sym_r->name); -} - -static size_t -sort__parent_print(FILE *fp, struct hist_entry *self, unsigned int width) -{ - return repsep_fprintf(fp, "%-*s", width, - self->parent ? 
self->parent->name : "[other]"); -} - -static unsigned int parent_symbol__col_width; - -static struct sort_entry sort_parent = { - .header = "Parent symbol", - .cmp = sort__parent_cmp, - .print = sort__parent_print, - .width = &parent_symbol__col_width, -}; - -static int sort__need_collapse = 0; -static int sort__has_parent = 0; - -struct sort_dimension { - const char *name; - struct sort_entry *entry; - int taken; -}; - -static struct sort_dimension sort_dimensions[] = { - { .name = "pid", .entry = &sort_thread, }, - { .name = "comm", .entry = &sort_comm, }, - { .name = "dso", .entry = &sort_dso, }, - { .name = "symbol", .entry = &sort_sym, }, - { .name = "parent", .entry = &sort_parent, }, -}; - -static LIST_HEAD(hist_entry__sort_list); - -static int sort_dimension__add(const char *tok) -{ - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) { - struct sort_dimension *sd = &sort_dimensions[i]; - - if (sd->taken) - continue; - - if (strncasecmp(tok, sd->name, strlen(tok))) - continue; - - if (sd->entry->collapse) - sort__need_collapse = 1; - - if (sd->entry == &sort_parent) { - int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); - if (ret) { - char err[BUFSIZ]; - - regerror(ret, &parent_regex, err, sizeof(err)); - fprintf(stderr, "Invalid regex: %s\n%s", - parent_pattern, err); - exit(-1); - } - sort__has_parent = 1; - } - - list_add_tail(&sd->entry->list, &hist_entry__sort_list); - sd->taken = 1; - - return 0; - } - - return -ESRCH; -} - static int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) { @@ -1606,7 +1304,8 @@ setup: return 0; } -static const char * const report_usage[] = { +//static const char * const report_usage[] = { +const char * const report_usage[] = { "perf report [] ", NULL }; diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c new file mode 100644 index 00000000000..50e75abb1fd --- /dev/null +++ b/tools/perf/util/sort.c @@ -0,0 +1,268 @@ +#include "sort.h" + +regex_t parent_regex; +char default_parent_pattern[] = "^sys_|^do_page_fault"; +char *parent_pattern = default_parent_pattern; +char default_sort_order[] = "comm,dso,symbol"; +char *sort_order = default_sort_order; +int sort__need_collapse = 0; +int sort__has_parent = 0; + +unsigned int dsos__col_width; +unsigned int comms__col_width; +unsigned int threads__col_width; +static unsigned int parent_symbol__col_width; +char * field_sep; + +LIST_HEAD(hist_entry__sort_list); + +struct sort_entry sort_thread = { + .header = "Command: Pid", + .cmp = sort__thread_cmp, + .print = sort__thread_print, + .width = &threads__col_width, +}; + +struct sort_entry sort_comm = { + .header = "Command", + .cmp = sort__comm_cmp, + .collapse = sort__comm_collapse, + .print = sort__comm_print, + .width = &comms__col_width, +}; + +struct sort_entry sort_dso = { + .header = "Shared Object", + .cmp = sort__dso_cmp, + .print = sort__dso_print, + .width = &dsos__col_width, +}; + +struct sort_entry sort_sym = { + .header = "Symbol", + .cmp = sort__sym_cmp, + .print = sort__sym_print, +}; + +struct sort_entry sort_parent = { + .header = "Parent symbol", + .cmp = sort__parent_cmp, + .print = sort__parent_print, + .width = &parent_symbol__col_width, +}; + +struct sort_dimension { + const char *name; + struct sort_entry *entry; + int taken; +}; + +static struct sort_dimension sort_dimensions[] = { + { .name = "pid", .entry = &sort_thread, }, + { .name = "comm", .entry = &sort_comm, }, + { .name = "dso", .entry = &sort_dso, }, + { .name = "symbol", .entry = &sort_sym, }, + { .name = 
"parent", .entry = &sort_parent, }, +}; + +int64_t cmp_null(void *l, void *r) +{ + if (!l && !r) + return 0; + else if (!l) + return -1; + else + return 1; +} + +/* --sort pid */ + +int64_t +sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) +{ + return right->thread->pid - left->thread->pid; +} + +int repsep_fprintf(FILE *fp, const char *fmt, ...) +{ + int n; + va_list ap; + + va_start(ap, fmt); + if (!field_sep) + n = vfprintf(fp, fmt, ap); + else { + char *bf = NULL; + n = vasprintf(&bf, fmt, ap); + if (n > 0) { + char *sep = bf; + + while (1) { + sep = strchr(sep, *field_sep); + if (sep == NULL) + break; + *sep = '.'; + } + } + fputs(bf, fp); + free(bf); + } + va_end(ap); + return n; +} + +size_t +sort__thread_print(FILE *fp, struct hist_entry *self, unsigned int width) +{ + return repsep_fprintf(fp, "%*s:%5d", width - 6, + self->thread->comm ?: "", self->thread->pid); +} + +size_t +sort__comm_print(FILE *fp, struct hist_entry *self, unsigned int width) +{ + return repsep_fprintf(fp, "%*s", width, self->thread->comm); +} + +/* --sort dso */ + +int64_t +sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) +{ + struct dso *dso_l = left->dso; + struct dso *dso_r = right->dso; + + if (!dso_l || !dso_r) + return cmp_null(dso_l, dso_r); + + return strcmp(dso_l->name, dso_r->name); +} + +size_t +sort__dso_print(FILE *fp, struct hist_entry *self, unsigned int width) +{ + if (self->dso) + return repsep_fprintf(fp, "%-*s", width, self->dso->name); + + return repsep_fprintf(fp, "%*llx", width, (u64)self->ip); +} + +/* --sort symbol */ + +int64_t +sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) +{ + u64 ip_l, ip_r; + + if (left->sym == right->sym) + return 0; + + ip_l = left->sym ? left->sym->start : left->ip; + ip_r = right->sym ? right->sym->start : right->ip; + + return (int64_t)(ip_r - ip_l); +} + + +size_t +sort__sym_print(FILE *fp, struct hist_entry *self, unsigned int width __used) +{ + size_t ret = 0; + + if (verbose) + ret += repsep_fprintf(fp, "%#018llx %c ", (u64)self->ip, + dso__symtab_origin(self->dso)); + + ret += repsep_fprintf(fp, "[%c] ", self->level); + if (self->sym) { + ret += repsep_fprintf(fp, "%s", self->sym->name); + + if (self->sym->module) + ret += repsep_fprintf(fp, "\t[%s]", + self->sym->module->name); + } else { + ret += repsep_fprintf(fp, "%#016llx", (u64)self->ip); + } + + return ret; +} + +/* --sort comm */ + +int64_t +sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) +{ + return right->thread->pid - left->thread->pid; +} + +int64_t +sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) +{ + char *comm_l = left->thread->comm; + char *comm_r = right->thread->comm; + + if (!comm_l || !comm_r) + return cmp_null(comm_l, comm_r); + + return strcmp(comm_l, comm_r); +} + +/* --sort parent */ + +int64_t +sort__parent_cmp(struct hist_entry *left, struct hist_entry *right) +{ + struct symbol *sym_l = left->parent; + struct symbol *sym_r = right->parent; + + if (!sym_l || !sym_r) + return cmp_null(sym_l, sym_r); + + return strcmp(sym_l->name, sym_r->name); +} + +size_t +sort__parent_print(FILE *fp, struct hist_entry *self, unsigned int width) +{ + return repsep_fprintf(fp, "%-*s", width, + self->parent ? 
self->parent->name : "[other]"); +} + +int sort_dimension__add(const char *tok) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) { + struct sort_dimension *sd = &sort_dimensions[i]; + + if (sd->taken) + continue; + + if (strncasecmp(tok, sd->name, strlen(tok))) + continue; + + if (sd->entry->collapse) + sort__need_collapse = 1; + + if (sd->entry == &sort_parent) { + int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); + if (ret) { + char err[BUFSIZ]; + + regerror(ret, &parent_regex, err, sizeof(err)); + fprintf(stderr, "Invalid regex: %s\n%s", + parent_pattern, err); + exit(-1); + } + sort__has_parent = 1; + } + + list_add_tail(&sd->entry->list, &hist_entry__sort_list); + sd->taken = 1; + + return 0; + } + + return -ESRCH; +} + diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h new file mode 100644 index 00000000000..4684fd6d5c4 --- /dev/null +++ b/tools/perf/util/sort.h @@ -0,0 +1,93 @@ +#ifndef __PERF_SORT_H +#define __PERF_SORT_H +#include "../builtin.h" + +#include "util.h" + +#include "color.h" +#include +#include "cache.h" +#include +#include "symbol.h" +#include "string.h" +#include "callchain.h" +#include "strlist.h" +#include "values.h" + +#include "../perf.h" +#include "debug.h" +#include "header.h" + +#include "parse-options.h" +#include "parse-events.h" + +#include "thread.h" +#include "sort.h" + +extern regex_t parent_regex; +extern char *sort_order; +extern char default_parent_pattern[]; +extern char *parent_pattern; +extern char default_sort_order[]; +extern int sort__need_collapse; +extern int sort__has_parent; +extern char *field_sep; +extern struct sort_entry sort_comm; +extern struct sort_entry sort_dso; +extern struct sort_entry sort_sym; +extern struct sort_entry sort_parent; +extern unsigned int dsos__col_width; +extern unsigned int comms__col_width; +extern unsigned int threads__col_width; + +struct hist_entry { + struct rb_node rb_node; + + struct thread *thread; + struct map *map; + struct dso *dso; + struct symbol *sym; + struct symbol *parent; + u64 ip; + char level; + struct callchain_node callchain; + struct rb_root sorted_chain; + + u64 count; +}; + +/* + * configurable sorting bits + */ + +struct sort_entry { + struct list_head list; + + const char *header; + + int64_t (*cmp)(struct hist_entry *, struct hist_entry *); + int64_t (*collapse)(struct hist_entry *, struct hist_entry *); + size_t (*print)(FILE *fp, struct hist_entry *, unsigned int width); + unsigned int *width; + bool elide; +}; + +extern struct sort_entry sort_thread; +extern struct list_head hist_entry__sort_list; + +extern int repsep_fprintf(FILE *fp, const char *fmt, ...); +extern size_t sort__thread_print(FILE *, struct hist_entry *, unsigned int); +extern size_t sort__comm_print(FILE *, struct hist_entry *, unsigned int); +extern size_t sort__dso_print(FILE *, struct hist_entry *, unsigned int); +extern size_t sort__sym_print(FILE *, struct hist_entry *, unsigned int __used); +extern int64_t cmp_null(void *, void *); +extern int64_t sort__thread_cmp(struct hist_entry *, struct hist_entry *); +extern int64_t sort__comm_cmp(struct hist_entry *, struct hist_entry *); +extern int64_t sort__comm_collapse(struct hist_entry *, struct hist_entry *); +extern int64_t sort__dso_cmp(struct hist_entry *, struct hist_entry *); +extern int64_t sort__sym_cmp(struct hist_entry *, struct hist_entry *); +extern int64_t sort__parent_cmp(struct hist_entry *, struct hist_entry *); +extern size_t sort__parent_print(FILE *, struct hist_entry *, unsigned int); +extern 
int sort_dimension__add(const char *); + +#endif /* __PERF_SORT_H */ From 3d1d07ecd2009f65cb2091563fa21f9600c36774 Mon Sep 17 00:00:00 2001 From: John Kacur Date: Mon, 28 Sep 2009 15:32:55 +0200 Subject: [PATCH 075/470] perf tools: Put common histogram functions in their own file Move histogram related functions into their own files (hist.c and hist.h) and make use of them in builtin-annotate.c and builtin-report.c. Signed-off-by: John Kacur Acked-by: Frederic Weisbecker Cc: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 2 + tools/perf/builtin-annotate.c | 152 +------------------------------ tools/perf/builtin-report.c | 164 +--------------------------------- tools/perf/util/hist.c | 164 ++++++++++++++++++++++++++++++++++ tools/perf/util/hist.h | 47 ++++++++++ 5 files changed, 216 insertions(+), 313 deletions(-) create mode 100644 tools/perf/util/hist.c create mode 100644 tools/perf/util/hist.h diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 0a9e5aede31..3a99a9fda64 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -340,6 +340,7 @@ LIB_H += util/module.h LIB_H += util/color.h LIB_H += util/values.h LIB_H += util/sort.h +LIB_H += util/hist.h LIB_OBJS += util/abspath.o LIB_OBJS += util/alias.o @@ -376,6 +377,7 @@ LIB_OBJS += util/trace-event-read.o LIB_OBJS += util/trace-event-info.o LIB_OBJS += util/svghelper.o LIB_OBJS += util/sort.o +LIB_OBJS += util/hist.o BUILTIN_OBJS += builtin-annotate.o BUILTIN_OBJS += builtin-help.o diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 059c565b31e..df516dce954 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -23,6 +23,7 @@ #include "util/parse-events.h" #include "util/thread.h" #include "util/sort.h" +#include "util/hist.h" static char const *input_name = "perf.data"; @@ -47,45 +48,6 @@ struct sym_ext { char *path; }; -/* - * histogram, sorted on item, collects counts - */ - -static struct rb_root hist; - -static int64_t -hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) -{ - struct sort_entry *se; - int64_t cmp = 0; - - list_for_each_entry(se, &hist_entry__sort_list, list) { - cmp = se->cmp(left, right); - if (cmp) - break; - } - - return cmp; -} - -static int64_t -hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) -{ - struct sort_entry *se; - int64_t cmp = 0; - - list_for_each_entry(se, &hist_entry__sort_list, list) { - int64_t (*f)(struct hist_entry *, struct hist_entry *); - - f = se->collapse ?: se->cmp; - - cmp = f(left, right); - if (cmp) - break; - } - - return cmp; -} /* * collect histogram counts @@ -163,116 +125,6 @@ hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, return 0; } -static void hist_entry__free(struct hist_entry *he) -{ - free(he); -} - -/* - * collapse the histogram - */ - -static struct rb_root collapse_hists; - -static void collapse__insert_entry(struct hist_entry *he) -{ - struct rb_node **p = &collapse_hists.rb_node; - struct rb_node *parent = NULL; - struct hist_entry *iter; - int64_t cmp; - - while (*p != NULL) { - parent = *p; - iter = rb_entry(parent, struct hist_entry, rb_node); - - cmp = hist_entry__collapse(iter, he); - - if (!cmp) { - iter->count += he->count; - hist_entry__free(he); - return; - } - - if (cmp < 0) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, &collapse_hists); -} - -static void collapse__resort(void) -{ - struct rb_node *next; - 
struct hist_entry *n; - - if (!sort__need_collapse) - return; - - next = rb_first(&hist); - while (next) { - n = rb_entry(next, struct hist_entry, rb_node); - next = rb_next(&n->rb_node); - - rb_erase(&n->rb_node, &hist); - collapse__insert_entry(n); - } -} - -/* - * reverse the map, sort on count. - */ - -static struct rb_root output_hists; - -static void output__insert_entry(struct hist_entry *he) -{ - struct rb_node **p = &output_hists.rb_node; - struct rb_node *parent = NULL; - struct hist_entry *iter; - - while (*p != NULL) { - parent = *p; - iter = rb_entry(parent, struct hist_entry, rb_node); - - if (he->count > iter->count) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, &output_hists); -} - -static void output__resort(void) -{ - struct rb_node *next; - struct hist_entry *n; - struct rb_root *tree = &hist; - - if (sort__need_collapse) - tree = &collapse_hists; - - next = rb_first(tree); - - while (next) { - n = rb_entry(next, struct hist_entry, rb_node); - next = rb_next(&n->rb_node); - - rb_erase(&n->rb_node, tree); - output__insert_entry(n); - } -} - -static unsigned long total = 0, - total_mmap = 0, - total_comm = 0, - total_fork = 0, - total_unknown = 0; - static int process_sample_event(event_t *event, unsigned long offset, unsigned long head) { @@ -861,7 +713,7 @@ more: dsos__fprintf(stdout); collapse__resort(); - output__resort(); + output__resort(total); find_annotations(); diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 7b43504900f..c1a54fc8527 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -28,6 +28,7 @@ #include "util/thread.h" #include "util/sort.h" +#include "util/hist.h" static char const *input_name = "perf.data"; @@ -55,8 +56,6 @@ static int exclude_other = 1; static char callchain_default_opt[] = "fractal,0.5"; -static int callchain; - static char __cwd[PATH_MAX]; static char *cwd = __cwd; static int cwdlen; @@ -66,50 +65,8 @@ static struct thread *last_match; static struct perf_header *header; -static -struct callchain_param callchain_param = { - .mode = CHAIN_GRAPH_REL, - .min_percent = 0.5 -}; - static u64 sample_type; -static struct rb_root hist; - -static int64_t -hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) -{ - struct sort_entry *se; - int64_t cmp = 0; - - list_for_each_entry(se, &hist_entry__sort_list, list) { - cmp = se->cmp(left, right); - if (cmp) - break; - } - - return cmp; -} - -static int64_t -hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) -{ - struct sort_entry *se; - int64_t cmp = 0; - - list_for_each_entry(se, &hist_entry__sort_list, list) { - int64_t (*f)(struct hist_entry *, struct hist_entry *); - - f = se->collapse ?: se->cmp; - - cmp = f(left, right); - if (cmp) - break; - } - - return cmp; -} - static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask) { int i; @@ -308,7 +265,6 @@ hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self, return ret; } - static size_t hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples) { @@ -573,117 +529,6 @@ hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, return 0; } -static void hist_entry__free(struct hist_entry *he) -{ - free(he); -} - -/* - * collapse the histogram - */ - -static struct rb_root collapse_hists; - -static void collapse__insert_entry(struct hist_entry *he) -{ - struct rb_node **p = &collapse_hists.rb_node; - struct rb_node *parent 
= NULL; - struct hist_entry *iter; - int64_t cmp; - - while (*p != NULL) { - parent = *p; - iter = rb_entry(parent, struct hist_entry, rb_node); - - cmp = hist_entry__collapse(iter, he); - - if (!cmp) { - iter->count += he->count; - hist_entry__free(he); - return; - } - - if (cmp < 0) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, &collapse_hists); -} - -static void collapse__resort(void) -{ - struct rb_node *next; - struct hist_entry *n; - - if (!sort__need_collapse) - return; - - next = rb_first(&hist); - while (next) { - n = rb_entry(next, struct hist_entry, rb_node); - next = rb_next(&n->rb_node); - - rb_erase(&n->rb_node, &hist); - collapse__insert_entry(n); - } -} - -/* - * reverse the map, sort on count. - */ - -static struct rb_root output_hists; - -static void output__insert_entry(struct hist_entry *he, u64 min_callchain_hits) -{ - struct rb_node **p = &output_hists.rb_node; - struct rb_node *parent = NULL; - struct hist_entry *iter; - - if (callchain) - callchain_param.sort(&he->sorted_chain, &he->callchain, - min_callchain_hits, &callchain_param); - - while (*p != NULL) { - parent = *p; - iter = rb_entry(parent, struct hist_entry, rb_node); - - if (he->count > iter->count) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, &output_hists); -} - -static void output__resort(u64 total_samples) -{ - struct rb_node *next; - struct hist_entry *n; - struct rb_root *tree = &hist; - u64 min_callchain_hits; - - min_callchain_hits = total_samples * (callchain_param.min_percent / 100); - - if (sort__need_collapse) - tree = &collapse_hists; - - next = rb_first(tree); - - while (next) { - n = rb_entry(next, struct hist_entry, rb_node); - next = rb_next(&n->rb_node); - - rb_erase(&n->rb_node, tree); - output__insert_entry(n, min_callchain_hits); - } -} - static size_t output__fprintf(FILE *fp, u64 total_samples) { struct hist_entry *pos; @@ -778,13 +623,6 @@ print_entries: return ret; } -static unsigned long total = 0, - total_mmap = 0, - total_comm = 0, - total_fork = 0, - total_unknown = 0, - total_lost = 0; - static int validate_chain(struct ip_callchain *chain, event_t *event) { unsigned int chain_size; diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c new file mode 100644 index 00000000000..82808dc4f8e --- /dev/null +++ b/tools/perf/util/hist.c @@ -0,0 +1,164 @@ +#include "hist.h" + +struct rb_root hist; +struct rb_root collapse_hists; +struct rb_root output_hists; +int callchain; + +struct callchain_param callchain_param = { + .mode = CHAIN_GRAPH_REL, + .min_percent = 0.5 +}; + +unsigned long total; +unsigned long total_mmap; +unsigned long total_comm; +unsigned long total_fork; +unsigned long total_unknown; +unsigned long total_lost; + +/* + * histogram, sorted on item, collects counts + */ + +int64_t +hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) +{ + struct sort_entry *se; + int64_t cmp = 0; + + list_for_each_entry(se, &hist_entry__sort_list, list) { + cmp = se->cmp(left, right); + if (cmp) + break; + } + + return cmp; +} + +int64_t +hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) +{ + struct sort_entry *se; + int64_t cmp = 0; + + list_for_each_entry(se, &hist_entry__sort_list, list) { + int64_t (*f)(struct hist_entry *, struct hist_entry *); + + f = se->collapse ?: se->cmp; + + cmp = f(left, right); + if (cmp) + break; + } + + return cmp; +} + +void 
hist_entry__free(struct hist_entry *he) +{ + free(he); +} + +/* + * collapse the histogram + */ + +void collapse__insert_entry(struct hist_entry *he) +{ + struct rb_node **p = &collapse_hists.rb_node; + struct rb_node *parent = NULL; + struct hist_entry *iter; + int64_t cmp; + + while (*p != NULL) { + parent = *p; + iter = rb_entry(parent, struct hist_entry, rb_node); + + cmp = hist_entry__collapse(iter, he); + + if (!cmp) { + iter->count += he->count; + hist_entry__free(he); + return; + } + + if (cmp < 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + rb_link_node(&he->rb_node, parent, p); + rb_insert_color(&he->rb_node, &collapse_hists); +} + +void collapse__resort(void) +{ + struct rb_node *next; + struct hist_entry *n; + + if (!sort__need_collapse) + return; + + next = rb_first(&hist); + while (next) { + n = rb_entry(next, struct hist_entry, rb_node); + next = rb_next(&n->rb_node); + + rb_erase(&n->rb_node, &hist); + collapse__insert_entry(n); + } +} + +/* + * reverse the map, sort on count. + */ + +void output__insert_entry(struct hist_entry *he, u64 min_callchain_hits) +{ + struct rb_node **p = &output_hists.rb_node; + struct rb_node *parent = NULL; + struct hist_entry *iter; + + if (callchain) + callchain_param.sort(&he->sorted_chain, &he->callchain, + min_callchain_hits, &callchain_param); + + while (*p != NULL) { + parent = *p; + iter = rb_entry(parent, struct hist_entry, rb_node); + + if (he->count > iter->count) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + rb_link_node(&he->rb_node, parent, p); + rb_insert_color(&he->rb_node, &output_hists); +} + +void output__resort(u64 total_samples) +{ + struct rb_node *next; + struct hist_entry *n; + struct rb_root *tree = &hist; + u64 min_callchain_hits; + + min_callchain_hits = + total_samples * (callchain_param.min_percent / 100); + + if (sort__need_collapse) + tree = &collapse_hists; + + next = rb_first(tree); + + while (next) { + n = rb_entry(next, struct hist_entry, rb_node); + next = rb_next(&n->rb_node); + + rb_erase(&n->rb_node, tree); + output__insert_entry(n, min_callchain_hits); + } +} diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h new file mode 100644 index 00000000000..9a8daa12b43 --- /dev/null +++ b/tools/perf/util/hist.h @@ -0,0 +1,47 @@ +#ifndef __PERF_HIST_H +#define __PERF_HIST_H +#include "../builtin.h" + +#include "util.h" + +#include "color.h" +#include +#include "cache.h" +#include +#include "symbol.h" +#include "string.h" +#include "callchain.h" +#include "strlist.h" +#include "values.h" + +#include "../perf.h" +#include "debug.h" +#include "header.h" + +#include "parse-options.h" +#include "parse-events.h" + +#include "thread.h" +#include "sort.h" + +extern struct rb_root hist; +extern struct rb_root collapse_hists; +extern struct rb_root output_hists; +extern int callchain; +extern struct callchain_param callchain_param; +extern unsigned long total; +extern unsigned long total_mmap; +extern unsigned long total_comm; +extern unsigned long total_fork; +extern unsigned long total_unknown; +extern unsigned long total_lost; + +extern int64_t hist_entry__cmp(struct hist_entry *, struct hist_entry *); +extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *); +extern void hist_entry__free(struct hist_entry *); +extern void collapse__insert_entry(struct hist_entry *); +extern void collapse__resort(void); +extern void output__insert_entry(struct hist_entry *, u64); +extern void output__resort(u64); + +#endif /* __PERF_HIST_H */ From 
1b46cddfccfec4cc67b187fb53d78198de6a057c Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 28 Sep 2009 14:48:46 -0300 Subject: [PATCH 076/470] perf tools: Use rb_tree for maps MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Threads can have many and kernel modules will be represented as a tree of maps as well. Ah, and for a perf.data with 146607 samples: Before: [root@doppio ~]# perf stat -r 5 perf report > /dev/null Performance counter stats for 'perf report' (5 runs): 699.823680 task-clock-msecs # 0.991 CPUs ( +- 0.454% ) 74 context-switches # 0.000 M/sec ( +- 1.709% ) 2 CPU-migrations # 0.000 M/sec ( +- 17.008% ) 23114 page-faults # 0.033 M/sec ( +- 0.000% ) 1381257019 cycles # 1973.721 M/sec ( +- 0.290% ) 1456894438 instructions # 1.055 IPC ( +- 0.007% ) 18779818 cache-references # 26.835 M/sec ( +- 0.380% ) 641799 cache-misses # 0.917 M/sec ( +- 1.200% ) 0.705972729 seconds time elapsed ( +- 0.501% ) [root@doppio ~]# After Performance counter stats for 'perf report' (5 runs): 691.261451 task-clock-msecs # 0.993 CPUs ( +- 0.307% ) 72 context-switches # 0.000 M/sec ( +- 0.829% ) 6 CPU-migrations # 0.000 M/sec ( +- 18.409% ) 23127 page-faults # 0.033 M/sec ( +- 0.000% ) 1366395876 cycles # 1976.670 M/sec ( +- 0.153% ) 1443136016 instructions # 1.056 IPC ( +- 0.012% ) 17956402 cache-references # 25.976 M/sec ( +- 0.325% ) 661924 cache-misses # 0.958 M/sec ( +- 1.335% ) 0.696127275 seconds time elapsed ( +- 0.377% ) I.e. we see some speedup too. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Peter Zijlstra Cc: Mike Galbraith Cc: "H. Peter Anvin" LKML-Reference: <20090928174846.GA3361@ghostprotocols.net> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 1 + tools/perf/util/event.h | 4 +- tools/perf/util/thread.c | 119 +++++++++++++++++++++++++-------------- tools/perf/util/thread.h | 12 +++- 4 files changed, 90 insertions(+), 46 deletions(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 3a99a9fda64..055290a5b83 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -341,6 +341,7 @@ LIB_H += util/color.h LIB_H += util/values.h LIB_H += util/sort.h LIB_H += util/hist.h +LIB_H += util/thread.h LIB_OBJS += util/abspath.o LIB_OBJS += util/alias.o diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index c31a5da6458..4c69eb55380 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -3,7 +3,7 @@ #include "../perf.h" #include "util.h" -#include +#include enum { SHOW_KERNEL = 1, @@ -79,7 +79,7 @@ typedef union event_union { } event_t; struct map { - struct list_head node; + struct rb_node rb_node; u64 start; u64 end; u64 pgoff; diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 45efb5db0d1..9d0945cc66d 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -15,7 +15,7 @@ static struct thread *thread__new(pid_t pid) self->comm = malloc(32); if (self->comm) snprintf(self->comm, 32, ":%d", self->pid); - INIT_LIST_HEAD(&self->maps); + self->maps = RB_ROOT; } return self; @@ -31,11 +31,13 @@ int thread__set_comm(struct thread *self, const char *comm) static size_t thread__fprintf(struct thread *self, FILE *fp) { - struct map *pos; + struct rb_node *nd; size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm); - list_for_each_entry(pos, &self->maps, node) + for (nd = rb_first(&self->maps); nd; nd = rb_next(nd)) { + struct map *pos = rb_entry(nd, struct map, rb_node); ret += map__fprintf(pos, fp); + } return ret; } @@ -93,42 +95,90 
@@ register_idle_thread(struct rb_root *threads, struct thread **last_match) return thread; } -void thread__insert_map(struct thread *self, struct map *map) +static void thread__remove_overlappings(struct thread *self, struct map *map) { - struct map *pos, *tmp; + struct rb_node *next = rb_first(&self->maps); - list_for_each_entry_safe(pos, tmp, &self->maps, node) { - if (map__overlap(pos, map)) { - if (verbose >= 2) { - printf("overlapping maps:\n"); - map__fprintf(map, stdout); - map__fprintf(pos, stdout); - } + while (next) { + struct map *pos = rb_entry(next, struct map, rb_node); + next = rb_next(&pos->rb_node); - if (map->start <= pos->start && map->end > pos->start) - pos->start = map->end; + if (!map__overlap(pos, map)) + continue; - if (map->end >= pos->end && map->start < pos->end) - pos->end = map->start; + if (verbose >= 2) { + printf("overlapping maps:\n"); + map__fprintf(map, stdout); + map__fprintf(pos, stdout); + } - if (verbose >= 2) { - printf("after collision:\n"); - map__fprintf(pos, stdout); - } + if (map->start <= pos->start && map->end > pos->start) + pos->start = map->end; - if (pos->start >= pos->end) { - list_del_init(&pos->node); - free(pos); - } + if (map->end >= pos->end && map->start < pos->end) + pos->end = map->start; + + if (verbose >= 2) { + printf("after collision:\n"); + map__fprintf(pos, stdout); + } + + if (pos->start >= pos->end) { + rb_erase(&pos->rb_node, &self->maps); + free(pos); } } +} - list_add_tail(&map->node, &self->maps); +void maps__insert(struct rb_root *maps, struct map *map) +{ + struct rb_node **p = &maps->rb_node; + struct rb_node *parent = NULL; + const u64 ip = map->start; + struct map *m; + + while (*p != NULL) { + parent = *p; + m = rb_entry(parent, struct map, rb_node); + if (ip < m->start) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + rb_link_node(&map->rb_node, parent, p); + rb_insert_color(&map->rb_node, maps); +} + +struct map *maps__find(struct rb_root *maps, u64 ip) +{ + struct rb_node **p = &maps->rb_node; + struct rb_node *parent = NULL; + struct map *m; + + while (*p != NULL) { + parent = *p; + m = rb_entry(parent, struct map, rb_node); + if (ip < m->start) + p = &(*p)->rb_left; + else if (ip > m->end) + p = &(*p)->rb_right; + else + return m; + } + + return NULL; +} + +void thread__insert_map(struct thread *self, struct map *map) +{ + thread__remove_overlappings(self, map); + maps__insert(&self->maps, map); } int thread__fork(struct thread *self, struct thread *parent) { - struct map *map; + struct rb_node *nd; if (self->comm) free(self->comm); @@ -136,7 +186,8 @@ int thread__fork(struct thread *self, struct thread *parent) if (!self->comm) return -ENOMEM; - list_for_each_entry(map, &parent->maps, node) { + for (nd = rb_first(&parent->maps); nd; nd = rb_next(nd)) { + struct map *map = rb_entry(nd, struct map, rb_node); struct map *new = map__clone(map); if (!new) return -ENOMEM; @@ -146,20 +197,6 @@ int thread__fork(struct thread *self, struct thread *parent) return 0; } -struct map *thread__find_map(struct thread *self, u64 ip) -{ - struct map *pos; - - if (self == NULL) - return NULL; - - list_for_each_entry(pos, &self->maps, node) - if (ip >= pos->start && ip <= pos->end) - return pos; - - return NULL; -} - size_t threads__fprintf(FILE *fp, struct rb_root *threads) { size_t ret = 0; diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 693ed1ea10b..bbb37c1a52e 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -2,13 +2,12 @@ #define __PERF_THREAD_H #include -#include 
#include #include "symbol.h" struct thread { struct rb_node rb_node; - struct list_head maps; + struct rb_root maps; pid_t pid; char shortname[3]; char *comm; @@ -21,7 +20,14 @@ struct thread * register_idle_thread(struct rb_root *threads, struct thread **last_match); void thread__insert_map(struct thread *self, struct map *map); int thread__fork(struct thread *self, struct thread *parent); -struct map *thread__find_map(struct thread *self, u64 ip); size_t threads__fprintf(FILE *fp, struct rb_root *threads); +void maps__insert(struct rb_root *maps, struct map *map); +struct map *maps__find(struct rb_root *maps, u64 ip); + +static inline struct map *thread__find_map(struct thread *self, u64 ip) +{ + return self ? maps__find(&self->maps, ip) : NULL; +} + #endif /* __PERF_THREAD_H */ From a80deb622dba7dfb65d9e27b6b74b7c1963c3635 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 28 Sep 2009 15:23:51 -0300 Subject: [PATCH 077/470] perf sched: Remove dead code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Several variables are not used at all, cut'n'paste leftovers. Also check if the sample_type is RAW earlier, to avoid needless searches. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: "H. Peter Anvin" Cc: Peter Zijlstra Cc: Mike Galbraith Signed-off-by: Ingo Molnar --- tools/perf/builtin-sched.c | 34 ++++------------------------------ 1 file changed, 4 insertions(+), 30 deletions(-) diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index ea9c15c0cdf..4470f253570 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -1544,16 +1544,15 @@ process_raw_event(event_t *raw_event __used, void *more_data, static int process_sample_event(event_t *event, unsigned long offset, unsigned long head) { - char level; - int show = 0; - struct dso *dso = NULL; struct thread *thread; u64 ip = event->ip.ip; u64 timestamp = -1; u32 cpu = -1; u64 period = 1; void *more_data = event->ip.__more_data; - int cpumode; + + if (!(sample_type & PERF_SAMPLE_RAW)) + return 0; thread = threads__findnew(event->ip.pid, &threads, &last_match); @@ -1589,32 +1588,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) return -1; } - cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - - if (cpumode == PERF_RECORD_MISC_KERNEL) { - show = SHOW_KERNEL; - level = 'k'; - - dso = kernel_dso; - - dump_printf(" ...... dso: %s\n", dso->name); - - } else if (cpumode == PERF_RECORD_MISC_USER) { - - show = SHOW_USER; - level = '.'; - - } else { - show = SHOW_HV; - level = 'H'; - - dso = hypervisor_dso; - - dump_printf(" ...... dso: [hypervisor]\n"); - } - - if (sample_type & PERF_SAMPLE_RAW) - process_raw_event(event, more_data, cpu, timestamp, thread); + process_raw_event(event, more_data, cpu, timestamp, thread); return 0; } From cad3071424edd7854f63aa80d09473e84f49ed79 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 28 Sep 2009 17:08:18 -0300 Subject: [PATCH 078/470] perf trace: Remove dead code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Several variables are not used at all, cut'n'paste leftovers. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Peter Zijlstra Cc: Mike Galbraith Cc: "H. 
Peter Anvin" LKML-Reference: <20090928200818.GF3361@ghostprotocols.net> Signed-off-by: Ingo Molnar --- tools/perf/builtin-trace.c | 28 ---------------------------- 1 file changed, 28 deletions(-) diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index e9d256e2f47..2f938887335 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -53,16 +53,12 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head) static int process_sample_event(event_t *event, unsigned long offset, unsigned long head) { - char level; - int show = 0; - struct dso *dso = NULL; struct thread *thread; u64 ip = event->ip.ip; u64 timestamp = -1; u32 cpu = -1; u64 period = 1; void *more_data = event->ip.__more_data; - int cpumode; thread = threads__findnew(event->ip.pid, &threads, &last_match); @@ -98,30 +94,6 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) return -1; } - cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - - if (cpumode == PERF_RECORD_MISC_KERNEL) { - show = SHOW_KERNEL; - level = 'k'; - - dso = kernel_dso; - - dump_printf(" ...... dso: %s\n", dso->name); - - } else if (cpumode == PERF_RECORD_MISC_USER) { - - show = SHOW_USER; - level = '.'; - - } else { - show = SHOW_HV; - level = 'H'; - - dso = hypervisor_dso; - - dump_printf(" ...... dso: [hypervisor]\n"); - } - if (sample_type & PERF_SAMPLE_RAW) { struct { u32 size; From 2ccdc450e658053681202d42ac64b3638f22dc1a Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 24 Sep 2009 14:24:00 -0700 Subject: [PATCH 079/470] perf top: Remove dead {min,max}_ip unused variables MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: "H. Peter Anvin" Cc: Peter Zijlstra Cc: Mike Galbraith LKML-Reference: <20090924212400.GA15321@ghostprotocols.net> Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 1ca88896eee..bf464ce7e3e 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -96,9 +96,6 @@ static int display_weighted = -1; * Symbols */ -static u64 min_ip; -static u64 max_ip = -1ll; - struct sym_entry { struct rb_node rb_node; struct list_head node; @@ -826,8 +823,6 @@ static int symbol_filter(struct dso *self, struct symbol *sym) static int parse_symbols(void) { - struct rb_node *node; - struct symbol *sym; int use_modules = vmlinux_name ? 1 : 0; kernel_dso = dso__new("[kernel]", sizeof(struct sym_entry)); @@ -837,14 +832,6 @@ static int parse_symbols(void) if (dso__load_kernel(kernel_dso, vmlinux_name, symbol_filter, verbose, use_modules) <= 0) goto out_delete_dso; - node = rb_first(&kernel_dso->syms); - sym = rb_entry(node, struct symbol, rb_node); - min_ip = sym->start; - - node = rb_last(&kernel_dso->syms); - sym = rb_entry(node, struct symbol, rb_node); - max_ip = sym->end; - if (dump_symtab) dso__fprintf(kernel_dso, stderr); From 439d473b4777de510e1322168ac6f2f377ecd5bc Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 2 Oct 2009 03:29:58 -0300 Subject: [PATCH 080/470] perf tools: Rewrite and improve support for kernel modules MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Representing modules as struct map entries, backed by a DSO, etc, using /proc/modules to find where the module is loaded. 
DSOs now can have a short and long name, so that in verbose mode we can show exactly which .ko or vmlinux image was used. As kernel modules now are a DSO separate from the kernel, we can ask for just the hits for a particular set of kernel modules, just like we can do with shared libraries: [root@doppio linux-2.6-tip]# perf report -n --vmlinux /home/acme/git/build/tip-recvmmsg/vmlinux --modules --dsos \[drm\] | head -15 84.58% 13266 Xorg [k] drm_clflush_pages 4.02% 630 Xorg [k] trace_kmalloc.clone.0 3.95% 619 Xorg [k] drm_ioctl 2.07% 324 Xorg [k] drm_addbufs 1.68% 263 Xorg [k] drm_gem_close_ioctl 0.77% 120 Xorg [k] drm_setmaster_ioctl 0.70% 110 Xorg [k] drm_lastclose 0.68% 106 Xorg [k] drm_open 0.54% 85 Xorg [k] drm_mm_search_free [root@doppio linux-2.6-tip]# Specifying --dsos /lib/modules/2.6.31-tip/kernel/drivers/gpu/drm/drm.ko would have the same effect. Allowing specifying just 'drm.ko' is left for another patch. Processing kallsyms so that per kernel module struct map are instantiated was also left for another patch. That will allow removing the module name from each of its symbols. struct symbol was reduced by removing the ->module backpointer and moving it (well now the map) to struct symbol_entry in perf top, that is its only user right now. The total linecount went down by ~500 lines. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: "H. Peter Anvin" Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Avi Kivity Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 2 - tools/perf/builtin-annotate.c | 71 ++--- tools/perf/builtin-report.c | 79 +++-- tools/perf/builtin-top.c | 74 ++--- tools/perf/util/event.h | 6 +- tools/perf/util/module.c | 545 ---------------------------------- tools/perf/util/module.h | 53 ---- tools/perf/util/sort.c | 38 ++- tools/perf/util/sort.h | 7 +- tools/perf/util/symbol.c | 459 ++++++++++++++++++---------- tools/perf/util/symbol.h | 20 +- tools/perf/util/thread.c | 34 +-- tools/perf/util/thread.h | 4 + 13 files changed, 458 insertions(+), 934 deletions(-) delete mode 100644 tools/perf/util/module.c delete mode 100644 tools/perf/util/module.h diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 055290a5b83..8e7509f2d88 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -336,7 +336,6 @@ LIB_H += util/strlist.h LIB_H += util/run-command.h LIB_H += util/sigchain.h LIB_H += util/symbol.h -LIB_H += util/module.h LIB_H += util/color.h LIB_H += util/values.h LIB_H += util/sort.h @@ -364,7 +363,6 @@ LIB_OBJS += util/usage.o LIB_OBJS += util/wrapper.o LIB_OBJS += util/sigchain.o LIB_OBJS += util/symbol.o -LIB_OBJS += util/module.o LIB_OBJS += util/color.o LIB_OBJS += util/pager.o LIB_OBJS += util/header.o diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index df516dce954..7d5a3b1bcda 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -63,6 +63,7 @@ static void hist_hit(struct hist_entry *he, u64 ip) return; sym_size = sym->end - sym->start; + ip = he->map->map_ip(he->map, ip); offset = ip - sym->start; if (offset >= sym_size) @@ -80,7 +81,7 @@ static void hist_hit(struct hist_entry *he, u64 ip) } static int -hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, +hist_entry__add(struct thread *thread, struct map *map, struct symbol *sym, u64 ip, char level) { struct rb_node **p = &hist.rb_node; @@ -89,7 +90,6 @@ hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, struct hist_entry entry = { .thread = thread, .map = map, - .dso = dso, .sym = sym, 
.ip = ip, .level = level, @@ -130,10 +130,10 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) { char level; int show = 0; - struct dso *dso = NULL; struct thread *thread; u64 ip = event->ip.ip; struct map *map = NULL; + struct symbol *sym = NULL; thread = threads__findnew(event->ip.pid, &threads, &last_match); @@ -155,32 +155,35 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) if (event->header.misc & PERF_RECORD_MISC_KERNEL) { show = SHOW_KERNEL; level = 'k'; - - dso = kernel_dso; - - dump_printf(" ...... dso: %s\n", dso->name); - + sym = kernel_maps__find_symbol(ip, &map); + dump_printf(" ...... dso: %s\n", + map ? map->dso->long_name : ""); } else if (event->header.misc & PERF_RECORD_MISC_USER) { - show = SHOW_USER; level = '.'; - map = thread__find_map(thread, ip); if (map != NULL) { +got_map: ip = map->map_ip(map, ip); - dso = map->dso; + sym = map->dso->find_symbol(map->dso, ip); } else { /* * If this is outside of all known maps, * and is a negative address, try to look it * up in the kernel dso, as it might be a - * vsyscall (which executes in user-mode): + * vsyscall or vdso (which executes in user-mode). + * + * XXX This is nasty, we should have a symbol list in + * the "[vdso]" dso, but for now lets use the old + * trick of looking in the whole kernel symbol list. */ - if ((long long)ip < 0) - dso = kernel_dso; + if ((long long)ip < 0) { + map = kernel_map; + goto got_map; + } } - dump_printf(" ...... dso: %s\n", dso ? dso->name : ""); - + dump_printf(" ...... dso: %s\n", + map ? map->dso->long_name : ""); } else { show = SHOW_HV; level = 'H'; @@ -188,12 +191,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) } if (show & show_mask) { - struct symbol *sym = NULL; - - if (dso) - sym = dso->find_symbol(dso, ip); - - if (hist_entry__add(thread, map, dso, sym, ip, level)) { + if (hist_entry__add(thread, map, sym, ip, level)) { fprintf(stderr, "problem incrementing symbol count, skipping event\n"); return -1; @@ -313,7 +311,7 @@ process_event(event_t *event, unsigned long offset, unsigned long head) } static int -parse_line(FILE *file, struct symbol *sym, u64 start, u64 len) +parse_line(FILE *file, struct symbol *sym, u64 len) { char *line = NULL, *tmp, *tmp2; static const char *prev_line; @@ -363,7 +361,7 @@ parse_line(FILE *file, struct symbol *sym, u64 start, u64 len) const char *color; struct sym_ext *sym_ext = sym->priv; - offset = line_ip - start; + offset = line_ip - sym->start; if (offset < len) hits = sym->hist[offset]; @@ -442,7 +440,7 @@ static void free_source_line(struct symbol *sym, int len) /* Get the filename:line for the colored entries */ static void -get_source_line(struct symbol *sym, u64 start, int len, const char *filename) +get_source_line(struct symbol *sym, int len, const char *filename) { int i; char cmd[PATH_MAX * 2]; @@ -467,7 +465,7 @@ get_source_line(struct symbol *sym, u64 start, int len, const char *filename) if (sym_ext[i].percent <= 0.5) continue; - offset = start + i; + offset = sym->start + i; sprintf(cmd, "addr2line -e %s %016llx", filename, offset); fp = popen(cmd, "r"); if (!fp) @@ -519,31 +517,23 @@ static void print_summary(const char *filename) static void annotate_sym(struct dso *dso, struct symbol *sym) { - const char *filename = dso->name, *d_filename; - u64 start, end, len; + const char *filename = dso->long_name, *d_filename; + u64 len; char command[PATH_MAX*2]; FILE *file; if (!filename) return; - if (sym->module) - filename = 
sym->module->path; - else if (dso == kernel_dso) - filename = vmlinux_name; - start = sym->obj_start; - if (!start) - start = sym->start; if (full_paths) d_filename = filename; else d_filename = basename(filename); - end = start + sym->end - sym->start + 1; len = sym->end - sym->start; if (print_line) { - get_source_line(sym, start, len, filename); + get_source_line(sym, len, filename); print_summary(filename); } @@ -552,10 +542,11 @@ static void annotate_sym(struct dso *dso, struct symbol *sym) printf("------------------------------------------------\n"); if (verbose >= 2) - printf("annotating [%p] %30s : [%p] %30s\n", dso, dso->name, sym, sym->name); + printf("annotating [%p] %30s : [%p] %30s\n", + dso, dso->long_name, sym, sym->name); sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s|grep -v %s", - (u64)start, (u64)end, filename, filename); + sym->start, sym->end, filename, filename); if (verbose >= 3) printf("doing: %s\n", command); @@ -565,7 +556,7 @@ static void annotate_sym(struct dso *dso, struct symbol *sym) return; while (!feof(file)) { - if (parse_line(file, sym, start, len) < 0) + if (parse_line(file, sym, len) < 0) break; } diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index c1a54fc8527..3ed3baf96ff 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -349,22 +349,17 @@ static int thread__set_comm_adjust(struct thread *self, const char *comm) static struct symbol * -resolve_symbol(struct thread *thread, struct map **mapp, - struct dso **dsop, u64 *ipp) +resolve_symbol(struct thread *thread, struct map **mapp, u64 *ipp) { - struct dso *dso = dsop ? *dsop : NULL; struct map *map = mapp ? *mapp : NULL; u64 ip = *ipp; - if (!thread) - return NULL; - - if (dso) - goto got_dso; - if (map) goto got_map; + if (!thread) + return NULL; + map = thread__find_map(thread, ip); if (map != NULL) { /* @@ -379,29 +374,29 @@ resolve_symbol(struct thread *thread, struct map **mapp, *mapp = map; got_map: ip = map->map_ip(map, ip); - - dso = map->dso; } else { /* * If this is outside of all known maps, * and is a negative address, try to look it * up in the kernel dso, as it might be a - * vsyscall (which executes in user-mode): + * vsyscall or vdso (which executes in user-mode). + * + * XXX This is nasty, we should have a symbol list in + * the "[vdso]" dso, but for now lets use the old + * trick of looking in the whole kernel symbol list. */ - if ((long long)ip < 0) - dso = kernel_dso; + if ((long long)ip < 0) { + map = kernel_map; + if (mapp) + *mapp = map; + } } - dump_printf(" ...... dso: %s\n", dso ? dso->name : ""); + dump_printf(" ...... dso: %s\n", + map ? map->dso->long_name : ""); dump_printf(" ...... map: %Lx -> %Lx\n", *ipp, ip); *ipp = ip; - if (dsop) - *dsop = dso; - - if (!dso) - return NULL; -got_dso: - return dso->find_symbol(dso, ip); + return map ? 
map->dso->find_symbol(map->dso, ip) : NULL; } static int call__match(struct symbol *sym) @@ -413,7 +408,7 @@ static int call__match(struct symbol *sym) } static struct symbol ** -resolve_callchain(struct thread *thread, struct map *map __used, +resolve_callchain(struct thread *thread, struct map *map, struct ip_callchain *chain, struct hist_entry *entry) { u64 context = PERF_CONTEXT_MAX; @@ -430,8 +425,7 @@ resolve_callchain(struct thread *thread, struct map *map __used, for (i = 0; i < chain->nr; i++) { u64 ip = chain->ips[i]; - struct dso *dso = NULL; - struct symbol *sym; + struct symbol *sym = NULL; if (ip >= PERF_CONTEXT_MAX) { context = ip; @@ -440,17 +434,15 @@ resolve_callchain(struct thread *thread, struct map *map __used, switch (context) { case PERF_CONTEXT_HV: - dso = hypervisor_dso; break; case PERF_CONTEXT_KERNEL: - dso = kernel_dso; + sym = kernel_maps__find_symbol(ip, &map); break; default: + sym = resolve_symbol(thread, &map, &ip); break; } - sym = resolve_symbol(thread, NULL, &dso, &ip); - if (sym) { if (sort__has_parent && call__match(sym) && !entry->parent) @@ -469,7 +461,7 @@ resolve_callchain(struct thread *thread, struct map *map __used, */ static int -hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, +hist_entry__add(struct thread *thread, struct map *map, struct symbol *sym, u64 ip, struct ip_callchain *chain, char level, u64 count) { @@ -480,7 +472,6 @@ hist_entry__add(struct thread *thread, struct map *map, struct dso *dso, struct hist_entry entry = { .thread = thread, .map = map, - .dso = dso, .sym = sym, .ip = ip, .level = level, @@ -641,7 +632,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) { char level; int show = 0; - struct dso *dso = NULL; + struct symbol *sym = NULL; struct thread *thread; u64 ip = event->ip.ip; u64 period = 1; @@ -700,35 +691,35 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) show = SHOW_KERNEL; level = 'k'; - dso = kernel_dso; - - dump_printf(" ...... dso: %s\n", dso->name); - + sym = kernel_maps__find_symbol(ip, &map); + dump_printf(" ...... dso: %s\n", + map ? map->dso->long_name : ""); } else if (cpumode == PERF_RECORD_MISC_USER) { show = SHOW_USER; level = '.'; + sym = resolve_symbol(thread, &map, &ip); } else { show = SHOW_HV; level = 'H'; - dso = hypervisor_dso; - dump_printf(" ...... 
dso: [hypervisor]\n"); } if (show & show_mask) { - struct symbol *sym = resolve_symbol(thread, &map, &dso, &ip); - - if (dso_list && (!dso || !dso->name || - !strlist__has_entry(dso_list, dso->name))) + if (dso_list && + (!map || !map->dso || + !(strlist__has_entry(dso_list, map->dso->short_name) || + (map->dso->short_name != map->dso->long_name && + strlist__has_entry(dso_list, map->dso->long_name))))) return 0; - if (sym_list && (!sym || !strlist__has_entry(sym_list, sym->name))) + if (sym_list && sym && !strlist__has_entry(sym_list, sym->name)) return 0; - if (hist_entry__add(thread, map, dso, sym, ip, chain, level, period)) { + if (hist_entry__add(thread, map, sym, ip, + chain, level, period)) { eprintf("problem incrementing symbol count, skipping event\n"); return -1; } diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index bf464ce7e3e..befef842757 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -22,6 +22,7 @@ #include "util/symbol.h" #include "util/color.h" +#include "util/thread.h" #include "util/util.h" #include #include "util/parse-options.h" @@ -103,6 +104,7 @@ struct sym_entry { unsigned long snap_count; double weight; int skip; + struct map *map; struct source_line *source; struct source_line *lines; struct source_line **lines_tail; @@ -116,12 +118,11 @@ struct sym_entry { static void parse_source(struct sym_entry *syme) { struct symbol *sym; - struct module *module; - struct section *section = NULL; + struct map *map; FILE *file; char command[PATH_MAX*2]; - const char *path = vmlinux_name; - u64 start, end, len; + const char *path; + u64 len; if (!syme) return; @@ -132,27 +133,15 @@ static void parse_source(struct sym_entry *syme) } sym = (struct symbol *)(syme + 1); - module = sym->module; + map = syme->map; + path = map->dso->long_name; - if (module) - path = module->path; - if (!path) - return; - - start = sym->obj_start; - if (!start) - start = sym->start; - - if (module) { - section = module->sections->find_section(module->sections, ".text"); - if (section) - start -= section->vma; - } - - end = start + sym->end - sym->start + 1; len = sym->end - sym->start; - sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s", start, end, path); + sprintf(command, + "objdump --start-address=0x%016Lx " + "--stop-address=0x%016Lx -dS %s", + sym->start, sym->end, path); file = popen(command, "r"); if (!file) @@ -184,13 +173,11 @@ static void parse_source(struct sym_entry *syme) if (strlen(src->line)>8 && src->line[8] == ':') { src->eip = strtoull(src->line, NULL, 16); - if (section) - src->eip += section->vma; + src->eip += map->start; } if (strlen(src->line)>8 && src->line[16] == ':') { src->eip = strtoull(src->line, NULL, 16); - if (section) - src->eip += section->vma; + src->eip += map->start; } } pclose(file); @@ -242,16 +229,9 @@ static void lookup_sym_source(struct sym_entry *syme) struct symbol *symbol = (struct symbol *)(syme + 1); struct source_line *line; char pattern[PATH_MAX]; - char *idx; sprintf(pattern, "<%s>:", symbol->name); - if (symbol->module) { - idx = strstr(pattern, "\t"); - if (idx) - *idx = 0; - } - pthread_mutex_lock(&syme->source_lock); for (line = syme->lines; line; line = line->next) { if (strstr(line->line, pattern)) { @@ -513,8 +493,8 @@ static void print_sym_table(void) if (verbose) printf(" - %016llx", sym->start); printf(" : %s", sym->name); - if (sym->module) - printf("\t[%s]", sym->module->name); + if (syme->map->dso->name[0] == '[') + printf(" \t%s", syme->map->dso->name); 
printf("\n"); } } @@ -784,7 +764,7 @@ static const char *skip_symbols[] = { NULL }; -static int symbol_filter(struct dso *self, struct symbol *sym) +static int symbol_filter(struct map *map, struct symbol *sym) { struct sym_entry *syme; const char *name = sym->name; @@ -806,7 +786,8 @@ static int symbol_filter(struct dso *self, struct symbol *sym) strstr(name, "_text_end")) return 1; - syme = dso__sym_priv(self, sym); + syme = dso__sym_priv(map->dso, sym); + syme->map = map; pthread_mutex_init(&syme->source_lock, NULL); if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) sym_filter_entry = syme; @@ -825,22 +806,14 @@ static int parse_symbols(void) { int use_modules = vmlinux_name ? 1 : 0; - kernel_dso = dso__new("[kernel]", sizeof(struct sym_entry)); - if (kernel_dso == NULL) + if (dsos__load_kernel(vmlinux_name, sizeof(struct sym_entry), + symbol_filter, verbose, use_modules) <= 0) return -1; - if (dso__load_kernel(kernel_dso, vmlinux_name, symbol_filter, verbose, use_modules) <= 0) - goto out_delete_dso; - if (dump_symtab) - dso__fprintf(kernel_dso, stderr); + dsos__fprintf(stderr); return 0; - -out_delete_dso: - dso__delete(kernel_dso); - kernel_dso = NULL; - return -1; } /* @@ -848,10 +821,11 @@ out_delete_dso: */ static void record_ip(u64 ip, int counter) { - struct symbol *sym = dso__find_symbol(kernel_dso, ip); + struct map *map; + struct symbol *sym = kernel_maps__find_symbol(ip, &map); if (sym != NULL) { - struct sym_entry *syme = dso__sym_priv(kernel_dso, sym); + struct sym_entry *syme = dso__sym_priv(map->dso, sym); if (!syme->skip) { syme->count[counter]++; diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 4c69eb55380..a39520e6ae8 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -3,6 +3,7 @@ #include "../perf.h" #include "util.h" +#include #include enum { @@ -79,7 +80,10 @@ typedef union event_union { } event_t; struct map { - struct rb_node rb_node; + union { + struct rb_node rb_node; + struct list_head node; + }; u64 start; u64 end; u64 pgoff; diff --git a/tools/perf/util/module.c b/tools/perf/util/module.c deleted file mode 100644 index 0d8c85defcd..00000000000 --- a/tools/perf/util/module.c +++ /dev/null @@ -1,545 +0,0 @@ -#include "util.h" -#include "../perf.h" -#include "string.h" -#include "module.h" - -#include -#include -#include -#include -#include -#include - -static unsigned int crc32(const char *p, unsigned int len) -{ - int i; - unsigned int crc = 0; - - while (len--) { - crc ^= *p++; - for (i = 0; i < 8; i++) - crc = (crc >> 1) ^ ((crc & 1) ? 
0xedb88320 : 0); - } - return crc; -} - -/* module section methods */ - -struct sec_dso *sec_dso__new_dso(const char *name) -{ - struct sec_dso *self = malloc(sizeof(*self) + strlen(name) + 1); - - if (self != NULL) { - strcpy(self->name, name); - self->secs = RB_ROOT; - self->find_section = sec_dso__find_section; - } - - return self; -} - -static void sec_dso__delete_section(struct section *self) -{ - free(((void *)self)); -} - -void sec_dso__delete_sections(struct sec_dso *self) -{ - struct section *pos; - struct rb_node *next = rb_first(&self->secs); - - while (next) { - pos = rb_entry(next, struct section, rb_node); - next = rb_next(&pos->rb_node); - rb_erase(&pos->rb_node, &self->secs); - sec_dso__delete_section(pos); - } -} - -void sec_dso__delete_self(struct sec_dso *self) -{ - sec_dso__delete_sections(self); - free(self); -} - -static void sec_dso__insert_section(struct sec_dso *self, struct section *sec) -{ - struct rb_node **p = &self->secs.rb_node; - struct rb_node *parent = NULL; - const u64 hash = sec->hash; - struct section *s; - - while (*p != NULL) { - parent = *p; - s = rb_entry(parent, struct section, rb_node); - if (hash < s->hash) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - rb_link_node(&sec->rb_node, parent, p); - rb_insert_color(&sec->rb_node, &self->secs); -} - -struct section *sec_dso__find_section(struct sec_dso *self, const char *name) -{ - struct rb_node *n; - u64 hash; - int len; - - if (self == NULL) - return NULL; - - len = strlen(name); - hash = crc32(name, len); - - n = self->secs.rb_node; - - while (n) { - struct section *s = rb_entry(n, struct section, rb_node); - - if (hash < s->hash) - n = n->rb_left; - else if (hash > s->hash) - n = n->rb_right; - else { - if (!strcmp(name, s->name)) - return s; - else - n = rb_next(&s->rb_node); - } - } - - return NULL; -} - -static size_t sec_dso__fprintf_section(struct section *self, FILE *fp) -{ - return fprintf(fp, "name:%s vma:%llx path:%s\n", - self->name, self->vma, self->path); -} - -size_t sec_dso__fprintf(struct sec_dso *self, FILE *fp) -{ - size_t ret = fprintf(fp, "dso: %s\n", self->name); - - struct rb_node *nd; - for (nd = rb_first(&self->secs); nd; nd = rb_next(nd)) { - struct section *pos = rb_entry(nd, struct section, rb_node); - ret += sec_dso__fprintf_section(pos, fp); - } - - return ret; -} - -static struct section *section__new(const char *name, const char *path) -{ - struct section *self = calloc(1, sizeof(*self)); - - if (!self) - goto out_failure; - - self->name = calloc(1, strlen(name) + 1); - if (!self->name) - goto out_failure; - - self->path = calloc(1, strlen(path) + 1); - if (!self->path) - goto out_failure; - - strcpy(self->name, name); - strcpy(self->path, path); - self->hash = crc32(self->name, strlen(name)); - - return self; - -out_failure: - if (self) { - if (self->name) - free(self->name); - if (self->path) - free(self->path); - free(self); - } - - return NULL; -} - -/* module methods */ - -struct mod_dso *mod_dso__new_dso(const char *name) -{ - struct mod_dso *self = malloc(sizeof(*self) + strlen(name) + 1); - - if (self != NULL) { - strcpy(self->name, name); - self->mods = RB_ROOT; - self->find_module = mod_dso__find_module; - } - - return self; -} - -static void mod_dso__delete_module(struct module *self) -{ - free(((void *)self)); -} - -void mod_dso__delete_modules(struct mod_dso *self) -{ - struct module *pos; - struct rb_node *next = rb_first(&self->mods); - - while (next) { - pos = rb_entry(next, struct module, rb_node); - next = rb_next(&pos->rb_node); - 
rb_erase(&pos->rb_node, &self->mods); - mod_dso__delete_module(pos); - } -} - -void mod_dso__delete_self(struct mod_dso *self) -{ - mod_dso__delete_modules(self); - free(self); -} - -static void mod_dso__insert_module(struct mod_dso *self, struct module *mod) -{ - struct rb_node **p = &self->mods.rb_node; - struct rb_node *parent = NULL; - const u64 hash = mod->hash; - struct module *m; - - while (*p != NULL) { - parent = *p; - m = rb_entry(parent, struct module, rb_node); - if (hash < m->hash) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - rb_link_node(&mod->rb_node, parent, p); - rb_insert_color(&mod->rb_node, &self->mods); -} - -struct module *mod_dso__find_module(struct mod_dso *self, const char *name) -{ - struct rb_node *n; - u64 hash; - int len; - - if (self == NULL) - return NULL; - - len = strlen(name); - hash = crc32(name, len); - - n = self->mods.rb_node; - - while (n) { - struct module *m = rb_entry(n, struct module, rb_node); - - if (hash < m->hash) - n = n->rb_left; - else if (hash > m->hash) - n = n->rb_right; - else { - if (!strcmp(name, m->name)) - return m; - else - n = rb_next(&m->rb_node); - } - } - - return NULL; -} - -static size_t mod_dso__fprintf_module(struct module *self, FILE *fp) -{ - return fprintf(fp, "name:%s path:%s\n", self->name, self->path); -} - -size_t mod_dso__fprintf(struct mod_dso *self, FILE *fp) -{ - struct rb_node *nd; - size_t ret; - - ret = fprintf(fp, "dso: %s\n", self->name); - - for (nd = rb_first(&self->mods); nd; nd = rb_next(nd)) { - struct module *pos = rb_entry(nd, struct module, rb_node); - - ret += mod_dso__fprintf_module(pos, fp); - } - - return ret; -} - -static struct module *module__new(const char *name, const char *path) -{ - struct module *self = calloc(1, sizeof(*self)); - - if (!self) - goto out_failure; - - self->name = calloc(1, strlen(name) + 1); - if (!self->name) - goto out_failure; - - self->path = calloc(1, strlen(path) + 1); - if (!self->path) - goto out_failure; - - strcpy(self->name, name); - strcpy(self->path, path); - self->hash = crc32(self->name, strlen(name)); - - return self; - -out_failure: - if (self) { - if (self->name) - free(self->name); - if (self->path) - free(self->path); - free(self); - } - - return NULL; -} - -static int mod_dso__load_sections(struct module *mod) -{ - int count = 0, path_len; - struct dirent *entry; - char *line = NULL; - char *dir_path; - DIR *dir; - size_t n; - - path_len = strlen("/sys/module/"); - path_len += strlen(mod->name); - path_len += strlen("/sections/"); - - dir_path = calloc(1, path_len + 1); - if (dir_path == NULL) - goto out_failure; - - strcat(dir_path, "/sys/module/"); - strcat(dir_path, mod->name); - strcat(dir_path, "/sections/"); - - dir = opendir(dir_path); - if (dir == NULL) - goto out_free; - - while ((entry = readdir(dir))) { - struct section *section; - char *path, *vma; - int line_len; - FILE *file; - - if (!strcmp(".", entry->d_name) || !strcmp("..", entry->d_name)) - continue; - - path = calloc(1, path_len + strlen(entry->d_name) + 1); - if (path == NULL) - break; - strcat(path, dir_path); - strcat(path, entry->d_name); - - file = fopen(path, "r"); - if (file == NULL) { - free(path); - break; - } - - line_len = getline(&line, &n, file); - if (line_len < 0) { - free(path); - fclose(file); - break; - } - - if (!line) { - free(path); - fclose(file); - break; - } - - line[--line_len] = '\0'; /* \n */ - - vma = strstr(line, "0x"); - if (!vma) { - free(path); - fclose(file); - break; - } - vma += 2; - - section = section__new(entry->d_name, path); - if 
(!section) { - fprintf(stderr, "load_sections: allocation error\n"); - free(path); - fclose(file); - break; - } - - hex2u64(vma, &section->vma); - sec_dso__insert_section(mod->sections, section); - - free(path); - fclose(file); - count++; - } - - closedir(dir); - free(line); - free(dir_path); - - return count; - -out_free: - free(dir_path); - -out_failure: - return count; -} - -static int mod_dso__load_module_paths(struct mod_dso *self) -{ - struct utsname uts; - int count = 0, len, err = -1; - char *line = NULL; - FILE *file; - char *dpath, *dir; - size_t n; - - if (uname(&uts) < 0) - return err; - - len = strlen("/lib/modules/"); - len += strlen(uts.release); - len += strlen("/modules.dep"); - - dpath = calloc(1, len + 1); - if (dpath == NULL) - return err; - - strcat(dpath, "/lib/modules/"); - strcat(dpath, uts.release); - strcat(dpath, "/modules.dep"); - - file = fopen(dpath, "r"); - if (file == NULL) - goto out_failure; - - dir = dirname(dpath); - if (!dir) - goto out_failure; - strcat(dir, "/"); - - while (!feof(file)) { - struct module *module; - char *name, *path, *tmp; - FILE *modfile; - int line_len; - - line_len = getline(&line, &n, file); - if (line_len < 0) - break; - - if (!line) - break; - - line[--line_len] = '\0'; /* \n */ - - path = strchr(line, ':'); - if (!path) - break; - *path = '\0'; - - path = strdup(line); - if (!path) - break; - - if (!strstr(path, dir)) { - if (strncmp(path, "kernel/", 7)) - break; - - free(path); - path = calloc(1, strlen(dir) + strlen(line) + 1); - if (!path) - break; - strcat(path, dir); - strcat(path, line); - } - - modfile = fopen(path, "r"); - if (modfile == NULL) - break; - fclose(modfile); - - name = strdup(path); - if (!name) - break; - - name = strtok(name, "/"); - tmp = name; - - while (tmp) { - tmp = strtok(NULL, "/"); - if (tmp) - name = tmp; - } - - name = strsep(&name, "."); - if (!name) - break; - - /* Quirk: replace '-' with '_' in all modules */ - for (len = strlen(name); len; len--) { - if (*(name+len) == '-') - *(name+len) = '_'; - } - - module = module__new(name, path); - if (!module) - break; - mod_dso__insert_module(self, module); - - module->sections = sec_dso__new_dso("sections"); - if (!module->sections) - break; - - module->active = mod_dso__load_sections(module); - - if (module->active > 0) - count++; - } - - if (feof(file)) - err = count; - else - fprintf(stderr, "load_module_paths: modules.dep parsing failure!\n"); - -out_failure: - if (dpath) - free(dpath); - if (file) - fclose(file); - if (line) - free(line); - - return err; -} - -int mod_dso__load_modules(struct mod_dso *dso) -{ - int err; - - err = mod_dso__load_module_paths(dso); - - return err; -} diff --git a/tools/perf/util/module.h b/tools/perf/util/module.h deleted file mode 100644 index 098e0412bc2..00000000000 --- a/tools/perf/util/module.h +++ /dev/null @@ -1,53 +0,0 @@ -#ifndef __PERF_MODULE_ -#define __PERF_MODULE_ 1 - -#include -#include "../types.h" -#include -#include - -struct section { - struct rb_node rb_node; - u64 hash; - u64 vma; - char *name; - char *path; -}; - -struct sec_dso { - struct list_head node; - struct rb_root secs; - struct section *(*find_section)(struct sec_dso *, const char *name); - char name[0]; -}; - -struct module { - struct rb_node rb_node; - u64 hash; - char *name; - char *path; - struct sec_dso *sections; - int active; -}; - -struct mod_dso { - struct list_head node; - struct rb_root mods; - struct module *(*find_module)(struct mod_dso *, const char *name); - char name[0]; -}; - -struct sec_dso *sec_dso__new_dso(const char 
*name); -void sec_dso__delete_sections(struct sec_dso *self); -void sec_dso__delete_self(struct sec_dso *self); -size_t sec_dso__fprintf(struct sec_dso *self, FILE *fp); -struct section *sec_dso__find_section(struct sec_dso *self, const char *name); - -struct mod_dso *mod_dso__new_dso(const char *name); -void mod_dso__delete_modules(struct mod_dso *self); -void mod_dso__delete_self(struct mod_dso *self); -size_t mod_dso__fprintf(struct mod_dso *self, FILE *fp); -struct module *mod_dso__find_module(struct mod_dso *self, const char *name); -int mod_dso__load_modules(struct mod_dso *dso); - -#endif /* __PERF_MODULE_ */ diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 50e75abb1fd..40c9acd41ca 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -129,20 +129,32 @@ sort__comm_print(FILE *fp, struct hist_entry *self, unsigned int width) int64_t sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) { - struct dso *dso_l = left->dso; - struct dso *dso_r = right->dso; + struct dso *dso_l = left->map ? left->map->dso : NULL; + struct dso *dso_r = right->map ? right->map->dso : NULL; + const char *dso_name_l, *dso_name_r; if (!dso_l || !dso_r) return cmp_null(dso_l, dso_r); - return strcmp(dso_l->name, dso_r->name); + if (verbose) { + dso_name_l = dso_l->long_name; + dso_name_r = dso_r->long_name; + } else { + dso_name_l = dso_l->short_name; + dso_name_r = dso_r->short_name; + } + + return strcmp(dso_name_l, dso_name_r); } size_t sort__dso_print(FILE *fp, struct hist_entry *self, unsigned int width) { - if (self->dso) - return repsep_fprintf(fp, "%-*s", width, self->dso->name); + if (self->map && self->map->dso) { + const char *dso_name = !verbose ? self->map->dso->short_name : + self->map->dso->long_name; + return repsep_fprintf(fp, "%-*s", width, dso_name); + } return repsep_fprintf(fp, "%*llx", width, (u64)self->ip); } @@ -169,20 +181,16 @@ sort__sym_print(FILE *fp, struct hist_entry *self, unsigned int width __used) { size_t ret = 0; - if (verbose) - ret += repsep_fprintf(fp, "%#018llx %c ", (u64)self->ip, - dso__symtab_origin(self->dso)); + if (verbose) { + char o = self->map ? 
dso__symtab_origin(self->map->dso) : '!'; + ret += repsep_fprintf(fp, "%#018llx %c ", (u64)self->ip, o); + } ret += repsep_fprintf(fp, "[%c] ", self->level); - if (self->sym) { + if (self->sym) ret += repsep_fprintf(fp, "%s", self->sym->name); - - if (self->sym->module) - ret += repsep_fprintf(fp, "\t[%s]", - self->sym->module->name); - } else { + else ret += repsep_fprintf(fp, "%#016llx", (u64)self->ip); - } return ret; } diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index 4684fd6d5c4..13806d782af 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -42,18 +42,15 @@ extern unsigned int threads__col_width; struct hist_entry { struct rb_node rb_node; - + u64 count; struct thread *thread; struct map *map; - struct dso *dso; struct symbol *sym; - struct symbol *parent; u64 ip; char level; + struct symbol *parent; struct callchain_node callchain; struct rb_root sorted_chain; - - u64 count; }; /* diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 559fb06210f..e8829689947 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -2,12 +2,14 @@ #include "../perf.h" #include "string.h" #include "symbol.h" +#include "thread.h" #include "debug.h" #include #include #include +#include const char *sym_hist_filter; @@ -18,12 +20,15 @@ enum dso_origin { DSO__ORIG_UBUNTU, DSO__ORIG_BUILDID, DSO__ORIG_DSO, + DSO__ORIG_KMODULE, DSO__ORIG_NOT_FOUND, }; -static struct symbol *symbol__new(u64 start, u64 len, - const char *name, unsigned int priv_size, - u64 obj_start, int v) +static void dsos__add(struct dso *dso); +static struct dso *dsos__find(const char *name); + +static struct symbol *symbol__new(u64 start, u64 len, const char *name, + unsigned int priv_size, int v) { size_t namelen = strlen(name) + 1; struct symbol *self = calloc(1, priv_size + sizeof(*self) + namelen); @@ -32,10 +37,9 @@ static struct symbol *symbol__new(u64 start, u64 len, return NULL; if (v >= 2) - printf("new symbol: %016Lx [%08lx]: %s, hist: %p, obj_start: %p\n", - (u64)start, (unsigned long)len, name, self->hist, (void *)(unsigned long)obj_start); + printf("new symbol: %016Lx [%08lx]: %s, hist: %p\n", + start, (unsigned long)len, name, self->hist); - self->obj_start= obj_start; self->hist = NULL; self->hist_sum = 0; @@ -60,12 +64,8 @@ static void symbol__delete(struct symbol *self, unsigned int priv_size) static size_t symbol__fprintf(struct symbol *self, FILE *fp) { - if (!self->module) - return fprintf(fp, " %llx-%llx %s\n", + return fprintf(fp, " %llx-%llx %s\n", self->start, self->end, self->name); - else - return fprintf(fp, " %llx-%llx %s \t[%s]\n", - self->start, self->end, self->name, self->module->name); } struct dso *dso__new(const char *name, unsigned int sym_priv_size) @@ -74,6 +74,8 @@ struct dso *dso__new(const char *name, unsigned int sym_priv_size) if (self != NULL) { strcpy(self->name, name); + self->long_name = self->name; + self->short_name = self->name; self->syms = RB_ROOT; self->sym_priv_size = sym_priv_size; self->find_symbol = dso__find_symbol; @@ -100,6 +102,8 @@ static void dso__delete_symbols(struct dso *self) void dso__delete(struct dso *self) { dso__delete_symbols(self); + if (self->long_name != self->name) + free(self->long_name); free(self); } @@ -147,7 +151,7 @@ struct symbol *dso__find_symbol(struct dso *self, u64 ip) size_t dso__fprintf(struct dso *self, FILE *fp) { - size_t ret = fprintf(fp, "dso: %s\n", self->name); + size_t ret = fprintf(fp, "dso: %s\n", self->long_name); struct rb_node *nd; for (nd = rb_first(&self->syms); nd; nd = 
rb_next(nd)) { @@ -158,7 +162,8 @@ size_t dso__fprintf(struct dso *self, FILE *fp) return ret; } -static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int v) +static int dso__load_kallsyms(struct dso *self, struct map *map, + symbol_filter_t filter, int v) { struct rb_node *nd, *prevnd; char *line = NULL; @@ -200,12 +205,12 @@ static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int v) * Well fix up the end later, when we have all sorted. */ sym = symbol__new(start, 0xdead, line + len + 2, - self->sym_priv_size, 0, v); + self->sym_priv_size, v); if (sym == NULL) goto out_delete_line; - if (filter && filter(self, sym)) + if (filter && filter(map, sym)) symbol__delete(sym, self->sym_priv_size); else { dso__insert_symbol(self, sym); @@ -241,14 +246,15 @@ out_failure: return -1; } -static int dso__load_perf_map(struct dso *self, symbol_filter_t filter, int v) +static int dso__load_perf_map(struct dso *self, struct map *map, + symbol_filter_t filter, int v) { char *line = NULL; size_t n; FILE *file; int nr_syms = 0; - file = fopen(self->name, "r"); + file = fopen(self->long_name, "r"); if (file == NULL) goto out_failure; @@ -279,12 +285,12 @@ static int dso__load_perf_map(struct dso *self, symbol_filter_t filter, int v) continue; sym = symbol__new(start, size, line + len, - self->sym_priv_size, start, v); + self->sym_priv_size, v); if (sym == NULL) goto out_delete_line; - if (filter && filter(self, sym)) + if (filter && filter(map, sym)) symbol__delete(sym, self->sym_priv_size); else { dso__insert_symbol(self, sym); @@ -410,7 +416,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, int v) Elf *elf; int nr = 0, symidx, fd, err = 0; - fd = open(self->name, O_RDONLY); + fd = open(self->long_name, O_RDONLY); if (fd < 0) goto out; @@ -478,7 +484,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, int v) "%s@plt", elf_sym__name(&sym, symstrs)); f = symbol__new(plt_offset, shdr_plt.sh_entsize, - sympltname, self->sym_priv_size, 0, v); + sympltname, self->sym_priv_size, v); if (!f) goto out_elf_end; @@ -496,7 +502,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, int v) "%s@plt", elf_sym__name(&sym, symstrs)); f = symbol__new(plt_offset, shdr_plt.sh_entsize, - sympltname, self->sym_priv_size, 0, v); + sympltname, self->sym_priv_size, v); if (!f) goto out_elf_end; @@ -515,12 +521,13 @@ out_close: return nr; out: fprintf(stderr, "%s: problems reading %s PLT info.\n", - __func__, self->name); + __func__, self->long_name); return 0; } -static int dso__load_sym(struct dso *self, int fd, const char *name, - symbol_filter_t filter, int v, struct module *mod) +static int dso__load_sym(struct dso *self, struct map *map, const char *name, + int fd, symbol_filter_t filter, int kernel, + int kmodule, int v) { Elf_Data *symstrs, *secstrs; uint32_t nr_syms; @@ -532,7 +539,7 @@ static int dso__load_sym(struct dso *self, int fd, const char *name, GElf_Sym sym; Elf_Scn *sec, *sec_strndx; Elf *elf; - int nr = 0, kernel = !strcmp("[kernel]", self->name); + int nr = 0; elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); if (elf == NULL) { @@ -589,8 +596,6 @@ static int dso__load_sym(struct dso *self, int fd, const char *name, struct symbol *f; const char *elf_name; char *demangled; - u64 obj_start; - struct section *section = NULL; int is_label = elf_sym__is_label(&sym); const char *section_name; @@ -607,7 +612,6 @@ static int dso__load_sym(struct dso *self, int fd, const char *name, continue; section_name = elf_sec__name(&shdr, secstrs); - obj_start = 
sym.st_value; if (self->adjust_symbols) { if (v >= 2) @@ -615,18 +619,8 @@ static int dso__load_sym(struct dso *self, int fd, const char *name, (u64)sym.st_value, (u64)shdr.sh_addr, (u64)shdr.sh_offset); sym.st_value -= shdr.sh_addr - shdr.sh_offset; - } - - if (mod) { - section = mod->sections->find_section(mod->sections, section_name); - if (section) - sym.st_value += section->vma; - else { - fprintf(stderr, "dso__load_sym() module %s lookup of %s failed\n", - mod->name, section_name); - goto out_elf_end; - } - } + } else if (kmodule) + sym.st_value += shdr.sh_offset; /* * We need to figure out if the object was created from C++ sources * DWARF DW_compile_unit has this, but we don't always have access @@ -638,15 +632,14 @@ static int dso__load_sym(struct dso *self, int fd, const char *name, elf_name = demangled; f = symbol__new(sym.st_value, sym.st_size, elf_name, - self->sym_priv_size, obj_start, v); + self->sym_priv_size, v); free(demangled); if (!f) goto out_elf_end; - if (filter && filter(self, f)) + if (filter && filter(map, f)) symbol__delete(f, self->sym_priv_size); else { - f->module = mod; dso__insert_symbol(self, f); nr++; } @@ -671,7 +664,7 @@ static char *dso__read_build_id(struct dso *self, int v) char *build_id = NULL, *bid; unsigned char *raw; Elf *elf; - int fd = open(self->name, O_RDONLY); + int fd = open(self->long_name, O_RDONLY); if (fd < 0) goto out; @@ -680,7 +673,7 @@ static char *dso__read_build_id(struct dso *self, int v) if (elf == NULL) { if (v) fprintf(stderr, "%s: cannot read %s ELF file.\n", - __func__, self->name); + __func__, self->long_name); goto out_close; } @@ -709,7 +702,7 @@ static char *dso__read_build_id(struct dso *self, int v) bid += 2; } if (v >= 2) - printf("%s(%s): %s\n", __func__, self->name, build_id); + printf("%s(%s): %s\n", __func__, self->long_name, build_id); out_elf_end: elf_end(elf); out_close: @@ -727,6 +720,7 @@ char dso__symtab_origin(const struct dso *self) [DSO__ORIG_UBUNTU] = 'u', [DSO__ORIG_BUILDID] = 'b', [DSO__ORIG_DSO] = 'd', + [DSO__ORIG_KMODULE] = 'K', }; if (self == NULL || self->origin == DSO__ORIG_NOT_FOUND) @@ -734,7 +728,7 @@ char dso__symtab_origin(const struct dso *self) return origin[self->origin]; } -int dso__load(struct dso *self, symbol_filter_t filter, int v) +int dso__load(struct dso *self, struct map *map, symbol_filter_t filter, int v) { int size = PATH_MAX; char *name = malloc(size), *build_id = NULL; @@ -747,7 +741,7 @@ int dso__load(struct dso *self, symbol_filter_t filter, int v) self->adjust_symbols = 0; if (strncmp(self->name, "/tmp/perf-", 10) == 0) { - ret = dso__load_perf_map(self, filter, v); + ret = dso__load_perf_map(self, map, filter, v); self->origin = ret > 0 ? 
DSO__ORIG_JAVA_JIT : DSO__ORIG_NOT_FOUND; return ret; @@ -760,10 +754,12 @@ more: self->origin++; switch (self->origin) { case DSO__ORIG_FEDORA: - snprintf(name, size, "/usr/lib/debug%s.debug", self->name); + snprintf(name, size, "/usr/lib/debug%s.debug", + self->long_name); break; case DSO__ORIG_UBUNTU: - snprintf(name, size, "/usr/lib/debug%s", self->name); + snprintf(name, size, "/usr/lib/debug%s", + self->long_name); break; case DSO__ORIG_BUILDID: build_id = dso__read_build_id(self, v); @@ -777,7 +773,7 @@ more: self->origin++; /* Fall thru */ case DSO__ORIG_DSO: - snprintf(name, size, "%s", self->name); + snprintf(name, size, "%s", self->long_name); break; default: @@ -787,7 +783,7 @@ more: fd = open(name, O_RDONLY); } while (fd < 0); - ret = dso__load_sym(self, fd, name, filter, v, NULL); + ret = dso__load_sym(self, map, name, fd, filter, 0, 0, v); close(fd); /* @@ -808,89 +804,247 @@ out: return ret; } -static int dso__load_module(struct dso *self, struct mod_dso *mods, const char *name, - symbol_filter_t filter, int v) +static struct rb_root kernel_maps; +struct map *kernel_map; + +static void kernel_maps__insert(struct map *map) { - struct module *mod = mod_dso__find_module(mods, name); - int err = 0, fd; + maps__insert(&kernel_maps, map); +} - if (mod == NULL || !mod->active) +struct symbol *kernel_maps__find_symbol(u64 ip, struct map **mapp) +{ + /* + * We can't have kernel_map in kernel_maps because it spans an address + * space that includes the modules. The right way to fix this is to + * create several maps, so that we don't have overlapping ranges with + * modules. For now lets look first on the kernel dso. + */ + struct map *map = maps__find(&kernel_maps, ip); + struct symbol *sym; + + if (map) { + ip = map->map_ip(map, ip); + sym = map->dso->find_symbol(map->dso, ip); + } else { + map = kernel_map; + sym = map->dso->find_symbol(map->dso, ip); + } + + if (mapp) + *mapp = map; + + return sym; +} + +struct map *kernel_maps__find_by_dso_name(const char *name) +{ + struct rb_node *nd; + + for (nd = rb_first(&kernel_maps); nd; nd = rb_next(nd)) { + struct map *map = rb_entry(nd, struct map, rb_node); + + if (map->dso && strcmp(map->dso->name, name) == 0) + return map; + } + + return NULL; +} + +static int dso__load_module_sym(struct dso *self, struct map *map, + symbol_filter_t filter, int v) +{ + int err = 0, fd = open(self->long_name, O_RDONLY); + + if (fd < 0) { + if (v) + fprintf(stderr, "%s: cannot open %s\n", + __func__, self->long_name); return err; + } - fd = open(mod->path, O_RDONLY); - - if (fd < 0) - return err; - - err = dso__load_sym(self, fd, name, filter, v, mod); + err = dso__load_sym(self, map, self->long_name, fd, filter, 0, 1, v); close(fd); return err; } -int dso__load_modules(struct dso *self, symbol_filter_t filter, int v) +static int dsos__load_modules_sym_dir(char *dirname, + symbol_filter_t filter, int v) { - struct mod_dso *mods = mod_dso__new_dso("modules"); - struct module *pos; - struct rb_node *next; - int err, count = 0; + struct dirent *dent; + int nr_symbols = 0, err; + DIR *dir = opendir(dirname); - err = mod_dso__load_modules(mods); - - if (err <= 0) - return err; - - /* - * Iterate over modules, and load active symbols. 
- */ - next = rb_first(&mods->mods); - while (next) { - pos = rb_entry(next, struct module, rb_node); - err = dso__load_module(self, mods, pos->name, filter, v); - - if (err < 0) - break; - - next = rb_next(&pos->rb_node); - count += err; + if (!dir) { + if (v) + fprintf(stderr, "%s: cannot open %s dir\n", __func__, + dirname); + return -1; } - if (err < 0) { - mod_dso__delete_modules(mods); - mod_dso__delete_self(mods); - return err; - } + while ((dent = readdir(dir)) != NULL) { + char path[PATH_MAX]; - return count; -} + if (dent->d_type == DT_DIR) { + if (!strcmp(dent->d_name, ".") || + !strcmp(dent->d_name, "..")) + continue; -static inline void dso__fill_symbol_holes(struct dso *self) -{ - struct symbol *prev = NULL; - struct rb_node *nd; + snprintf(path, sizeof(path), "%s/%s", + dirname, dent->d_name); + err = dsos__load_modules_sym_dir(path, filter, v); + if (err < 0) + goto failure; + } else { + char *dot = strrchr(dent->d_name, '.'), + dso_name[PATH_MAX]; + struct map *map; + struct rb_node *last; - for (nd = rb_last(&self->syms); nd; nd = rb_prev(nd)) { - struct symbol *pos = rb_entry(nd, struct symbol, rb_node); + if (dot == NULL || strcmp(dot, ".ko")) + continue; + snprintf(dso_name, sizeof(dso_name), "[%.*s]", + (int)(dot - dent->d_name), dent->d_name); - if (prev) { - u64 hole = 0; - int alias = pos->start == prev->start; + map = kernel_maps__find_by_dso_name(dso_name); + if (map == NULL) + continue; - if (!alias) - hole = prev->start - pos->end - 1; + snprintf(path, sizeof(path), "%s/%s", + dirname, dent->d_name); - if (hole || alias) { - if (alias) - pos->end = prev->end; - else if (hole) - pos->end = prev->start - 1; + map->dso->long_name = strdup(path); + if (map->dso->long_name == NULL) + goto failure; + + err = dso__load_module_sym(map->dso, map, filter, v); + if (err < 0) + goto failure; + last = rb_last(&map->dso->syms); + if (last) { + struct symbol *sym; + sym = rb_entry(last, struct symbol, rb_node); + map->end = map->start + sym->end; } } - prev = pos; + nr_symbols += err; } + + return nr_symbols; +failure: + closedir(dir); + return -1; } -static int dso__load_vmlinux(struct dso *self, const char *vmlinux, +static int dsos__load_modules_sym(symbol_filter_t filter, int v) +{ + struct utsname uts; + char modules_path[PATH_MAX]; + + if (uname(&uts) < 0) + return -1; + + snprintf(modules_path, sizeof(modules_path), "/lib/modules/%s/kernel", + uts.release); + + return dsos__load_modules_sym_dir(modules_path, filter, v); +} + +/* + * Constructor variant for modules (where we know from /proc/modules where + * they are loaded) and for vmlinux, where only after we load all the + * symbols we'll know where it starts and ends. 
+ */ +static struct map *map__new2(u64 start, struct dso *dso) +{ + struct map *self = malloc(sizeof(*self)); + + if (self != NULL) { + self->start = start; + /* + * Will be filled after we load all the symbols + */ + self->end = 0; + + self->pgoff = 0; + self->dso = dso; + self->map_ip = map__map_ip; + RB_CLEAR_NODE(&self->rb_node); + } + return self; +} + +int dsos__load_modules(unsigned int sym_priv_size, + symbol_filter_t filter, int v) +{ + char *line = NULL; + size_t n; + FILE *file = fopen("/proc/modules", "r"); + struct map *map; + + if (file == NULL) + return -1; + + while (!feof(file)) { + char name[PATH_MAX]; + u64 start; + struct dso *dso; + char *sep; + int line_len; + + line_len = getline(&line, &n, file); + if (line_len < 0) + break; + + if (!line) + goto out_failure; + + line[--line_len] = '\0'; /* \n */ + + sep = strrchr(line, 'x'); + if (sep == NULL) + continue; + + hex2u64(sep + 1, &start); + + sep = strchr(line, ' '); + if (sep == NULL) + continue; + + *sep = '\0'; + + snprintf(name, sizeof(name), "[%s]", line); + dso = dso__new(name, sym_priv_size); + + if (dso == NULL) + goto out_delete_line; + + map = map__new2(start, dso); + if (map == NULL) { + dso__delete(dso); + goto out_delete_line; + } + + dso->origin = DSO__ORIG_KMODULE; + kernel_maps__insert(map); + dsos__add(dso); + } + + free(line); + fclose(file); + + v = 1; + return dsos__load_modules_sym(filter, v); + +out_delete_line: + free(line); +out_failure: + return -1; +} + +static int dso__load_vmlinux(struct dso *self, struct map *map, + const char *vmlinux, symbol_filter_t filter, int v) { int err, fd = open(vmlinux, O_RDONLY); @@ -898,28 +1052,36 @@ static int dso__load_vmlinux(struct dso *self, const char *vmlinux, if (fd < 0) return -1; - err = dso__load_sym(self, fd, vmlinux, filter, v, NULL); - - if (err > 0) - dso__fill_symbol_holes(self); + err = dso__load_sym(self, map, self->long_name, fd, filter, 1, 0, v); close(fd); return err; } -int dso__load_kernel(struct dso *self, const char *vmlinux, - symbol_filter_t filter, int v, int use_modules) +int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, + symbol_filter_t filter, int v, int use_modules) { int err = -1; + struct dso *dso = dso__new(vmlinux, sym_priv_size); + + if (dso == NULL) + return -1; + + dso->short_name = "[kernel]"; + kernel_map = map__new2(0, dso); + if (kernel_map == NULL) + goto out_delete_dso; + + kernel_map->map_ip = vdso__map_ip; if (vmlinux) { - err = dso__load_vmlinux(self, vmlinux, filter, v); + err = dso__load_vmlinux(dso, kernel_map, vmlinux, filter, v); if (err > 0 && use_modules) { - int syms = dso__load_modules(self, filter, v); + int syms = dsos__load_modules(sym_priv_size, filter, v); if (syms < 0) { - fprintf(stderr, "dso__load_modules failed!\n"); + fprintf(stderr, "dsos__load_modules failed!\n"); return syms; } err += syms; @@ -927,18 +1089,34 @@ int dso__load_kernel(struct dso *self, const char *vmlinux, } if (err <= 0) - err = dso__load_kallsyms(self, filter, v); + err = dso__load_kallsyms(dso, kernel_map, filter, v); - if (err > 0) - self->origin = DSO__ORIG_KERNEL; + if (err > 0) { + struct rb_node *node = rb_first(&dso->syms); + struct symbol *sym = rb_entry(node, struct symbol, rb_node); + + kernel_map->start = sym->start; + node = rb_last(&dso->syms); + sym = rb_entry(node, struct symbol, rb_node); + kernel_map->end = sym->end; + + dso->origin = DSO__ORIG_KERNEL; + /* + * XXX See kernel_maps__find_symbol comment + * kernel_maps__insert(kernel_map) + */ + dsos__add(dso); + } return err; + 
+out_delete_dso: + dso__delete(dso); + return -1; } LIST_HEAD(dsos); -struct dso *kernel_dso; struct dso *vdso; -struct dso *hypervisor_dso; const char *vmlinux_name = "vmlinux"; int modules; @@ -970,7 +1148,7 @@ struct dso *dsos__findnew(const char *name) if (!dso) goto out_delete_dso; - nr = dso__load(dso, NULL, verbose); + nr = dso__load(dso, NULL, NULL, verbose); if (nr < 0) { eprintf("Failed to open: %s\n", name); goto out_delete_dso; @@ -995,43 +1173,20 @@ void dsos__fprintf(FILE *fp) dso__fprintf(pos, fp); } -static struct symbol *vdso__find_symbol(struct dso *dso, u64 ip) -{ - return dso__find_symbol(dso, ip); -} - int load_kernel(void) { - int err; - - kernel_dso = dso__new("[kernel]", 0); - if (!kernel_dso) + if (dsos__load_kernel(vmlinux_name, 0, NULL, verbose, modules) <= 0) return -1; - err = dso__load_kernel(kernel_dso, vmlinux_name, NULL, verbose, modules); - if (err <= 0) { - dso__delete(kernel_dso); - kernel_dso = NULL; - } else - dsos__add(kernel_dso); - vdso = dso__new("[vdso]", 0); if (!vdso) return -1; - vdso->find_symbol = vdso__find_symbol; - dsos__add(vdso); - hypervisor_dso = dso__new("[hypervisor]", 0); - if (!hypervisor_dso) - return -1; - dsos__add(hypervisor_dso); - - return err; + return 0; } - void symbol__init(void) { elf_version(EV_CURRENT); diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index ee164f659ed..5339fd82ec9 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -5,7 +5,6 @@ #include "types.h" #include #include -#include "module.h" #include "event.h" #ifdef HAVE_CPLUS_DEMANGLE @@ -36,10 +35,8 @@ struct symbol { struct rb_node rb_node; u64 start; u64 end; - u64 obj_start; u64 hist_sum; u64 *hist; - struct module *module; void *priv; char name[0]; }; @@ -52,12 +49,14 @@ struct dso { unsigned char adjust_symbols; unsigned char slen_calculated; unsigned char origin; + const char *short_name; + char *long_name; char name[0]; }; extern const char *sym_hist_filter; -typedef int (*symbol_filter_t)(struct dso *self, struct symbol *sym); +typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym); struct dso *dso__new(const char *name, unsigned int sym_priv_size); void dso__delete(struct dso *self); @@ -69,10 +68,12 @@ static inline void *dso__sym_priv(struct dso *self, struct symbol *sym) struct symbol *dso__find_symbol(struct dso *self, u64 ip); -int dso__load_kernel(struct dso *self, const char *vmlinux, - symbol_filter_t filter, int verbose, int modules); -int dso__load_modules(struct dso *self, symbol_filter_t filter, int verbose); -int dso__load(struct dso *self, symbol_filter_t filter, int verbose); +int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, + symbol_filter_t filter, int verbose, int modules); +int dsos__load_modules(unsigned int sym_priv_size, symbol_filter_t filter, + int verbose); +int dso__load(struct dso *self, struct map *map, symbol_filter_t filter, + int verbose); struct dso *dsos__findnew(const char *name); void dsos__fprintf(FILE *fp); @@ -84,9 +85,8 @@ int load_kernel(void); void symbol__init(void); extern struct list_head dsos; -extern struct dso *kernel_dso; +extern struct map *kernel_map; extern struct dso *vdso; -extern struct dso *hypervisor_dso; extern const char *vmlinux_name; extern int modules; #endif /* __PERF_SYMBOL */ diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 9d0945cc66d..3b56aebb1f4 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -16,6 +16,7 @@ static struct thread *thread__new(pid_t pid) if 
(self->comm) snprintf(self->comm, 32, ":%d", self->pid); self->maps = RB_ROOT; + INIT_LIST_HEAD(&self->removed_maps); } return self; @@ -32,13 +33,20 @@ int thread__set_comm(struct thread *self, const char *comm) static size_t thread__fprintf(struct thread *self, FILE *fp) { struct rb_node *nd; - size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm); + struct map *pos; + size_t ret = fprintf(fp, "Thread %d %s\nCurrent maps:\n", + self->pid, self->comm); for (nd = rb_first(&self->maps); nd; nd = rb_next(nd)) { - struct map *pos = rb_entry(nd, struct map, rb_node); + pos = rb_entry(nd, struct map, rb_node); ret += map__fprintf(pos, fp); } + ret = fprintf(fp, "Removed maps:\n"); + + list_for_each_entry(pos, &self->removed_maps, node) + ret += map__fprintf(pos, fp); + return ret; } @@ -112,21 +120,13 @@ static void thread__remove_overlappings(struct thread *self, struct map *map) map__fprintf(pos, stdout); } - if (map->start <= pos->start && map->end > pos->start) - pos->start = map->end; - - if (map->end >= pos->end && map->start < pos->end) - pos->end = map->start; - - if (verbose >= 2) { - printf("after collision:\n"); - map__fprintf(pos, stdout); - } - - if (pos->start >= pos->end) { - rb_erase(&pos->rb_node, &self->maps); - free(pos); - } + rb_erase(&pos->rb_node, &self->maps); + /* + * We may have references to this map, for instance in some + * hist_entry instances, so just move them to a separate + * list. + */ + list_add_tail(&pos->node, &self->removed_maps); } } diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index bbb37c1a52e..845d9b62f96 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -8,6 +8,7 @@ struct thread { struct rb_node rb_node; struct rb_root maps; + struct list_head removed_maps; pid_t pid; char shortname[3]; char *comm; @@ -25,6 +26,9 @@ size_t threads__fprintf(FILE *fp, struct rb_root *threads); void maps__insert(struct rb_root *maps, struct map *map); struct map *maps__find(struct rb_root *maps, u64 ip); +struct symbol *kernel_maps__find_symbol(const u64 ip, struct map **mapp); +struct map *kernel_maps__find_by_dso_name(const char *name); + static inline struct map *thread__find_map(struct thread *self, u64 ip) { return self ? maps__find(&self->maps, ip) : NULL; From a1a138d05fa060ac4238c19a1e890aacc25ed3ba Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 25 Sep 2009 11:20:12 -0700 Subject: [PATCH 081/470] tracing/kprobes: Use global event perf buffers in kprobe tracer Use new percpu global event buffer instead of stack in kprobe tracer while tracing through perf. Signed-off-by: Masami Hiramatsu Acked-by: Steven Rostedt Acked-by: Ingo Molnar Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: H. 
Peter Anvin Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: Peter Zijlstra Cc: Srikar Dronamraju Cc: Tom Zanussi LKML-Reference: <20090925182011.10157.60140.stgit@omoto> Signed-off-by: Frederic Weisbecker --- kernel/trace/trace_kprobe.c | 109 +++++++++++++++++++++++------------- 1 file changed, 70 insertions(+), 39 deletions(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 09cba270392..97309d4714f 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1149,35 +1149,49 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp, struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); struct ftrace_event_call *call = &tp->call; struct kprobe_trace_entry *entry; - int size, __size, i, pc; + struct trace_entry *ent; + int size, __size, i, pc, __cpu; unsigned long irq_flags; + char *raw_data; - local_save_flags(irq_flags); pc = preempt_count(); - __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); size = ALIGN(__size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); + if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, + "profile buffer not large enough")) + return 0; - do { - char raw_data[size]; - struct trace_entry *ent; - /* - * Zero dead bytes from alignment to avoid stack leak - * to userspace - */ - *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; - entry = (struct kprobe_trace_entry *)raw_data; - ent = &entry->ent; + /* + * Protect the non nmi buffer + * This also protects the rcu read side + */ + local_irq_save(irq_flags); + __cpu = smp_processor_id(); - tracing_generic_entry_update(ent, irq_flags, pc); - ent->type = call->id; - entry->nargs = tp->nr_args; - entry->ip = (unsigned long)kp->addr; - for (i = 0; i < tp->nr_args; i++) - entry->args[i] = call_fetch(&tp->args[i].fetch, regs); - perf_tp_event(call->id, entry->ip, 1, entry, size); - } while (0); + if (in_nmi()) + raw_data = rcu_dereference(trace_profile_buf_nmi); + else + raw_data = rcu_dereference(trace_profile_buf); + + if (!raw_data) + goto end; + + raw_data = per_cpu_ptr(raw_data, __cpu); + /* Zero dead bytes from alignment to avoid buffer leak to userspace */ + *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; + entry = (struct kprobe_trace_entry *)raw_data; + ent = &entry->ent; + + tracing_generic_entry_update(ent, irq_flags, pc); + ent->type = call->id; + entry->nargs = tp->nr_args; + entry->ip = (unsigned long)kp->addr; + for (i = 0; i < tp->nr_args; i++) + entry->args[i] = call_fetch(&tp->args[i].fetch, regs); + perf_tp_event(call->id, entry->ip, 1, entry, size); +end: + local_irq_restore(irq_flags); return 0; } @@ -1188,33 +1202,50 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri, struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); struct ftrace_event_call *call = &tp->call; struct kretprobe_trace_entry *entry; - int size, __size, i, pc; + struct trace_entry *ent; + int size, __size, i, pc, __cpu; unsigned long irq_flags; + char *raw_data; - local_save_flags(irq_flags); pc = preempt_count(); - __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args); size = ALIGN(__size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); + if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, + "profile buffer not large enough")) + return 0; - do { - char raw_data[size]; - struct trace_entry *ent; + /* + * Protect the non nmi buffer + * This also protects the rcu read side + */ + local_irq_save(irq_flags); + __cpu = smp_processor_id(); - *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; - entry = (struct kretprobe_trace_entry 
*)raw_data; - ent = &entry->ent; + if (in_nmi()) + raw_data = rcu_dereference(trace_profile_buf_nmi); + else + raw_data = rcu_dereference(trace_profile_buf); - tracing_generic_entry_update(ent, irq_flags, pc); - ent->type = call->id; - entry->nargs = tp->nr_args; - entry->func = (unsigned long)tp->rp.kp.addr; - entry->ret_ip = (unsigned long)ri->ret_addr; - for (i = 0; i < tp->nr_args; i++) - entry->args[i] = call_fetch(&tp->args[i].fetch, regs); - perf_tp_event(call->id, entry->ret_ip, 1, entry, size); - } while (0); + if (!raw_data) + goto end; + + raw_data = per_cpu_ptr(raw_data, __cpu); + /* Zero dead bytes from alignment to avoid buffer leak to userspace */ + *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; + entry = (struct kretprobe_trace_entry *)raw_data; + ent = &entry->ent; + + tracing_generic_entry_update(ent, irq_flags, pc); + ent->type = call->id; + entry->nargs = tp->nr_args; + entry->func = (unsigned long)tp->rp.kp.addr; + entry->ret_ip = (unsigned long)ri->ret_addr; + for (i = 0; i < tp->nr_args; i++) + entry->args[i] = call_fetch(&tp->args[i].fetch, regs); + perf_tp_event(call->id, entry->ret_ip, 1, entry, size); +end: + local_irq_restore(irq_flags); return 0; } From c0b11d3af164947c71e2491912c5b8418900dafb Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 25 Sep 2009 11:20:38 -0700 Subject: [PATCH 082/470] x86: Add VIA processor instructions in opcodes decoder Add VIA processor's Padlock instructions(MONTMUL, XSHA1, XSHA256) as parts of the kernel may use them. This fixes the following crash in opcodes decoder selftests: make[2]: `scripts/unifdef' is up to date. TEST posttest Error: c145cf71: f3 0f a6 d0 repz xsha256 Error: objdump says 4 bytes, but insn_get_length() says 3 (attr:0) make[1]: *** [posttest] Error 2 make: *** [bzImage] Error 2 Reported-by: Ingo Molnar Signed-off-by: Masami Hiramatsu Acked-by: Steven Rostedt Acked-by: Ingo Molnar Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: H. Peter Anvin Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: Peter Zijlstra Cc: Srikar Dronamraju Cc: Tom Zanussi LKML-Reference: <20090925182037.10157.3180.stgit@omoto> Signed-off-by: Frederic Weisbecker --- arch/x86/lib/x86-opcode-map.txt | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index 59e20d5c2a5..78a0daf12e1 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt @@ -469,7 +469,7 @@ a2: CPUID a3: BT Ev,Gv a4: SHLD Ev,Gv,Ib a5: SHLD Ev,Gv,CL -a6: +a6: GrpPDLK a7: GrpRNG a8: PUSH GS (d64) a9: POP GS (d64) @@ -803,6 +803,12 @@ GrpTable: Grp16 3: prefetch T2 EndTable +GrpTable: GrpPDLK +0: MONTMUL +1: XSHA1 +2: XSHA2 +EndTable + GrpTable: GrpRNG 0: xstore-rng 1: xcrypt-ecb From 88f70d7590538e427c8405a2e02ac2624847386c Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 25 Sep 2009 11:20:54 -0700 Subject: [PATCH 083/470] tracing/ftrace: Fix to check create_event_dir() when adding new events Check result of event_create_dir() and add ftrace_event_call to ftrace_events list only if it is succeeded. Thanks to Li for pointing it out. Signed-off-by: Masami Hiramatsu Acked-by: Steven Rostedt Acked-by: Ingo Molnar Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: H. 
Peter Anvin Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: Peter Zijlstra Cc: Srikar Dronamraju Cc: Tom Zanussi LKML-Reference: <20090925182054.10157.55219.stgit@omoto> Signed-off-by: Frederic Weisbecker --- kernel/trace/trace_events.c | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index a4b7c9a9130..155b5d5a4e4 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -957,12 +957,12 @@ static int __trace_add_event_call(struct ftrace_event_call *call) if (!d_events) return -ENOENT; - list_add(&call->list, &ftrace_events); ret = event_create_dir(call, d_events, &ftrace_event_id_fops, &ftrace_enable_fops, &ftrace_event_filter_fops, &ftrace_event_format_fops); - if (ret < 0) - list_del(&call->list); + if (!ret) + list_add(&call->list, &ftrace_events); + return ret; } @@ -1124,10 +1124,11 @@ static void trace_module_add_events(struct module *mod) return; } call->mod = mod; - list_add(&call->list, &ftrace_events); - event_create_dir(call, d_events, - &file_ops->id, &file_ops->enable, - &file_ops->filter, &file_ops->format); + ret = event_create_dir(call, d_events, + &file_ops->id, &file_ops->enable, + &file_ops->filter, &file_ops->format); + if (!ret) + list_add(&call->list, &ftrace_events); } } @@ -1267,10 +1268,12 @@ static __init int event_trace_init(void) continue; } } - list_add(&call->list, &ftrace_events); - event_create_dir(call, d_events, &ftrace_event_id_fops, - &ftrace_enable_fops, &ftrace_event_filter_fops, - &ftrace_event_format_fops); + ret = event_create_dir(call, d_events, &ftrace_event_id_fops, + &ftrace_enable_fops, + &ftrace_event_filter_fops, + &ftrace_event_format_fops); + if (!ret) + list_add(&call->list, &ftrace_events); } while (true) { From 9735abf11bec48bfbbb1b54772a02deb2ae0c403 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sat, 3 Oct 2009 10:42:45 -0300 Subject: [PATCH 084/470] perf tools: Move hist_entry__add common code to hist.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now perf report and annotate do the callgraph/hit processing in their specialized hist_entry__add functions. 
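Both callers now delegate the rb-tree walk to a shared helper that returns the (possibly pre-existing) entry together with a 'hit' flag, keeping only the caller-specific accounting. Roughly, the report-side pattern looks like this (illustrative sketch only, not part of the diff below):

	bool hit;
	struct hist_entry *he;

	he = __hist_entry__add(thread, map, sym, parent, ip, count, level, &hit);
	if (he == NULL)
		return -ENOMEM;
	if (hit)			/* entry already existed */
		he->count += count;	/* annotate calls hist_hit() here instead */
	if (callchain) {
		if (!hit)		/* fresh entry: set up its callchain root */
			callchain_init(&he->callchain);
		append_chain(&he->callchain, chain, syms);
	}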
Signed-off-by: Arnaldo Carvalho de Melo Acked-by: Frédéric Weisbecker Cc: Peter Zijlstra Cc: Mike Galbraith Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 50 +++++---------------------- tools/perf/builtin-report.c | 65 +++++++++-------------------------- tools/perf/util/hist.c | 46 +++++++++++++++++++++++++ tools/perf/util/hist.h | 3 ++ 4 files changed, 75 insertions(+), 89 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 7d5a3b1bcda..855094234f2 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -80,48 +80,16 @@ static void hist_hit(struct hist_entry *he, u64 ip) sym->hist[offset]); } -static int -hist_entry__add(struct thread *thread, struct map *map, - struct symbol *sym, u64 ip, char level) +static int hist_entry__add(struct thread *thread, struct map *map, + struct symbol *sym, u64 ip, u64 count, char level) { - struct rb_node **p = &hist.rb_node; - struct rb_node *parent = NULL; - struct hist_entry *he; - struct hist_entry entry = { - .thread = thread, - .map = map, - .sym = sym, - .ip = ip, - .level = level, - .count = 1, - }; - int cmp; - - while (*p != NULL) { - parent = *p; - he = rb_entry(parent, struct hist_entry, rb_node); - - cmp = hist_entry__cmp(&entry, he); - - if (!cmp) { - hist_hit(he, ip); - - return 0; - } - - if (cmp < 0) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - he = malloc(sizeof(*he)); - if (!he) + bool hit; + struct hist_entry *he = __hist_entry__add(thread, map, sym, NULL, ip, + count, level, &hit); + if (he == NULL) return -ENOMEM; - *he = entry; - rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, &hist); - + if (hit) + hist_hit(he, ip); return 0; } @@ -191,7 +159,7 @@ got_map: } if (show & show_mask) { - if (hist_entry__add(thread, map, sym, ip, level)) { + if (hist_entry__add(thread, map, sym, ip, 1, level)) { fprintf(stderr, "problem incrementing symbol count, skipping event\n"); return -1; diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 3ed3baf96ff..0e83ffcbe55 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -407,9 +407,9 @@ static int call__match(struct symbol *sym) return 0; } -static struct symbol ** -resolve_callchain(struct thread *thread, struct map *map, - struct ip_callchain *chain, struct hist_entry *entry) +static struct symbol **resolve_callchain(struct thread *thread, struct map *map, + struct ip_callchain *chain, + struct symbol **parent) { u64 context = PERF_CONTEXT_MAX; struct symbol **syms = NULL; @@ -444,9 +444,8 @@ resolve_callchain(struct thread *thread, struct map *map, } if (sym) { - if (sort__has_parent && call__match(sym) && - !entry->parent) - entry->parent = sym; + if (sort__has_parent && !*parent && call__match(sym)) + *parent = sym; if (!callchain) break; syms[i] = sym; @@ -465,57 +464,27 @@ hist_entry__add(struct thread *thread, struct map *map, struct symbol *sym, u64 ip, struct ip_callchain *chain, char level, u64 count) { - struct rb_node **p = &hist.rb_node; - struct rb_node *parent = NULL; + struct symbol **syms = NULL, *parent = NULL; + bool hit; struct hist_entry *he; - struct symbol **syms = NULL; - struct hist_entry entry = { - .thread = thread, - .map = map, - .sym = sym, - .ip = ip, - .level = level, - .count = count, - .parent = NULL, - .sorted_chain = RB_ROOT - }; - int cmp; if ((sort__has_parent || callchain) && chain) - syms = resolve_callchain(thread, map, chain, &entry); + syms = resolve_callchain(thread, map, chain, &parent); 
- while (*p != NULL) { - parent = *p; - he = rb_entry(parent, struct hist_entry, rb_node); - - cmp = hist_entry__cmp(&entry, he); - - if (!cmp) { - he->count += count; - if (callchain) { - append_chain(&he->callchain, chain, syms); - free(syms); - } - return 0; - } - - if (cmp < 0) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } - - he = malloc(sizeof(*he)); - if (!he) + he = __hist_entry__add(thread, map, sym, parent, + ip, count, level, &hit); + if (he == NULL) return -ENOMEM; - *he = entry; + + if (hit) + he->count += count; + if (callchain) { - callchain_init(&he->callchain); + if (!hit) + callchain_init(&he->callchain); append_chain(&he->callchain, chain, syms); free(syms); } - rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, &hist); return 0; } diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 82808dc4f8e..7393a02fd8d 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -21,6 +21,52 @@ unsigned long total_lost; * histogram, sorted on item, collects counts */ +struct hist_entry *__hist_entry__add(struct thread *thread, struct map *map, + struct symbol *sym, + struct symbol *sym_parent, + u64 ip, u64 count, char level, bool *hit) +{ + struct rb_node **p = &hist.rb_node; + struct rb_node *parent = NULL; + struct hist_entry *he; + struct hist_entry entry = { + .thread = thread, + .map = map, + .sym = sym, + .ip = ip, + .level = level, + .count = count, + .parent = sym_parent, + }; + int cmp; + + while (*p != NULL) { + parent = *p; + he = rb_entry(parent, struct hist_entry, rb_node); + + cmp = hist_entry__cmp(&entry, he); + + if (!cmp) { + *hit = true; + return he; + } + + if (cmp < 0) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + he = malloc(sizeof(*he)); + if (!he) + return NULL; + *he = entry; + rb_link_node(&he->rb_node, parent, p); + rb_insert_color(&he->rb_node, &hist); + *hit = false; + return he; +} + int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) { diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 9a8daa12b43..ac2149c559b 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -36,6 +36,9 @@ extern unsigned long total_fork; extern unsigned long total_unknown; extern unsigned long total_lost; +struct hist_entry *__hist_entry__add(struct thread *thread, struct map *map, + struct symbol *sym, struct symbol *parent, + u64 ip, u64 count, char level, bool *hit); extern int64_t hist_entry__cmp(struct hist_entry *, struct hist_entry *); extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *); extern void hist_entry__free(struct hist_entry *); From ec218fc4a796a1b584741d59ef22615d96981188 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sat, 3 Oct 2009 20:30:48 -0300 Subject: [PATCH 085/470] perf tools: Remove show_mask bitmask MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As it was not being exposed via any command line and with --dsos/--comms we can do this and even more, like asking for just kernel + some module: [root@doppio linux-2.6-tip]# perf report --dsos \[kernel\],\[drm\] --vmlinux /home/acme/git/build/tip-recvmmsg/vmlinux --modules | head -15 # Samples: 619669 # # Overhead Command Shared Object Symbol # ........ ............... ............. ...... # 7.12% swapper [kernel] [k] read_hpet 6.86% init [kernel] [k] read_hpet 6.22% init [kernel] [k] mwait_idle_with_hints 5.34% swapper [kernel] [k] mwait_idle_with_hints 3.01% firefox [kernel] [.] 
vread_hpet 2.14% Xorg [drm] [k] drm_clflush_pages 2.09% pidgin [kernel] [.] vread_hpet 1.58% npviewer.bin [kernel] [.] vread_hpet 1.37% swapper [kernel] [k] hpet_next_event 1.23% Xorg [kernel] [k] read_hpet [root@doppio linux-2.6-tip]# Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Peter Zijlstra Cc: Mike Galbraith LKML-Reference: <20091003233048.GA30535@ghostprotocols.net> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 1 + tools/perf/builtin-annotate.c | 15 ++++----------- tools/perf/builtin-report.c | 35 +++++++++++++---------------------- tools/perf/util/event.h | 6 ------ 4 files changed, 18 insertions(+), 39 deletions(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 8e7509f2d88..2c309a5c686 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -323,6 +323,7 @@ LIB_H += ../../include/linux/rbtree.h LIB_H += ../../include/linux/list.h LIB_H += util/include/linux/list.h LIB_H += perf.h +LIB_H += util/event.h LIB_H += util/types.h LIB_H += util/levenshtein.h LIB_H += util/parse-options.h diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 855094234f2..35ed97bd0c6 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -29,7 +29,6 @@ static char const *input_name = "perf.data"; static int force; static int input; -static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; static int full_paths; @@ -97,7 +96,6 @@ static int process_sample_event(event_t *event, unsigned long offset, unsigned long head) { char level; - int show = 0; struct thread *thread; u64 ip = event->ip.ip; struct map *map = NULL; @@ -121,13 +119,11 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) } if (event->header.misc & PERF_RECORD_MISC_KERNEL) { - show = SHOW_KERNEL; level = 'k'; sym = kernel_maps__find_symbol(ip, &map); dump_printf(" ...... dso: %s\n", map ? map->dso->long_name : ""); } else if (event->header.misc & PERF_RECORD_MISC_USER) { - show = SHOW_USER; level = '.'; map = thread__find_map(thread, ip); if (map != NULL) { @@ -153,17 +149,14 @@ got_map: dump_printf(" ...... dso: %s\n", map ? map->dso->long_name : ""); } else { - show = SHOW_HV; level = 'H'; dump_printf(" ...... dso: [hypervisor]\n"); } - if (show & show_mask) { - if (hist_entry__add(thread, map, sym, ip, 1, level)) { - fprintf(stderr, - "problem incrementing symbol count, skipping event\n"); - return -1; - } + if (hist_entry__add(thread, map, sym, ip, 1, level)) { + fprintf(stderr, "problem incrementing symbol count, " + "skipping event\n"); + return -1; } total++; diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 0e83ffcbe55..fe4aadc9630 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -38,7 +38,6 @@ static struct strlist *dso_list, *comm_list, *sym_list; static int force; static int input; -static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; static int full_paths; static int show_nr_samples; @@ -600,7 +599,6 @@ static int process_sample_event(event_t *event, unsigned long offset, unsigned long head) { char level; - int show = 0; struct symbol *sym = NULL; struct thread *thread; u64 ip = event->ip.ip; @@ -657,42 +655,35 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; if (cpumode == PERF_RECORD_MISC_KERNEL) { - show = SHOW_KERNEL; level = 'k'; - sym = kernel_maps__find_symbol(ip, &map); dump_printf(" ...... dso: %s\n", map ? 
map->dso->long_name : ""); } else if (cpumode == PERF_RECORD_MISC_USER) { - - show = SHOW_USER; level = '.'; sym = resolve_symbol(thread, &map, &ip); } else { - show = SHOW_HV; level = 'H'; - dump_printf(" ...... dso: [hypervisor]\n"); } - if (show & show_mask) { - if (dso_list && - (!map || !map->dso || - !(strlist__has_entry(dso_list, map->dso->short_name) || - (map->dso->short_name != map->dso->long_name && - strlist__has_entry(dso_list, map->dso->long_name))))) - return 0; + if (dso_list && + (!map || !map->dso || + !(strlist__has_entry(dso_list, map->dso->short_name) || + (map->dso->short_name != map->dso->long_name && + strlist__has_entry(dso_list, map->dso->long_name))))) + return 0; - if (sym_list && sym && !strlist__has_entry(sym_list, sym->name)) - return 0; + if (sym_list && sym && !strlist__has_entry(sym_list, sym->name)) + return 0; - if (hist_entry__add(thread, map, sym, ip, - chain, level, period)) { - eprintf("problem incrementing symbol count, skipping event\n"); - return -1; - } + if (hist_entry__add(thread, map, sym, ip, + chain, level, period)) { + eprintf("problem incrementing symbol count, skipping event\n"); + return -1; } + total += period; return 0; diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index a39520e6ae8..c2e62be6279 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -6,12 +6,6 @@ #include #include -enum { - SHOW_KERNEL = 1, - SHOW_USER = 2, - SHOW_HV = 4, -}; - /* * PERF_SAMPLE_IP | PERF_SAMPLE_TID | * */ From 5c2068059a0e852f72b7c2608d92170b752d821f Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 5 Oct 2009 14:26:15 -0300 Subject: [PATCH 086/470] perf top: Keep the default of asking for kernel module symbols MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Peter Zijlstra Cc: Mike Galbraith LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index befef842757..34d48c1b7a8 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -804,10 +804,8 @@ static int symbol_filter(struct map *map, struct symbol *sym) static int parse_symbols(void) { - int use_modules = vmlinux_name ? 1 : 0; - if (dsos__load_kernel(vmlinux_name, sizeof(struct sym_entry), - symbol_filter, verbose, use_modules) <= 0) + symbol_filter, verbose, 1) <= 0) return -1; if (dump_symtab) From af427bf529c5991be8d1a36f43e2d0141f532f63 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 5 Oct 2009 14:26:17 -0300 Subject: [PATCH 087/470] perf tools: Create maps for modules when processing kallsyms MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit So that we get kallsyms processing closer to vmlinux + modules symtabs processing. One change in behaviour is that since when one specifies --vmlinux -m should be used to ask for modules, so it is now for kallsyms as well. Also continue if one manages to load the vmlinux data but module processing fails, so that at least some analisys can be done with part of the needed symbols. 
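As background for the kallsyms parsing below: module symbols in /proc/kallsyms carry a trailing "[module]" annotation, and the new code files such symbols under the map of that module rather than under the main kernel map. A minimal stand-alone sketch of that split (helper name and buffer size are made up for illustration, this is not the patch's code):

#include <stdio.h>
#include <string.h>

/*
 * /proc/kallsyms lines look like:
 *
 *   ffffffff810a1b20 T schedule
 *   ffffffffa0091000 t aes_encrypt	[aes_x86_64]
 */
static void split_kallsyms_line(char *line)
{
	unsigned long long start;
	char symbol_type;
	char symbol_name[256];
	char *module, *end;

	if (sscanf(line, "%llx %c %255s", &start, &symbol_type, symbol_name) != 3)
		return;

	module = strchr(line, '[');
	if (module) {
		end = strchr(module + 1, ']');
		if (end)
			*end = '\0';
		printf("%s at %#llx in module %s\n", symbol_name, start, module + 1);
	} else {
		printf("%s at %#llx in the core kernel\n", symbol_name, start);
	}
}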
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Peter Zijlstra Cc: Mike Galbraith LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 161 +++++++++++++++++++++++++++++---------- tools/perf/util/symbol.h | 2 - 2 files changed, 122 insertions(+), 41 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index e8829689947..4dfdefd5ec7 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -27,6 +27,44 @@ enum dso_origin { static void dsos__add(struct dso *dso); static struct dso *dsos__find(const char *name); +static struct rb_root kernel_maps; + +static void dso__set_symbols_end(struct dso *self) +{ + struct rb_node *nd, *prevnd = rb_first(&self->syms); + + if (prevnd == NULL) + return; + + for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { + struct symbol *prev = rb_entry(prevnd, struct symbol, rb_node), + *curr = rb_entry(nd, struct symbol, rb_node); + + if (prev->end == prev->start) + prev->end = curr->start - 1; + prevnd = nd; + } +} + +static void kernel_maps__fixup_sym_end(void) +{ + struct map *prev, *curr; + struct rb_node *nd, *prevnd = rb_first(&kernel_maps); + + if (prevnd == NULL) + return; + + curr = rb_entry(prevnd, struct map, rb_node); + dso__set_symbols_end(curr->dso); + + for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { + prev = curr; + curr = rb_entry(nd, struct map, rb_node); + prev->end = curr->start - 1; + dso__set_symbols_end(curr->dso); + } +} + static struct symbol *symbol__new(u64 start, u64 len, const char *name, unsigned int priv_size, int v) { @@ -162,10 +200,9 @@ size_t dso__fprintf(struct dso *self, FILE *fp) return ret; } -static int dso__load_kallsyms(struct dso *self, struct map *map, - symbol_filter_t filter, int v) +static int maps__load_kallsyms(symbol_filter_t filter, int use_modules, int v) { - struct rb_node *nd, *prevnd; + struct map *map = kernel_map; char *line = NULL; size_t n; FILE *file = fopen("/proc/kallsyms", "r"); @@ -179,6 +216,7 @@ static int dso__load_kallsyms(struct dso *self, struct map *map, struct symbol *sym; int line_len, len; char symbol_type; + char *module, *symbol_name; line_len = getline(&line, &n, file); if (line_len < 0) @@ -201,40 +239,50 @@ static int dso__load_kallsyms(struct dso *self, struct map *map, */ if (symbol_type != 'T' && symbol_type != 'W') continue; + + symbol_name = line + len + 2; + module = strchr(symbol_name, '\t'); + if (module) { + char *module_name_end; + + if (!use_modules) + continue; + *module = '\0'; + module = strchr(module + 1, '['); + if (!module) + continue; + module_name_end = strchr(module + 1, ']'); + if (!module_name_end) + continue; + *(module_name_end + 1) = '\0'; + if (strcmp(map->dso->name, module)) { + map = kernel_maps__find_by_dso_name(module); + if (!map) { + fputs("/proc/{kallsyms,modules} " + "inconsistency!\n", stderr); + return -1; + } + } + start = map->map_ip(map, start); + } else + map = kernel_map; /* * Well fix up the end later, when we have all sorted. 
*/ - sym = symbol__new(start, 0xdead, line + len + 2, - self->sym_priv_size, v); + sym = symbol__new(start, 0, symbol_name, + map->dso->sym_priv_size, v); if (sym == NULL) goto out_delete_line; if (filter && filter(map, sym)) - symbol__delete(sym, self->sym_priv_size); + symbol__delete(sym, map->dso->sym_priv_size); else { - dso__insert_symbol(self, sym); + dso__insert_symbol(map->dso, sym); count++; } } - /* - * Now that we have all sorted out, just set the ->end of all - * symbols - */ - prevnd = rb_first(&self->syms); - - if (prevnd == NULL) - goto out_delete_line; - - for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { - struct symbol *prev = rb_entry(prevnd, struct symbol, rb_node), - *curr = rb_entry(nd, struct symbol, rb_node); - - prev->end = curr->start - 1; - prevnd = nd; - } - free(line); fclose(file); @@ -246,6 +294,24 @@ out_failure: return -1; } +static size_t kernel_maps__fprintf(FILE *fp) +{ + size_t printed = fprintf(stderr, "Kernel maps:\n"); + struct rb_node *nd; + + printed += map__fprintf(kernel_map, fp); + printed += dso__fprintf(kernel_map->dso, fp); + + for (nd = rb_first(&kernel_maps); nd; nd = rb_next(nd)) { + struct map *pos = rb_entry(nd, struct map, rb_node); + + printed += map__fprintf(pos, fp); + printed += dso__fprintf(pos->dso, fp); + } + + return printed + fprintf(stderr, "END kernel maps\n"); +} + static int dso__load_perf_map(struct dso *self, struct map *map, symbol_filter_t filter, int v) { @@ -598,6 +664,7 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, char *demangled; int is_label = elf_sym__is_label(&sym); const char *section_name; + u64 sh_offset = 0; if (!is_label && !elf_sym__is_function(&sym)) continue; @@ -613,14 +680,18 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, section_name = elf_sec__name(&shdr, secstrs); + if ((kernel || kmodule)) { + if (strstr(section_name, ".init")) + sh_offset = shdr.sh_offset; + } + if (self->adjust_symbols) { if (v >= 2) printf("adjusting symbol: st_value: %Lx sh_addr: %Lx sh_offset: %Lx\n", (u64)sym.st_value, (u64)shdr.sh_addr, (u64)shdr.sh_offset); sym.st_value -= shdr.sh_addr - shdr.sh_offset; - } else if (kmodule) - sym.st_value += shdr.sh_offset; + } /* * We need to figure out if the object was created from C++ sources * DWARF DW_compile_unit has this, but we don't always have access @@ -631,7 +702,7 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, if (demangled != NULL) elf_name = demangled; - f = symbol__new(sym.st_value, sym.st_size, elf_name, + f = symbol__new(sym.st_value + sh_offset, sym.st_size, elf_name, self->sym_priv_size, v); free(demangled); if (!f) @@ -804,7 +875,6 @@ out: return ret; } -static struct rb_root kernel_maps; struct map *kernel_map; static void kernel_maps__insert(struct map *map) @@ -975,8 +1045,7 @@ static struct map *map__new2(u64 start, struct dso *dso) return self; } -int dsos__load_modules(unsigned int sym_priv_size, - symbol_filter_t filter, int v) +static int dsos__load_modules(unsigned int sym_priv_size) { char *line = NULL; size_t n; @@ -1034,8 +1103,7 @@ int dsos__load_modules(unsigned int sym_priv_size, free(line); fclose(file); - v = 1; - return dsos__load_modules_sym(filter, v); + return 0; out_delete_line: free(line); @@ -1075,25 +1143,37 @@ int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, kernel_map->map_ip = vdso__map_ip; + if (use_modules && dsos__load_modules(sym_priv_size) < 0) { + fprintf(stderr, "Failed to load list of modules in use! 
" + "Continuing...\n"); + use_modules = 0; + } + if (vmlinux) { err = dso__load_vmlinux(dso, kernel_map, vmlinux, filter, v); if (err > 0 && use_modules) { - int syms = dsos__load_modules(sym_priv_size, filter, v); + int syms = dsos__load_modules_sym(filter, v); - if (syms < 0) { - fprintf(stderr, "dsos__load_modules failed!\n"); - return syms; - } - err += syms; + if (syms < 0) + fprintf(stderr, "Failed to read module symbols!" + " Continuing...\n"); + else + err += syms; } } if (err <= 0) - err = dso__load_kallsyms(dso, kernel_map, filter, v); + err = maps__load_kallsyms(filter, use_modules, v); if (err > 0) { struct rb_node *node = rb_first(&dso->syms); struct symbol *sym = rb_entry(node, struct symbol, rb_node); + /* + * Now that we have all sorted out, just set the ->end of all + * symbols that still don't have it. + */ + dso__set_symbols_end(dso); + kernel_maps__fixup_sym_end(); kernel_map->start = sym->start; node = rb_last(&dso->syms); @@ -1106,6 +1186,9 @@ int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, * kernel_maps__insert(kernel_map) */ dsos__add(dso); + + if (v > 0) + kernel_maps__fprintf(stderr); } return err; diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 5339fd82ec9..2e4522edeb0 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -70,8 +70,6 @@ struct symbol *dso__find_symbol(struct dso *self, u64 ip); int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, symbol_filter_t filter, int verbose, int modules); -int dsos__load_modules(unsigned int sym_priv_size, symbol_filter_t filter, - int verbose); int dso__load(struct dso *self, struct map *map, symbol_filter_t filter, int verbose); struct dso *dsos__findnew(const char *name); From a2a99e8e12798706ec1026e5d8fc36f7c86122ce Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 5 Oct 2009 14:26:18 -0300 Subject: [PATCH 088/470] perf tools: /proc/modules names don't always match its name MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit $ cut -d' ' -f1 /proc/modules|grep _|wc -l 29 $ cut -d' ' -f1 /proc/modules|grep _|sed 's/$/.ko'/g|while read n;do find /lib/modules/`uname -r` -name $n;done|wc -l 12 For instance: $ grep ^aes_x86 /proc/modules aes_x86_64 9056 2 - Live 0xffffffffa0091000 $ l /lib/modules/2.6.31-tip/kernel/arch/x86/crypto/aes-x86_64.ko -rw-r--r-- 1 root root 136438 2009-09-22 19:05 /lib/modules/2.6.31-tip/kernel/arch/x86/crypto/aes-x86_64.ko Handle that by introducing a strxfrchar routine that replaces dashes with underscores when matching file names to loaded modules. 
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Peter Zijlstra Cc: Mike Galbraith LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/util/string.c | 11 +++++++++++ tools/perf/util/string.h | 1 + tools/perf/util/symbol.c | 3 ++- 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c index c93eca9a7be..04743d3e903 100644 --- a/tools/perf/util/string.c +++ b/tools/perf/util/string.c @@ -1,3 +1,4 @@ +#include #include "string.h" static int hex(char ch) @@ -32,3 +33,13 @@ int hex2u64(const char *ptr, u64 *long_val) return p - ptr; } + +char *strxfrchar(char *s, char from, char to) +{ + char *p = s; + + while ((p = strchr(p, from)) != NULL) + *p++ = to; + + return s; +} diff --git a/tools/perf/util/string.h b/tools/perf/util/string.h index 15c827475e7..2c84bf65ba0 100644 --- a/tools/perf/util/string.h +++ b/tools/perf/util/string.h @@ -4,6 +4,7 @@ #include "types.h" int hex2u64(const char *ptr, u64 *val); +char *strxfrchar(char *s, char from, char to); #define _STR(x) #x #define STR(x) _STR(x) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 4dfdefd5ec7..e3eebdd682d 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -189,7 +189,7 @@ struct symbol *dso__find_symbol(struct dso *self, u64 ip) size_t dso__fprintf(struct dso *self, FILE *fp) { - size_t ret = fprintf(fp, "dso: %s\n", self->long_name); + size_t ret = fprintf(fp, "dso: %s\n", self->short_name); struct rb_node *nd; for (nd = rb_first(&self->syms); nd; nd = rb_next(nd)) { @@ -977,6 +977,7 @@ static int dsos__load_modules_sym_dir(char *dirname, snprintf(dso_name, sizeof(dso_name), "[%.*s]", (int)(dot - dent->d_name), dent->d_name); + strxfrchar(dso_name, '-', '_'); map = kernel_maps__find_by_dso_name(dso_name); if (map == NULL) continue; From c3b32fcbc7f4fd9a9b84718b991b175b0fd53f8c Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 5 Oct 2009 14:26:16 -0300 Subject: [PATCH 089/470] perf report: Use kernel_maps__find_symbol as fallback to find vdsos, etc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In resolve_symbol, as we're moving to breaking the kernel symbols list per address ranges, i.e. kernel linking sections, so that we don't have a big kernel_map that in its range covers what is in the modules. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Peter Zijlstra Cc: Mike Galbraith LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/builtin-report.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index fe4aadc9630..12f8c868fcd 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -384,11 +384,8 @@ got_map: * the "[vdso]" dso, but for now lets use the old * trick of looking in the whole kernel symbol list. */ - if ((long long)ip < 0) { - map = kernel_map; - if (mapp) - *mapp = map; - } + if ((long long)ip < 0) + return kernel_maps__find_symbol(ip, mapp); } dump_printf(" ...... dso: %s\n", map ? map->dso->long_name : ""); From 26a50744b21fff65bd754874072857bee8967f4d Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Tue, 6 Oct 2009 01:09:50 -0500 Subject: [PATCH 090/470] tracing/events: Add 'signed' field to format files The sign info used for filters in the kernel is also useful to applications that process the trace stream. Add it to the format files and make it available to userspace. 
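With this change every field description in a format file carries a fourth attribute, for example:

	field:unsigned short common_type;	offset:0;	size:2;	signed:0;
	field:int common_pid;	offset:4;	size:4;	signed:1;

is_signed_type() only has to decide at compile time whether a C type can represent negative values; one possible definition (assumed here for illustration, it is not part of this patch) is:

	#define is_signed_type(type)	(((type)(-1)) < (type)1)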
Signed-off-by: Tom Zanussi Acked-by: Frederic Weisbecker Cc: rostedt@goodmis.org Cc: lizf@cn.fujitsu.com Cc: hch@infradead.org Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: <1254809398-8078-2-git-send-email-tzanussi@gmail.com> Signed-off-by: Ingo Molnar --- include/trace/ftrace.h | 15 +++++++++------ kernel/trace/ring_buffer.c | 15 +++++++++------ kernel/trace/trace_events.c | 24 ++++++++++++------------ kernel/trace/trace_export.c | 25 ++++++++++++++----------- kernel/trace/trace_syscalls.c | 20 +++++++++++++------- tools/perf/util/trace-event-parse.c | 24 ++++++++++++++++++++++++ tools/perf/util/trace-event.h | 1 + 7 files changed, 82 insertions(+), 42 deletions(-) diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index cc0d9667e18..c9bbcab95fb 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -120,9 +120,10 @@ #undef __field #define __field(type, item) \ ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ - "offset:%u;\tsize:%u;\n", \ + "offset:%u;\tsize:%u;\tsigned:%u;\n", \ (unsigned int)offsetof(typeof(field), item), \ - (unsigned int)sizeof(field.item)); \ + (unsigned int)sizeof(field.item), \ + (unsigned int)is_signed_type(type)); \ if (!ret) \ return 0; @@ -132,19 +133,21 @@ #undef __array #define __array(type, item, len) \ ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ - "offset:%u;\tsize:%u;\n", \ + "offset:%u;\tsize:%u;\tsigned:%u;\n", \ (unsigned int)offsetof(typeof(field), item), \ - (unsigned int)sizeof(field.item)); \ + (unsigned int)sizeof(field.item), \ + (unsigned int)is_signed_type(type)); \ if (!ret) \ return 0; #undef __dynamic_array #define __dynamic_array(type, item, len) \ ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\ - "offset:%u;\tsize:%u;\n", \ + "offset:%u;\tsize:%u;\tsigned:%u;\n", \ (unsigned int)offsetof(typeof(field), \ __data_loc_##item), \ - (unsigned int)sizeof(field.__data_loc_##item)); \ + (unsigned int)sizeof(field.__data_loc_##item), \ + (unsigned int)is_signed_type(type)); \ if (!ret) \ return 0; diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index d4ff0197054..e43c928356e 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -397,18 +397,21 @@ int ring_buffer_print_page_header(struct trace_seq *s) int ret; ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t" - "offset:0;\tsize:%u;\n", - (unsigned int)sizeof(field.time_stamp)); + "offset:0;\tsize:%u;\tsigned:%u;\n", + (unsigned int)sizeof(field.time_stamp), + (unsigned int)is_signed_type(u64)); ret = trace_seq_printf(s, "\tfield: local_t commit;\t" - "offset:%u;\tsize:%u;\n", + "offset:%u;\tsize:%u;\tsigned:%u;\n", (unsigned int)offsetof(typeof(field), commit), - (unsigned int)sizeof(field.commit)); + (unsigned int)sizeof(field.commit), + (unsigned int)is_signed_type(long)); ret = trace_seq_printf(s, "\tfield: char data;\t" - "offset:%u;\tsize:%u;\n", + "offset:%u;\tsize:%u;\tsigned:%u;\n", (unsigned int)offsetof(typeof(field), data), - (unsigned int)BUF_PAGE_SIZE); + (unsigned int)BUF_PAGE_SIZE, + (unsigned int)is_signed_type(char)); return ret; } diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index d128f65778e..cf3cabf6ce1 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -507,7 +507,7 @@ extern char *__bad_type_size(void); #define FIELD(type, name) \ sizeof(type) != sizeof(field.name) ? 
__bad_type_size() : \ #type, "common_" #name, offsetof(typeof(field), name), \ - sizeof(field.name) + sizeof(field.name), is_signed_type(type) static int trace_write_header(struct trace_seq *s) { @@ -515,17 +515,17 @@ static int trace_write_header(struct trace_seq *s) /* struct trace_entry */ return trace_seq_printf(s, - "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" - "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" - "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" - "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" - "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" - "\n", - FIELD(unsigned short, type), - FIELD(unsigned char, flags), - FIELD(unsigned char, preempt_count), - FIELD(int, pid), - FIELD(int, lock_depth)); + "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n" + "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n" + "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n" + "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n" + "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n" + "\n", + FIELD(unsigned short, type), + FIELD(unsigned char, flags), + FIELD(unsigned char, preempt_count), + FIELD(int, pid), + FIELD(int, lock_depth)); } static ssize_t diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index 9753fcc61bc..31da218ee10 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c @@ -66,44 +66,47 @@ static void __used ____ftrace_check_##name(void) \ #undef __field #define __field(type, item) \ ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ - "offset:%zu;\tsize:%zu;\n", \ + "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \ offsetof(typeof(field), item), \ - sizeof(field.item)); \ + sizeof(field.item), is_signed_type(type)); \ if (!ret) \ return 0; #undef __field_desc #define __field_desc(type, container, item) \ ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ - "offset:%zu;\tsize:%zu;\n", \ + "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \ offsetof(typeof(field), container.item), \ - sizeof(field.container.item)); \ + sizeof(field.container.item), \ + is_signed_type(type)); \ if (!ret) \ return 0; #undef __array #define __array(type, item, len) \ ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ - "offset:%zu;\tsize:%zu;\n", \ - offsetof(typeof(field), item), \ - sizeof(field.item)); \ + "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \ + offsetof(typeof(field), item), \ + sizeof(field.item), is_signed_type(type)); \ if (!ret) \ return 0; #undef __array_desc #define __array_desc(type, container, item, len) \ ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ - "offset:%zu;\tsize:%zu;\n", \ + "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \ offsetof(typeof(field), container.item), \ - sizeof(field.container.item)); \ + sizeof(field.container.item), \ + is_signed_type(type)); \ if (!ret) \ return 0; #undef __dynamic_array #define __dynamic_array(type, item) \ ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ - "offset:%zu;\tsize:0;\n", \ - offsetof(typeof(field), item)); \ + "offset:%zu;\tsize:0;\tsigned:%u;\n", \ + offsetof(typeof(field), item), \ + is_signed_type(type)); \ if (!ret) \ return 0; diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 527e17eae57..d99abc427c3 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -103,7 +103,8 @@ extern char *__bad_type_size(void); #define SYSCALL_FIELD(type, name) \ sizeof(type) != sizeof(trace.name) ? 
\ __bad_type_size() : \ - #type, #name, offsetof(typeof(trace), name), sizeof(trace.name) + #type, #name, offsetof(typeof(trace), name), \ + sizeof(trace.name), is_signed_type(type) int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s) { @@ -120,7 +121,8 @@ int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s) if (!entry) return 0; - ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n", + ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;" + "\tsigned:%u;\n", SYSCALL_FIELD(int, nr)); if (!ret) return 0; @@ -130,8 +132,10 @@ int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s) entry->args[i]); if (!ret) return 0; - ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;\n", offset, - sizeof(unsigned long)); + ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;" + "\tsigned:%u;\n", offset, + sizeof(unsigned long), + is_signed_type(unsigned long)); if (!ret) return 0; offset += sizeof(unsigned long); @@ -163,8 +167,10 @@ int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s) struct syscall_trace_exit trace; ret = trace_seq_printf(s, - "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" - "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n", + "\tfield:%s %s;\toffset:%zu;\tsize:%zu;" + "\tsigned:%u;\n" + "\tfield:%s %s;\toffset:%zu;\tsize:%zu;" + "\tsigned:%u;\n", SYSCALL_FIELD(int, nr), SYSCALL_FIELD(long, ret)); if (!ret) @@ -212,7 +218,7 @@ int syscall_exit_define_fields(struct ftrace_event_call *call) if (ret) return ret; - ret = trace_define_field(call, SYSCALL_FIELD(long, ret), 0, + ret = trace_define_field(call, SYSCALL_FIELD(long, ret), FILTER_OTHER); return ret; diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 55b41b9e383..be8412d699a 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -894,6 +894,21 @@ static int event_read_fields(struct event *event, struct format_field **fields) field->size = strtoul(token, NULL, 0); free_token(token); + if (read_expected(EVENT_OP, (char *)";") < 0) + goto fail_expect; + + if (read_expected(EVENT_ITEM, (char *)"signed") < 0) + goto fail_expect; + + if (read_expected(EVENT_OP, (char *)":") < 0) + goto fail_expect; + + if (read_expect_type(EVENT_ITEM, &token)) + goto fail; + if (strtoul(token, NULL, 0)) + field->flags |= FIELD_IS_SIGNED; + free_token(token); + if (read_expected(EVENT_OP, (char *)";") < 0) goto fail_expect; @@ -2843,6 +2858,15 @@ static void parse_header_field(char *type, return; *size = atoi(token); free_token(token); + if (read_expected(EVENT_OP, (char *)";") < 0) + return; + if (read_expected(EVENT_ITEM, (char *)"signed") < 0) + return; + if (read_expected(EVENT_OP, (char *)":") < 0) + return; + if (read_expect_type(EVENT_ITEM, &token) < 0) + return; + free_token(token); if (read_expected(EVENT_OP, (char *)";") < 0) return; if (read_expect_type(EVENT_NEWLINE, &token) < 0) diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index 162c3e6deb9..00b440df66d 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -26,6 +26,7 @@ enum { enum format_flags { FIELD_IS_ARRAY = 1, FIELD_IS_POINTER = 2, + FIELD_IS_SIGNED = 4, }; struct format_field { From 2774601811bedd04ee7e38624343ea80b4a62d7e Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Tue, 6 Oct 2009 01:09:51 -0500 Subject: [PATCH 091/470] perf trace: Add subsystem string to struct event Needed to fully qualify event names for event stream processing. 
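A sketch of the intended use (the helper is hypothetical and assumes the existing ->name member of struct event): once ->system is filled in, a stream processor can emit fully qualified names such as "sched:sched_switch".

	static void print_qualified_name(struct event *event)
	{
		printf("%s:%s\n", event->system, event->name);
	}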
Signed-off-by: Tom Zanussi Acked-by: Frederic Weisbecker Cc: rostedt@goodmis.org Cc: lizf@cn.fujitsu.com Cc: hch@infradead.org Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: <1254809398-8078-3-git-send-email-tzanussi@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 4 +++- tools/perf/util/trace-event.h | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index be8412d699a..de3fc8bf8bf 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -2950,7 +2950,7 @@ int parse_ftrace_file(char *buf, unsigned long size) return 0; } -int parse_event_file(char *buf, unsigned long size, char *system__unused __unused) +int parse_event_file(char *buf, unsigned long size, char *sys) { struct event *event; int ret; @@ -2977,6 +2977,8 @@ int parse_event_file(char *buf, unsigned long size, char *system__unused __unuse if (ret < 0) die("failed to read event print fmt"); + event->system = strdup(sys); + #define PRINT_ARGS 0 if (PRINT_ARGS && event->print_fmt.args) print_args(event->print_fmt.args); diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index 00b440df66d..cb92978be30 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -133,6 +133,7 @@ struct event { int flags; struct format format; struct print_fmt print_fmt; + char *system; }; enum { @@ -167,7 +168,7 @@ void print_funcs(void); void print_printk(void); int parse_ftrace_file(char *buf, unsigned long size); -int parse_event_file(char *buf, unsigned long size, char *system); +int parse_event_file(char *buf, unsigned long size, char *sys); void print_event(int cpu, void *data, int size, unsigned long long nsecs, char *comm); From 064739bc4b3d7f424b2f25547e6611bcf0132415 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Tue, 6 Oct 2009 01:09:52 -0500 Subject: [PATCH 092/470] perf trace: Add string/dynamic cases to format_flags Needed for distinguishing string fields in event stream processing. 
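A sketch of how a consumer of raw sample data might use the two new flags when printing a field (print_data_loc_string() is hypothetical; for a __data_loc field the value stored at the field offset only says where the actual bytes live inside the record):

	if (field->flags & FIELD_IS_STRING) {
		if (field->flags & FIELD_IS_DYNAMIC)
			print_data_loc_string(data, field);	/* hypothetical */
		else
			printf("%.*s", field->size,
			       (const char *)data + field->offset);
	}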
Signed-off-by: Tom Zanussi Acked-by: Frederic Weisbecker Cc: rostedt@goodmis.org Cc: lizf@cn.fujitsu.com Cc: hch@infradead.org Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: <1254809398-8078-4-git-send-email-tzanussi@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 24 ++++++++++++++++++++++++ tools/perf/util/trace-event.h | 2 ++ 2 files changed, 26 insertions(+) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index de3fc8bf8bf..6f851f98b5b 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -721,6 +721,24 @@ static int event_read_id(void) return -1; } +static int field_is_string(struct format_field *field) +{ + if ((field->flags & FIELD_IS_ARRAY) && + (!strstr(field->type, "char") || !strstr(field->type, "u8") || + !strstr(field->type, "s8"))) + return 1; + + return 0; +} + +static int field_is_dynamic(struct format_field *field) +{ + if (!strcmp(field->type, "__data_loc")) + return 1; + + return 0; +} + static int event_read_fields(struct event *event, struct format_field **fields) { struct format_field *field = NULL; @@ -865,6 +883,12 @@ static int event_read_fields(struct event *event, struct format_field **fields) free(brackets); } + if (field_is_string(field)) { + field->flags |= FIELD_IS_STRING; + if (field_is_dynamic(field)) + field->flags |= FIELD_IS_DYNAMIC; + } + if (test_type_token(type, token, EVENT_OP, (char *)";")) goto fail; free_token(token); diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index cb92978be30..5f59a39fb88 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -27,6 +27,8 @@ enum format_flags { FIELD_IS_ARRAY = 1, FIELD_IS_POINTER = 2, FIELD_IS_SIGNED = 4, + FIELD_IS_STRING = 8, + FIELD_IS_DYNAMIC = 16, }; struct format_field { From 42e59d7d19dc4b49feab2a860fd9a8ca3248c833 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 6 Oct 2009 15:14:21 +0200 Subject: [PATCH 093/470] perf tools: Default to 1 KHz auto-sampling freq events Use auto-freq events by default in perf record and perf top. This allows more consistent hardware event sampling, regardless of the intensity of the underlying event. It also keeps us from over-sampling on larger/busier systems. 
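What auto-frequency sampling amounts to at the perf_event_attr level, as a minimal sketch (not taken from this patch): rather than a fixed period, the kernel is asked to adjust the period so that roughly sample_freq samples per second are produced. Such an attr is then passed to the perf_event_open() syscall for each counter.

	struct perf_event_attr attr = {
		.size		= sizeof(attr),
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.freq		= 1,		/* interpret sample_freq as a rate, not a period */
		.sample_freq	= 1000,		/* aim for ~1000 samples per second */
	};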
(also make surrounding initializations more consistent) Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 50 ++++++++++++++++++------------------- tools/perf/builtin-top.c | 38 ++++++++++++++-------------- 2 files changed, 44 insertions(+), 44 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 3eeef339c78..494f8c7d752 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -29,43 +29,43 @@ static int fd[MAX_NR_CPUS][MAX_COUNTERS]; static long default_interval = 100000; -static int nr_cpus = 0; +static int nr_cpus = 0; static unsigned int page_size; -static unsigned int mmap_pages = 128; -static int freq = 0; +static unsigned int mmap_pages = 128; +static int freq = 1000; static int output; static const char *output_name = "perf.data"; -static int group = 0; -static unsigned int realtime_prio = 0; -static int raw_samples = 0; -static int system_wide = 0; -static int profile_cpu = -1; -static pid_t target_pid = -1; -static pid_t child_pid = -1; -static int inherit = 1; -static int force = 0; -static int append_file = 0; -static int call_graph = 0; -static int inherit_stat = 0; -static int no_samples = 0; -static int sample_address = 0; -static int multiplex = 0; -static int multiplex_fd = -1; +static int group = 0; +static unsigned int realtime_prio = 0; +static int raw_samples = 0; +static int system_wide = 0; +static int profile_cpu = -1; +static pid_t target_pid = -1; +static pid_t child_pid = -1; +static int inherit = 1; +static int force = 0; +static int append_file = 0; +static int call_graph = 0; +static int inherit_stat = 0; +static int no_samples = 0; +static int sample_address = 0; +static int multiplex = 0; +static int multiplex_fd = -1; -static long samples; +static long samples = 0; static struct timeval last_read; static struct timeval this_read; -static u64 bytes_written; +static u64 bytes_written = 0; static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS]; -static int nr_poll; -static int nr_cpu; +static int nr_poll = 0; +static int nr_cpu = 0; -static int file_new = 1; +static int file_new = 1; -struct perf_header *header; +struct perf_header *header = NULL; struct mmap_data { int counter; diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index c574c5b3d0e..d978dc99236 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -55,26 +55,26 @@ static int fd[MAX_NR_CPUS][MAX_COUNTERS]; -static int system_wide = 0; +static int system_wide = 0; static int default_interval = 100000; -static int count_filter = 5; -static int print_entries = 15; +static int count_filter = 5; +static int print_entries = 15; -static int target_pid = -1; -static int inherit = 0; -static int profile_cpu = -1; -static int nr_cpus = 0; -static unsigned int realtime_prio = 0; -static int group = 0; +static int target_pid = -1; +static int inherit = 0; +static int profile_cpu = -1; +static int nr_cpus = 0; +static unsigned int realtime_prio = 0; +static int group = 0; static unsigned int page_size; -static unsigned int mmap_pages = 16; -static int freq = 0; +static unsigned int mmap_pages = 16; +static int freq = 1000; /* 1 KHz */ -static int delay_secs = 2; -static int zero; -static int dump_symtab; +static int delay_secs = 2; +static int zero = 0; +static int dump_symtab = 0; /* * Source @@ -87,11 +87,11 @@ struct source_line { struct source_line *next; }; -static char 
*sym_filter = NULL; -struct sym_entry *sym_filter_entry = NULL; -static int sym_pcnt_filter = 5; -static int sym_counter = 0; -static int display_weighted = -1; +static char *sym_filter = NULL; +struct sym_entry *sym_filter_entry = NULL; +static int sym_pcnt_filter = 5; +static int sym_counter = 0; +static int display_weighted = -1; /* * Symbols From b209aa1f83964d49a332a7b6b818ebede5cdc6ef Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 6 Oct 2009 21:21:26 +0200 Subject: [PATCH 094/470] perf tools: Start the perf.data mapping at data offset in perf trace Currently, we are mapping perf.data in the beginning of the file and use the data offset as a buffer offset. This may exceed the mapping area if the data offset is upper than page_size * mmap_window and result in a page fault (thing that happen if we merge trace.info in perf.data). Instead, let's start the mapping in the page that matches our data offset. v2: Drop a junk from another patch (trace_report() removal) Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras Cc: Tom Zanussi LKML-Reference: <1254856886-10348-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-trace.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 5d4c84d8637..d573d4ea6c2 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -143,6 +143,7 @@ static int __cmd_trace(void) int ret, rc = EXIT_FAILURE; unsigned long offset = 0; unsigned long head = 0; + unsigned long shift; struct stat perf_stat; event_t *event; uint32_t size; @@ -180,6 +181,10 @@ static int __cmd_trace(void) return EXIT_FAILURE; } + shift = page_size * (head / page_size); + offset += shift; + head -= shift; + remap: buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, MAP_SHARED, input, offset); @@ -192,9 +197,9 @@ more: event = (event_t *)(buf + head); if (head + event->header.size >= page_size * mmap_window) { - unsigned long shift = page_size * (head / page_size); int res; + shift = page_size * (head / page_size); res = munmap(buf, page_size * mmap_window); assert(res == 0); From 03456a158d9067d2f657bec170506009db81756d Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 6 Oct 2009 23:36:47 +0200 Subject: [PATCH 095/470] perf tools: Merge trace.info content into perf.data This drops the trace.info file and move its contents into the common perf.data file. This is done by creating a new trace_info section into this file. A user of perf headers needs to call perf_header__set_trace_info() to save the trace meta informations into the perf.data file. A file created by perf after his patch is unsupported by previous version because the size of the headers have increased. That said, it's two new fields that have been added in the end of the headers, and those could be ignored by previous versions if they just handled the dynamic header size and then ignore the unknow part. The offsets guarantee the compatibility. We'll do a -stable fix for that. But current previous versions handle the header size using its static size, not dynamic, then it's not backward compatible with trace records. 
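As a sketch of what the new self-describing section buys (the structure mirrors the perf_file_section layout used in this patch, the reader fragment is illustrative): a tool that knows about trace_info seeks to its recorded offset and hands the descriptor to the trace parser, while a reader that honours the header's dynamic size could skip the extra offset/size pair it does not understand.

	struct perf_file_section {
		u64 offset;	/* absolute position of the section in perf.data */
		u64 size;	/* length of the section in bytes */
	};

	/* illustrative reader side */
	if (f_header.trace_info.size) {
		lseek(fd, f_header.trace_info.offset, SEEK_SET);
		trace_report(fd);	/* parse the embedded tracing metadata */
	}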
Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Paul Mackerras Cc: Mike Galbraith Cc: Paul Mackerras LKML-Reference: <20091006213643.GA5343@nowhere> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 7 +++-- tools/perf/builtin-sched.c | 1 - tools/perf/builtin-trace.c | 1 - tools/perf/util/header.c | 42 ++++++++++++++++++++++++++++++ tools/perf/util/header.h | 4 ++- tools/perf/util/trace-event-info.c | 6 ++--- tools/perf/util/trace-event-read.c | 7 ++--- tools/perf/util/trace-event.h | 4 +-- 8 files changed, 54 insertions(+), 18 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 494f8c7d752..59af03d80d0 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -17,7 +17,6 @@ #include "util/header.h" #include "util/event.h" #include "util/debug.h" -#include "util/trace-event.h" #include #include @@ -566,17 +565,17 @@ static int __cmd_record(int argc, const char **argv) else header = perf_header__new(); - if (raw_samples) { - read_tracing_data(attrs, nr_counters); + perf_header__set_trace_info(); } else { for (i = 0; i < nr_counters; i++) { if (attrs[i].sample_type & PERF_SAMPLE_RAW) { - read_tracing_data(attrs, nr_counters); + perf_header__set_trace_info(); break; } } } + atexit(atexit_header); if (!system_wide) { diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 4470f253570..18871380b01 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -1634,7 +1634,6 @@ static int read_events(void) uint32_t size; char *buf; - trace_report(); register_idle_thread(&threads, &last_match); input = open(input_name, O_RDONLY); diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index d573d4ea6c2..d9abb4ae5f7 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -149,7 +149,6 @@ static int __cmd_trace(void) uint32_t size; char *buf; - trace_report(); register_idle_thread(&threads, &last_match); input = open(input_name, O_RDONLY); diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index e306857b2c2..212fade7ee7 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -5,6 +5,8 @@ #include "util.h" #include "header.h" +#include "../perf.h" +#include "trace-event.h" /* * Create new perf.data header attribute: @@ -62,6 +64,8 @@ struct perf_header *perf_header__new(void) self->data_offset = 0; self->data_size = 0; + self->trace_info_offset = 0; + self->trace_info_size = 0; return self; } @@ -145,8 +149,16 @@ struct perf_file_header { struct perf_file_section attrs; struct perf_file_section data; struct perf_file_section event_types; + struct perf_file_section trace_info; }; +static int trace_info; + +void perf_header__set_trace_info(void) +{ + trace_info = 1; +} + static void do_write(int fd, void *buf, size_t size) { while (size) { @@ -198,6 +210,23 @@ void perf_header__write(struct perf_header *self, int fd) if (events) do_write(fd, events, self->event_size); + if (trace_info) { + static int trace_info_written; + + /* + * Write it only once + */ + if (!trace_info_written) { + self->trace_info_offset = lseek(fd, 0, SEEK_CUR); + read_tracing_data(fd, attrs, nr_counters); + self->trace_info_size = lseek(fd, 0, SEEK_CUR) - + self->trace_info_offset; + trace_info_written = 1; + } else { + lseek(fd, self->trace_info_offset + + self->trace_info_size, SEEK_SET); + } + } self->data_offset = lseek(fd, 0, SEEK_CUR); @@ -217,6 +246,10 @@ void perf_header__write(struct perf_header *self, int fd) .offset = 
self->event_offset, .size = self->event_size, }, + .trace_info = { + .offset = self->trace_info_offset, + .size = self->trace_info_size, + }, }; lseek(fd, 0, SEEK_SET); @@ -290,6 +323,15 @@ struct perf_header *perf_header__read(int fd) do_read(fd, events, f_header.event_types.size); event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type); } + + self->trace_info_offset = f_header.trace_info.offset; + self->trace_info_size = f_header.trace_info.size; + + if (self->trace_info_size) { + lseek(fd, self->trace_info_offset, SEEK_SET); + trace_report(fd); + } + self->event_offset = f_header.event_types.offset; self->event_size = f_header.event_types.size; diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index a2916b652a1..30aee5160dc 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -21,6 +21,8 @@ struct perf_header { u64 data_size; u64 event_offset; u64 event_size; + u64 trace_info_offset; + u64 trace_info_size; }; struct perf_header *perf_header__read(int fd); @@ -40,7 +42,7 @@ void perf_header_attr__add_id(struct perf_header_attr *self, u64 id); u64 perf_header__sample_type(struct perf_header *header); struct perf_event_attr * perf_header__find_attr(u64 id, struct perf_header *header); - +void perf_header__set_trace_info(void); struct perf_header *perf_header__new(void); diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c index af4b0573b37..831052d4b4f 100644 --- a/tools/perf/util/trace-event-info.c +++ b/tools/perf/util/trace-event-info.c @@ -496,14 +496,12 @@ get_tracepoints_path(struct perf_event_attr *pattrs, int nb_events) return path.next; } -void read_tracing_data(struct perf_event_attr *pattrs, int nb_events) +void read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events) { char buf[BUFSIZ]; struct tracepoint_path *tps; - output_fd = open(output_file, O_WRONLY | O_CREAT | O_TRUNC | O_LARGEFILE, 0644); - if (output_fd < 0) - die("creating file '%s'", output_file); + output_fd = fd; buf[0] = 23; buf[1] = 8; diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c index 1b5c847d2c2..44292e06cca 100644 --- a/tools/perf/util/trace-event-read.c +++ b/tools/perf/util/trace-event-read.c @@ -458,9 +458,8 @@ struct record *trace_read_data(int cpu) return data; } -void trace_report(void) +void trace_report(int fd) { - const char *input_file = "trace.info"; char buf[BUFSIZ]; char test[] = { 23, 8, 68 }; char *version; @@ -468,9 +467,7 @@ void trace_report(void) int show_funcs = 0; int show_printk = 0; - input_fd = open(input_file, O_RDONLY); - if (input_fd < 0) - die("opening '%s'\n", input_file); + input_fd = fd; read_or_die(buf, 3); if (memcmp(buf, test, 3) != 0) diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index 5f59a39fb88..da77e073c86 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -158,7 +158,7 @@ struct record *trace_read_data(int cpu); void parse_set_info(int nr_cpus, int long_sz); -void trace_report(void); +void trace_report(int fd); void *malloc_or_die(unsigned int size); @@ -244,6 +244,6 @@ unsigned long long raw_field_value(struct event *event, const char *name, void *data); void *raw_field_ptr(struct event *event, const char *name, void *data); -void read_tracing_data(struct perf_event_attr *pattrs, int nb_events); +void read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events); #endif /* __PERF_TRACE_EVENTS_H */ From 016e92fbc9ef33689cf654f343a94383d43235e7 Mon Sep 17 
00:00:00 2001 From: Frederic Weisbecker Date: Wed, 7 Oct 2009 12:47:31 +0200 Subject: [PATCH 096/470] perf tools: Unify perf.data mapping and events handling This librarizes the perf.data file mapping and handling in various perf tools, roughly reducing the amount of code and fixing the places that mmap from beginning of the file whereas we want to mmap from the beginning of the data, leading to page fault because the mmap window is too small since the trace info are written in the file too. TODO: - convert perf timechart too Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arjan van de Ven LKML-Reference: <20091007104729.GD5043@nowhere> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 2 + tools/perf/builtin-report.c | 231 +++++++----------------------------- tools/perf/builtin-sched.c | 140 +++++----------------- tools/perf/builtin-trace.c | 129 +++----------------- tools/perf/util/data_map.c | 222 ++++++++++++++++++++++++++++++++++ tools/perf/util/data_map.h | 31 +++++ 6 files changed, 344 insertions(+), 411 deletions(-) create mode 100644 tools/perf/util/data_map.c create mode 100644 tools/perf/util/data_map.h diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 5a429966c99..495eb6d97fa 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -342,6 +342,7 @@ LIB_H += util/values.h LIB_H += util/sort.h LIB_H += util/hist.h LIB_H += util/thread.h +LIB_H += util/data_map.h LIB_OBJS += util/abspath.o LIB_OBJS += util/alias.o @@ -378,6 +379,7 @@ LIB_OBJS += util/trace-event-info.o LIB_OBJS += util/svghelper.o LIB_OBJS += util/sort.o LIB_OBJS += util/hist.o +LIB_OBJS += util/data_map.o BUILTIN_OBJS += builtin-annotate.o BUILTIN_OBJS += builtin-help.o diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 12f8c868fcd..87c4582303b 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -26,6 +26,7 @@ #include "util/parse-options.h" #include "util/parse-events.h" +#include "util/data_map.h" #include "util/thread.h" #include "util/sort.h" #include "util/hist.h" @@ -37,7 +38,6 @@ static char *dso_list_str, *comm_list_str, *sym_list_str, static struct strlist *dso_list, *comm_list, *sym_list; static int force; -static int input; static int full_paths; static int show_nr_samples; @@ -48,15 +48,11 @@ static struct perf_read_values show_threads_values; static char default_pretty_printing_style[] = "normal"; static char *pretty_printing_style = default_pretty_printing_style; -static unsigned long page_size; -static unsigned long mmap_window = 32; - static int exclude_other = 1; static char callchain_default_opt[] = "fractal,0.5"; -static char __cwd[PATH_MAX]; -static char *cwd = __cwd; +static char *cwd; static int cwdlen; static struct rb_root threads; @@ -815,56 +811,51 @@ process_read_event(event_t *event, unsigned long offset, unsigned long head) return 0; } -static int -process_event(event_t *event, unsigned long offset, unsigned long head) +static int sample_type_check(u64 type) { - trace_event(event); + sample_type = type; - switch (event->header.type) { - case PERF_RECORD_SAMPLE: - return process_sample_event(event, offset, head); - - case PERF_RECORD_MMAP: - return process_mmap_event(event, offset, head); - - case PERF_RECORD_COMM: - return process_comm_event(event, offset, head); - - case PERF_RECORD_FORK: - case PERF_RECORD_EXIT: - return process_task_event(event, offset, head); - - case PERF_RECORD_LOST: - return process_lost_event(event, offset, head); - 
- case PERF_RECORD_READ: - return process_read_event(event, offset, head); - - /* - * We dont process them right now but they are fine: - */ - - case PERF_RECORD_THROTTLE: - case PERF_RECORD_UNTHROTTLE: - return 0; - - default: - return -1; + if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) { + if (sort__has_parent) { + fprintf(stderr, "selected --sort parent, but no" + " callchain data. Did you call" + " perf record without -g?\n"); + return -1; + } + if (callchain) { + fprintf(stderr, "selected -g but no callchain data." + " Did you call perf record without" + " -g?\n"); + return -1; + } + } else if (callchain_param.mode != CHAIN_NONE && !callchain) { + callchain = 1; + if (register_callchain_param(&callchain_param) < 0) { + fprintf(stderr, "Can't register callchain" + " params\n"); + return -1; + } } return 0; } +static struct perf_file_handler file_handler = { + .process_sample_event = process_sample_event, + .process_mmap_event = process_mmap_event, + .process_comm_event = process_comm_event, + .process_exit_event = process_task_event, + .process_fork_event = process_task_event, + .process_lost_event = process_lost_event, + .process_read_event = process_read_event, + .sample_type_check = sample_type_check, +}; + + static int __cmd_report(void) { - int ret, rc = EXIT_FAILURE; - unsigned long offset = 0; - unsigned long head, shift; - struct stat input_stat; struct thread *idle; - event_t *event; - uint32_t size; - char *buf; + int ret; idle = register_idle_thread(&threads, &last_match); thread__comm_adjust(idle); @@ -872,151 +863,19 @@ static int __cmd_report(void) if (show_threads) perf_read_values_init(&show_threads_values); - input = open(input_name, O_RDONLY); - if (input < 0) { - fprintf(stderr, " failed to open file: %s", input_name); - if (!strcmp(input_name, "perf.data")) - fprintf(stderr, " (try 'perf record' first)"); - fprintf(stderr, "\n"); - exit(-1); - } + register_perf_file_handler(&file_handler); - ret = fstat(input, &input_stat); - if (ret < 0) { - perror("failed to stat file"); - exit(-1); - } - - if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) { - fprintf(stderr, "file: %s not owned by current user or root\n", input_name); - exit(-1); - } - - if (!input_stat.st_size) { - fprintf(stderr, "zero-sized file, nothing to do!\n"); - exit(0); - } - - header = perf_header__read(input); - head = header->data_offset; - - sample_type = perf_header__sample_type(header); - - if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) { - if (sort__has_parent) { - fprintf(stderr, "selected --sort parent, but no" - " callchain data. Did you call" - " perf record without -g?\n"); - exit(-1); - } - if (callchain) { - fprintf(stderr, "selected -g but no callchain data." 
- " Did you call perf record without" - " -g?\n"); - exit(-1); - } - } else if (callchain_param.mode != CHAIN_NONE && !callchain) { - callchain = 1; - if (register_callchain_param(&callchain_param) < 0) { - fprintf(stderr, "Can't register callchain" - " params\n"); - exit(-1); - } - } - - if (load_kernel() < 0) { - perror("failed to load kernel symbols"); - return EXIT_FAILURE; - } - - if (!full_paths) { - if (getcwd(__cwd, sizeof(__cwd)) == NULL) { - perror("failed to get the current directory"); - return EXIT_FAILURE; - } - cwdlen = strlen(cwd); - } else { - cwd = NULL; - cwdlen = 0; - } - - shift = page_size * (head / page_size); - offset += shift; - head -= shift; - -remap: - buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, - MAP_SHARED, input, offset); - if (buf == MAP_FAILED) { - perror("failed to mmap file"); - exit(-1); - } - -more: - event = (event_t *)(buf + head); - - size = event->header.size; - if (!size) - size = 8; - - if (head + event->header.size >= page_size * mmap_window) { - int munmap_ret; - - shift = page_size * (head / page_size); - - munmap_ret = munmap(buf, page_size * mmap_window); - assert(munmap_ret == 0); - - offset += shift; - head -= shift; - goto remap; - } - - size = event->header.size; - - dump_printf("\n%p [%p]: event: %d\n", - (void *)(offset + head), - (void *)(long)event->header.size, - event->header.type); - - if (!size || process_event(event, offset, head) < 0) { - - dump_printf("%p [%p]: skipping unknown header type: %d\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->header.type); - - total_unknown++; - - /* - * assume we lost track of the stream, check alignment, and - * increment a single u64 in the hope to catch on again 'soon'. - */ - - if (unlikely(head & 7)) - head &= ~7ULL; - - size = 8; - } - - head += size; - - if (offset + head >= header->data_offset + header->data_size) - goto done; - - if (offset + head < (unsigned long)input_stat.st_size) - goto more; - -done: - rc = EXIT_SUCCESS; - close(input); + ret = mmap_dispatch_perf_file(&header, input_name, force, full_paths, + &cwdlen, &cwd); + if (ret) + return ret; dump_printf(" IP events: %10ld\n", total); dump_printf(" mmap events: %10ld\n", total_mmap); dump_printf(" comm events: %10ld\n", total_comm); dump_printf(" fork events: %10ld\n", total_fork); dump_printf(" lost events: %10ld\n", total_lost); - dump_printf(" unknown events: %10ld\n", total_unknown); + dump_printf(" unknown events: %10ld\n", file_handler.total_unknown); if (dump_trace) return 0; @@ -1034,7 +893,7 @@ done: if (show_threads) perf_read_values_destroy(&show_threads_values); - return rc; + return ret; } static int @@ -1177,8 +1036,6 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) { symbol__init(); - page_size = getpagesize(); - argc = parse_options(argc, argv, options, report_usage, 0); setup_sorting(); diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 18871380b01..e1df7055ab8 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -11,6 +11,7 @@ #include "util/trace-event.h" #include "util/debug.h" +#include "util/data_map.h" #include #include @@ -20,9 +21,6 @@ #include static char const *input_name = "perf.data"; -static int input; -static unsigned long page_size; -static unsigned long mmap_window = 32; static unsigned long total_comm = 0; @@ -35,6 +33,9 @@ static u64 sample_type; static char default_sort_order[] = "avg, max, switch, runtime"; static char *sort_order = default_sort_order; +static char *cwd; 
+static int cwdlen; + #define PR_SET_NAME 15 /* Set process name */ #define MAX_CPUS 4096 @@ -1594,129 +1595,43 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) } static int -process_event(event_t *event, unsigned long offset, unsigned long head) +process_lost_event(event_t *event __used, + unsigned long offset __used, + unsigned long head __used) { - trace_event(event); + nr_lost_chunks++; + nr_lost_events += event->lost.lost; - nr_events++; - switch (event->header.type) { - case PERF_RECORD_MMAP: - return 0; - case PERF_RECORD_LOST: - nr_lost_chunks++; - nr_lost_events += event->lost.lost; - return 0; + return 0; +} - case PERF_RECORD_COMM: - return process_comm_event(event, offset, head); +static int sample_type_check(u64 type) +{ + sample_type = type; - case PERF_RECORD_EXIT ... PERF_RECORD_READ: - return 0; - - case PERF_RECORD_SAMPLE: - return process_sample_event(event, offset, head); - - case PERF_RECORD_MAX: - default: + if (!(sample_type & PERF_SAMPLE_RAW)) { + fprintf(stderr, + "No trace sample to read. Did you call perf record " + "without -R?"); return -1; } return 0; } +static struct perf_file_handler file_handler = { + .process_sample_event = process_sample_event, + .process_comm_event = process_comm_event, + .process_lost_event = process_lost_event, + .sample_type_check = sample_type_check, +}; + static int read_events(void) { - int ret, rc = EXIT_FAILURE; - unsigned long offset = 0; - unsigned long head = 0; - struct stat perf_stat; - event_t *event; - uint32_t size; - char *buf; - register_idle_thread(&threads, &last_match); + register_perf_file_handler(&file_handler); - input = open(input_name, O_RDONLY); - if (input < 0) { - perror("failed to open file"); - exit(-1); - } - - ret = fstat(input, &perf_stat); - if (ret < 0) { - perror("failed to stat file"); - exit(-1); - } - - if (!perf_stat.st_size) { - fprintf(stderr, "zero-sized file, nothing to do!\n"); - exit(0); - } - header = perf_header__read(input); - head = header->data_offset; - sample_type = perf_header__sample_type(header); - - if (!(sample_type & PERF_SAMPLE_RAW)) - die("No trace sample to read. Did you call perf record " - "without -R?"); - - if (load_kernel() < 0) { - perror("failed to load kernel symbols"); - return EXIT_FAILURE; - } - -remap: - buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, - MAP_SHARED, input, offset); - if (buf == MAP_FAILED) { - perror("failed to mmap file"); - exit(-1); - } - -more: - event = (event_t *)(buf + head); - - size = event->header.size; - if (!size) - size = 8; - - if (head + event->header.size >= page_size * mmap_window) { - unsigned long shift = page_size * (head / page_size); - int res; - - res = munmap(buf, page_size * mmap_window); - assert(res == 0); - - offset += shift; - head -= shift; - goto remap; - } - - size = event->header.size; - - - if (!size || process_event(event, offset, head) < 0) { - - /* - * assume we lost track of the stream, check alignment, and - * increment a single u64 in the hope to catch on again 'soon'. 
- */ - - if (unlikely(head & 7)) - head &= ~7ULL; - - size = 8; - } - - head += size; - - if (offset + head < (unsigned long)perf_stat.st_size) - goto more; - - rc = EXIT_SUCCESS; - close(input); - - return rc; + return mmap_dispatch_perf_file(&header, input_name, 0, 0, &cwdlen, &cwd); } static void print_bad_events(void) @@ -1934,7 +1849,6 @@ static int __cmd_record(int argc, const char **argv) int cmd_sched(int argc, const char **argv, const char *prefix __used) { symbol__init(); - page_size = getpagesize(); argc = parse_options(argc, argv, sched_options, sched_usage, PARSE_OPT_STOP_AT_NON_OPTION); diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index d9abb4ae5f7..fb3f3c22021 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -12,11 +12,9 @@ #include "util/debug.h" #include "util/trace-event.h" +#include "util/data_map.h" static char const *input_name = "perf.data"; -static int input; -static unsigned long page_size; -static unsigned long mmap_window = 32; static unsigned long total = 0; static unsigned long total_comm = 0; @@ -27,6 +25,9 @@ static struct thread *last_match; static struct perf_header *header; static u64 sample_type; +static char *cwd; +static int cwdlen; + static int process_comm_event(event_t *event, unsigned long offset, unsigned long head) @@ -112,125 +113,32 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) return 0; } -static int -process_event(event_t *event, unsigned long offset, unsigned long head) +static int sample_type_check(u64 type) { - trace_event(event); + sample_type = type; - switch (event->header.type) { - case PERF_RECORD_MMAP ... PERF_RECORD_LOST: - return 0; - - case PERF_RECORD_COMM: - return process_comm_event(event, offset, head); - - case PERF_RECORD_EXIT ... PERF_RECORD_READ: - return 0; - - case PERF_RECORD_SAMPLE: - return process_sample_event(event, offset, head); - - case PERF_RECORD_MAX: - default: + if (!(sample_type & PERF_SAMPLE_RAW)) { + fprintf(stderr, + "No trace sample to read. Did you call perf record " + "without -R?"); return -1; } return 0; } +static struct perf_file_handler file_handler = { + .process_sample_event = process_sample_event, + .process_comm_event = process_comm_event, + .sample_type_check = sample_type_check, +}; + static int __cmd_trace(void) { - int ret, rc = EXIT_FAILURE; - unsigned long offset = 0; - unsigned long head = 0; - unsigned long shift; - struct stat perf_stat; - event_t *event; - uint32_t size; - char *buf; - register_idle_thread(&threads, &last_match); + register_perf_file_handler(&file_handler); - input = open(input_name, O_RDONLY); - if (input < 0) { - perror("failed to open file"); - exit(-1); - } - - ret = fstat(input, &perf_stat); - if (ret < 0) { - perror("failed to stat file"); - exit(-1); - } - - if (!perf_stat.st_size) { - fprintf(stderr, "zero-sized file, nothing to do!\n"); - exit(0); - } - header = perf_header__read(input); - head = header->data_offset; - sample_type = perf_header__sample_type(header); - - if (!(sample_type & PERF_SAMPLE_RAW)) - die("No trace sample to read. 
Did you call perf record " - "without -R?"); - - if (load_kernel() < 0) { - perror("failed to load kernel symbols"); - return EXIT_FAILURE; - } - - shift = page_size * (head / page_size); - offset += shift; - head -= shift; - -remap: - buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, - MAP_SHARED, input, offset); - if (buf == MAP_FAILED) { - perror("failed to mmap file"); - exit(-1); - } - -more: - event = (event_t *)(buf + head); - - if (head + event->header.size >= page_size * mmap_window) { - int res; - - shift = page_size * (head / page_size); - res = munmap(buf, page_size * mmap_window); - assert(res == 0); - - offset += shift; - head -= shift; - goto remap; - } - - size = event->header.size; - - if (!size || process_event(event, offset, head) < 0) { - - /* - * assume we lost track of the stream, check alignment, and - * increment a single u64 in the hope to catch on again 'soon'. - */ - - if (unlikely(head & 7)) - head &= ~7ULL; - - size = 8; - } - - head += size; - - if (offset + head < (unsigned long)perf_stat.st_size) - goto more; - - rc = EXIT_SUCCESS; - close(input); - - return rc; + return mmap_dispatch_perf_file(&header, input_name, 0, 0, &cwdlen, &cwd); } static const char * const annotate_usage[] = { @@ -249,7 +157,6 @@ static const struct option options[] = { int cmd_trace(int argc, const char **argv, const char *prefix __used) { symbol__init(); - page_size = getpagesize(); argc = parse_options(argc, argv, options, annotate_usage, 0); if (argc) { diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c new file mode 100644 index 00000000000..242b0555ab9 --- /dev/null +++ b/tools/perf/util/data_map.c @@ -0,0 +1,222 @@ +#include "data_map.h" +#include "symbol.h" +#include "util.h" +#include "debug.h" + + +static struct perf_file_handler *curr_handler; +static unsigned long mmap_window = 32; +static char __cwd[PATH_MAX]; + +static int +process_event_stub(event_t *event __used, + unsigned long offset __used, + unsigned long head __used) +{ + return 0; +} + +void register_perf_file_handler(struct perf_file_handler *handler) +{ + if (!handler->process_sample_event) + handler->process_sample_event = process_event_stub; + if (!handler->process_mmap_event) + handler->process_mmap_event = process_event_stub; + if (!handler->process_comm_event) + handler->process_comm_event = process_event_stub; + if (!handler->process_fork_event) + handler->process_fork_event = process_event_stub; + if (!handler->process_exit_event) + handler->process_exit_event = process_event_stub; + if (!handler->process_lost_event) + handler->process_lost_event = process_event_stub; + if (!handler->process_read_event) + handler->process_read_event = process_event_stub; + if (!handler->process_throttle_event) + handler->process_throttle_event = process_event_stub; + if (!handler->process_unthrottle_event) + handler->process_unthrottle_event = process_event_stub; + + curr_handler = handler; +} + +static int +process_event(event_t *event, unsigned long offset, unsigned long head) +{ + trace_event(event); + + switch (event->header.type) { + case PERF_RECORD_SAMPLE: + return curr_handler->process_sample_event(event, offset, head); + case PERF_RECORD_MMAP: + return curr_handler->process_mmap_event(event, offset, head); + case PERF_RECORD_COMM: + return curr_handler->process_comm_event(event, offset, head); + case PERF_RECORD_FORK: + return curr_handler->process_fork_event(event, offset, head); + case PERF_RECORD_EXIT: + return curr_handler->process_exit_event(event, offset, head); + case 
PERF_RECORD_LOST: + return curr_handler->process_lost_event(event, offset, head); + case PERF_RECORD_READ: + return curr_handler->process_read_event(event, offset, head); + case PERF_RECORD_THROTTLE: + return curr_handler->process_throttle_event(event, offset, head); + case PERF_RECORD_UNTHROTTLE: + return curr_handler->process_unthrottle_event(event, offset, head); + default: + curr_handler->total_unknown++; + return -1; + } +} + +int mmap_dispatch_perf_file(struct perf_header **pheader, + const char *input_name, + int force, + int full_paths, + int *cwdlen, + char **cwd) +{ + int ret, rc = EXIT_FAILURE; + struct perf_header *header; + unsigned long head, shift; + unsigned long offset = 0; + struct stat input_stat; + size_t page_size; + u64 sample_type; + event_t *event; + uint32_t size; + int input; + char *buf; + + if (!curr_handler) + die("Forgot to register perf file handler"); + + page_size = getpagesize(); + + input = open(input_name, O_RDONLY); + if (input < 0) { + fprintf(stderr, " failed to open file: %s", input_name); + if (!strcmp(input_name, "perf.data")) + fprintf(stderr, " (try 'perf record' first)"); + fprintf(stderr, "\n"); + exit(-1); + } + + ret = fstat(input, &input_stat); + if (ret < 0) { + perror("failed to stat file"); + exit(-1); + } + + if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) { + fprintf(stderr, "file: %s not owned by current user or root\n", + input_name); + exit(-1); + } + + if (!input_stat.st_size) { + fprintf(stderr, "zero-sized file, nothing to do!\n"); + exit(0); + } + + *pheader = perf_header__read(input); + header = *pheader; + head = header->data_offset; + + sample_type = perf_header__sample_type(header); + + if (curr_handler->sample_type_check) + if (curr_handler->sample_type_check(sample_type) < 0) + exit(-1); + + if (load_kernel() < 0) { + perror("failed to load kernel symbols"); + return EXIT_FAILURE; + } + + if (!full_paths) { + if (getcwd(__cwd, sizeof(__cwd)) == NULL) { + perror("failed to get the current directory"); + return EXIT_FAILURE; + } + *cwd = __cwd; + *cwdlen = strlen(*cwd); + } else { + *cwd = NULL; + *cwdlen = 0; + } + + shift = page_size * (head / page_size); + offset += shift; + head -= shift; + +remap: + buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, + MAP_SHARED, input, offset); + if (buf == MAP_FAILED) { + perror("failed to mmap file"); + exit(-1); + } + +more: + event = (event_t *)(buf + head); + + size = event->header.size; + if (!size) + size = 8; + + if (head + event->header.size >= page_size * mmap_window) { + int munmap_ret; + + shift = page_size * (head / page_size); + + munmap_ret = munmap(buf, page_size * mmap_window); + assert(munmap_ret == 0); + + offset += shift; + head -= shift; + goto remap; + } + + size = event->header.size; + + dump_printf("\n%p [%p]: event: %d\n", + (void *)(offset + head), + (void *)(long)event->header.size, + event->header.type); + + if (!size || process_event(event, offset, head) < 0) { + + dump_printf("%p [%p]: skipping unknown header type: %d\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->header.type); + + /* + * assume we lost track of the stream, check alignment, and + * increment a single u64 in the hope to catch on again 'soon'. 
+ */ + + if (unlikely(head & 7)) + head &= ~7ULL; + + size = 8; + } + + head += size; + + if (offset + head >= header->data_offset + header->data_size) + goto done; + + if (offset + head < (unsigned long)input_stat.st_size) + goto more; + +done: + rc = EXIT_SUCCESS; + close(input); + + return rc; +} + + diff --git a/tools/perf/util/data_map.h b/tools/perf/util/data_map.h new file mode 100644 index 00000000000..716d1053b07 --- /dev/null +++ b/tools/perf/util/data_map.h @@ -0,0 +1,31 @@ +#ifndef __PERF_DATAMAP_H +#define __PERF_DATAMAP_H + +#include "event.h" +#include "header.h" + +typedef int (*event_type_handler_t)(event_t *, unsigned long, unsigned long); + +struct perf_file_handler { + event_type_handler_t process_sample_event; + event_type_handler_t process_mmap_event; + event_type_handler_t process_comm_event; + event_type_handler_t process_fork_event; + event_type_handler_t process_exit_event; + event_type_handler_t process_lost_event; + event_type_handler_t process_read_event; + event_type_handler_t process_throttle_event; + event_type_handler_t process_unthrottle_event; + int (*sample_type_check)(u64 sample_type); + unsigned long total_unknown; +}; + +void register_perf_file_handler(struct perf_file_handler *handler); +int mmap_dispatch_perf_file(struct perf_header **pheader, + const char *input_name, + int force, + int full_paths, + int *cwdlen, + char **cwd); + +#endif From 9a92b479b2f088ee2d3194243f4c8e59b1b8c9c2 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 8 Oct 2009 16:37:12 +0200 Subject: [PATCH 097/470] perf tools: Improve thread comm resolution in perf sched When we get sched traces that involve a task that was already created before opening the event, we won't have the comm event for it. So if we can't find the comm event for a given thread, we look at the traces that may contain these informations. 
Before: ata/1:371 | 0.000 ms | 1 | avg: 3988.693 ms | max: 3988.693 ms | kondemand/1:421 | 0.096 ms | 3 | avg: 345.346 ms | max: 1035.989 ms | kondemand/0:420 | 0.025 ms | 3 | avg: 421.332 ms | max: 964.014 ms | :5124:5124 | 0.103 ms | 5 | avg: 74.082 ms | max: 277.194 ms | :6244:6244 | 0.691 ms | 9 | avg: 125.655 ms | max: 271.306 ms | firefox:5080 | 0.924 ms | 5 | avg: 53.833 ms | max: 257.828 ms | npviewer.bin:6225 | 21.871 ms | 53 | avg: 22.462 ms | max: 220.835 ms | :6245:6245 | 9.631 ms | 21 | avg: 41.864 ms | max: 213.349 ms | After: ata/1:371 | 0.000 ms | 1 | avg: 3988.693 ms | max: 3988.693 ms | kondemand/1:421 | 0.096 ms | 3 | avg: 345.346 ms | max: 1035.989 ms | kondemand/0:420 | 0.025 ms | 3 | avg: 421.332 ms | max: 964.014 ms | firefox:5124 | 0.103 ms | 5 | avg: 74.082 ms | max: 277.194 ms | npviewer.bin:6244 | 0.691 ms | 9 | avg: 125.655 ms | max: 271.306 ms | firefox:5080 | 0.924 ms | 5 | avg: 53.833 ms | max: 257.828 ms | npviewer.bin:6225 | 21.871 ms | 53 | avg: 22.462 ms | max: 220.835 ms | npviewer.bin:6245 | 9.631 ms | 21 | avg: 41.864 ms | max: 213.349 ms | Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras LKML-Reference: <1255012632-7882-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-sched.c | 44 +++++++++++++++++++++++++++++++++----- tools/perf/util/thread.c | 32 +++++++++++++++++++++------ tools/perf/util/thread.h | 3 +++ 3 files changed, 67 insertions(+), 12 deletions(-) diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index e1df7055ab8..25b91e78433 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -1034,6 +1034,36 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp) atoms->nb_atoms++; } +static struct thread * +threads__findnew_from_ctx(u32 pid, struct trace_switch_event *switch_event) +{ + struct thread *th; + + th = threads__findnew_nocomm(pid, &threads, &last_match); + if (th->comm) + return th; + + if (pid == switch_event->prev_pid) + thread__set_comm(th, switch_event->prev_comm); + else + thread__set_comm(th, switch_event->next_comm); + return th; +} + +static struct thread * +threads__findnew_from_wakeup(struct trace_wakeup_event *wakeup_event) +{ + struct thread *th; + + th = threads__findnew_nocomm(wakeup_event->pid, &threads, &last_match); + if (th->comm) + return th; + + thread__set_comm(th, wakeup_event->comm); + + return th; +} + static void latency_switch_event(struct trace_switch_event *switch_event, struct event *event __used, @@ -1059,8 +1089,10 @@ latency_switch_event(struct trace_switch_event *switch_event, die("hm, delta: %Ld < 0 ?\n", delta); - sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match); - sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match); + sched_out = threads__findnew_from_ctx(switch_event->prev_pid, + switch_event); + sched_in = threads__findnew_from_ctx(switch_event->next_pid, + switch_event); out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid); if (!out_events) { @@ -1126,7 +1158,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, if (!wakeup_event->success) return; - wakee = threads__findnew(wakeup_event->pid, &threads, &last_match); + wakee = threads__findnew_from_wakeup(wakeup_event); atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid); if (!atoms) { thread_atoms_insert(wakee); @@ -1386,8 +1418,10 @@ map_switch_event(struct trace_switch_event *switch_event, die("hm, delta: %Ld < 0 
?\n", delta); - sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match); - sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match); + sched_out = threads__findnew_from_ctx(switch_event->prev_pid, + switch_event); + sched_in = threads__findnew_from_ctx(switch_event->next_pid, + switch_event); curr_thread[this_cpu] = sched_in; diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 3b56aebb1f4..8bd5ca2d2f2 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -6,15 +6,17 @@ #include "util.h" #include "debug.h" -static struct thread *thread__new(pid_t pid) +static struct thread *thread__new(pid_t pid, int set_comm) { struct thread *self = calloc(1, sizeof(*self)); if (self != NULL) { self->pid = pid; - self->comm = malloc(32); - if (self->comm) - snprintf(self->comm, 32, ":%d", self->pid); + if (set_comm) { + self->comm = malloc(32); + if (self->comm) + snprintf(self->comm, 32, ":%d", self->pid); + } self->maps = RB_ROOT; INIT_LIST_HEAD(&self->removed_maps); } @@ -50,8 +52,10 @@ static size_t thread__fprintf(struct thread *self, FILE *fp) return ret; } -struct thread * -threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match) +static struct thread * +__threads__findnew(pid_t pid, struct rb_root *threads, + struct thread **last_match, + int set_comm) { struct rb_node **p = &threads->rb_node; struct rb_node *parent = NULL; @@ -80,7 +84,8 @@ threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match) p = &(*p)->rb_right; } - th = thread__new(pid); + th = thread__new(pid, set_comm); + if (th != NULL) { rb_link_node(&th->rb_node, parent, p); rb_insert_color(&th->rb_node, threads); @@ -90,6 +95,19 @@ threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match) return th; } +struct thread * +threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match) +{ + return __threads__findnew(pid, threads, last_match, 1); +} + +struct thread * +threads__findnew_nocomm(pid_t pid, struct rb_root *threads, + struct thread **last_match) +{ + return __threads__findnew(pid, threads, last_match, 0); +} + struct thread * register_idle_thread(struct rb_root *threads, struct thread **last_match) { diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 845d9b62f96..75bc843950c 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -18,6 +18,9 @@ int thread__set_comm(struct thread *self, const char *comm); struct thread * threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match); struct thread * +threads__findnew_nocomm(pid_t pid, struct rb_root *threads, + struct thread **last_match); +struct thread * register_idle_thread(struct rb_root *threads, struct thread **last_match); void thread__insert_map(struct thread *self, struct map *map); int thread__fork(struct thread *self, struct thread *parent); From da21d1b547cbaa2c026cf645753651c25d340923 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 7 Oct 2009 10:49:00 -0300 Subject: [PATCH 098/470] perf tools: Up the verbose level for some really verbose stuff Like printing every symbol created. 
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith LKML-Reference: <1254923340-4870-1-git-send-email-acme@redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 4 ++-- tools/perf/builtin-report.c | 4 ++-- tools/perf/util/symbol.c | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 35ed97bd0c6..8c84320ecb0 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -658,10 +658,10 @@ more: if (dump_trace) return 0; - if (verbose >= 3) + if (verbose > 3) threads__fprintf(stdout, &threads); - if (verbose >= 2) + if (verbose > 2) dsos__fprintf(stdout); collapse__resort(); diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 87c4582303b..f57a23b19f3 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -880,10 +880,10 @@ static int __cmd_report(void) if (dump_trace) return 0; - if (verbose >= 3) + if (verbose > 3) threads__fprintf(stdout, &threads); - if (verbose >= 2) + if (verbose > 2) dsos__fprintf(stdout); collapse__resort(); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 582ce72ca4d..a6887f94dfe 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -74,7 +74,7 @@ static struct symbol *symbol__new(u64 start, u64 len, const char *name, if (!self) return NULL; - if (v >= 2) + if (v > 2) printf("new symbol: %016Lx [%08lx]: %s, hist: %p\n", start, (unsigned long)len, name, self->hist); @@ -685,7 +685,7 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, } if (self->adjust_symbols) { - if (v >= 2) + if (v > 2) printf("adjusting symbol: st_value: %Lx sh_addr: %Lx sh_offset: %Lx\n", (u64)sym.st_value, (u64)shdr.sh_addr, (u64)shdr.sh_offset); From 2e538c4a1847291cf01218d4fe7bb4dc60fef7cf Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 7 Oct 2009 13:48:56 -0300 Subject: [PATCH 099/470] perf tools: Improve kernel/modules symbol lookup This removes the ovelapping of vmlinux addresses with modules, using the ELF section name when using --vmlinux and creating a unique DSO name when using /proc/kallsyms ([kernel].N). This is done by creating multiple 'struct map' instances for address ranges backed by DSOs that have just the symbols for that range and a name that is derived from the ELF section name.o Now it is possible to ask for just the symbols in some particular kernel section: $ perf report -m --vmlinux ../build/tip-recvmmsg/vmlinux \ --dsos [kernel].vsyscall_fn | head -15 52.73% Xorg [.] vread_hpet 18.61% firefox [.] vread_hpet 14.50% npviewer.bin [.] vread_hpet 6.83% compiz [.] vread_hpet 5.73% glxgears [.] vread_hpet 0.63% java [.] vread_hpet 0.30% gnome-terminal [.] vread_hpet 0.23% perf [.] vread_hpet 0.18% xchat [.] vread_hpet $ Now we don't have to first lookup the list of modules and then, if it fails, vmlinux symbols, its just a simple lookup for the map then the symbols, just like for threads. Reports generated using /proc/kallsyms and --vmlinux should provide the same results, modulo the DSO name for sections other than ".text". But they don't right now because things like: ffffffff81011c20-ffffffff81012068 system_call ffffffff81011c30-ffffffff81011c9b system_call_after_swapgs ffffffff81011c9c-ffffffff81011cb6 system_call_fastpath ffffffff81011cb7-ffffffff81011cbb ret_from_sys_call I.e. overlapping symbols, again some ASM special case that we have to fixup. 
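The splitting step can be pictured with a small, self-contained sketch (the addresses, symbol and module names below are made up for illustration; this is not the perf code): walk the address-sorted symbols and open a new map whenever the owning module changes, so that resolving an address later becomes a map lookup followed by a symbol lookup within that map, mirroring the [kernel].N ranges described above.

	#include <stdio.h>
	#include <string.h>

	struct sym {
		unsigned long long	addr;
		const char		*name;
		const char		*module;	/* NULL: core kernel text */
	};

	int main(void)
	{
		/* Address-sorted symbols, as a kallsyms-style listing would give us. */
		struct sym syms[] = {
			{ 0xffffffff81000000ULL, "_text",        NULL    },
			{ 0xffffffff81011c20ULL, "system_call",  NULL    },
			{ 0xffffffffa0000000ULL, "e1000_probe",  "e1000" },
			{ 0xffffffffa0004000ULL, "e1000_remove", "e1000" },
			{ 0xffffffffa0100000ULL, "snd_card_new", "snd"   },
		};
		const char *cur = "";
		unsigned int i;
		int range = 0;

		for (i = 0; i < sizeof(syms) / sizeof(syms[0]); i++) {
			const char *owner = syms[i].module ? syms[i].module : "[kernel]";

			if (strcmp(owner, cur)) {
				/* Owner changed: start a new map/range. */
				printf("map %d (%s) starts at %#llx\n",
				       range++, owner, syms[i].addr);
				cur = owner;
			}
			printf("\t%#llx %s\n", syms[i].addr, syms[i].name);
		}
		return 0;
	}
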
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith LKML-Reference: <1254934136-8503-1-git-send-email-acme@redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 292 +++++++++++++++++++++++++++------------ 1 file changed, 203 insertions(+), 89 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index a6887f94dfe..faa84f5d4f5 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -26,27 +26,35 @@ enum dso_origin { static void dsos__add(struct dso *dso); static struct dso *dsos__find(const char *name); +static struct map *map__new2(u64 start, struct dso *dso); +static void kernel_maps__insert(struct map *map); static struct rb_root kernel_maps; -static void dso__set_symbols_end(struct dso *self) +static void dso__fixup_sym_end(struct dso *self) { struct rb_node *nd, *prevnd = rb_first(&self->syms); + struct symbol *curr, *prev; if (prevnd == NULL) return; + curr = rb_entry(prevnd, struct symbol, rb_node); + for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { - struct symbol *prev = rb_entry(prevnd, struct symbol, rb_node), - *curr = rb_entry(nd, struct symbol, rb_node); + prev = curr; + curr = rb_entry(nd, struct symbol, rb_node); if (prev->end == prev->start) prev->end = curr->start - 1; - prevnd = nd; } + + /* Last entry */ + if (curr->end == curr->start) + curr->end = roundup(curr->start, 4096); } -static void kernel_maps__fixup_sym_end(void) +static void kernel_maps__fixup_end(void) { struct map *prev, *curr; struct rb_node *nd, *prevnd = rb_first(&kernel_maps); @@ -55,13 +63,17 @@ static void kernel_maps__fixup_sym_end(void) return; curr = rb_entry(prevnd, struct map, rb_node); - dso__set_symbols_end(curr->dso); for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { prev = curr; curr = rb_entry(nd, struct map, rb_node); prev->end = curr->start - 1; - dso__set_symbols_end(curr->dso); + } + + nd = rb_last(&curr->dso->syms); + if (nd) { + struct symbol *sym = rb_entry(nd, struct symbol, rb_node); + curr->end = sym->end; } } @@ -200,13 +212,16 @@ size_t dso__fprintf(struct dso *self, FILE *fp) return ret; } -static int maps__load_kallsyms(symbol_filter_t filter, int use_modules, int v) +/* + * Loads the function entries in /proc/kallsyms into kernel_map->dso, + * so that we can in the next step set the symbol ->end address and then + * call kernel_maps__split_kallsyms. + */ +static int kernel_maps__load_all_kallsyms(int v) { - struct map *map = kernel_map; char *line = NULL; size_t n; FILE *file = fopen("/proc/kallsyms", "r"); - int count = 0; if (file == NULL) goto out_failure; @@ -216,7 +231,7 @@ static int maps__load_kallsyms(symbol_filter_t filter, int use_modules, int v) struct symbol *sym; int line_len, len; char symbol_type; - char *module, *symbol_name; + char *symbol_name; line_len = getline(&line, &n, file); if (line_len < 0) @@ -241,20 +256,55 @@ static int maps__load_kallsyms(symbol_filter_t filter, int use_modules, int v) continue; symbol_name = line + len + 2; - module = strchr(symbol_name, '\t'); - if (module) { - char *module_name_end; + /* + * Will fix up the end later, when we have all symbols sorted. 
+ */ + sym = symbol__new(start, 0, symbol_name, + kernel_map->dso->sym_priv_size, v); + if (sym == NULL) + goto out_delete_line; + + dso__insert_symbol(kernel_map->dso, sym); + } + + free(line); + fclose(file); + + return 0; + +out_delete_line: + free(line); +out_failure: + return -1; +} + +/* + * Split the symbols into maps, making sure there are no overlaps, i.e. the + * kernel range is broken in several maps, named [kernel].N, as we don't have + * the original ELF section names vmlinux have. + */ +static int kernel_maps__split_kallsyms(symbol_filter_t filter, int use_modules) +{ + struct map *map = kernel_map; + struct symbol *pos; + int count = 0; + struct rb_node *next = rb_first(&kernel_map->dso->syms); + int kernel_range = 0; + + while (next) { + char *module; + + pos = rb_entry(next, struct symbol, rb_node); + next = rb_next(&pos->rb_node); + + module = strchr(pos->name, '\t'); + if (module) { if (!use_modules) - continue; - *module = '\0'; - module = strchr(module + 1, '['); - if (!module) - continue; - module_name_end = strchr(module + 1, ']'); - if (!module_name_end) - continue; - *(module_name_end + 1) = '\0'; + goto delete_symbol; + + *module++ = '\0'; + if (strcmp(map->dso->name, module)) { map = kernel_maps__find_by_dso_name(module); if (!map) { @@ -263,50 +313,77 @@ static int maps__load_kallsyms(symbol_filter_t filter, int use_modules, int v) return -1; } } - start = map->map_ip(map, start); - } else - map = kernel_map; - /* - * Well fix up the end later, when we have all sorted. - */ - sym = symbol__new(start, 0, symbol_name, - map->dso->sym_priv_size, v); + /* + * So that we look just like we get from .ko files, + * i.e. not prelinked, relative to map->start. + */ + pos->start = map->map_ip(map, pos->start); + pos->end = map->map_ip(map, pos->end); + } else if (map != kernel_map) { + char dso_name[PATH_MAX]; + struct dso *dso; - if (sym == NULL) - goto out_delete_line; + snprintf(dso_name, sizeof(dso_name), "[kernel].%d", + kernel_range++); - if (filter && filter(map, sym)) - symbol__delete(sym, map->dso->sym_priv_size); - else { - dso__insert_symbol(map->dso, sym); + dso = dso__new(dso_name, + kernel_map->dso->sym_priv_size); + if (dso == NULL) + return -1; + + map = map__new2(pos->start, dso); + if (map == NULL) { + dso__delete(dso); + return -1; + } + + map->map_ip = vdso__map_ip; + kernel_maps__insert(map); + ++kernel_range; + } + + if (filter && filter(map, pos)) { +delete_symbol: + rb_erase(&pos->rb_node, &kernel_map->dso->syms); + symbol__delete(pos, kernel_map->dso->sym_priv_size); + } else { + if (map != kernel_map) { + rb_erase(&pos->rb_node, &kernel_map->dso->syms); + dso__insert_symbol(map->dso, pos); + } count++; } } - free(line); - fclose(file); - return count; - -out_delete_line: - free(line); -out_failure: - return -1; } -static size_t kernel_maps__fprintf(FILE *fp) + +static int kernel_maps__load_kallsyms(symbol_filter_t filter, + int use_modules, int v) +{ + if (kernel_maps__load_all_kallsyms(v)) + return -1; + + dso__fixup_sym_end(kernel_map->dso); + + return kernel_maps__split_kallsyms(filter, use_modules); +} + +static size_t kernel_maps__fprintf(FILE *fp, int v) { size_t printed = fprintf(stderr, "Kernel maps:\n"); struct rb_node *nd; - printed += map__fprintf(kernel_map, fp); - printed += dso__fprintf(kernel_map->dso, fp); - for (nd = rb_first(&kernel_maps); nd; nd = rb_next(nd)) { struct map *pos = rb_entry(nd, struct map, rb_node); + printed += fprintf(fp, "Map:"); printed += map__fprintf(pos, fp); - printed += dso__fprintf(pos->dso, fp); + if (v 
> 1) { + printed += dso__fprintf(pos->dso, fp); + printed += fprintf(fp, "--\n"); + } } return printed + fprintf(stderr, "END kernel maps\n"); @@ -594,6 +671,9 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, int fd, symbol_filter_t filter, int kernel, int kmodule, int v) { + struct map *curr_map = map; + struct dso *curr_dso = self; + size_t dso_name_len = strlen(self->short_name); Elf_Data *symstrs, *secstrs; uint32_t nr_syms; int err = -1; @@ -660,10 +740,9 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { struct symbol *f; const char *elf_name; - char *demangled; + char *demangled = NULL; int is_label = elf_sym__is_label(&sym); const char *section_name; - u64 sh_offset = 0; if (!is_label && !elf_sym__is_function(&sym)) continue; @@ -677,14 +756,51 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, if (is_label && !elf_sec__is_text(&shdr, secstrs)) continue; + elf_name = elf_sym__name(&sym, symstrs); section_name = elf_sec__name(&shdr, secstrs); - if ((kernel || kmodule)) { - if (strstr(section_name, ".init")) - sh_offset = shdr.sh_offset; + if (kernel || kmodule) { + char dso_name[PATH_MAX]; + + if (strcmp(section_name, + curr_dso->short_name + dso_name_len) == 0) + goto new_symbol; + + if (strcmp(section_name, ".text") == 0) { + curr_map = map; + curr_dso = self; + goto new_symbol; + } + + snprintf(dso_name, sizeof(dso_name), + "%s%s", self->short_name, section_name); + + curr_map = kernel_maps__find_by_dso_name(dso_name); + if (curr_map == NULL) { + u64 start = sym.st_value; + + if (kmodule) + start += map->start + shdr.sh_offset; + + curr_dso = dso__new(dso_name, self->sym_priv_size); + if (curr_dso == NULL) + goto out_elf_end; + curr_map = map__new2(start, curr_dso); + if (curr_map == NULL) { + dso__delete(curr_dso); + goto out_elf_end; + } + curr_map->map_ip = vdso__map_ip; + curr_dso->origin = DSO__ORIG_KERNEL; + kernel_maps__insert(curr_map); + dsos__add(curr_dso); + } else + curr_dso = curr_map->dso; + + goto new_symbol; } - if (self->adjust_symbols) { + if (curr_dso->adjust_symbols) { if (v > 2) printf("adjusting symbol: st_value: %Lx sh_addr: %Lx sh_offset: %Lx\n", (u64)sym.st_value, (u64)shdr.sh_addr, (u64)shdr.sh_offset); @@ -696,25 +812,29 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, * DWARF DW_compile_unit has this, but we don't always have access * to it... */ - elf_name = elf_sym__name(&sym, symstrs); demangled = bfd_demangle(NULL, elf_name, DMGL_PARAMS | DMGL_ANSI); if (demangled != NULL) elf_name = demangled; - - f = symbol__new(sym.st_value + sh_offset, sym.st_size, elf_name, - self->sym_priv_size, v); +new_symbol: + f = symbol__new(sym.st_value, sym.st_size, elf_name, + curr_dso->sym_priv_size, v); free(demangled); if (!f) goto out_elf_end; - if (filter && filter(map, f)) - symbol__delete(f, self->sym_priv_size); + if (filter && filter(curr_map, f)) + symbol__delete(f, curr_dso->sym_priv_size); else { - dso__insert_symbol(self, f); + dso__insert_symbol(curr_dso, f); nr++; } } + /* + * For misannotated, zeroed, ASM function sizes. + */ + if (nr > 0) + dso__fixup_sym_end(self); err = nr; out_elf_end: elf_end(elf); @@ -883,27 +1003,17 @@ static void kernel_maps__insert(struct map *map) struct symbol *kernel_maps__find_symbol(u64 ip, struct map **mapp) { - /* - * We can't have kernel_map in kernel_maps because it spans an address - * space that includes the modules. 
The right way to fix this is to - * create several maps, so that we don't have overlapping ranges with - * modules. For now lets look first on the kernel dso. - */ struct map *map = maps__find(&kernel_maps, ip); - struct symbol *sym; - - if (map) { - ip = map->map_ip(map, ip); - sym = map->dso->find_symbol(map->dso, ip); - } else { - map = kernel_map; - sym = map->dso->find_symbol(map->dso, ip); - } if (mapp) *mapp = map; - return sym; + if (map) { + ip = map->map_ip(map, ip); + return map->dso->find_symbol(map->dso, ip); + } + + return NULL; } struct map *kernel_maps__find_by_dso_name(const char *name) @@ -994,6 +1104,14 @@ static int dsos__load_modules_sym_dir(char *dirname, last = rb_last(&map->dso->syms); if (last) { struct symbol *sym; + /* + * We do this here as well, even having the + * symbol size found in the symtab because + * misannotated ASM symbols may have the size + * set to zero. + */ + dso__fixup_sym_end(map->dso); + sym = rb_entry(last, struct symbol, rb_node); map->end = map->start + sym->end; } @@ -1163,17 +1281,11 @@ int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, } if (err <= 0) - err = maps__load_kallsyms(filter, use_modules, v); + err = kernel_maps__load_kallsyms(filter, use_modules, v); if (err > 0) { struct rb_node *node = rb_first(&dso->syms); struct symbol *sym = rb_entry(node, struct symbol, rb_node); - /* - * Now that we have all sorted out, just set the ->end of all - * symbols that still don't have it. - */ - dso__set_symbols_end(dso); - kernel_maps__fixup_sym_end(); kernel_map->start = sym->start; node = rb_last(&dso->syms); @@ -1181,14 +1293,16 @@ int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, kernel_map->end = sym->end; dso->origin = DSO__ORIG_KERNEL; + kernel_maps__insert(kernel_map); /* - * XXX See kernel_maps__find_symbol comment - * kernel_maps__insert(kernel_map) + * Now that we have all sorted out, just set the ->end of all + * maps: */ + kernel_maps__fixup_end(); dsos__add(dso); if (v > 0) - kernel_maps__fprintf(stderr); + kernel_maps__fprintf(stderr, v); } return err; From 97ea1a7fa62af0d8d49a0fc12796b0073537c9d8 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 8 Oct 2009 21:04:17 +0200 Subject: [PATCH 100/470] perf tools: Fix thread comm resolution in perf sched This reverts commit 9a92b479b2f088ee2d3194243f4c8e59b1b8c9c2 ("perf tools: Improve thread comm resolution in perf sched") and fixes the real bug. The bug was elsewhere: We are failing to resolve thread names in perf sched because the table of threads we are building, on top of comm events, has a per process granularity. But perf sched, unlike the other perf tools, needs a per thread granularity as we are profiling every tasks individually. So fix it by building our threads table using the tid instead of the pid as the thread identifier. 
v2: Revert the previous fix - it is not really needed Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras LKML-Reference: <1255028657-11158-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-sched.c | 46 +++++--------------------------------- tools/perf/util/thread.c | 32 ++++++-------------------- tools/perf/util/thread.h | 3 --- 3 files changed, 13 insertions(+), 68 deletions(-) diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 25b91e78433..6b00529ce34 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -638,7 +638,7 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head) { struct thread *thread; - thread = threads__findnew(event->comm.pid, &threads, &last_match); + thread = threads__findnew(event->comm.tid, &threads, &last_match); dump_printf("%p [%p]: perf_event_comm: %s:%d\n", (void *)(offset + head), @@ -1034,36 +1034,6 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp) atoms->nb_atoms++; } -static struct thread * -threads__findnew_from_ctx(u32 pid, struct trace_switch_event *switch_event) -{ - struct thread *th; - - th = threads__findnew_nocomm(pid, &threads, &last_match); - if (th->comm) - return th; - - if (pid == switch_event->prev_pid) - thread__set_comm(th, switch_event->prev_comm); - else - thread__set_comm(th, switch_event->next_comm); - return th; -} - -static struct thread * -threads__findnew_from_wakeup(struct trace_wakeup_event *wakeup_event) -{ - struct thread *th; - - th = threads__findnew_nocomm(wakeup_event->pid, &threads, &last_match); - if (th->comm) - return th; - - thread__set_comm(th, wakeup_event->comm); - - return th; -} - static void latency_switch_event(struct trace_switch_event *switch_event, struct event *event __used, @@ -1089,10 +1059,8 @@ latency_switch_event(struct trace_switch_event *switch_event, die("hm, delta: %Ld < 0 ?\n", delta); - sched_out = threads__findnew_from_ctx(switch_event->prev_pid, - switch_event); - sched_in = threads__findnew_from_ctx(switch_event->next_pid, - switch_event); + sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match); + sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match); out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid); if (!out_events) { @@ -1158,7 +1126,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, if (!wakeup_event->success) return; - wakee = threads__findnew_from_wakeup(wakeup_event); + wakee = threads__findnew(wakeup_event->pid, &threads, &last_match); atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid); if (!atoms) { thread_atoms_insert(wakee); @@ -1418,10 +1386,8 @@ map_switch_event(struct trace_switch_event *switch_event, die("hm, delta: %Ld < 0 ?\n", delta); - sched_out = threads__findnew_from_ctx(switch_event->prev_pid, - switch_event); - sched_in = threads__findnew_from_ctx(switch_event->next_pid, - switch_event); + sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match); + sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match); curr_thread[this_cpu] = sched_in; diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 8bd5ca2d2f2..3b56aebb1f4 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -6,17 +6,15 @@ #include "util.h" #include "debug.h" -static struct thread *thread__new(pid_t pid, int set_comm) +static struct thread *thread__new(pid_t pid) { struct thread 
*self = calloc(1, sizeof(*self)); if (self != NULL) { self->pid = pid; - if (set_comm) { - self->comm = malloc(32); - if (self->comm) - snprintf(self->comm, 32, ":%d", self->pid); - } + self->comm = malloc(32); + if (self->comm) + snprintf(self->comm, 32, ":%d", self->pid); self->maps = RB_ROOT; INIT_LIST_HEAD(&self->removed_maps); } @@ -52,10 +50,8 @@ static size_t thread__fprintf(struct thread *self, FILE *fp) return ret; } -static struct thread * -__threads__findnew(pid_t pid, struct rb_root *threads, - struct thread **last_match, - int set_comm) +struct thread * +threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match) { struct rb_node **p = &threads->rb_node; struct rb_node *parent = NULL; @@ -84,8 +80,7 @@ __threads__findnew(pid_t pid, struct rb_root *threads, p = &(*p)->rb_right; } - th = thread__new(pid, set_comm); - + th = thread__new(pid); if (th != NULL) { rb_link_node(&th->rb_node, parent, p); rb_insert_color(&th->rb_node, threads); @@ -95,19 +90,6 @@ __threads__findnew(pid_t pid, struct rb_root *threads, return th; } -struct thread * -threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match) -{ - return __threads__findnew(pid, threads, last_match, 1); -} - -struct thread * -threads__findnew_nocomm(pid_t pid, struct rb_root *threads, - struct thread **last_match) -{ - return __threads__findnew(pid, threads, last_match, 0); -} - struct thread * register_idle_thread(struct rb_root *threads, struct thread **last_match) { diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 75bc843950c..845d9b62f96 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -18,9 +18,6 @@ int thread__set_comm(struct thread *self, const char *comm); struct thread * threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match); struct thread * -threads__findnew_nocomm(pid_t pid, struct rb_root *threads, - struct thread **last_match); -struct thread * register_idle_thread(struct rb_root *threads, struct thread **last_match); void thread__insert_map(struct thread *self, struct map *map); int thread__fork(struct thread *self, struct thread *parent); From 26dd2cb074d9dc41c9e3cddd7bf175fd0a41febc Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 8 Oct 2009 22:07:29 +0200 Subject: [PATCH 101/470] perf tools: Provide backward compatibility with previous perf.data version We have merged the trace.info file into perf.data by adding one section in the perf headers. This makes it incompatible with previous version: the new perf tools can't read the older perf.data. To support the previous format, we check the headers size. If they have the same size than in the previous format, then ignore the trace info section that doesn't exist. 
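The check described above is essentially an offsetof() comparison against the header size recorded in the file. A minimal sketch, with a made-up header layout rather than perf's real one:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical on-disk header: 'trace_info' is the section added in
	 * the new format; old writers wrote a header ending right before it. */
	struct file_section { uint64_t offset, size; };
	struct file_header {
		uint64_t		magic;
		uint64_t		size;		/* header size as written */
		struct file_section	attrs;
		struct file_section	data;
		struct file_section	trace_info;	/* new in this format */
	};

	int main(void)
	{
		struct file_header h = {
			.size = offsetof(struct file_header, trace_info),
		};

		if (h.size == sizeof(h))
			puts("new format");
		else if (h.size == offsetof(struct file_header, trace_info))
			puts("old format: treat trace_info as empty");
		else
			puts("incompatible file format");
		return 0;
	}
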
Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras LKML-Reference: <1255032449-12022-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/util/header.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 212fade7ee7..9aae360c0f2 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -287,10 +287,16 @@ struct perf_header *perf_header__read(int fd) do_read(fd, &f_header, sizeof(f_header)); if (f_header.magic != PERF_MAGIC || - f_header.size != sizeof(f_header) || f_header.attr_size != sizeof(f_attr)) die("incompatible file format"); + if (f_header.size != sizeof(f_header)) { + /* Support the previous format */ + if (f_header.size == offsetof(typeof(f_header), trace_info)) + f_header.trace_info.size = 0; + else + die("incompatible file format"); + } nr_attrs = f_header.attrs.size / sizeof(f_attr); lseek(fd, f_header.attrs.offset, SEEK_SET); From 04a705df47d1ea27ca2b066f24b1951c51792d0d Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Tue, 6 Oct 2009 16:42:08 +0200 Subject: [PATCH 102/470] perf_events: Check for filters on fixed counter events Intel fixed counters do not support all the filters possible with a generic counter. Thus, if a fixed counter event is passed but with certain filters set, then the fixed_mode_idx() function must fail and the event must be measured in a generic counter instead. Reject filters are: inv, edge, cnt-mask. Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra LKML-Reference: <1254840129-6198-2-git-send-email-eranian@gmail.com> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/perf_event.h | 13 ++++++++++++- arch/x86/kernel/cpu/perf_event.c | 6 ++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index ad7ce3fd506..8d9f8548a87 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -28,9 +28,20 @@ */ #define ARCH_PERFMON_EVENT_MASK 0xffff +/* + * filter mask to validate fixed counter events. + * the following filters disqualify for fixed counters: + * - inv + * - edge + * - cnt-mask + * The other filters are supported by fixed counters. + * The any-thread option is supported starting with v3. 
+ */ +#define ARCH_PERFMON_EVENT_FILTER_MASK 0xff840000 + #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) -#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0 +#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \ (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index b5801c31184..1d16bd69551 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1349,6 +1349,12 @@ fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc) if (!x86_pmu.num_events_fixed) return -1; + /* + * fixed counters do not take all possible filters + */ + if (hwc->config & ARCH_PERFMON_EVENT_FILTER_MASK) + return -1; + if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) return X86_PMC_IDX_FIXED_INSTRUCTIONS; if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) From b690081d4d3f6a23541493f1682835c3cd5c54a1 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Tue, 6 Oct 2009 16:42:09 +0200 Subject: [PATCH 103/470] perf_events: Add event constraints support for Intel processors On some Intel processors, not all events can be measured in all counters. Some events can only be measured in one particular counter, for instance. Assigning an event to the wrong counter does not crash the machine but this yields bogus counts, i.e., silent error. This patch changes the event to counter assignment logic to take into account event constraints for Intel P6, Core and Nehalem processors. There is no contraints on Intel Atom. There are constraints on Intel Yonah (Core Duo) but they are not provided in this patch given that this processor is not yet supported by perf_events. As a result of the constraints, it is possible for some event groups to never actually be loaded onto the PMU if they contain two events which can only be measured on a single counter. That situation can be detected with the scaling information extracted with read(). 
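A rough, self-contained sketch of this kind of table-driven counter assignment (the event codes, masks and structures are invented for illustration, not Intel's): a constrained event carries a bitmask of the counters it may use, and the allocator takes the first free counter allowed by that mask, falling back to any free counter for unconstrained events.

	#include <stdio.h>

	#define NUM_COUNTERS 4

	struct constraint { unsigned int code; unsigned int idxmsk; };

	/* Illustrative table: event 0x10 only fits counter 0, event 0x12
	 * only counter 1; anything else may use any counter. */
	static const struct constraint constraints[] = {
		{ 0x10, 0x1 },
		{ 0x12, 0x2 },
		{ 0, 0 },				/* end marker */
	};

	static int get_event_idx(unsigned int code, unsigned int *used_mask)
	{
		unsigned int allowed = (1u << NUM_COUNTERS) - 1;
		const struct constraint *c;
		int i;

		for (c = constraints; c->idxmsk; c++)
			if (c->code == code)
				allowed = c->idxmsk;

		for (i = 0; i < NUM_COUNTERS; i++)
			if ((allowed & (1u << i)) && !(*used_mask & (1u << i))) {
				*used_mask |= 1u << i;
				return i;
			}
		return -1;			/* no suitable counter free */
	}

	int main(void)
	{
		unsigned int used = 0;

		printf("0x12 -> counter %d\n", get_event_idx(0x12, &used));
		printf("0x12 -> counter %d\n", get_event_idx(0x12, &used)); /* -1 */
		printf("0xc0 -> counter %d\n", get_event_idx(0xc0, &used));
		return 0;
	}
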
Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra LKML-Reference: <1254840129-6198-3-git-send-email-eranian@gmail.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 109 +++++++++++++++++++++++++++++-- 1 file changed, 105 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 1d16bd69551..9c758548a0e 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -77,6 +77,18 @@ struct cpu_hw_events { struct debug_store *ds; }; +struct event_constraint { + unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; + int code; +}; + +#define EVENT_CONSTRAINT(c, m) { .code = (c), .idxmsk[0] = (m) } +#define EVENT_CONSTRAINT_END { .code = 0, .idxmsk[0] = 0 } + +#define for_each_event_constraint(e, c) \ + for ((e) = (c); (e)->idxmsk[0]; (e)++) + + /* * struct x86_pmu - generic x86 pmu */ @@ -102,6 +114,7 @@ struct x86_pmu { u64 intel_ctrl; void (*enable_bts)(u64 config); void (*disable_bts)(void); + int (*get_event_idx)(struct hw_perf_event *hwc); }; static struct x86_pmu x86_pmu __read_mostly; @@ -110,6 +123,8 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; +static const struct event_constraint *event_constraint; + /* * Not sure about some of these */ @@ -155,6 +170,16 @@ static u64 p6_pmu_raw_event(u64 hw_event) return hw_event & P6_EVNTSEL_MASK; } +static const struct event_constraint intel_p6_event_constraints[] = +{ + EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */ + EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ + EVENT_CONSTRAINT(0x11, 0x1), /* FP_ASSIST */ + EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ + EVENT_CONSTRAINT(0x13, 0x2), /* DIV */ + EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */ + EVENT_CONSTRAINT_END +}; /* * Intel PerfMon v3. Used on Core2 and later. @@ -170,6 +195,35 @@ static const u64 intel_perfmon_event_map[] = [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, }; +static const struct event_constraint intel_core_event_constraints[] = +{ + EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ + EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ + EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ + EVENT_CONSTRAINT(0x13, 0x2), /* DIV */ + EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */ + EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */ + EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */ + EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */ + EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */ + EVENT_CONSTRAINT_END +}; + +static const struct event_constraint intel_nehalem_event_constraints[] = +{ + EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */ + EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */ + EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */ + EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */ + EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */ + EVENT_CONSTRAINT(0x4c, 0x3), /* LOAD_HIT_PRE */ + EVENT_CONSTRAINT(0x51, 0x3), /* L1D */ + EVENT_CONSTRAINT(0x52, 0x3), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */ + EVENT_CONSTRAINT(0x53, 0x3), /* L1D_CACHE_LOCK_FB_HIT */ + EVENT_CONSTRAINT(0xc5, 0x3), /* CACHE_LOCK_CYCLES */ + EVENT_CONSTRAINT_END +}; + static u64 intel_pmu_event_map(int hw_event) { return intel_perfmon_event_map[hw_event]; @@ -932,6 +986,8 @@ static int __hw_perf_event_init(struct perf_event *event) */ hwc->config = ARCH_PERFMON_EVENTSEL_INT; + hwc->idx = -1; + /* * Count user and OS events unless requested not to. 
*/ @@ -1365,6 +1421,45 @@ fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc) return -1; } +/* + * generic counter allocator: get next free counter + */ +static int gen_get_event_idx(struct hw_perf_event *hwc) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + int idx; + + idx = find_first_zero_bit(cpuc->used_mask, x86_pmu.num_events); + return idx == x86_pmu.num_events ? -1 : idx; +} + +/* + * intel-specific counter allocator: check event constraints + */ +static int intel_get_event_idx(struct hw_perf_event *hwc) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + const struct event_constraint *event_constraint; + int i, code; + + if (!event_constraint) + goto skip; + + code = hwc->config & 0xff; + + for_each_event_constraint(event_constraint, event_constraint) { + if (code == event_constraint->code) { + for_each_bit(i, event_constraint->idxmsk, X86_PMC_IDX_MAX) { + if (!test_and_set_bit(i, cpuc->used_mask)) + return i; + } + return -1; + } + } +skip: + return gen_get_event_idx(hwc); +} + /* * Find a PMC slot for the freshly enabled / scheduled in event: */ @@ -1402,11 +1497,10 @@ static int x86_pmu_enable(struct perf_event *event) } else { idx = hwc->idx; /* Try to get the previous generic event again */ - if (test_and_set_bit(idx, cpuc->used_mask)) { + if (idx == -1 || test_and_set_bit(idx, cpuc->used_mask)) { try_generic: - idx = find_first_zero_bit(cpuc->used_mask, - x86_pmu.num_events); - if (idx == x86_pmu.num_events) + idx = x86_pmu.get_event_idx(hwc); + if (idx == -1) return -EAGAIN; set_bit(idx, cpuc->used_mask); @@ -1883,6 +1977,7 @@ static struct x86_pmu p6_pmu = { */ .event_bits = 32, .event_mask = (1ULL << 32) - 1, + .get_event_idx = intel_get_event_idx, }; static struct x86_pmu intel_pmu = { @@ -1906,6 +2001,7 @@ static struct x86_pmu intel_pmu = { .max_period = (1ULL << 31) - 1, .enable_bts = intel_pmu_enable_bts, .disable_bts = intel_pmu_disable_bts, + .get_event_idx = intel_get_event_idx, }; static struct x86_pmu amd_pmu = { @@ -1926,6 +2022,7 @@ static struct x86_pmu amd_pmu = { .apic = 1, /* use highest bit to detect overflow */ .max_period = (1ULL << 47) - 1, + .get_event_idx = gen_get_event_idx, }; static int p6_pmu_init(void) @@ -1938,10 +2035,12 @@ static int p6_pmu_init(void) case 7: case 8: case 11: /* Pentium III */ + event_constraint = intel_p6_event_constraints; break; case 9: case 13: /* Pentium M */ + event_constraint = intel_p6_event_constraints; break; default: pr_cont("unsupported p6 CPU model %d ", @@ -2013,12 +2112,14 @@ static int intel_pmu_init(void) sizeof(hw_cache_event_ids)); pr_cont("Core2 events, "); + event_constraint = intel_core_event_constraints; break; default: case 26: memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, sizeof(hw_cache_event_ids)); + event_constraint = intel_nehalem_event_constraints; pr_cont("Nehalem/Corei7 events, "); break; case 28: From fe9081cc9bdabb0be953a39ad977cea14e35bce5 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 8 Oct 2009 11:56:07 +0200 Subject: [PATCH 104/470] perf, x86: Add simple group validation Refuse to add events when the group wouldn't fit onto the PMU anymore. Naive implementation. 
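In outline (a toy sketch with invented names, not the kernel's cpu_hw_events or x86_schedule_event code): dry-run the whole group against an empty, throwaway counter mask, and accept the new event only if every member still finds a slot.

	#include <stdio.h>

	#define NUM_COUNTERS 2

	/* Toy scheduler: each event just needs any free counter. */
	static int schedule_one(unsigned int *used_mask)
	{
		int i;

		for (i = 0; i < NUM_COUNTERS; i++)
			if (!(*used_mask & (1u << i))) {
				*used_mask |= 1u << i;
				return i;
			}
		return -1;
	}

	/* Dry-run the group (leader + siblings + the new event) against a
	 * fake, empty PMU state; reject if anything fails to get a counter. */
	static int validate_group(int nr_group_events)
	{
		unsigned int fake_used = 0;
		int i;

		for (i = 0; i < nr_group_events; i++)
			if (schedule_one(&fake_used) < 0)
				return -1;	/* -ENOSPC in the real code */
		return 0;
	}

	int main(void)
	{
		printf("group of 2: %s\n", validate_group(2) ? "rejected" : "ok");
		printf("group of 3: %s\n", validate_group(3) ? "rejected" : "ok");
		return 0;
	}
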
Signed-off-by: Peter Zijlstra Cc: Stephane Eranian LKML-Reference: <1254911461.26976.239.camel@twins> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 90 ++++++++++++++++++++++++-------- 1 file changed, 69 insertions(+), 21 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 9c758548a0e..9961d845719 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -114,7 +114,8 @@ struct x86_pmu { u64 intel_ctrl; void (*enable_bts)(u64 config); void (*disable_bts)(void); - int (*get_event_idx)(struct hw_perf_event *hwc); + int (*get_event_idx)(struct cpu_hw_events *cpuc, + struct hw_perf_event *hwc); }; static struct x86_pmu x86_pmu __read_mostly; @@ -523,7 +524,7 @@ static u64 intel_pmu_raw_event(u64 hw_event) #define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL #define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL #define CORE_EVNTSEL_INV_MASK 0x00800000ULL -#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL +#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL #define CORE_EVNTSEL_MASK \ (CORE_EVNTSEL_EVENT_MASK | \ @@ -1390,8 +1391,7 @@ static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx) x86_pmu_enable_event(hwc, idx); } -static int -fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc) +static int fixed_mode_idx(struct hw_perf_event *hwc) { unsigned int hw_event; @@ -1424,9 +1424,9 @@ fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc) /* * generic counter allocator: get next free counter */ -static int gen_get_event_idx(struct hw_perf_event *hwc) +static int +gen_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); int idx; idx = find_first_zero_bit(cpuc->used_mask, x86_pmu.num_events); @@ -1436,16 +1436,16 @@ static int gen_get_event_idx(struct hw_perf_event *hwc) /* * intel-specific counter allocator: check event constraints */ -static int intel_get_event_idx(struct hw_perf_event *hwc) +static int +intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); const struct event_constraint *event_constraint; int i, code; if (!event_constraint) goto skip; - code = hwc->config & 0xff; + code = hwc->config & CORE_EVNTSEL_EVENT_MASK; for_each_event_constraint(event_constraint, event_constraint) { if (code == event_constraint->code) { @@ -1457,26 +1457,22 @@ static int intel_get_event_idx(struct hw_perf_event *hwc) } } skip: - return gen_get_event_idx(hwc); + return gen_get_event_idx(cpuc, hwc); } -/* - * Find a PMC slot for the freshly enabled / scheduled in event: - */ -static int x86_pmu_enable(struct perf_event *event) +static int +x86_schedule_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - struct hw_perf_event *hwc = &event->hw; int idx; - idx = fixed_mode_idx(event, hwc); + idx = fixed_mode_idx(hwc); if (idx == X86_PMC_IDX_FIXED_BTS) { /* BTS is already occupied. 
*/ if (test_and_set_bit(idx, cpuc->used_mask)) return -EAGAIN; hwc->config_base = 0; - hwc->event_base = 0; + hwc->event_base = 0; hwc->idx = idx; } else if (idx >= 0) { /* @@ -1499,17 +1495,33 @@ static int x86_pmu_enable(struct perf_event *event) /* Try to get the previous generic event again */ if (idx == -1 || test_and_set_bit(idx, cpuc->used_mask)) { try_generic: - idx = x86_pmu.get_event_idx(hwc); + idx = x86_pmu.get_event_idx(cpuc, hwc); if (idx == -1) return -EAGAIN; set_bit(idx, cpuc->used_mask); hwc->idx = idx; } - hwc->config_base = x86_pmu.eventsel; - hwc->event_base = x86_pmu.perfctr; + hwc->config_base = x86_pmu.eventsel; + hwc->event_base = x86_pmu.perfctr; } + return idx; +} + +/* + * Find a PMC slot for the freshly enabled / scheduled in event: + */ +static int x86_pmu_enable(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx; + + idx = x86_schedule_event(cpuc, hwc); + if (idx < 0) + return idx; + perf_events_lapic_init(); x86_pmu.disable(hwc, idx); @@ -2212,11 +2224,47 @@ static const struct pmu pmu = { .unthrottle = x86_pmu_unthrottle, }; +static int +validate_event(struct cpu_hw_events *cpuc, struct perf_event *event) +{ + struct hw_perf_event fake_event = event->hw; + + if (event->pmu != &pmu) + return 0; + + return x86_schedule_event(cpuc, &fake_event); +} + +static int validate_group(struct perf_event *event) +{ + struct perf_event *sibling, *leader = event->group_leader; + struct cpu_hw_events fake_pmu; + + memset(&fake_pmu, 0, sizeof(fake_pmu)); + + if (!validate_event(&fake_pmu, leader)) + return -ENOSPC; + + list_for_each_entry(sibling, &leader->sibling_list, group_entry) { + if (!validate_event(&fake_pmu, sibling)) + return -ENOSPC; + } + + if (!validate_event(&fake_pmu, event)) + return -ENOSPC; + + return 0; +} + const struct pmu *hw_perf_event_init(struct perf_event *event) { int err; err = __hw_perf_event_init(event); + if (!err) { + if (event->group_leader != event) + err = validate_group(event); + } if (err) { if (event->destroy) event->destroy(event); From fe9081cc9bdabb0be953a39ad977cea14e35bce5 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Mon, 12 Oct 2009 07:56:03 +0200 Subject: [PATCH 105/470] perf tools: Fix counter sample frequency breakage Commit 42e59d7d19dc4b4 switched to a default sample frequency of 1KHz, which overrides any user supplied count, causing sched, top and timechart to miss events due to their discrete events being flagged PERF_SAMPLE_PERIOD. Override default sample frequency when the user provides a period count, and make both record and top honor that user supplied option.
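The rule the fix implements is simple: a user-supplied count switches sampling to a fixed period, otherwise the frequency is used, and if both are zero the tool aborts. The stand-alone sketch below shows that decision against the perf_event_attr ABI; it is deliberately simplified, is not the actual builtin-record.c/builtin-top.c code, and user_period/user_freq merely stand in for the user-supplied count and sampling frequency.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <linux/perf_event.h>

static void setup_sampling(struct perf_event_attr *attr,
			   unsigned long user_period, unsigned long user_freq)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);

	if (user_period) {
		attr->freq = 0;				/* sample every N events */
		attr->sample_period = user_period;
	} else if (user_freq) {
		attr->freq = 1;				/* sample N times per second */
		attr->sample_freq = user_freq;
	} else {
		fprintf(stderr, "frequency and count are zero, aborting\n");
		exit(EXIT_FAILURE);
	}
}

int main(void)
{
	struct perf_event_attr attr;

	setup_sampling(&attr, 100000, 1000);		/* count given: period wins */
	printf("freq=%u period=%llu\n",
	       (unsigned)attr.freq, (unsigned long long)attr.sample_period);

	setup_sampling(&attr, 0, 1000);			/* no count: frequency mode */
	printf("freq=%u sample_freq=%llu\n",
	       (unsigned)attr.freq, (unsigned long long)attr.sample_freq);
	return 0;
}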
Signed-off-by: Mike Galbraith Cc: Peter Zijlstra Cc: Arjan van de Ven LKML-Reference: <1255326963.15107.2.camel@marge.simson.net> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 14 +++++++++++++- tools/perf/builtin-top.c | 28 +++++++++++++++++++++------- 2 files changed, 34 insertions(+), 8 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 59af03d80d0..4e3a374e7aa 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -26,7 +26,7 @@ static int fd[MAX_NR_CPUS][MAX_COUNTERS]; -static long default_interval = 100000; +static long default_interval = 0; static int nr_cpus = 0; static unsigned int page_size; @@ -730,6 +730,18 @@ int cmd_record(int argc, const char **argv, const char *prefix __used) attrs[0].config = PERF_COUNT_HW_CPU_CYCLES; } + /* + * User specified count overrides default frequency. + */ + if (default_interval) + freq = 0; + else if (freq) { + default_interval = freq; + } else { + fprintf(stderr, "frequency and count are zero, aborting\n"); + exit(EXIT_FAILURE); + } + for (counter = 0; counter < nr_counters; counter++) { if (attrs[counter].sample_period) continue; diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index d978dc99236..c0f69e80b2c 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -57,7 +57,7 @@ static int fd[MAX_NR_CPUS][MAX_COUNTERS]; static int system_wide = 0; -static int default_interval = 100000; +static int default_interval = 0; static int count_filter = 5; static int print_entries = 15; @@ -975,7 +975,13 @@ static void start_counter(int i, int counter) attr = attrs + counter; attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; - attr->freq = freq; + + if (freq) { + attr->sample_type |= PERF_SAMPLE_PERIOD; + attr->freq = 1; + attr->sample_freq = freq; + } + attr->inherit = (cpu < 0) && inherit; try_again: @@ -1130,11 +1136,6 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) if (argc) usage_with_options(top_usage, options); - if (freq) { - default_interval = freq; - freq = 1; - } - /* CPU and PID are mutually exclusive */ if (target_pid != -1 && profile_cpu != -1) { printf("WARNING: PID switch overriding CPU\n"); @@ -1151,6 +1152,19 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) parse_symbols(); parse_source(sym_filter_entry); + + /* + * User specified count overrides default frequency. + */ + if (default_interval) + freq = 0; + else if (freq) { + default_interval = freq; + } else { + fprintf(stderr, "frequency and count are zero, aborting\n"); + exit(EXIT_FAILURE); + } + /* * Fill in the ones not specifically initialized via -c: */ From 55ffb7a6bd45d0083ffb132381cb46964a4afe01 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Sat, 10 Oct 2009 14:46:04 +0200 Subject: [PATCH 106/470] perf sched: Add -C option to measure on a specific CPU To refresh, trying to sched record only one CPU results in bogus latencies as below. I fixed^Wmade it stop doing the bad thing today, by following task migration events properly. 
Before:

marge:/root/tmp # taskset -c 1 perf sched record -C 0 -- sleep 10
marge:/root/tmp # perf sched lat

-----------------------------------------------------------------------------------------
Task | Runtime ms | Switches | Average delay ms | Maximum delay ms |
-----------------------------------------------------------------------------------------
Xorg:4943 | 1.290 ms | 1 | avg: 1670.132 ms | max: 1670.132 ms |
hald-addon-stor:3569 | 0.091 ms | 3 | avg: 658.609 ms | max: 1975.797 ms |
hald-addon-stor:3573 | 0.209 ms | 4 | avg: 499.138 ms | max: 1990.565 ms |
audispd:4270 | 0.012 ms | 1 | avg: 0.015 ms | max: 0.015 ms |
....

marge:/root/tmp # perf sched trace|grep 'Xorg:4943'
swapper-0 [000] 401.184013288: sched_stat_runtime: task: Xorg:4943 runtime: 1233188 [ns], vruntime: 19105169779 [ns]
rt2870TimerQHan-4947 [000] 402.854140127: sched_stat_wait: task: Xorg:4943 wait: 580073 [ns]
rt2870TimerQHan-4947 [000] 402.854141770: sched_migrate_task: task Xorg:4943 [140] from: 1 to: 0
rt2870TimerQHan-4947 [000] 402.854143854: sched_stat_wait: task: Xorg:4943 wait: 0 [ns]
rt2870TimerQHan-4947 [000] 402.854145397: sched_switch: task rt2870TimerQHan:4947 [140] (D) ==> Xorg:4943 [140]
Xorg-4943 [000] 402.854193133: sched_stat_runtime: task: Xorg:4943 runtime: 56546 [ns], vruntime: 11766332500 [ns]
Xorg-4943 [000] 402.854196842: sched_switch: task Xorg:4943 [140] (S) ==> swapper:0 [140]

After:

marge:/root/tmp # taskset -c 1 perf sched record -C 0 -- sleep 10
marge:/root/tmp # perf sched lat

-----------------------------------------------------------------------------------------
Task | Runtime ms | Switches | Average delay ms | Maximum delay ms |
-----------------------------------------------------------------------------------------
amarokapp:11150 | 271.297 ms | 878 | avg: 0.130 ms | max: 1.057 ms |
konsole:5965 | 1.370 ms | 12 | avg: 0.092 ms | max: 0.855 ms |
Xorg:4943 | 179.980 ms | 1109 | avg: 0.087 ms | max: 1.206 ms |
hald-addon-stor:3574 | 0.212 ms | 9 | avg: 0.040 ms | max: 0.169 ms |
hald-addon-stor:3570 | 0.223 ms | 9 | avg: 0.037 ms | max: 0.223 ms |
klauncher:5864 | 0.550 ms | 8 | avg: 0.032 ms | max: 0.048 ms |

The 'Maximum delay ms' results are now sane.
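The bogus latencies come from the state machine computing a delay against a wakeup that happened on a CPU that was never recorded (or that is being filtered out), i.e. against a stale timestamp; following sched_migrate_task restarts the measurement at the moment the task enters the traced CPU. The toy model below, with invented names and made-up nanosecond timestamps, only illustrates that arithmetic; the real handling is latency_migrate_task_event() in builtin-sched.c as added by this patch.

#include <stdio.h>

typedef unsigned long long u64;

struct work_atom_demo {
	u64 wake_up_time;	/* last wakeup seen in the recorded stream */
	u64 sched_in_time;	/* when the task finally ran on the traced CPU */
};

/* Without migration handling, the delay is measured from a wakeup that
 * happened on an unrecorded CPU, so it looks enormous. */
static u64 delay_naive(const struct work_atom_demo *a)
{
	return a->sched_in_time - a->wake_up_time;
}

/* Following the migration restarts the measurement at the moment the task
 * entered the traced CPU, which is what resetting the atom timestamps does. */
static u64 delay_with_migration(struct work_atom_demo *a, u64 migrate_time)
{
	if (migrate_time > a->wake_up_time)
		a->wake_up_time = migrate_time;
	return a->sched_in_time - a->wake_up_time;
}

int main(void)
{
	struct work_atom_demo a = {
		.wake_up_time	= 1000ULL,		/* woken long ago, on another CPU */
		.sched_in_time	= 1670132000ULL,	/* runs on the traced CPU much later */
	};

	printf("naive delay:     %llu ns\n", delay_naive(&a));
	printf("corrected delay: %llu ns\n",
	       delay_with_migration(&a, 1670000000ULL));
	return 0;
}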
Signed-off-by: Mike Galbraith LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/builtin-sched.c | 101 ++++++++++++++++++++++++++++++++++++- 1 file changed, 100 insertions(+), 1 deletion(-) diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 6b00529ce34..387a4423436 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -33,6 +33,8 @@ static u64 sample_type; static char default_sort_order[] = "avg, max, switch, runtime"; static char *sort_order = default_sort_order; +static int profile_cpu = -1; + static char *cwd; static int cwdlen; @@ -75,6 +77,7 @@ enum sched_event_type { SCHED_EVENT_RUN, SCHED_EVENT_SLEEP, SCHED_EVENT_WAKEUP, + SCHED_EVENT_MIGRATION, }; struct sched_atom { @@ -399,6 +402,8 @@ process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom) ret = sem_post(atom->wait_sem); BUG_ON(ret); break; + case SCHED_EVENT_MIGRATION: + break; default: BUG_ON(1); } @@ -746,6 +751,22 @@ struct trace_fork_event { u32 child_pid; }; +struct trace_migrate_task_event { + u32 size; + + u16 common_type; + u8 common_flags; + u8 common_preempt_count; + u32 common_pid; + u32 common_tgid; + + char comm[16]; + u32 pid; + + u32 prio; + u32 cpu; +}; + struct trace_sched_handler { void (*switch_event)(struct trace_switch_event *, struct event *, @@ -770,6 +791,12 @@ struct trace_sched_handler { int cpu, u64 timestamp, struct thread *thread); + + void (*migrate_task_event)(struct trace_migrate_task_event *, + struct event *, + int cpu, + u64 timestamp, + struct thread *thread); }; @@ -1140,7 +1167,12 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, atom = list_entry(atoms->work_list.prev, struct work_atom, list); - if (atom->state != THREAD_SLEEPING) + /* + * You WILL be missing events if you've recorded only + * one CPU, or are only looking at only one, so don't + * make useless noise. + */ + if (profile_cpu == -1 && atom->state != THREAD_SLEEPING) nr_state_machine_bugs++; nr_timestamps++; @@ -1153,11 +1185,51 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, atom->wake_up_time = timestamp; } +static void +latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, + struct event *__event __used, + int cpu __used, + u64 timestamp, + struct thread *thread __used) +{ + struct work_atoms *atoms; + struct work_atom *atom; + struct thread *migrant; + + /* + * Only need to worry about migration when profiling one CPU. 
+ */ + if (profile_cpu == -1) + return; + + migrant = threads__findnew(migrate_task_event->pid, &threads, &last_match); + atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid); + if (!atoms) { + thread_atoms_insert(migrant); + register_pid(migrant->pid, migrant->comm); + atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid); + if (!atoms) + die("migration-event: Internal tree error"); + add_sched_out_event(atoms, 'R', timestamp); + } + + BUG_ON(list_empty(&atoms->work_list)); + + atom = list_entry(atoms->work_list.prev, struct work_atom, list); + atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp; + + nr_timestamps++; + + if (atom->sched_out_time > timestamp) + nr_unordered_timestamps++; +} + static struct trace_sched_handler lat_ops = { .wakeup_event = latency_wakeup_event, .switch_event = latency_switch_event, .runtime_event = latency_runtime_event, .fork_event = latency_fork_event, + .migrate_task_event = latency_migrate_task_event, }; static void output_lat_thread(struct work_atoms *work_list) @@ -1517,6 +1589,26 @@ process_sched_exit_event(struct event *event, printf("sched_exit event %p\n", event); } +static void +process_sched_migrate_task_event(struct raw_event_sample *raw, + struct event *event, + int cpu __used, + u64 timestamp __used, + struct thread *thread __used) +{ + struct trace_migrate_task_event migrate_task_event; + + FILL_COMMON_FIELDS(migrate_task_event, event, raw->data); + + FILL_ARRAY(migrate_task_event, comm, event, raw->data); + FILL_FIELD(migrate_task_event, pid, event, raw->data); + FILL_FIELD(migrate_task_event, prio, event, raw->data); + FILL_FIELD(migrate_task_event, cpu, event, raw->data); + + if (trace_handler->migrate_task_event) + trace_handler->migrate_task_event(&migrate_task_event, event, cpu, timestamp, thread); +} + static void process_raw_event(event_t *raw_event __used, void *more_data, int cpu, u64 timestamp, struct thread *thread) @@ -1540,6 +1632,8 @@ process_raw_event(event_t *raw_event __used, void *more_data, process_sched_fork_event(raw, event, cpu, timestamp, thread); if (!strcmp(event->name, "sched_process_exit")) process_sched_exit_event(event, cpu, timestamp, thread); + if (!strcmp(event->name, "sched_migrate_task")) + process_sched_migrate_task_event(raw, event, cpu, timestamp, thread); } static int @@ -1589,6 +1683,9 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) return -1; } + if (profile_cpu != -1 && profile_cpu != (int) cpu) + return 0; + process_raw_event(event, more_data, cpu, timestamp, thread); return 0; @@ -1771,6 +1868,8 @@ static const struct option latency_options[] = { "sort by key(s): runtime, switch, avg, max"), OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), + OPT_INTEGER('C', "CPU", &profile_cpu, + "CPU to profile on"), OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), OPT_END() From fb2531953fd8855abdcf458459020fd382c5deca Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Wed, 7 Oct 2009 13:20:38 +0200 Subject: [PATCH 107/470] mce, edac: Use an atomic notifier for MCEs decoding Add an atomic notifier which ensures proper locking when conveying MCE info to EDAC for decoding. The actual notifier call overrides a default, negative priority notifier. Note: make sure we register the default decoder only once since mcheck_init() runs on each CPU. 
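Besides the locking guarantee, the notifier chain gives the override behaviour for free: the default decoder registers at a negative priority, so a CPU/chipset-specific decoder registered at the default priority of 0 runs first and stops the chain. The stripped-down, hypothetical module below illustrates only that pattern; the demo_* names are invented and this is not the mce.c or edac_mce_amd.c code.

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static ATOMIC_NOTIFIER_HEAD(demo_decoder_chain);

static int fallback_decode(struct notifier_block *nb, unsigned long val,
			   void *data)
{
	pr_info("demo: no specific decoder registered\n");
	return NOTIFY_STOP;		/* handled, stop walking the chain */
}

/* Negative priority: runs last, so a real decoder can take precedence. */
static struct notifier_block fallback_nb = {
	.notifier_call	= fallback_decode,
	.priority	= -1,
};

static int specific_decode(struct notifier_block *nb, unsigned long val,
			    void *data)
{
	pr_info("demo: decoded error %lu\n", val);
	return NOTIFY_STOP;
}

/* Default priority 0 beats -1, so this overrides the fallback once added. */
static struct notifier_block specific_nb = {
	.notifier_call	= specific_decode,
};

static int __init demo_init(void)
{
	atomic_notifier_chain_register(&demo_decoder_chain, &fallback_nb);
	atomic_notifier_call_chain(&demo_decoder_chain, 1, NULL);  /* fallback fires */

	atomic_notifier_chain_register(&demo_decoder_chain, &specific_nb);
	atomic_notifier_call_chain(&demo_decoder_chain, 2, NULL);  /* specific fires */
	return 0;
}

static void __exit demo_exit(void)
{
	atomic_notifier_chain_unregister(&demo_decoder_chain, &specific_nb);
	atomic_notifier_chain_unregister(&demo_decoder_chain, &fallback_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");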
Signed-off-by: Borislav Petkov LKML-Reference: <20091003065752.GA8935@liondog.tnic> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mce.h | 3 ++- arch/x86/kernel/cpu/mcheck/mce.c | 29 ++++++++++++++++++++--------- drivers/edac/edac_mce_amd.c | 21 ++++++++++++--------- 3 files changed, 34 insertions(+), 19 deletions(-) diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index f1363b72364..227a72df644 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -108,6 +108,8 @@ struct mce_log { #define K8_MCE_THRESHOLD_BANK_5 (MCE_THRESHOLD_BASE + 5 * 9) #define K8_MCE_THRESHOLD_DRAM_ECC (MCE_THRESHOLD_BANK_4 + 0) +extern struct atomic_notifier_head x86_mce_decoder_chain; + #ifdef __KERNEL__ #include @@ -213,6 +215,5 @@ extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); void intel_init_thermal(struct cpuinfo_x86 *c); void mce_log_therm_throt_event(__u64 status); - #endif /* __KERNEL__ */ #endif /* _ASM_X86_MCE_H */ diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index b1598a9436d..15ba9c972d7 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -85,18 +85,26 @@ static DECLARE_WAIT_QUEUE_HEAD(mce_wait); static DEFINE_PER_CPU(struct mce, mces_seen); static int cpu_missing; -static void default_decode_mce(struct mce *m) +/* + * CPU/chipset specific EDAC code can register a notifier call here to print + * MCE errors in a human-readable form. + */ +ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain); +EXPORT_SYMBOL_GPL(x86_mce_decoder_chain); + +static int default_decode_mce(struct notifier_block *nb, unsigned long val, + void *data) { pr_emerg("No human readable MCE decoding support on this CPU type.\n"); pr_emerg("Run the message through 'mcelog --ascii' to decode.\n"); + + return NOTIFY_STOP; } -/* - * CPU/chipset specific EDAC code can register a callback here to print - * MCE errors in a human-readable form: - */ -void (*x86_mce_decode_callback)(struct mce *m) = default_decode_mce; -EXPORT_SYMBOL(x86_mce_decode_callback); +static struct notifier_block mce_dec_nb = { + .notifier_call = default_decode_mce, + .priority = -1, +}; /* MCA banks polled by the period polling timer for corrected events */ DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { @@ -204,9 +212,9 @@ static void print_mce(struct mce *m) /* * Print out human-readable details about the MCE error, - * (if the CPU has an implementation for that): + * (if the CPU has an implementation for that) */ - x86_mce_decode_callback(m); + atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m); } static void print_mce_head(void) @@ -1420,6 +1428,9 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c) mce_cpu_features(c); mce_init_timer(); INIT_WORK(&__get_cpu_var(mce_work), mce_process_work); + + if (raw_smp_processor_id() == 0) + atomic_notifier_chain_register(&x86_mce_decoder_chain, &mce_dec_nb); } /* diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c index 713ed7d3724..689cc6a6214 100644 --- a/drivers/edac/edac_mce_amd.c +++ b/drivers/edac/edac_mce_amd.c @@ -3,7 +3,6 @@ static bool report_gart_errors; static void (*nb_bus_decoder)(int node_id, struct err_regs *regs); -static void (*orig_mce_callback)(struct mce *m); void amd_report_gart_errors(bool v) { @@ -363,8 +362,10 @@ static inline void amd_decode_err_code(unsigned int ec) pr_warning("Huh? 
Unknown MCE error 0x%x\n", ec); } -static void amd_decode_mce(struct mce *m) +static int amd_decode_mce(struct notifier_block *nb, unsigned long val, + void *data) { + struct mce *m = (struct mce *)data; struct err_regs regs; int node, ecc; @@ -420,20 +421,22 @@ static void amd_decode_mce(struct mce *m) } amd_decode_err_code(m->status & 0xffff); + + return NOTIFY_STOP; } +static struct notifier_block amd_mce_dec_nb = { + .notifier_call = amd_decode_mce, +}; + static int __init mce_amd_init(void) { /* * We can decode MCEs for Opteron and later CPUs: */ if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && - (boot_cpu_data.x86 >= 0xf)) { - /* safe the default decode mce callback */ - orig_mce_callback = x86_mce_decode_callback; - - x86_mce_decode_callback = amd_decode_mce; - } + (boot_cpu_data.x86 >= 0xf)) + atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb); return 0; } @@ -442,7 +445,7 @@ early_initcall(mce_amd_init); #ifdef MODULE static void __exit mce_amd_exit(void) { - x86_mce_decode_callback = orig_mce_callback; + atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &amd_mce_dec_nb); } MODULE_DESCRIPTION("AMD MCE decoder"); From 405b2651e4bedf8d3932b64cad649b4d26b067f5 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 7 Oct 2009 18:27:40 -0400 Subject: [PATCH 108/470] tracing/kprobes: Add $ prefix to special variables Add $ prefix to the special variables(e.g. sa, rv) of kprobe-tracer. This resolves consistency issues between kprobe_events and perf-kprobe. The main goal is to avoid conflicts between local variable names of probed functions, used by perf probe, and special variables used in the kprobe event creation interface (stack values, etc...) and also available from perf probe. ie: we don't want rv (return value) to conflict with a local variable named rv in a probed function. Signed-off-by: Masami Hiramatsu Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Christoph Hellwig Cc: Ananth N Mavinakayanahalli Cc: Jim Keniston Cc: Frank Ch. Eigler LKML-Reference: <20091007222740.1684.91170.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Frederic Weisbecker --- Documentation/trace/kprobetrace.txt | 20 +++++----- kernel/trace/trace_kprobe.c | 60 ++++++++++++++++++----------- 2 files changed, 47 insertions(+), 33 deletions(-) diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt index 9b8f7c6040a..33f531858c5 100644 --- a/Documentation/trace/kprobetrace.txt +++ b/Documentation/trace/kprobetrace.txt @@ -36,13 +36,13 @@ Synopsis of kprobe_events FETCHARGS : Arguments. Each probe can have up to 128 args. %REG : Fetch register REG - sN : Fetch Nth entry of stack (N >= 0) - sa : Fetch stack address. @ADDR : Fetch memory at ADDR (ADDR should be in kernel) @SYM[+|-offs] : Fetch memory at SYM +|- offs (SYM should be a data symbol) - aN : Fetch function argument. (N >= 0)(*) - rv : Fetch return value.(**) - ra : Fetch return address.(**) + $sN : Fetch Nth entry of stack (N >= 0) + $sa : Fetch stack address. + $aN : Fetch function argument. (N >= 0)(*) + $rv : Fetch return value.(**) + $ra : Fetch return address.(**) +|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(***) NAME=FETCHARG: Set NAME as the argument name of FETCHARG. @@ -85,13 +85,13 @@ Usage examples To add a probe as a new event, write a new definition to kprobe_events as below. 
- echo p:myprobe do_sys_open dfd=a0 filename=a1 flags=a2 mode=a3 > /sys/kernel/debug/tracing/kprobe_events + echo p:myprobe do_sys_open dfd=$a0 filename=$a1 flags=$a2 mode=$a3 > /sys/kernel/debug/tracing/kprobe_events This sets a kprobe on the top of do_sys_open() function with recording 1st to 4th arguments as "myprobe" event. As this example shows, users can choose more familiar names for each arguments. - echo r:myretprobe do_sys_open rv ra >> /sys/kernel/debug/tracing/kprobe_events + echo r:myretprobe do_sys_open $rv $ra >> /sys/kernel/debug/tracing/kprobe_events This sets a kretprobe on the return point of do_sys_open() function with recording return value and return address as "myretprobe" event. @@ -138,11 +138,11 @@ events, you need to enable it. # TASK-PID CPU# TIMESTAMP FUNCTION # | | | | | <...>-1447 [001] 1038282.286875: myprobe: (do_sys_open+0x0/0xd6) dfd=3 filename=7fffd1ec4440 flags=8000 mode=0 - <...>-1447 [001] 1038282.286878: myretprobe: (sys_openat+0xc/0xe <- do_sys_open) rv=fffffffffffffffe ra=ffffffff81367a3a + <...>-1447 [001] 1038282.286878: myretprobe: (sys_openat+0xc/0xe <- do_sys_open) $rv=fffffffffffffffe $ra=ffffffff81367a3a <...>-1447 [001] 1038282.286885: myprobe: (do_sys_open+0x0/0xd6) dfd=ffffff9c filename=40413c flags=8000 mode=1b6 - <...>-1447 [001] 1038282.286915: myretprobe: (sys_open+0x1b/0x1d <- do_sys_open) rv=3 ra=ffffffff81367a3a + <...>-1447 [001] 1038282.286915: myretprobe: (sys_open+0x1b/0x1d <- do_sys_open) $rv=3 $ra=ffffffff81367a3a <...>-1447 [001] 1038282.286969: myprobe: (do_sys_open+0x0/0xd6) dfd=ffffff9c filename=4041c6 flags=98800 mode=10 - <...>-1447 [001] 1038282.286976: myretprobe: (sys_open+0x1b/0x1d <- do_sys_open) rv=3 ra=ffffffff81367a3a + <...>-1447 [001] 1038282.286976: myretprobe: (sys_open+0x1b/0x1d <- do_sys_open) $rv=3 $ra=ffffffff81367a3a Each line shows when the kernel hits an event, and <- SYMBOL means kernel diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 97309d4714f..f63ead0cc5c 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -220,24 +220,24 @@ static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff) int ret = -EINVAL; if (ff->func == fetch_argument) - ret = snprintf(buf, n, "a%lu", (unsigned long)ff->data); + ret = snprintf(buf, n, "$a%lu", (unsigned long)ff->data); else if (ff->func == fetch_register) { const char *name; name = regs_query_register_name((unsigned int)((long)ff->data)); ret = snprintf(buf, n, "%%%s", name); } else if (ff->func == fetch_stack) - ret = snprintf(buf, n, "s%lu", (unsigned long)ff->data); + ret = snprintf(buf, n, "$s%lu", (unsigned long)ff->data); else if (ff->func == fetch_memory) ret = snprintf(buf, n, "@0x%p", ff->data); else if (ff->func == fetch_symbol) { struct symbol_cache *sc = ff->data; ret = snprintf(buf, n, "@%s%+ld", sc->symbol, sc->offset); } else if (ff->func == fetch_retvalue) - ret = snprintf(buf, n, "rv"); + ret = snprintf(buf, n, "$rv"); else if (ff->func == fetch_ip) - ret = snprintf(buf, n, "ra"); + ret = snprintf(buf, n, "$ra"); else if (ff->func == fetch_stack_address) - ret = snprintf(buf, n, "sa"); + ret = snprintf(buf, n, "$sa"); else if (ff->func == fetch_indirect) { struct indirect_fetch_data *id = ff->data; size_t l = 0; @@ -429,12 +429,10 @@ static int split_symbol_offset(char *symbol, unsigned long *offset) #define PARAM_MAX_ARGS 16 #define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long)) -static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return) +static int 
parse_probe_vars(char *arg, struct fetch_func *ff, int is_return) { int ret = 0; unsigned long param; - long offset; - char *tmp; switch (arg[0]) { case 'a': /* argument */ @@ -456,14 +454,6 @@ static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return) } else ret = -EINVAL; break; - case '%': /* named register */ - ret = regs_query_register_offset(arg + 1); - if (ret >= 0) { - ff->func = fetch_register; - ff->data = (void *)(unsigned long)ret; - ret = 0; - } - break; case 's': /* stack */ if (arg[1] == 'a') { ff->func = fetch_stack_address; @@ -478,6 +468,31 @@ static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return) } } break; + default: + ret = -EINVAL; + } + return ret; +} + +static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return) +{ + int ret = 0; + unsigned long param; + long offset; + char *tmp; + + switch (arg[0]) { + case '$': + ret = parse_probe_vars(arg + 1, ff, is_return); + break; + case '%': /* named register */ + ret = regs_query_register_offset(arg + 1); + if (ret >= 0) { + ff->func = fetch_register; + ff->data = (void *)(unsigned long)ret; + ret = 0; + } + break; case '@': /* memory or symbol */ if (isdigit(arg[1])) { ret = strict_strtoul(arg + 1, 0, ¶m); @@ -489,8 +504,7 @@ static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return) ret = split_symbol_offset(arg + 1, &offset); if (ret) break; - ff->data = alloc_symbol_cache(arg + 1, - offset); + ff->data = alloc_symbol_cache(arg + 1, offset); if (ff->data) ff->func = fetch_symbol; else @@ -544,11 +558,11 @@ static int create_trace_probe(int argc, char **argv) * - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS] * - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS] * Fetch args: - * aN : fetch Nth of function argument. (N:0-) - * rv : fetch return value - * ra : fetch return address - * sa : fetch stack address - * sN : fetch Nth of stack (N:0-) + * $aN : fetch Nth of function argument. (N:0-) + * $rv : fetch return value + * $ra : fetch return address + * $sa : fetch stack address + * $sN : fetch Nth of stack (N:0-) * @ADDR : fetch memory at ADDR (ADDR should be in kernel) * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol) * %REG : fetch register REG From 99329c44f28a1b7ac83beebfb4319e612042e319 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 7 Oct 2009 18:27:48 -0400 Subject: [PATCH 109/470] tracing/kprobes: Remove '$ra' special variable Remove '$ra' (return address) because it is already shown at the head of each entry. Signed-off-by: Masami Hiramatsu Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Christoph Hellwig Cc: Ananth N Mavinakayanahalli Cc: Jim Keniston Cc: Frank Ch. Eigler LKML-Reference: <20091007222748.1684.12711.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Frederic Weisbecker --- Documentation/trace/kprobetrace.txt | 13 ++++++------- kernel/trace/trace_kprobe.c | 11 ----------- 2 files changed, 6 insertions(+), 18 deletions(-) diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt index 33f531858c5..4208253b5a5 100644 --- a/Documentation/trace/kprobetrace.txt +++ b/Documentation/trace/kprobetrace.txt @@ -1,4 +1,4 @@ - Kprobe-based Event Tracer + Kprobe-based Event Tracer ========================= Documentation is written by Masami Hiramatsu @@ -42,7 +42,6 @@ Synopsis of kprobe_events $sa : Fetch stack address. $aN : Fetch function argument. 
(N >= 0)(*) $rv : Fetch return value.(**) - $ra : Fetch return address.(**) +|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(***) NAME=FETCHARG: Set NAME as the argument name of FETCHARG. @@ -91,10 +90,10 @@ as below. 1st to 4th arguments as "myprobe" event. As this example shows, users can choose more familiar names for each arguments. - echo r:myretprobe do_sys_open $rv $ra >> /sys/kernel/debug/tracing/kprobe_events + echo r:myretprobe do_sys_open $rv >> /sys/kernel/debug/tracing/kprobe_events This sets a kretprobe on the return point of do_sys_open() function with -recording return value and return address as "myretprobe" event. +recording return value as "myretprobe" event. You can see the format of these events via /sys/kernel/debug/tracing/events/kprobes//format. @@ -138,11 +137,11 @@ events, you need to enable it. # TASK-PID CPU# TIMESTAMP FUNCTION # | | | | | <...>-1447 [001] 1038282.286875: myprobe: (do_sys_open+0x0/0xd6) dfd=3 filename=7fffd1ec4440 flags=8000 mode=0 - <...>-1447 [001] 1038282.286878: myretprobe: (sys_openat+0xc/0xe <- do_sys_open) $rv=fffffffffffffffe $ra=ffffffff81367a3a + <...>-1447 [001] 1038282.286878: myretprobe: (sys_openat+0xc/0xe <- do_sys_open) $rv=fffffffffffffffe <...>-1447 [001] 1038282.286885: myprobe: (do_sys_open+0x0/0xd6) dfd=ffffff9c filename=40413c flags=8000 mode=1b6 - <...>-1447 [001] 1038282.286915: myretprobe: (sys_open+0x1b/0x1d <- do_sys_open) $rv=3 $ra=ffffffff81367a3a + <...>-1447 [001] 1038282.286915: myretprobe: (sys_open+0x1b/0x1d <- do_sys_open) $rv=3 <...>-1447 [001] 1038282.286969: myprobe: (do_sys_open+0x0/0xd6) dfd=ffffff9c filename=4041c6 flags=98800 mode=10 - <...>-1447 [001] 1038282.286976: myretprobe: (sys_open+0x1b/0x1d <- do_sys_open) $rv=3 $ra=ffffffff81367a3a + <...>-1447 [001] 1038282.286976: myretprobe: (sys_open+0x1b/0x1d <- do_sys_open) $rv=3 Each line shows when the kernel hits an event, and <- SYMBOL means kernel diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index f63ead0cc5c..ba6d3bd4888 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -85,11 +85,6 @@ static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs, return regs_return_value(regs); } -static __kprobes unsigned long fetch_ip(struct pt_regs *regs, void *dummy) -{ - return instruction_pointer(regs); -} - static __kprobes unsigned long fetch_stack_address(struct pt_regs *regs, void *dummy) { @@ -234,8 +229,6 @@ static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff) ret = snprintf(buf, n, "@%s%+ld", sc->symbol, sc->offset); } else if (ff->func == fetch_retvalue) ret = snprintf(buf, n, "$rv"); - else if (ff->func == fetch_ip) - ret = snprintf(buf, n, "$ra"); else if (ff->func == fetch_stack_address) ret = snprintf(buf, n, "$sa"); else if (ff->func == fetch_indirect) { @@ -448,9 +441,6 @@ static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return) if (is_return && arg[1] == 'v') { ff->func = fetch_retvalue; ff->data = NULL; - } else if (is_return && arg[1] == 'a') { - ff->func = fetch_ip; - ff->data = NULL; } else ret = -EINVAL; break; @@ -560,7 +550,6 @@ static int create_trace_probe(int argc, char **argv) * Fetch args: * $aN : fetch Nth of function argument. 
(N:0-) * $rv : fetch return value - * $ra : fetch return address * $sa : fetch stack address * $sN : fetch Nth of stack (N:0-) * @ADDR : fetch memory at ADDR (ADDR should be in kernel) From 2e06ff6389aedafc4a3a374344ac70672252f9b5 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 7 Oct 2009 18:27:59 -0400 Subject: [PATCH 110/470] tracing/kprobes: Make special variable names more self-explainable Rename special variables to more self-explainable names as below: - $rv to $retval - $sa to $stack - $aN to $argN - $sN to $stackN Signed-off-by: Masami Hiramatsu Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Christoph Hellwig Cc: Ananth N Mavinakayanahalli Cc: Jim Keniston Cc: Frank Ch. Eigler LKML-Reference: <20091007222759.1684.3319.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Frederic Weisbecker --- Documentation/trace/kprobetrace.txt | 22 ++++++------ kernel/trace/trace_kprobe.c | 52 +++++++++++++---------------- 2 files changed, 35 insertions(+), 39 deletions(-) diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt index 4208253b5a5..15415243a9a 100644 --- a/Documentation/trace/kprobetrace.txt +++ b/Documentation/trace/kprobetrace.txt @@ -35,13 +35,13 @@ Synopsis of kprobe_events MEMADDR : Address where the probe is inserted. FETCHARGS : Arguments. Each probe can have up to 128 args. - %REG : Fetch register REG - @ADDR : Fetch memory at ADDR (ADDR should be in kernel) + %REG : Fetch register REG + @ADDR : Fetch memory at ADDR (ADDR should be in kernel) @SYM[+|-offs] : Fetch memory at SYM +|- offs (SYM should be a data symbol) - $sN : Fetch Nth entry of stack (N >= 0) - $sa : Fetch stack address. - $aN : Fetch function argument. (N >= 0)(*) - $rv : Fetch return value.(**) + $stackN : Fetch Nth entry of stack (N >= 0) + $stack : Fetch stack address. + $argN : Fetch function argument. (N >= 0)(*) + $retval : Fetch return value.(**) +|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(***) NAME=FETCHARG: Set NAME as the argument name of FETCHARG. @@ -84,13 +84,13 @@ Usage examples To add a probe as a new event, write a new definition to kprobe_events as below. - echo p:myprobe do_sys_open dfd=$a0 filename=$a1 flags=$a2 mode=$a3 > /sys/kernel/debug/tracing/kprobe_events + echo p:myprobe do_sys_open dfd=$arg0 filename=$arg1 flags=$arg2 mode=$arg3 > /sys/kernel/debug/tracing/kprobe_events This sets a kprobe on the top of do_sys_open() function with recording 1st to 4th arguments as "myprobe" event. As this example shows, users can choose more familiar names for each arguments. - echo r:myretprobe do_sys_open $rv >> /sys/kernel/debug/tracing/kprobe_events + echo r:myretprobe do_sys_open $retval >> /sys/kernel/debug/tracing/kprobe_events This sets a kretprobe on the return point of do_sys_open() function with recording return value as "myretprobe" event. @@ -137,11 +137,11 @@ events, you need to enable it. 
# TASK-PID CPU# TIMESTAMP FUNCTION # | | | | | <...>-1447 [001] 1038282.286875: myprobe: (do_sys_open+0x0/0xd6) dfd=3 filename=7fffd1ec4440 flags=8000 mode=0 - <...>-1447 [001] 1038282.286878: myretprobe: (sys_openat+0xc/0xe <- do_sys_open) $rv=fffffffffffffffe + <...>-1447 [001] 1038282.286878: myretprobe: (sys_openat+0xc/0xe <- do_sys_open) $retval=fffffffffffffffe <...>-1447 [001] 1038282.286885: myprobe: (do_sys_open+0x0/0xd6) dfd=ffffff9c filename=40413c flags=8000 mode=1b6 - <...>-1447 [001] 1038282.286915: myretprobe: (sys_open+0x1b/0x1d <- do_sys_open) $rv=3 + <...>-1447 [001] 1038282.286915: myretprobe: (sys_open+0x1b/0x1d <- do_sys_open) $retval=3 <...>-1447 [001] 1038282.286969: myprobe: (do_sys_open+0x0/0xd6) dfd=ffffff9c filename=4041c6 flags=98800 mode=10 - <...>-1447 [001] 1038282.286976: myretprobe: (sys_open+0x1b/0x1d <- do_sys_open) $rv=3 + <...>-1447 [001] 1038282.286976: myretprobe: (sys_open+0x1b/0x1d <- do_sys_open) $retval=3 Each line shows when the kernel hits an event, and <- SYMBOL means kernel diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index ba6d3bd4888..3313fa74ce5 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -215,22 +215,22 @@ static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff) int ret = -EINVAL; if (ff->func == fetch_argument) - ret = snprintf(buf, n, "$a%lu", (unsigned long)ff->data); + ret = snprintf(buf, n, "$arg%lu", (unsigned long)ff->data); else if (ff->func == fetch_register) { const char *name; name = regs_query_register_name((unsigned int)((long)ff->data)); ret = snprintf(buf, n, "%%%s", name); } else if (ff->func == fetch_stack) - ret = snprintf(buf, n, "$s%lu", (unsigned long)ff->data); + ret = snprintf(buf, n, "$stack%lu", (unsigned long)ff->data); else if (ff->func == fetch_memory) ret = snprintf(buf, n, "@0x%p", ff->data); else if (ff->func == fetch_symbol) { struct symbol_cache *sc = ff->data; ret = snprintf(buf, n, "@%s%+ld", sc->symbol, sc->offset); } else if (ff->func == fetch_retvalue) - ret = snprintf(buf, n, "$rv"); + ret = snprintf(buf, n, "$retval"); else if (ff->func == fetch_stack_address) - ret = snprintf(buf, n, "$sa"); + ret = snprintf(buf, n, "$stack"); else if (ff->func == fetch_indirect) { struct indirect_fetch_data *id = ff->data; size_t l = 0; @@ -427,40 +427,36 @@ static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return) int ret = 0; unsigned long param; - switch (arg[0]) { - case 'a': /* argument */ - ret = strict_strtoul(arg + 1, 10, ¶m); - if (ret || param > PARAM_MAX_ARGS) - ret = -EINVAL; - else { - ff->func = fetch_argument; - ff->data = (void *)param; - } - break; - case 'r': /* retval or retaddr */ - if (is_return && arg[1] == 'v') { + if (strcmp(arg, "retval") == 0) { + if (is_return) { ff->func = fetch_retvalue; ff->data = NULL; } else ret = -EINVAL; - break; - case 's': /* stack */ - if (arg[1] == 'a') { + } else if (strncmp(arg, "stack", 5) == 0) { + if (arg[5] == '\0') { ff->func = fetch_stack_address; ff->data = NULL; - } else { - ret = strict_strtoul(arg + 1, 10, ¶m); + } else if (isdigit(arg[5])) { + ret = strict_strtoul(arg + 5, 10, ¶m); if (ret || param > PARAM_MAX_STACK) ret = -EINVAL; else { ff->func = fetch_stack; ff->data = (void *)param; } + } else + ret = -EINVAL; + } else if (strncmp(arg, "arg", 3) == 0 && isdigit(arg[3])) { + ret = strict_strtoul(arg + 3, 10, ¶m); + if (ret || param > PARAM_MAX_ARGS) + ret = -EINVAL; + else { + ff->func = fetch_argument; + ff->data = (void *)param; } - break; - default: 
+ } else ret = -EINVAL; - } return ret; } @@ -548,10 +544,10 @@ static int create_trace_probe(int argc, char **argv) * - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS] * - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS] * Fetch args: - * $aN : fetch Nth of function argument. (N:0-) - * $rv : fetch return value - * $sa : fetch stack address - * $sN : fetch Nth of stack (N:0-) + * $argN : fetch Nth of function argument. (N:0-) + * $retval : fetch return value + * $stack : fetch stack address + * $stackN : fetch Nth of stack (N:0-) * @ADDR : fetch memory at ADDR (ADDR should be in kernel) * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol) * %REG : fetch register REG From a703d946e883d8e447d0597de556e2effd110372 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 7 Oct 2009 18:28:07 -0400 Subject: [PATCH 111/470] tracing/kprobes: Avoid field name confliction Check whether the argument name is in conflict with other field names while creating a kprobe through the debugfs interface. Changes in v3: - Check strcmp() == 0 instead of !strcmp(). Changes in v2: - Add common_lock_depth to reserved name list. Signed-off-by: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Christoph Hellwig Cc: Ananth N Mavinakayanahalli Cc: Jim Keniston Cc: Frank Ch. Eigler LKML-Reference: <20091007222807.1684.26880.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Frederic Weisbecker --- kernel/trace/trace_kprobe.c | 65 ++++++++++++++++++++++++++++++------- 1 file changed, 53 insertions(+), 12 deletions(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 3313fa74ce5..bb6cb2bc6fa 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -38,6 +38,25 @@ #define MAX_EVENT_NAME_LEN 64 #define KPROBE_EVENT_SYSTEM "kprobes" +/* Reserved field names */ +#define FIELD_STRING_IP "ip" +#define FIELD_STRING_NARGS "nargs" +#define FIELD_STRING_RETIP "ret_ip" +#define FIELD_STRING_FUNC "func" + +const char *reserved_field_names[] = { + "common_type", + "common_flags", + "common_preempt_count", + "common_pid", + "common_tgid", + "common_lock_depth", + FIELD_STRING_IP, + FIELD_STRING_NARGS, + FIELD_STRING_RETIP, + FIELD_STRING_FUNC, +}; + /* currently, trace_kprobe only supports X86. 
*/ struct fetch_func { @@ -537,6 +556,20 @@ static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return) return ret; } +/* Return 1 if name is reserved or already used by another argument */ +static int conflict_field_name(const char *name, + struct probe_arg *args, int narg) +{ + int i; + for (i = 0; i < ARRAY_SIZE(reserved_field_names); i++) + if (strcmp(reserved_field_names[i], name) == 0) + return 1; + for (i = 0; i < narg; i++) + if (strcmp(args[i].name, name) == 0) + return 1; + return 0; +} + static int create_trace_probe(int argc, char **argv) { /* @@ -637,6 +670,12 @@ static int create_trace_probe(int argc, char **argv) *arg++ = '\0'; else arg = argv[i]; + + if (conflict_field_name(argv[i], tp->args, i)) { + ret = -EINVAL; + goto error; + } + tp->args[i].name = kstrdup(argv[i], GFP_KERNEL); /* Parse fetch argument */ @@ -1039,8 +1078,8 @@ static int kprobe_event_define_fields(struct ftrace_event_call *event_call) if (!ret) return ret; - DEFINE_FIELD(unsigned long, ip, "ip", 0); - DEFINE_FIELD(int, nargs, "nargs", 1); + DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); + DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1); /* Set argument names as fields */ for (i = 0; i < tp->nr_args; i++) DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0); @@ -1057,9 +1096,9 @@ static int kretprobe_event_define_fields(struct ftrace_event_call *event_call) if (!ret) return ret; - DEFINE_FIELD(unsigned long, func, "func", 0); - DEFINE_FIELD(unsigned long, ret_ip, "ret_ip", 0); - DEFINE_FIELD(int, nargs, "nargs", 1); + DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0); + DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0); + DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1); /* Set argument names as fields */ for (i = 0; i < tp->nr_args; i++) DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0); @@ -1108,15 +1147,16 @@ static int kprobe_event_show_format(struct ftrace_event_call *call, int ret, i; struct trace_probe *tp = (struct trace_probe *)call->data; - SHOW_FIELD(unsigned long, ip, "ip"); - SHOW_FIELD(int, nargs, "nargs"); + SHOW_FIELD(unsigned long, ip, FIELD_STRING_IP); + SHOW_FIELD(int, nargs, FIELD_STRING_NARGS); /* Show fields */ for (i = 0; i < tp->nr_args; i++) SHOW_FIELD(unsigned long, args[i], tp->args[i].name); trace_seq_puts(s, "\n"); - return __probe_event_show_format(s, tp, "(%lx)", "REC->ip"); + return __probe_event_show_format(s, tp, "(%lx)", + "REC->" FIELD_STRING_IP); } static int kretprobe_event_show_format(struct ftrace_event_call *call, @@ -1126,9 +1166,9 @@ static int kretprobe_event_show_format(struct ftrace_event_call *call, int ret, i; struct trace_probe *tp = (struct trace_probe *)call->data; - SHOW_FIELD(unsigned long, func, "func"); - SHOW_FIELD(unsigned long, ret_ip, "ret_ip"); - SHOW_FIELD(int, nargs, "nargs"); + SHOW_FIELD(unsigned long, func, FIELD_STRING_FUNC); + SHOW_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP); + SHOW_FIELD(int, nargs, FIELD_STRING_NARGS); /* Show fields */ for (i = 0; i < tp->nr_args; i++) @@ -1136,7 +1176,8 @@ static int kretprobe_event_show_format(struct ftrace_event_call *call, trace_seq_puts(s, "\n"); return __probe_event_show_format(s, tp, "(%lx <- %lx)", - "REC->func, REC->ret_ip"); + "REC->" FIELD_STRING_FUNC + ", REC->" FIELD_STRING_RETIP); } #ifdef CONFIG_EVENT_PROFILE From e93f4d8539d5e9dd59f4af9d8ef4e9b62cfa1f81 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 7 Oct 2009 18:28:14 -0400 Subject: [PATCH 112/470] tracing/kprobes: Robustify fixed field names against variable field names 
conflicts Rename probe-common fixed field names to harder conflictable names, because current 'ip', 'func', and other probe field names are easily in conflict with user-specified variable names. Signed-off-by: Masami Hiramatsu Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Christoph Hellwig Cc: Ananth N Mavinakayanahalli Cc: Jim Keniston Cc: Frank Ch. Eigler LKML-Reference: <20091007222814.1684.407.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Frederic Weisbecker --- kernel/trace/trace_kprobe.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index bb6cb2bc6fa..739f70e8e92 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -39,10 +39,10 @@ #define KPROBE_EVENT_SYSTEM "kprobes" /* Reserved field names */ -#define FIELD_STRING_IP "ip" -#define FIELD_STRING_NARGS "nargs" -#define FIELD_STRING_RETIP "ret_ip" -#define FIELD_STRING_FUNC "func" +#define FIELD_STRING_IP "__probe_ip" +#define FIELD_STRING_NARGS "__probe_nargs" +#define FIELD_STRING_RETIP "__probe_ret_ip" +#define FIELD_STRING_FUNC "__probe_func" const char *reserved_field_names[] = { "common_type", From 4ea42b181434bfc6a0a18d32214130a242d489bf Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 8 Oct 2009 17:17:38 -0400 Subject: [PATCH 113/470] perf: Add perf probe subcommand, a kprobe-event setup helper Add perf probe subcommand that implements a kprobe-event setup helper to the perf command. This allows user to define kprobe events using C expressions (C line numbers, C function names, and C local variables). Usage ----- perf probe [] -P 'PROBEDEF' [-P 'PROBEDEF' ...] -k, --vmlinux vmlinux/module pathname -P, --probe probe point definition, where p: kprobe probe r: kretprobe probe GRP: Group name (optional) NAME: Event name FUNC: Function name OFFS: Offset from function entry (in byte) SRC: Source code path LINE: Line number ARG: Probe argument (local variable name or kprobe-tracer argument format is supported.) Changes in v4: - Add _GNU_SOURCE macro for strndup(). Changes in v3: - Remove -r option because perf always be used for online kernel. - Check malloc/calloc results. Changes in v2: - Check synthesized string length. - Rename perf kprobe to perf probe. - Use spaces for separator and update usage comment. - Check error paths in parse_probepoint(). - Check optimized-out variables. Signed-off-by: Masami Hiramatsu Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Christoph Hellwig Cc: Ananth N Mavinakayanahalli Cc: Jim Keniston Cc: Frank Ch. 
Eigler LKML-Reference: <20091008211737.29299.14784.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Frederic Weisbecker --- tools/perf/Makefile | 10 + tools/perf/builtin-probe.c | 358 +++++++++++++++++ tools/perf/builtin.h | 1 + tools/perf/perf.c | 3 + tools/perf/util/probe-finder.c | 690 +++++++++++++++++++++++++++++++++ tools/perf/util/probe-finder.h | 68 ++++ 6 files changed, 1130 insertions(+) create mode 100644 tools/perf/builtin-probe.c create mode 100644 tools/perf/util/probe-finder.c create mode 100644 tools/perf/util/probe-finder.h diff --git a/tools/perf/Makefile b/tools/perf/Makefile index b5f1953b614..6dabcf1a4df 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -419,6 +419,16 @@ ifneq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { Elf * msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel); endif +ifneq ($(shell sh -c "(echo '\#include '; echo '\#include '; echo 'int main(void) { Dwarf_Debug dbg; Dwarf_Error err; dwarf_init(0, DW_DLC_READ, 0, 0, &dbg, &err); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -ldwarf -lelf -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y) + msg := $(warning No libdwarf.h found, disables probe subcommand. Please install libdwarf-dev/libdwarf-devel); +else + EXTLIBS += -lelf -ldwarf + LIB_H += util/probe-finder.h + LIB_OBJS += util/probe-finder.o + BUILTIN_OBJS += builtin-probe.o + BASIC_CFLAGS += -DSUPPORT_DWARF +endif + ifdef NO_DEMANGLE BASIC_CFLAGS += -DNO_DEMANGLE else diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c new file mode 100644 index 00000000000..24b64b5cefc --- /dev/null +++ b/tools/perf/builtin-probe.c @@ -0,0 +1,358 @@ +/* + * builtin-probe.c + * + * Builtin probe command: Set up probe events by C expression + * + * Written by Masami Hiramatsu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + */ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef _GNU_SOURCE +#include "perf.h" +#include "builtin.h" +#include "util/util.h" +#include "util/parse-options.h" +#include "util/parse-events.h" /* For debugfs_path */ +#include "util/probe-finder.h" + +/* Default vmlinux search paths */ +#define NR_SEARCH_PATH 3 +const char *default_search_path[NR_SEARCH_PATH] = { +"/lib/modules/%s/build/vmlinux", /* Custom build kernel */ +"/usr/lib/debug/lib/modules/%s/vmlinux", /* Red Hat debuginfo */ +"/boot/vmlinux-debug-%s", /* Ubuntu */ +}; + +#define MAX_PATH_LEN 256 +#define MAX_PROBES 128 + +/* Session management structure */ +static struct { + char *vmlinux; + char *release; + int nr_probe; + struct probe_point probes[MAX_PROBES]; + char *events[MAX_PROBES]; +} session; + +static void semantic_error(const char *msg) +{ + fprintf(stderr, "Semantic error: %s\n", msg); + exit(1); +} + +static void perror_exit(const char *msg) +{ + perror(msg); + exit(1); +} + +#define MAX_PROBE_ARGS 128 + +static int parse_probepoint(const struct option *opt __used, + const char *str, int unset __used) +{ + char *argv[MAX_PROBE_ARGS + 2]; /* Event + probe + args */ + int argc, i; + char *arg, *ptr; + struct probe_point *pp = &session.probes[session.nr_probe]; + char **event = &session.events[session.nr_probe]; + int retp = 0; + + if (!str) /* The end of probe points */ + return 0; + + debug("Probe-define(%d): %s\n", session.nr_probe, str); + if (++session.nr_probe == MAX_PROBES) + semantic_error("Too many probes"); + + /* Separate arguments, similar to argv_split */ + argc = 0; + do { + /* Skip separators */ + while (isspace(*str)) + str++; + + /* Add an argument */ + if (*str != '\0') { + const char *s = str; + + /* Skip the argument */ + while (!isspace(*str) && *str != '\0') + str++; + + /* Duplicate the argument */ + argv[argc] = strndup(s, str - s); + if (argv[argc] == NULL) + perror_exit("strndup"); + if (++argc == MAX_PROBE_ARGS) + semantic_error("Too many arguments"); + debug("argv[%d]=%s\n", argc, argv[argc - 1]); + } + } while (*str != '\0'); + if (argc < 2) + semantic_error("Need event-name and probe-point at least."); + + /* Parse the event name */ + if (argv[0][0] == 'r') + retp = 1; + else if (argv[0][0] != 'p') + semantic_error("You must specify 'p'(kprobe) or" + " 'r'(kretprobe) first."); + /* TODO: check event name */ + *event = argv[0]; + + /* Parse probe point */ + arg = argv[1]; + if (arg[0] == '@') { + /* Source Line */ + arg++; + ptr = strchr(arg, ':'); + if (!ptr || !isdigit(ptr[1])) + semantic_error("Line number is required."); + *ptr++ = '\0'; + if (strlen(arg) == 0) + semantic_error("No file name."); + pp->file = strdup(arg); + pp->line = atoi(ptr); + if (!pp->file || !pp->line) + semantic_error("Failed to parse line."); + debug("file:%s line:%d\n", pp->file, pp->line); + } else { + /* Function name */ + ptr = strchr(arg, '+'); + if (ptr) { + if (!isdigit(ptr[1])) + semantic_error("Offset is required."); + *ptr++ = '\0'; + pp->offset = atoi(ptr); + } else + ptr = arg; + ptr = strchr(ptr, '@'); + if (ptr) { + *ptr++ = '\0'; + pp->file = strdup(ptr); + } + pp->function = strdup(arg); + debug("symbol:%s file:%s offset:%d\n", + pp->function, pp->file, pp->offset); + } + free(argv[1]); + + /* Copy arguments */ + pp->nr_args = argc - 2; + if (pp->nr_args > 0) { + pp->args = (char **)malloc(sizeof(char *) * pp->nr_args); + if (!pp->args) + perror_exit("malloc"); + memcpy(pp->args, &argv[2], sizeof(char *) * pp->nr_args); 
+ } + + /* Ensure return probe has no C argument */ + if (retp) + for (i = 0; i < pp->nr_args; i++) + if (is_c_varname(pp->args[i])) + semantic_error("You can't specify local" + " variable for kretprobe"); + debug("%d arguments\n", pp->nr_args); + return 0; +} + +static int open_default_vmlinux(void) +{ + struct utsname uts; + char fname[MAX_PATH_LEN]; + int fd, ret, i; + + ret = uname(&uts); + if (ret) { + debug("uname() failed.\n"); + return -errno; + } + session.release = uts.release; + for (i = 0; i < NR_SEARCH_PATH; i++) { + ret = snprintf(fname, MAX_PATH_LEN, + default_search_path[i], session.release); + if (ret >= MAX_PATH_LEN || ret < 0) { + debug("Filename(%d,%s) is too long.\n", i, uts.release); + errno = E2BIG; + return -E2BIG; + } + debug("try to open %s\n", fname); + fd = open(fname, O_RDONLY); + if (fd >= 0) + break; + } + return fd; +} + +static const char * const probe_usage[] = { + "perf probe [] -P 'PROBEDEF' [-P 'PROBEDEF' ...]", + NULL +}; + +static const struct option options[] = { + OPT_STRING('k', "vmlinux", &session.vmlinux, "file", + "vmlinux/module pathname"), + OPT_CALLBACK('P', "probe", NULL, + "p|r:[GRP/]NAME FUNC[+OFFS][@SRC]|@SRC:LINE [ARG ...]", + "probe point definition, where\n" + "\t\tp:\tkprobe probe\n" + "\t\tr:\tkretprobe probe\n" + "\t\tGRP:\tGroup name (optional)\n" + "\t\tNAME:\tEvent name\n" + "\t\tFUNC:\tFunction name\n" + "\t\tOFFS:\tOffset from function entry (in byte)\n" + "\t\tSRC:\tSource code path\n" + "\t\tLINE:\tLine number\n" + "\t\tARG:\tProbe argument (local variable name or\n" + "\t\t\tkprobe-tracer argument format is supported.)\n", + parse_probepoint), + OPT_END() +}; + +static int write_new_event(int fd, const char *buf) +{ + int ret; + + printf("Adding new event: %s\n", buf); + ret = write(fd, buf, strlen(buf)); + if (ret <= 0) + perror("Error: Failed to create event"); + + return ret; +} + +#define MAX_CMDLEN 256 + +static int synthesize_probepoint(struct probe_point *pp) +{ + char *buf; + int i, len, ret; + pp->probes[0] = buf = (char *)calloc(MAX_CMDLEN, sizeof(char)); + if (!buf) + perror_exit("calloc"); + ret = snprintf(buf, MAX_CMDLEN, "%s+%d", pp->function, pp->offset); + if (ret <= 0 || ret >= MAX_CMDLEN) + goto error; + len = ret; + + for (i = 0; i < pp->nr_args; i++) { + ret = snprintf(&buf[len], MAX_CMDLEN - len, " %s", + pp->args[i]); + if (ret <= 0 || ret >= MAX_CMDLEN - len) + goto error; + len += ret; + } + pp->found = 1; + return pp->found; +error: + free(pp->probes[0]); + if (ret > 0) + ret = -E2BIG; + return ret; +} + +int cmd_probe(int argc, const char **argv, const char *prefix __used) +{ + int i, j, fd, ret, need_dwarf = 0; + struct probe_point *pp; + char buf[MAX_CMDLEN]; + + argc = parse_options(argc, argv, options, probe_usage, + PARSE_OPT_STOP_AT_NON_OPTION); + if (argc || session.nr_probe == 0) + usage_with_options(probe_usage, options); + + /* Synthesize return probes */ + for (j = 0; j < session.nr_probe; j++) { + if (session.events[j][0] != 'r') { + need_dwarf = 1; + continue; + } + ret = synthesize_probepoint(&session.probes[j]); + if (ret == -E2BIG) + semantic_error("probe point is too long."); + else if (ret < 0) { + perror("snprintf"); + return -1; + } + } + + if (!need_dwarf) + goto setup_probes; + + if (session.vmlinux) + fd = open(session.vmlinux, O_RDONLY); + else + fd = open_default_vmlinux(); + if (fd < 0) { + perror("vmlinux/module file open"); + return -1; + } + + /* Searching probe points */ + for (j = 0; j < session.nr_probe; j++) { + pp = &session.probes[j]; + if (pp->found) + continue; + 
+ lseek(fd, SEEK_SET, 0); + ret = find_probepoint(fd, pp); + if (ret <= 0) { + fprintf(stderr, "Error: No probe point found.\n"); + return -1; + } + debug("probe event %s found\n", session.events[j]); + } + close(fd); + +setup_probes: + /* Settng up probe points */ + snprintf(buf, MAX_CMDLEN, "%s/../kprobe_events", debugfs_path); + fd = open(buf, O_WRONLY, O_APPEND); + if (fd < 0) { + perror("kprobe_events open"); + return -1; + } + for (j = 0; j < session.nr_probe; j++) { + pp = &session.probes[j]; + if (pp->found == 1) { + snprintf(buf, MAX_CMDLEN, "%s %s\n", + session.events[j], pp->probes[0]); + write_new_event(fd, buf); + } else + for (i = 0; i < pp->found; i++) { + snprintf(buf, MAX_CMDLEN, "%s%d %s\n", + session.events[j], i, pp->probes[i]); + write_new_event(fd, buf); + } + } + close(fd); + return 0; +} + diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h index e11d8d231c3..ad5f0f4c49e 100644 --- a/tools/perf/builtin.h +++ b/tools/perf/builtin.h @@ -25,5 +25,6 @@ extern int cmd_timechart(int argc, const char **argv, const char *prefix); extern int cmd_top(int argc, const char **argv, const char *prefix); extern int cmd_trace(int argc, const char **argv, const char *prefix); extern int cmd_version(int argc, const char **argv, const char *prefix); +extern int cmd_probe(int argc, const char **argv, const char *prefix); #endif diff --git a/tools/perf/perf.c b/tools/perf/perf.c index 19fc7feb9d5..f598120c009 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c @@ -295,6 +295,9 @@ static void handle_internal_command(int argc, const char **argv) { "version", cmd_version, 0 }, { "trace", cmd_trace, 0 }, { "sched", cmd_sched, 0 }, +#ifdef SUPPORT_DWARF + { "probe", cmd_probe, 0 }, +#endif }; unsigned int i; static const char ext[] = STRIP_EXTENSION; diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c new file mode 100644 index 00000000000..ec6f53f29e0 --- /dev/null +++ b/tools/perf/util/probe-finder.c @@ -0,0 +1,690 @@ +/* + * probe-finder.c : C expression to kprobe event converter + * + * Written by Masami Hiramatsu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "probe-finder.h" + + +/* Dwarf_Die Linkage to parent Die */ +struct die_link { + struct die_link *parent; /* Parent die */ + Dwarf_Die die; /* Current die */ +}; + +static Dwarf_Debug __dw_debug; +static Dwarf_Error __dw_error; + +static void msg_exit(int ret, const char *fmt, ...) 
+{ + va_list ap; + + va_start(ap, fmt); + fprintf(stderr, "Error: "); + vfprintf(stderr, fmt, ap); + va_end(ap); + + fprintf(stderr, "\n"); + exit(ret); +} + + +/* + * Generic dwarf analysis helpers + */ + +#define X86_32_MAX_REGS 8 +const char *x86_32_regs_table[X86_32_MAX_REGS] = { + "%ax", + "%cx", + "%dx", + "%bx", + "$stack", /* Stack address instead of %sp */ + "%bp", + "%si", + "%di", +}; + +#define X86_64_MAX_REGS 16 +const char *x86_64_regs_table[X86_64_MAX_REGS] = { + "%ax", + "%dx", + "%cx", + "%bx", + "%si", + "%di", + "%bp", + "%sp", + "%r8", + "%r9", + "%r10", + "%r11", + "%r12", + "%r13", + "%r14", + "%r15", +}; + +/* TODO: switching by dwarf address size */ +#ifdef __x86_64__ +#define ARCH_MAX_REGS X86_64_MAX_REGS +#define arch_regs_table x86_64_regs_table +#else +#define ARCH_MAX_REGS X86_32_MAX_REGS +#define arch_regs_table x86_32_regs_table +#endif + +/* Return architecture dependent register string (for kprobe-tracer) */ +static const char *get_arch_regstr(unsigned int n) +{ + return (n <= ARCH_MAX_REGS) ? arch_regs_table[n] : NULL; +} + +/* + * Compare the tail of two strings. + * Return 0 if whole of either string is same as another's tail part. + */ +static int strtailcmp(const char *s1, const char *s2) +{ + int i1 = strlen(s1); + int i2 = strlen(s2); + while (--i1 > 0 && --i2 > 0) { + if (s1[i1] != s2[i2]) + return s1[i1] - s2[i2]; + } + return 0; +} + +/* Find the fileno of the target file. */ +static Dwarf_Unsigned die_get_fileno(Dwarf_Die cu_die, const char *fname) +{ + Dwarf_Signed cnt, i; + Dwarf_Unsigned found = 0; + char **srcs; + int ret; + + if (!fname) + return 0; + + ret = dwarf_srcfiles(cu_die, &srcs, &cnt, &__dw_error); + if (ret == DW_DLV_OK) { + for (i = 0; i < cnt && !found; i++) { + if (strtailcmp(srcs[i], fname) == 0) + found = i + 1; + dwarf_dealloc(__dw_debug, srcs[i], DW_DLA_STRING); + } + for (; i < cnt; i++) + dwarf_dealloc(__dw_debug, srcs[i], DW_DLA_STRING); + dwarf_dealloc(__dw_debug, srcs, DW_DLA_LIST); + } + if (found) + debug("found fno: %d\n", (int)found); + return found; +} + +/* Compare diename and tname */ +static int die_compare_name(Dwarf_Die die, const char *tname) +{ + char *name; + int ret; + ret = dwarf_diename(die, &name, &__dw_error); + ERR_IF(ret == DW_DLV_ERROR); + if (ret == DW_DLV_OK) { + ret = strcmp(tname, name); + dwarf_dealloc(__dw_debug, name, DW_DLA_STRING); + } else + ret = -1; + return ret; +} + +/* Check the address is in the subprogram(function). 
*/ +static int die_within_subprogram(Dwarf_Die sp_die, Dwarf_Addr addr, + Dwarf_Signed *offs) +{ + Dwarf_Addr lopc, hipc; + int ret; + + /* TODO: check ranges */ + ret = dwarf_lowpc(sp_die, &lopc, &__dw_error); + ERR_IF(ret == DW_DLV_ERROR); + if (ret == DW_DLV_NO_ENTRY) + return 0; + ret = dwarf_highpc(sp_die, &hipc, &__dw_error); + ERR_IF(ret != DW_DLV_OK); + if (lopc <= addr && addr < hipc) { + *offs = addr - lopc; + return 1; + } else + return 0; +} + +/* Check the die is inlined function */ +static Dwarf_Bool die_inlined_subprogram(Dwarf_Die die) +{ + /* TODO: check strictly */ + Dwarf_Bool inl; + int ret; + + ret = dwarf_hasattr(die, DW_AT_inline, &inl, &__dw_error); + ERR_IF(ret == DW_DLV_ERROR); + return inl; +} + +/* Get the offset of abstruct_origin */ +static Dwarf_Off die_get_abstract_origin(Dwarf_Die die) +{ + Dwarf_Attribute attr; + Dwarf_Off cu_offs; + int ret; + + ret = dwarf_attr(die, DW_AT_abstract_origin, &attr, &__dw_error); + ERR_IF(ret != DW_DLV_OK); + ret = dwarf_formref(attr, &cu_offs, &__dw_error); + ERR_IF(ret != DW_DLV_OK); + dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR); + return cu_offs; +} + +/* Get entry pc(or low pc, 1st entry of ranges) of the die */ +static Dwarf_Addr die_get_entrypc(Dwarf_Die die) +{ + Dwarf_Attribute attr; + Dwarf_Addr addr; + Dwarf_Off offs; + Dwarf_Ranges *ranges; + Dwarf_Signed cnt; + int ret; + + /* Try to get entry pc */ + ret = dwarf_attr(die, DW_AT_entry_pc, &attr, &__dw_error); + ERR_IF(ret == DW_DLV_ERROR); + if (ret == DW_DLV_OK) { + ret = dwarf_formaddr(attr, &addr, &__dw_error); + ERR_IF(ret != DW_DLV_OK); + dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR); + return addr; + } + + /* Try to get low pc */ + ret = dwarf_lowpc(die, &addr, &__dw_error); + ERR_IF(ret == DW_DLV_ERROR); + if (ret == DW_DLV_OK) + return addr; + + /* Try to get ranges */ + ret = dwarf_attr(die, DW_AT_ranges, &attr, &__dw_error); + ERR_IF(ret != DW_DLV_OK); + ret = dwarf_formref(attr, &offs, &__dw_error); + ERR_IF(ret != DW_DLV_OK); + ret = dwarf_get_ranges(__dw_debug, offs, &ranges, &cnt, NULL, + &__dw_error); + ERR_IF(ret != DW_DLV_OK); + addr = ranges[0].dwr_addr1; + dwarf_ranges_dealloc(__dw_debug, ranges, cnt); + return addr; +} + +/* + * Search a Die from Die tree. + * Note: cur_link->die should be deallocated in this function. 
+ */ +static int __search_die_tree(struct die_link *cur_link, + int (*die_cb)(struct die_link *, void *), + void *data) +{ + Dwarf_Die new_die; + struct die_link new_link; + int ret; + + if (!die_cb) + return 0; + + /* Check current die */ + while (!(ret = die_cb(cur_link, data))) { + /* Check child die */ + ret = dwarf_child(cur_link->die, &new_die, &__dw_error); + ERR_IF(ret == DW_DLV_ERROR); + if (ret == DW_DLV_OK) { + new_link.parent = cur_link; + new_link.die = new_die; + ret = __search_die_tree(&new_link, die_cb, data); + if (ret) + break; + } + + /* Move to next sibling */ + ret = dwarf_siblingof(__dw_debug, cur_link->die, &new_die, + &__dw_error); + ERR_IF(ret == DW_DLV_ERROR); + dwarf_dealloc(__dw_debug, cur_link->die, DW_DLA_DIE); + cur_link->die = new_die; + if (ret == DW_DLV_NO_ENTRY) + return 0; + } + dwarf_dealloc(__dw_debug, cur_link->die, DW_DLA_DIE); + return ret; +} + +/* Search a die in its children's die tree */ +static int search_die_from_children(Dwarf_Die parent_die, + int (*die_cb)(struct die_link *, void *), + void *data) +{ + struct die_link new_link; + int ret; + + new_link.parent = NULL; + ret = dwarf_child(parent_die, &new_link.die, &__dw_error); + ERR_IF(ret == DW_DLV_ERROR); + if (ret == DW_DLV_OK) + return __search_die_tree(&new_link, die_cb, data); + else + return 0; +} + +/* Find a locdesc corresponding to the address */ +static int attr_get_locdesc(Dwarf_Attribute attr, Dwarf_Locdesc *desc, + Dwarf_Addr addr) +{ + Dwarf_Signed lcnt; + Dwarf_Locdesc **llbuf; + int ret, i; + + ret = dwarf_loclist_n(attr, &llbuf, &lcnt, &__dw_error); + ERR_IF(ret != DW_DLV_OK); + ret = DW_DLV_NO_ENTRY; + for (i = 0; i < lcnt; ++i) { + if (llbuf[i]->ld_lopc <= addr && + llbuf[i]->ld_hipc > addr) { + memcpy(desc, llbuf[i], sizeof(Dwarf_Locdesc)); + desc->ld_s = + malloc(sizeof(Dwarf_Loc) * llbuf[i]->ld_cents); + ERR_IF(desc->ld_s == NULL); + memcpy(desc->ld_s, llbuf[i]->ld_s, + sizeof(Dwarf_Loc) * llbuf[i]->ld_cents); + ret = DW_DLV_OK; + break; + } + dwarf_dealloc(__dw_debug, llbuf[i]->ld_s, DW_DLA_LOC_BLOCK); + dwarf_dealloc(__dw_debug, llbuf[i], DW_DLA_LOCDESC); + } + /* Releasing loop */ + for (; i < lcnt; ++i) { + dwarf_dealloc(__dw_debug, llbuf[i]->ld_s, DW_DLA_LOC_BLOCK); + dwarf_dealloc(__dw_debug, llbuf[i], DW_DLA_LOCDESC); + } + dwarf_dealloc(__dw_debug, llbuf, DW_DLA_LIST); + return ret; +} + +/* + * Probe finder related functions + */ + +/* Show a location */ +static void show_location(Dwarf_Loc *loc, struct probe_finder *pf) +{ + Dwarf_Small op; + Dwarf_Unsigned regn; + Dwarf_Signed offs; + int deref = 0, ret; + const char *regs; + + op = loc->lr_atom; + + /* If this is based on frame buffer, set the offset */ + if (op == DW_OP_fbreg) { + deref = 1; + offs = (Dwarf_Signed)loc->lr_number; + op = pf->fbloc.ld_s[0].lr_atom; + loc = &pf->fbloc.ld_s[0]; + } else + offs = 0; + + if (op >= DW_OP_breg0 && op <= DW_OP_breg31) { + regn = op - DW_OP_breg0; + offs += (Dwarf_Signed)loc->lr_number; + deref = 1; + } else if (op >= DW_OP_reg0 && op <= DW_OP_reg31) { + regn = op - DW_OP_reg0; + } else if (op == DW_OP_bregx) { + regn = loc->lr_number; + offs += (Dwarf_Signed)loc->lr_number2; + deref = 1; + } else if (op == DW_OP_regx) { + regn = loc->lr_number; + } else + msg_exit(-EINVAL, "Dwarf_OP %d is not supported.\n", op); + + regs = get_arch_regstr(regn); + if (!regs) + msg_exit(-EINVAL, "%lld exceeds max register number.\n", regn); + + if (deref) + ret = snprintf(pf->buf, pf->len, + " %s=%+lld(%s)", pf->var, offs, regs); + else + ret = snprintf(pf->buf, pf->len, " %s=%s", 
pf->var, regs); + ERR_IF(ret < 0); + ERR_IF(ret >= pf->len); +} + +/* Show a variables in kprobe event format */ +static void show_variable(Dwarf_Die vr_die, struct probe_finder *pf) +{ + Dwarf_Attribute attr; + Dwarf_Locdesc ld; + int ret; + + ret = dwarf_attr(vr_die, DW_AT_location, &attr, &__dw_error); + if (ret != DW_DLV_OK) + goto error; + ret = attr_get_locdesc(attr, &ld, (pf->addr - pf->cu_base)); + if (ret != DW_DLV_OK) + goto error; + /* TODO? */ + ERR_IF(ld.ld_cents != 1); + show_location(&ld.ld_s[0], pf); + free(ld.ld_s); + dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR); + return ; +error: + msg_exit(-1, "Failed to find the location of %s at this address.\n" + " Perhaps, it was optimized out.\n", pf->var); +} + +static int variable_callback(struct die_link *dlink, void *data) +{ + struct probe_finder *pf = (struct probe_finder *)data; + Dwarf_Half tag; + int ret; + + ret = dwarf_tag(dlink->die, &tag, &__dw_error); + ERR_IF(ret == DW_DLV_ERROR); + if ((tag == DW_TAG_formal_parameter || + tag == DW_TAG_variable) && + (die_compare_name(dlink->die, pf->var) == 0)) { + show_variable(dlink->die, pf); + return 1; + } + /* TODO: Support struct members and arrays */ + return 0; +} + +/* Find a variable in a subprogram die */ +static void find_variable(Dwarf_Die sp_die, struct probe_finder *pf) +{ + int ret; + + if (!is_c_varname(pf->var)) { + /* Output raw parameters */ + ret = snprintf(pf->buf, pf->len, " %s", pf->var); + ERR_IF(ret < 0); + ERR_IF(ret >= pf->len); + return ; + } + + debug("Searching '%s' variable in context.\n", pf->var); + /* Search child die for local variables and parameters. */ + ret = search_die_from_children(sp_die, variable_callback, pf); + if (!ret) + msg_exit(-1, "Failed to find '%s' in this function.\n", + pf->var); +} + +/* Get a frame base on the address */ +static void get_current_frame_base(Dwarf_Die sp_die, struct probe_finder *pf) +{ + Dwarf_Attribute attr; + int ret; + + ret = dwarf_attr(sp_die, DW_AT_frame_base, &attr, &__dw_error); + ERR_IF(ret != DW_DLV_OK); + ret = attr_get_locdesc(attr, &pf->fbloc, (pf->addr - pf->cu_base)); + ERR_IF(ret != DW_DLV_OK); + dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR); +} + +static void free_current_frame_base(struct probe_finder *pf) +{ + free(pf->fbloc.ld_s); + memset(&pf->fbloc, 0, sizeof(Dwarf_Locdesc)); +} + +/* Show a probe point to output buffer */ +static void show_probepoint(Dwarf_Die sp_die, Dwarf_Signed offs, + struct probe_finder *pf) +{ + struct probe_point *pp = pf->pp; + char *name; + char tmp[MAX_PROBE_BUFFER]; + int ret, i, len; + + /* Output name of probe point */ + ret = dwarf_diename(sp_die, &name, &__dw_error); + ERR_IF(ret == DW_DLV_ERROR); + if (ret == DW_DLV_OK) { + ret = snprintf(tmp, MAX_PROBE_BUFFER, "%s+%u", name, + (unsigned int)offs); + dwarf_dealloc(__dw_debug, name, DW_DLA_STRING); + } else { + /* This function has no name. 
*/ + ret = snprintf(tmp, MAX_PROBE_BUFFER, "0x%llx", pf->addr); + } + ERR_IF(ret < 0); + ERR_IF(ret >= MAX_PROBE_BUFFER); + len = ret; + + /* Find each argument */ + get_current_frame_base(sp_die, pf); + for (i = 0; i < pp->nr_args; i++) { + pf->var = pp->args[i]; + pf->buf = &tmp[len]; + pf->len = MAX_PROBE_BUFFER - len; + find_variable(sp_die, pf); + len += strlen(pf->buf); + } + free_current_frame_base(pf); + + pp->probes[pp->found] = strdup(tmp); + pp->found++; +} + +static int probeaddr_callback(struct die_link *dlink, void *data) +{ + struct probe_finder *pf = (struct probe_finder *)data; + Dwarf_Half tag; + Dwarf_Signed offs; + int ret; + + ret = dwarf_tag(dlink->die, &tag, &__dw_error); + ERR_IF(ret == DW_DLV_ERROR); + /* Check the address is in this subprogram */ + if (tag == DW_TAG_subprogram && + die_within_subprogram(dlink->die, pf->addr, &offs)) { + show_probepoint(dlink->die, offs, pf); + return 1; + } + return 0; +} + +/* Find probe point from its line number */ +static void find_by_line(Dwarf_Die cu_die, struct probe_finder *pf) +{ + struct probe_point *pp = pf->pp; + Dwarf_Signed cnt, i; + Dwarf_Line *lines; + Dwarf_Unsigned lineno = 0; + Dwarf_Addr addr; + Dwarf_Unsigned fno; + int ret; + + ret = dwarf_srclines(cu_die, &lines, &cnt, &__dw_error); + ERR_IF(ret != DW_DLV_OK); + + for (i = 0; i < cnt; i++) { + ret = dwarf_line_srcfileno(lines[i], &fno, &__dw_error); + ERR_IF(ret != DW_DLV_OK); + if (fno != pf->fno) + continue; + + ret = dwarf_lineno(lines[i], &lineno, &__dw_error); + ERR_IF(ret != DW_DLV_OK); + if (lineno != (Dwarf_Unsigned)pp->line) + continue; + + ret = dwarf_lineaddr(lines[i], &addr, &__dw_error); + ERR_IF(ret != DW_DLV_OK); + debug("Probe point found: 0x%llx\n", addr); + pf->addr = addr; + /* Search a real subprogram including this line, */ + ret = search_die_from_children(cu_die, probeaddr_callback, pf); + if (ret == 0) + msg_exit(-1, + "Probe point is not found in subprograms.\n"); + /* Continuing, because target line might be inlined. */ + } + dwarf_srclines_dealloc(__dw_debug, lines, cnt); +} + +/* Search function from function name */ +static int probefunc_callback(struct die_link *dlink, void *data) +{ + struct probe_finder *pf = (struct probe_finder *)data; + struct probe_point *pp = pf->pp; + struct die_link *lk; + Dwarf_Signed offs; + Dwarf_Half tag; + int ret; + + ret = dwarf_tag(dlink->die, &tag, &__dw_error); + ERR_IF(ret == DW_DLV_ERROR); + if (tag == DW_TAG_subprogram) { + if (die_compare_name(dlink->die, pp->function) == 0) { + if (die_inlined_subprogram(dlink->die)) { + /* Inlined function, save it. */ + ret = dwarf_die_CU_offset(dlink->die, + &pf->inl_offs, + &__dw_error); + ERR_IF(ret != DW_DLV_OK); + debug("inline definition offset %lld\n", + pf->inl_offs); + return 0; + } + /* Get probe address */ + pf->addr = die_get_entrypc(dlink->die); + pf->addr += pp->offset; + /* TODO: Check the address in this function */ + show_probepoint(dlink->die, pp->offset, pf); + /* Continue to search */ + } + } else if (tag == DW_TAG_inlined_subroutine && pf->inl_offs) { + if (die_get_abstract_origin(dlink->die) == pf->inl_offs) { + /* Get probe address */ + pf->addr = die_get_entrypc(dlink->die); + pf->addr += pp->offset; + debug("found inline addr: 0x%llx\n", pf->addr); + /* Inlined function. 
Get a real subprogram */ + for (lk = dlink->parent; lk != NULL; lk = lk->parent) { + tag = 0; + dwarf_tag(lk->die, &tag, &__dw_error); + ERR_IF(ret == DW_DLV_ERROR); + if (tag == DW_TAG_subprogram && + !die_inlined_subprogram(lk->die)) + goto found; + } + msg_exit(-1, "Failed to find real subprogram.\n"); +found: + /* Get offset from subprogram */ + ret = die_within_subprogram(lk->die, pf->addr, &offs); + ERR_IF(!ret); + show_probepoint(lk->die, offs, pf); + /* Continue to search */ + } + } + return 0; +} + +static void find_by_func(Dwarf_Die cu_die, struct probe_finder *pf) +{ + search_die_from_children(cu_die, probefunc_callback, pf); +} + +/* Find a probe point */ +int find_probepoint(int fd, struct probe_point *pp) +{ + Dwarf_Half addr_size = 0; + Dwarf_Unsigned next_cuh = 0; + Dwarf_Die cu_die = 0; + int cu_number = 0, ret; + struct probe_finder pf = {.pp = pp}; + + ret = dwarf_init(fd, DW_DLC_READ, 0, 0, &__dw_debug, &__dw_error); + if (ret != DW_DLV_OK) + msg_exit(-1, "Failed to call dwarf_init(). " + "Maybe, not a dwarf file?\n"); + + pp->found = 0; + while (++cu_number) { + /* Search CU (Compilation Unit) */ + ret = dwarf_next_cu_header(__dw_debug, NULL, NULL, NULL, + &addr_size, &next_cuh, &__dw_error); + ERR_IF(ret == DW_DLV_ERROR); + if (ret == DW_DLV_NO_ENTRY) + break; + + /* Get the DIE(Debugging Information Entry) of this CU */ + ret = dwarf_siblingof(__dw_debug, 0, &cu_die, &__dw_error); + ERR_IF(ret != DW_DLV_OK); + + /* Check if target file is included. */ + if (pp->file) + pf.fno = die_get_fileno(cu_die, pp->file); + + if (!pp->file || pf.fno) { + /* Save CU base address (for frame_base) */ + ret = dwarf_lowpc(cu_die, &pf.cu_base, &__dw_error); + ERR_IF(ret == DW_DLV_ERROR); + if (ret == DW_DLV_NO_ENTRY) + pf.cu_base = 0; + if (pp->line) + find_by_line(cu_die, &pf); + if (pp->function) + find_by_func(cu_die, &pf); + } + dwarf_dealloc(__dw_debug, cu_die, DW_DLA_DIE); + } + ret = dwarf_finish(__dw_debug, &__dw_error); + ERR_IF(ret != DW_DLV_OK); + + return pp->found; +} + diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h new file mode 100644 index 00000000000..af920de6486 --- /dev/null +++ b/tools/perf/util/probe-finder.h @@ -0,0 +1,68 @@ +#ifndef _PROBE_FINDER_H +#define _PROBE_FINDER_H + +#define _stringify(n) #n +#define stringify(n) _stringify(n) + +#ifdef DEBUG +#define debug(fmt ...) \ + fprintf(stderr, "DBG(" __FILE__ ":" stringify(__LINE__) "): " fmt) +#else +#define debug(fmt ...) 
do {} while (0) +#endif + +#define ERR_IF(cnd) \ + do { if (cnd) { \ + fprintf(stderr, "Error (" __FILE__ ":" stringify(__LINE__) \ + "): " stringify(cnd) "\n"); \ + exit(1); \ + } } while (0) + +#define MAX_PATH_LEN 256 +#define MAX_PROBE_BUFFER 1024 +#define MAX_PROBES 128 + +static inline int is_c_varname(const char *name) +{ + /* TODO */ + return isalpha(name[0]) || name[0] == '_'; +} + +struct probe_point { + /* Inputs */ + char *file; /* File name */ + int line; /* Line number */ + + char *function; /* Function name */ + int offset; /* Offset bytes */ + + int nr_args; /* Number of arguments */ + char **args; /* Arguments */ + + /* Output */ + int found; /* Number of found probe points */ + char *probes[MAX_PROBES]; /* Output buffers (will be allocated)*/ +}; + +extern int find_probepoint(int fd, struct probe_point *pp); + +#include +#include + +struct probe_finder { + struct probe_point *pp; /* Target probe point */ + + /* For function searching */ + Dwarf_Addr addr; /* Address */ + Dwarf_Unsigned fno; /* File number */ + Dwarf_Off inl_offs; /* Inline offset */ + + /* For variable searching */ + Dwarf_Addr cu_base; /* Current CU base address */ + Dwarf_Locdesc fbloc; /* Location of Current Frame Base */ + const char *var; /* Current variable name */ + char *buf; /* Current output buffer */ + int len; /* Length of output buffer */ +}; + +#endif /*_PROBE_FINDER_H */ From 23e8ec0d1c410f2f1d81050ee155db229abb1707 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 7 Oct 2009 18:28:30 -0400 Subject: [PATCH 114/470] perf probe: Add perf probe command support without libdwarf Enables 'perf probe' even if libdwarf is not installed. If libdwarf is not found, 'perf probe' just disables dwarf support. Users can use 'perf probe' to set up new events by using kprobe_events format. Signed-off-by: Masami Hiramatsu Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Christoph Hellwig Cc: Ananth N Mavinakayanahalli Cc: Jim Keniston Cc: Frank Ch. Eigler LKML-Reference: <20091007222830.1684.25665.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Frederic Weisbecker --- tools/perf/Makefile | 6 ++--- tools/perf/builtin-probe.c | 42 ++++++++++++++++++++++++++++------ tools/perf/perf.c | 2 -- tools/perf/util/probe-finder.h | 2 ++ 4 files changed, 40 insertions(+), 12 deletions(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 6dabcf1a4df..52b1f438e71 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -385,6 +385,7 @@ BUILTIN_OBJS += builtin-stat.o BUILTIN_OBJS += builtin-timechart.o BUILTIN_OBJS += builtin-top.o BUILTIN_OBJS += builtin-trace.o +BUILTIN_OBJS += builtin-probe.o PERFLIBS = $(LIB_FILE) @@ -420,13 +421,12 @@ ifneq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { Elf * endif ifneq ($(shell sh -c "(echo '\#include '; echo '\#include '; echo 'int main(void) { Dwarf_Debug dbg; Dwarf_Error err; dwarf_init(0, DW_DLC_READ, 0, 0, &dbg, &err); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -ldwarf -lelf -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y) - msg := $(warning No libdwarf.h found, disables probe subcommand. Please install libdwarf-dev/libdwarf-devel); + msg := $(warning No libdwarf.h found, disables dwarf support. 
Please install libdwarf-dev/libdwarf-devel); + BASIC_CFLAGS += -DNO_LIBDWARF else EXTLIBS += -lelf -ldwarf LIB_H += util/probe-finder.h LIB_OBJS += util/probe-finder.o - BUILTIN_OBJS += builtin-probe.o - BASIC_CFLAGS += -DSUPPORT_DWARF endif ifdef NO_DEMANGLE diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index 24b64b5cefc..73c883b715c 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -54,6 +54,7 @@ const char *default_search_path[NR_SEARCH_PATH] = { static struct { char *vmlinux; char *release; + int need_dwarf; int nr_probe; struct probe_point probes[MAX_PROBES]; char *events[MAX_PROBES]; @@ -162,6 +163,8 @@ static int parse_probepoint(const struct option *opt __used, pp->function, pp->file, pp->offset); } free(argv[1]); + if (pp->file) + session.need_dwarf = 1; /* Copy arguments */ pp->nr_args = argc - 2; @@ -173,15 +176,19 @@ static int parse_probepoint(const struct option *opt __used, } /* Ensure return probe has no C argument */ - if (retp) - for (i = 0; i < pp->nr_args; i++) - if (is_c_varname(pp->args[i])) + for (i = 0; i < pp->nr_args; i++) + if (is_c_varname(pp->args[i])) { + if (retp) semantic_error("You can't specify local" " variable for kretprobe"); + session.need_dwarf = 1; + } + debug("%d arguments\n", pp->nr_args); return 0; } +#ifndef NO_LIBDWARF static int open_default_vmlinux(void) { struct utsname uts; @@ -209,6 +216,7 @@ static int open_default_vmlinux(void) } return fd; } +#endif static const char * const probe_usage[] = { "perf probe [] -P 'PROBEDEF' [-P 'PROBEDEF' ...]", @@ -216,10 +224,16 @@ static const char * const probe_usage[] = { }; static const struct option options[] = { +#ifndef NO_LIBDWARF OPT_STRING('k', "vmlinux", &session.vmlinux, "file", "vmlinux/module pathname"), +#endif OPT_CALLBACK('P', "probe", NULL, +#ifdef NO_LIBDWARF + "p|r:[GRP/]NAME FUNC[+OFFS] [ARG ...]", +#else "p|r:[GRP/]NAME FUNC[+OFFS][@SRC]|@SRC:LINE [ARG ...]", +#endif "probe point definition, where\n" "\t\tp:\tkprobe probe\n" "\t\tr:\tkretprobe probe\n" @@ -227,9 +241,13 @@ static const struct option options[] = { "\t\tNAME:\tEvent name\n" "\t\tFUNC:\tFunction name\n" "\t\tOFFS:\tOffset from function entry (in byte)\n" +#ifdef NO_LIBDWARF + "\t\tARG:\tProbe argument (only \n" +#else "\t\tSRC:\tSource code path\n" "\t\tLINE:\tLine number\n" "\t\tARG:\tProbe argument (local variable name or\n" +#endif "\t\t\tkprobe-tracer argument format is supported.)\n", parse_probepoint), OPT_END() @@ -279,7 +297,7 @@ error: int cmd_probe(int argc, const char **argv, const char *prefix __used) { - int i, j, fd, ret, need_dwarf = 0; + int i, j, fd, ret; struct probe_point *pp; char buf[MAX_CMDLEN]; @@ -288,12 +306,19 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) if (argc || session.nr_probe == 0) usage_with_options(probe_usage, options); - /* Synthesize return probes */ +#ifdef NO_LIBDWARF + if (session.need_dwarf) + semantic_error("Dwarf-analysis is not supported"); +#endif + + /* Synthesize probes without dwarf */ for (j = 0; j < session.nr_probe; j++) { +#ifndef NO_LIBDWARF if (session.events[j][0] != 'r') { - need_dwarf = 1; + session.need_dwarf = 1; continue; } +#endif ret = synthesize_probepoint(&session.probes[j]); if (ret == -E2BIG) semantic_error("probe point is too long."); @@ -303,7 +328,8 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) } } - if (!need_dwarf) +#ifndef NO_LIBDWARF + if (!session.need_dwarf) goto setup_probes; if (session.vmlinux) @@ -332,6 +358,8 @@ int cmd_probe(int argc, 
const char **argv, const char *prefix __used) close(fd); setup_probes: +#endif /* !NO_LIBDWARF */ + /* Settng up probe points */ snprintf(buf, MAX_CMDLEN, "%s/../kprobe_events", debugfs_path); fd = open(buf, O_WRONLY, O_APPEND); diff --git a/tools/perf/perf.c b/tools/perf/perf.c index f598120c009..c570d177d5c 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c @@ -295,9 +295,7 @@ static void handle_internal_command(int argc, const char **argv) { "version", cmd_version, 0 }, { "trace", cmd_trace, 0 }, { "sched", cmd_sched, 0 }, -#ifdef SUPPORT_DWARF { "probe", cmd_probe, 0 }, -#endif }; unsigned int i; static const char ext[] = STRIP_EXTENSION; diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h index af920de6486..306810c32f6 100644 --- a/tools/perf/util/probe-finder.h +++ b/tools/perf/util/probe-finder.h @@ -44,6 +44,7 @@ struct probe_point { char *probes[MAX_PROBES]; /* Output buffers (will be allocated)*/ }; +#ifndef NO_LIBDWARF extern int find_probepoint(int fd, struct probe_point *pp); #include @@ -64,5 +65,6 @@ struct probe_finder { char *buf; /* Current output buffer */ int len; /* Length of output buffer */ }; +#endif /* NO_LIBDWARF */ #endif /*_PROBE_FINDER_H */ From 7a693d3f0d10f978ebdf3082c41404ab97106567 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 13 Oct 2009 08:16:30 +0200 Subject: [PATCH 115/470] perf_events, x86: Fix event constraints code There was namespace overlap due to a rename i did - this caused the following build warning, reported by Stephen Rothwell against linux-next x86_64 allmodconfig: arch/x86/kernel/cpu/perf_event.c: In function 'intel_get_event_idx': arch/x86/kernel/cpu/perf_event.c:1445: warning: 'event_constraint' is used uninitialized in this function This is a real bug not just a warning: fix it by renaming the global event-constraints table pointer to 'event_constraints'. 
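
To illustrate the shadowing that produced the bogus warning, here is a stripped-down sketch (simplified types and logic, not the actual perf_event.c code):

	struct event_constraint { unsigned int code; };

	static const struct event_constraint *event_constraint;	/* global table pointer */

	static int intel_get_event_idx(void)
	{
		const struct event_constraint *event_constraint;	/* local iterator shadows the global */

		if (!event_constraint)	/* tests the uninitialized local, not the table */
			return -1;
		return 0;
	}

With the global renamed to 'event_constraints', the NULL check again refers to the table pointer rather than to the shadowing local.
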
Reported-by: Stephen Rothwell Cc: Stephane Eranian Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <20091013144223.369d616d.sfr@canb.auug.org.au> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 9961d845719..2e20bca3cca 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -124,7 +124,7 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; -static const struct event_constraint *event_constraint; +static const struct event_constraint *event_constraints; /* * Not sure about some of these @@ -1442,12 +1442,12 @@ intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc) const struct event_constraint *event_constraint; int i, code; - if (!event_constraint) + if (!event_constraints) goto skip; code = hwc->config & CORE_EVNTSEL_EVENT_MASK; - for_each_event_constraint(event_constraint, event_constraint) { + for_each_event_constraint(event_constraint, event_constraints) { if (code == event_constraint->code) { for_each_bit(i, event_constraint->idxmsk, X86_PMC_IDX_MAX) { if (!test_and_set_bit(i, cpuc->used_mask)) @@ -2047,12 +2047,12 @@ static int p6_pmu_init(void) case 7: case 8: case 11: /* Pentium III */ - event_constraint = intel_p6_event_constraints; + event_constraints = intel_p6_event_constraints; break; case 9: case 13: /* Pentium M */ - event_constraint = intel_p6_event_constraints; + event_constraints = intel_p6_event_constraints; break; default: pr_cont("unsupported p6 CPU model %d ", @@ -2124,14 +2124,14 @@ static int intel_pmu_init(void) sizeof(hw_cache_event_ids)); pr_cont("Core2 events, "); - event_constraint = intel_core_event_constraints; + event_constraints = intel_core_event_constraints; break; default: case 26: memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, sizeof(hw_cache_event_ids)); - event_constraint = intel_nehalem_event_constraints; + event_constraints = intel_nehalem_event_constraints; pr_cont("Nehalem/Corei7 events, "); break; case 28: From 8968f9d3dc23d9a1821d97c6f11e72a59382e56c Mon Sep 17 00:00:00 2001 From: Hidetoshi Seto Date: Tue, 13 Oct 2009 16:19:41 +0900 Subject: [PATCH 116/470] perf_event, x86, mce: Use TRACE_EVENT() for MCE logging This approach is the first baby step towards solving many of the structural problems the x86 MCE logging code is having today: - It has a private ring-buffer implementation that has a number of limitations and has been historically fragile and buggy. - It is using a quirky /dev/mcelog ioctl driven ABI that is MCE specific. /dev/mcelog is not part of any larger logging framework and hence has remained on the fringes for many years. - The MCE logging code is still very unclean partly due to its ABI limitations. Fields are being reused for multiple purposes, and the whole message structure is limited and x86 specific to begin with. All in one, the x86 tree would like to move away from this private implementation of an event logging facility to a broader framework. By using perf events we gain the following advantages: - Multiple user-space agents can access MCE events. We can have an mcelog daemon running but also a system-wide tracer capturing important events in flight-recorder mode. - Sampling support: the kernel and the user-space call-chain of MCE events can be stored and analyzed as well. 
This way actual patterns of bad behavior can be matched to precisely what kind of activity happened in the kernel (and/or in the app) around that moment in time. - Coupling with other hardware and software events: the PMU can track a number of other anomalies - monitoring software might choose to monitor those plus the MCE events as well - in one coherent stream of events. - Discovery of MCE sources - tracepoints are enumerated and tools can act upon the existence (or non-existence) of various channels of MCE information. - Filtering support: we just subscribe to and act upon the events we are interested in. Then even on a per event source basis there are in-kernel filter expressions available that can restrict the amount of data that hits the event channel. - Arbitrarily deep per cpu buffering of events - we can buffer 32 entries or we can buffer as much as we want, as long as we have the RAM. - An NMI-safe ring-buffer implementation - mappable to user-space. - Built-in support for timestamping of events, PID markers, CPU markers, etc. - A rich ABI accessible over a system call interface. Per cpu, per task and per workload monitoring of MCE events can be done this way. The ABI itself has a nice, meaningful structure. - Extensible ABI: new fields can be added without breaking tooling. New tracepoints can be added as the hardware side evolves. There are various parsers that can be used. - Lots of scheduling/buffering/batching modes of operation for MCE events. poll() support. mmap() support. read() support. You name it. - Rich tooling support: even without any MCE-specific extensions added, the 'perf' tool today offers various views of MCE data: perf report, perf stat and perf trace can all be used to view logged MCE events and perhaps correlate them to certain user-space usage patterns. But it can be used directly as well, for user-space agents and policy action in mcelog, etc. With this we hope to achieve significant code cleanup and feature improvements in the MCE code, and we hope to be able to drop the /dev/mcelog facility in the end. This patch is just a plain dumb dump of mce_log() records to the tracepoints / perf events framework - a first proof of concept step.
Signed-off-by: Hidetoshi Seto Cc: Huang Ying Cc: Andi Kleen LKML-Reference: <4AD42A0D.7050104@jp.fujitsu.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mcheck/mce.c | 6 +++ include/trace/events/mce.h | 69 ++++++++++++++++++++++++++++++++ 2 files changed, 75 insertions(+) create mode 100644 include/trace/events/mce.h diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index b1598a9436d..39caea3d8bc 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -46,6 +46,9 @@ #include "mce-internal.h" +#define CREATE_TRACE_POINTS +#include + int mce_disabled __read_mostly; #define MISC_MCELOG_MINOR 227 @@ -141,6 +144,9 @@ void mce_log(struct mce *mce) { unsigned next, entry; + /* Emit the trace record: */ + trace_mce_record(mce); + mce->finished = 0; wmb(); for (;;) { diff --git a/include/trace/events/mce.h b/include/trace/events/mce.h new file mode 100644 index 00000000000..7eee77895cb --- /dev/null +++ b/include/trace/events/mce.h @@ -0,0 +1,69 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mce + +#if !defined(_TRACE_MCE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_MCE_H + +#include +#include +#include + +TRACE_EVENT(mce_record, + + TP_PROTO(struct mce *m), + + TP_ARGS(m), + + TP_STRUCT__entry( + __field( u64, mcgcap ) + __field( u64, mcgstatus ) + __field( u8, bank ) + __field( u64, status ) + __field( u64, addr ) + __field( u64, misc ) + __field( u64, ip ) + __field( u8, cs ) + __field( u64, tsc ) + __field( u64, walltime ) + __field( u32, cpu ) + __field( u32, cpuid ) + __field( u32, apicid ) + __field( u32, socketid ) + __field( u8, cpuvendor ) + ), + + TP_fast_assign( + __entry->mcgcap = m->mcgcap; + __entry->mcgstatus = m->mcgstatus; + __entry->bank = m->bank; + __entry->status = m->status; + __entry->addr = m->addr; + __entry->misc = m->misc; + __entry->ip = m->ip; + __entry->cs = m->cs; + __entry->tsc = m->tsc; + __entry->walltime = m->time; + __entry->cpu = m->extcpu; + __entry->cpuid = m->cpuid; + __entry->apicid = m->apicid; + __entry->socketid = m->socketid; + __entry->cpuvendor = m->cpuvendor; + ), + + TP_printk("CPU: %d, MCGc/s: %llx/%llx, MC%d: %016Lx, ADDR/MISC: %016Lx/%016Lx, RIP: %02x:<%016Lx>, TSC: %llx, PROCESSOR: %u:%x, TIME: %llu, SOCKET: %u, APIC: %x", + __entry->cpu, + __entry->mcgcap, __entry->mcgstatus, + __entry->bank, __entry->status, + __entry->addr, __entry->misc, + __entry->cs, __entry->ip, + __entry->tsc, + __entry->cpuvendor, __entry->cpuid, + __entry->walltime, + __entry->socketid, + __entry->apicid) +); + +#endif /* _TRACE_MCE_H */ + +/* This part must be outside protection */ +#include From cfed95a693e1ea5d08b9c9019bc30e448437ee2f Mon Sep 17 00:00:00 2001 From: Vincent Legoll Date: Tue, 13 Oct 2009 10:18:16 +0200 Subject: [PATCH 117/470] perf tools: Do not manually count string lengths Use strlen & macros instead of manually counting string lengths as this is error prone and may lend to bugs. Signed-off-by: Vincent Legoll Cc: Linus Torvalds LKML-Reference: <4727185d0910130118m5387058dndb02ac9b384af9f0@mail.gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/perf.c | 16 ++++++++-------- tools/perf/util/cache.h | 5 +++++ 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/tools/perf/perf.c b/tools/perf/perf.c index 19fc7feb9d5..624e62d9d1e 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c @@ -89,8 +89,8 @@ static int handle_options(const char*** argv, int* argc, int* envchanged) /* * Check remaining flags. 
*/ - if (!prefixcmp(cmd, "--exec-path")) { - cmd += 11; + if (!prefixcmp(cmd, CMD_EXEC_PATH)) { + cmd += strlen(CMD_EXEC_PATH); if (*cmd == '=') perf_set_argv_exec_path(cmd + 1); else { @@ -117,8 +117,8 @@ static int handle_options(const char*** argv, int* argc, int* envchanged) (*argv)++; (*argc)--; handled++; - } else if (!prefixcmp(cmd, "--perf-dir=")) { - setenv(PERF_DIR_ENVIRONMENT, cmd + 10, 1); + } else if (!prefixcmp(cmd, CMD_PERF_DIR)) { + setenv(PERF_DIR_ENVIRONMENT, cmd + strlen(CMD_PERF_DIR), 1); if (envchanged) *envchanged = 1; } else if (!strcmp(cmd, "--work-tree")) { @@ -131,8 +131,8 @@ static int handle_options(const char*** argv, int* argc, int* envchanged) *envchanged = 1; (*argv)++; (*argc)--; - } else if (!prefixcmp(cmd, "--work-tree=")) { - setenv(PERF_WORK_TREE_ENVIRONMENT, cmd + 12, 1); + } else if (!prefixcmp(cmd, CMD_WORK_TREE)) { + setenv(PERF_WORK_TREE_ENVIRONMENT, cmd + strlen(CMD_WORK_TREE), 1); if (envchanged) *envchanged = 1; } else if (!strcmp(cmd, "--debugfs-dir")) { @@ -146,8 +146,8 @@ static int handle_options(const char*** argv, int* argc, int* envchanged) *envchanged = 1; (*argv)++; (*argc)--; - } else if (!prefixcmp(cmd, "--debugfs-dir=")) { - strncpy(debugfs_mntpt, cmd + 14, MAXPATHLEN); + } else if (!prefixcmp(cmd, CMD_DEBUGFS_DIR)) { + strncpy(debugfs_mntpt, cmd + strlen(CMD_DEBUGFS_DIR), MAXPATHLEN); debugfs_mntpt[MAXPATHLEN - 1] = '\0'; if (envchanged) *envchanged = 1; diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h index f26172c0c91..918eb376abe 100644 --- a/tools/perf/util/cache.h +++ b/tools/perf/util/cache.h @@ -5,6 +5,11 @@ #include "strbuf.h" #include "../perf.h" +#define CMD_EXEC_PATH "--exec-path" +#define CMD_PERF_DIR "--perf-dir=" +#define CMD_WORK_TREE "--work-tree=" +#define CMD_DEBUGFS_DIR "--debugfs-dir=" + #define PERF_DIR_ENVIRONMENT "PERF_DIR" #define PERF_WORK_TREE_ENVIRONMENT "PERF_WORK_TREE" #define DEFAULT_PERF_DIR_ENVIRONMENT ".perf" From f4f0b418188cc7995375acbb54e87c80f21861bd Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Tue, 13 Oct 2009 14:57:20 +0200 Subject: [PATCH 118/470] perf tools: Remove expensive old debug code from perf top Calling gettimeofday() at high frequency is painful for handicapped boxen. The spot calling gettimeofday() is old unneeded debug code, so remove it. Reported-by: Ingo Molnar Signed-off-by: Mike Galbraith Cc: Peter Zijlstra Cc: Peter Zijlstra LKML-Reference: <1255438640.7173.1.camel@marge.simson.net> Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index c0f69e80b2c..2d8806bac25 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -870,8 +870,6 @@ static unsigned int mmap_read_head(struct mmap_data *md) return head; } -struct timeval last_read, this_read; - static void mmap_read_counter(struct mmap_data *md) { unsigned int head = mmap_read_head(md); @@ -879,8 +877,6 @@ static void mmap_read_counter(struct mmap_data *md) unsigned char *data = md->base + page_size; int diff; - gettimeofday(&this_read, NULL); - /* * If we're further behind than half the buffer, there's a chance * the writer will bite our tail and mess up the samples under us. 
@@ -891,14 +887,7 @@ static void mmap_read_counter(struct mmap_data *md) */ diff = head - old; if (diff > md->mask / 2 || diff < 0) { - struct timeval iv; - unsigned long msecs; - - timersub(&this_read, &last_read, &iv); - msecs = iv.tv_sec*1000 + iv.tv_usec/1000; - - fprintf(stderr, "WARNING: failed to keep up with mmap data." - " Last read %lu msecs ago.\n", msecs); + fprintf(stderr, "WARNING: failed to keep up with mmap data.\n"); /* * head points to a known good entry, start there. @@ -906,8 +895,6 @@ static void mmap_read_counter(struct mmap_data *md) old = head; } - last_read = this_read; - for (; old != head;) { event_t *event = (event_t *)&data[old & md->mask]; From d5b889f2ecec7849e851ddd31c34bdfb3482b5de Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 13 Oct 2009 11:16:29 -0300 Subject: [PATCH 119/470] perf tools: Move threads & last_match to threads.c This was just being copy'n'pasted all over. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith LKML-Reference: <20091013141629.GD21809@ghostprotocols.net> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 25 +++++++------------------ tools/perf/builtin-report.c | 26 +++++++------------------- tools/perf/builtin-sched.c | 30 +++++++++++------------------- tools/perf/builtin-trace.c | 13 +++---------- tools/perf/util/thread.c | 27 ++++++++++++++------------- tools/perf/util/thread.h | 8 +++----- 6 files changed, 45 insertions(+), 84 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 8c84320ecb0..3fe0de03004 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -37,10 +37,6 @@ static int print_line; static unsigned long page_size; static unsigned long mmap_window = 32; -static struct rb_root threads; -static struct thread *last_match; - - struct sym_ext { struct rb_node node; double percent; @@ -96,12 +92,10 @@ static int process_sample_event(event_t *event, unsigned long offset, unsigned long head) { char level; - struct thread *thread; u64 ip = event->ip.ip; struct map *map = NULL; struct symbol *sym = NULL; - - thread = threads__findnew(event->ip.pid, &threads, &last_match); + struct thread *thread = threads__findnew(event->ip.pid); dump_printf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n", (void *)(offset + head), @@ -166,10 +160,8 @@ got_map: static int process_mmap_event(event_t *event, unsigned long offset, unsigned long head) { - struct thread *thread; struct map *map = map__new(&event->mmap, NULL, 0); - - thread = threads__findnew(event->mmap.pid, &threads, &last_match); + struct thread *thread = threads__findnew(event->mmap.pid); dump_printf("%p [%p]: PERF_RECORD_MMAP %d: [%p(%p) @ %p]: %s\n", (void *)(offset + head), @@ -194,9 +186,8 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head) static int process_comm_event(event_t *event, unsigned long offset, unsigned long head) { - struct thread *thread; + struct thread *thread = threads__findnew(event->comm.pid); - thread = threads__findnew(event->comm.pid, &threads, &last_match); dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n", (void *)(offset + head), (void *)(long)(event->header.size), @@ -215,11 +206,9 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head) static int process_fork_event(event_t *event, unsigned long offset, unsigned long head) { - struct thread *thread; - struct thread *parent; + struct thread *thread = threads__findnew(event->fork.pid); + 
struct thread *parent = threads__findnew(event->fork.ppid); - thread = threads__findnew(event->fork.pid, &threads, &last_match); - parent = threads__findnew(event->fork.ppid, &threads, &last_match); dump_printf("%p [%p]: PERF_RECORD_FORK: %d:%d\n", (void *)(offset + head), (void *)(long)(event->header.size), @@ -558,7 +547,7 @@ static int __cmd_annotate(void) uint32_t size; char *buf; - register_idle_thread(&threads, &last_match); + register_idle_thread(); input = open(input_name, O_RDONLY); if (input < 0) { @@ -659,7 +648,7 @@ more: return 0; if (verbose > 3) - threads__fprintf(stdout, &threads); + threads__fprintf(stdout); if (verbose > 2) dsos__fprintf(stdout); diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index f57a23b19f3..015c7974596 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -55,9 +55,6 @@ static char callchain_default_opt[] = "fractal,0.5"; static char *cwd; static int cwdlen; -static struct rb_root threads; -static struct thread *last_match; - static struct perf_header *header; static u64 sample_type; @@ -593,15 +590,13 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) { char level; struct symbol *sym = NULL; - struct thread *thread; u64 ip = event->ip.ip; u64 period = 1; struct map *map = NULL; void *more_data = event->ip.__more_data; struct ip_callchain *chain = NULL; int cpumode; - - thread = threads__findnew(event->ip.pid, &threads, &last_match); + struct thread *thread = threads__findnew(event->ip.pid); if (sample_type & PERF_SAMPLE_PERIOD) { period = *(u64 *)more_data; @@ -685,10 +680,8 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) static int process_mmap_event(event_t *event, unsigned long offset, unsigned long head) { - struct thread *thread; struct map *map = map__new(&event->mmap, cwd, cwdlen); - - thread = threads__findnew(event->mmap.pid, &threads, &last_match); + struct thread *thread = threads__findnew(event->mmap.pid); dump_printf("%p [%p]: PERF_RECORD_MMAP %d/%d: [%p(%p) @ %p]: %s\n", (void *)(offset + head), @@ -714,9 +707,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head) static int process_comm_event(event_t *event, unsigned long offset, unsigned long head) { - struct thread *thread; - - thread = threads__findnew(event->comm.pid, &threads, &last_match); + struct thread *thread = threads__findnew(event->comm.pid); dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n", (void *)(offset + head), @@ -736,11 +727,8 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head) static int process_task_event(event_t *event, unsigned long offset, unsigned long head) { - struct thread *thread; - struct thread *parent; - - thread = threads__findnew(event->fork.pid, &threads, &last_match); - parent = threads__findnew(event->fork.ppid, &threads, &last_match); + struct thread *thread = threads__findnew(event->fork.pid); + struct thread *parent = threads__findnew(event->fork.ppid); dump_printf("%p [%p]: PERF_RECORD_%s: (%d:%d):(%d:%d)\n", (void *)(offset + head), @@ -857,7 +845,7 @@ static int __cmd_report(void) struct thread *idle; int ret; - idle = register_idle_thread(&threads, &last_match); + idle = register_idle_thread(); thread__comm_adjust(idle); if (show_threads) @@ -881,7 +869,7 @@ static int __cmd_report(void) return 0; if (verbose > 3) - threads__fprintf(stdout, &threads); + threads__fprintf(stdout); if (verbose > 2) dsos__fprintf(stdout); diff --git a/tools/perf/builtin-sched.c 
b/tools/perf/builtin-sched.c index 387a4423436..73bdad02973 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -24,9 +24,6 @@ static char const *input_name = "perf.data"; static unsigned long total_comm = 0; -static struct rb_root threads; -static struct thread *last_match; - static struct perf_header *header; static u64 sample_type; @@ -641,9 +638,7 @@ static void test_calibrations(void) static int process_comm_event(event_t *event, unsigned long offset, unsigned long head) { - struct thread *thread; - - thread = threads__findnew(event->comm.tid, &threads, &last_match); + struct thread *thread = threads__findnew(event->comm.tid); dump_printf("%p [%p]: perf_event_comm: %s:%d\n", (void *)(offset + head), @@ -1086,8 +1081,8 @@ latency_switch_event(struct trace_switch_event *switch_event, die("hm, delta: %Ld < 0 ?\n", delta); - sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match); - sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match); + sched_out = threads__findnew(switch_event->prev_pid); + sched_in = threads__findnew(switch_event->next_pid); out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid); if (!out_events) { @@ -1120,13 +1115,10 @@ latency_runtime_event(struct trace_runtime_event *runtime_event, u64 timestamp, struct thread *this_thread __used) { - struct work_atoms *atoms; - struct thread *thread; + struct thread *thread = threads__findnew(runtime_event->pid); + struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); BUG_ON(cpu >= MAX_CPUS || cpu < 0); - - thread = threads__findnew(runtime_event->pid, &threads, &last_match); - atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); if (!atoms) { thread_atoms_insert(thread); atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); @@ -1153,7 +1145,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, if (!wakeup_event->success) return; - wakee = threads__findnew(wakeup_event->pid, &threads, &last_match); + wakee = threads__findnew(wakeup_event->pid); atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid); if (!atoms) { thread_atoms_insert(wakee); @@ -1202,7 +1194,7 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, if (profile_cpu == -1) return; - migrant = threads__findnew(migrate_task_event->pid, &threads, &last_match); + migrant = threads__findnew(migrate_task_event->pid); atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid); if (!atoms) { thread_atoms_insert(migrant); @@ -1458,8 +1450,8 @@ map_switch_event(struct trace_switch_event *switch_event, die("hm, delta: %Ld < 0 ?\n", delta); - sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match); - sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match); + sched_out = threads__findnew(switch_event->prev_pid); + sched_in = threads__findnew(switch_event->next_pid); curr_thread[this_cpu] = sched_in; @@ -1649,7 +1641,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) if (!(sample_type & PERF_SAMPLE_RAW)) return 0; - thread = threads__findnew(event->ip.pid, &threads, &last_match); + thread = threads__findnew(event->ip.pid); if (sample_type & PERF_SAMPLE_TIME) { timestamp = *(u64 *)more_data; @@ -1725,7 +1717,7 @@ static struct perf_file_handler file_handler = { static int read_events(void) { - register_idle_thread(&threads, &last_match); + register_idle_thread(); register_perf_file_handler(&file_handler); return mmap_dispatch_perf_file(&header, input_name, 0, 
0, &cwdlen, &cwd); diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index fb3f3c22021..ccf867dbab5 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -19,9 +19,6 @@ static char const *input_name = "perf.data"; static unsigned long total = 0; static unsigned long total_comm = 0; -static struct rb_root threads; -static struct thread *last_match; - static struct perf_header *header; static u64 sample_type; @@ -32,9 +29,7 @@ static int cwdlen; static int process_comm_event(event_t *event, unsigned long offset, unsigned long head) { - struct thread *thread; - - thread = threads__findnew(event->comm.pid, &threads, &last_match); + struct thread *thread = threads__findnew(event->comm.pid); dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n", (void *)(offset + head), @@ -54,14 +49,12 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head) static int process_sample_event(event_t *event, unsigned long offset, unsigned long head) { - struct thread *thread; u64 ip = event->ip.ip; u64 timestamp = -1; u32 cpu = -1; u64 period = 1; void *more_data = event->ip.__more_data; - - thread = threads__findnew(event->ip.pid, &threads, &last_match); + struct thread *thread = threads__findnew(event->ip.pid); if (sample_type & PERF_SAMPLE_TIME) { timestamp = *(u64 *)more_data; @@ -135,7 +128,7 @@ static struct perf_file_handler file_handler = { static int __cmd_trace(void) { - register_idle_thread(&threads, &last_match); + register_idle_thread(); register_perf_file_handler(&file_handler); return mmap_dispatch_perf_file(&header, input_name, 0, 0, &cwdlen, &cwd); diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 3b56aebb1f4..f53fad7c0a8 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -6,6 +6,9 @@ #include "util.h" #include "debug.h" +static struct rb_root threads; +static struct thread *last_match; + static struct thread *thread__new(pid_t pid) { struct thread *self = calloc(1, sizeof(*self)); @@ -50,10 +53,9 @@ static size_t thread__fprintf(struct thread *self, FILE *fp) return ret; } -struct thread * -threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match) +struct thread *threads__findnew(pid_t pid) { - struct rb_node **p = &threads->rb_node; + struct rb_node **p = &threads.rb_node; struct rb_node *parent = NULL; struct thread *th; @@ -62,15 +64,15 @@ threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match) * so most of the time we dont have to look up * the full rbtree: */ - if (*last_match && (*last_match)->pid == pid) - return *last_match; + if (last_match && last_match->pid == pid) + return last_match; while (*p != NULL) { parent = *p; th = rb_entry(parent, struct thread, rb_node); if (th->pid == pid) { - *last_match = th; + last_match = th; return th; } @@ -83,17 +85,16 @@ threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match) th = thread__new(pid); if (th != NULL) { rb_link_node(&th->rb_node, parent, p); - rb_insert_color(&th->rb_node, threads); - *last_match = th; + rb_insert_color(&th->rb_node, &threads); + last_match = th; } return th; } -struct thread * -register_idle_thread(struct rb_root *threads, struct thread **last_match) +struct thread *register_idle_thread(void) { - struct thread *thread = threads__findnew(0, threads, last_match); + struct thread *thread = threads__findnew(0); if (!thread || thread__set_comm(thread, "swapper")) { fprintf(stderr, "problem inserting idle task.\n"); @@ -197,12 +198,12 @@ int 
thread__fork(struct thread *self, struct thread *parent) return 0; } -size_t threads__fprintf(FILE *fp, struct rb_root *threads) +size_t threads__fprintf(FILE *fp) { size_t ret = 0; struct rb_node *nd; - for (nd = rb_first(threads); nd; nd = rb_next(nd)) { + for (nd = rb_first(&threads); nd; nd = rb_next(nd)) { struct thread *pos = rb_entry(nd, struct thread, rb_node); ret += thread__fprintf(pos, fp); diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 845d9b62f96..1abef3b7455 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -15,13 +15,11 @@ struct thread { }; int thread__set_comm(struct thread *self, const char *comm); -struct thread * -threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match); -struct thread * -register_idle_thread(struct rb_root *threads, struct thread **last_match); +struct thread *threads__findnew(pid_t pid); +struct thread *register_idle_thread(void); void thread__insert_map(struct thread *self, struct map *map); int thread__fork(struct thread *self, struct thread *parent); -size_t threads__fprintf(FILE *fp, struct rb_root *threads); +size_t threads__fprintf(FILE *fp); void maps__insert(struct rb_root *maps, struct map *map); struct map *maps__find(struct rb_root *maps, u64 ip); From 924a79af2cdee26a034b9bdce8c9c76995b5c901 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Oct 2009 15:43:32 -0400 Subject: [PATCH 120/470] perf tools: Handle print concatenations in event format file kmem_alloc ftrace event format had a string that was broken up by two tokens. "string 1" "string 2". This patch lets the parser be able to handle the concatenation. Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091014194357.253818714@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index eef60df7a5b..a05c7144ade 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -1734,6 +1734,7 @@ static int event_read_print(struct event *event) if (read_expect_type(EVENT_DQUOTE, &token) < 0) goto fail; + concat: event->print_fmt.format = token; event->print_fmt.args = NULL; @@ -1743,6 +1744,21 @@ static int event_read_print(struct event *event) if (type == EVENT_NONE) return 0; + /* Handle concatination of print lines */ + if (type == EVENT_DQUOTE) { + char *cat; + + cat = malloc_or_die(strlen(event->print_fmt.format) + + strlen(token) + 1); + strcpy(cat, event->print_fmt.format); + strcat(cat, token); + free_token(token); + free_token(event->print_fmt.format); + event->print_fmt.format = NULL; + token = cat; + goto concat; + } + if (test_type_token(type, token, EVENT_DELIM, (char *)",")) goto fail; From 91ff2bc191827f0d3f5ad0a433ff7df7d2dd9aee Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Oct 2009 15:43:33 -0400 Subject: [PATCH 121/470] perf tools: Fix backslash processing on trace print formats The handling of backslashes was broken. It would stop parsing when encountering one. Also, '\n', '\t', '\r' and '\\' were not converted. 
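
As a hypothetical illustration (not copied from a real event), a format line such as the following would previously stop string parsing at the first escaped quote, and the trailing '\n' would be printed literally:

	print fmt: "comm=\"%s\" pid=%d\n", REC->comm, REC->pid

With this fix the '\"' escapes no longer terminate the token, and \n, \t, \r and \\ are emitted as the characters they name.
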
Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091014194357.521974680@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index a05c7144ade..2b75ec2f57e 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -522,7 +522,10 @@ static enum event_type __read_token(char **tok) last_ch = ch; ch = __read_char(); buf[i++] = ch; - } while (ch != quote_ch && last_ch != '\\'); + /* the '\' '\' will cancel itself */ + if (ch == '\\' && last_ch == '\\') + last_ch = 0; + } while (ch != quote_ch || last_ch == '\\'); /* remove the last quote */ i--; goto out; @@ -2325,7 +2328,27 @@ static void pretty_print(void *data, int size, struct event *event) for (; *ptr; ptr++) { ls = 0; - if (*ptr == '%') { + if (*ptr == '\\') { + ptr++; + switch (*ptr) { + case 'n': + printf("\n"); + break; + case 't': + printf("\t"); + break; + case 'r': + printf("\r"); + break; + case '\\': + printf("\\"); + break; + default: + printf("%c", *ptr); + break; + } + + } else if (*ptr == '%') { saveptr = ptr; show_func = 0; cont_process: From 298ebc3ef2a6c569b3eb51651f04e26aecbf8a1d Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Oct 2009 15:43:34 -0400 Subject: [PATCH 122/470] perf tools: Handle trace parsing of < and > The code to handle the '<' and '>' ops was all in place, but they were not in the switch statement to consider them as valid ops. Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091014194357.807434040@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 2b75ec2f57e..3e643f5da20 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -1170,6 +1170,8 @@ process_op(struct event *event, struct print_arg *arg, char **tok) strcmp(token, "*") == 0 || strcmp(token, "^") == 0 || strcmp(token, "/") == 0 || + strcmp(token, "<") == 0 || + strcmp(token, ">") == 0 || strcmp(token, "==") == 0 || strcmp(token, "!=") == 0) { From 0959b8d65ce26131c2d5ccfa518a7b76529280fa Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Oct 2009 15:43:35 -0400 Subject: [PATCH 123/470] perf tools: Handle arrays in print fields for trace parsing The array used by the ftrace stack events (caller[x]) causes issues with the parser. This adds code to handle the case, but it also assumes that the array is of type long. Note, this is a special case used (currently) only by the ftrace user and kernel stack records. 
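
For reference, the stack events carry their entries in a 'caller' array, so their formats reference indexed fields roughly like this (abridged and simplified, not the exact kernel output):

	print fmt: "\t=> %p\n\t=> %p\n", REC->caller[0], REC->caller[1]

eval_num_arg() now resolves 'caller[i]' by reading long_size bytes at the field's offset plus i * long_size, which is why the array elements are assumed to be of type long.
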
Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091014194358.124833639@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 62 +++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 3e643f5da20..7aeedb09ea7 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -1046,6 +1046,35 @@ out_free: return EVENT_ERROR; } +static enum event_type +process_array(struct event *event, struct print_arg *top, char **tok) +{ + struct print_arg *arg; + enum event_type type; + char *token = NULL; + + arg = malloc_or_die(sizeof(*arg)); + memset(arg, 0, sizeof(*arg)); + + *tok = NULL; + type = process_arg(event, arg, &token); + if (test_type_token(type, token, EVENT_OP, (char *)"]")) + goto out_free; + + top->op.right = arg; + + free_token(token); + type = read_token_item(&token); + *tok = token; + + return type; + +out_free: + free_token(*tok); + free_arg(arg); + return EVENT_ERROR; +} + static int get_op_prio(char *op) { if (!op[1]) { @@ -1192,6 +1221,18 @@ process_op(struct event *event, struct print_arg *arg, char **tok) arg->op.right = right; + } else if (strcmp(token, "[") == 0) { + + left = malloc_or_die(sizeof(*left)); + *left = *arg; + + arg->type = PRINT_OP; + arg->op.op = token; + arg->op.left = left; + + arg->op.prio = 0; + type = process_array(event, arg, tok); + } else { die("unknown op '%s'", token); /* the arg is now the left side */ @@ -1931,6 +1972,7 @@ static unsigned long long eval_num_arg(void *data, int size, { unsigned long long val = 0; unsigned long long left, right; + struct print_arg *larg; switch (arg->type) { case PRINT_NULL: @@ -1957,6 +1999,26 @@ static unsigned long long eval_num_arg(void *data, int size, return 0; break; case PRINT_OP: + if (strcmp(arg->op.op, "[") == 0) { + /* + * Arrays are special, since we don't want + * to read the arg as is. + */ + if (arg->op.left->type != PRINT_FIELD) + goto default_op; /* oops, all bets off */ + larg = arg->op.left; + if (!larg->field.field) { + larg->field.field = + find_any_field(event, larg->field.name); + if (!larg->field.field) + die("field %s not found", larg->field.name); + } + right = eval_num_arg(data, size, event, arg->op.right); + val = read_size(data + larg->field.field->offset + + right * long_size, long_size); + break; + } + default_op: left = eval_num_arg(data, size, event, arg->op.left); right = eval_num_arg(data, size, event, arg->op.right); switch (arg->op.op[0]) { From b99af874829cba2b30d212bc6fd31b56275ee4d2 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Oct 2009 15:43:36 -0400 Subject: [PATCH 124/470] perf tools: Handle * as typecast in trace parsing The '*' is currently only treated as a multiplication, and it needs to be handled as a typecast pointer. This is the version used by trace-cmd. 
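For illustration only (the helper below is not part of the patch), rewriting a parenthesized type atom such as "void" into "void *" needs strlen-based sizing when the string is grown:

    #include <stdlib.h>
    #include <string.h>

    /* Illustrative helper: turn "struct foo" into "struct foo *". */
    static char *append_pointer_star(char *type)
    {
            size_t len = strlen(type);
            char *p = realloc(type, len + 3);       /* " *" plus the NUL */

            if (!p)
                    return type;                    /* old string is still valid */
            strcat(p, " *");
            return p;
    }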
Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091014194358.409327875@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 50 +++++++++++++---------------- 1 file changed, 22 insertions(+), 28 deletions(-) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 7aeedb09ea7..f73ee55b51e 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -1217,7 +1217,24 @@ process_op(struct event *event, struct print_arg *arg, char **tok) right = malloc_or_die(sizeof(*right)); - type = process_arg(event, right, tok); + type = read_token_item(&token); + *tok = token; + + /* could just be a type pointer */ + if ((strcmp(arg->op.op, "*") == 0) && + type == EVENT_DELIM && (strcmp(token, ")") == 0)) { + if (left->type != PRINT_ATOM) + die("bad pointer type"); + left->atom.atom = realloc(left->atom.atom, + sizeof(left->atom.atom) + 3); + strcat(left->atom.atom, " *"); + *arg = *left; + free(arg); + + return type; + } + + type = process_arg_token(event, right, tok, type); arg->op.right = right; @@ -1548,7 +1565,6 @@ process_paren(struct event *event, struct print_arg *arg, char **tok) { struct print_arg *item_arg; enum event_type type; - int ptr_cast = 0; char *token; type = process_arg(event, arg, &token); @@ -1556,26 +1572,11 @@ process_paren(struct event *event, struct print_arg *arg, char **tok) if (type == EVENT_ERROR) return EVENT_ERROR; - if (type == EVENT_OP) { - /* handle the ptr casts */ - if (!strcmp(token, "*")) { - /* - * FIXME: should we zapp whitespaces before ')' ? - * (may require a peek_token_item()) - */ - if (__peek_char() == ')') { - ptr_cast = 1; - free_token(token); - type = read_token_item(&token); - } - } - if (!ptr_cast) { - type = process_op(event, arg, &token); + if (type == EVENT_OP) + type = process_op(event, arg, &token); - if (type == EVENT_ERROR) - return EVENT_ERROR; - } - } + if (type == EVENT_ERROR) + return EVENT_ERROR; if (test_type_token(type, token, EVENT_DELIM, (char *)")")) { free_token(token); @@ -1601,13 +1602,6 @@ process_paren(struct event *event, struct print_arg *arg, char **tok) item_arg = malloc_or_die(sizeof(*item_arg)); arg->type = PRINT_TYPE; - if (ptr_cast) { - char *old = arg->atom.atom; - - arg->atom.atom = malloc_or_die(strlen(old + 3)); - sprintf(arg->atom.atom, "%s *", old); - free(old); - } arg->typecast.type = arg->atom.atom; arg->typecast.item = item_arg; type = process_arg_token(event, item_arg, &token, type); From f1d1feecf07261d083859ecfef0d4399036f9683 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Oct 2009 15:43:37 -0400 Subject: [PATCH 125/470] perf tools: Handle newlines in trace parsing better New lines between args in the trace format can break the parsing. This should not be the case. 
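For example (the wrapped layout shown here is illustrative), a format whose argument list spans several lines should parse the same as the single-line form:

    print fmt: "irq=%d ret=%s",
        REC->irq,
        REC->ret ? "handled" : "unhandled"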
Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091014194358.637991808@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index f73ee55b51e..59e4e4db743 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -1716,12 +1716,18 @@ process_arg_token(struct event *event, struct print_arg *arg, static int event_read_print_args(struct event *event, struct print_arg **list) { - enum event_type type; + enum event_type type = EVENT_ERROR; struct print_arg *arg; char *token; int args = 0; do { + if (type == EVENT_NEWLINE) { + free_token(token); + type = read_token_item(&token); + continue; + } + arg = malloc_or_die(sizeof(*arg)); memset(arg, 0, sizeof(*arg)); From 13999e59343b042b0807be2df6ae5895d29782a0 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Oct 2009 15:43:38 -0400 Subject: [PATCH 126/470] perf tools: Handle the case with and without the "signed" trace field The trace format files now have a "signed" field. But we should still be able to handle the kernels that do not have this field. Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091014194358.888239553@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 81 ++++++++++++++++++----------- 1 file changed, 52 insertions(+), 29 deletions(-) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 59e4e4db743..0739b12675f 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -924,23 +924,30 @@ static int event_read_fields(struct event *event, struct format_field **fields) if (read_expected(EVENT_OP, (char *)";") < 0) goto fail_expect; - if (read_expected(EVENT_ITEM, (char *)"signed") < 0) - goto fail_expect; + type = read_token(&token); + if (type != EVENT_NEWLINE) { + /* newer versions of the kernel have a "signed" type */ + if (test_type_token(type, token, EVENT_ITEM, (char *)"signed")) + goto fail; - if (read_expected(EVENT_OP, (char *)":") < 0) - goto fail_expect; + free_token(token); - if (read_expect_type(EVENT_ITEM, &token)) - goto fail; - if (strtoul(token, NULL, 0)) - field->flags |= FIELD_IS_SIGNED; - free_token(token); + if (read_expected(EVENT_OP, (char *)":") < 0) + goto fail_expect; - if (read_expected(EVENT_OP, (char *)";") < 0) - goto fail_expect; + if (read_expect_type(EVENT_ITEM, &token)) + goto fail; + + /* add signed type */ + + free_token(token); + if (read_expected(EVENT_OP, (char *)";") < 0) + goto fail_expect; + + if (read_expect_type(EVENT_NEWLINE, &token)) + goto fail; + } - if (read_expect_type(EVENT_NEWLINE, &token) < 0) - goto fail; free_token(token); *fields = field; @@ -2949,21 +2956,23 @@ static void print_args(struct print_arg *args) } } -static void parse_header_field(char *type, +static void parse_header_field(char *field, int *offset, int *size) { char *token; + int type; if (read_expected(EVENT_ITEM, (char *)"field") < 0) return; if (read_expected(EVENT_OP, (char *)":") < 0) return; + /* type */ if (read_expect_type(EVENT_ITEM, &token) < 0) - return; + goto fail; free_token(token); - if (read_expected(EVENT_ITEM, type) < 0) + if (read_expected(EVENT_ITEM, field) < 0) return; if (read_expected(EVENT_OP, (char *)";") < 0) return; @@ -2972,7 
+2981,7 @@ static void parse_header_field(char *type, if (read_expected(EVENT_OP, (char *)":") < 0) return; if (read_expect_type(EVENT_ITEM, &token) < 0) - return; + goto fail; *offset = atoi(token); free_token(token); if (read_expected(EVENT_OP, (char *)";") < 0) @@ -2982,22 +2991,36 @@ static void parse_header_field(char *type, if (read_expected(EVENT_OP, (char *)":") < 0) return; if (read_expect_type(EVENT_ITEM, &token) < 0) - return; + goto fail; *size = atoi(token); free_token(token); if (read_expected(EVENT_OP, (char *)";") < 0) return; - if (read_expected(EVENT_ITEM, (char *)"signed") < 0) - return; - if (read_expected(EVENT_OP, (char *)":") < 0) - return; - if (read_expect_type(EVENT_ITEM, &token) < 0) - return; - free_token(token); - if (read_expected(EVENT_OP, (char *)";") < 0) - return; - if (read_expect_type(EVENT_NEWLINE, &token) < 0) - return; + type = read_token(&token); + if (type != EVENT_NEWLINE) { + /* newer versions of the kernel have a "signed" type */ + if (type != EVENT_ITEM) + goto fail; + + if (strcmp(token, (char *)"signed") != 0) + goto fail; + + free_token(token); + + if (read_expected(EVENT_OP, (char *)":") < 0) + return; + + if (read_expect_type(EVENT_ITEM, &token)) + goto fail; + + free_token(token); + if (read_expected(EVENT_OP, (char *)";") < 0) + return; + + if (read_expect_type(EVENT_NEWLINE, &token)) + goto fail; + } + fail: free_token(token); } From 07a4bdddcf2546ccfbfb3c782deab636c371edeb Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Oct 2009 15:43:39 -0400 Subject: [PATCH 127/470] perf tools: Still continue on failed parsing of an event Even though an event may fail to parse, we should not kill the entire report. The trace should still be able to show what it can. If an event fails to parse, a warning is printed, and the output continues. 
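Condensed from the hunks that follow, the failure path now tags the event and keeps it registered instead of aborting the whole report:

    ret = event_read_format(event);
    if (ret < 0) {
            warning("failed to read event format for %s", event->name);
            event->flags |= EVENT_FL_FAILED;
            add_event(event);       /* still listed; printed as "FAILED TO PARSE" */
            return -1;
    }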
Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091014194359.190809589@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 38 ++++++++++++++++++++--------- tools/perf/util/trace-event.h | 14 ++++++----- 2 files changed, 35 insertions(+), 17 deletions(-) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 0739b12675f..eda0a2488c1 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -613,7 +613,7 @@ static enum event_type read_token_item(char **tok) static int test_type(enum event_type type, enum event_type expect) { if (type != expect) { - die("Error: expected type %d but read %d", + warning("Error: expected type %d but read %d", expect, type); return -1; } @@ -624,13 +624,13 @@ static int test_type_token(enum event_type type, char *token, enum event_type expect, const char *expect_tok) { if (type != expect) { - die("Error: expected type %d but read %d", + warning("Error: expected type %d but read %d", expect, type); return -1; } if (strcmp(token, expect_tok) != 0) { - die("Error: expected '%s' but read '%s'", + warning("Error: expected '%s' but read '%s'", expect_tok, token); return -1; } @@ -668,7 +668,7 @@ static int __read_expected(enum event_type expect, const char *str, int newline_ free_token(token); - return 0; + return ret; } static int read_expected(enum event_type expect, const char *str) @@ -1258,12 +1258,12 @@ process_op(struct event *event, struct print_arg *arg, char **tok) type = process_array(event, arg, tok); } else { - die("unknown op '%s'", token); + warning("unknown op '%s'", token); + event->flags |= EVENT_FL_FAILED; /* the arg is now the left side */ return EVENT_NONE; } - if (type == EVENT_OP) { int prio; @@ -2873,7 +2873,7 @@ void print_event(int cpu, void *data, int size, unsigned long long nsecs, event = trace_find_event(type); if (!event) { - printf("ug! no event found for type %d\n", type); + warning("ug! 
no event found for type %d", type); return; } @@ -2887,6 +2887,12 @@ void print_event(int cpu, void *data, int size, unsigned long long nsecs, comm, pid, cpu, secs, nsecs, event->name); + if (event->flags & EVENT_FL_FAILED) { + printf("EVENT '%s' FAILED TO PARSE\n", + event->name); + return; + } + pretty_print(data, size, event); printf("\n"); } @@ -3120,12 +3126,16 @@ int parse_event_file(char *buf, unsigned long size, char *sys) die("failed to read event id"); ret = event_read_format(event); - if (ret < 0) - die("failed to read event format"); + if (ret < 0) { + warning("failed to read event format for %s", event->name); + goto event_failed; + } ret = event_read_print(event); - if (ret < 0) - die("failed to read event print fmt"); + if (ret < 0) { + warning("failed to read event print fmt for %s", event->name); + goto event_failed; + } event->system = strdup(sys); @@ -3135,6 +3145,12 @@ int parse_event_file(char *buf, unsigned long size, char *sys) add_event(event); return 0; + + event_failed: + event->flags |= EVENT_FL_FAILED; + /* still add it even if it failed */ + add_event(event); + return -1; } void parse_set_info(int nr_cpus, int long_sz) diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index da77e073c86..29821acc8db 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -139,12 +139,14 @@ struct event { }; enum { - EVENT_FL_ISFTRACE = 1, - EVENT_FL_ISPRINT = 2, - EVENT_FL_ISBPRINT = 4, - EVENT_FL_ISFUNC = 8, - EVENT_FL_ISFUNCENT = 16, - EVENT_FL_ISFUNCRET = 32, + EVENT_FL_ISFTRACE = 0x01, + EVENT_FL_ISPRINT = 0x02, + EVENT_FL_ISBPRINT = 0x04, + EVENT_FL_ISFUNC = 0x08, + EVENT_FL_ISFUNCENT = 0x10, + EVENT_FL_ISFUNCRET = 0x20, + + EVENT_FL_FAILED = 0x80000000 }; struct record { From ffa1895561645103d8f8059b35d9c06e6eeead2e Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Oct 2009 15:43:40 -0400 Subject: [PATCH 128/470] perf tools: Fix bprintk reading in trace output The bprintk parsing was broken in more ways than one. The file parsing was incorrect, and the words used by the arguments are always 4 bytes aligned, even on 64-bit machines. 
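The alignment rule reduces to rounding the argument cursor up to the next 4-byte boundary regardless of the host word size; a minimal sketch (the helper name is illustrative):

    /* Illustrative: bump bptr to the next 4-byte boundary. */
    static void *align_to_4(void *bptr)
    {
            return (void *)(((unsigned long)bptr + 3UL) & ~3UL);
    }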
Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091014194359.520931637@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index eda0a2488c1..93a82fead95 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -284,18 +284,16 @@ void parse_ftrace_printk(char *file, unsigned int size __unused) char *line; char *next = NULL; char *addr_str; - int ret; + char *fmt; int i; line = strtok_r(file, "\n", &next); while (line) { item = malloc_or_die(sizeof(*item)); - ret = sscanf(line, "%as : %as", - (float *)(void *)&addr_str, /* workaround gcc warning */ - (float *)(void *)&item->printk); + addr_str = strtok_r(line, ":", &fmt); item->addr = strtoull(addr_str, NULL, 16); - free(addr_str); - + /* fmt still has a space, skip it */ + item->printk = strdup(fmt+1); item->next = list; list = item; line = strtok_r(NULL, "\n", &next); @@ -2274,8 +2272,9 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc case 'u': case 'x': case 'i': - bptr = (void *)(((unsigned long)bptr + (long_size - 1)) & - ~(long_size - 1)); + /* the pointers are always 4 bytes aligned */ + bptr = (void *)(((unsigned long)bptr + 3) & + ~3); switch (ls) { case 0: case 1: From 0d1da915c76838c9ee7af7cdefbcb2bae9424161 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Oct 2009 15:43:41 -0400 Subject: [PATCH 129/470] perf tools: Handle both versions of ftrace output The ftrace output events can have either arguments or no arguments. The parser needs to be able to handle both. Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091014194359.790221427@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 93a82fead95..c174765d405 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -1819,7 +1819,7 @@ static int event_read_print(struct event *event) if (ret < 0) return -1; - return 0; + return ret; fail: free_token(token); @@ -3088,6 +3088,9 @@ int parse_ftrace_file(char *buf, unsigned long size) if (ret < 0) die("failed to read ftrace event print fmt"); + /* New ftrace handles args */ + if (ret > 0) + return 0; /* * The arguments for ftrace files are parsed by the fields. * Set up the fields as their arguments. From cda48461c7fb8431a99b7960480f5f42cc1a5324 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Oct 2009 15:43:42 -0400 Subject: [PATCH 130/470] perf tools: Add latency format to trace output Add the irqs disabled, preemption count, need resched, and other info that is shown in the latency format of ftrace. # perf trace -l perf-16457 2..s2. 53636.260344: kmem_cache_free: call_site=ffffffff811198f perf-16457 2..s2. 53636.264330: kmem_cache_free: call_site=ffffffff811198f perf-16457 2d.s4. 
53636.300006: kmem_cache_free: call_site=ffffffff810d889 Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091014194400.076588953@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-trace.c | 2 + tools/perf/util/trace-event-parse.c | 122 +++++++++++++++++++++++----- tools/perf/util/trace-event.h | 11 +++ 3 files changed, 115 insertions(+), 20 deletions(-) diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index ccf867dbab5..ce8459ac284 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -144,6 +144,8 @@ static const struct option options[] = { "dump raw trace in ASCII"), OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), + OPT_BOOLEAN('l', "latency", &latency_format, + "show latency attributes (irqs/preemption disabled, etc)"), OPT_END() }; diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index c174765d405..fde1a434d63 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -40,6 +40,8 @@ int header_page_size_size; int header_page_data_offset; int header_page_data_size; +int latency_format; + static char *input_buf; static unsigned long long input_buf_ptr; static unsigned long long input_buf_siz; @@ -1928,37 +1930,67 @@ static int get_common_info(const char *type, int *offset, int *size) return 0; } +static int __parse_common(void *data, int *size, int *offset, + char *name) +{ + int ret; + + if (!*size) { + ret = get_common_info(name, offset, size); + if (ret < 0) + return ret; + } + return read_size(data + *offset, *size); +} + int trace_parse_common_type(void *data) { static int type_offset; static int type_size; - int ret; - if (!type_size) { - ret = get_common_info("common_type", - &type_offset, - &type_size); - if (ret < 0) - return ret; - } - return read_size(data + type_offset, type_size); + return __parse_common(data, &type_size, &type_offset, + (char *)"common_type"); } static int parse_common_pid(void *data) { static int pid_offset; static int pid_size; + + return __parse_common(data, &pid_size, &pid_offset, + (char *)"common_pid"); +} + +static int parse_common_pc(void *data) +{ + static int pc_offset; + static int pc_size; + + return __parse_common(data, &pc_size, &pc_offset, + (char *)"common_preempt_count"); +} + +static int parse_common_flags(void *data) +{ + static int flags_offset; + static int flags_size; + + return __parse_common(data, &flags_size, &flags_offset, + (char *)"common_flags"); +} + +static int parse_common_lock_depth(void *data) +{ + static int ld_offset; + static int ld_size; int ret; - if (!pid_size) { - ret = get_common_info("common_pid", - &pid_offset, - &pid_size); - if (ret < 0) - return ret; - } + ret = __parse_common(data, &ld_size, &ld_offset, + (char *)"common_lock_depth"); + if (ret < 0) + return -1; - return read_size(data + pid_offset, pid_size); + return ret; } struct event *trace_find_event(int id) @@ -2525,6 +2557,41 @@ static inline int log10_cpu(int nb) return 1; } +static void print_lat_fmt(void *data, int size __unused) +{ + unsigned int lat_flags; + unsigned int pc; + int lock_depth; + int hardirq; + int softirq; + + lat_flags = parse_common_flags(data); + pc = parse_common_pc(data); + lock_depth = parse_common_lock_depth(data); + + hardirq = lat_flags & TRACE_FLAG_HARDIRQ; + softirq = lat_flags & TRACE_FLAG_SOFTIRQ; + + printf("%c%c%c", + (lat_flags & TRACE_FLAG_IRQS_OFF) ? 
'd' : + (lat_flags & TRACE_FLAG_IRQS_NOSUPPORT) ? + 'X' : '.', + (lat_flags & TRACE_FLAG_NEED_RESCHED) ? + 'N' : '.', + (hardirq && softirq) ? 'H' : + hardirq ? 'h' : softirq ? 's' : '.'); + + if (pc) + printf("%x", pc); + else + printf("."); + + if (lock_depth < 0) + printf("."); + else + printf("%d", lock_depth); +} + /* taken from Linux, written by Frederic Weisbecker */ static void print_graph_cpu(int cpu) { @@ -2768,6 +2835,11 @@ pretty_print_func_ent(void *data, int size, struct event *event, printf(" | "); + if (latency_format) { + print_lat_fmt(data, size); + printf(" | "); + } + field = find_field(event, "func"); if (!field) die("function entry does not have func field"); @@ -2811,6 +2883,11 @@ pretty_print_func_ret(void *data, int size __unused, struct event *event, printf(" | "); + if (latency_format) { + print_lat_fmt(data, size); + printf(" | "); + } + field = find_field(event, "rettime"); if (!field) die("can't find rettime in return graph"); @@ -2882,9 +2959,14 @@ void print_event(int cpu, void *data, int size, unsigned long long nsecs, return pretty_print_func_graph(data, size, event, cpu, pid, comm, secs, usecs); - printf("%16s-%-5d [%03d] %5lu.%09Lu: %s: ", - comm, pid, cpu, - secs, nsecs, event->name); + if (latency_format) { + printf("%8.8s-%-5d %3d", + comm, pid, cpu); + print_lat_fmt(data, size); + } else + printf("%16s-%-5d [%03d]", comm, pid, cpu); + + printf(" %5lu.%06lu: %s: ", secs, usecs, event->name); if (event->flags & EVENT_FL_FAILED) { printf("EVENT '%s' FAILED TO PARSE\n", diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index 29821acc8db..f6637c2fa1f 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -239,6 +239,8 @@ extern int header_page_size_size; extern int header_page_data_offset; extern int header_page_data_size; +extern int latency_format; + int parse_header_page(char *buf, unsigned long size); int trace_parse_common_type(void *data); struct event *trace_find_event(int id); @@ -248,4 +250,13 @@ void *raw_field_ptr(struct event *event, const char *name, void *data); void read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events); +/* taken from kernel/trace/trace.h */ +enum trace_flag_type { + TRACE_FLAG_IRQS_OFF = 0x01, + TRACE_FLAG_IRQS_NOSUPPORT = 0x02, + TRACE_FLAG_NEED_RESCHED = 0x04, + TRACE_FLAG_HARDIRQ = 0x08, + TRACE_FLAG_SOFTIRQ = 0x10, +}; + #endif /* __PERF_TRACE_EVENTS_H */ From afdf1a404eed236d6f762ee44cc0f1dcc97206e0 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Oct 2009 15:43:43 -0400 Subject: [PATCH 131/470] perf tools: Handle - and + in parsing trace print format The opterators '-' and '+' are not handled in the trace print format. To do: '++' and '--'. 
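A minimal sketch of folding the two new operators into the numeric evaluation (illustrative helper, not the parser code itself):

    static long long eval_binop(char op, long long left, long long right)
    {
            switch (op) {
            case '+': return left + right;
            case '-': return left - right;
            default:  return 0;     /* other operators are handled elsewhere */
            }
    }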
Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091014194400.330843045@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index fde1a434d63..2d424ff50ec 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -2106,6 +2106,12 @@ static unsigned long long eval_num_arg(void *data, int size, die("unknown op '%s'", arg->op.op); val = left == right; break; + case '-': + val = left - right; + break; + case '+': + val = left + right; + break; default: die("unknown op '%s'", arg->op.op); } From c4dc775f53136cd6af8f88bce67cce9b42751768 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 14 Oct 2009 15:43:44 -0400 Subject: [PATCH 132/470] perf tools: Remove all char * typecasts and use const in prototype The (char *) for all the static strings was a fix for the symptom and not the disease. The real issue was that the function prototypes needed to be declared "const char *". Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091014194400.635935008@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 122 ++++++++++++++-------------- 1 file changed, 61 insertions(+), 61 deletions(-) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 2d424ff50ec..4b61b497040 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -685,10 +685,10 @@ static char *event_read_name(void) { char *token; - if (read_expected(EVENT_ITEM, (char *)"name") < 0) + if (read_expected(EVENT_ITEM, "name") < 0) return NULL; - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) return NULL; if (read_expect_type(EVENT_ITEM, &token) < 0) @@ -706,10 +706,10 @@ static int event_read_id(void) char *token; int id; - if (read_expected_item(EVENT_ITEM, (char *)"ID") < 0) + if (read_expected_item(EVENT_ITEM, "ID") < 0) return -1; - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) return -1; if (read_expect_type(EVENT_ITEM, &token) < 0) @@ -759,7 +759,7 @@ static int event_read_fields(struct event *event, struct format_field **fields) count++; - if (test_type_token(type, token, EVENT_ITEM, (char *)"field")) + if (test_type_token(type, token, EVENT_ITEM, "field")) goto fail; free_token(token); @@ -774,7 +774,7 @@ static int event_read_fields(struct event *event, struct format_field **fields) type = read_token(&token); } - if (test_type_token(type, token, EVENT_OP, (char *)":") < 0) + if (test_type_token(type, token, EVENT_OP, ":") < 0) return -1; if (read_expect_type(EVENT_ITEM, &token) < 0) @@ -892,14 +892,14 @@ static int event_read_fields(struct event *event, struct format_field **fields) field->flags |= FIELD_IS_DYNAMIC; } - if (test_type_token(type, token, EVENT_OP, (char *)";")) + if (test_type_token(type, token, EVENT_OP, ";")) goto fail; free_token(token); - if (read_expected(EVENT_ITEM, (char *)"offset") < 0) + if (read_expected(EVENT_ITEM, "offset") < 0) goto fail_expect; - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) goto fail_expect; if (read_expect_type(EVENT_ITEM, &token)) @@ -907,13 +907,13 @@ static int event_read_fields(struct event *event, struct format_field **fields) 
field->offset = strtoul(token, NULL, 0); free_token(token); - if (read_expected(EVENT_OP, (char *)";") < 0) + if (read_expected(EVENT_OP, ";") < 0) goto fail_expect; - if (read_expected(EVENT_ITEM, (char *)"size") < 0) + if (read_expected(EVENT_ITEM, "size") < 0) goto fail_expect; - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) goto fail_expect; if (read_expect_type(EVENT_ITEM, &token)) @@ -921,18 +921,18 @@ static int event_read_fields(struct event *event, struct format_field **fields) field->size = strtoul(token, NULL, 0); free_token(token); - if (read_expected(EVENT_OP, (char *)";") < 0) + if (read_expected(EVENT_OP, ";") < 0) goto fail_expect; type = read_token(&token); if (type != EVENT_NEWLINE) { /* newer versions of the kernel have a "signed" type */ - if (test_type_token(type, token, EVENT_ITEM, (char *)"signed")) + if (test_type_token(type, token, EVENT_ITEM, "signed")) goto fail; free_token(token); - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) goto fail_expect; if (read_expect_type(EVENT_ITEM, &token)) @@ -941,7 +941,7 @@ static int event_read_fields(struct event *event, struct format_field **fields) /* add signed type */ free_token(token); - if (read_expected(EVENT_OP, (char *)";") < 0) + if (read_expected(EVENT_OP, ";") < 0) goto fail_expect; if (read_expect_type(EVENT_NEWLINE, &token)) @@ -970,10 +970,10 @@ static int event_read_format(struct event *event) char *token; int ret; - if (read_expected_item(EVENT_ITEM, (char *)"format") < 0) + if (read_expected_item(EVENT_ITEM, "format") < 0) return -1; - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) return -1; if (read_expect_type(EVENT_NEWLINE, &token)) @@ -1033,7 +1033,7 @@ process_cond(struct event *event, struct print_arg *top, char **tok) *tok = NULL; type = process_arg(event, left, &token); - if (test_type_token(type, token, EVENT_OP, (char *)":")) + if (test_type_token(type, token, EVENT_OP, ":")) goto out_free; arg->op.op = token; @@ -1065,7 +1065,7 @@ process_array(struct event *event, struct print_arg *top, char **tok) *tok = NULL; type = process_arg(event, arg, &token); - if (test_type_token(type, token, EVENT_OP, (char *)"]")) + if (test_type_token(type, token, EVENT_OP, "]")) goto out_free; top->op.right = arg; @@ -1287,7 +1287,7 @@ process_entry(struct event *event __unused, struct print_arg *arg, char *field; char *token; - if (read_expected(EVENT_OP, (char *)"->") < 0) + if (read_expected(EVENT_OP, "->") < 0) return EVENT_ERROR; if (read_expect_type(EVENT_ITEM, &token) < 0) @@ -1447,14 +1447,14 @@ process_fields(struct event *event, struct print_flag_sym **list, char **tok) do { free_token(token); type = read_token_item(&token); - if (test_type_token(type, token, EVENT_OP, (char *)"{")) + if (test_type_token(type, token, EVENT_OP, "{")) break; arg = malloc_or_die(sizeof(*arg)); free_token(token); type = process_arg(event, arg, &token); - if (test_type_token(type, token, EVENT_DELIM, (char *)",")) + if (test_type_token(type, token, EVENT_DELIM, ",")) goto out_free; field = malloc_or_die(sizeof(*field)); @@ -1465,7 +1465,7 @@ process_fields(struct event *event, struct print_flag_sym **list, char **tok) free_token(token); type = process_arg(event, arg, &token); - if (test_type_token(type, token, EVENT_OP, (char *)"}")) + if (test_type_token(type, token, EVENT_OP, "}")) goto out_free; value = arg_eval(arg); @@ -1500,13 +1500,13 @@ process_flags(struct event *event, struct print_arg *arg, char **tok) 
memset(arg, 0, sizeof(*arg)); arg->type = PRINT_FLAGS; - if (read_expected_item(EVENT_DELIM, (char *)"(") < 0) + if (read_expected_item(EVENT_DELIM, "(") < 0) return EVENT_ERROR; field = malloc_or_die(sizeof(*field)); type = process_arg(event, field, &token); - if (test_type_token(type, token, EVENT_DELIM, (char *)",")) + if (test_type_token(type, token, EVENT_DELIM, ",")) goto out_free; arg->flags.field = field; @@ -1517,11 +1517,11 @@ process_flags(struct event *event, struct print_arg *arg, char **tok) type = read_token_item(&token); } - if (test_type_token(type, token, EVENT_DELIM, (char *)",")) + if (test_type_token(type, token, EVENT_DELIM, ",")) goto out_free; type = process_fields(event, &arg->flags.flags, &token); - if (test_type_token(type, token, EVENT_DELIM, (char *)")")) + if (test_type_token(type, token, EVENT_DELIM, ")")) goto out_free; free_token(token); @@ -1543,19 +1543,19 @@ process_symbols(struct event *event, struct print_arg *arg, char **tok) memset(arg, 0, sizeof(*arg)); arg->type = PRINT_SYMBOL; - if (read_expected_item(EVENT_DELIM, (char *)"(") < 0) + if (read_expected_item(EVENT_DELIM, "(") < 0) return EVENT_ERROR; field = malloc_or_die(sizeof(*field)); type = process_arg(event, field, &token); - if (test_type_token(type, token, EVENT_DELIM, (char *)",")) + if (test_type_token(type, token, EVENT_DELIM, ",")) goto out_free; arg->symbol.field = field; type = process_fields(event, &arg->symbol.symbols, &token); - if (test_type_token(type, token, EVENT_DELIM, (char *)")")) + if (test_type_token(type, token, EVENT_DELIM, ")")) goto out_free; free_token(token); @@ -1585,7 +1585,7 @@ process_paren(struct event *event, struct print_arg *arg, char **tok) if (type == EVENT_ERROR) return EVENT_ERROR; - if (test_type_token(type, token, EVENT_DELIM, (char *)")")) { + if (test_type_token(type, token, EVENT_DELIM, ")")) { free_token(token); return EVENT_ERROR; } @@ -1626,7 +1626,7 @@ process_str(struct event *event __unused, struct print_arg *arg, char **tok) enum event_type type; char *token; - if (read_expected(EVENT_DELIM, (char *)"(") < 0) + if (read_expected(EVENT_DELIM, "(") < 0) return EVENT_ERROR; if (read_expect_type(EVENT_ITEM, &token) < 0) @@ -1636,7 +1636,7 @@ process_str(struct event *event __unused, struct print_arg *arg, char **tok) arg->string.string = token; arg->string.offset = -1; - if (read_expected(EVENT_DELIM, (char *)")") < 0) + if (read_expected(EVENT_DELIM, ")") < 0) return EVENT_ERROR; type = read_token(&token); @@ -1775,13 +1775,13 @@ static int event_read_print(struct event *event) char *token; int ret; - if (read_expected_item(EVENT_ITEM, (char *)"print") < 0) + if (read_expected_item(EVENT_ITEM, "print") < 0) return -1; - if (read_expected(EVENT_ITEM, (char *)"fmt") < 0) + if (read_expected(EVENT_ITEM, "fmt") < 0) return -1; - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) return -1; if (read_expect_type(EVENT_DQUOTE, &token) < 0) @@ -1811,8 +1811,8 @@ static int event_read_print(struct event *event) token = cat; goto concat; } - - if (test_type_token(type, token, EVENT_DELIM, (char *)",")) + + if (test_type_token(type, token, EVENT_DELIM, ",")) goto fail; free_token(token); @@ -1931,7 +1931,7 @@ static int get_common_info(const char *type, int *offset, int *size) } static int __parse_common(void *data, int *size, int *offset, - char *name) + const char *name) { int ret; @@ -1949,7 +1949,7 @@ int trace_parse_common_type(void *data) static int type_size; return __parse_common(data, &type_size, &type_offset, - 
(char *)"common_type"); + "common_type"); } static int parse_common_pid(void *data) @@ -1958,7 +1958,7 @@ static int parse_common_pid(void *data) static int pid_size; return __parse_common(data, &pid_size, &pid_offset, - (char *)"common_pid"); + "common_pid"); } static int parse_common_pc(void *data) @@ -1967,7 +1967,7 @@ static int parse_common_pc(void *data) static int pc_size; return __parse_common(data, &pc_size, &pc_offset, - (char *)"common_preempt_count"); + "common_preempt_count"); } static int parse_common_flags(void *data) @@ -1976,7 +1976,7 @@ static int parse_common_flags(void *data) static int flags_size; return __parse_common(data, &flags_size, &flags_offset, - (char *)"common_flags"); + "common_flags"); } static int parse_common_lock_depth(void *data) @@ -1986,7 +1986,7 @@ static int parse_common_lock_depth(void *data) int ret; ret = __parse_common(data, &ld_size, &ld_offset, - (char *)"common_lock_depth"); + "common_lock_depth"); if (ret < 0) return -1; @@ -3049,15 +3049,15 @@ static void print_args(struct print_arg *args) } } -static void parse_header_field(char *field, +static void parse_header_field(const char *field, int *offset, int *size) { char *token; int type; - if (read_expected(EVENT_ITEM, (char *)"field") < 0) + if (read_expected(EVENT_ITEM, "field") < 0) return; - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) return; /* type */ @@ -3067,27 +3067,27 @@ static void parse_header_field(char *field, if (read_expected(EVENT_ITEM, field) < 0) return; - if (read_expected(EVENT_OP, (char *)";") < 0) + if (read_expected(EVENT_OP, ";") < 0) return; - if (read_expected(EVENT_ITEM, (char *)"offset") < 0) + if (read_expected(EVENT_ITEM, "offset") < 0) return; - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) return; if (read_expect_type(EVENT_ITEM, &token) < 0) goto fail; *offset = atoi(token); free_token(token); - if (read_expected(EVENT_OP, (char *)";") < 0) + if (read_expected(EVENT_OP, ";") < 0) return; - if (read_expected(EVENT_ITEM, (char *)"size") < 0) + if (read_expected(EVENT_ITEM, "size") < 0) return; - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) return; if (read_expect_type(EVENT_ITEM, &token) < 0) goto fail; *size = atoi(token); free_token(token); - if (read_expected(EVENT_OP, (char *)";") < 0) + if (read_expected(EVENT_OP, ";") < 0) return; type = read_token(&token); if (type != EVENT_NEWLINE) { @@ -3095,19 +3095,19 @@ static void parse_header_field(char *field, if (type != EVENT_ITEM) goto fail; - if (strcmp(token, (char *)"signed") != 0) + if (strcmp(token, "signed") != 0) goto fail; free_token(token); - if (read_expected(EVENT_OP, (char *)":") < 0) + if (read_expected(EVENT_OP, ":") < 0) return; if (read_expect_type(EVENT_ITEM, &token)) goto fail; free_token(token); - if (read_expected(EVENT_OP, (char *)";") < 0) + if (read_expected(EVENT_OP, ";") < 0) return; if (read_expect_type(EVENT_NEWLINE, &token)) @@ -3121,11 +3121,11 @@ int parse_header_page(char *buf, unsigned long size) { init_input_buf(buf, size); - parse_header_field((char *)"timestamp", &header_page_ts_offset, + parse_header_field("timestamp", &header_page_ts_offset, &header_page_ts_size); - parse_header_field((char *)"commit", &header_page_size_offset, + parse_header_field("commit", &header_page_size_offset, &header_page_size_size); - parse_header_field((char *)"data", &header_page_data_offset, + parse_header_field("data", &header_page_data_offset, &header_page_data_size); 
return 0; From fce29d15b59245597f7f320db4a9f2be0f5fb512 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 15 Oct 2009 11:20:34 +0800 Subject: [PATCH 133/470] tracing/filters: Refactor subsystem filter code Change: for_each_pred for_each_subsystem To: for_each_subsystem for_each_pred This change also prepares for later patches. Signed-off-by: Li Zefan Acked-by: Peter Zijlstra Acked-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Tom Zanussi LKML-Reference: <4AD69502.8060903@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace.h | 1 - kernel/trace/trace_events_filter.c | 124 +++++++++++------------------ 2 files changed, 45 insertions(+), 80 deletions(-) diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index acef8b4636f..628614532d1 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -683,7 +683,6 @@ struct event_filter { int n_preds; struct filter_pred **preds; char *filter_string; - bool no_reset; }; struct event_subsystem { diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 92672016da2..9c4f9a0dae2 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -615,14 +615,7 @@ static int init_subsystem_preds(struct event_subsystem *system) return 0; } -enum { - FILTER_DISABLE_ALL, - FILTER_INIT_NO_RESET, - FILTER_SKIP_NO_RESET, -}; - -static void filter_free_subsystem_preds(struct event_subsystem *system, - int flag) +static void filter_free_subsystem_preds(struct event_subsystem *system) { struct ftrace_event_call *call; @@ -633,14 +626,6 @@ static void filter_free_subsystem_preds(struct event_subsystem *system, if (strcmp(call->system, system->name) != 0) continue; - if (flag == FILTER_INIT_NO_RESET) { - call->filter->no_reset = false; - continue; - } - - if (flag == FILTER_SKIP_NO_RESET && call->filter->no_reset) - continue; - filter_disable_preds(call); remove_filter_string(call->filter); } @@ -817,44 +802,6 @@ add_pred_fn: return 0; } -static int filter_add_subsystem_pred(struct filter_parse_state *ps, - struct event_subsystem *system, - struct filter_pred *pred, - char *filter_string, - bool dry_run) -{ - struct ftrace_event_call *call; - int err = 0; - bool fail = true; - - list_for_each_entry(call, &ftrace_events, list) { - - if (!call->define_fields) - continue; - - if (strcmp(call->system, system->name)) - continue; - - if (call->filter->no_reset) - continue; - - err = filter_add_pred(ps, call, pred, dry_run); - if (err) - call->filter->no_reset = true; - else - fail = false; - - if (!dry_run) - replace_filter_string(call->filter, filter_string); - } - - if (fail) { - parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0); - return err; - } - return 0; -} - static void parse_init(struct filter_parse_state *ps, struct filter_op *ops, char *infix_string) @@ -1209,8 +1156,7 @@ static int check_preds(struct filter_parse_state *ps) return 0; } -static int replace_preds(struct event_subsystem *system, - struct ftrace_event_call *call, +static int replace_preds(struct ftrace_event_call *call, struct filter_parse_state *ps, char *filter_string, bool dry_run) @@ -1257,11 +1203,7 @@ static int replace_preds(struct event_subsystem *system, add_pred: if (!pred) return -ENOMEM; - if (call) - err = filter_add_pred(ps, call, pred, false); - else - err = filter_add_subsystem_pred(ps, system, pred, - filter_string, dry_run); + err = filter_add_pred(ps, call, pred, dry_run); filter_free_pred(pred); if (err) return err; @@ -1272,6 +1214,44 @@ add_pred: return 0; } +static int replace_system_preds(struct 
event_subsystem *system, + struct filter_parse_state *ps, + char *filter_string) +{ + struct ftrace_event_call *call; + int err; + bool fail = true; + + list_for_each_entry(call, &ftrace_events, list) { + + if (!call->define_fields) + continue; + + if (strcmp(call->system, system->name) != 0) + continue; + + /* try to see if the filter can be applied */ + err = replace_preds(call, ps, filter_string, true); + if (err) + continue; + + /* really apply the filter */ + filter_disable_preds(call); + err = replace_preds(call, ps, filter_string, false); + if (err) + filter_disable_preds(call); + else + replace_filter_string(call->filter, filter_string); + fail = false; + } + + if (fail) { + parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0); + return err; + } + return 0; +} + int apply_event_filter(struct ftrace_event_call *call, char *filter_string) { int err; @@ -1306,7 +1286,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string) goto out; } - err = replace_preds(NULL, call, ps, filter_string, false); + err = replace_preds(call, ps, filter_string, false); if (err) append_filter_err(ps, call->filter); @@ -1334,7 +1314,7 @@ int apply_subsystem_event_filter(struct event_subsystem *system, goto out_unlock; if (!strcmp(strstrip(filter_string), "0")) { - filter_free_subsystem_preds(system, FILTER_DISABLE_ALL); + filter_free_subsystem_preds(system); remove_filter_string(system->filter); mutex_unlock(&event_mutex); return 0; @@ -1354,23 +1334,9 @@ int apply_subsystem_event_filter(struct event_subsystem *system, goto out; } - filter_free_subsystem_preds(system, FILTER_INIT_NO_RESET); - - /* try to see the filter can be applied to which events */ - err = replace_preds(system, NULL, ps, filter_string, true); - if (err) { + err = replace_system_preds(system, ps, filter_string); + if (err) append_filter_err(ps, system->filter); - goto out; - } - - filter_free_subsystem_preds(system, FILTER_SKIP_NO_RESET); - - /* really apply the filter to the events */ - err = replace_preds(system, NULL, ps, filter_string, false); - if (err) { - append_filter_err(ps, system->filter); - filter_free_subsystem_preds(system, 2); - } out: filter_opstack_clear(ps); From b0f1a59a98d7ac2102e7e4f22904c26d564a5628 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 15 Oct 2009 11:21:12 +0800 Subject: [PATCH 134/470] tracing/filters: Use a different op for glob match "==" will always do a full match, and "~" will do a glob match. In the future, we may add "=~" for regex match. 
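For example (illustrative expressions, not from the patch), prev_comm == "bash" stays a full-string compare while prev_comm ~ "ba*" glob-matches. A sketch of classifying the limited glob forms, using the match types named in the hunks that follow:

    #include <string.h>

    enum match_type { MATCH_FULL, MATCH_FRONT_ONLY, MATCH_MIDDLE_ONLY, MATCH_END_ONLY };

    /* Illustrative: a lone '*' at the front and/or end selects the match type. */
    static enum match_type classify_glob(const char *pat)
    {
            size_t len = strlen(pat);
            int front = len > 0 && pat[0] == '*';
            int end   = len > 0 && pat[len - 1] == '*';

            if (front && end && len > 1)
                    return MATCH_MIDDLE_ONLY;
            if (front)
                    return MATCH_END_ONLY;          /* "*foo": ends with foo */
            if (end)
                    return MATCH_FRONT_ONLY;        /* "foo*": starts with foo */
            return MATCH_FULL;
    }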
Signed-off-by: Li Zefan Acked-by: Peter Zijlstra Acked-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Tom Zanussi LKML-Reference: <4AD69528.3050309@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace.h | 2 +- kernel/trace/trace_events_filter.c | 57 ++++++++++++++---------------- 2 files changed, 28 insertions(+), 31 deletions(-) diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 628614532d1..ffe53ddbe67 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -702,7 +702,7 @@ typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event, typedef int (*regex_match_func)(char *str, struct regex *r, int len); enum regex_type { - MATCH_FULL, + MATCH_FULL = 0, MATCH_FRONT_ONLY, MATCH_MIDDLE_ONLY, MATCH_END_ONLY, diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 9c4f9a0dae2..273845fce39 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -29,6 +29,7 @@ enum filter_op_ids { OP_OR, OP_AND, + OP_GLOB, OP_NE, OP_EQ, OP_LT, @@ -46,16 +47,17 @@ struct filter_op { }; static struct filter_op filter_ops[] = { - { OP_OR, "||", 1 }, - { OP_AND, "&&", 2 }, - { OP_NE, "!=", 4 }, - { OP_EQ, "==", 4 }, - { OP_LT, "<", 5 }, - { OP_LE, "<=", 5 }, - { OP_GT, ">", 5 }, - { OP_GE, ">=", 5 }, - { OP_NONE, "OP_NONE", 0 }, - { OP_OPEN_PAREN, "(", 0 }, + { OP_OR, "||", 1 }, + { OP_AND, "&&", 2 }, + { OP_GLOB, "~", 4 }, + { OP_NE, "!=", 4 }, + { OP_EQ, "==", 4 }, + { OP_LT, "<", 5 }, + { OP_LE, "<=", 5 }, + { OP_GT, ">", 5 }, + { OP_GE, ">=", 5 }, + { OP_NONE, "OP_NONE", 0 }, + { OP_OPEN_PAREN, "(", 0 }, }; enum { @@ -329,22 +331,18 @@ enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not) return type; } -static int filter_build_regex(struct filter_pred *pred) +static void filter_build_regex(struct filter_pred *pred) { struct regex *r = &pred->regex; - char *search, *dup; - enum regex_type type; - int not; + char *search; + enum regex_type type = MATCH_FULL; + int not = 0; - type = filter_parse_regex(r->pattern, r->len, &search, ¬); - dup = kstrdup(search, GFP_KERNEL); - if (!dup) - return -ENOMEM; - - strcpy(r->pattern, dup); - kfree(dup); - - r->len = strlen(r->pattern); + if (pred->op == OP_GLOB) { + type = filter_parse_regex(r->pattern, r->len, &search, ¬); + r->len = strlen(search); + memmove(r->pattern, search, r->len+1); + } switch (type) { case MATCH_FULL: @@ -362,8 +360,6 @@ static int filter_build_regex(struct filter_pred *pred) } pred->not ^= not; - - return 0; } /* return 1 if event matches, 0 otherwise (discard) */ @@ -676,7 +672,10 @@ static bool is_string_field(struct ftrace_event_field *field) static int is_legal_op(struct ftrace_event_field *field, int op) { - if (is_string_field(field) && (op != OP_EQ && op != OP_NE)) + if (is_string_field(field) && + (op != OP_EQ && op != OP_NE && op != OP_GLOB)) + return 0; + if (!is_string_field(field) && op == OP_GLOB) return 0; return 1; @@ -761,15 +760,13 @@ static int filter_add_pred(struct filter_parse_state *ps, } if (is_string_field(field)) { - ret = filter_build_regex(pred); - if (ret) - return ret; + filter_build_regex(pred); if (field->filter_type == FILTER_STATIC_STRING) { fn = filter_pred_string; pred->regex.field_len = field->size; } else if (field->filter_type == FILTER_DYN_STRING) - fn = filter_pred_strloc; + fn = filter_pred_strloc; else { fn = filter_pred_pchar; pred->regex.field_len = strlen(pred->regex.pattern); From 6fb2915df7f0747d9044da9dbff5b46dc2e20830 Mon Sep 17 00:00:00 2001 From: Li Zefan 
Date: Thu, 15 Oct 2009 11:21:42 +0800 Subject: [PATCH 135/470] tracing/profile: Add filter support - Add an ioctl to allocate a filter for a perf event. - Free the filter when the associated perf event is to be freed. - Do the filtering in perf_swevent_match(). Signed-off-by: Li Zefan Acked-by: Peter Zijlstra Acked-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Tom Zanussi LKML-Reference: <4AD69546.8050401@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- include/linux/ftrace_event.h | 11 ++- include/linux/perf_counter.h | 1 + include/linux/perf_event.h | 6 ++ kernel/perf_event.c | 80 +++++++++++++++-- kernel/trace/trace.h | 3 +- kernel/trace/trace_events_filter.c | 135 +++++++++++++++++++++++------ 6 files changed, 200 insertions(+), 36 deletions(-) diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 4ec5e67e18c..d11770472bc 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -144,7 +144,7 @@ extern char *trace_profile_buf_nmi; #define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */ extern void destroy_preds(struct ftrace_event_call *call); -extern int filter_match_preds(struct ftrace_event_call *call, void *rec); +extern int filter_match_preds(struct event_filter *filter, void *rec); extern int filter_current_check_discard(struct ring_buffer *buffer, struct ftrace_event_call *call, void *rec, @@ -186,4 +186,13 @@ do { \ __trace_printk(ip, fmt, ##args); \ } while (0) +#ifdef CONFIG_EVENT_PROFILE +struct perf_event; +extern int ftrace_profile_enable(int event_id); +extern void ftrace_profile_disable(int event_id); +extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, + char *filter_str); +extern void ftrace_profile_free_filter(struct perf_event *event); +#endif + #endif /* _LINUX_FTRACE_EVENT_H */ diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 7b7fbf433cf..91a2b4309e7 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -225,6 +225,7 @@ struct perf_counter_attr { #define PERF_COUNTER_IOC_RESET _IO ('$', 3) #define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64) #define PERF_COUNTER_IOC_SET_OUTPUT _IO ('$', 5) +#define PERF_COUNTER_IOC_SET_FILTER _IOW('$', 6, char *) enum perf_counter_ioc_flags { PERF_IOC_FLAG_GROUP = 1U << 0, diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 2e6d95f9741..df9d964c15f 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -221,6 +221,7 @@ struct perf_event_attr { #define PERF_EVENT_IOC_RESET _IO ('$', 3) #define PERF_EVENT_IOC_PERIOD _IOW('$', 4, u64) #define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) +#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *) enum perf_event_ioc_flags { PERF_IOC_FLAG_GROUP = 1U << 0, @@ -633,7 +634,12 @@ struct perf_event { struct pid_namespace *ns; u64 id; + +#ifdef CONFIG_EVENT_PROFILE + struct event_filter *filter; #endif + +#endif /* CONFIG_PERF_EVENTS */ }; /** diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 9d0b5c66588..12b5ec39bf9 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -28,6 +28,7 @@ #include #include #include +#include #include @@ -1658,6 +1659,8 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu) return ERR_PTR(err); } +static void perf_event_free_filter(struct perf_event *event); + static void free_event_rcu(struct rcu_head *head) { struct perf_event *event; @@ -1665,6 +1668,7 @@ static void free_event_rcu(struct rcu_head *head) event = container_of(head, struct perf_event, 
rcu_head); if (event->ns) put_pid_ns(event->ns); + perf_event_free_filter(event); kfree(event); } @@ -1974,7 +1978,8 @@ unlock: return ret; } -int perf_event_set_output(struct perf_event *event, int output_fd); +static int perf_event_set_output(struct perf_event *event, int output_fd); +static int perf_event_set_filter(struct perf_event *event, void __user *arg); static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { @@ -2002,6 +2007,9 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case PERF_EVENT_IOC_SET_OUTPUT: return perf_event_set_output(event, arg); + case PERF_EVENT_IOC_SET_FILTER: + return perf_event_set_filter(event, (void __user *)arg); + default: return -ENOTTY; } @@ -3806,9 +3814,14 @@ static int perf_swevent_is_counting(struct perf_event *event) return 1; } +static int perf_tp_event_match(struct perf_event *event, + struct perf_sample_data *data); + static int perf_swevent_match(struct perf_event *event, enum perf_type_id type, - u32 event_id, struct pt_regs *regs) + u32 event_id, + struct perf_sample_data *data, + struct pt_regs *regs) { if (!perf_swevent_is_counting(event)) return 0; @@ -3826,6 +3839,10 @@ static int perf_swevent_match(struct perf_event *event, return 0; } + if (event->attr.type == PERF_TYPE_TRACEPOINT && + !perf_tp_event_match(event, data)) + return 0; + return 1; } @@ -3842,7 +3859,7 @@ static void perf_swevent_ctx_event(struct perf_event_context *ctx, rcu_read_lock(); list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { - if (perf_swevent_match(event, type, event_id, regs)) + if (perf_swevent_match(event, type, event_id, data, regs)) perf_swevent_add(event, nr, nmi, data, regs); } rcu_read_unlock(); @@ -4086,6 +4103,7 @@ static const struct pmu perf_ops_task_clock = { }; #ifdef CONFIG_EVENT_PROFILE + void perf_tp_event(int event_id, u64 addr, u64 count, void *record, int entry_size) { @@ -4109,8 +4127,15 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record, } EXPORT_SYMBOL_GPL(perf_tp_event); -extern int ftrace_profile_enable(int); -extern void ftrace_profile_disable(int); +static int perf_tp_event_match(struct perf_event *event, + struct perf_sample_data *data) +{ + void *record = data->raw->data; + + if (likely(!event->filter) || filter_match_preds(event->filter, record)) + return 1; + return 0; +} static void tp_perf_event_destroy(struct perf_event *event) { @@ -4135,12 +4160,53 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event) return &perf_ops_generic; } + +static int perf_event_set_filter(struct perf_event *event, void __user *arg) +{ + char *filter_str; + int ret; + + if (event->attr.type != PERF_TYPE_TRACEPOINT) + return -EINVAL; + + filter_str = strndup_user(arg, PAGE_SIZE); + if (IS_ERR(filter_str)) + return PTR_ERR(filter_str); + + ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); + + kfree(filter_str); + return ret; +} + +static void perf_event_free_filter(struct perf_event *event) +{ + ftrace_profile_free_filter(event); +} + #else + +static int perf_tp_event_match(struct perf_event *event, + struct perf_sample_data *data) +{ + return 1; +} + static const struct pmu *tp_perf_event_init(struct perf_event *event) { return NULL; } -#endif + +static int perf_event_set_filter(struct perf_event *event, void __user *arg) +{ + return -ENOENT; +} + +static void perf_event_free_filter(struct perf_event *event) +{ +} + +#endif /* CONFIG_EVENT_PROFILE */ atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; @@ -4394,7 +4460,7 @@ 
err_size: goto out; } -int perf_event_set_output(struct perf_event *event, int output_fd) +static int perf_event_set_output(struct perf_event *event, int output_fd) { struct perf_event *output_event = NULL; struct file *output_file = NULL; diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index ffe53ddbe67..4959ada9e0b 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -743,7 +743,8 @@ filter_check_discard(struct ftrace_event_call *call, void *rec, struct ring_buffer *buffer, struct ring_buffer_event *event) { - if (unlikely(call->filter_active) && !filter_match_preds(call, rec)) { + if (unlikely(call->filter_active) && + !filter_match_preds(call->filter, rec)) { ring_buffer_discard_commit(buffer, event); return 1; } diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 273845fce39..e27bb6acc2d 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -21,6 +21,7 @@ #include #include #include +#include #include "trace.h" #include "trace_output.h" @@ -363,9 +364,8 @@ static void filter_build_regex(struct filter_pred *pred) } /* return 1 if event matches, 0 otherwise (discard) */ -int filter_match_preds(struct ftrace_event_call *call, void *rec) +int filter_match_preds(struct event_filter *filter, void *rec) { - struct event_filter *filter = call->filter; int match, top = 0, val1 = 0, val2 = 0; int stack[MAX_FILTER_PRED]; struct filter_pred *pred; @@ -538,9 +538,8 @@ static void filter_disable_preds(struct ftrace_event_call *call) filter->preds[i]->fn = filter_pred_none; } -void destroy_preds(struct ftrace_event_call *call) +static void __free_preds(struct event_filter *filter) { - struct event_filter *filter = call->filter; int i; if (!filter) @@ -553,21 +552,24 @@ void destroy_preds(struct ftrace_event_call *call) kfree(filter->preds); kfree(filter->filter_string); kfree(filter); - call->filter = NULL; } -static int init_preds(struct ftrace_event_call *call) +void destroy_preds(struct ftrace_event_call *call) +{ + __free_preds(call->filter); + call->filter = NULL; + call->filter_active = 0; +} + +static struct event_filter *__alloc_preds(void) { struct event_filter *filter; struct filter_pred *pred; int i; - if (call->filter) - return 0; - - filter = call->filter = kzalloc(sizeof(*filter), GFP_KERNEL); - if (!call->filter) - return -ENOMEM; + filter = kzalloc(sizeof(*filter), GFP_KERNEL); + if (!filter) + return ERR_PTR(-ENOMEM); filter->n_preds = 0; @@ -583,12 +585,24 @@ static int init_preds(struct ftrace_event_call *call) filter->preds[i] = pred; } - return 0; + return filter; oom: - destroy_preds(call); + __free_preds(filter); + return ERR_PTR(-ENOMEM); +} - return -ENOMEM; +static int init_preds(struct ftrace_event_call *call) +{ + if (call->filter) + return 0; + + call->filter_active = 0; + call->filter = __alloc_preds(); + if (IS_ERR(call->filter)) + return PTR_ERR(call->filter); + + return 0; } static int init_subsystem_preds(struct event_subsystem *system) @@ -629,10 +643,10 @@ static void filter_free_subsystem_preds(struct event_subsystem *system) static int filter_add_pred_fn(struct filter_parse_state *ps, struct ftrace_event_call *call, + struct event_filter *filter, struct filter_pred *pred, filter_pred_fn_t fn) { - struct event_filter *filter = call->filter; int idx, err; if (filter->n_preds == MAX_FILTER_PRED) { @@ -647,7 +661,6 @@ static int filter_add_pred_fn(struct filter_parse_state *ps, return err; filter->n_preds++; - call->filter_active = 1; return 0; } @@ -726,6 +739,7 @@ 
static filter_pred_fn_t select_comparison_fn(int op, int field_size, static int filter_add_pred(struct filter_parse_state *ps, struct ftrace_event_call *call, + struct event_filter *filter, struct filter_pred *pred, bool dry_run) { @@ -795,7 +809,7 @@ static int filter_add_pred(struct filter_parse_state *ps, add_pred_fn: if (!dry_run) - return filter_add_pred_fn(ps, call, pred, fn); + return filter_add_pred_fn(ps, call, filter, pred, fn); return 0; } @@ -1154,6 +1168,7 @@ static int check_preds(struct filter_parse_state *ps) } static int replace_preds(struct ftrace_event_call *call, + struct event_filter *filter, struct filter_parse_state *ps, char *filter_string, bool dry_run) @@ -1200,7 +1215,7 @@ static int replace_preds(struct ftrace_event_call *call, add_pred: if (!pred) return -ENOMEM; - err = filter_add_pred(ps, call, pred, dry_run); + err = filter_add_pred(ps, call, filter, pred, dry_run); filter_free_pred(pred); if (err) return err; @@ -1216,6 +1231,7 @@ static int replace_system_preds(struct event_subsystem *system, char *filter_string) { struct ftrace_event_call *call; + struct event_filter *filter; int err; bool fail = true; @@ -1228,17 +1244,19 @@ static int replace_system_preds(struct event_subsystem *system, continue; /* try to see if the filter can be applied */ - err = replace_preds(call, ps, filter_string, true); + err = replace_preds(call, filter, ps, filter_string, true); if (err) continue; /* really apply the filter */ filter_disable_preds(call); - err = replace_preds(call, ps, filter_string, false); + err = replace_preds(call, filter, ps, filter_string, false); if (err) filter_disable_preds(call); - else - replace_filter_string(call->filter, filter_string); + else { + call->filter_active = 1; + replace_filter_string(filter, filter_string); + } fail = false; } @@ -1252,7 +1270,6 @@ static int replace_system_preds(struct event_subsystem *system, int apply_event_filter(struct ftrace_event_call *call, char *filter_string) { int err; - struct filter_parse_state *ps; mutex_lock(&event_mutex); @@ -1283,10 +1300,11 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string) goto out; } - err = replace_preds(call, ps, filter_string, false); + err = replace_preds(call, call->filter, ps, filter_string, false); if (err) append_filter_err(ps, call->filter); - + else + call->filter_active = 1; out: filter_opstack_clear(ps); postfix_clear(ps); @@ -1301,7 +1319,6 @@ int apply_subsystem_event_filter(struct event_subsystem *system, char *filter_string) { int err; - struct filter_parse_state *ps; mutex_lock(&event_mutex); @@ -1345,3 +1362,67 @@ out_unlock: return err; } +#ifdef CONFIG_EVENT_PROFILE + +void ftrace_profile_free_filter(struct perf_event *event) +{ + struct event_filter *filter = event->filter; + + event->filter = NULL; + __free_preds(filter); +} + +int ftrace_profile_set_filter(struct perf_event *event, int event_id, + char *filter_str) +{ + int err; + struct event_filter *filter; + struct filter_parse_state *ps; + struct ftrace_event_call *call = NULL; + + mutex_lock(&event_mutex); + + list_for_each_entry(call, &ftrace_events, list) { + if (call->id == event_id) + break; + } + if (!call) + return -EINVAL; + + if (event->filter) + return -EEXIST; + + filter = __alloc_preds(); + if (IS_ERR(filter)) + return PTR_ERR(filter); + + err = -ENOMEM; + ps = kzalloc(sizeof(*ps), GFP_KERNEL); + if (!ps) + goto free_preds; + + parse_init(ps, filter_ops, filter_str); + err = filter_parse(ps); + if (err) + goto free_ps; + + err = replace_preds(call, filter, ps, 
filter_str, false); + if (!err) + event->filter = filter; + +free_ps: + filter_opstack_clear(ps); + postfix_clear(ps); + kfree(ps); + +free_preds: + if (err) + __free_preds(filter); + + mutex_unlock(&event_mutex); + + return err; +} + +#endif /* CONFIG_EVENT_PROFILE */ + From c171b552a7d316c7e1c3ad6f70a30178dd53e14c Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 15 Oct 2009 11:22:07 +0800 Subject: [PATCH 136/470] perf trace: Add filter Suppport Add a new option "--filter " to perf record, and it should be right after "-e trace_point": #./perf record -R -f -e irq:irq_handler_entry --filter irq==18 ^C # ./perf trace perf-4303 ... irq_handler_entry: irq=18 handler=eth0 init-0 ... irq_handler_entry: irq=18 handler=eth0 init-0 ... irq_handler_entry: irq=18 handler=eth0 init-0 ... irq_handler_entry: irq=18 handler=eth0 init-0 ... irq_handler_entry: irq=18 handler=eth0 See Documentation/trace/events.txt for the syntax of filter expressions. Signed-off-by: Li Zefan Acked-by: Peter Zijlstra Acked-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Tom Zanussi LKML-Reference: <4AD6955F.90602@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 15 ++++++++++++++- tools/perf/util/parse-events.c | 26 ++++++++++++++++++++++++-- tools/perf/util/parse-events.h | 2 ++ 3 files changed, 40 insertions(+), 3 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 4e3a374e7aa..8b2c860c49a 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -374,9 +374,11 @@ static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int n static void create_counter(int counter, int cpu, pid_t pid) { + char *filter = filters[counter]; struct perf_event_attr *attr = attrs + counter; struct perf_header_attr *h_attr; int track = !counter; /* only the first counter needs these */ + int ret; struct { u64 count; u64 time_enabled; @@ -479,7 +481,6 @@ try_again: multiplex_fd = fd[nr_cpu][counter]; if (multiplex && fd[nr_cpu][counter] != multiplex_fd) { - int ret; ret = ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_SET_OUTPUT, multiplex_fd); assert(ret != -1); @@ -499,6 +500,16 @@ try_again: } } + if (filter != NULL) { + ret = ioctl(fd[nr_cpu][counter], + PERF_EVENT_IOC_SET_FILTER, filter); + if (ret) { + error("failed to set filter with %d (%s)\n", errno, + strerror(errno)); + exit(-1); + } + } + ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_ENABLE); } @@ -676,6 +687,8 @@ static const struct option options[] = { OPT_CALLBACK('e', "event", NULL, "event", "event selector. 
use 'perf list' to list available events", parse_events), + OPT_CALLBACK(0, "filter", NULL, "filter", + "event filter", parse_filter), OPT_INTEGER('p', "pid", &target_pid, "record events on existing pid"), OPT_INTEGER('r', "realtime", &realtime_prio, diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 8cfb48cbbea..b097570e962 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -8,9 +8,10 @@ #include "cache.h" #include "header.h" -int nr_counters; +int nr_counters; struct perf_event_attr attrs[MAX_COUNTERS]; +char *filters[MAX_COUNTERS]; struct event_symbol { u8 type; @@ -708,7 +709,6 @@ static void store_event_type(const char *orgname) perf_header__push_event(id, orgname); } - int parse_events(const struct option *opt __used, const char *str, int unset __used) { struct perf_event_attr attr; @@ -745,6 +745,28 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u return 0; } +int parse_filter(const struct option *opt __used, const char *str, + int unset __used) +{ + int i = nr_counters - 1; + int len = strlen(str); + + if (i < 0 || attrs[i].type != PERF_TYPE_TRACEPOINT) { + fprintf(stderr, + "-F option should follow a -e tracepoint option\n"); + return -1; + } + + filters[i] = malloc(len + 1); + if (!filters[i]) { + fprintf(stderr, "not enough memory to hold filter string\n"); + return -1; + } + strcpy(filters[i], str); + + return 0; +} + static const char * const event_type_descriptors[] = { "", "Hardware event", diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index 8626a439033..b8c1f64bc93 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h @@ -17,11 +17,13 @@ extern struct tracepoint_path *tracepoint_id_to_path(u64 config); extern int nr_counters; extern struct perf_event_attr attrs[MAX_COUNTERS]; +extern char *filters[MAX_COUNTERS]; extern const char *event_name(int ctr); extern const char *__event_name(int type, u64 config); extern int parse_events(const struct option *opt, const char *str, int unset); +extern int parse_filter(const struct option *opt, const char *str, int unset); #define EVENTS_HELP_MAX (128*1024) From a66abe7fbf7805a1a02f241bd5283265ff6706ec Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 15 Oct 2009 12:24:04 +0200 Subject: [PATCH 137/470] tracing/events: Fix locking imbalance in the filter code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Américo Wang noticed that we have a locking imbalance in the error paths of ftrace_profile_set_filter(), causing potential leakage of event_mutex. Also clean up other error codepaths related to event_mutex while at it. Plus fix an initialized variable in the subsystem filter code. 
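Condensed from the ftrace_profile_set_filter() hunks below, the error paths now funnel through a single unlock label instead of returning with event_mutex held (a sketch only, with unrelated code omitted):

	mutex_lock(&event_mutex);
	...
	err = -EINVAL;
	if (!call)
		goto out_unlock;

	err = -EEXIST;
	if (event->filter)
		goto out_unlock;
	...
out_unlock:
	mutex_unlock(&event_mutex);

	return err;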
Reported-by: Américo Wang Cc: Li Zefan Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Steven Rostedt Cc: Tom Zanussi LKML-Reference: <2375c9f90910150247u5ccb8e2at58c764e385ffa490@mail.gmail.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_events_filter.c | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index e27bb6acc2d..21d34757b95 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -1230,10 +1230,10 @@ static int replace_system_preds(struct event_subsystem *system, struct filter_parse_state *ps, char *filter_string) { + struct event_filter *filter = system->filter; struct ftrace_event_call *call; - struct event_filter *filter; - int err; bool fail = true; + int err; list_for_each_entry(call, &ftrace_events, list) { @@ -1262,7 +1262,7 @@ static int replace_system_preds(struct event_subsystem *system, if (fail) { parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0); - return err; + return -EINVAL; } return 0; } @@ -1281,8 +1281,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string) if (!strcmp(strstrip(filter_string), "0")) { filter_disable_preds(call); remove_filter_string(call->filter); - mutex_unlock(&event_mutex); - return 0; + goto out_unlock; } err = -ENOMEM; @@ -1330,8 +1329,7 @@ int apply_subsystem_event_filter(struct event_subsystem *system, if (!strcmp(strstrip(filter_string), "0")) { filter_free_subsystem_preds(system); remove_filter_string(system->filter); - mutex_unlock(&event_mutex); - return 0; + goto out_unlock; } err = -ENOMEM; @@ -1386,15 +1384,20 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id, if (call->id == event_id) break; } - if (!call) - return -EINVAL; + err = -EINVAL; + if (!call) + goto out_unlock; + + err = -EEXIST; if (event->filter) - return -EEXIST; + goto out_unlock; filter = __alloc_preds(); - if (IS_ERR(filter)) - return PTR_ERR(filter); + if (IS_ERR(filter)) { + err = PTR_ERR(filter); + goto out_unlock; + } err = -ENOMEM; ps = kzalloc(sizeof(*ps), GFP_KERNEL); @@ -1419,6 +1422,7 @@ free_preds: if (err) __free_preds(filter); +out_unlock: mutex_unlock(&event_mutex); return err; From 434a83c3fbb951908a3a52040f7f0e0b8ba00dd0 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 15 Oct 2009 11:50:39 +0200 Subject: [PATCH 138/470] events: Harmonize event field names and print output names Now that we can filter based on fields via perf record, people will start using filter expressions and will expect them to be obvious. The primary way to see which fields are available is by looking at the trace output, such as: gcc-18676 [000] 343.011728: irq_handler_entry: irq=0 handler=timer cc1-18677 [000] 343.012727: irq_handler_entry: irq=0 handler=timer cc1-18677 [000] 343.032692: irq_handler_entry: irq=0 handler=timer cc1-18677 [000] 343.033690: irq_handler_entry: irq=0 handler=timer cc1-18677 [000] 343.034687: irq_handler_entry: irq=0 handler=timer cc1-18677 [000] 343.035686: irq_handler_entry: irq=0 handler=timer cc1-18677 [000] 343.036684: irq_handler_entry: irq=0 handler=timer While 'irq==0' filters work, the 'handler==' filter expression does not work: $ perf record -R -f -a -e irq:irq_handler_entry --filter handler=timer sleep 1 Error: failed to set filter with 22 (Invalid argument) The problem is that while an 'irq' field exists and is recognized as a filter field - 'handler' does not exist - its name is 'name' in the output. 
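(For reference, the field names that the filter code actually accepts for a given tracepoint can always be read from its format file under the tracing debugfs directory; roughly, and abbreviated - the exact path depends on where debugfs is mounted:

	# cat /sys/kernel/debug/tracing/events/irq/irq_handler_entry/format
	...
	field:int irq;	...
	field:__data_loc char[] name;	...

which is why 'irq' filters work while 'handler' is rejected - only 'name' exists as a field.)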
To solve this, we need to synchronize the printout and the field names, wherever possible. In cases where the printout prints a non-field, we enclose that information in square brackets, such as: perf-1380 [013] 724.903505: softirq_exit: vec=9 [action=RCU] perf-1380 [013] 724.904482: softirq_exit: vec=1 [action=TIMER] This way users can use filter expressions more intuitively: all fields that show up as 'primary' (non-bracketed) information is filterable. This patch harmonizes the field names for all irq, bkl, power, sched and timer events. We might in fact think about dropping the print format bit of generic tracepoints altogether, and just print the fields that are being recorded. Cc: Li Zefan Cc: Tom Zanussi Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- include/trace/events/bkl.h | 18 ++++---- include/trace/events/irq.h | 8 ++-- include/trace/events/power.h | 2 - include/trace/events/sched.h | 44 ++++++++++---------- include/trace/events/timer.h | 79 ++++++++++++++++++------------------ 5 files changed, 74 insertions(+), 77 deletions(-) diff --git a/include/trace/events/bkl.h b/include/trace/events/bkl.h index 8abd620a490..1af72dc2427 100644 --- a/include/trace/events/bkl.h +++ b/include/trace/events/bkl.h @@ -13,7 +13,7 @@ TRACE_EVENT(lock_kernel, TP_ARGS(func, file, line), TP_STRUCT__entry( - __field( int, lock_depth ) + __field( int, depth ) __field_ext( const char *, func, FILTER_PTR_STRING ) __field_ext( const char *, file, FILTER_PTR_STRING ) __field( int, line ) @@ -21,13 +21,13 @@ TRACE_EVENT(lock_kernel, TP_fast_assign( /* We want to record the lock_depth after lock is acquired */ - __entry->lock_depth = current->lock_depth + 1; + __entry->depth = current->lock_depth + 1; __entry->func = func; __entry->file = file; __entry->line = line; ), - TP_printk("depth: %d, %s:%d %s()", __entry->lock_depth, + TP_printk("depth=%d file:line=%s:%d func=%s()", __entry->depth, __entry->file, __entry->line, __entry->func) ); @@ -38,20 +38,20 @@ TRACE_EVENT(unlock_kernel, TP_ARGS(func, file, line), TP_STRUCT__entry( - __field(int, lock_depth) - __field(const char *, func) - __field(const char *, file) - __field(int, line) + __field(int, depth ) + __field(const char *, func ) + __field(const char *, file ) + __field(int, line ) ), TP_fast_assign( - __entry->lock_depth = current->lock_depth; + __entry->depth = current->lock_depth; __entry->func = func; __entry->file = file; __entry->line = line; ), - TP_printk("depth: %d, %s:%d %s()", __entry->lock_depth, + TP_printk("depth=%d file:line=%s:%d func=%s()", __entry->depth, __entry->file, __entry->line, __entry->func) ); diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h index b89f9db4a40..dcfcd440762 100644 --- a/include/trace/events/irq.h +++ b/include/trace/events/irq.h @@ -48,7 +48,7 @@ TRACE_EVENT(irq_handler_entry, __assign_str(name, action->name); ), - TP_printk("irq=%d handler=%s", __entry->irq, __get_str(name)) + TP_printk("irq=%d name=%s", __entry->irq, __get_str(name)) ); /** @@ -78,7 +78,7 @@ TRACE_EVENT(irq_handler_exit, __entry->ret = ret; ), - TP_printk("irq=%d return=%s", + TP_printk("irq=%d ret=%s", __entry->irq, __entry->ret ? 
"handled" : "unhandled") ); @@ -107,7 +107,7 @@ TRACE_EVENT(softirq_entry, __entry->vec = (int)(h - vec); ), - TP_printk("softirq=%d action=%s", __entry->vec, + TP_printk("vec=%d [action=%s]", __entry->vec, show_softirq_name(__entry->vec)) ); @@ -136,7 +136,7 @@ TRACE_EVENT(softirq_exit, __entry->vec = (int)(h - vec); ), - TP_printk("softirq=%d action=%s", __entry->vec, + TP_printk("vec=%d [action=%s]", __entry->vec, show_softirq_name(__entry->vec)) ); diff --git a/include/trace/events/power.h b/include/trace/events/power.h index ea6d579261a..9bb96e5a284 100644 --- a/include/trace/events/power.h +++ b/include/trace/events/power.h @@ -16,8 +16,6 @@ enum { }; #endif - - TRACE_EVENT(power_start, TP_PROTO(unsigned int type, unsigned int state), diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 4069c43f418..b50b9856c59 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -26,7 +26,7 @@ TRACE_EVENT(sched_kthread_stop, __entry->pid = t->pid; ), - TP_printk("task %s:%d", __entry->comm, __entry->pid) + TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid) ); /* @@ -46,7 +46,7 @@ TRACE_EVENT(sched_kthread_stop_ret, __entry->ret = ret; ), - TP_printk("ret %d", __entry->ret) + TP_printk("ret=%d", __entry->ret) ); /* @@ -73,7 +73,7 @@ TRACE_EVENT(sched_wait_task, __entry->prio = p->prio; ), - TP_printk("task %s:%d [%d]", + TP_printk("comm=%s pid=%d prio=%d", __entry->comm, __entry->pid, __entry->prio) ); @@ -94,7 +94,7 @@ TRACE_EVENT(sched_wakeup, __field( pid_t, pid ) __field( int, prio ) __field( int, success ) - __field( int, cpu ) + __field( int, target_cpu ) ), TP_fast_assign( @@ -102,12 +102,12 @@ TRACE_EVENT(sched_wakeup, __entry->pid = p->pid; __entry->prio = p->prio; __entry->success = success; - __entry->cpu = task_cpu(p); + __entry->target_cpu = task_cpu(p); ), - TP_printk("task %s:%d [%d] success=%d [%03d]", + TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d", __entry->comm, __entry->pid, __entry->prio, - __entry->success, __entry->cpu) + __entry->success, __entry->target_cpu) ); /* @@ -127,7 +127,7 @@ TRACE_EVENT(sched_wakeup_new, __field( pid_t, pid ) __field( int, prio ) __field( int, success ) - __field( int, cpu ) + __field( int, target_cpu ) ), TP_fast_assign( @@ -135,12 +135,12 @@ TRACE_EVENT(sched_wakeup_new, __entry->pid = p->pid; __entry->prio = p->prio; __entry->success = success; - __entry->cpu = task_cpu(p); + __entry->target_cpu = task_cpu(p); ), - TP_printk("task %s:%d [%d] success=%d [%03d]", + TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d", __entry->comm, __entry->pid, __entry->prio, - __entry->success, __entry->cpu) + __entry->success, __entry->target_cpu) ); /* @@ -176,7 +176,7 @@ TRACE_EVENT(sched_switch, __entry->next_prio = next->prio; ), - TP_printk("task %s:%d [%d] (%s) ==> %s:%d [%d]", + TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_pid=%d next_prio=%d", __entry->prev_comm, __entry->prev_pid, __entry->prev_prio, __entry->prev_state ? 
__print_flags(__entry->prev_state, "|", @@ -211,7 +211,7 @@ TRACE_EVENT(sched_migrate_task, __entry->dest_cpu = dest_cpu; ), - TP_printk("task %s:%d [%d] from: %d to: %d", + TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d", __entry->comm, __entry->pid, __entry->prio, __entry->orig_cpu, __entry->dest_cpu) ); @@ -237,7 +237,7 @@ TRACE_EVENT(sched_process_free, __entry->prio = p->prio; ), - TP_printk("task %s:%d [%d]", + TP_printk("comm=%s pid=%d prio=%d", __entry->comm, __entry->pid, __entry->prio) ); @@ -262,7 +262,7 @@ TRACE_EVENT(sched_process_exit, __entry->prio = p->prio; ), - TP_printk("task %s:%d [%d]", + TP_printk("comm=%s pid=%d prio=%d", __entry->comm, __entry->pid, __entry->prio) ); @@ -287,7 +287,7 @@ TRACE_EVENT(sched_process_wait, __entry->prio = current->prio; ), - TP_printk("task %s:%d [%d]", + TP_printk("comm=%s pid=%d prio=%d", __entry->comm, __entry->pid, __entry->prio) ); @@ -314,7 +314,7 @@ TRACE_EVENT(sched_process_fork, __entry->child_pid = child->pid; ), - TP_printk("parent %s:%d child %s:%d", + TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d", __entry->parent_comm, __entry->parent_pid, __entry->child_comm, __entry->child_pid) ); @@ -340,7 +340,7 @@ TRACE_EVENT(sched_signal_send, __entry->sig = sig; ), - TP_printk("sig: %d task %s:%d", + TP_printk("sig=%d comm=%s pid=%d", __entry->sig, __entry->comm, __entry->pid) ); @@ -374,7 +374,7 @@ TRACE_EVENT(sched_stat_wait, __perf_count(delay); ), - TP_printk("task: %s:%d wait: %Lu [ns]", + TP_printk("comm=%s pid=%d delay=%Lu [ns]", __entry->comm, __entry->pid, (unsigned long long)__entry->delay) ); @@ -406,7 +406,7 @@ TRACE_EVENT(sched_stat_runtime, __perf_count(runtime); ), - TP_printk("task: %s:%d runtime: %Lu [ns], vruntime: %Lu [ns]", + TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]", __entry->comm, __entry->pid, (unsigned long long)__entry->runtime, (unsigned long long)__entry->vruntime) @@ -437,7 +437,7 @@ TRACE_EVENT(sched_stat_sleep, __perf_count(delay); ), - TP_printk("task: %s:%d sleep: %Lu [ns]", + TP_printk("comm=%s pid=%d delay=%Lu [ns]", __entry->comm, __entry->pid, (unsigned long long)__entry->delay) ); @@ -467,7 +467,7 @@ TRACE_EVENT(sched_stat_iowait, __perf_count(delay); ), - TP_printk("task: %s:%d iowait: %Lu [ns]", + TP_printk("comm=%s pid=%d delay=%Lu [ns]", __entry->comm, __entry->pid, (unsigned long long)__entry->delay) ); diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h index 1844c48d640..e5ce87a0498 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h @@ -26,7 +26,7 @@ TRACE_EVENT(timer_init, __entry->timer = timer; ), - TP_printk("timer %p", __entry->timer) + TP_printk("timer=%p", __entry->timer) ); /** @@ -54,7 +54,7 @@ TRACE_EVENT(timer_start, __entry->now = jiffies; ), - TP_printk("timer %p: func %pf, expires %lu, timeout %ld", + TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld]", __entry->timer, __entry->function, __entry->expires, (long)__entry->expires - __entry->now) ); @@ -81,7 +81,7 @@ TRACE_EVENT(timer_expire_entry, __entry->now = jiffies; ), - TP_printk("timer %p: now %lu", __entry->timer, __entry->now) + TP_printk("timer=%p now=%lu", __entry->timer, __entry->now) ); /** @@ -108,7 +108,7 @@ TRACE_EVENT(timer_expire_exit, __entry->timer = timer; ), - TP_printk("timer %p", __entry->timer) + TP_printk("timer=%p", __entry->timer) ); /** @@ -129,7 +129,7 @@ TRACE_EVENT(timer_cancel, __entry->timer = timer; ), - TP_printk("timer %p", __entry->timer) + TP_printk("timer=%p", __entry->timer) ); /** @@ -140,24 
+140,24 @@ TRACE_EVENT(timer_cancel, */ TRACE_EVENT(hrtimer_init, - TP_PROTO(struct hrtimer *timer, clockid_t clockid, + TP_PROTO(struct hrtimer *hrtimer, clockid_t clockid, enum hrtimer_mode mode), - TP_ARGS(timer, clockid, mode), + TP_ARGS(hrtimer, clockid, mode), TP_STRUCT__entry( - __field( void *, timer ) + __field( void *, hrtimer ) __field( clockid_t, clockid ) __field( enum hrtimer_mode, mode ) ), TP_fast_assign( - __entry->timer = timer; + __entry->hrtimer = hrtimer; __entry->clockid = clockid; __entry->mode = mode; ), - TP_printk("hrtimer %p, clockid %s, mode %s", __entry->timer, + TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer, __entry->clockid == CLOCK_REALTIME ? "CLOCK_REALTIME" : "CLOCK_MONOTONIC", __entry->mode == HRTIMER_MODE_ABS ? @@ -170,26 +170,26 @@ TRACE_EVENT(hrtimer_init, */ TRACE_EVENT(hrtimer_start, - TP_PROTO(struct hrtimer *timer), + TP_PROTO(struct hrtimer *hrtimer), - TP_ARGS(timer), + TP_ARGS(hrtimer), TP_STRUCT__entry( - __field( void *, timer ) + __field( void *, hrtimer ) __field( void *, function ) __field( s64, expires ) __field( s64, softexpires ) ), TP_fast_assign( - __entry->timer = timer; - __entry->function = timer->function; - __entry->expires = hrtimer_get_expires(timer).tv64; - __entry->softexpires = hrtimer_get_softexpires(timer).tv64; + __entry->hrtimer = hrtimer; + __entry->function = hrtimer->function; + __entry->expires = hrtimer_get_expires(hrtimer).tv64; + __entry->softexpires = hrtimer_get_softexpires(hrtimer).tv64; ), - TP_printk("hrtimer %p, func %pf, expires %llu, softexpires %llu", - __entry->timer, __entry->function, + TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu", + __entry->hrtimer, __entry->function, (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->expires }), (unsigned long long)ktime_to_ns((ktime_t) { @@ -206,23 +206,22 @@ TRACE_EVENT(hrtimer_start, */ TRACE_EVENT(hrtimer_expire_entry, - TP_PROTO(struct hrtimer *timer, ktime_t *now), + TP_PROTO(struct hrtimer *hrtimer, ktime_t *now), - TP_ARGS(timer, now), + TP_ARGS(hrtimer, now), TP_STRUCT__entry( - __field( void *, timer ) + __field( void *, hrtimer ) __field( s64, now ) ), TP_fast_assign( - __entry->timer = timer; - __entry->now = now->tv64; + __entry->hrtimer = hrtimer; + __entry->now = now->tv64; ), - TP_printk("hrtimer %p, now %llu", __entry->timer, - (unsigned long long)ktime_to_ns((ktime_t) { - .tv64 = __entry->now })) + TP_printk("hrtimer=%p now=%llu", __entry->hrtimer, + (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now })) ); /** @@ -234,40 +233,40 @@ TRACE_EVENT(hrtimer_expire_entry, */ TRACE_EVENT(hrtimer_expire_exit, - TP_PROTO(struct hrtimer *timer), + TP_PROTO(struct hrtimer *hrtimer), - TP_ARGS(timer), + TP_ARGS(hrtimer), TP_STRUCT__entry( - __field( void *, timer ) + __field( void *, hrtimer ) ), TP_fast_assign( - __entry->timer = timer; + __entry->hrtimer = hrtimer; ), - TP_printk("hrtimer %p", __entry->timer) + TP_printk("hrtimer=%p", __entry->hrtimer) ); /** * hrtimer_cancel - called when the hrtimer is canceled - * @timer: pointer to struct hrtimer + * @hrtimer: pointer to struct hrtimer */ TRACE_EVENT(hrtimer_cancel, - TP_PROTO(struct hrtimer *timer), + TP_PROTO(struct hrtimer *hrtimer), - TP_ARGS(timer), + TP_ARGS(hrtimer), TP_STRUCT__entry( - __field( void *, timer ) + __field( void *, hrtimer ) ), TP_fast_assign( - __entry->timer = timer; + __entry->hrtimer = hrtimer; ), - TP_printk("hrtimer %p", __entry->timer) + TP_printk("hrtimer=%p", __entry->hrtimer) ); /** @@ -302,7 +301,7 @@ 
TRACE_EVENT(itimer_state, __entry->interval_usec = value->it_interval.tv_usec; ), - TP_printk("which %d, expires %lu, it_value %lu.%lu, it_interval %lu.%lu", + TP_printk("which=%d expires=%lu it_value=%lu.%lu it_interval=%lu.%lu", __entry->which, __entry->expires, __entry->value_sec, __entry->value_usec, __entry->interval_sec, __entry->interval_usec) @@ -332,7 +331,7 @@ TRACE_EVENT(itimer_expire, __entry->pid = pid_nr(pid); ), - TP_printk("which %d, pid %d, now %lu", __entry->which, + TP_printk("which=%d pid=%d now=%lu", __entry->which, (int) __entry->pid, __entry->now) ); From 5e09954a9acc3b435ffe318b95afd3c02fae069f Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Fri, 16 Oct 2009 12:31:32 +0200 Subject: [PATCH 139/470] x86, mce: Fix up MCE naming nomenclature Prefix global/setup routines with "mcheck_" thus differentiating from the internal facilities prefixed with "mce_". Also, prefix the per cpu calls with mcheck_cpu and rename them to reflect the MCE setup hierarchy of calls better. There should be no functionality change resulting from this patch. Signed-off-by: Borislav Petkov Cc: Andi Kleen LKML-Reference: <1255689093-26921-1-git-send-email-borislav.petkov@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mce.h | 4 +-- arch/x86/kernel/cpu/common.c | 2 +- arch/x86/kernel/cpu/mcheck/mce.c | 52 ++++++++++++++++---------------- 3 files changed, 29 insertions(+), 29 deletions(-) diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 227a72df644..161485da683 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -120,9 +120,9 @@ extern int mce_disabled; extern int mce_p5_enabled; #ifdef CONFIG_X86_MCE -void mcheck_init(struct cpuinfo_x86 *c); +void mcheck_cpu_init(struct cpuinfo_x86 *c); #else -static inline void mcheck_init(struct cpuinfo_x86 *c) {} +static inline void mcheck_cpu_init(struct cpuinfo_x86 *c) {} #endif #ifdef CONFIG_X86_ANCIENT_MCE diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index cc25c2b4a56..4df69a38be5 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -839,7 +839,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) #ifdef CONFIG_X86_MCE /* Init Machine Check Exception if available. */ - mcheck_init(c); + mcheck_cpu_init(c); #endif select_idle_routine(c); diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 0fb9dc50697..68d968e69b1 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -1136,7 +1136,7 @@ static int check_interval = 5 * 60; /* 5 minutes */ static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */ static DEFINE_PER_CPU(struct timer_list, mce_timer); -static void mcheck_timer(unsigned long data) +static void mce_start_timer(unsigned long data) { struct timer_list *t = &per_cpu(mce_timer, data); int *n; @@ -1220,7 +1220,7 @@ static int mce_banks_init(void) /* * Initialize Machine Checks for a CPU. 
*/ -static int __cpuinit mce_cap_init(void) +static int __cpuinit __mcheck_cpu_cap_init(void) { unsigned b; u64 cap; @@ -1258,7 +1258,7 @@ static int __cpuinit mce_cap_init(void) return 0; } -static void mce_init(void) +static void __mcheck_cpu_init_generic(void) { mce_banks_t all_banks; u64 cap; @@ -1287,7 +1287,7 @@ static void mce_init(void) } /* Add per CPU specific workarounds here */ -static int __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c) +static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) { if (c->x86_vendor == X86_VENDOR_UNKNOWN) { pr_info("MCE: unknown CPU type - not enabling MCE support.\n"); @@ -1355,7 +1355,7 @@ static int __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c) return 0; } -static void __cpuinit mce_ancient_init(struct cpuinfo_x86 *c) +static void __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c) { if (c->x86 != 5) return; @@ -1369,7 +1369,7 @@ static void __cpuinit mce_ancient_init(struct cpuinfo_x86 *c) } } -static void mce_cpu_features(struct cpuinfo_x86 *c) +static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) { switch (c->x86_vendor) { case X86_VENDOR_INTEL: @@ -1383,7 +1383,7 @@ static void mce_cpu_features(struct cpuinfo_x86 *c) } } -static void mce_init_timer(void) +static void __mcheck_cpu_init_timer(void) { struct timer_list *t = &__get_cpu_var(mce_timer); int *n = &__get_cpu_var(mce_next_interval); @@ -1394,7 +1394,7 @@ static void mce_init_timer(void) *n = check_interval * HZ; if (!*n) return; - setup_timer(t, mcheck_timer, smp_processor_id()); + setup_timer(t, mce_start_timer, smp_processor_id()); t->expires = round_jiffies(jiffies + *n); add_timer_on(t, smp_processor_id()); } @@ -1414,26 +1414,26 @@ void (*machine_check_vector)(struct pt_regs *, long error_code) = * Called for each booted CPU to set up machine checks. * Must be called with preempt off: */ -void __cpuinit mcheck_init(struct cpuinfo_x86 *c) +void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c) { if (mce_disabled) return; - mce_ancient_init(c); + __mcheck_cpu_ancient_init(c); if (!mce_available(c)) return; - if (mce_cap_init() < 0 || mce_cpu_quirks(c) < 0) { + if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) { mce_disabled = 1; return; } machine_check_vector = do_machine_check; - mce_init(); - mce_cpu_features(c); - mce_init_timer(); + __mcheck_cpu_init_generic(); + __mcheck_cpu_init_vendor(c); + __mcheck_cpu_init_timer(); INIT_WORK(&__get_cpu_var(mce_work), mce_process_work); if (raw_smp_processor_id() == 0) @@ -1665,7 +1665,7 @@ __setup("mce", mcheck_enable); * Disable machine checks on suspend and shutdown. We can't really handle * them later. 
*/ -static int mce_disable(void) +static int mce_disable_error_reporting(void) { int i; @@ -1680,12 +1680,12 @@ static int mce_disable(void) static int mce_suspend(struct sys_device *dev, pm_message_t state) { - return mce_disable(); + return mce_disable_error_reporting(); } static int mce_shutdown(struct sys_device *dev) { - return mce_disable(); + return mce_disable_error_reporting(); } /* @@ -1695,8 +1695,8 @@ static int mce_shutdown(struct sys_device *dev) */ static int mce_resume(struct sys_device *dev) { - mce_init(); - mce_cpu_features(¤t_cpu_data); + __mcheck_cpu_init_generic(); + __mcheck_cpu_init_vendor(¤t_cpu_data); return 0; } @@ -1706,8 +1706,8 @@ static void mce_cpu_restart(void *data) del_timer_sync(&__get_cpu_var(mce_timer)); if (!mce_available(¤t_cpu_data)) return; - mce_init(); - mce_init_timer(); + __mcheck_cpu_init_generic(); + __mcheck_cpu_init_timer(); } /* Reinit MCEs after user configuration changes */ @@ -1733,7 +1733,7 @@ static void mce_enable_ce(void *all) cmci_reenable(); cmci_recheck(); if (all) - mce_init_timer(); + __mcheck_cpu_init_timer(); } static struct sysdev_class mce_sysclass = { @@ -2042,7 +2042,7 @@ static __init void mce_init_banks(void) } } -static __init int mce_init_device(void) +static __init int mcheck_init_device(void) { int err; int i = 0; @@ -2070,7 +2070,7 @@ static __init int mce_init_device(void) return err; } -device_initcall(mce_init_device); +device_initcall(mcheck_init_device); /* * Old style boot options parsing. Only for compatibility. @@ -2118,7 +2118,7 @@ static int fake_panic_set(void *data, u64 val) DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get, fake_panic_set, "%llu\n"); -static int __init mce_debugfs_init(void) +static int __init mcheck_debugfs_init(void) { struct dentry *dmce, *ffake_panic; @@ -2132,5 +2132,5 @@ static int __init mce_debugfs_init(void) return 0; } -late_initcall(mce_debugfs_init); +late_initcall(mcheck_debugfs_init); #endif From b33a6363649f0ff83ec81597ea7fe7e688f973cb Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Fri, 16 Oct 2009 12:31:33 +0200 Subject: [PATCH 140/470] x86, mce: Add a global MCE init helper Add an early initcall (pre SMP) which sets up global MCE functionality. Signed-off-by: Borislav Petkov Cc: Andi Kleen LKML-Reference: <1255689093-26921-2-git-send-email-borislav.petkov@amd.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mcheck/mce.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 68d968e69b1..80801705edd 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -1436,8 +1436,6 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c) __mcheck_cpu_init_timer(); INIT_WORK(&__get_cpu_var(mce_work), mce_process_work); - if (raw_smp_processor_id() == 0) - atomic_notifier_chain_register(&x86_mce_decoder_chain, &mce_dec_nb); } /* @@ -1657,6 +1655,14 @@ static int __init mcheck_enable(char *str) } __setup("mce", mcheck_enable); +static int __init mcheck_init(void) +{ + atomic_notifier_chain_register(&x86_mce_decoder_chain, &mce_dec_nb); + + return 0; +} +early_initcall(mcheck_init); + /* * Sysfs support */ From f39cdf25bf77219676ec5360980ac40b1a7e144a Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 17 Oct 2009 08:43:17 +0200 Subject: [PATCH 141/470] perf tools: Move dereference after NULL test In each case, if the NULL test on thread is needed, then the dereference should be after the NULL test. 
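Condensed from the builtin-annotate.c hunk below, the broken pattern and its fix look like this:

	/* before: 'thread' is dereferenced one line above the NULL check */
	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
	if (thread == NULL) {
		fprintf(stderr, "problem processing %d event, skipping it.\n",
			event->header.type);
		return -1;
	}

	/* after: test first, dereference only once 'thread' is known valid */
	if (thread == NULL) {
		fprintf(stderr, "problem processing %d event, skipping it.\n",
			event->header.type);
		return -1;
	}
	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);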
A simplified version of the semantic match that detects this problem is as follows (http://coccinelle.lip6.fr/): // @match exists@ expression x, E; identifier fld; @@ * x->fld ... when != \(x = E\|&x\) * x == NULL // Signed-off-by: Julia Lawall LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 4 ++-- tools/perf/builtin-report.c | 4 ++-- tools/perf/builtin-sched.c | 4 ++-- tools/perf/builtin-trace.c | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 3fe0de03004..56ba71658d7 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -104,14 +104,14 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) event->ip.pid, (void *)(long)ip); - dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); - if (thread == NULL) { fprintf(stderr, "problem processing %d event, skipping it.\n", event->header.type); return -1; } + dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); + if (event->header.misc & PERF_RECORD_MISC_KERNEL) { level = 'k'; sym = kernel_maps__find_symbol(ip, &map); diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 015c7974596..a4f8cc20915 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -629,14 +629,14 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) } } - dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); - if (thread == NULL) { eprintf("problem processing %d event, skipping it.\n", event->header.type); return -1; } + dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); + if (comm_list && !strlist__has_entry(comm_list, thread->comm)) return 0; diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index c9c68563e96..57ad3f458ef 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -1667,14 +1667,14 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) (void *)(long)ip, (long long)period); - dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); - if (thread == NULL) { eprintf("problem processing %d event, skipping it.\n", event->header.type); return -1; } + dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); + if (profile_cpu != -1 && profile_cpu != (int) cpu) return 0; diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index ce8459ac284..4c129ff0bb1 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -80,14 +80,14 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) (void *)(long)ip, (long long)period); - dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); - if (thread == NULL) { eprintf("problem processing %d event, skipping it.\n", event->header.type); return -1; } + dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); + if (sample_type & PERF_SAMPLE_RAW) { struct { u32 size; From f397af06e4c9bf5a0bc92facb8cb29905e338ab0 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 16 Oct 2009 20:07:20 -0400 Subject: [PATCH 142/470] tracing/kprobes: Update kprobe-tracer selftest against new syntax Update kprobe-tracer selftest since command syntax has been changed. 
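The selftest feeds the same strings a user would write to the kprobe_events debugfs file, so the update simply tracks the new user-visible argument syntax; an illustrative example (probe names are made up, and the path depends on where debugfs is mounted):

	echo 'p:myprobe kprobe_trace_selftest_target $arg1 $stack0' >> \
		/sys/kernel/debug/tracing/kprobe_events
	echo 'r:myretprobe kprobe_trace_selftest_target $retval' >> \
		/sys/kernel/debug/tracing/kprobe_events

Here $argN, $stack/$stackN and $retval replace the old a1..a6, ra and rv names the selftest used before.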
Signed-off-by: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <20091017000720.16556.26343.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_kprobe.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 739f70e8e92..cdacdab1002 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1438,12 +1438,12 @@ static __init int kprobe_trace_self_tests_init(void) pr_info("Testing kprobe tracing: "); ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target " - "a1 a2 a3 a4 a5 a6"); + "$arg1 $arg2 $arg3 $arg4 $stack $stack0"); if (WARN_ON_ONCE(ret)) pr_warning("error enabling function entry\n"); ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target " - "ra rv"); + "$retval"); if (WARN_ON_ONCE(ret)) pr_warning("error enabling function return\n"); From e63cc2397ecc0f2b604f22fb9cdbb05911c1e5d4 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 16 Oct 2009 20:07:28 -0400 Subject: [PATCH 143/470] tracing/kprobes: Add failure messages for debugging Add verbose failure messages to kprobe-tracer for debugging. Signed-off-by: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <20091017000728.16556.16713.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_kprobe.c | 35 +++++++++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index cdacdab1002..b8ef707c84d 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -597,15 +597,19 @@ static int create_trace_probe(int argc, char **argv) void *addr = NULL; char buf[MAX_EVENT_NAME_LEN]; - if (argc < 2) + if (argc < 2) { + pr_info("Probe point is not specified.\n"); return -EINVAL; + } if (argv[0][0] == 'p') is_return = 0; else if (argv[0][0] == 'r') is_return = 1; - else + else { + pr_info("Probe definition must be started with 'p' or 'r'.\n"); return -EINVAL; + } if (argv[0][1] == ':') { event = &argv[0][2]; @@ -625,21 +629,29 @@ static int create_trace_probe(int argc, char **argv) } if (isdigit(argv[1][0])) { - if (is_return) + if (is_return) { + pr_info("Return probe point must be a symbol.\n"); return -EINVAL; + } /* an address specified */ ret = strict_strtoul(&argv[0][2], 0, (unsigned long *)&addr); - if (ret) + if (ret) { + pr_info("Failed to parse address.\n"); return ret; + } } else { /* a symbol specified */ symbol = argv[1]; /* TODO: support .init module functions */ ret = split_symbol_offset(symbol, &offset); - if (ret) + if (ret) { + pr_info("Failed to parse symbol.\n"); return ret; - if (offset && is_return) + } + if (offset && is_return) { + pr_info("Return probe must be used without offset.\n"); return -EINVAL; + } } argc -= 2; argv += 2; @@ -658,8 +670,11 @@ static int create_trace_probe(int argc, char **argv) } tp = alloc_trace_probe(group, event, addr, symbol, offset, argc, is_return); - if (IS_ERR(tp)) + if (IS_ERR(tp)) { + pr_info("Failed to allocate trace_probe.(%d)\n", + (int)PTR_ERR(tp)); return PTR_ERR(tp); + } /* parse arguments */ ret = 0; @@ -672,6 +687,8 @@ static int create_trace_probe(int argc, char **argv) arg = argv[i]; if (conflict_field_name(argv[i], tp->args, i)) { + pr_info("Argument%d name '%s' conflicts with " + "another 
field.\n", i, argv[i]); ret = -EINVAL; goto error; } @@ -685,8 +702,10 @@ static int create_trace_probe(int argc, char **argv) goto error; } ret = parse_probe_arg(arg, &tp->args[i].fetch, is_return); - if (ret) + if (ret) { + pr_info("Parse error at argument%d. (%d)\n", i, ret); goto error; + } } tp->nr_args = i; From 8c95bc3e206cff7a55edd2fc5f0e2b305d57903f Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 16 Oct 2009 20:07:36 -0400 Subject: [PATCH 144/470] x86: Add MMX/SSE opcode groups to opcode map Add missing MMX/SSE opcode groups to x86 opcode map. Signed-off-by: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <20091017000736.16556.29061.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- arch/x86/lib/x86-opcode-map.txt | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index 78a0daf12e1..e7285d8379e 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt @@ -777,12 +777,22 @@ GrpTable: Grp11 EndTable GrpTable: Grp12 +2: psrlw Nq,Ib (11B) | psrlw Udq,Ib (66),(11B) +4: psraw Nq,Ib (11B) | psraw Udq,Ib (66),(11B) +6: psllw Nq,Ib (11B) | psllw Udq,Ib (66),(11B) EndTable GrpTable: Grp13 +2: psrld Nq,Ib (11B) | psrld Udq,Ib (66),(11B) +4: psrad Nq,Ib (11B) | psrad Udq,Ib (66),(11B) +6: pslld Nq,Ib (11B) | pslld Udq,Ib (66),(11B) EndTable GrpTable: Grp14 +2: psrlq Nq,Ib (11B) | psrlq Udq,Ib (66),(11B) +3: psrldq Udq,Ib (66),(11B) +6: psllq Nq,Ib (11B) | psllq Udq,Ib (66),(11B) +7: pslldq Udq,Ib (66),(11B) EndTable GrpTable: Grp15 From d1baf5a5a6088e2991b7dbbd370ff200bd6615ce Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 16 Oct 2009 20:07:44 -0400 Subject: [PATCH 145/470] x86: Add AMD prefetch and 3DNow! opcodes to opcode map Add AMD prefetch and 3DNow! opcode including FEMMS. Since 3DNow! uses the last immediate byte as an opcode extension byte, x86 insn just treats the extenstion byte as an immediate byte instead of a part of opcode (insn_get_opcode() decodes first "0x0f 0x0f" bytes.) Users who are interested in analyzing 3DNow! opcode still can decode it by analyzing the immediate byte. Signed-off-by: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <20091017000744.16556.27881.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- arch/x86/lib/x86-opcode-map.txt | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index e7285d8379e..894497f7780 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt @@ -306,9 +306,10 @@ Referrer: 2-byte escape 0a: 0b: UD2 (1B) 0c: -0d: NOP Ev -0e: -0f: +0d: NOP Ev | GrpP +0e: FEMMS +# 3DNow! uses the last imm byte as opcode extension. +0f: 3DNow! 
Pq,Qq,Ib # 0x0f 0x10-0x1f 10: movups Vps,Wps | movss Vss,Wss (F3) | movupd Vpd,Wpd (66) | movsd Vsd,Wsd (F2) 11: movups Wps,Vps | movss Wss,Vss (F3) | movupd Wpd,Vpd (66) | movsd Wsd,Vsd (F2) @@ -813,6 +814,12 @@ GrpTable: Grp16 3: prefetch T2 EndTable +# AMD's Prefetch Group +GrpTable: GrpP +0: PREFETCH +1: PREFETCHW +EndTable + GrpTable: GrpPDLK 0: MONTMUL 1: XSHA1 From 4c20194c2de151bca14224ae384b47abf7636a95 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 16 Oct 2009 20:07:52 -0400 Subject: [PATCH 146/470] perf: Check libdwarf APIs for perf probe Check libdwarf APIs for perf probe in tools/perf/Makefile. Since dwarf_get_ranges() has been added from libdwarf 20081231 (and it's the newest function used in probe-finder.c), this just checks whether the function is defined. Signed-off-by: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <20091017000752.16556.92051.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 52b1f438e71..03c27b9068a 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -420,8 +420,8 @@ ifneq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { Elf * msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel); endif -ifneq ($(shell sh -c "(echo '\#include '; echo '\#include '; echo 'int main(void) { Dwarf_Debug dbg; Dwarf_Error err; dwarf_init(0, DW_DLC_READ, 0, 0, &dbg, &err); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -ldwarf -lelf -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y) - msg := $(warning No libdwarf.h found, disables dwarf support. Please install libdwarf-dev/libdwarf-devel); +ifneq ($(shell sh -c "(echo '\#include '; echo '\#include '; echo 'int main(void) { Dwarf_Debug dbg; Dwarf_Error err; Dwarf_Ranges *rng; dwarf_init(0, DW_DLC_READ, 0, 0, &dbg, &err); dwarf_get_ranges(dbg, 0, &rng, 0, 0, &err); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -ldwarf -lelf -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y) + msg := $(warning No libdwarf.h found or old libdwarf.h found, disables dwarf support. Please install libdwarf-dev/libdwarf-devel >= 20081231); BASIC_CFLAGS += -DNO_LIBDWARF else EXTLIBS += -lelf -ldwarf From 074fc0e4b3f5d24306c2995f2f3b0bd4759e8aeb Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 16 Oct 2009 20:08:01 -0400 Subject: [PATCH 147/470] perf: Use die() for error cases in perf-probe Use die() for exiting perf-probe with errors. This replaces perror_exit(), msg_exit() and fprintf()+exit() with die(), and uses die() in semantic_error(). This also renames 'die' local variables to 'dw_die' for avoiding name confliction. 
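The rename matters because perf's shared util.h provides the fatal-error helper die(), which a Dwarf_Die local named 'die' would shadow inside those functions; condensed from the hunks below:

	-static int die_compare_name(Dwarf_Die die, const char *tname)
	+static int die_compare_name(Dwarf_Die dw_die, const char *tname)
	...
	+	die("Dwarf_OP %d is not supported.\n", op);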
Signed-off-by: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <20091017000801.16556.46866.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-probe.c | 47 +++++++++-------------------- tools/perf/util/probe-finder.c | 55 +++++++++++++--------------------- 2 files changed, 34 insertions(+), 68 deletions(-) diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index 73c883b715c..a1467d12547 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -49,6 +49,7 @@ const char *default_search_path[NR_SEARCH_PATH] = { #define MAX_PATH_LEN 256 #define MAX_PROBES 128 +#define MAX_PROBE_ARGS 128 /* Session management structure */ static struct { @@ -60,19 +61,7 @@ static struct { char *events[MAX_PROBES]; } session; -static void semantic_error(const char *msg) -{ - fprintf(stderr, "Semantic error: %s\n", msg); - exit(1); -} - -static void perror_exit(const char *msg) -{ - perror(msg); - exit(1); -} - -#define MAX_PROBE_ARGS 128 +#define semantic_error(msg ...) die("Semantic error :" msg) static int parse_probepoint(const struct option *opt __used, const char *str, int unset __used) @@ -109,7 +98,7 @@ static int parse_probepoint(const struct option *opt __used, /* Duplicate the argument */ argv[argc] = strndup(s, str - s); if (argv[argc] == NULL) - perror_exit("strndup"); + die("strndup"); if (++argc == MAX_PROBE_ARGS) semantic_error("Too many arguments"); debug("argv[%d]=%s\n", argc, argv[argc - 1]); @@ -171,7 +160,7 @@ static int parse_probepoint(const struct option *opt __used, if (pp->nr_args > 0) { pp->args = (char **)malloc(sizeof(char *) * pp->nr_args); if (!pp->args) - perror_exit("malloc"); + die("malloc"); memcpy(pp->args, &argv[2], sizeof(char *) * pp->nr_args); } @@ -260,7 +249,7 @@ static int write_new_event(int fd, const char *buf) printf("Adding new event: %s\n", buf); ret = write(fd, buf, strlen(buf)); if (ret <= 0) - perror("Error: Failed to create event"); + die("failed to create event."); return ret; } @@ -273,7 +262,7 @@ static int synthesize_probepoint(struct probe_point *pp) int i, len, ret; pp->probes[0] = buf = (char *)calloc(MAX_CMDLEN, sizeof(char)); if (!buf) - perror_exit("calloc"); + die("calloc"); ret = snprintf(buf, MAX_CMDLEN, "%s+%d", pp->function, pp->offset); if (ret <= 0 || ret >= MAX_CMDLEN) goto error; @@ -322,10 +311,8 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) ret = synthesize_probepoint(&session.probes[j]); if (ret == -E2BIG) semantic_error("probe point is too long."); - else if (ret < 0) { - perror("snprintf"); - return -1; - } + else if (ret < 0) + die("snprintf"); } #ifndef NO_LIBDWARF @@ -336,10 +323,8 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) fd = open(session.vmlinux, O_RDONLY); else fd = open_default_vmlinux(); - if (fd < 0) { - perror("vmlinux/module file open"); - return -1; - } + if (fd < 0) + die("vmlinux/module file open"); /* Searching probe points */ for (j = 0; j < session.nr_probe; j++) { @@ -349,10 +334,8 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) lseek(fd, SEEK_SET, 0); ret = find_probepoint(fd, pp); - if (ret <= 0) { - fprintf(stderr, "Error: No probe point found.\n"); - return -1; - } + if (ret <= 0) + die("No probe point found.\n"); debug("probe event %s found\n", session.events[j]); } close(fd); @@ -363,10 +346,8 @@ setup_probes: /* Settng up probe points */ snprintf(buf, MAX_CMDLEN, 
"%s/../kprobe_events", debugfs_path); fd = open(buf, O_WRONLY, O_APPEND); - if (fd < 0) { - perror("kprobe_events open"); - return -1; - } + if (fd < 0) + die("kprobe_events open"); for (j = 0; j < session.nr_probe; j++) { pp = &session.probes[j]; if (pp->found == 1) { diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index ec6f53f29e0..338fdb9e093 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -31,6 +31,8 @@ #include #include #include + +#include "util.h" #include "probe-finder.h" @@ -43,20 +45,6 @@ struct die_link { static Dwarf_Debug __dw_debug; static Dwarf_Error __dw_error; -static void msg_exit(int ret, const char *fmt, ...) -{ - va_list ap; - - va_start(ap, fmt); - fprintf(stderr, "Error: "); - vfprintf(stderr, fmt, ap); - va_end(ap); - - fprintf(stderr, "\n"); - exit(ret); -} - - /* * Generic dwarf analysis helpers */ @@ -151,11 +139,11 @@ static Dwarf_Unsigned die_get_fileno(Dwarf_Die cu_die, const char *fname) } /* Compare diename and tname */ -static int die_compare_name(Dwarf_Die die, const char *tname) +static int die_compare_name(Dwarf_Die dw_die, const char *tname) { char *name; int ret; - ret = dwarf_diename(die, &name, &__dw_error); + ret = dwarf_diename(dw_die, &name, &__dw_error); ERR_IF(ret == DW_DLV_ERROR); if (ret == DW_DLV_OK) { ret = strcmp(tname, name); @@ -187,25 +175,25 @@ static int die_within_subprogram(Dwarf_Die sp_die, Dwarf_Addr addr, } /* Check the die is inlined function */ -static Dwarf_Bool die_inlined_subprogram(Dwarf_Die die) +static Dwarf_Bool die_inlined_subprogram(Dwarf_Die dw_die) { /* TODO: check strictly */ Dwarf_Bool inl; int ret; - ret = dwarf_hasattr(die, DW_AT_inline, &inl, &__dw_error); + ret = dwarf_hasattr(dw_die, DW_AT_inline, &inl, &__dw_error); ERR_IF(ret == DW_DLV_ERROR); return inl; } /* Get the offset of abstruct_origin */ -static Dwarf_Off die_get_abstract_origin(Dwarf_Die die) +static Dwarf_Off die_get_abstract_origin(Dwarf_Die dw_die) { Dwarf_Attribute attr; Dwarf_Off cu_offs; int ret; - ret = dwarf_attr(die, DW_AT_abstract_origin, &attr, &__dw_error); + ret = dwarf_attr(dw_die, DW_AT_abstract_origin, &attr, &__dw_error); ERR_IF(ret != DW_DLV_OK); ret = dwarf_formref(attr, &cu_offs, &__dw_error); ERR_IF(ret != DW_DLV_OK); @@ -214,7 +202,7 @@ static Dwarf_Off die_get_abstract_origin(Dwarf_Die die) } /* Get entry pc(or low pc, 1st entry of ranges) of the die */ -static Dwarf_Addr die_get_entrypc(Dwarf_Die die) +static Dwarf_Addr die_get_entrypc(Dwarf_Die dw_die) { Dwarf_Attribute attr; Dwarf_Addr addr; @@ -224,7 +212,7 @@ static Dwarf_Addr die_get_entrypc(Dwarf_Die die) int ret; /* Try to get entry pc */ - ret = dwarf_attr(die, DW_AT_entry_pc, &attr, &__dw_error); + ret = dwarf_attr(dw_die, DW_AT_entry_pc, &attr, &__dw_error); ERR_IF(ret == DW_DLV_ERROR); if (ret == DW_DLV_OK) { ret = dwarf_formaddr(attr, &addr, &__dw_error); @@ -234,13 +222,13 @@ static Dwarf_Addr die_get_entrypc(Dwarf_Die die) } /* Try to get low pc */ - ret = dwarf_lowpc(die, &addr, &__dw_error); + ret = dwarf_lowpc(dw_die, &addr, &__dw_error); ERR_IF(ret == DW_DLV_ERROR); if (ret == DW_DLV_OK) return addr; /* Try to get ranges */ - ret = dwarf_attr(die, DW_AT_ranges, &attr, &__dw_error); + ret = dwarf_attr(dw_die, DW_AT_ranges, &attr, &__dw_error); ERR_IF(ret != DW_DLV_OK); ret = dwarf_formref(attr, &offs, &__dw_error); ERR_IF(ret != DW_DLV_OK); @@ -382,11 +370,11 @@ static void show_location(Dwarf_Loc *loc, struct probe_finder *pf) } else if (op == DW_OP_regx) { regn = loc->lr_number; } else - 
msg_exit(-EINVAL, "Dwarf_OP %d is not supported.\n", op); + die("Dwarf_OP %d is not supported.\n", op); regs = get_arch_regstr(regn); if (!regs) - msg_exit(-EINVAL, "%lld exceeds max register number.\n", regn); + die("%lld exceeds max register number.\n", regn); if (deref) ret = snprintf(pf->buf, pf->len, @@ -417,8 +405,8 @@ static void show_variable(Dwarf_Die vr_die, struct probe_finder *pf) dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR); return ; error: - msg_exit(-1, "Failed to find the location of %s at this address.\n" - " Perhaps, it was optimized out.\n", pf->var); + die("Failed to find the location of %s at this address.\n" + " Perhaps, it has been optimized out.\n", pf->var); } static int variable_callback(struct die_link *dlink, void *data) @@ -456,8 +444,7 @@ static void find_variable(Dwarf_Die sp_die, struct probe_finder *pf) /* Search child die for local variables and parameters. */ ret = search_die_from_children(sp_die, variable_callback, pf); if (!ret) - msg_exit(-1, "Failed to find '%s' in this function.\n", - pf->var); + die("Failed to find '%s' in this function.\n", pf->var); } /* Get a frame base on the address */ @@ -568,8 +555,7 @@ static void find_by_line(Dwarf_Die cu_die, struct probe_finder *pf) /* Search a real subprogram including this line, */ ret = search_die_from_children(cu_die, probeaddr_callback, pf); if (ret == 0) - msg_exit(-1, - "Probe point is not found in subprograms.\n"); + die("Probe point is not found in subprograms.\n"); /* Continuing, because target line might be inlined. */ } dwarf_srclines_dealloc(__dw_debug, lines, cnt); @@ -621,7 +607,7 @@ static int probefunc_callback(struct die_link *dlink, void *data) !die_inlined_subprogram(lk->die)) goto found; } - msg_exit(-1, "Failed to find real subprogram.\n"); + die("Failed to find real subprogram.\n"); found: /* Get offset from subprogram */ ret = die_within_subprogram(lk->die, pf->addr, &offs); @@ -649,8 +635,7 @@ int find_probepoint(int fd, struct probe_point *pp) ret = dwarf_init(fd, DW_DLC_READ, 0, 0, &__dw_debug, &__dw_error); if (ret != DW_DLV_OK) - msg_exit(-1, "Failed to call dwarf_init(). " - "Maybe, not a dwarf file?\n"); + die("Failed to call dwarf_init(). Maybe, not a dwarf file.\n"); pp->found = 0; while (++cu_number) { From 89c69c0eee7515cdc217f4278de43547284b3458 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 16 Oct 2009 20:08:10 -0400 Subject: [PATCH 148/470] perf: Use eprintf() for debug messages in perf-probe Replace debug() macro with eprintf() and add -v option for showing those messages in perf-probe. 
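Previously these messages were compiled out unless the tool was built with DEBUG defined (see the debug() macro removed from probe-finder.h below); after this change they are always built in and, presumably, only emitted when the shared 'verbose' flag is set at run time, e.g.:

	perf probe -v <probe definition>

(the eprintf() implementation itself comes from perf's debug helpers and is not part of this patch).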
Signed-off-by: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <20091017000810.16556.38013.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-probe.c | 23 ++++++++++++++--------- tools/perf/util/probe-finder.c | 12 +++++++----- tools/perf/util/probe-finder.h | 7 ------- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index a1467d12547..b5ad86a265f 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -35,6 +35,8 @@ #include "perf.h" #include "builtin.h" #include "util/util.h" +#include "util/event.h" +#include "util/debug.h" #include "util/parse-options.h" #include "util/parse-events.h" /* For debugfs_path */ #include "util/probe-finder.h" @@ -76,7 +78,7 @@ static int parse_probepoint(const struct option *opt __used, if (!str) /* The end of probe points */ return 0; - debug("Probe-define(%d): %s\n", session.nr_probe, str); + eprintf("probe-definition(%d): %s\n", session.nr_probe, str); if (++session.nr_probe == MAX_PROBES) semantic_error("Too many probes"); @@ -101,7 +103,7 @@ static int parse_probepoint(const struct option *opt __used, die("strndup"); if (++argc == MAX_PROBE_ARGS) semantic_error("Too many arguments"); - debug("argv[%d]=%s\n", argc, argv[argc - 1]); + eprintf("argv[%d]=%s\n", argc, argv[argc - 1]); } } while (*str != '\0'); if (argc < 2) @@ -131,7 +133,7 @@ static int parse_probepoint(const struct option *opt __used, pp->line = atoi(ptr); if (!pp->file || !pp->line) semantic_error("Failed to parse line."); - debug("file:%s line:%d\n", pp->file, pp->line); + eprintf("file:%s line:%d\n", pp->file, pp->line); } else { /* Function name */ ptr = strchr(arg, '+'); @@ -148,7 +150,7 @@ static int parse_probepoint(const struct option *opt __used, pp->file = strdup(ptr); } pp->function = strdup(arg); - debug("symbol:%s file:%s offset:%d\n", + eprintf("symbol:%s file:%s offset:%d\n", pp->function, pp->file, pp->offset); } free(argv[1]); @@ -173,7 +175,7 @@ static int parse_probepoint(const struct option *opt __used, session.need_dwarf = 1; } - debug("%d arguments\n", pp->nr_args); + eprintf("%d arguments\n", pp->nr_args); return 0; } @@ -186,7 +188,7 @@ static int open_default_vmlinux(void) ret = uname(&uts); if (ret) { - debug("uname() failed.\n"); + eprintf("uname() failed.\n"); return -errno; } session.release = uts.release; @@ -194,11 +196,12 @@ static int open_default_vmlinux(void) ret = snprintf(fname, MAX_PATH_LEN, default_search_path[i], session.release); if (ret >= MAX_PATH_LEN || ret < 0) { - debug("Filename(%d,%s) is too long.\n", i, uts.release); + eprintf("Filename(%d,%s) is too long.\n", i, + uts.release); errno = E2BIG; return -E2BIG; } - debug("try to open %s\n", fname); + eprintf("try to open %s\n", fname); fd = open(fname, O_RDONLY); if (fd >= 0) break; @@ -213,6 +216,8 @@ static const char * const probe_usage[] = { }; static const struct option options[] = { + OPT_BOOLEAN('v', "verbose", &verbose, + "be more verbose (show parsed arguments, etc)"), #ifndef NO_LIBDWARF OPT_STRING('k', "vmlinux", &session.vmlinux, "file", "vmlinux/module pathname"), @@ -336,7 +341,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) ret = find_probepoint(fd, pp); if (ret <= 0) die("No probe point found.\n"); - debug("probe event %s found\n", session.events[j]); + eprintf("probe event %s found\n", session.events[j]); } close(fd); diff --git 
a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 338fdb9e093..db24c913225 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -32,6 +32,8 @@ #include #include +#include "event.h" +#include "debug.h" #include "util.h" #include "probe-finder.h" @@ -134,7 +136,7 @@ static Dwarf_Unsigned die_get_fileno(Dwarf_Die cu_die, const char *fname) dwarf_dealloc(__dw_debug, srcs, DW_DLA_LIST); } if (found) - debug("found fno: %d\n", (int)found); + eprintf("found fno: %d\n", (int)found); return found; } @@ -440,7 +442,7 @@ static void find_variable(Dwarf_Die sp_die, struct probe_finder *pf) return ; } - debug("Searching '%s' variable in context.\n", pf->var); + eprintf("Searching '%s' variable in context.\n", pf->var); /* Search child die for local variables and parameters. */ ret = search_die_from_children(sp_die, variable_callback, pf); if (!ret) @@ -550,7 +552,7 @@ static void find_by_line(Dwarf_Die cu_die, struct probe_finder *pf) ret = dwarf_lineaddr(lines[i], &addr, &__dw_error); ERR_IF(ret != DW_DLV_OK); - debug("Probe point found: 0x%llx\n", addr); + eprintf("Probe point found: 0x%llx\n", addr); pf->addr = addr; /* Search a real subprogram including this line, */ ret = search_die_from_children(cu_die, probeaddr_callback, pf); @@ -581,7 +583,7 @@ static int probefunc_callback(struct die_link *dlink, void *data) &pf->inl_offs, &__dw_error); ERR_IF(ret != DW_DLV_OK); - debug("inline definition offset %lld\n", + eprintf("inline definition offset %lld\n", pf->inl_offs); return 0; } @@ -597,7 +599,7 @@ static int probefunc_callback(struct die_link *dlink, void *data) /* Get probe address */ pf->addr = die_get_entrypc(dlink->die); pf->addr += pp->offset; - debug("found inline addr: 0x%llx\n", pf->addr); + eprintf("found inline addr: 0x%llx\n", pf->addr); /* Inlined function. Get a real subprogram */ for (lk = dlink->parent; lk != NULL; lk = lk->parent) { tag = 0; diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h index 306810c32f6..6a7cb0c04e9 100644 --- a/tools/perf/util/probe-finder.h +++ b/tools/perf/util/probe-finder.h @@ -4,13 +4,6 @@ #define _stringify(n) #n #define stringify(n) _stringify(n) -#ifdef DEBUG -#define debug(fmt ...) \ - fprintf(stderr, "DBG(" __FILE__ ":" stringify(__LINE__) "): " fmt) -#else -#define debug(fmt ...) do {} while (0) -#endif - #define ERR_IF(cnd) \ do { if (cnd) { \ fprintf(stderr, "Error (" __FILE__ ":" stringify(__LINE__) \ From 9769833b8e4425dc93fc837bf124c6cb02a51abb Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 16 Oct 2009 20:08:18 -0400 Subject: [PATCH 149/470] perf: Add DIE_IF() macro for error checking Add DIE_IF() macro and replace ERR_IF() with it, and use linux/stringify.h. 
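For reference, with the DIE_IF() definition added to util/util.h below, a failing
check such as DIE_IF(ret == DW_DLV_ERROR) on, say, line 147 of probe-finder.c
(a made-up line number, and the exact file string depends on how the compiler
was invoked) expands via __stringify() into roughly:

    die(" at (" "util/probe-finder.c" ":" "147" "): " "ret == DW_DLV_ERROR" "\n");

so the fatal message still carries both the source location and the text of the
failed condition, which ERR_IF() previously printed by hand before exiting.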
Signed-off-by: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <20091017000818.16556.82452.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 1 + tools/perf/util/probe-finder.c | 82 +++++++++++++++++----------------- tools/perf/util/probe-finder.h | 10 ----- tools/perf/util/util.h | 9 ++++ 4 files changed, 51 insertions(+), 51 deletions(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 03c27b9068a..1abbf9a5ee5 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -321,6 +321,7 @@ LIB_FILE=libperf.a LIB_H += ../../include/linux/perf_event.h LIB_H += ../../include/linux/rbtree.h LIB_H += ../../include/linux/list.h +LIB_H += ../../include/linux/stringify.h LIB_H += util/include/linux/list.h LIB_H += perf.h LIB_H += util/types.h diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index db24c913225..be997abdf5b 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -146,7 +146,7 @@ static int die_compare_name(Dwarf_Die dw_die, const char *tname) char *name; int ret; ret = dwarf_diename(dw_die, &name, &__dw_error); - ERR_IF(ret == DW_DLV_ERROR); + DIE_IF(ret == DW_DLV_ERROR); if (ret == DW_DLV_OK) { ret = strcmp(tname, name); dwarf_dealloc(__dw_debug, name, DW_DLA_STRING); @@ -164,11 +164,11 @@ static int die_within_subprogram(Dwarf_Die sp_die, Dwarf_Addr addr, /* TODO: check ranges */ ret = dwarf_lowpc(sp_die, &lopc, &__dw_error); - ERR_IF(ret == DW_DLV_ERROR); + DIE_IF(ret == DW_DLV_ERROR); if (ret == DW_DLV_NO_ENTRY) return 0; ret = dwarf_highpc(sp_die, &hipc, &__dw_error); - ERR_IF(ret != DW_DLV_OK); + DIE_IF(ret != DW_DLV_OK); if (lopc <= addr && addr < hipc) { *offs = addr - lopc; return 1; @@ -184,7 +184,7 @@ static Dwarf_Bool die_inlined_subprogram(Dwarf_Die dw_die) int ret; ret = dwarf_hasattr(dw_die, DW_AT_inline, &inl, &__dw_error); - ERR_IF(ret == DW_DLV_ERROR); + DIE_IF(ret == DW_DLV_ERROR); return inl; } @@ -196,9 +196,9 @@ static Dwarf_Off die_get_abstract_origin(Dwarf_Die dw_die) int ret; ret = dwarf_attr(dw_die, DW_AT_abstract_origin, &attr, &__dw_error); - ERR_IF(ret != DW_DLV_OK); + DIE_IF(ret != DW_DLV_OK); ret = dwarf_formref(attr, &cu_offs, &__dw_error); - ERR_IF(ret != DW_DLV_OK); + DIE_IF(ret != DW_DLV_OK); dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR); return cu_offs; } @@ -215,28 +215,28 @@ static Dwarf_Addr die_get_entrypc(Dwarf_Die dw_die) /* Try to get entry pc */ ret = dwarf_attr(dw_die, DW_AT_entry_pc, &attr, &__dw_error); - ERR_IF(ret == DW_DLV_ERROR); + DIE_IF(ret == DW_DLV_ERROR); if (ret == DW_DLV_OK) { ret = dwarf_formaddr(attr, &addr, &__dw_error); - ERR_IF(ret != DW_DLV_OK); + DIE_IF(ret != DW_DLV_OK); dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR); return addr; } /* Try to get low pc */ ret = dwarf_lowpc(dw_die, &addr, &__dw_error); - ERR_IF(ret == DW_DLV_ERROR); + DIE_IF(ret == DW_DLV_ERROR); if (ret == DW_DLV_OK) return addr; /* Try to get ranges */ ret = dwarf_attr(dw_die, DW_AT_ranges, &attr, &__dw_error); - ERR_IF(ret != DW_DLV_OK); + DIE_IF(ret != DW_DLV_OK); ret = dwarf_formref(attr, &offs, &__dw_error); - ERR_IF(ret != DW_DLV_OK); + DIE_IF(ret != DW_DLV_OK); ret = dwarf_get_ranges(__dw_debug, offs, &ranges, &cnt, NULL, &__dw_error); - ERR_IF(ret != DW_DLV_OK); + DIE_IF(ret != DW_DLV_OK); addr = ranges[0].dwr_addr1; dwarf_ranges_dealloc(__dw_debug, ranges, cnt); return addr; @@ -261,7 +261,7 @@ static int __search_die_tree(struct die_link 
*cur_link, while (!(ret = die_cb(cur_link, data))) { /* Check child die */ ret = dwarf_child(cur_link->die, &new_die, &__dw_error); - ERR_IF(ret == DW_DLV_ERROR); + DIE_IF(ret == DW_DLV_ERROR); if (ret == DW_DLV_OK) { new_link.parent = cur_link; new_link.die = new_die; @@ -273,7 +273,7 @@ static int __search_die_tree(struct die_link *cur_link, /* Move to next sibling */ ret = dwarf_siblingof(__dw_debug, cur_link->die, &new_die, &__dw_error); - ERR_IF(ret == DW_DLV_ERROR); + DIE_IF(ret == DW_DLV_ERROR); dwarf_dealloc(__dw_debug, cur_link->die, DW_DLA_DIE); cur_link->die = new_die; if (ret == DW_DLV_NO_ENTRY) @@ -293,7 +293,7 @@ static int search_die_from_children(Dwarf_Die parent_die, new_link.parent = NULL; ret = dwarf_child(parent_die, &new_link.die, &__dw_error); - ERR_IF(ret == DW_DLV_ERROR); + DIE_IF(ret == DW_DLV_ERROR); if (ret == DW_DLV_OK) return __search_die_tree(&new_link, die_cb, data); else @@ -309,7 +309,7 @@ static int attr_get_locdesc(Dwarf_Attribute attr, Dwarf_Locdesc *desc, int ret, i; ret = dwarf_loclist_n(attr, &llbuf, &lcnt, &__dw_error); - ERR_IF(ret != DW_DLV_OK); + DIE_IF(ret != DW_DLV_OK); ret = DW_DLV_NO_ENTRY; for (i = 0; i < lcnt; ++i) { if (llbuf[i]->ld_lopc <= addr && @@ -317,7 +317,7 @@ static int attr_get_locdesc(Dwarf_Attribute attr, Dwarf_Locdesc *desc, memcpy(desc, llbuf[i], sizeof(Dwarf_Locdesc)); desc->ld_s = malloc(sizeof(Dwarf_Loc) * llbuf[i]->ld_cents); - ERR_IF(desc->ld_s == NULL); + DIE_IF(desc->ld_s == NULL); memcpy(desc->ld_s, llbuf[i]->ld_s, sizeof(Dwarf_Loc) * llbuf[i]->ld_cents); ret = DW_DLV_OK; @@ -383,8 +383,8 @@ static void show_location(Dwarf_Loc *loc, struct probe_finder *pf) " %s=%+lld(%s)", pf->var, offs, regs); else ret = snprintf(pf->buf, pf->len, " %s=%s", pf->var, regs); - ERR_IF(ret < 0); - ERR_IF(ret >= pf->len); + DIE_IF(ret < 0); + DIE_IF(ret >= pf->len); } /* Show a variables in kprobe event format */ @@ -401,7 +401,7 @@ static void show_variable(Dwarf_Die vr_die, struct probe_finder *pf) if (ret != DW_DLV_OK) goto error; /* TODO? 
*/ - ERR_IF(ld.ld_cents != 1); + DIE_IF(ld.ld_cents != 1); show_location(&ld.ld_s[0], pf); free(ld.ld_s); dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR); @@ -418,7 +418,7 @@ static int variable_callback(struct die_link *dlink, void *data) int ret; ret = dwarf_tag(dlink->die, &tag, &__dw_error); - ERR_IF(ret == DW_DLV_ERROR); + DIE_IF(ret == DW_DLV_ERROR); if ((tag == DW_TAG_formal_parameter || tag == DW_TAG_variable) && (die_compare_name(dlink->die, pf->var) == 0)) { @@ -437,8 +437,8 @@ static void find_variable(Dwarf_Die sp_die, struct probe_finder *pf) if (!is_c_varname(pf->var)) { /* Output raw parameters */ ret = snprintf(pf->buf, pf->len, " %s", pf->var); - ERR_IF(ret < 0); - ERR_IF(ret >= pf->len); + DIE_IF(ret < 0); + DIE_IF(ret >= pf->len); return ; } @@ -456,9 +456,9 @@ static void get_current_frame_base(Dwarf_Die sp_die, struct probe_finder *pf) int ret; ret = dwarf_attr(sp_die, DW_AT_frame_base, &attr, &__dw_error); - ERR_IF(ret != DW_DLV_OK); + DIE_IF(ret != DW_DLV_OK); ret = attr_get_locdesc(attr, &pf->fbloc, (pf->addr - pf->cu_base)); - ERR_IF(ret != DW_DLV_OK); + DIE_IF(ret != DW_DLV_OK); dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR); } @@ -479,7 +479,7 @@ static void show_probepoint(Dwarf_Die sp_die, Dwarf_Signed offs, /* Output name of probe point */ ret = dwarf_diename(sp_die, &name, &__dw_error); - ERR_IF(ret == DW_DLV_ERROR); + DIE_IF(ret == DW_DLV_ERROR); if (ret == DW_DLV_OK) { ret = snprintf(tmp, MAX_PROBE_BUFFER, "%s+%u", name, (unsigned int)offs); @@ -488,8 +488,8 @@ static void show_probepoint(Dwarf_Die sp_die, Dwarf_Signed offs, /* This function has no name. */ ret = snprintf(tmp, MAX_PROBE_BUFFER, "0x%llx", pf->addr); } - ERR_IF(ret < 0); - ERR_IF(ret >= MAX_PROBE_BUFFER); + DIE_IF(ret < 0); + DIE_IF(ret >= MAX_PROBE_BUFFER); len = ret; /* Find each argument */ @@ -515,7 +515,7 @@ static int probeaddr_callback(struct die_link *dlink, void *data) int ret; ret = dwarf_tag(dlink->die, &tag, &__dw_error); - ERR_IF(ret == DW_DLV_ERROR); + DIE_IF(ret == DW_DLV_ERROR); /* Check the address is in this subprogram */ if (tag == DW_TAG_subprogram && die_within_subprogram(dlink->die, pf->addr, &offs)) { @@ -537,21 +537,21 @@ static void find_by_line(Dwarf_Die cu_die, struct probe_finder *pf) int ret; ret = dwarf_srclines(cu_die, &lines, &cnt, &__dw_error); - ERR_IF(ret != DW_DLV_OK); + DIE_IF(ret != DW_DLV_OK); for (i = 0; i < cnt; i++) { ret = dwarf_line_srcfileno(lines[i], &fno, &__dw_error); - ERR_IF(ret != DW_DLV_OK); + DIE_IF(ret != DW_DLV_OK); if (fno != pf->fno) continue; ret = dwarf_lineno(lines[i], &lineno, &__dw_error); - ERR_IF(ret != DW_DLV_OK); + DIE_IF(ret != DW_DLV_OK); if (lineno != (Dwarf_Unsigned)pp->line) continue; ret = dwarf_lineaddr(lines[i], &addr, &__dw_error); - ERR_IF(ret != DW_DLV_OK); + DIE_IF(ret != DW_DLV_OK); eprintf("Probe point found: 0x%llx\n", addr); pf->addr = addr; /* Search a real subprogram including this line, */ @@ -574,7 +574,7 @@ static int probefunc_callback(struct die_link *dlink, void *data) int ret; ret = dwarf_tag(dlink->die, &tag, &__dw_error); - ERR_IF(ret == DW_DLV_ERROR); + DIE_IF(ret == DW_DLV_ERROR); if (tag == DW_TAG_subprogram) { if (die_compare_name(dlink->die, pp->function) == 0) { if (die_inlined_subprogram(dlink->die)) { @@ -582,7 +582,7 @@ static int probefunc_callback(struct die_link *dlink, void *data) ret = dwarf_die_CU_offset(dlink->die, &pf->inl_offs, &__dw_error); - ERR_IF(ret != DW_DLV_OK); + DIE_IF(ret != DW_DLV_OK); eprintf("inline definition offset %lld\n", pf->inl_offs); return 0; @@ -604,7 +604,7 @@ 
static int probefunc_callback(struct die_link *dlink, void *data) for (lk = dlink->parent; lk != NULL; lk = lk->parent) { tag = 0; dwarf_tag(lk->die, &tag, &__dw_error); - ERR_IF(ret == DW_DLV_ERROR); + DIE_IF(ret == DW_DLV_ERROR); if (tag == DW_TAG_subprogram && !die_inlined_subprogram(lk->die)) goto found; @@ -613,7 +613,7 @@ static int probefunc_callback(struct die_link *dlink, void *data) found: /* Get offset from subprogram */ ret = die_within_subprogram(lk->die, pf->addr, &offs); - ERR_IF(!ret); + DIE_IF(!ret); show_probepoint(lk->die, offs, pf); /* Continue to search */ } @@ -644,13 +644,13 @@ int find_probepoint(int fd, struct probe_point *pp) /* Search CU (Compilation Unit) */ ret = dwarf_next_cu_header(__dw_debug, NULL, NULL, NULL, &addr_size, &next_cuh, &__dw_error); - ERR_IF(ret == DW_DLV_ERROR); + DIE_IF(ret == DW_DLV_ERROR); if (ret == DW_DLV_NO_ENTRY) break; /* Get the DIE(Debugging Information Entry) of this CU */ ret = dwarf_siblingof(__dw_debug, 0, &cu_die, &__dw_error); - ERR_IF(ret != DW_DLV_OK); + DIE_IF(ret != DW_DLV_OK); /* Check if target file is included. */ if (pp->file) @@ -659,7 +659,7 @@ int find_probepoint(int fd, struct probe_point *pp) if (!pp->file || pf.fno) { /* Save CU base address (for frame_base) */ ret = dwarf_lowpc(cu_die, &pf.cu_base, &__dw_error); - ERR_IF(ret == DW_DLV_ERROR); + DIE_IF(ret == DW_DLV_ERROR); if (ret == DW_DLV_NO_ENTRY) pf.cu_base = 0; if (pp->line) @@ -670,7 +670,7 @@ int find_probepoint(int fd, struct probe_point *pp) dwarf_dealloc(__dw_debug, cu_die, DW_DLA_DIE); } ret = dwarf_finish(__dw_debug, &__dw_error); - ERR_IF(ret != DW_DLV_OK); + DIE_IF(ret != DW_DLV_OK); return pp->found; } diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h index 6a7cb0c04e9..d17fafc2135 100644 --- a/tools/perf/util/probe-finder.h +++ b/tools/perf/util/probe-finder.h @@ -1,16 +1,6 @@ #ifndef _PROBE_FINDER_H #define _PROBE_FINDER_H -#define _stringify(n) #n -#define stringify(n) _stringify(n) - -#define ERR_IF(cnd) \ - do { if (cnd) { \ - fprintf(stderr, "Error (" __FILE__ ":" stringify(__LINE__) \ - "): " stringify(cnd) "\n"); \ - exit(1); \ - } } while (0) - #define MAX_PATH_LEN 256 #define MAX_PROBE_BUFFER 1024 #define MAX_PROBES 128 diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index 9de2329dd44..0daa341734f 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h @@ -134,6 +134,15 @@ extern void die(const char *err, ...) NORETURN __attribute__((format (printf, 1, extern int error(const char *err, ...) __attribute__((format (printf, 1, 2))); extern void warning(const char *err, ...) __attribute__((format (printf, 1, 2))); +#include "../../../include/linux/stringify.h" + +#define DIE_IF(cnd) \ + do { if (cnd) \ + die(" at (" __FILE__ ":" __stringify(__LINE__) "): " \ + __stringify(cnd) "\n"); \ + } while (0) + + extern void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN); extern int prefixcmp(const char *str, const char *prefix); From 595c36490deb49381dc51231a3d5e6b66786ed27 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 16 Oct 2009 20:08:27 -0400 Subject: [PATCH 150/470] perf: Add perf-probe document Add perf-probe subcommand document and add it to command-list. 
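The PROBE SYNTAX section in the document below gives the grammar; an
illustrative invocation (the probe name, function and argument names are only
examples) would be:

    # perf probe -P 'p:myprobe do_sys_open dfd filename flags mode'

which defines a kprobe event 'myprobe' at do_sys_open() recording four of its
arguments by name.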
Signed-off-by: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <20091017000827.16556.73539.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/Documentation/perf-probe.txt | 48 +++++++++++++++++++++++++ tools/perf/command-list.txt | 1 + 2 files changed, 49 insertions(+) create mode 100644 tools/perf/Documentation/perf-probe.txt diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt new file mode 100644 index 00000000000..6b6c6aecdf1 --- /dev/null +++ b/tools/perf/Documentation/perf-probe.txt @@ -0,0 +1,48 @@ +perf-probe(1) +============= + +NAME +---- +perf-probe - Define new dynamic tracepoints + +SYNOPSIS +-------- +[verse] +'perf probe' [-k ] -P 'PROBE' [-P 'PROBE' ...] + + +DESCRIPTION +----------- +This command defines dynamic tracepoint events, by symbol and registers +without debuginfo, or by C expressions (C line numbers, C function names, +and C local variables) with debuginfo. + + +OPTIONS +------- +-k:: +--vmlinux:: + Specify vmlinux path which has debuginfo (Dwarf binary). + +-v:: +--verbose:: + Be more verbose (show parsed arguments, etc). + +-P:: +--probe:: + Define a probe point (see PROBE SYNTAX for detail) + +PROBE SYNTAX +------------ +Probe points are defined by following syntax. + + "TYPE:[GRP/]NAME FUNC[+OFFS][@SRC]|@SRC:LINE [ARG ...]" + +'TYPE' specifies the type of probe point, you can use either "p" (kprobe) or "r" (kretprobe) for 'TYPE'. 'GRP' specifies the group name of this probe, and 'NAME' specifies the event name. If 'GRP' is omitted, "kprobes" is used for its group name. +'FUNC' and 'OFFS' specifies function and offset (in byte) where probe will be put. In addition, 'SRC' specifies a source file which has that function (this is mainly for inline functions). +You can specify a probe point by the source line number by using '@SRC:LINE' syntax, where 'SRC' is the source file path and 'LINE' is the line number. +'ARG' specifies the arguments of this probe point. You can use the name of local variable, or kprobe-tracer argument format (e.g. $retval, %ax, etc). 
+ +SEE ALSO +-------- +linkperf:perf-trace[1], linkperf:perf-record[1] diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt index 00326e230d8..6475db4f194 100644 --- a/tools/perf/command-list.txt +++ b/tools/perf/command-list.txt @@ -11,3 +11,4 @@ perf-stat mainporcelain common perf-timechart mainporcelain common perf-top mainporcelain common perf-trace mainporcelain common +perf-probe mainporcelain common From 11018201b831e19304c0d639f105ad6c27e120b1 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Sun, 18 Oct 2009 22:29:23 +1100 Subject: [PATCH 151/470] perf stat: Add branch performance metric When we count both branches and branch-misses it is useful to print out the percentage of branch-misses: # perf stat -e branches -e branch-misses /bin/true Performance counter stats for '/bin/true': 401684 branches # 0.000 M/sec 23301 branch-misses # 5.801 % Signed-off-by: Anton Blanchard Cc: paulus@samba.org Cc: a.p.zijlstra@chello.nl LKML-Reference: <20091018112923.GQ4808@kryten> Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 3db31e7bf17..c37368343ff 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -125,6 +125,7 @@ struct stats event_res_stats[MAX_COUNTERS][3]; struct stats runtime_nsecs_stats; struct stats walltime_nsecs_stats; struct stats runtime_cycles_stats; +struct stats runtime_branches_stats; #define MATCH_EVENT(t, c, counter) \ (attrs[counter].type == PERF_TYPE_##t && \ @@ -235,6 +236,8 @@ static void read_counter(int counter) update_stats(&runtime_nsecs_stats, count[0]); if (MATCH_EVENT(HARDWARE, HW_CPU_CYCLES, counter)) update_stats(&runtime_cycles_stats, count[0]); + if (MATCH_EVENT(HARDWARE, HW_BRANCH_INSTRUCTIONS, counter)) + update_stats(&runtime_branches_stats, count[0]); } static int run_perf_stat(int argc __used, const char **argv) @@ -352,6 +355,14 @@ static void abs_printout(int counter, double avg) ratio = avg / total; fprintf(stderr, " # %10.3f IPC ", ratio); + } else if (MATCH_EVENT(HARDWARE, HW_BRANCH_MISSES, counter)) { + total = avg_stats(&runtime_branches_stats); + + if (total) + ratio = avg * 100 / total; + + fprintf(stderr, " # %10.3f %% ", ratio); + } else { total = avg_stats(&runtime_nsecs_stats); From 5a116dd2797677cad48fee2f42267e3cb69f5502 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sat, 17 Oct 2009 17:12:33 +0200 Subject: [PATCH 152/470] perf tools: Use kernel bitmap library Use the kernel bitmap library for internal perf tools uses. 
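As an illustration (not part of this patch): once lib/bitmap.c, hweight.c and
find_next_bit.c are linked in and the wrapper headers added below are on the
include path, tool-side code can use the ordinary kernel bitmap API, for example:

    #include <stdio.h>
    #include <linux/bitmap.h>   /* resolves to the wrapper added below */

    static void bitmap_demo(void)
    {
            unsigned long mask[4];      /* 256 bits, assuming 64-bit longs */

            bitmap_zero(mask, 256);
            set_bit(7, mask);

            if (test_bit(7, mask))
                    printf("weight = %d\n", bitmap_weight(mask, 256));
    }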
Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras Cc: Steven Rostedt LKML-Reference: <1255792354-11304-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 16 +++++++ tools/perf/builtin-record.c | 3 -- tools/perf/builtin-sched.c | 2 - tools/perf/util/include/asm/bitops.h | 6 +++ tools/perf/util/include/asm/byteorder.h | 2 + tools/perf/util/include/asm/swab.h | 1 + tools/perf/util/include/asm/types.h | 17 +++++++ tools/perf/util/include/asm/uaccess.h | 14 ++++++ tools/perf/util/include/linux/bitmap.h | 2 + tools/perf/util/include/linux/bitops.h | 27 +++++++++++ tools/perf/util/include/linux/compiler.h | 10 ++++ tools/perf/util/include/linux/ctype.h | 1 + tools/perf/util/include/linux/kernel.h | 59 ++++++++++++++++++++++++ tools/perf/util/include/linux/types.h | 1 + 14 files changed, 156 insertions(+), 5 deletions(-) create mode 100644 tools/perf/util/include/asm/bitops.h create mode 100644 tools/perf/util/include/asm/byteorder.h create mode 100644 tools/perf/util/include/asm/swab.h create mode 100644 tools/perf/util/include/asm/types.h create mode 100644 tools/perf/util/include/asm/uaccess.h create mode 100644 tools/perf/util/include/linux/bitmap.h create mode 100644 tools/perf/util/include/linux/bitops.h create mode 100644 tools/perf/util/include/linux/compiler.h create mode 100644 tools/perf/util/include/linux/ctype.h create mode 100644 tools/perf/util/include/linux/types.h diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 106c15055b5..2400e5068a2 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -363,6 +363,9 @@ LIB_OBJS += util/parse-options.o LIB_OBJS += util/parse-events.o LIB_OBJS += util/path.o LIB_OBJS += util/rbtree.o +LIB_OBJS += util/bitmap.o +LIB_OBJS += util/hweight.o +LIB_OBJS += util/find_next_bit.o LIB_OBJS += util/run-command.o LIB_OBJS += util/quote.o LIB_OBJS += util/strbuf.o @@ -790,6 +793,19 @@ util/config.o: util/config.c PERF-CFLAGS util/rbtree.o: ../../lib/rbtree.c PERF-CFLAGS $(QUIET_CC)$(CC) -o util/rbtree.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< +# some perf warning policies can't fit to lib/bitmap.c, eg: it warns about variable shadowing +# from that comes from kernel headers wrapping. 
+KBITMAP_FLAGS=`echo $(ALL_CFLAGS) | sed s/-Wshadow// | sed s/-Wswitch-default// | sed s/-Wextra//` + +util/bitmap.o: ../../lib/bitmap.c PERF-CFLAGS + $(QUIET_CC)$(CC) -o util/bitmap.o -c $(KBITMAP_FLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< + +util/hweight.o: ../../lib/hweight.c PERF-CFLAGS + $(QUIET_CC)$(CC) -o util/hweight.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< + +util/find_next_bit.o: ../../lib/find_next_bit.c PERF-CFLAGS + $(QUIET_CC)$(CC) -o util/find_next_bit.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< + perf-%$X: %.o $(PERFLIBS) $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 8b2c860c49a..fc3709cba13 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -21,9 +21,6 @@ #include #include -#define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1) -#define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask)) - static int fd[MAX_NR_CPUS][MAX_COUNTERS]; static long default_interval = 0; diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 57ad3f458ef..807ca66e7a8 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -38,8 +38,6 @@ static int cwdlen; #define PR_SET_NAME 15 /* Set process name */ #define MAX_CPUS 4096 -#define BUG_ON(x) assert(!(x)) - static u64 run_measurement_overhead; static u64 sleep_measurement_overhead; diff --git a/tools/perf/util/include/asm/bitops.h b/tools/perf/util/include/asm/bitops.h new file mode 100644 index 00000000000..fbe4d921291 --- /dev/null +++ b/tools/perf/util/include/asm/bitops.h @@ -0,0 +1,6 @@ +#include "../../../../include/asm-generic/bitops/__fls.h" +#include "../../../../include/asm-generic/bitops/fls.h" +#include "../../../../include/asm-generic/bitops/fls64.h" +#include "../../../../include/asm-generic/bitops/__ffs.h" +#include "../../../../include/asm-generic/bitops/ffz.h" +#include "../../../../include/asm-generic/bitops/hweight.h" diff --git a/tools/perf/util/include/asm/byteorder.h b/tools/perf/util/include/asm/byteorder.h new file mode 100644 index 00000000000..39f367cfaf5 --- /dev/null +++ b/tools/perf/util/include/asm/byteorder.h @@ -0,0 +1,2 @@ +#include "../asm/types.h" +#include "../../../../include/linux/swab.h" diff --git a/tools/perf/util/include/asm/swab.h b/tools/perf/util/include/asm/swab.h new file mode 100644 index 00000000000..ed538942523 --- /dev/null +++ b/tools/perf/util/include/asm/swab.h @@ -0,0 +1 @@ +/* stub */ diff --git a/tools/perf/util/include/asm/types.h b/tools/perf/util/include/asm/types.h new file mode 100644 index 00000000000..06703c6cd50 --- /dev/null +++ b/tools/perf/util/include/asm/types.h @@ -0,0 +1,17 @@ +#ifndef PERF_ASM_TYPES_H_ +#define PERF_ASM_TYPES_H_ + +#include +#include "../../types.h" +#include + +/* CHECKME: Not sure both always match */ +#define BITS_PER_LONG __WORDSIZE + +typedef u64 __u64; +typedef u32 __u32; +typedef u16 __u16; +typedef u8 __u8; +typedef s64 __s64; + +#endif /* PERF_ASM_TYPES_H_ */ diff --git a/tools/perf/util/include/asm/uaccess.h b/tools/perf/util/include/asm/uaccess.h new file mode 100644 index 00000000000..d0f72b8fcc3 --- /dev/null +++ b/tools/perf/util/include/asm/uaccess.h @@ -0,0 +1,14 @@ +#ifndef _PERF_ASM_UACCESS_H_ +#define _PERF_ASM_UACCESS_H_ + +#define __get_user(src, dest) \ +({ \ + (src) = *dest; \ + 0; \ +}) + +#define get_user __get_user + +#define access_ok(type, addr, size) 1 + +#endif diff --git a/tools/perf/util/include/linux/bitmap.h 
b/tools/perf/util/include/linux/bitmap.h new file mode 100644 index 00000000000..821c1033bcc --- /dev/null +++ b/tools/perf/util/include/linux/bitmap.h @@ -0,0 +1,2 @@ +#include "../../../../include/linux/bitmap.h" +#include "../../../../include/asm-generic/bitops/find.h" diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h new file mode 100644 index 00000000000..ace57c36d1d --- /dev/null +++ b/tools/perf/util/include/linux/bitops.h @@ -0,0 +1,27 @@ +#ifndef _PERF_LINUX_BITOPS_H_ +#define _PERF_LINUX_BITOPS_H_ + +#define __KERNEL__ + +#define CONFIG_GENERIC_FIND_NEXT_BIT +#define CONFIG_GENERIC_FIND_FIRST_BIT +#include "../../../../include/linux/bitops.h" + +static inline void set_bit(int nr, unsigned long *addr) +{ + addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG); +} + +static __always_inline int test_bit(unsigned int nr, const unsigned long *addr) +{ + return ((1UL << (nr % BITS_PER_LONG)) & + (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; +} + +unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, unsigned + long size, unsigned long offset); + +unsigned long generic_find_next_le_bit(const unsigned long *addr, unsigned + long size, unsigned long offset); + +#endif diff --git a/tools/perf/util/include/linux/compiler.h b/tools/perf/util/include/linux/compiler.h new file mode 100644 index 00000000000..dfb0713ed47 --- /dev/null +++ b/tools/perf/util/include/linux/compiler.h @@ -0,0 +1,10 @@ +#ifndef _PERF_LINUX_COMPILER_H_ +#define _PERF_LINUX_COMPILER_H_ + +#ifndef __always_inline +#define __always_inline inline +#endif +#define __user +#define __attribute_const__ + +#endif diff --git a/tools/perf/util/include/linux/ctype.h b/tools/perf/util/include/linux/ctype.h new file mode 100644 index 00000000000..bae5783282e --- /dev/null +++ b/tools/perf/util/include/linux/ctype.h @@ -0,0 +1 @@ +#include "../../../../include/linux/ctype.h" diff --git a/tools/perf/util/include/linux/kernel.h b/tools/perf/util/include/linux/kernel.h index a6b87390cb5..4b9204d9b26 100644 --- a/tools/perf/util/include/linux/kernel.h +++ b/tools/perf/util/include/linux/kernel.h @@ -1,6 +1,16 @@ #ifndef PERF_LINUX_KERNEL_H_ #define PERF_LINUX_KERNEL_H_ +#include +#include +#include +#include + +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) + +#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1) +#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) + #ifndef offsetof #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) #endif @@ -26,4 +36,53 @@ _max1 > _max2 ? _max1 : _max2; }) #endif +#ifndef min +#define min(x, y) ({ \ + typeof(x) _min1 = (x); \ + typeof(y) _min2 = (y); \ + (void) (&_min1 == &_min2); \ + _min1 < _min2 ? _min1 : _min2; }) +#endif + +#ifndef BUG_ON +#define BUG_ON(cond) assert(!(cond)) +#endif + +/* + * Both need more care to handle endianness + * (Don't use bitmap_copy_le() for now) + */ +#define cpu_to_le64(x) (x) +#define cpu_to_le32(x) (x) + +static inline int +vscnprintf(char *buf, size_t size, const char *fmt, va_list args) +{ + int i; + ssize_t ssize = size; + + i = vsnprintf(buf, size, fmt, args); + + return (i >= ssize) ? (ssize - 1) : i; +} + +static inline int scnprintf(char * buf, size_t size, const char * fmt, ...) +{ + va_list args; + ssize_t ssize = size; + int i; + + va_start(args, fmt); + i = vsnprintf(buf, size, fmt, args); + va_end(args); + + return (i >= ssize) ? 
(ssize - 1) : i; +} + +static inline unsigned long +simple_strtoul(const char *nptr, char **endptr, int base) +{ + return strtoul(nptr, endptr, base); +} + #endif diff --git a/tools/perf/util/include/linux/types.h b/tools/perf/util/include/linux/types.h new file mode 100644 index 00000000000..ed538942523 --- /dev/null +++ b/tools/perf/util/include/linux/types.h @@ -0,0 +1 @@ +/* stub */ From 2ba0825075e76236d22a20decd8e2346a99faabe Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sat, 17 Oct 2009 17:12:34 +0200 Subject: [PATCH 153/470] perf tools: Introduce bitmask'ed additional headers This provides a new set of bitmasked headers. A new field is added in the perf headers that implements a bitmap storing optional features present in the perf.data file. The layout can be pictured like this: (Usual perf headers)(Features bitmap)[Feature 0][Feature n][Feature 255] If the bit n is set, then the feature n is used in this file. They are all set in order. This brings a backward and forward compatibility. The trace_info section has moved into such optional features, this is the first and only one for now. This is backward compatible with the .32 file version although it doesn't support the previous separate trace.info file. And finally it doesn't support the current interim development version. Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras Cc: Steven Rostedt LKML-Reference: <1255792354-11304-2-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 4 +- tools/perf/util/header.c | 100 ++++++++++++++++++++---------------- tools/perf/util/header.h | 30 +++++++---- 3 files changed, 78 insertions(+), 56 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index fc3709cba13..f0467ff0d8a 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -574,11 +574,11 @@ static int __cmd_record(int argc, const char **argv) header = perf_header__new(); if (raw_samples) { - perf_header__set_trace_info(); + perf_header__feat_trace_info(header); } else { for (i = 0; i < nr_counters; i++) { if (attrs[i].sample_type & PERF_SAMPLE_RAW) { - perf_header__set_trace_info(); + perf_header__feat_trace_info(header); break; } } diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 9aae360c0f2..171d51b6f35 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -8,6 +8,8 @@ #include "../perf.h" #include "trace-event.h" +#include + /* * Create new perf.data header attribute: */ @@ -48,25 +50,17 @@ void perf_header_attr__add_id(struct perf_header_attr *self, u64 id) */ struct perf_header *perf_header__new(void) { - struct perf_header *self = malloc(sizeof(*self)); + struct perf_header *self = calloc(sizeof(*self), 1); if (!self) die("nomem"); - self->frozen = 0; - - self->attrs = 0; self->size = 1; self->attr = malloc(sizeof(void *)); if (!self->attr) die("nomem"); - self->data_offset = 0; - self->data_size = 0; - self->trace_info_offset = 0; - self->trace_info_size = 0; - return self; } @@ -149,14 +143,12 @@ struct perf_file_header { struct perf_file_section attrs; struct perf_file_section data; struct perf_file_section event_types; - struct perf_file_section trace_info; + feat_mask_t adds_features; }; -static int trace_info; - -void perf_header__set_trace_info(void) +void perf_header__feat_trace_info(struct perf_header *header) { - trace_info = 1; + set_bit(HEADER_TRACE_INFO, perf_header__adds_mask(header)); } static void 
do_write(int fd, void *buf, size_t size) @@ -172,6 +164,32 @@ static void do_write(int fd, void *buf, size_t size) } } +static void perf_header__adds_write(struct perf_header *self, int fd) +{ + struct perf_file_section trace_sec; + u64 cur_offset = lseek(fd, 0, SEEK_CUR); + unsigned long *feat_mask = perf_header__adds_mask(self); + + if (test_bit(HEADER_TRACE_INFO, feat_mask)) { + /* Write trace info */ + trace_sec.offset = lseek(fd, sizeof(trace_sec), SEEK_CUR); + read_tracing_data(fd, attrs, nr_counters); + trace_sec.size = lseek(fd, 0, SEEK_CUR) - trace_sec.offset; + + /* Write trace info headers */ + lseek(fd, cur_offset, SEEK_SET); + do_write(fd, &trace_sec, sizeof(trace_sec)); + + /* + * Update cur_offset. So that other (future) + * features can set their own infos in this place. But if we are + * the only feature, at least that seeks to the place the data + * should begin. + */ + cur_offset = lseek(fd, trace_sec.offset + trace_sec.size, SEEK_SET); + } +}; + void perf_header__write(struct perf_header *self, int fd) { struct perf_file_header f_header; @@ -210,23 +228,7 @@ void perf_header__write(struct perf_header *self, int fd) if (events) do_write(fd, events, self->event_size); - if (trace_info) { - static int trace_info_written; - - /* - * Write it only once - */ - if (!trace_info_written) { - self->trace_info_offset = lseek(fd, 0, SEEK_CUR); - read_tracing_data(fd, attrs, nr_counters); - self->trace_info_size = lseek(fd, 0, SEEK_CUR) - - self->trace_info_offset; - trace_info_written = 1; - } else { - lseek(fd, self->trace_info_offset + - self->trace_info_size, SEEK_SET); - } - } + perf_header__adds_write(self, fd); self->data_offset = lseek(fd, 0, SEEK_CUR); @@ -246,12 +248,10 @@ void perf_header__write(struct perf_header *self, int fd) .offset = self->event_offset, .size = self->event_size, }, - .trace_info = { - .offset = self->trace_info_offset, - .size = self->trace_info_size, - }, }; + memcpy(&f_header.adds_features, &self->adds_features, sizeof(feat_mask_t)); + lseek(fd, 0, SEEK_SET); do_write(fd, &f_header, sizeof(f_header)); lseek(fd, self->data_offset + self->data_size, SEEK_SET); @@ -274,6 +274,20 @@ static void do_read(int fd, void *buf, size_t size) } } +static void perf_header__adds_read(struct perf_header *self, int fd) +{ + const unsigned long *feat_mask = perf_header__adds_mask(self); + + if (test_bit(HEADER_TRACE_INFO, feat_mask)) { + struct perf_file_section trace_sec; + + do_read(fd, &trace_sec, sizeof(trace_sec)); + lseek(fd, trace_sec.offset, SEEK_SET); + trace_report(fd); + lseek(fd, trace_sec.offset + trace_sec.size, SEEK_SET); + } +}; + struct perf_header *perf_header__read(int fd) { struct perf_header *self = perf_header__new(); @@ -292,9 +306,11 @@ struct perf_header *perf_header__read(int fd) if (f_header.size != sizeof(f_header)) { /* Support the previous format */ - if (f_header.size == offsetof(typeof(f_header), trace_info)) - f_header.trace_info.size = 0; - else + if (f_header.size == offsetof(typeof(f_header), adds_features)) { + unsigned long *mask = (unsigned long *)(void *) + &f_header.adds_features; + bitmap_zero(mask, HEADER_FEAT_BITS); + } else die("incompatible file format"); } nr_attrs = f_header.attrs.size / sizeof(f_attr); @@ -330,13 +346,9 @@ struct perf_header *perf_header__read(int fd) event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type); } - self->trace_info_offset = f_header.trace_info.offset; - self->trace_info_size = f_header.trace_info.size; + memcpy(&self->adds_features, &f_header.adds_features, 
sizeof(feat_mask_t)); - if (self->trace_info_size) { - lseek(fd, self->trace_info_offset, SEEK_SET); - trace_report(fd); - } + perf_header__adds_read(self, fd); self->event_offset = f_header.event_types.offset; self->event_size = f_header.event_types.size; diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index 30aee5160dc..0eb4a9126b7 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -12,19 +12,29 @@ struct perf_header_attr { off_t id_offset; }; +#define HEADER_TRACE_INFO 1 + +#define HEADER_FEAT_BITS 256 + +typedef typeof(u64[HEADER_FEAT_BITS / 8]) feat_mask_t; + struct perf_header { - int frozen; - int attrs, size; + int frozen; + int attrs, size; struct perf_header_attr **attr; - s64 attr_offset; - u64 data_offset; - u64 data_size; - u64 event_offset; - u64 event_size; - u64 trace_info_offset; - u64 trace_info_size; + s64 attr_offset; + u64 data_offset; + u64 data_size; + u64 event_offset; + u64 event_size; + feat_mask_t adds_features; }; +static inline unsigned long *perf_header__adds_mask(struct perf_header *self) +{ + return (unsigned long *)(void *)&self->adds_features; +} + struct perf_header *perf_header__read(int fd); void perf_header__write(struct perf_header *self, int fd); @@ -42,7 +52,7 @@ void perf_header_attr__add_id(struct perf_header_attr *self, u64 id); u64 perf_header__sample_type(struct perf_header *header); struct perf_event_attr * perf_header__find_attr(u64 id, struct perf_header *header); -void perf_header__set_trace_info(void); +void perf_header__feat_trace_info(struct perf_header *header); struct perf_header *perf_header__new(void); From db9f11e36d0125a5e3e595ea9ef2e4b89f7e8737 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sat, 17 Oct 2009 17:57:18 +0200 Subject: [PATCH 154/470] perf tools: Use DECLARE_BITMAP instead of an open-coded array Use DECLARE_BITMAP instead of an open coded array for our bitmap of featured sections. This makes the array an unsigned long instead of a u64 but since we use a 256 bits bitmap, the array size shouldn't vary between different boxes. 
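For reference, the array size works out the same on both word sizes
(HEADER_FEAT_BITS is 256):

    DECLARE_BITMAP(adds_features, 256)
        expands to:  unsigned long adds_features[BITS_TO_LONGS(256)]

    64-bit longs:  256 / 64 = 4 elements x 8 bytes = 32 bytes
    32-bit longs:  256 / 32 = 8 elements x 4 bytes = 32 bytes

so the feature bitmap written into the perf.data header keeps the same size
across boxes.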
Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras Cc: Steven Rostedt LKML-Reference: <1255795038-13751-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/util/header.c | 22 +++++++++------------- tools/perf/util/header.h | 11 +++-------- tools/perf/util/include/asm/asm-offsets.h | 1 + tools/perf/util/include/linux/types.h | 8 +++++++- 4 files changed, 20 insertions(+), 22 deletions(-) create mode 100644 tools/perf/util/include/asm/asm-offsets.h diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 171d51b6f35..622c60e4525 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -8,8 +8,6 @@ #include "../perf.h" #include "trace-event.h" -#include - /* * Create new perf.data header attribute: */ @@ -143,12 +141,12 @@ struct perf_file_header { struct perf_file_section attrs; struct perf_file_section data; struct perf_file_section event_types; - feat_mask_t adds_features; + DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); }; void perf_header__feat_trace_info(struct perf_header *header) { - set_bit(HEADER_TRACE_INFO, perf_header__adds_mask(header)); + set_bit(HEADER_TRACE_INFO, header->adds_features); } static void do_write(int fd, void *buf, size_t size) @@ -168,7 +166,7 @@ static void perf_header__adds_write(struct perf_header *self, int fd) { struct perf_file_section trace_sec; u64 cur_offset = lseek(fd, 0, SEEK_CUR); - unsigned long *feat_mask = perf_header__adds_mask(self); + unsigned long *feat_mask = self->adds_features; if (test_bit(HEADER_TRACE_INFO, feat_mask)) { /* Write trace info */ @@ -250,7 +248,7 @@ void perf_header__write(struct perf_header *self, int fd) }, }; - memcpy(&f_header.adds_features, &self->adds_features, sizeof(feat_mask_t)); + memcpy(&f_header.adds_features, &self->adds_features, sizeof(self->adds_features)); lseek(fd, 0, SEEK_SET); do_write(fd, &f_header, sizeof(f_header)); @@ -276,7 +274,7 @@ static void do_read(int fd, void *buf, size_t size) static void perf_header__adds_read(struct perf_header *self, int fd) { - const unsigned long *feat_mask = perf_header__adds_mask(self); + const unsigned long *feat_mask = self->adds_features; if (test_bit(HEADER_TRACE_INFO, feat_mask)) { struct perf_file_section trace_sec; @@ -306,11 +304,9 @@ struct perf_header *perf_header__read(int fd) if (f_header.size != sizeof(f_header)) { /* Support the previous format */ - if (f_header.size == offsetof(typeof(f_header), adds_features)) { - unsigned long *mask = (unsigned long *)(void *) - &f_header.adds_features; - bitmap_zero(mask, HEADER_FEAT_BITS); - } else + if (f_header.size == offsetof(typeof(f_header), adds_features)) + bitmap_zero(f_header.adds_features, HEADER_FEAT_BITS); + else die("incompatible file format"); } nr_attrs = f_header.attrs.size / sizeof(f_attr); @@ -346,7 +342,7 @@ struct perf_header *perf_header__read(int fd) event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type); } - memcpy(&self->adds_features, &f_header.adds_features, sizeof(feat_mask_t)); + memcpy(&self->adds_features, &f_header.adds_features, sizeof(f_header.adds_features)); perf_header__adds_read(self, fd); diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index 0eb4a9126b7..2ea9dfb1236 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -5,6 +5,8 @@ #include #include "types.h" +#include + struct perf_header_attr { struct perf_event_attr attr; int ids, size; @@ -16,8 +18,6 @@ struct perf_header_attr { #define 
HEADER_FEAT_BITS 256 -typedef typeof(u64[HEADER_FEAT_BITS / 8]) feat_mask_t; - struct perf_header { int frozen; int attrs, size; @@ -27,14 +27,9 @@ struct perf_header { u64 data_size; u64 event_offset; u64 event_size; - feat_mask_t adds_features; + DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); }; -static inline unsigned long *perf_header__adds_mask(struct perf_header *self) -{ - return (unsigned long *)(void *)&self->adds_features; -} - struct perf_header *perf_header__read(int fd); void perf_header__write(struct perf_header *self, int fd); diff --git a/tools/perf/util/include/asm/asm-offsets.h b/tools/perf/util/include/asm/asm-offsets.h new file mode 100644 index 00000000000..ed538942523 --- /dev/null +++ b/tools/perf/util/include/asm/asm-offsets.h @@ -0,0 +1 @@ +/* stub */ diff --git a/tools/perf/util/include/linux/types.h b/tools/perf/util/include/linux/types.h index ed538942523..858a38d0843 100644 --- a/tools/perf/util/include/linux/types.h +++ b/tools/perf/util/include/linux/types.h @@ -1 +1,7 @@ -/* stub */ +#ifndef _PERF_LINUX_TYPES_H_ +#define _PERF_LINUX_TYPES_H_ + +#define DECLARE_BITMAP(name,bits) \ + unsigned long name[BITS_TO_LONGS(bits)] + +#endif From 1abc7f5500fff8422f34826a006648d8741d83d3 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 18 Oct 2009 19:20:24 -0700 Subject: [PATCH 155/470] perf tools: Display better error messages on missing packages Check for libelf headers and glibc headers separately so that the error message correctly identifies which package installation is missing/needed. Signed-off-by: Randy Dunlap Cc: paulus@samba.org Cc: a.p.zijlstra@chello.nl Cc: efault@gmx.de Cc: fweisbec@gmail.com Cc: Arnaldo Carvalho de Melo LKML-Reference: <4ADBCCE8.3060300@oracle.com> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 2400e5068a2..db89a6de9d0 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -431,8 +431,12 @@ ifeq ($(uname_S),Darwin) PTHREAD_LIBS = endif +ifneq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { const char * version = gnu_get_libc_version(); return (long)version; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y) + msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]); +endif + ifneq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y) - msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]); + msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel); endif ifdef NO_DEMANGLE From 12133afffcc7140eea915b1572189a2ea0cf7b0e Mon Sep 17 00:00:00 2001 From: Tim Blechmann Date: Mon, 19 Oct 2009 12:03:33 +0200 Subject: [PATCH 156/470] perf stat: Add branch performance events to default output Adds performance event information about branches and branch misses to the default output of perf stat. 
Signed-off-by: Tim Blechmann Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <4ADC3975.8050109@klingt.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index c37368343ff..95a55eaf72f 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -59,6 +59,8 @@ static struct perf_event_attr default_attrs[] = { { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES}, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS}, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, }; From 56aab464ff6232bcc2f53b26576983dc83f75db7 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 19 Oct 2009 13:27:08 +0200 Subject: [PATCH 157/470] perf stat: Re-align the default_attrs[] array Clean up the array definition to be vertically aligned. No functional effects. Cc: Tim Blechmann Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <4ADC3975.8050109@klingt.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index c373683..95a55ea 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -59,6 +59,8 @@ static struct perf_event_attr default_attrs[] = { { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES}, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS}, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, }; --- tools/perf/builtin-stat.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 95a55eaf72f..90e0a268343 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -50,17 +50,17 @@ static struct perf_event_attr default_attrs[] = { - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES}, - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES}, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS}, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES 
}, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, }; From dd86e72abdbc4b436471af5a97927c6145f5298c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 19 Oct 2009 13:33:03 +0200 Subject: [PATCH 158/470] perf stat: Count branches first Count branches first, cache-misses second. The reason is that on x86 branches are not counted by all counters on all CPUs. Before: Performance counter stats for 'ls': 0.756653 task-clock-msecs # 0.802 CPUs 0 context-switches # 0.000 M/sec 0 CPU-migrations # 0.000 M/sec 250 page-faults # 0.330 M/sec 2375725 cycles # 3139.781 M/sec 1628129 instructions # 0.685 IPC 19643 cache-references # 25.960 M/sec 4608 cache-misses # 6.090 M/sec 342532 branches # 452.694 M/sec branch-misses 0.000943356 seconds time elapsed After: Performance counter stats for 'ls': 1.056734 task-clock-msecs # 0.859 CPUs 0 context-switches # 0.000 M/sec 0 CPU-migrations # 0.000 M/sec 259 page-faults # 0.245 M/sec 3345932 cycles # 3166.295 M/sec 3074090 instructions # 0.919 IPC 616928 branches # 583.806 M/sec 39279 branch-misses # 6.367 % 21312 cache-references # 20.168 M/sec 3661 cache-misses # 3.464 M/sec 0.001230551 seconds time elapsed (also prettify the printout of branch misses, in case it's getting scaled.) Cc: Tim Blechmann Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <4ADC3975.8050109@klingt.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index c373683..95a55ea 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -59,6 +59,8 @@ static struct perf_event_attr default_attrs[] = { { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES}, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS}, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, }; --- tools/perf/builtin-stat.c | 20 ++++++++++---------- 1 files changed, 10 insertions(+), 10 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 95a55ea..90e0a26 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -50,17 +50,17 @@ static struct perf_event_attr default_attrs[] = { - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES}, - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, - - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES}, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS}, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, + + { .type = PERF_TYPE_HARDWARE, .config = 
PERF_COUNT_HW_CPU_CYCLES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, }; --- tools/perf/builtin-stat.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 90e0a268343..c6df3770b87 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -57,10 +57,10 @@ static struct perf_event_attr default_attrs[] = { { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES }, }; @@ -363,7 +363,7 @@ static void abs_printout(int counter, double avg) if (total) ratio = avg * 100 / total; - fprintf(stderr, " # %10.3f %% ", ratio); + fprintf(stderr, " # %10.3f %% ", ratio); } else { total = avg_stats(&runtime_nsecs_stats); From 20639c15d2e78f180d398a6b6422880fac3258bb Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 19 Oct 2009 15:11:36 -0200 Subject: [PATCH 159/470] perf tools: Add missing tools/perf/util/include/string.h To cure a bunch of: In file included from util/include/linux/bitmap.h:1, from util/header.h:8, from builtin-trace.c:7: util/include/../../../../include/linux/bitmap.h:8:26: error: linux/string.h: No such file or directory make: *** [builtin-trace.o] Error 1 make: *** Waiting for unfinished jobs.... Signed-off-by: Arnaldo Carvalho de Melo Acked-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Peter Zijlstra LKML-Reference: <1255972296-11500-1-git-send-email-acme@redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 1 + tools/perf/util/include/linux/string.h | 1 + 2 files changed, 2 insertions(+) create mode 100644 tools/perf/util/include/linux/string.h diff --git a/tools/perf/Makefile b/tools/perf/Makefile index db89a6de9d0..3b154f17d95 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -329,6 +329,7 @@ LIB_H += ../../include/linux/perf_event.h LIB_H += ../../include/linux/rbtree.h LIB_H += ../../include/linux/list.h LIB_H += util/include/linux/list.h +LIB_H += util/include/linux/string.h LIB_H += perf.h LIB_H += util/event.h LIB_H += util/types.h diff --git a/tools/perf/util/include/linux/string.h b/tools/perf/util/include/linux/string.h new file mode 100644 index 00000000000..3b2f5900276 --- /dev/null +++ b/tools/perf/util/include/linux/string.h @@ -0,0 +1 @@ +#include From 79b9ad361be8c6f3eeea97dd3883e8bcfa989333 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 19 Oct 2009 15:31:31 -0200 Subject: [PATCH 160/470] perf tools: Add bunch of missing headers to LIB_H Build dependencies were not properly mapped out. 
Signed-off-by: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Frederic Weisbecker LKML-Reference: <1255973491-11626-1-git-send-email-acme@redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 3b154f17d95..64c6b7b57d8 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -328,8 +328,25 @@ LIB_FILE=libperf.a LIB_H += ../../include/linux/perf_event.h LIB_H += ../../include/linux/rbtree.h LIB_H += ../../include/linux/list.h +LIB_H += util/include/linux/bitmap.h +LIB_H += util/include/linux/bitops.h +LIB_H += util/include/linux/compiler.h +LIB_H += util/include/linux/ctype.h +LIB_H += util/include/linux/kernel.h LIB_H += util/include/linux/list.h +LIB_H += util/include/linux/module.h +LIB_H += util/include/linux/poison.h +LIB_H += util/include/linux/prefetch.h +LIB_H += util/include/linux/rbtree.h LIB_H += util/include/linux/string.h +LIB_H += util/include/linux/types.h +LIB_H += util/include/asm/asm-offsets.h +LIB_H += util/include/asm/bitops.h +LIB_H += util/include/asm/byteorder.h +LIB_H += util/include/asm/swab.h +LIB_H += util/include/asm/system.h +LIB_H += util/include/asm/types.h +LIB_H += util/include/asm/uaccess.h LIB_H += perf.h LIB_H += util/event.h LIB_H += util/types.h From bbe2987bea26a684ff11d887dfc4cf39b22c27a2 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Tue, 20 Oct 2009 07:09:39 +0900 Subject: [PATCH 161/470] perf timechart: Add a process filter During the Kernel Summit demo of perf/ftrace/timechart, there was a feature request to have a process filter for timechart so that you can zoom into one or a few processes that you are really interested in. This patch adds basic support for this feature, the -p (--process) option now can select a PID or a process name to be shown. Multiple -p options are allowed, and the combined set will be included in the output. 
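As an aside, the matching rule this boils down to is an OR across all -p filters, with an
empty filter list meaning "show every task". A simplified sketch of that semantics (not the
exact code from the patch below):

	#include <string.h>

	struct process_filter {
		char *name;			/* raw -p argument */
		int pid;			/* 0 when the argument was not numeric */
		struct process_filter *next;
	};

	static int passes_filter(const struct process_filter *filters,
				 int pid, const char *comm)
	{
		const struct process_filter *f;

		if (!filters)
			return 1;		/* no -p given: display everything */

		for (f = filters; f; f = f->next) {
			if (f->pid && f->pid == pid)
				return 1;	/* numeric -p matched the PID */
			if (strcmp(f->name, comm) == 0)
				return 1;	/* -p matched the process name */
		}
		return 0;
	}

So e.g. "perf timechart -p 1234 -p Xorg" keeps tasks whose PID is 1234 or whose name is
Xorg, and drops the rest.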
Signed-off-by: Arjan van de Ven Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <20091020070939.7d0fb8a7@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/Documentation/perf-timechart.txt | 5 +- tools/perf/builtin-timechart.c | 105 +++++++++++++++++++- 2 files changed, 106 insertions(+), 4 deletions(-) diff --git a/tools/perf/Documentation/perf-timechart.txt b/tools/perf/Documentation/perf-timechart.txt index a7910099d6f..4b1788355ec 100644 --- a/tools/perf/Documentation/perf-timechart.txt +++ b/tools/perf/Documentation/perf-timechart.txt @@ -31,9 +31,12 @@ OPTIONS -w:: --width=:: Select the width of the SVG file (default: 1000) --p:: +-P:: --power-only:: Only output the CPU power section of the diagram +-p:: +--process:: + Select the processes to display, by name or PID SEE ALSO diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index e8a510d935e..34fad57087f 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c @@ -153,6 +153,17 @@ static struct wake_event *wake_events; struct sample_wrapper *all_samples; + +struct process_filter; +struct process_filter { + char *name; + int pid; + struct process_filter *next; +}; + +static struct process_filter *process_filter; + + static struct per_pid *find_create_pid(int pid) { struct per_pid *cursor = all_data; @@ -763,11 +774,11 @@ static void draw_wakeups(void) c = p->all; while (c) { if (c->Y && c->start_time <= we->time && c->end_time >= we->time) { - if (p->pid == we->waker) { + if (p->pid == we->waker && !from) { from = c->Y; task_from = strdup(c->comm); } - if (p->pid == we->wakee) { + if (p->pid == we->wakee && !to) { to = c->Y; task_to = strdup(c->comm); } @@ -882,12 +893,89 @@ static void draw_process_bars(void) } } +static void add_process_filter(const char *string) +{ + struct process_filter *filt; + int pid; + + pid = strtoull(string, NULL, 10); + filt = malloc(sizeof(struct process_filter)); + if (!filt) + return; + + filt->name = strdup(string); + filt->pid = pid; + filt->next = process_filter; + + process_filter = filt; +} + +static int passes_filter(struct per_pid *p, struct per_pidcomm *c) +{ + struct process_filter *filt; + if (!process_filter) + return 1; + + filt = process_filter; + while (filt) { + if (filt->pid && p->pid == filt->pid) + return 1; + if (strcmp(filt->name, c->comm) == 0) + return 1; + filt = filt->next; + } + return 0; +} + +static int determine_display_tasks_filtered(void) +{ + struct per_pid *p; + struct per_pidcomm *c; + int count = 0; + + p = all_data; + while (p) { + p->display = 0; + if (p->start_time == 1) + p->start_time = first_time; + + /* no exit marker, task kept running to the end */ + if (p->end_time == 0) + p->end_time = last_time; + + c = p->all; + + while (c) { + c->display = 0; + + if (c->start_time == 1) + c->start_time = first_time; + + if (passes_filter(p, c)) { + c->display = 1; + p->display = 1; + count++; + } + + if (c->end_time == 0) + c->end_time = last_time; + + c = c->next; + } + p = p->next; + } + return count; +} + static int determine_display_tasks(u64 threshold) { struct per_pid *p; struct per_pidcomm *c; int count = 0; + if (process_filter) + return determine_display_tasks_filtered(); + p = all_data; while (p) { p->display = 0; @@ -1153,6 +1241,14 @@ static int __cmd_record(int argc, const char **argv) return cmd_record(i, rec_argv, NULL); } +static int +parse_process(const struct option *opt __used, const char *arg, int __used unset) +{ + if 
(arg) + add_process_filter(arg); + return 0; +} + static const struct option options[] = { OPT_STRING('i', "input", &input_name, "file", "input file name"), @@ -1160,8 +1256,11 @@ static const struct option options[] = { "output file name"), OPT_INTEGER('w', "width", &svg_page_width, "page width"), - OPT_BOOLEAN('p', "power-only", &power_only, + OPT_BOOLEAN('P', "power-only", &power_only, "output power data only"), + OPT_CALLBACK('p', "process", NULL, "process", + "process selector. Pass a pid or process name.", + parse_process), OPT_END() }; From ed52ce2e3c33dc7626a40fa2da766d1a6460e543 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 19 Oct 2009 17:17:57 -0200 Subject: [PATCH 162/470] perf tools: Add ->unmap_ip operation to struct map We need this because we get section relative addresses when reading the symtabs, but when a tool like 'perf annotate' needs to match these address to what 'objdump -dS' produces we need the address + section back again. So in annotate now we look at the 'struct hist_entry' instances (that weren't really being used) so that we iterate only over the symbols that had some hit and get the map where that particular hit happened so that we can get the right address to match with annotate. Verified that at least: perf annotate mmap_read_counter # Uses the ~/bin/perf binary perf annotate --vmlinux /home/acme/git/build/perf/vmlinux intel_pmu_enable_all on a 'perf record perf top' session seems to work. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith LKML-Reference: <1255979877-12533-1-git-send-email-acme@redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 62 ++++++++++++++++++++++++----------- tools/perf/util/event.h | 8 ++++- tools/perf/util/map.c | 6 ++-- tools/perf/util/symbol.c | 8 +++-- 4 files changed, 58 insertions(+), 26 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 56ba71658d7..06f10278b28 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -58,9 +58,12 @@ static void hist_hit(struct hist_entry *he, u64 ip) return; sym_size = sym->end - sym->start; - ip = he->map->map_ip(he->map, ip); offset = ip - sym->start; + if (verbose) + fprintf(stderr, "%s: ip=%Lx\n", __func__, + he->map->unmap_ip(he->map, ip)); + if (offset >= sym_size) return; @@ -83,8 +86,7 @@ static int hist_entry__add(struct thread *thread, struct map *map, count, level, &hit); if (he == NULL) return -ENOMEM; - if (hit) - hist_hit(he, ip); + hist_hit(he, ip); return 0; } @@ -260,14 +262,15 @@ process_event(event_t *event, unsigned long offset, unsigned long head) return 0; } -static int -parse_line(FILE *file, struct symbol *sym, u64 len) +static int parse_line(FILE *file, struct hist_entry *he, u64 len) { + struct symbol *sym = he->sym; char *line = NULL, *tmp, *tmp2; static const char *prev_line; static const char *prev_color; unsigned int offset; size_t line_len; + u64 start; s64 line_ip; int ret; char *c; @@ -304,6 +307,8 @@ parse_line(FILE *file, struct symbol *sym, u64 len) line_ip = -1; } + start = he->map->unmap_ip(he->map, sym->start); + if (line_ip != -1) { const char *path = NULL; unsigned int hits = 0; @@ -311,7 +316,7 @@ parse_line(FILE *file, struct symbol *sym, u64 len) const char *color; struct sym_ext *sym_ext = sym->priv; - offset = line_ip - sym->start; + offset = line_ip - start; if (offset < len) hits = sym->hist[offset]; @@ -390,8 +395,10 @@ static void free_source_line(struct symbol 
*sym, int len) /* Get the filename:line for the colored entries */ static void -get_source_line(struct symbol *sym, int len, const char *filename) +get_source_line(struct hist_entry *he, int len, const char *filename) { + struct symbol *sym = he->sym; + u64 start; int i; char cmd[PATH_MAX * 2]; struct sym_ext *sym_ext; @@ -404,6 +411,7 @@ get_source_line(struct symbol *sym, int len, const char *filename) return; sym_ext = sym->priv; + start = he->map->unmap_ip(he->map, sym->start); for (i = 0; i < len; i++) { char *path = NULL; @@ -415,7 +423,7 @@ get_source_line(struct symbol *sym, int len, const char *filename) if (sym_ext[i].percent <= 0.5) continue; - offset = sym->start + i; + offset = start + i; sprintf(cmd, "addr2line -e %s %016llx", filename, offset); fp = popen(cmd, "r"); if (!fp) @@ -465,8 +473,11 @@ static void print_summary(const char *filename) } } -static void annotate_sym(struct dso *dso, struct symbol *sym) +static void annotate_sym(struct hist_entry *he) { + struct map *map = he->map; + struct dso *dso = map->dso; + struct symbol *sym = he->sym; const char *filename = dso->long_name, *d_filename; u64 len; char command[PATH_MAX*2]; @@ -475,6 +486,12 @@ static void annotate_sym(struct dso *dso, struct symbol *sym) if (!filename) return; + if (verbose) + fprintf(stderr, "%s: filename=%s, sym=%s, start=%Lx, end=%Lx\n", + __func__, filename, sym->name, + map->unmap_ip(map, sym->start), + map->unmap_ip(map, sym->end)); + if (full_paths) d_filename = filename; else @@ -483,7 +500,7 @@ static void annotate_sym(struct dso *dso, struct symbol *sym) len = sym->end - sym->start; if (print_line) { - get_source_line(sym, len, filename); + get_source_line(he, len, filename); print_summary(filename); } @@ -496,7 +513,8 @@ static void annotate_sym(struct dso *dso, struct symbol *sym) dso, dso->long_name, sym, sym->name); sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s|grep -v %s", - sym->start, sym->end, filename, filename); + map->unmap_ip(map, sym->start), map->unmap_ip(map, sym->end), + filename, filename); if (verbose >= 3) printf("doing: %s\n", command); @@ -506,7 +524,7 @@ static void annotate_sym(struct dso *dso, struct symbol *sym) return; while (!feof(file)) { - if (parse_line(file, sym, len) < 0) + if (parse_line(file, he, len) < 0) break; } @@ -518,18 +536,22 @@ static void annotate_sym(struct dso *dso, struct symbol *sym) static void find_annotations(void) { struct rb_node *nd; - struct dso *dso; int count = 0; - list_for_each_entry(dso, &dsos, node) { + for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) { + struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node); - for (nd = rb_first(&dso->syms); nd; nd = rb_next(nd)) { - struct symbol *sym = rb_entry(nd, struct symbol, rb_node); + if (he->sym && he->sym->hist) { + annotate_sym(he); + count++; + /* + * Since we have a hist_entry per IP for the same + * symbol, free he->sym->hist to signal we already + * processed this symbol. 
+ */ + free(he->sym->hist); + he->sym->hist = NULL; - if (sym->hist) { - annotate_sym(dso, sym); - count++; - } } } diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index c2e62be6279..6b5be56a827 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -82,6 +82,7 @@ struct map { u64 end; u64 pgoff; u64 (*map_ip)(struct map *, u64); + u64 (*unmap_ip)(struct map *, u64); struct dso *dso; }; @@ -90,7 +91,12 @@ static inline u64 map__map_ip(struct map *map, u64 ip) return ip - map->start + map->pgoff; } -static inline u64 vdso__map_ip(struct map *map __used, u64 ip) +static inline u64 map__unmap_ip(struct map *map, u64 ip) +{ + return ip + map->start - map->pgoff; +} + +static inline u64 identity__map_ip(struct map *map __used, u64 ip) { return ip; } diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 804e0238273..4e203d144f9 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -54,9 +54,11 @@ static int strcommon(const char *pathname, char *cwd, int cwdlen) goto out_delete; if (self->dso == vdso || anon) - self->map_ip = vdso__map_ip; - else + self->map_ip = self->unmap_ip = identity__map_ip; + else { self->map_ip = map__map_ip; + self->unmap_ip = map__unmap_ip; + } } return self; out_delete: diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index faa84f5d4f5..3350119f690 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -337,7 +337,7 @@ static int kernel_maps__split_kallsyms(symbol_filter_t filter, int use_modules) return -1; } - map->map_ip = vdso__map_ip; + map->map_ip = map->unmap_ip = identity__map_ip; kernel_maps__insert(map); ++kernel_range; } @@ -790,7 +790,8 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, dso__delete(curr_dso); goto out_elf_end; } - curr_map->map_ip = vdso__map_ip; + curr_map->map_ip = identity__map_ip; + curr_map->unmap_ip = identity__map_ip; curr_dso->origin = DSO__ORIG_KERNEL; kernel_maps__insert(curr_map); dsos__add(curr_dso); @@ -1158,6 +1159,7 @@ static struct map *map__new2(u64 start, struct dso *dso) self->pgoff = 0; self->dso = dso; self->map_ip = map__map_ip; + self->unmap_ip = map__unmap_ip; RB_CLEAR_NODE(&self->rb_node); } return self; @@ -1259,7 +1261,7 @@ int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, if (kernel_map == NULL) goto out_delete_dso; - kernel_map->map_ip = vdso__map_ip; + kernel_map->map_ip = kernel_map->unmap_ip = identity__map_ip; if (use_modules && dsos__load_modules(sym_priv_size) < 0) { fprintf(stderr, "Failed to load list of modules in use! " From e42049926ebdcae24fdfdc8f0e3ff8f05f24a60b Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 20 Oct 2009 14:25:40 -0200 Subject: [PATCH 163/470] perf annotate: Use the sym_priv_size area for the histogram We have this sym_priv_size mechanism for attaching private areas to struct symbol entries but annotate wasn't using it, adding private areas to struct symbol in addition to a ->priv pointer. Scrap all that and use the sym_priv_size mechanism. 
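The sym_priv_size mechanism mentioned above works by over-allocating each struct symbol and
handing out the bytes placed just in front of it. A rough sketch of the layout (the accessor
below is an assumption about the existing dso__sym_priv() helper, not code added by this
patch, and the field name is assumed):

	/*
	 * symbol__new(..., priv_size) allocates:
	 *
	 *   [ priv_size bytes of per-symbol private data ][ struct symbol | name[] ]
	 *                                                  ^-- pointer handed out
	 *
	 * so a tool recovers its private area by stepping back over priv_size:
	 */
	static inline void *dso__sym_priv(struct dso *dso, struct symbol *sym)
	{
		return ((void *)sym) - dso->sym_priv_size;	/* field name assumed */
	}

perf annotate then keeps its struct sym_priv (the hit histogram plus the source-line
extension) in that area instead of growing struct symbol itself.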
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith LKML-Reference: <1256055940-19511-1-git-send-email-acme@redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 109 ++++++++++++++++++++++++---------- tools/perf/builtin-report.c | 2 +- tools/perf/util/data_map.c | 2 +- tools/perf/util/event.h | 8 ++- tools/perf/util/map.c | 21 ++++++- tools/perf/util/symbol.c | 56 ++++++----------- tools/perf/util/symbol.h | 17 ++---- 7 files changed, 132 insertions(+), 83 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 06f10278b28..245692530de 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -37,12 +37,44 @@ static int print_line; static unsigned long page_size; static unsigned long mmap_window = 32; +struct sym_hist { + u64 sum; + u64 ip[0]; +}; + struct sym_ext { struct rb_node node; double percent; char *path; }; +struct sym_priv { + struct sym_hist *hist; + struct sym_ext *ext; +}; + +static const char *sym_hist_filter; + +static int symbol_filter(struct map *map, struct symbol *sym) +{ + if (strcmp(sym->name, sym_hist_filter) == 0) { + struct sym_priv *priv = dso__sym_priv(map->dso, sym); + const int size = (sizeof(*priv->hist) + + (sym->end - sym->start) * sizeof(u64)); + + priv->hist = malloc(size); + if (priv->hist) + memset(priv->hist, 0, size); + return 0; + } + /* + * FIXME: We should really filter it out, as we don't want to go thru symbols + * we're not interested, and if a DSO ends up with no symbols, delete it too, + * but right now the kernel loading routines in symbol.c bail out if no symbols + * are found, fix it later. + */ + return 0; +} /* * collect histogram counts @@ -51,10 +83,16 @@ static void hist_hit(struct hist_entry *he, u64 ip) { unsigned int sym_size, offset; struct symbol *sym = he->sym; + struct sym_priv *priv; + struct sym_hist *h; he->count++; - if (!sym || !sym->hist) + if (!sym || !he->map) + return; + + priv = dso__sym_priv(he->map->dso, sym); + if (!priv->hist) return; sym_size = sym->end - sym->start; @@ -67,15 +105,16 @@ static void hist_hit(struct hist_entry *he, u64 ip) if (offset >= sym_size) return; - sym->hist_sum++; - sym->hist[offset]++; + h = priv->hist; + h->sum++; + h->ip[offset]++; if (verbose >= 3) printf("%p %s: count++ [ip: %p, %08Lx] => %Ld\n", (void *)(unsigned long)he->sym->start, he->sym->name, (void *)(unsigned long)ip, ip - he->sym->start, - sym->hist[offset]); + h->ip[offset]); } static int hist_entry__add(struct thread *thread, struct map *map, @@ -162,7 +201,9 @@ got_map: static int process_mmap_event(event_t *event, unsigned long offset, unsigned long head) { - struct map *map = map__new(&event->mmap, NULL, 0); + struct map *map = map__new(&event->mmap, NULL, 0, + sizeof(struct sym_priv), symbol_filter, + verbose); struct thread *thread = threads__findnew(event->mmap.pid); dump_printf("%p [%p]: PERF_RECORD_MMAP %d: [%p(%p) @ %p]: %s\n", @@ -314,17 +355,19 @@ static int parse_line(FILE *file, struct hist_entry *he, u64 len) unsigned int hits = 0; double percent = 0.0; const char *color; - struct sym_ext *sym_ext = sym->priv; + struct sym_priv *priv = dso__sym_priv(he->map->dso, sym); + struct sym_ext *sym_ext = priv->ext; + struct sym_hist *h = priv->hist; offset = line_ip - start; if (offset < len) - hits = sym->hist[offset]; + hits = h->ip[offset]; if (offset < len && sym_ext) { path = sym_ext[offset].path; percent = sym_ext[offset].percent; - } else if (sym->hist_sum) - 
percent = 100.0 * hits / sym->hist_sum; + } else if (h->sum) + percent = 100.0 * hits / h->sum; color = get_percent_color(percent); @@ -377,9 +420,10 @@ static void insert_source_line(struct sym_ext *sym_ext) rb_insert_color(&sym_ext->node, &root_sym_ext); } -static void free_source_line(struct symbol *sym, int len) +static void free_source_line(struct hist_entry *he, int len) { - struct sym_ext *sym_ext = sym->priv; + struct sym_priv *priv = dso__sym_priv(he->map->dso, he->sym); + struct sym_ext *sym_ext = priv->ext; int i; if (!sym_ext) @@ -389,7 +433,7 @@ static void free_source_line(struct symbol *sym, int len) free(sym_ext[i].path); free(sym_ext); - sym->priv = NULL; + priv->ext = NULL; root_sym_ext = RB_ROOT; } @@ -402,15 +446,16 @@ get_source_line(struct hist_entry *he, int len, const char *filename) int i; char cmd[PATH_MAX * 2]; struct sym_ext *sym_ext; + struct sym_priv *priv = dso__sym_priv(he->map->dso, sym); + struct sym_hist *h = priv->hist; - if (!sym->hist_sum) + if (!h->sum) return; - sym->priv = calloc(len, sizeof(struct sym_ext)); - if (!sym->priv) + sym_ext = priv->ext = calloc(len, sizeof(struct sym_ext)); + if (!priv->ext) return; - sym_ext = sym->priv; start = he->map->unmap_ip(he->map, sym->start); for (i = 0; i < len; i++) { @@ -419,7 +464,7 @@ get_source_line(struct hist_entry *he, int len, const char *filename) u64 offset; FILE *fp; - sym_ext[i].percent = 100.0 * sym->hist[i] / sym->hist_sum; + sym_ext[i].percent = 100.0 * h->ip[i] / h->sum; if (sym_ext[i].percent <= 0.5) continue; @@ -530,7 +575,7 @@ static void annotate_sym(struct hist_entry *he) pclose(file); if (print_line) - free_source_line(sym, len); + free_source_line(he, len); } static void find_annotations(void) @@ -540,19 +585,23 @@ static void find_annotations(void) for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) { struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node); + struct sym_priv *priv; - if (he->sym && he->sym->hist) { - annotate_sym(he); - count++; - /* - * Since we have a hist_entry per IP for the same - * symbol, free he->sym->hist to signal we already - * processed this symbol. - */ - free(he->sym->hist); - he->sym->hist = NULL; + if (he->sym == NULL) + continue; - } + priv = dso__sym_priv(he->map->dso, he->sym); + if (priv->hist == NULL) + continue; + + annotate_sym(he); + count++; + /* + * Since we have a hist_entry per IP for the same symbol, free + * he->sym->hist to signal we already processed this symbol. 
+ */ + free(priv->hist); + priv->hist = NULL; } if (!count) @@ -593,7 +642,7 @@ static int __cmd_annotate(void) exit(0); } - if (load_kernel() < 0) { + if (load_kernel(sizeof(struct sym_priv), symbol_filter) < 0) { perror("failed to load kernel symbols"); return EXIT_FAILURE; } diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index a4f8cc20915..bee207ce589 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -680,7 +680,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) static int process_mmap_event(event_t *event, unsigned long offset, unsigned long head) { - struct map *map = map__new(&event->mmap, cwd, cwdlen); + struct map *map = map__new(&event->mmap, cwd, cwdlen, 0, NULL, verbose); struct thread *thread = threads__findnew(event->mmap.pid); dump_printf("%p [%p]: PERF_RECORD_MMAP %d/%d: [%p(%p) @ %p]: %s\n", diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c index 242b0555ab9..18accb8fee4 100644 --- a/tools/perf/util/data_map.c +++ b/tools/perf/util/data_map.c @@ -130,7 +130,7 @@ int mmap_dispatch_perf_file(struct perf_header **pheader, if (curr_handler->sample_type_check(sample_type) < 0) exit(-1); - if (load_kernel() < 0) { + if (load_kernel(0, NULL) < 0) { perror("failed to load kernel symbols"); return EXIT_FAILURE; } diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 6b5be56a827..db59c8bbe49 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -101,7 +101,13 @@ static inline u64 identity__map_ip(struct map *map __used, u64 ip) return ip; } -struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen); +struct symbol; + +typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym); + +struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen, + unsigned int sym_priv_size, symbol_filter_t filter, + int v); struct map *map__clone(struct map *self); int map__overlap(struct map *l, struct map *r); size_t map__fprintf(struct map *self, FILE *fp); diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 4e203d144f9..55079c0200e 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -3,6 +3,7 @@ #include #include #include +#include "debug.h" static inline int is_anon_memory(const char *filename) { @@ -19,7 +20,9 @@ static int strcommon(const char *pathname, char *cwd, int cwdlen) return n; } - struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen) +struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen, + unsigned int sym_priv_size, symbol_filter_t filter, + int v) { struct map *self = malloc(sizeof(*self)); @@ -27,6 +30,7 @@ static int strcommon(const char *pathname, char *cwd, int cwdlen) const char *filename = event->filename; char newfilename[PATH_MAX]; int anon; + bool new_dso; if (cwd) { int n = strcommon(filename, cwd, cwdlen); @@ -49,10 +53,23 @@ static int strcommon(const char *pathname, char *cwd, int cwdlen) self->end = event->start + event->len; self->pgoff = event->pgoff; - self->dso = dsos__findnew(filename); + self->dso = dsos__findnew(filename, sym_priv_size, &new_dso); if (self->dso == NULL) goto out_delete; + if (new_dso) { + int nr = dso__load(self->dso, self, filter, v); + + if (nr < 0) + eprintf("Failed to open %s, continuing " + "without symbols\n", + self->dso->long_name); + else if (nr == 0) + eprintf("No symbols found in %s, maybe " + "install a debug package?\n", + self->dso->long_name); + } + if (self->dso == vdso || anon) self->map_ip = self->unmap_ip = 
identity__map_ip; else { diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 3350119f690..0a4898480d6 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -11,8 +11,6 @@ #include #include -const char *sym_hist_filter; - enum dso_origin { DSO__ORIG_KERNEL = 0, DSO__ORIG_JAVA_JIT, @@ -86,22 +84,16 @@ static struct symbol *symbol__new(u64 start, u64 len, const char *name, if (!self) return NULL; - if (v > 2) - printf("new symbol: %016Lx [%08lx]: %s, hist: %p\n", - start, (unsigned long)len, name, self->hist); - - self->hist = NULL; - self->hist_sum = 0; - - if (sym_hist_filter && !strcmp(name, sym_hist_filter)) - self->hist = calloc(sizeof(u64), len); - if (priv_size) { memset(self, 0, priv_size); self = ((void *)self) + priv_size; } self->start = start; self->end = len ? start + len - 1 : start; + + if (v > 2) + printf("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end); + memcpy(self->name, name, namelen); return self; @@ -919,7 +911,8 @@ char dso__symtab_origin(const struct dso *self) return origin[self->origin]; } -int dso__load(struct dso *self, struct map *map, symbol_filter_t filter, int v) +int dso__load(struct dso *self, struct map *map, + symbol_filter_t filter, int v) { int size = PATH_MAX; char *name = malloc(size), *build_id = NULL; @@ -1335,33 +1328,21 @@ static struct dso *dsos__find(const char *name) return NULL; } -struct dso *dsos__findnew(const char *name) +struct dso *dsos__findnew(const char *name, unsigned int sym_priv_size, + bool *is_new) { struct dso *dso = dsos__find(name); - int nr; - if (dso) - return dso; - - dso = dso__new(name, 0); - if (!dso) - goto out_delete_dso; - - nr = dso__load(dso, NULL, NULL, verbose); - if (nr < 0) { - eprintf("Failed to open: %s\n", name); - goto out_delete_dso; - } - if (!nr) - eprintf("No symbols found in: %s, maybe install a debug package?\n", name); - - dsos__add(dso); + if (!dso) { + dso = dso__new(name, sym_priv_size); + if (dso) { + dsos__add(dso); + *is_new = true; + } + } else + *is_new = false; return dso; - -out_delete_dso: - dso__delete(dso); - return NULL; } void dsos__fprintf(FILE *fp) @@ -1372,9 +1353,10 @@ void dsos__fprintf(FILE *fp) dso__fprintf(pos, fp); } -int load_kernel(void) +int load_kernel(unsigned int sym_priv_size, symbol_filter_t filter) { - if (dsos__load_kernel(vmlinux_name, 0, NULL, verbose, modules) <= 0) + if (dsos__load_kernel(vmlinux_name, sym_priv_size, + filter, verbose, modules) <= 0) return -1; vdso = dso__new("[vdso]", 0); diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 2e4522edeb0..c2a777de9b7 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -2,6 +2,7 @@ #define __PERF_SYMBOL 1 #include +#include #include "types.h" #include #include @@ -35,9 +36,6 @@ struct symbol { struct rb_node rb_node; u64 start; u64 end; - u64 hist_sum; - u64 *hist; - void *priv; char name[0]; }; @@ -54,10 +52,6 @@ struct dso { char name[0]; }; -extern const char *sym_hist_filter; - -typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym); - struct dso *dso__new(const char *name, unsigned int sym_priv_size); void dso__delete(struct dso *self); @@ -70,15 +64,16 @@ struct symbol *dso__find_symbol(struct dso *self, u64 ip); int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, symbol_filter_t filter, int verbose, int modules); -int dso__load(struct dso *self, struct map *map, symbol_filter_t filter, - int verbose); -struct dso *dsos__findnew(const char *name); +struct dso *dsos__findnew(const char *name, 
unsigned int sym_priv_size, + bool *is_new); +int dso__load(struct dso *self, struct map *map, + symbol_filter_t filter, int v); void dsos__fprintf(FILE *fp); size_t dso__fprintf(struct dso *self, FILE *fp); char dso__symtab_origin(const struct dso *self); -int load_kernel(void); +int load_kernel(unsigned int sym_priv_size, symbol_filter_t filter); void symbol__init(void); From 8f0b037398a909ccf703ad5f5803066db6327f22 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 20 Oct 2009 15:08:29 -0200 Subject: [PATCH 164/470] perf annotate: Remove requirement of passing a symbol name If the user doesn't pass a symbol name to annotate, it will annotate all the symbols that have hits, in order, just like 'perf report -s comm,dso,symbol'. This is a natural followup patch to the one that uses output_hists to find the symbols with hits. The common case is to annotate the first few entries at the top of a perf report, so lets type less characters. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith LKML-Reference: <1256058509-19678-1-git-send-email-acme@redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 245692530de..99bac6aa72c 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -57,7 +57,8 @@ static const char *sym_hist_filter; static int symbol_filter(struct map *map, struct symbol *sym) { - if (strcmp(sym->name, sym_hist_filter) == 0) { + if (sym_hist_filter == NULL || + strcmp(sym->name, sym_hist_filter) == 0) { struct sym_priv *priv = dso__sym_priv(map->dso, sym); const int size = (sizeof(*priv->hist) + (sym->end - sym->start) * sizeof(u64)); @@ -581,7 +582,6 @@ static void annotate_sym(struct hist_entry *he) static void find_annotations(void) { struct rb_node *nd; - int count = 0; for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) { struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node); @@ -595,7 +595,6 @@ static void find_annotations(void) continue; annotate_sym(he); - count++; /* * Since we have a hist_entry per IP for the same symbol, free * he->sym->hist to signal we already processed this symbol. @@ -603,9 +602,6 @@ static void find_annotations(void) free(priv->hist); priv->hist = NULL; } - - if (!count) - printf(" Error: symbol '%s' not present amongst the samples.\n", sym_hist_filter); } static int __cmd_annotate(void) @@ -793,9 +789,6 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used) sym_hist_filter = argv[0]; } - if (!sym_hist_filter) - usage_with_options(annotate_usage, options); - setup_pager(); if (field_sep && *field_sep == '.') { From c88e4bf60de6253a048cf4e6b3b0715e543e0460 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 20 Oct 2009 15:54:55 -0200 Subject: [PATCH 165/470] perf top: Fix symbol annotation We need to use map->unmap_ip() here too to match section relative symbol address to the absolute address needed to match objdump -dS addresses. 
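For reference, the two helpers are exact inverses (both already appear in the
tools/perf/util/event.h hunks earlier in this series); a compressed view:

	/* absolute, mapped address -> dso/section relative address */
	static inline u64 map__map_ip(struct map *map, u64 ip)
	{
		return ip - map->start + map->pgoff;
	}

	/* dso/section relative address -> the absolute address objdump -dS prints */
	static inline u64 map__unmap_ip(struct map *map, u64 ip)
	{
		return ip + map->start - map->pgoff;
	}

so the section relative sym->start/sym->end have to be run through map->unmap_ip() before
being handed to objdump, exactly as builtin-annotate.c already does after the previous
patches.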
Reported-by: Mike Galbraith Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1256061295-19835-1-git-send-email-acme@redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index cc662863030..fa20345a0ab 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -141,7 +141,8 @@ static void parse_source(struct sym_entry *syme) sprintf(command, "objdump --start-address=0x%016Lx " "--stop-address=0x%016Lx -dS %s", - sym->start, sym->end, path); + map->unmap_ip(map, sym->start), + map->unmap_ip(map, sym->end), path); file = popen(command, "r"); if (!file) @@ -173,11 +174,11 @@ static void parse_source(struct sym_entry *syme) if (strlen(src->line)>8 && src->line[8] == ':') { src->eip = strtoull(src->line, NULL, 16); - src->eip += map->start; + src->eip = map->unmap_ip(map, src->eip); } if (strlen(src->line)>8 && src->line[16] == ':') { src->eip = strtoull(src->line, NULL, 16); - src->eip += map->start; + src->eip = map->unmap_ip(map, src->eip); } } pclose(file); From 06ed6ba5ecb771cc3a967838a4bb1d9cbd8786b9 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 20 Oct 2009 12:55:24 -0400 Subject: [PATCH 166/470] x86: Fix group attribute decoding bug Fix a typo in inat_get_group_attribute() which should refer inat_group_tables, not inat_escape_tables. Signed-off-by: Masami Hiramatsu Cc: systemtap Cc: DLE Cc: Jim Keniston Cc: Frederic Weisbecker LKML-Reference: <20091020165524.4145.97333.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- arch/x86/lib/inat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/lib/inat.c b/arch/x86/lib/inat.c index 054656a01df..3fb5998b823 100644 --- a/arch/x86/lib/inat.c +++ b/arch/x86/lib/inat.c @@ -68,7 +68,7 @@ insn_attr_t inat_get_group_attribute(insn_byte_t modrm, insn_byte_t last_pfx, if (!table) return inat_group_common_attribute(grp_attr); if (inat_has_variant(table[X86_MODRM_REG(modrm)]) && m) { - table = inat_escape_tables[n][m]; + table = inat_group_tables[n][m]; if (!table) return inat_group_common_attribute(grp_attr); } From 9983d60d74db9e544c6cb6f65351849fe8e9c1de Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 20 Oct 2009 12:55:31 -0400 Subject: [PATCH 167/470] x86: Add AES opcodes to opcode map Add Intel AES opcodes to x86 opcode map. These opcodes are used in arch/x86/crypt/aesni-intel_asm.S. 
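For example, the new "dc: aesenc Vdq,Wdq (66)" entry under the 0x0f 0x38 table corresponds
to byte sequences like the following (illustrative only; the ModRM value is just one
possible operand encoding):

	/* aesenc %xmm1,%xmm0:
	 * 66       mandatory operand-size prefix, per the "(66)" annotation
	 * 0f 38    three-byte escape
	 * dc       AESENC opcode
	 * c1       ModRM: mod=11, reg=xmm0, rm=xmm1
	 */
	static const unsigned char aesenc_xmm1_xmm0[] = { 0x66, 0x0f, 0x38, 0xdc, 0xc1 };

which the instruction decoder should now report as a single 5-byte instruction.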
Signed-off-by: Masami Hiramatsu Cc: systemtap Cc: DLE Cc: Jim Keniston Cc: Frederic Weisbecker LKML-Reference: <20091020165531.4145.21872.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- arch/x86/lib/x86-opcode-map.txt | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index 894497f7780..701c4678687 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt @@ -567,7 +567,7 @@ fe: paddd Pq,Qq | paddd Vdq,Wdq (66) ff: EndTable -Table: 3-byte opcode 1 +Table: 3-byte opcode 1 (0x0f 0x38) Referrer: 3-byte escape 1 # 0x0f 0x38 0x00-0x0f 00: pshufb Pq,Qq | pshufb Vdq,Wdq (66) @@ -642,11 +642,16 @@ Referrer: 3-byte escape 1 41: phminposuw Vdq,Wdq (66) 80: INVEPT Gd/q,Mdq (66) 81: INVPID Gd/q,Mdq (66) +db: aesimc Vdq,Wdq (66) +dc: aesenc Vdq,Wdq (66) +dd: aesenclast Vdq,Wdq (66) +de: aesdec Vdq,Wdq (66) +df: aesdeclast Vdq,Wdq (66) f0: MOVBE Gv,Mv | CRC32 Gd,Eb (F2) f1: MOVBE Mv,Gv | CRC32 Gd,Ev (F2) EndTable -Table: 3-byte opcode 2 +Table: 3-byte opcode 2 (0x0f 0x3a) Referrer: 3-byte escape 2 # 0x0f 0x3a 0x00-0xff 08: roundps Vdq,Wdq,Ib (66) @@ -671,6 +676,7 @@ Referrer: 3-byte escape 2 61: pcmpestri Vdq,Wdq,Ib (66) 62: pcmpistrm Vdq,Wdq,Ib (66) 63: pcmpistri Vdq,Wdq,Ib (66) +df: aeskeygenassist Vdq,Wdq,Ib (66) EndTable GrpTable: Grp1 From 60d526f7fa6246b8e32d5b45610d625a5608d988 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 20 Oct 2009 19:19:34 -0400 Subject: [PATCH 168/470] perf tools: Add 'make DEBUG=1' to remove the -O6 cflag When using gdb to debug perf, it is practically impossible to use when perf is compiled with -O6. For developers, this patch adds the DEBUG feature to the make command line so that a developer can easily remove the optimization flag. LKML-Reference: <1255590330.8392.446.camel@twins> Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091020232033.984323261@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 64c6b7b57d8..65e6e52fe37 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -201,7 +201,14 @@ EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wold-style-definition EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-prototypes EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wdeclaration-after-statement -CFLAGS = $(MBITS) -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -fstack-protector-all -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) +ifeq ("$(origin DEBUG)", "command line") + PERF_DEBUG = $(DEBUG) +endif +ifndef PERF_DEBUG + CFLAGS_OPTIMIZE = -O6 +endif + +CFLAGS = $(MBITS) -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -fstack-protector-all -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) LDFLAGS = -lpthread -lrt -lelf -lm ALL_CFLAGS = $(CFLAGS) ALL_LDFLAGS = $(LDFLAGS) From 4e3b799d7dbb2a12ca8dca8d3594d32095772973 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 20 Oct 2009 19:19:35 -0400 Subject: [PATCH 169/470] perf tools: Use strsep() over strtok_r() for parsing single line The second argument in the strtok_r() function is not to be used generically and can have different implementations. Currently the function parsing of the perf trace code uses the second argument to copy data from. This can crash the tool or just have unpredictable results. The correct solution is to use strsep() which has a defined result. 
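In a nutshell, the resulting parsing pattern looks like this sketch (simplified, not the
exact hunk below), where the NULL check is what gives the well-defined failure mode:

	#include <stdio.h>
	#include <string.h>

	static int parse_addr_and_fmt(char *line)
	{
		char *addr_str = strsep(&line, ":");	/* 'line' now points past the ':' */

		if (line == NULL) {			/* no ':' found -> malformed line */
			fprintf(stderr, "error parsing print strings\n");
			return -1;
		}
		/* the format string still carries a leading space, skip it */
		printf("addr=%s fmt=%s\n", addr_str, line + 1);
		return 0;
	}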
I also added a check to see if the result was correct, and will break out of the loop in case it fails to parse as expected. Reported-by: Arnaldo Carvalho de Melo Signed-off-by: Steven Rostedt Cc: Peter Zijlstra Cc: Frederic Weisbecker LKML-Reference: <20091020232034.237814877@goodmis.org> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 4b61b497040..eae56050308 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -286,16 +286,19 @@ void parse_ftrace_printk(char *file, unsigned int size __unused) char *line; char *next = NULL; char *addr_str; - char *fmt; int i; line = strtok_r(file, "\n", &next); while (line) { + addr_str = strsep(&line, ":"); + if (!line) { + warning("error parsing print strings"); + break; + } item = malloc_or_die(sizeof(*item)); - addr_str = strtok_r(line, ":", &fmt); item->addr = strtoull(addr_str, NULL, 16); /* fmt still has a space, skip it */ - item->printk = strdup(fmt+1); + item->printk = strdup(line+1); item->next = list; list = item; line = strtok_r(NULL, "\n", &next); From 9bf4e7fba8006d19846fec877b6da0616b2772de Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 21 Oct 2009 14:39:51 +0200 Subject: [PATCH 170/470] x86, instruction decoder: Fix test_get_len build rules Add the kernel source include file as well to the include files search path, to fix this build bug: In file included from arch/x86/tools/test_get_len.c:28: arch/x86/lib/insn.c:21:26: error: linux/string.h: No such file or directory Cc: Masami Hiramatsu Cc: systemtap Cc: DLE Cc: Jim Keniston Cc: Frederic Weisbecker LKML-Reference: <20091020165531.4145.21872.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- arch/x86/tools/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile index 1bd006c8156..5e295d95dc2 100644 --- a/arch/x86/tools/Makefile +++ b/arch/x86/tools/Makefile @@ -8,8 +8,8 @@ posttest: $(obj)/test_get_len vmlinux hostprogs-y := test_get_len # -I needed for generated C source and C source which in the kernel tree. -HOSTCFLAGS_test_get_len.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ +HOSTCFLAGS_test_get_len.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/ -# Dependancies are also needed. +# Dependencies are also needed. $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c From af0a6fa46388e1e0c2d1a672aad84f8f6ef0b20b Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 22 Oct 2009 23:23:22 +0200 Subject: [PATCH 171/470] perf tools: Fix missing top level callchain While recursively printing the branches of each callchains, we forget to display the root. It is never printed. Say we have: symbol f1 f2 | -------- f3 | f4 | ---------f5 f6 Actually we never see that, instead it displays: symbol | --------- f3 | f4 | --------- f5 f6 However f1 is always the same than "symbol" and if we are sorting by symbols first then "symbol", f1 and f2 will be well aligned like in the above example, so displaying f1 looks redundant here. 
But if we are sorting by something else first (dso, comm, etc...), displaying f1 doesn't look redundant but rather necessary because the symbol is not well aligned anymore with its callchain: comm dso symbol f1 f2 | --------- [...] And we want the callchain to be obvious. So we fix the bug by printing the root branch, but we also filter its first entry if we are sorting by symbols first. Reported-by: Anton Blanchard Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras LKML-Reference: <1256246604-17156-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-report.c | 40 ++++++++++++++++++++++++++++++------- tools/perf/util/sort.c | 10 +++++++--- tools/perf/util/sort.h | 1 + 3 files changed, 41 insertions(+), 10 deletions(-) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index bee207ce589..3d8c52220f1 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -122,8 +122,8 @@ static void init_rem_hits(void) } static size_t -callchain__fprintf_graph(FILE *fp, struct callchain_node *self, - u64 total_samples, int depth, int depth_mask) +__callchain__fprintf_graph(FILE *fp, struct callchain_node *self, + u64 total_samples, int depth, int depth_mask) { struct rb_node *node, *next; struct callchain_node *child; @@ -174,9 +174,9 @@ callchain__fprintf_graph(FILE *fp, struct callchain_node *self, new_total, cumul); } - ret += callchain__fprintf_graph(fp, child, new_total, - depth + 1, - new_depth_mask | (1 << depth)); + ret += __callchain__fprintf_graph(fp, child, new_total, + depth + 1, + new_depth_mask | (1 << depth)); node = next; } @@ -196,6 +196,33 @@ callchain__fprintf_graph(FILE *fp, struct callchain_node *self, return ret; } +static size_t +callchain__fprintf_graph(FILE *fp, struct callchain_node *self, + u64 total_samples) +{ + struct callchain_list *chain; + int i = 0; + int ret = 0; + + list_for_each_entry(chain, &self->val, list) { + if (chain->ip >= PERF_CONTEXT_MAX) + continue; + + if (!i++ && sort_by_sym_first) + continue; + + if (chain->sym) + ret += fprintf(fp, " %s\n", chain->sym->name); + else + ret += fprintf(fp, " %p\n", + (void *)(long)chain->ip); + } + + ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1); + + return ret; +} + static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self, u64 total_samples) @@ -244,8 +271,7 @@ hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self, break; case CHAIN_GRAPH_ABS: /* Falldown */ case CHAIN_GRAPH_REL: - ret += callchain__fprintf_graph(fp, chain, - total_samples, 1, 1); + ret += callchain__fprintf_graph(fp, chain, total_samples); case CHAIN_NONE: default: break; diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 40c9acd41ca..60ced707bd6 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -5,8 +5,9 @@ char default_parent_pattern[] = "^sys_|^do_page_fault"; char *parent_pattern = default_parent_pattern; char default_sort_order[] = "comm,dso,symbol"; char *sort_order = default_sort_order; -int sort__need_collapse = 0; -int sort__has_parent = 0; +int sort__need_collapse = 0; +int sort__has_parent = 0; +int sort_by_sym_first; unsigned int dsos__col_width; unsigned int comms__col_width; @@ -265,6 +266,10 @@ int sort_dimension__add(const char *tok) sort__has_parent = 1; } + if (list_empty(&hist_entry__sort_list) && + !strcmp(sd->name, "symbol")) + sort_by_sym_first = true; + list_add_tail(&sd->entry->list, &hist_entry__sort_list); 
sd->taken = 1; @@ -273,4 +278,3 @@ int sort_dimension__add(const char *tok) return -ESRCH; } - diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index 13806d782af..24c2b709f0d 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -39,6 +39,7 @@ extern struct sort_entry sort_parent; extern unsigned int dsos__col_width; extern unsigned int comms__col_width; extern unsigned int threads__col_width; +extern int sort_by_sym_first; struct hist_entry { struct rb_node rb_node; From a4fb581b15949cfd10b64c8af37bc106e95307f3 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 22 Oct 2009 23:23:23 +0200 Subject: [PATCH 172/470] perf tools: Bind callchains to the first sort dimension column Currently, the callchains are displayed using a constant left margin. So depending on the current sort dimension configuration, callchains may appear to be well attached to the first sort dimension column field which is mostly the case, except when the first dimension of sorting is done by comm, because these are right aligned. This patch binds the callchain to the first letter in the first column, whatever type of column it is (dso, comm, symbol). Before: 0.80% perf [k] __lock_acquire __lock_acquire lock_acquire | |--58.33%-- _spin_lock | | | |--28.57%-- inotify_should_send_event | | fsnotify | | __fsnotify_parent After: 0.80% perf [k] __lock_acquire __lock_acquire lock_acquire | |--58.33%-- _spin_lock | | | |--28.57%-- inotify_should_send_event | | fsnotify | | __fsnotify_parent Also, for clarity, we don't put anymore the callchain as is but: - If we have a top level ancestor in the callchain, start it with a first ascii hook. Before: 0.80% perf [kernel] [k] __lock_acquire __lock_acquire lock_acquire | |--58.33%-- _spin_lock | | | |--28.57%-- inotify_should_send_event | | fsnotify [..] [..] After: 0.80% perf [kernel] [k] __lock_acquire | --- __lock_acquire lock_acquire | |--58.33%-- _spin_lock | | | |--28.57%-- inotify_should_send_event | | fsnotify [..] [..] 
- Otherwise, if we have several top level ancestors, then display these like we did before: 1.69% Xorg | |--21.21%-- vread_hpet | 0x7fffd85b46fc | 0x7fffd85b494d | 0x7f4fafb4e54d | |--15.15%-- exaOffscreenAlloc | |--9.09%-- I830WaitLpRing Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras Cc: Anton Blanchard LKML-Reference: <1256246604-17156-2-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-report.c | 82 ++++++++++++++++++++++++++++--------- tools/perf/util/sort.c | 18 ++++++-- tools/perf/util/sort.h | 10 ++++- tools/perf/util/thread.c | 11 +++++ tools/perf/util/thread.h | 2 + 5 files changed, 99 insertions(+), 24 deletions(-) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 3d8c52220f1..72d58421223 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -59,12 +59,28 @@ static struct perf_header *header; static u64 sample_type; -static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask) + +static size_t +callchain__fprintf_left_margin(FILE *fp, int left_margin) +{ + int i; + int ret; + + ret = fprintf(fp, " "); + + for (i = 0; i < left_margin; i++) + ret += fprintf(fp, " "); + + return ret; +} + +static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask, + int left_margin) { int i; size_t ret = 0; - ret += fprintf(fp, "%s", " "); + ret += callchain__fprintf_left_margin(fp, left_margin); for (i = 0; i < depth; i++) if (depth_mask & (1 << i)) @@ -79,12 +95,12 @@ static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask) static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, int depth, int depth_mask, int count, u64 total_samples, - int hits) + int hits, int left_margin) { int i; size_t ret = 0; - ret += fprintf(fp, "%s", " "); + ret += callchain__fprintf_left_margin(fp, left_margin); for (i = 0; i < depth; i++) { if (depth_mask & (1 << i)) ret += fprintf(fp, "|"); @@ -123,7 +139,8 @@ static void init_rem_hits(void) static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self, - u64 total_samples, int depth, int depth_mask) + u64 total_samples, int depth, int depth_mask, + int left_margin) { struct rb_node *node, *next; struct callchain_node *child; @@ -164,7 +181,8 @@ __callchain__fprintf_graph(FILE *fp, struct callchain_node *self, * But we keep the older depth mask for the line seperator * to keep the level link until we reach the last child */ - ret += ipchain__fprintf_graph_line(fp, depth, depth_mask); + ret += ipchain__fprintf_graph_line(fp, depth, depth_mask, + left_margin); i = 0; list_for_each_entry(chain, &child->val, list) { if (chain->ip >= PERF_CONTEXT_MAX) @@ -172,11 +190,13 @@ __callchain__fprintf_graph(FILE *fp, struct callchain_node *self, ret += ipchain__fprintf_graph(fp, chain, depth, new_depth_mask, i++, new_total, - cumul); + cumul, + left_margin); } ret += __callchain__fprintf_graph(fp, child, new_total, depth + 1, - new_depth_mask | (1 << depth)); + new_depth_mask | (1 << depth), + left_margin); node = next; } @@ -190,17 +210,19 @@ __callchain__fprintf_graph(FILE *fp, struct callchain_node *self, ret += ipchain__fprintf_graph(fp, &rem_hits, depth, new_depth_mask, 0, new_total, - remaining); + remaining, left_margin); } return ret; } + static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self, - u64 total_samples) + u64 total_samples, int left_margin) { struct callchain_list *chain; + bool printed = 
false; int i = 0; int ret = 0; @@ -208,17 +230,27 @@ callchain__fprintf_graph(FILE *fp, struct callchain_node *self, if (chain->ip >= PERF_CONTEXT_MAX) continue; - if (!i++ && sort_by_sym_first) + if (!i++ && sort__first_dimension == SORT_SYM) continue; + if (!printed) { + ret += callchain__fprintf_left_margin(fp, left_margin); + ret += fprintf(fp, "|\n"); + ret += callchain__fprintf_left_margin(fp, left_margin); + ret += fprintf(fp, "---"); + + left_margin += 3; + printed = true; + } else + ret += callchain__fprintf_left_margin(fp, left_margin); + if (chain->sym) - ret += fprintf(fp, " %s\n", chain->sym->name); + ret += fprintf(fp, " %s\n", chain->sym->name); else - ret += fprintf(fp, " %p\n", - (void *)(long)chain->ip); + ret += fprintf(fp, " %p\n", (void *)(long)chain->ip); } - ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1); + ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin); return ret; } @@ -251,7 +283,7 @@ callchain__fprintf_flat(FILE *fp, struct callchain_node *self, static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self, - u64 total_samples) + u64 total_samples, int left_margin) { struct rb_node *rb_node; struct callchain_node *chain; @@ -271,7 +303,8 @@ hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self, break; case CHAIN_GRAPH_ABS: /* Falldown */ case CHAIN_GRAPH_REL: - ret += callchain__fprintf_graph(fp, chain, total_samples); + ret += callchain__fprintf_graph(fp, chain, total_samples, + left_margin); case CHAIN_NONE: default: break; @@ -316,8 +349,19 @@ hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples) ret += fprintf(fp, "\n"); - if (callchain) - hist_entry_callchain__fprintf(fp, self, total_samples); + if (callchain) { + int left_margin = 0; + + if (sort__first_dimension == SORT_COMM) { + se = list_first_entry(&hist_entry__sort_list, typeof(*se), + list); + left_margin = se->width ? 
*se->width : 0; + left_margin -= thread__comm_len(self->thread); + } + + hist_entry_callchain__fprintf(fp, self, total_samples, + left_margin); + } return ret; } diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 60ced707bd6..b490354d1b2 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -7,7 +7,8 @@ char default_sort_order[] = "comm,dso,symbol"; char *sort_order = default_sort_order; int sort__need_collapse = 0; int sort__has_parent = 0; -int sort_by_sym_first; + +enum sort_type sort__first_dimension; unsigned int dsos__col_width; unsigned int comms__col_width; @@ -266,9 +267,18 @@ int sort_dimension__add(const char *tok) sort__has_parent = 1; } - if (list_empty(&hist_entry__sort_list) && - !strcmp(sd->name, "symbol")) - sort_by_sym_first = true; + if (list_empty(&hist_entry__sort_list)) { + if (!strcmp(sd->name, "pid")) + sort__first_dimension = SORT_PID; + else if (!strcmp(sd->name, "comm")) + sort__first_dimension = SORT_COMM; + else if (!strcmp(sd->name, "dso")) + sort__first_dimension = SORT_DSO; + else if (!strcmp(sd->name, "symbol")) + sort__first_dimension = SORT_SYM; + else if (!strcmp(sd->name, "parent")) + sort__first_dimension = SORT_PARENT; + } list_add_tail(&sd->entry->list, &hist_entry__sort_list); sd->taken = 1; diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index 24c2b709f0d..333e664ff45 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -39,7 +39,7 @@ extern struct sort_entry sort_parent; extern unsigned int dsos__col_width; extern unsigned int comms__col_width; extern unsigned int threads__col_width; -extern int sort_by_sym_first; +extern enum sort_type sort__first_dimension; struct hist_entry { struct rb_node rb_node; @@ -54,6 +54,14 @@ struct hist_entry { struct rb_root sorted_chain; }; +enum sort_type { + SORT_PID, + SORT_COMM, + SORT_DSO, + SORT_SYM, + SORT_PARENT +}; + /* * configurable sorting bits */ diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index f53fad7c0a8..8cb47f1d8a7 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -33,6 +33,17 @@ int thread__set_comm(struct thread *self, const char *comm) return self->comm ? 0 : -ENOMEM; } +int thread__comm_len(struct thread *self) +{ + if (!self->comm_len) { + if (!self->comm) + return 0; + self->comm_len = strlen(self->comm); + } + + return self->comm_len; +} + static size_t thread__fprintf(struct thread *self, FILE *fp) { struct rb_node *nd; diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 1abef3b7455..53addd77ce8 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -12,9 +12,11 @@ struct thread { pid_t pid; char shortname[3]; char *comm; + int comm_len; }; int thread__set_comm(struct thread *self, const char *comm); +int thread__comm_len(struct thread *self); struct thread *threads__findnew(pid_t pid); struct thread *register_idle_thread(void); void thread__insert_map(struct thread *self, struct map *map); From 802da5f2289bbe363acef084805195c11f453c48 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 22 Oct 2009 23:23:24 +0200 Subject: [PATCH 173/470] perf tools: Drop asm/types.h wrapper Wrapping the kernel headers is dangerous when it comes to arch headers. Once we wrap asm/types.h, it will also replace the glibc asm/types.h, not only the kernel one. This results in build errors on some machines. 
Drop this wrapper and do its work from linux/types.h wrapper, also the glibc asm/types.h can already handle most of the type definition it was doing (typedef __u64, __u32, etc...). Todo: Check the others asm/*.h wrappers to prevent from other conflicts. Reported-by: Ingo Molnar Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras Cc: Anton Blanchard LKML-Reference: <1256246604-17156-3-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 1 - tools/perf/util/include/asm/bitops.h | 12 ++++++++++++ tools/perf/util/include/asm/byteorder.h | 2 +- tools/perf/util/include/asm/types.h | 17 ----------------- tools/perf/util/include/linux/types.h | 2 ++ 5 files changed, 15 insertions(+), 19 deletions(-) delete mode 100644 tools/perf/util/include/asm/types.h diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 65e6e52fe37..0a40c29b238 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -352,7 +352,6 @@ LIB_H += util/include/asm/bitops.h LIB_H += util/include/asm/byteorder.h LIB_H += util/include/asm/swab.h LIB_H += util/include/asm/system.h -LIB_H += util/include/asm/types.h LIB_H += util/include/asm/uaccess.h LIB_H += perf.h LIB_H += util/event.h diff --git a/tools/perf/util/include/asm/bitops.h b/tools/perf/util/include/asm/bitops.h index fbe4d921291..58e9817ffae 100644 --- a/tools/perf/util/include/asm/bitops.h +++ b/tools/perf/util/include/asm/bitops.h @@ -1,6 +1,18 @@ +#ifndef _PERF_ASM_BITOPS_H_ +#define _PERF_ASM_BITOPS_H_ + +#include +#include "../../types.h" +#include + +/* CHECKME: Not sure both always match */ +#define BITS_PER_LONG __WORDSIZE + #include "../../../../include/asm-generic/bitops/__fls.h" #include "../../../../include/asm-generic/bitops/fls.h" #include "../../../../include/asm-generic/bitops/fls64.h" #include "../../../../include/asm-generic/bitops/__ffs.h" #include "../../../../include/asm-generic/bitops/ffz.h" #include "../../../../include/asm-generic/bitops/hweight.h" + +#endif diff --git a/tools/perf/util/include/asm/byteorder.h b/tools/perf/util/include/asm/byteorder.h index 39f367cfaf5..b722abe3a62 100644 --- a/tools/perf/util/include/asm/byteorder.h +++ b/tools/perf/util/include/asm/byteorder.h @@ -1,2 +1,2 @@ -#include "../asm/types.h" +#include #include "../../../../include/linux/swab.h" diff --git a/tools/perf/util/include/asm/types.h b/tools/perf/util/include/asm/types.h deleted file mode 100644 index 06703c6cd50..00000000000 --- a/tools/perf/util/include/asm/types.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef PERF_ASM_TYPES_H_ -#define PERF_ASM_TYPES_H_ - -#include -#include "../../types.h" -#include - -/* CHECKME: Not sure both always match */ -#define BITS_PER_LONG __WORDSIZE - -typedef u64 __u64; -typedef u32 __u32; -typedef u16 __u16; -typedef u8 __u8; -typedef s64 __s64; - -#endif /* PERF_ASM_TYPES_H_ */ diff --git a/tools/perf/util/include/linux/types.h b/tools/perf/util/include/linux/types.h index 858a38d0843..196862a81a2 100644 --- a/tools/perf/util/include/linux/types.h +++ b/tools/perf/util/include/linux/types.h @@ -1,6 +1,8 @@ #ifndef _PERF_LINUX_TYPES_H_ #define _PERF_LINUX_TYPES_H_ +#include + #define DECLARE_BITMAP(name,bits) \ unsigned long name[BITS_TO_LONGS(bits)] From 6beba7adbe092e63dfe8d09fbd1e3ec140474a13 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 21 Oct 2009 17:34:06 -0200 Subject: [PATCH 174/470] perf tools: Unify debug messages mechanisms We were using eprintf in some places, that looks at a global 
'verbose' level, and at other places passing a 'v' parameter to specify the verbosity level, unify it by introducing pr_{err,warning,debug,etc}, just like in the kernel. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith LKML-Reference: <1256153646-10097-1-git-send-email-acme@redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 3 +- tools/perf/builtin-record.c | 2 +- tools/perf/builtin-report.c | 9 +- tools/perf/builtin-sched.c | 4 +- tools/perf/builtin-timechart.c | 13 ++- tools/perf/builtin-top.c | 2 +- tools/perf/builtin-trace.c | 4 +- tools/perf/util/callchain.c | 2 +- tools/perf/util/debug.c | 4 +- tools/perf/util/debug.h | 3 +- tools/perf/util/event.h | 3 +- tools/perf/util/header.c | 2 +- tools/perf/util/include/linux/kernel.h | 17 ++++ tools/perf/util/map.c | 17 ++-- tools/perf/util/symbol.c | 134 +++++++++++-------------- tools/perf/util/symbol.h | 5 +- tools/perf/util/thread.c | 6 +- 17 files changed, 114 insertions(+), 116 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 99bac6aa72c..6d63c2eea2c 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -203,8 +203,7 @@ static int process_mmap_event(event_t *event, unsigned long offset, unsigned long head) { struct map *map = map__new(&event->mmap, NULL, 0, - sizeof(struct sym_priv), symbol_filter, - verbose); + sizeof(struct sym_priv), symbol_filter); struct thread *thread = threads__findnew(event->mmap.pid); dump_printf("%p [%p]: PERF_RECORD_MMAP %d: [%p(%p) @ %p]: %s\n", diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index f0467ff0d8a..ac5ddfff445 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -630,7 +630,7 @@ static int __cmd_record(int argc, const char **argv) param.sched_priority = realtime_prio; if (sched_setscheduler(0, SCHED_FIFO, ¶m)) { - printf("Could not set realtime priority.\n"); + pr_err("Could not set realtime priority.\n"); exit(-1); } } diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 72d58421223..b3d814b5455 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -689,7 +689,8 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) dump_printf("... 
chain: nr:%Lu\n", chain->nr); if (validate_chain(chain, event) < 0) { - eprintf("call-chain problem with event, skipping it.\n"); + pr_debug("call-chain problem with event, " + "skipping it.\n"); return 0; } @@ -700,7 +701,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) } if (thread == NULL) { - eprintf("problem processing %d event, skipping it.\n", + pr_debug("problem processing %d event, skipping it.\n", event->header.type); return -1; } @@ -738,7 +739,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) if (hist_entry__add(thread, map, sym, ip, chain, level, period)) { - eprintf("problem incrementing symbol count, skipping event\n"); + pr_debug("problem incrementing symbol count, skipping event\n"); return -1; } @@ -750,7 +751,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) static int process_mmap_event(event_t *event, unsigned long offset, unsigned long head) { - struct map *map = map__new(&event->mmap, cwd, cwdlen, 0, NULL, verbose); + struct map *map = map__new(&event->mmap, cwd, cwdlen, 0, NULL); struct thread *thread = threads__findnew(event->mmap.pid); dump_printf("%p [%p]: PERF_RECORD_MMAP %d/%d: [%p(%p) @ %p]: %s\n", diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 807ca66e7a8..9a48d9626be 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -1666,8 +1666,8 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) (long long)period); if (thread == NULL) { - eprintf("problem processing %d event, skipping it.\n", - event->header.type); + pr_debug("problem processing %d event, skipping it.\n", + event->header.type); return -1; } diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index 34fad57087f..0a2f22261c3 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c @@ -1162,12 +1162,10 @@ more: size = event->header.size; if (!size || process_event(event) < 0) { - - printf("%p [%p]: skipping unknown header type: %d\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->header.type); - + pr_warning("%p [%p]: skipping unknown header type: %d\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->header.type); /* * assume we lost track of the stream, check alignment, and * increment a single u64 in the hope to catch on again 'soon'. 
@@ -1200,7 +1198,8 @@ done: write_svg_file(output_name); - printf("Written %2.1f seconds of trace to %s.\n", (last_time - first_time) / 1000000000.0, output_name); + pr_info("Written %2.1f seconds of trace to %s.\n", + (last_time - first_time) / 1000000000.0, output_name); return rc; } diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index fa20345a0ab..4a9fe228be2 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -809,7 +809,7 @@ static int symbol_filter(struct map *map, struct symbol *sym) static int parse_symbols(void) { if (dsos__load_kernel(vmlinux_name, sizeof(struct sym_entry), - symbol_filter, verbose, 1) <= 0) + symbol_filter, 1) <= 0) return -1; if (dump_symtab) diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 4c129ff0bb1..e566bbe3f22 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -81,8 +81,8 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) (long long)period); if (thread == NULL) { - eprintf("problem processing %d event, skipping it.\n", - event->header.type); + pr_debug("problem processing %d event, skipping it.\n", + event->header.type); return -1; } diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c index 3b8380f1b47..b3b71258272 100644 --- a/tools/perf/util/callchain.c +++ b/tools/perf/util/callchain.c @@ -206,7 +206,7 @@ fill_node(struct callchain_node *node, struct ip_callchain *chain, } node->val_nr = chain->nr - start; if (!node->val_nr) - printf("Warning: empty node in callchain tree\n"); + pr_warning("Warning: empty node in callchain tree\n"); } static void diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c index e8ca98fe0bd..28d520d5a1f 100644 --- a/tools/perf/util/debug.c +++ b/tools/perf/util/debug.c @@ -13,12 +13,12 @@ int verbose = 0; int dump_trace = 0; -int eprintf(const char *fmt, ...) +int eprintf(int level, const char *fmt, ...) { va_list args; int ret = 0; - if (verbose) { + if (verbose >= level) { va_start(args, fmt); ret = vfprintf(stderr, fmt, args); va_end(args); diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h index 02d1fa1c246..e8b18a1f87a 100644 --- a/tools/perf/util/debug.h +++ b/tools/perf/util/debug.h @@ -5,7 +5,8 @@ extern int verbose; extern int dump_trace; -int eprintf(const char *fmt, ...) __attribute__((format(printf, 1, 2))); +int eprintf(int level, + const char *fmt, ...) __attribute__((format(printf, 2, 3))); int dump_printf(const char *fmt, ...) 
__attribute__((format(printf, 1, 2))); void trace_event(event_t *event); diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index db59c8bbe49..d972b4b0d38 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -106,8 +106,7 @@ struct symbol; typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym); struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen, - unsigned int sym_priv_size, symbol_filter_t filter, - int v); + unsigned int sym_priv_size, symbol_filter_t filter); struct map *map__clone(struct map *self); int map__overlap(struct map *l, struct map *r); size_t map__fprintf(struct map *self, FILE *fp); diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 622c60e4525..7d26659b806 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -93,7 +93,7 @@ static struct perf_trace_event_type *events; void perf_header__push_event(u64 id, const char *name) { if (strlen(name) > MAX_EVENT_NAME) - printf("Event %s will be truncated\n", name); + pr_warning("Event %s will be truncated\n", name); if (!events) { events = malloc(sizeof(struct perf_trace_event_type)); diff --git a/tools/perf/util/include/linux/kernel.h b/tools/perf/util/include/linux/kernel.h index 4b9204d9b26..21c0274c02f 100644 --- a/tools/perf/util/include/linux/kernel.h +++ b/tools/perf/util/include/linux/kernel.h @@ -85,4 +85,21 @@ simple_strtoul(const char *nptr, char **endptr, int base) return strtoul(nptr, endptr, base); } +#ifndef pr_fmt +#define pr_fmt(fmt) fmt +#endif + +#define pr_err(fmt, ...) \ + do { fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__); } while (0) +#define pr_warning(fmt, ...) \ + do { fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__); } while (0) +#define pr_info(fmt, ...) \ + do { fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__); } while (0) +#define pr_debug(fmt, ...) \ + eprintf(1, pr_fmt(fmt), ##__VA_ARGS__) +#define pr_debugN(n, fmt, ...) \ + eprintf(n, pr_fmt(fmt), ##__VA_ARGS__) +#define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__) +#define pr_debug3(fmt, ...) 
pr_debugN(3, pr_fmt(fmt), ##__VA_ARGS__) + #endif diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 55079c0200e..c1c55682534 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -21,8 +21,7 @@ static int strcommon(const char *pathname, char *cwd, int cwdlen) } struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen, - unsigned int sym_priv_size, symbol_filter_t filter, - int v) + unsigned int sym_priv_size, symbol_filter_t filter) { struct map *self = malloc(sizeof(*self)); @@ -58,16 +57,16 @@ struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen, goto out_delete; if (new_dso) { - int nr = dso__load(self->dso, self, filter, v); + int nr = dso__load(self->dso, self, filter); if (nr < 0) - eprintf("Failed to open %s, continuing " - "without symbols\n", - self->dso->long_name); + pr_warning("Failed to open %s, continuing " + "without symbols\n", + self->dso->long_name); else if (nr == 0) - eprintf("No symbols found in %s, maybe " - "install a debug package?\n", - self->dso->long_name); + pr_warning("No symbols found in %s, maybe " + "install a debug package?\n", + self->dso->long_name); } if (self->dso == vdso || anon) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 0a4898480d6..8f0208ce237 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -76,7 +76,7 @@ static void kernel_maps__fixup_end(void) } static struct symbol *symbol__new(u64 start, u64 len, const char *name, - unsigned int priv_size, int v) + unsigned int priv_size) { size_t namelen = strlen(name) + 1; struct symbol *self = calloc(1, priv_size + sizeof(*self) + namelen); @@ -91,8 +91,7 @@ static struct symbol *symbol__new(u64 start, u64 len, const char *name, self->start = start; self->end = len ? start + len - 1 : start; - if (v > 2) - printf("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end); + pr_debug3("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end); memcpy(self->name, name, namelen); @@ -209,7 +208,7 @@ size_t dso__fprintf(struct dso *self, FILE *fp) * so that we can in the next step set the symbol ->end address and then * call kernel_maps__split_kallsyms. */ -static int kernel_maps__load_all_kallsyms(int v) +static int kernel_maps__load_all_kallsyms(void) { char *line = NULL; size_t n; @@ -252,7 +251,7 @@ static int kernel_maps__load_all_kallsyms(int v) * Will fix up the end later, when we have all symbols sorted. 
*/ sym = symbol__new(start, 0, symbol_name, - kernel_map->dso->sym_priv_size, v); + kernel_map->dso->sym_priv_size); if (sym == NULL) goto out_delete_line; @@ -300,8 +299,8 @@ static int kernel_maps__split_kallsyms(symbol_filter_t filter, int use_modules) if (strcmp(map->dso->name, module)) { map = kernel_maps__find_by_dso_name(module); if (!map) { - fputs("/proc/{kallsyms,modules} " - "inconsistency!\n", stderr); + pr_err("/proc/{kallsyms,modules} " + "inconsistency!\n"); return -1; } } @@ -351,10 +350,9 @@ delete_symbol: } -static int kernel_maps__load_kallsyms(symbol_filter_t filter, - int use_modules, int v) +static int kernel_maps__load_kallsyms(symbol_filter_t filter, int use_modules) { - if (kernel_maps__load_all_kallsyms(v)) + if (kernel_maps__load_all_kallsyms()) return -1; dso__fixup_sym_end(kernel_map->dso); @@ -362,9 +360,9 @@ static int kernel_maps__load_kallsyms(symbol_filter_t filter, return kernel_maps__split_kallsyms(filter, use_modules); } -static size_t kernel_maps__fprintf(FILE *fp, int v) +static size_t kernel_maps__fprintf(FILE *fp) { - size_t printed = fprintf(stderr, "Kernel maps:\n"); + size_t printed = fprintf(fp, "Kernel maps:\n"); struct rb_node *nd; for (nd = rb_first(&kernel_maps); nd; nd = rb_next(nd)) { @@ -372,17 +370,17 @@ static size_t kernel_maps__fprintf(FILE *fp, int v) printed += fprintf(fp, "Map:"); printed += map__fprintf(pos, fp); - if (v > 1) { + if (verbose > 1) { printed += dso__fprintf(pos->dso, fp); printed += fprintf(fp, "--\n"); } } - return printed + fprintf(stderr, "END kernel maps\n"); + return printed + fprintf(fp, "END kernel maps\n"); } static int dso__load_perf_map(struct dso *self, struct map *map, - symbol_filter_t filter, int v) + symbol_filter_t filter) { char *line = NULL; size_t n; @@ -420,7 +418,7 @@ static int dso__load_perf_map(struct dso *self, struct map *map, continue; sym = symbol__new(start, size, line + len, - self->sym_priv_size, v); + self->sym_priv_size); if (sym == NULL) goto out_delete_line; @@ -534,7 +532,7 @@ static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, * And always look at the original dso, not at debuginfo packages, that * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS). 
*/ -static int dso__synthesize_plt_symbols(struct dso *self, int v) +static int dso__synthesize_plt_symbols(struct dso *self) { uint32_t nr_rel_entries, idx; GElf_Sym sym; @@ -618,7 +616,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, int v) "%s@plt", elf_sym__name(&sym, symstrs)); f = symbol__new(plt_offset, shdr_plt.sh_entsize, - sympltname, self->sym_priv_size, v); + sympltname, self->sym_priv_size); if (!f) goto out_elf_end; @@ -636,7 +634,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, int v) "%s@plt", elf_sym__name(&sym, symstrs)); f = symbol__new(plt_offset, shdr_plt.sh_entsize, - sympltname, self->sym_priv_size, v); + sympltname, self->sym_priv_size); if (!f) goto out_elf_end; @@ -654,14 +652,14 @@ out_close: if (err == 0) return nr; out: - fprintf(stderr, "%s: problems reading %s PLT info.\n", - __func__, self->long_name); + pr_warning("%s: problems reading %s PLT info.\n", + __func__, self->long_name); return 0; } static int dso__load_sym(struct dso *self, struct map *map, const char *name, int fd, symbol_filter_t filter, int kernel, - int kmodule, int v) + int kmodule) { struct map *curr_map = map; struct dso *curr_dso = self; @@ -680,15 +678,12 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); if (elf == NULL) { - if (v) - fprintf(stderr, "%s: cannot read %s ELF file.\n", - __func__, name); + pr_err("%s: cannot read %s ELF file.\n", __func__, name); goto out_close; } if (gelf_getehdr(elf, &ehdr) == NULL) { - if (v) - fprintf(stderr, "%s: cannot get elf header.\n", __func__); + pr_err("%s: cannot get elf header.\n", __func__); goto out_elf_end; } @@ -794,10 +789,9 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, } if (curr_dso->adjust_symbols) { - if (v > 2) - printf("adjusting symbol: st_value: %Lx sh_addr: %Lx sh_offset: %Lx\n", - (u64)sym.st_value, (u64)shdr.sh_addr, (u64)shdr.sh_offset); - + pr_debug2("adjusting symbol: st_value: %Lx sh_addr: " + "%Lx sh_offset: %Lx\n", (u64)sym.st_value, + (u64)shdr.sh_addr, (u64)shdr.sh_offset); sym.st_value -= shdr.sh_addr - shdr.sh_offset; } /* @@ -810,7 +804,7 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, elf_name = demangled; new_symbol: f = symbol__new(sym.st_value, sym.st_size, elf_name, - curr_dso->sym_priv_size, v); + curr_dso->sym_priv_size); free(demangled); if (!f) goto out_elf_end; @@ -837,7 +831,7 @@ out_close: #define BUILD_ID_SIZE 128 -static char *dso__read_build_id(struct dso *self, int v) +static char *dso__read_build_id(struct dso *self) { int i; GElf_Ehdr ehdr; @@ -854,15 +848,13 @@ static char *dso__read_build_id(struct dso *self, int v) elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); if (elf == NULL) { - if (v) - fprintf(stderr, "%s: cannot read %s ELF file.\n", - __func__, self->long_name); + pr_err("%s: cannot read %s ELF file.\n", __func__, + self->long_name); goto out_close; } if (gelf_getehdr(elf, &ehdr) == NULL) { - if (v) - fprintf(stderr, "%s: cannot get elf header.\n", __func__); + pr_err("%s: cannot get elf header.\n", __func__); goto out_elf_end; } @@ -884,8 +876,7 @@ static char *dso__read_build_id(struct dso *self, int v) ++raw; bid += 2; } - if (v >= 2) - printf("%s(%s): %s\n", __func__, self->long_name, build_id); + pr_debug2("%s(%s): %s\n", __func__, self->long_name, build_id); out_elf_end: elf_end(elf); out_close: @@ -911,8 +902,7 @@ char dso__symtab_origin(const struct dso *self) return origin[self->origin]; } -int dso__load(struct dso 
*self, struct map *map, - symbol_filter_t filter, int v) +int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) { int size = PATH_MAX; char *name = malloc(size), *build_id = NULL; @@ -925,7 +915,7 @@ int dso__load(struct dso *self, struct map *map, self->adjust_symbols = 0; if (strncmp(self->name, "/tmp/perf-", 10) == 0) { - ret = dso__load_perf_map(self, map, filter, v); + ret = dso__load_perf_map(self, map, filter); self->origin = ret > 0 ? DSO__ORIG_JAVA_JIT : DSO__ORIG_NOT_FOUND; return ret; @@ -946,7 +936,7 @@ more: self->long_name); break; case DSO__ORIG_BUILDID: - build_id = dso__read_build_id(self, v); + build_id = dso__read_build_id(self); if (build_id != NULL) { snprintf(name, size, "/usr/lib/debug/.build-id/%.2s/%s.debug", @@ -967,7 +957,7 @@ more: fd = open(name, O_RDONLY); } while (fd < 0); - ret = dso__load_sym(self, map, name, fd, filter, 0, 0, v); + ret = dso__load_sym(self, map, name, fd, filter, 0, 0); close(fd); /* @@ -977,7 +967,7 @@ more: goto more; if (ret > 0) { - int nr_plt = dso__synthesize_plt_symbols(self, v); + int nr_plt = dso__synthesize_plt_symbols(self); if (nr_plt > 0) ret += nr_plt; } @@ -1025,34 +1015,29 @@ struct map *kernel_maps__find_by_dso_name(const char *name) } static int dso__load_module_sym(struct dso *self, struct map *map, - symbol_filter_t filter, int v) + symbol_filter_t filter) { int err = 0, fd = open(self->long_name, O_RDONLY); if (fd < 0) { - if (v) - fprintf(stderr, "%s: cannot open %s\n", - __func__, self->long_name); + pr_err("%s: cannot open %s\n", __func__, self->long_name); return err; } - err = dso__load_sym(self, map, self->long_name, fd, filter, 0, 1, v); + err = dso__load_sym(self, map, self->long_name, fd, filter, 0, 1); close(fd); return err; } -static int dsos__load_modules_sym_dir(char *dirname, - symbol_filter_t filter, int v) +static int dsos__load_modules_sym_dir(char *dirname, symbol_filter_t filter) { struct dirent *dent; int nr_symbols = 0, err; DIR *dir = opendir(dirname); if (!dir) { - if (v) - fprintf(stderr, "%s: cannot open %s dir\n", __func__, - dirname); + pr_err("%s: cannot open %s dir\n", __func__, dirname); return -1; } @@ -1066,7 +1051,7 @@ static int dsos__load_modules_sym_dir(char *dirname, snprintf(path, sizeof(path), "%s/%s", dirname, dent->d_name); - err = dsos__load_modules_sym_dir(path, filter, v); + err = dsos__load_modules_sym_dir(path, filter); if (err < 0) goto failure; } else { @@ -1092,7 +1077,7 @@ static int dsos__load_modules_sym_dir(char *dirname, if (map->dso->long_name == NULL) goto failure; - err = dso__load_module_sym(map->dso, map, filter, v); + err = dso__load_module_sym(map->dso, map, filter); if (err < 0) goto failure; last = rb_last(&map->dso->syms); @@ -1119,7 +1104,7 @@ failure: return -1; } -static int dsos__load_modules_sym(symbol_filter_t filter, int v) +static int dsos__load_modules_sym(symbol_filter_t filter) { struct utsname uts; char modules_path[PATH_MAX]; @@ -1130,7 +1115,7 @@ static int dsos__load_modules_sym(symbol_filter_t filter, int v) snprintf(modules_path, sizeof(modules_path), "/lib/modules/%s/kernel", uts.release); - return dsos__load_modules_sym_dir(modules_path, filter, v); + return dsos__load_modules_sym_dir(modules_path, filter); } /* @@ -1225,15 +1210,14 @@ out_failure: } static int dso__load_vmlinux(struct dso *self, struct map *map, - const char *vmlinux, - symbol_filter_t filter, int v) + const char *vmlinux, symbol_filter_t filter) { int err, fd = open(vmlinux, O_RDONLY); if (fd < 0) return -1; - err = dso__load_sym(self, map, 
self->long_name, fd, filter, 1, 0, v); + err = dso__load_sym(self, map, self->long_name, fd, filter, 1, 0); close(fd); @@ -1241,7 +1225,7 @@ static int dso__load_vmlinux(struct dso *self, struct map *map, } int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, - symbol_filter_t filter, int v, int use_modules) + symbol_filter_t filter, int use_modules) { int err = -1; struct dso *dso = dso__new(vmlinux, sym_priv_size); @@ -1257,26 +1241,26 @@ int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, kernel_map->map_ip = kernel_map->unmap_ip = identity__map_ip; if (use_modules && dsos__load_modules(sym_priv_size) < 0) { - fprintf(stderr, "Failed to load list of modules in use! " - "Continuing...\n"); + pr_warning("Failed to load list of modules in use! " + "Continuing...\n"); use_modules = 0; } if (vmlinux) { - err = dso__load_vmlinux(dso, kernel_map, vmlinux, filter, v); + err = dso__load_vmlinux(dso, kernel_map, vmlinux, filter); if (err > 0 && use_modules) { - int syms = dsos__load_modules_sym(filter, v); + int syms = dsos__load_modules_sym(filter); if (syms < 0) - fprintf(stderr, "Failed to read module symbols!" - " Continuing...\n"); + pr_warning("Failed to read module symbols!" + " Continuing...\n"); else err += syms; } } if (err <= 0) - err = kernel_maps__load_kallsyms(filter, use_modules, v); + err = kernel_maps__load_kallsyms(filter, use_modules); if (err > 0) { struct rb_node *node = rb_first(&dso->syms); @@ -1296,8 +1280,8 @@ int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, kernel_maps__fixup_end(); dsos__add(dso); - if (v > 0) - kernel_maps__fprintf(stderr, v); + if (verbose) + kernel_maps__fprintf(stderr); } return err; @@ -1355,8 +1339,8 @@ void dsos__fprintf(FILE *fp) int load_kernel(unsigned int sym_priv_size, symbol_filter_t filter) { - if (dsos__load_kernel(vmlinux_name, sym_priv_size, - filter, verbose, modules) <= 0) + if (dsos__load_kernel(vmlinux_name, sym_priv_size, filter, + modules) <= 0) return -1; vdso = dso__new("[vdso]", 0); diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index c2a777de9b7..77b7b3e4241 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -63,11 +63,10 @@ static inline void *dso__sym_priv(struct dso *self, struct symbol *sym) struct symbol *dso__find_symbol(struct dso *self, u64 ip); int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, - symbol_filter_t filter, int verbose, int modules); + symbol_filter_t filter, int modules); struct dso *dsos__findnew(const char *name, unsigned int sym_priv_size, bool *is_new); -int dso__load(struct dso *self, struct map *map, - symbol_filter_t filter, int v); +int dso__load(struct dso *self, struct map *map, symbol_filter_t filter); void dsos__fprintf(FILE *fp); size_t dso__fprintf(struct dso *self, FILE *fp); diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 8cb47f1d8a7..0f6d78c9863 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -127,9 +127,9 @@ static void thread__remove_overlappings(struct thread *self, struct map *map) continue; if (verbose >= 2) { - printf("overlapping maps:\n"); - map__fprintf(map, stdout); - map__fprintf(pos, stdout); + fputs("overlapping maps:\n", stderr); + map__fprintf(map, stderr); + map__fprintf(pos, stderr); } rb_erase(&pos->rb_node, &self->maps); From b7cb10e790fbd145296e771f789273a875c15719 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 21 Oct 2009 17:34:06 -0200 Subject: [PATCH 175/470] perf probe: Print debug 
messages using pr_*() Use the new pr_{err,warning,debug,etc} printout methods, just like in the kernel. Signed-off-by: Arnaldo Carvalho de Melo Cc: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith LKML-Reference: <1256153646-10097-1-git-send-email-acme@redhat.com> [ Split this patch out, to keep perf/probes separate. ] Signed-off-by: Ingo Molnar --- tools/perf/builtin-probe.c | 20 ++++++++++---------- tools/perf/util/probe-finder.c | 12 ++++++------ 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index b5ad86a265f..dcb406c7f82 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -78,7 +78,7 @@ static int parse_probepoint(const struct option *opt __used, if (!str) /* The end of probe points */ return 0; - eprintf("probe-definition(%d): %s\n", session.nr_probe, str); + pr_debug("probe-definition(%d): %s\n", session.nr_probe, str); if (++session.nr_probe == MAX_PROBES) semantic_error("Too many probes"); @@ -103,7 +103,7 @@ static int parse_probepoint(const struct option *opt __used, die("strndup"); if (++argc == MAX_PROBE_ARGS) semantic_error("Too many arguments"); - eprintf("argv[%d]=%s\n", argc, argv[argc - 1]); + pr_debug("argv[%d]=%s\n", argc, argv[argc - 1]); } } while (*str != '\0'); if (argc < 2) @@ -133,7 +133,7 @@ static int parse_probepoint(const struct option *opt __used, pp->line = atoi(ptr); if (!pp->file || !pp->line) semantic_error("Failed to parse line."); - eprintf("file:%s line:%d\n", pp->file, pp->line); + pr_debug("file:%s line:%d\n", pp->file, pp->line); } else { /* Function name */ ptr = strchr(arg, '+'); @@ -150,8 +150,8 @@ static int parse_probepoint(const struct option *opt __used, pp->file = strdup(ptr); } pp->function = strdup(arg); - eprintf("symbol:%s file:%s offset:%d\n", - pp->function, pp->file, pp->offset); + pr_debug("symbol:%s file:%s offset:%d\n", + pp->function, pp->file, pp->offset); } free(argv[1]); if (pp->file) @@ -175,7 +175,7 @@ static int parse_probepoint(const struct option *opt __used, session.need_dwarf = 1; } - eprintf("%d arguments\n", pp->nr_args); + pr_debug("%d arguments\n", pp->nr_args); return 0; } @@ -188,7 +188,7 @@ static int open_default_vmlinux(void) ret = uname(&uts); if (ret) { - eprintf("uname() failed.\n"); + pr_debug("uname() failed.\n"); return -errno; } session.release = uts.release; @@ -196,12 +196,12 @@ static int open_default_vmlinux(void) ret = snprintf(fname, MAX_PATH_LEN, default_search_path[i], session.release); if (ret >= MAX_PATH_LEN || ret < 0) { - eprintf("Filename(%d,%s) is too long.\n", i, + pr_debug("Filename(%d,%s) is too long.\n", i, uts.release); errno = E2BIG; return -E2BIG; } - eprintf("try to open %s\n", fname); + pr_debug("try to open %s\n", fname); fd = open(fname, O_RDONLY); if (fd >= 0) break; @@ -341,7 +341,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) ret = find_probepoint(fd, pp); if (ret <= 0) die("No probe point found.\n"); - eprintf("probe event %s found\n", session.events[j]); + pr_debug("probe event %s found\n", session.events[j]); } close(fd); diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index be997abdf5b..54e70718530 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -136,7 +136,7 @@ static Dwarf_Unsigned die_get_fileno(Dwarf_Die cu_die, const char *fname) dwarf_dealloc(__dw_debug, srcs, DW_DLA_LIST); } if (found) - eprintf("found fno: %d\n", (int)found); + 
pr_debug("found fno: %d\n", (int)found); return found; } @@ -442,7 +442,7 @@ static void find_variable(Dwarf_Die sp_die, struct probe_finder *pf) return ; } - eprintf("Searching '%s' variable in context.\n", pf->var); + pr_debug("Searching '%s' variable in context.\n", pf->var); /* Search child die for local variables and parameters. */ ret = search_die_from_children(sp_die, variable_callback, pf); if (!ret) @@ -552,7 +552,7 @@ static void find_by_line(Dwarf_Die cu_die, struct probe_finder *pf) ret = dwarf_lineaddr(lines[i], &addr, &__dw_error); DIE_IF(ret != DW_DLV_OK); - eprintf("Probe point found: 0x%llx\n", addr); + pr_debug("Probe point found: 0x%llx\n", addr); pf->addr = addr; /* Search a real subprogram including this line, */ ret = search_die_from_children(cu_die, probeaddr_callback, pf); @@ -583,8 +583,8 @@ static int probefunc_callback(struct die_link *dlink, void *data) &pf->inl_offs, &__dw_error); DIE_IF(ret != DW_DLV_OK); - eprintf("inline definition offset %lld\n", - pf->inl_offs); + pr_debug("inline definition offset %lld\n", + pf->inl_offs); return 0; } /* Get probe address */ @@ -599,7 +599,7 @@ static int probefunc_callback(struct die_link *dlink, void *data) /* Get probe address */ pf->addr = die_get_entrypc(dlink->die); pf->addr += pp->offset; - eprintf("found inline addr: 0x%llx\n", pf->addr); + pr_debug("found inline addr: 0x%llx\n", pf->addr); /* Inlined function. Get a real subprogram */ for (lk = dlink->parent; lk != NULL; lk = lk->parent) { tag = 0; From fcd14b3203b538dca04a2b065c774c0b57863eec Mon Sep 17 00:00:00 2001 From: Michael Cree Date: Mon, 26 Oct 2009 21:32:06 +1300 Subject: [PATCH 176/470] perf tools, Alpha: Add Alpha support to perf.h For the perf tool the patch implements an Alpha specific section in the perf.h header file. 
Signed-off-by: Michael Cree Cc: Richard Henderson Cc: Ivan Kokshaysky Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1256545926-6972-1-git-send-email-mcree@orcon.net.nz> Signed-off-by: Ingo Molnar --- tools/perf/perf.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/perf/perf.h b/tools/perf/perf.h index 8cc4623afd6..216bdb223f6 100644 --- a/tools/perf/perf.h +++ b/tools/perf/perf.h @@ -47,6 +47,12 @@ #define cpu_relax() asm volatile("":::"memory") #endif +#ifdef __alpha__ +#include "../../arch/alpha/include/asm/unistd.h" +#define rmb() asm volatile("mb" ::: "memory") +#define cpu_relax() asm volatile("" ::: "memory") +#endif + #include #include #include From 7f3bedcc93f935631d2363f23de1cc80f04fdf3e Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 26 Oct 2009 19:23:17 -0200 Subject: [PATCH 177/470] perf record: Fix race where process can disappear while reading its /proc/pid/tasks Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith LKML-Reference: <1256592199-9608-1-git-send-email-acme@redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index ac5ddfff445..9e1638cc19c 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -206,6 +206,7 @@ static pid_t pid_synthesize_comm_event(pid_t pid, int full) fp = fopen(filename, "r"); if (fp == NULL) { +out_race: /* * We raced with a task exiting - just return: */ @@ -247,6 +248,9 @@ static pid_t pid_synthesize_comm_event(pid_t pid, int full) snprintf(filename, sizeof(filename), "/proc/%d/task", pid); tasks = opendir(filename); + if (tasks == NULL) + goto out_race; + while (!readdir_r(tasks, &dirent, &next) && next) { char *end; pid = strtol(dirent.d_name, &end, 10); From 234fbbf508c58c5084292b11b242377553897459 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 26 Oct 2009 19:23:18 -0200 Subject: [PATCH 178/470] perf tools: Generalize event synthesizing routines Because we will need it in 'perf top' to support userspace symbols for existing threads. Now we pass a callback that will receive the synthesized event and then write it to the output file in 'perf record' and in the upcoming patch for 'perf top' we will just immediatelly create the in memory representation of threads and maps. 
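(Editor's sketch of the resulting interface, not part of the patch: only event_t, event__synthesize_thread() and event__synthesize_threads() below come from this series; the callback and the helper are hypothetical examples of a third consumer plugging into the now callback-driven API.)

static int count_synthesized(event_t *event)
{
	static int nr;

	pr_debug("synthesized event #%d, type %u\n", ++nr, event->header.type);
	return 0;
}

static void synthesize_existing_tasks(pid_t target_pid)
{
	if (target_pid != -1)		/* one task of interest... */
		event__synthesize_thread(target_pid, count_synthesized);
	else				/* ...or everything in /proc */
		event__synthesize_threads(count_synthesized);
}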
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith LKML-Reference: <1256592199-9608-2-git-send-email-acme@redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 1 + tools/perf/builtin-record.c | 181 ++---------------------------------- tools/perf/util/event.c | 177 +++++++++++++++++++++++++++++++++++ tools/perf/util/event.h | 3 + 4 files changed, 191 insertions(+), 171 deletions(-) create mode 100644 tools/perf/util/event.c diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 0a40c29b238..9f4488d6f8e 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -380,6 +380,7 @@ LIB_OBJS += util/alias.o LIB_OBJS += util/config.o LIB_OBJS += util/ctype.o LIB_OBJS += util/environment.o +LIB_OBJS += util/event.o LIB_OBJS += util/exec_cmd.o LIB_OBJS += util/help.o LIB_OBJS += util/levenshtein.o diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 9e1638cc19c..4a73d89ce5d 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -109,6 +109,12 @@ static void write_output(void *buf, size_t size) } } +static int process_synthesized_event(event_t *event) +{ + write_output(event, event->header.size); + return 0; +} + static void mmap_read(struct mmap_data *md) { unsigned int head = mmap_read_head(md); @@ -191,172 +197,6 @@ static void sig_atexit(void) kill(getpid(), signr); } -static pid_t pid_synthesize_comm_event(pid_t pid, int full) -{ - struct comm_event comm_ev; - char filename[PATH_MAX]; - char bf[BUFSIZ]; - FILE *fp; - size_t size = 0; - DIR *tasks; - struct dirent dirent, *next; - pid_t tgid = 0; - - snprintf(filename, sizeof(filename), "/proc/%d/status", pid); - - fp = fopen(filename, "r"); - if (fp == NULL) { -out_race: - /* - * We raced with a task exiting - just return: - */ - if (verbose) - fprintf(stderr, "couldn't open %s\n", filename); - return 0; - } - - memset(&comm_ev, 0, sizeof(comm_ev)); - while (!comm_ev.comm[0] || !comm_ev.pid) { - if (fgets(bf, sizeof(bf), fp) == NULL) - goto out_failure; - - if (memcmp(bf, "Name:", 5) == 0) { - char *name = bf + 5; - while (*name && isspace(*name)) - ++name; - size = strlen(name) - 1; - memcpy(comm_ev.comm, name, size++); - } else if (memcmp(bf, "Tgid:", 5) == 0) { - char *tgids = bf + 5; - while (*tgids && isspace(*tgids)) - ++tgids; - tgid = comm_ev.pid = atoi(tgids); - } - } - - comm_ev.header.type = PERF_RECORD_COMM; - size = ALIGN(size, sizeof(u64)); - comm_ev.header.size = sizeof(comm_ev) - (sizeof(comm_ev.comm) - size); - - if (!full) { - comm_ev.tid = pid; - - write_output(&comm_ev, comm_ev.header.size); - goto out_fclose; - } - - snprintf(filename, sizeof(filename), "/proc/%d/task", pid); - - tasks = opendir(filename); - if (tasks == NULL) - goto out_race; - - while (!readdir_r(tasks, &dirent, &next) && next) { - char *end; - pid = strtol(dirent.d_name, &end, 10); - if (*end) - continue; - - comm_ev.tid = pid; - - write_output(&comm_ev, comm_ev.header.size); - } - closedir(tasks); - -out_fclose: - fclose(fp); - return tgid; - -out_failure: - fprintf(stderr, "couldn't get COMM and pgid, malformed %s\n", - filename); - exit(EXIT_FAILURE); -} - -static void pid_synthesize_mmap_samples(pid_t pid, pid_t tgid) -{ - char filename[PATH_MAX]; - FILE *fp; - - snprintf(filename, sizeof(filename), "/proc/%d/maps", pid); - - fp = fopen(filename, "r"); - if (fp == NULL) { - /* - * We raced with a task exiting - just return: - */ - if (verbose) - fprintf(stderr, "couldn't open %s\n", filename); - return; - } - while 
(1) { - char bf[BUFSIZ], *pbf = bf; - struct mmap_event mmap_ev = { - .header = { .type = PERF_RECORD_MMAP }, - }; - int n; - size_t size; - if (fgets(bf, sizeof(bf), fp) == NULL) - break; - - /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ - n = hex2u64(pbf, &mmap_ev.start); - if (n < 0) - continue; - pbf += n + 1; - n = hex2u64(pbf, &mmap_ev.len); - if (n < 0) - continue; - pbf += n + 3; - if (*pbf == 'x') { /* vm_exec */ - char *execname = strchr(bf, '/'); - - /* Catch VDSO */ - if (execname == NULL) - execname = strstr(bf, "[vdso]"); - - if (execname == NULL) - continue; - - size = strlen(execname); - execname[size - 1] = '\0'; /* Remove \n */ - memcpy(mmap_ev.filename, execname, size); - size = ALIGN(size, sizeof(u64)); - mmap_ev.len -= mmap_ev.start; - mmap_ev.header.size = (sizeof(mmap_ev) - - (sizeof(mmap_ev.filename) - size)); - mmap_ev.pid = tgid; - mmap_ev.tid = pid; - - write_output(&mmap_ev, mmap_ev.header.size); - } - } - - fclose(fp); -} - -static void synthesize_all(void) -{ - DIR *proc; - struct dirent dirent, *next; - - proc = opendir("/proc"); - - while (!readdir_r(proc, &dirent, &next) && next) { - char *end; - pid_t pid, tgid; - - pid = strtol(dirent.d_name, &end, 10); - if (*end) /* only interested in proper numerical dirents */ - continue; - - tgid = pid_synthesize_comm_event(pid, 1); - pid_synthesize_mmap_samples(pid, tgid); - } - - closedir(proc); -} - static int group_fd; static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int nr) @@ -608,11 +448,10 @@ static int __cmd_record(int argc, const char **argv) if (file_new) perf_header__write(header, output); - if (!system_wide) { - pid_t tgid = pid_synthesize_comm_event(pid, 0); - pid_synthesize_mmap_samples(pid, tgid); - } else - synthesize_all(); + if (!system_wide) + event__synthesize_thread(pid, process_synthesized_event); + else + event__synthesize_threads(process_synthesized_event); if (target_pid == -1 && argc) { pid = fork(); diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c new file mode 100644 index 00000000000..1dae7e3b400 --- /dev/null +++ b/tools/perf/util/event.c @@ -0,0 +1,177 @@ +#include +#include "event.h" +#include "debug.h" +#include "string.h" + +static pid_t event__synthesize_comm(pid_t pid, int full, + int (*process)(event_t *event)) +{ + event_t ev; + char filename[PATH_MAX]; + char bf[BUFSIZ]; + FILE *fp; + size_t size = 0; + DIR *tasks; + struct dirent dirent, *next; + pid_t tgid = 0; + + snprintf(filename, sizeof(filename), "/proc/%d/status", pid); + + fp = fopen(filename, "r"); + if (fp == NULL) { +out_race: + /* + * We raced with a task exiting - just return: + */ + pr_debug("couldn't open %s\n", filename); + return 0; + } + + memset(&ev.comm, 0, sizeof(ev.comm)); + while (!ev.comm.comm[0] || !ev.comm.pid) { + if (fgets(bf, sizeof(bf), fp) == NULL) + goto out_failure; + + if (memcmp(bf, "Name:", 5) == 0) { + char *name = bf + 5; + while (*name && isspace(*name)) + ++name; + size = strlen(name) - 1; + memcpy(ev.comm.comm, name, size++); + } else if (memcmp(bf, "Tgid:", 5) == 0) { + char *tgids = bf + 5; + while (*tgids && isspace(*tgids)) + ++tgids; + tgid = ev.comm.pid = atoi(tgids); + } + } + + ev.comm.header.type = PERF_RECORD_COMM; + size = ALIGN(size, sizeof(u64)); + ev.comm.header.size = sizeof(ev.comm) - (sizeof(ev.comm.comm) - size); + + if (!full) { + ev.comm.tid = pid; + + process(&ev); + goto out_fclose; + } + + snprintf(filename, sizeof(filename), "/proc/%d/task", pid); + + tasks = opendir(filename); + if (tasks == NULL) + goto 
out_race; + + while (!readdir_r(tasks, &dirent, &next) && next) { + char *end; + pid = strtol(dirent.d_name, &end, 10); + if (*end) + continue; + + ev.comm.tid = pid; + + process(&ev); + } + closedir(tasks); + +out_fclose: + fclose(fp); + return tgid; + +out_failure: + pr_warning("couldn't get COMM and pgid, malformed %s\n", filename); + return -1; +} + +static int event__synthesize_mmap_events(pid_t pid, pid_t tgid, + int (*process)(event_t *event)) +{ + char filename[PATH_MAX]; + FILE *fp; + + snprintf(filename, sizeof(filename), "/proc/%d/maps", pid); + + fp = fopen(filename, "r"); + if (fp == NULL) { + /* + * We raced with a task exiting - just return: + */ + pr_debug("couldn't open %s\n", filename); + return -1; + } + + while (1) { + char bf[BUFSIZ], *pbf = bf; + event_t ev = { + .header = { .type = PERF_RECORD_MMAP }, + }; + int n; + size_t size; + if (fgets(bf, sizeof(bf), fp) == NULL) + break; + + /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ + n = hex2u64(pbf, &ev.mmap.start); + if (n < 0) + continue; + pbf += n + 1; + n = hex2u64(pbf, &ev.mmap.len); + if (n < 0) + continue; + pbf += n + 3; + if (*pbf == 'x') { /* vm_exec */ + char *execname = strchr(bf, '/'); + + /* Catch VDSO */ + if (execname == NULL) + execname = strstr(bf, "[vdso]"); + + if (execname == NULL) + continue; + + size = strlen(execname); + execname[size - 1] = '\0'; /* Remove \n */ + memcpy(ev.mmap.filename, execname, size); + size = ALIGN(size, sizeof(u64)); + ev.mmap.len -= ev.mmap.start; + ev.mmap.header.size = (sizeof(ev.mmap) - + (sizeof(ev.mmap.filename) - size)); + ev.mmap.pid = tgid; + ev.mmap.tid = pid; + + process(&ev); + } + } + + fclose(fp); + return 0; +} + +int event__synthesize_thread(pid_t pid, int (*process)(event_t *event)) +{ + pid_t tgid = event__synthesize_comm(pid, 1, process); + if (tgid == -1) + return -1; + return event__synthesize_mmap_events(pid, tgid, process); +} + +void event__synthesize_threads(int (*process)(event_t *event)) +{ + DIR *proc; + struct dirent dirent, *next; + + proc = opendir("/proc"); + + while (!readdir_r(proc, &dirent, &next) && next) { + char *end; + pid_t pid = strtol(dirent.d_name, &end, 10); + + if (*end) /* only interested in proper numerical dirents */ + continue; + + event__synthesize_thread(pid, process); + } + + closedir(proc); +} diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index d972b4b0d38..2ae1177be40 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -111,4 +111,7 @@ struct map *map__clone(struct map *self); int map__overlap(struct map *l, struct map *r); size_t map__fprintf(struct map *self, FILE *fp); +int event__synthesize_thread(pid_t pid, int (*process)(event_t *event)); +void event__synthesize_threads(int (*process)(event_t *event)); + #endif /* __PERF_RECORD_H */ From 5b2bb75a0d4b08cd16bc35ecd674f957fc3b0eb7 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 26 Oct 2009 19:23:19 -0200 Subject: [PATCH 179/470] perf top: Support userspace symbols too Example: Compiling the kernel with 'make -k 22 allyesconfig' [root@emilia linux-2.6-tip]# perf top -r 90 ------------------------------------------------------------------------------ PerfTop: 3669 irqs/sec kernel:59.9% [1000Hz cycles], (all, 8 CPUs) ------------------------------------------------------------------------------ samples pcnt function DSO _______ _____ ________________________________ ________________ 3062.00 6.5% clear_page_c [kernel] 2233.00 4.8% _int_malloc /lib64/libc-2.5.so 2100.00 4.5% yylex 
/home/acme/git/build/allyesconfig/scripts/genksyms/genksyms 2029.00 4.3% memset /lib64/libc-2.5.so 1224.00 2.6% page_fault [kernel] 1075.00 2.3% __GI_strlen /lib64/libc-2.5.so 863.00 1.8% sub_preempt_count [kernel] 822.00 1.8% __GI_memcpy /lib64/libc-2.5.so 810.00 1.7% __GI_vfprintf /lib64/libc-2.5.so 786.00 1.7% _int_free /lib64/libc-2.5.so 775.00 1.7% __GI_strcmp /lib64/libc-2.5.so 748.00 1.6% _spin_lock [kernel] 699.00 1.5% main /home/acme/git/build/allyesconfig/scripts/basic/fixdep 659.00 1.4% add_preempt_count [kernel] 649.00 1.4% yyparse /home/acme/git/build/allyesconfig/scripts/genksyms/genksyms 645.00 1.4% preempt_trace [kernel] 635.00 1.4% __GI___libc_free /lib64/libc-2.5.so 597.00 1.3% trace_preempt_on [kernel] 551.00 1.2% __GI___libc_malloc /lib64/libc-2.5.so 516.00 1.1% _spin_lock_irqsave [kernel] 481.00 1.0% copy_user_generic_string [kernel] 479.00 1.0% unmap_vmas [kernel] 429.00 0.9% _IO_file_xsputn_internal /lib64/libc-2.5.so 425.00 0.9% __GI_strncpy /lib64/libc-2.5.so 416.00 0.9% get_page_from_freelist [kernel] 414.00 0.9% malloc_consolidate /lib64/libc-2.5.so 406.00 0.9% get_parent_ip [kernel] 362.00 0.8% __rmqueue [kernel] 347.00 0.7% in_lock_functions [kernel] 316.00 0.7% __d_lookup [kernel] [root@emilia linux-2.6-tip]# More polishing is needed to print just DSO basename when not --verbose, etc. Supporting a 'comm' column requires some more reworking of 'perf top' internals as we will need to use something like the hist entries 'perf report' uses and will be done in another patch. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith LKML-Reference: <1256592199-9608-3-git-send-email-acme@redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 147 +++++++++++++++++++++++++++------------ 1 file changed, 103 insertions(+), 44 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 4a9fe228be2..a02fc414601 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -318,7 +318,7 @@ static void show_details(struct sym_entry *syme) } /* - * Symbols will be added here in record_ip and will get out + * Symbols will be added here in event__process_sample and will get out * after decayed. */ static LIST_HEAD(active_symbols); @@ -459,18 +459,18 @@ static void print_sym_table(void) } if (nr_counters == 1) - printf(" samples pcnt"); + printf(" samples pcnt"); else - printf(" weight samples pcnt"); + printf(" weight samples pcnt"); if (verbose) printf(" RIP "); - printf(" kernel function\n"); - printf(" %s _______ _____", + printf(" function DSO\n"); + printf(" %s _______ _____", nr_counters == 1 ? 
" " : "______"); if (verbose) - printf(" ________________"); - printf(" _______________\n\n"); + printf(" ________________"); + printf(" ________________________________ ________________\n\n"); for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) { struct symbol *sym; @@ -486,16 +486,15 @@ static void print_sym_table(void) sum_ksamples)); if (nr_counters == 1 || !display_weighted) - printf("%20.2f - ", syme->weight); + printf("%20.2f ", syme->weight); else - printf("%9.1f %10ld - ", syme->weight, syme->snap_count); + printf("%9.1f %10ld ", syme->weight, syme->snap_count); percent_color_fprintf(stdout, "%4.1f%%", pcnt); if (verbose) - printf(" - %016llx", sym->start); - printf(" : %s", sym->name); - if (syme->map->dso->name[0] == '[') - printf(" \t%s", syme->map->dso->name); + printf(" %016llx", sym->start); + printf(" %-32s", sym->name); + printf(" %s", syme->map->dso->short_name); printf("\n"); } } @@ -818,41 +817,97 @@ static int parse_symbols(void) return 0; } -/* - * Binary search in the histogram table and record the hit: - */ -static void record_ip(u64 ip, int counter) +static void event__process_sample(const event_t *self, int counter) { + u64 ip = self->ip.ip; struct map *map; - struct symbol *sym = kernel_maps__find_symbol(ip, &map); + struct sym_entry *syme; + struct symbol *sym; - if (sym != NULL) { - struct sym_entry *syme = dso__sym_priv(map->dso, sym); + switch (self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK) { + case PERF_RECORD_MISC_USER: { + struct thread *thread = threads__findnew(self->ip.pid); - if (!syme->skip) { - syme->count[counter]++; - record_precise_ip(syme, counter, ip); - pthread_mutex_lock(&active_symbols_lock); - if (list_empty(&syme->node) || !syme->node.next) - __list_insert_active_sym(syme); - pthread_mutex_unlock(&active_symbols_lock); + if (thread == NULL) return; + + map = thread__find_map(thread, ip); + if (map != NULL) { + ip = map->map_ip(map, ip); + sym = map->dso->find_symbol(map->dso, ip); + if (sym == NULL) + return; + userspace_samples++; + break; } } - - samples--; -} - -static void process_event(u64 ip, int counter, int user) -{ - samples++; - - if (user) { - userspace_samples++; + /* + * If this is outside of all known maps, + * and is a negative address, try to look it + * up in the kernel dso, as it might be a + * vsyscall or vdso (which executes in user-mode). 
+ */ + if ((long long)ip >= 0) + return; + /* Fall thru */ + case PERF_RECORD_MISC_KERNEL: + sym = kernel_maps__find_symbol(ip, &map); + if (sym == NULL) + return; + break; + default: return; } - record_ip(ip, counter); + syme = dso__sym_priv(map->dso, sym); + + if (!syme->skip) { + syme->count[counter]++; + record_precise_ip(syme, counter, ip); + pthread_mutex_lock(&active_symbols_lock); + if (list_empty(&syme->node) || !syme->node.next) + __list_insert_active_sym(syme); + pthread_mutex_unlock(&active_symbols_lock); + ++samples; + return; + } +} + +static void event__process_mmap(event_t *self) +{ + struct thread *thread = threads__findnew(self->mmap.pid); + + if (thread != NULL) { + struct map *map = map__new(&self->mmap, NULL, 0, + sizeof(struct sym_entry), + symbol_filter); + if (map != NULL) + thread__insert_map(thread, map); + } +} + +static void event__process_comm(event_t *self) +{ + struct thread *thread = threads__findnew(self->comm.pid); + + if (thread != NULL) + thread__set_comm(thread, self->comm.comm); +} + +static int event__process(event_t *event) +{ + switch (event->header.type) { + case PERF_RECORD_COMM: + event__process_comm(event); + break; + case PERF_RECORD_MMAP: + event__process_mmap(event); + break; + default: + break; + } + + return 0; } struct mmap_data { @@ -925,13 +980,11 @@ static void mmap_read_counter(struct mmap_data *md) event = &event_copy; } + if (event->header.type == PERF_RECORD_SAMPLE) + event__process_sample(event, md->counter); + else + event__process(event); old += size; - - if (event->header.type == PERF_RECORD_SAMPLE) { - int user = - (event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK) == PERF_RECORD_MISC_USER; - process_event(event->ip.ip, md->counter, user); - } } md->prev = old; @@ -973,6 +1026,7 @@ static void start_counter(int i, int counter) } attr->inherit = (cpu < 0) && inherit; + attr->mmap = 1; try_again: fd[i][counter] = sys_perf_event_open(attr, target_pid, cpu, group_fd, 0); @@ -1031,6 +1085,11 @@ static int __cmd_top(void) int i, counter; int ret; + if (target_pid != -1) + event__synthesize_thread(target_pid, event__process); + else + event__synthesize_threads(event__process); + for (i = 0; i < nr_cpus; i++) { group_fd = -1; for (counter = 0; counter < nr_counters; counter++) From 85df6f683efa457440eb922272fd5a71aa022ad4 Mon Sep 17 00:00:00 2001 From: Marti Raudsepp Date: Tue, 27 Oct 2009 00:33:04 +0000 Subject: [PATCH 180/470] perf tools: Notify user when unrecognized event is specified Previously no indication was given about what went wrong. 
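(Editor's sketch, not perf code: the change amounts to two diagnostics on the fall-through path of parse_event_symbols(). The standalone fragment below illustrates the pattern; only the two message strings are taken from the patch, the surrounding scaffolding is made up.)

#include <stdio.h>

enum { EVT_FAILED, EVT_HANDLED };

static int parse_event_example(const char *str)
{
	/* every recognised event class would have returned EVT_HANDLED above */
	fprintf(stderr, "invalid or unsupported event: '%s'\n", str);
	fprintf(stderr, "Run 'perf list' for a list of valid events\n");
	return EVT_FAILED;
}

int main(int argc, char **argv)
{
	return argc > 1 && parse_event_example(argv[1]) == EVT_FAILED;
}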
Signed-off-by: Marti Raudsepp Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <03ec9ee96f17cef05424.1256603584@localhost> Signed-off-by: Ingo Molnar --- tools/perf/util/parse-events.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index b097570e962..e9e6d5c0ae4 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -678,6 +678,8 @@ parse_event_symbols(const char **str, struct perf_event_attr *attr) if (ret != EVT_FAILED) goto modifier; + fprintf(stderr, "invalid or unsupported event: '%s'\n", *str); + fprintf(stderr, "Run 'perf list' for a list of valid events\n"); return EVT_FAILED; modifier: From 689d30187828afe1faedf050b2f7593515b90c76 Mon Sep 17 00:00:00 2001 From: Marti Raudsepp Date: Tue, 27 Oct 2009 00:33:05 +0000 Subject: [PATCH 181/470] perf tools: Output 'perf list' to stdout not stderr Writing to stdout is probably the expected behavior because the user explicitly asked for a list. Signed-off-by: Marti Raudsepp Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <4ebb59420ef057972167.1256603585@localhost> Signed-off-by: Ingo Molnar --- tools/perf/util/parse-events.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index e9e6d5c0ae4..31baa5a6036 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -806,7 +806,7 @@ static void print_tracepoint_events(void) for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { snprintf(evt_path, MAXPATHLEN, "%s:%s", sys_dirent.d_name, evt_dirent.d_name); - fprintf(stderr, " %-42s [%s]\n", evt_path, + printf(" %-42s [%s]\n", evt_path, event_type_descriptors[PERF_TYPE_TRACEPOINT+1]); } closedir(evt_dir); @@ -823,8 +823,8 @@ void print_events(void) unsigned int i, type, op, prev_type = -1; char name[40]; - fprintf(stderr, "\n"); - fprintf(stderr, "List of pre-defined events (to be used in -e):\n"); + printf("\n"); + printf("List of pre-defined events (to be used in -e):\n"); for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) { type = syms->type + 1; @@ -832,19 +832,19 @@ void print_events(void) type = 0; if (type != prev_type) - fprintf(stderr, "\n"); + printf("\n"); if (strlen(syms->alias)) sprintf(name, "%s OR %s", syms->symbol, syms->alias); else strcpy(name, syms->symbol); - fprintf(stderr, " %-42s [%s]\n", name, + printf(" %-42s [%s]\n", name, event_type_descriptors[type]); prev_type = type; } - fprintf(stderr, "\n"); + printf("\n"); for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { /* skip invalid cache type */ @@ -852,17 +852,17 @@ void print_events(void) continue; for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { - fprintf(stderr, " %-42s [%s]\n", + printf(" %-42s [%s]\n", event_cache_name(type, op, i), event_type_descriptors[4]); } } } - fprintf(stderr, "\n"); - fprintf(stderr, " %-42s [raw hardware event descriptor]\n", + printf("\n"); + printf(" %-42s [raw hardware event descriptor]\n", "rNNN"); - fprintf(stderr, "\n"); + printf("\n"); print_tracepoint_events(); From bc284e5d9d6da48934a177db92bf8e09b96a9cb8 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Mon, 21 Sep 2009 16:56:10 +0000 Subject: [PATCH 182/470] powerpc: perf_event: Log invalid data addresses as all 1s When we take an exception and the SDAR isn't synchronised we currently log 0 as the address. Unfortunately this is a pretty common value, so use ~0UL instead. 
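(Editor's note, an illustrative sketch rather than kernel code: because 0 is a perfectly common data address while all 1s is not, a consumer post-processing PERF_SAMPLE_ADDR values can now cheaply skip the "SDAR was not synchronised" case; u64 is assumed to come from linux/types.h.)

static inline int sample_addr_valid(u64 addr)
{
	return addr != ~0ULL;	/* ~0ULL now marks an unsynchronised SDAR */
}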
Signed-off-by: Anton Blanchard Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/perf_event.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index bbcbae183e9..73c3b73bf00 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c @@ -1162,7 +1162,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, */ if (record) { struct perf_sample_data data = { - .addr = 0, + .addr = ~0ULL, .period = event->hw.last_period, }; From 81cd5ae303e88a1e9d3a3e0f1fe8abd100edde16 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 27 Oct 2009 18:31:29 +0000 Subject: [PATCH 183/470] powerpc: perf_event: Enable SDAR in continous sample mode In continuous sampling mode we want the SDAR to update. While we can select between dcache misses and ERAT (L1-TLB) misses, a decent default is to enable both. Signed-off-by: Anton Blanchard Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/reg.h | 2 ++ arch/powerpc/kernel/power5+-pmu.c | 4 ---- arch/powerpc/kernel/power5-pmu.c | 6 +----- arch/powerpc/kernel/power6-pmu.c | 2 +- arch/powerpc/kernel/power7-pmu.c | 6 +----- arch/powerpc/kernel/ppc970-pmu.c | 4 ---- 6 files changed, 5 insertions(+), 19 deletions(-) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 6315edc205d..bc8dd53f718 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -489,6 +489,8 @@ #define SPRN_MMCR1 798 #define SPRN_MMCRA 0x312 #define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */ +#define MMCRA_SDAR_DCACHE_MISS 0x40000000UL +#define MMCRA_SDAR_ERAT_MISS 0x20000000UL #define MMCRA_SIHV 0x10000000UL /* state of MSR HV when SIAR set */ #define MMCRA_SIPR 0x08000000UL /* state of MSR PR when SIAR set */ #define MMCRA_SLOT 0x07000000UL /* SLOT bits (37-39) */ diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c index 0f4c1c73a6a..199de527d41 100644 --- a/arch/powerpc/kernel/power5+-pmu.c +++ b/arch/powerpc/kernel/power5+-pmu.c @@ -72,10 +72,6 @@ #define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) #define MMCR1_PMCSEL_MSK 0x7f -/* - * Bits in MMCRA - */ - /* * Layout of constraint bits: * 6666555555555544444444443333333333222222222211111111110000000000 diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c index c351b3a57fb..98b6a729a9d 100644 --- a/arch/powerpc/kernel/power5-pmu.c +++ b/arch/powerpc/kernel/power5-pmu.c @@ -72,10 +72,6 @@ #define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) #define MMCR1_PMCSEL_MSK 0x7f -/* - * Bits in MMCRA - */ - /* * Layout of constraint bits: * 6666555555555544444444443333333333222222222211111111110000000000 @@ -390,7 +386,7 @@ static int power5_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], unsigned long mmcr[]) { unsigned long mmcr1 = 0; - unsigned long mmcra = 0; + unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS; unsigned int pmc, unit, byte, psel; unsigned int ttm, grp; int i, isbus, bit, grsel; diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c index ca399ba5034..84a607bda8f 100644 --- a/arch/powerpc/kernel/power6-pmu.c +++ b/arch/powerpc/kernel/power6-pmu.c @@ -178,7 +178,7 @@ static int p6_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], unsigned long mmcr[]) { unsigned long mmcr1 = 0; - unsigned long mmcra = 0; + unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS; int i; unsigned int pmc, ev, 
b, u, s, psel; unsigned int ttmset = 0; diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c index 28a4daacdc0..852f7b7f6b4 100644 --- a/arch/powerpc/kernel/power7-pmu.c +++ b/arch/powerpc/kernel/power7-pmu.c @@ -50,10 +50,6 @@ #define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) #define MMCR1_PMCSEL_MSK 0xff -/* - * Bits in MMCRA - */ - /* * Layout of constraint bits: * 6666555555555544444444443333333333222222222211111111110000000000 @@ -230,7 +226,7 @@ static int power7_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], unsigned long mmcr[]) { unsigned long mmcr1 = 0; - unsigned long mmcra = 0; + unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS; unsigned int pmc, unit, combine, l2sel, psel; unsigned int pmc_inuse = 0; int i; diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c index 479574413a9..8eff48e20db 100644 --- a/arch/powerpc/kernel/ppc970-pmu.c +++ b/arch/powerpc/kernel/ppc970-pmu.c @@ -83,10 +83,6 @@ static short mmcr1_adder_bits[8] = { MMCR1_PMC8_ADDER_SEL_SH }; -/* - * Bits in MMCRA - */ - /* * Layout of constraint bits: * 6666555555555544444444443333333333222222222211111111110000000000 From f7d7986060b2890fc26db6ab5203efbd33aa2497 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Sun, 18 Oct 2009 01:09:29 +0000 Subject: [PATCH 184/470] perf_event: Add alignment-faults and emulation-faults software events Add two more software events that are common to many cpus. Alignment faults: When a load or store is not aligned properly. Emulation faults: When an instruction is emulated in software. Both cause a very significant slowdown (100x or worse), so identifying and fixing them is very important. Signed-off-by: Anton Blanchard Signed-off-by: Paul Mackerras --- include/linux/perf_counter.h | 2 ++ include/linux/perf_event.h | 2 ++ kernel/perf_event.c | 2 ++ tools/perf/design.txt | 2 ++ tools/perf/util/parse-events.c | 4 ++++ 5 files changed, 12 insertions(+) diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h index 7b7fbf433cf..d6b95d1e79f 100644 --- a/include/linux/perf_counter.h +++ b/include/linux/perf_counter.h @@ -106,6 +106,8 @@ enum perf_sw_ids { PERF_COUNT_SW_CPU_MIGRATIONS = 4, PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, + PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, + PERF_COUNT_SW_EMULATION_FAULTS = 8, PERF_COUNT_SW_MAX, /* non-ABI */ }; diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 2e6d95f9741..a33707a3a78 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -102,6 +102,8 @@ enum perf_sw_ids { PERF_COUNT_SW_CPU_MIGRATIONS = 4, PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, + PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, + PERF_COUNT_SW_EMULATION_FAULTS = 8, PERF_COUNT_SW_MAX, /* non-ABI */ }; diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 9d0b5c66588..0683b33cbb2 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4186,6 +4186,8 @@ static const struct pmu *sw_perf_event_init(struct perf_event *event) case PERF_COUNT_SW_PAGE_FAULTS_MAJ: case PERF_COUNT_SW_CONTEXT_SWITCHES: case PERF_COUNT_SW_CPU_MIGRATIONS: + case PERF_COUNT_SW_ALIGNMENT_FAULTS: + case PERF_COUNT_SW_EMULATION_FAULTS: if (!event->parent) { atomic_inc(&perf_swevent_enabled[event_id]); event->destroy = sw_perf_event_destroy; diff --git a/tools/perf/design.txt b/tools/perf/design.txt index fdd42a824c9..f000c30877a 100644 --- a/tools/perf/design.txt +++ b/tools/perf/design.txt @@ -137,6 +137,8 @@ enum 
sw_event_ids { PERF_COUNT_SW_CPU_MIGRATIONS = 4, PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, + PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, + PERF_COUNT_SW_EMULATION_FAULTS = 8, }; Counters of the type PERF_TYPE_TRACEPOINT are available when the ftrace event diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 8cfb48cbbea..34bd8442393 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -46,6 +46,8 @@ static struct event_symbol event_symbols[] = { { CSW(PAGE_FAULTS_MAJ), "major-faults", "" }, { CSW(CONTEXT_SWITCHES), "context-switches", "cs" }, { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" }, + { CSW(ALIGNMENT_FAULTS), "alignment-faults", "" }, + { CSW(EMULATION_FAULTS), "emulation-faults", "" }, }; #define __PERF_EVENT_FIELD(config, name) \ @@ -74,6 +76,8 @@ static const char *sw_event_names[] = { "CPU-migrations", "minor-faults", "major-faults", + "alignment-faults", + "emulation-faults", }; #define MAX_ALIASES 8 From eecff81d1fcda22cd0029d11fe2a71dceed11dad Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 27 Oct 2009 18:46:55 +0000 Subject: [PATCH 185/470] powerpc: Create PPC_WARN_ALIGNMENT to match PPC_WARN_EMULATED perf_event wants a separate event for alignment and emulation faults, so create another emulation event. This will make it easy to hook in perf_event at one spot. We pass in regs which will be required for these events. Signed-off-by: Anton Blanchard Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/emulated_ops.h | 7 +++++-- arch/powerpc/kernel/align.c | 12 ++++++------ arch/powerpc/kernel/traps.c | 18 +++++++++--------- 3 files changed, 20 insertions(+), 17 deletions(-) diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h index 9154e852673..640c4e456aa 100644 --- a/arch/powerpc/include/asm/emulated_ops.h +++ b/arch/powerpc/include/asm/emulated_ops.h @@ -57,7 +57,7 @@ extern u32 ppc_warn_emulated; extern void ppc_warn_emulated_print(const char *type); -#define PPC_WARN_EMULATED(type) \ +#define __PPC_WARN_EMULATED(type) \ do { \ atomic_inc(&ppc_emulated.type.val); \ if (ppc_warn_emulated) \ @@ -66,8 +66,11 @@ extern void ppc_warn_emulated_print(const char *type); #else /* !CONFIG_PPC_EMULATED_STATS */ -#define PPC_WARN_EMULATED(type) do { } while (0) +#define __PPC_WARN_EMULATED(type) do { } while (0) #endif /* !CONFIG_PPC_EMULATED_STATS */ +#define PPC_WARN_EMULATED(type, regs) __PPC_WARN_EMULATED(type) +#define PPC_WARN_ALIGNMENT(type, regs) __PPC_WARN_EMULATED(type) + #endif /* _ASM_POWERPC_EMULATED_OPS_H */ diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index a5b632e52fa..3839839f83c 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -732,7 +732,7 @@ int fix_alignment(struct pt_regs *regs) #ifdef CONFIG_SPE if ((instr >> 26) == 0x4) { - PPC_WARN_EMULATED(spe); + PPC_WARN_ALIGNMENT(spe, regs); return emulate_spe(regs, reg, instr); } #endif @@ -786,7 +786,7 @@ int fix_alignment(struct pt_regs *regs) flags |= SPLT; nb = 8; } - PPC_WARN_EMULATED(vsx); + PPC_WARN_ALIGNMENT(vsx, regs); return emulate_vsx(addr, reg, areg, regs, flags, nb); } #endif @@ -794,7 +794,7 @@ int fix_alignment(struct pt_regs *regs) * the exception of DCBZ which is handled as a special case here */ if (instr == DCBZ) { - PPC_WARN_EMULATED(dcbz); + PPC_WARN_ALIGNMENT(dcbz, regs); return emulate_dcbz(regs, addr); } if (unlikely(nb == 0)) @@ -804,7 +804,7 @@ int fix_alignment(struct pt_regs *regs) * function */ 
if (flags & M) { - PPC_WARN_EMULATED(multiple); + PPC_WARN_ALIGNMENT(multiple, regs); return emulate_multiple(regs, addr, reg, nb, flags, instr, swiz); } @@ -825,11 +825,11 @@ int fix_alignment(struct pt_regs *regs) /* Special case for 16-byte FP loads and stores */ if (nb == 16) { - PPC_WARN_EMULATED(fp_pair); + PPC_WARN_ALIGNMENT(fp_pair, regs); return emulate_fp_pair(addr, reg, flags); } - PPC_WARN_EMULATED(unaligned); + PPC_WARN_ALIGNMENT(unaligned, regs); /* If we are loading, get the data from user space, else * get it from register values diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 6f0ae1a9bfa..9d1f9354d6c 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -759,7 +759,7 @@ static int emulate_instruction(struct pt_regs *regs) /* Emulate the mfspr rD, PVR. */ if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) { - PPC_WARN_EMULATED(mfpvr); + PPC_WARN_EMULATED(mfpvr, regs); rd = (instword >> 21) & 0x1f; regs->gpr[rd] = mfspr(SPRN_PVR); return 0; @@ -767,7 +767,7 @@ static int emulate_instruction(struct pt_regs *regs) /* Emulating the dcba insn is just a no-op. */ if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) { - PPC_WARN_EMULATED(dcba); + PPC_WARN_EMULATED(dcba, regs); return 0; } @@ -776,7 +776,7 @@ static int emulate_instruction(struct pt_regs *regs) int shift = (instword >> 21) & 0x1c; unsigned long msk = 0xf0000000UL >> shift; - PPC_WARN_EMULATED(mcrxr); + PPC_WARN_EMULATED(mcrxr, regs); regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk); regs->xer &= ~0xf0000000UL; return 0; @@ -784,19 +784,19 @@ static int emulate_instruction(struct pt_regs *regs) /* Emulate load/store string insn. */ if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) { - PPC_WARN_EMULATED(string); + PPC_WARN_EMULATED(string, regs); return emulate_string_inst(regs, instword); } /* Emulate the popcntb (Population Count Bytes) instruction. */ if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) { - PPC_WARN_EMULATED(popcntb); + PPC_WARN_EMULATED(popcntb, regs); return emulate_popcntb_inst(regs, instword); } /* Emulate isel (Integer Select) instruction */ if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) { - PPC_WARN_EMULATED(isel); + PPC_WARN_EMULATED(isel, regs); return emulate_isel(regs, instword); } @@ -995,7 +995,7 @@ void SoftwareEmulation(struct pt_regs *regs) #ifdef CONFIG_MATH_EMULATION errcode = do_mathemu(regs); if (errcode >= 0) - PPC_WARN_EMULATED(math); + PPC_WARN_EMULATED(math, regs); switch (errcode) { case 0: @@ -1018,7 +1018,7 @@ void SoftwareEmulation(struct pt_regs *regs) #elif defined(CONFIG_8XX_MINIMAL_FPEMU) errcode = Soft_emulate_8xx(regs); if (errcode >= 0) - PPC_WARN_EMULATED(8xx); + PPC_WARN_EMULATED(8xx, regs); switch (errcode) { case 0: @@ -1129,7 +1129,7 @@ void altivec_assist_exception(struct pt_regs *regs) flush_altivec_to_thread(current); - PPC_WARN_EMULATED(altivec); + PPC_WARN_EMULATED(altivec, regs); err = emulate_altivec(regs); if (err == 0) { regs->nip += 4; /* skip emulated instruction */ From 196f02bf900c5eb6f85d889c4f70e7cc11fda7e8 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Sun, 18 Oct 2009 01:13:00 +0000 Subject: [PATCH 186/470] powerpc: perf_event: Add alignment-faults and emulation-faults software events Hook up the alignment-faults and emulation-faults events for powerpc. 
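As an illustration of what the new counters expose, here is a minimal user-space sketch (not part of this series) that reads one of them through the raw perf_event_open() syscall. PERF_TYPE_SOFTWARE and PERF_COUNT_SW_ALIGNMENT_FAULTS come from the enum added earlier in the series; the program structure and the placeholder workload are illustrative only.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* No glibc wrapper exists for this syscall, so invoke it directly. */
static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                           int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        long long count;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_ALIGNMENT_FAULTS; /* or ..._EMULATION_FAULTS */

        fd = perf_event_open(&attr, 0, -1, -1, 0);    /* current task, any CPU */
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        /* ... run the workload of interest here ... */

        read(fd, &count, sizeof(count));
        printf("alignment faults: %lld\n", count);
        close(fd);
        return 0;
}

From the perf tool the same counters are reachable under the names "alignment-faults" and "emulation-faults" registered in parse-events.c above.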
Signed-off-by: Anton Blanchard Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/emulated_ops.h | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h index 640c4e456aa..f0fb4fc1f6e 100644 --- a/arch/powerpc/include/asm/emulated_ops.h +++ b/arch/powerpc/include/asm/emulated_ops.h @@ -19,6 +19,7 @@ #define _ASM_POWERPC_EMULATED_OPS_H #include +#include #ifdef CONFIG_PPC_EMULATED_STATS @@ -70,7 +71,18 @@ extern void ppc_warn_emulated_print(const char *type); #endif /* !CONFIG_PPC_EMULATED_STATS */ -#define PPC_WARN_EMULATED(type, regs) __PPC_WARN_EMULATED(type) -#define PPC_WARN_ALIGNMENT(type, regs) __PPC_WARN_EMULATED(type) +#define PPC_WARN_EMULATED(type, regs) \ + do { \ + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, \ + 1, 0, regs, 0); \ + __PPC_WARN_EMULATED(type); \ + } while (0) + +#define PPC_WARN_ALIGNMENT(type, regs) \ + do { \ + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, \ + 1, 0, regs, regs->dar); \ + __PPC_WARN_EMULATED(type); \ + } while (0) #endif /* _ASM_POWERPC_EMULATED_OPS_H */ From 1bf4af165050d90ea6659ffb2536ec8ca783aab5 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Mon, 26 Oct 2009 18:47:42 +0000 Subject: [PATCH 187/470] powerpc: tracing: Add powerpc tracepoints for interrupt entry and exit This adds powerpc-specific tracepoints for interrupt entry and exit. While we already have generic irq_handler_entry and irq_handler_exit tracepoints there are cases on our virtualised powerpc machines where an interrupt is presented to the OS, but subsequently handled by the hypervisor. This means no OS interrupt handler is invoked. Here is an example on a POWER6 machine with the patch below applied: -0 [006] 3243.949840744: irq_entry: pt_regs=c0000000ce31fb10 -0 [006] 3243.949850520: irq_exit: pt_regs=c0000000ce31fb10 -0 [007] 3243.950218208: irq_entry: pt_regs=c0000000ce323b10 -0 [007] 3243.950224080: irq_exit: pt_regs=c0000000ce323b10 -0 [000] 3244.021879320: irq_entry: pt_regs=c000000000a63aa0 -0 [000] 3244.021883616: irq_handler_entry: irq=87 handler=eth0 -0 [000] 3244.021887328: irq_handler_exit: irq=87 return=handled -0 [000] 3244.021897408: irq_exit: pt_regs=c000000000a63aa0 Here we see two phantom interrupts (no handler was invoked), followed by a real interrupt for eth0. Without the tracepoints in this patch we would have missed the phantom interrupts. 
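For completeness, a rough sketch of a module-side probe hooking the two new tracepoints, modelled on the register_trace_*() pattern that hvCall_inst.c uses later in this series. The module layout, the pr_debug() output and the assumption that the tracepoints are usable from module context are illustrative, not part of the patch.

#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/trace.h>

static void probe_irq_entry(struct pt_regs *regs)
{
        pr_debug("irq entry: pt_regs=%p\n", regs);
}

static void probe_irq_exit(struct pt_regs *regs)
{
        pr_debug("irq exit: pt_regs=%p\n", regs);
}

static int __init irq_probe_init(void)
{
        if (register_trace_irq_entry(probe_irq_entry))
                return -EINVAL;
        if (register_trace_irq_exit(probe_irq_exit)) {
                unregister_trace_irq_entry(probe_irq_entry);
                return -EINVAL;
        }
        return 0;
}

static void __exit irq_probe_exit(void)
{
        unregister_trace_irq_exit(probe_irq_exit);
        unregister_trace_irq_entry(probe_irq_entry);
}

module_init(irq_probe_init);
module_exit(irq_probe_exit);
MODULE_LICENSE("GPL");

Because the events are declared with TRACE_SYSTEM powerpc, they also appear in the generic tracing event directory, which is how output like the example above can be collected without any extra code.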
Signed-off-by: Anton Blanchard Acked-by: Steven Rostedt Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/trace.h | 53 ++++++++++++++++++++++++++++++++ arch/powerpc/kernel/irq.c | 6 ++++ 2 files changed, 59 insertions(+) create mode 100644 arch/powerpc/include/asm/trace.h diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h new file mode 100644 index 00000000000..187696da5ae --- /dev/null +++ b/arch/powerpc/include/asm/trace.h @@ -0,0 +1,53 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM powerpc + +#if !defined(_TRACE_POWERPC_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_POWERPC_H + +#include + +struct pt_regs; + +TRACE_EVENT(irq_entry, + + TP_PROTO(struct pt_regs *regs), + + TP_ARGS(regs), + + TP_STRUCT__entry( + __field(struct pt_regs *, regs) + ), + + TP_fast_assign( + __entry->regs = regs; + ), + + TP_printk("pt_regs=%p", __entry->regs) +); + +TRACE_EVENT(irq_exit, + + TP_PROTO(struct pt_regs *regs), + + TP_ARGS(regs), + + TP_STRUCT__entry( + __field(struct pt_regs *, regs) + ), + + TP_fast_assign( + __entry->regs = regs; + ), + + TP_printk("pt_regs=%p", __entry->regs) +); + +#endif /* _TRACE_POWERPC_H */ + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE + +#define TRACE_INCLUDE_PATH asm +#define TRACE_INCLUDE_FILE trace + +#include diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index e5d12117798..02a334662cc 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -70,6 +70,8 @@ #include #include #endif +#define CREATE_TRACE_POINTS +#include int __irq_offset_value; static int ppc_spurious_interrupts; @@ -325,6 +327,8 @@ void do_IRQ(struct pt_regs *regs) struct pt_regs *old_regs = set_irq_regs(regs); unsigned int irq; + trace_irq_entry(regs); + irq_enter(); check_stack_overflow(); @@ -348,6 +352,8 @@ void do_IRQ(struct pt_regs *regs) timer_interrupt(regs); } #endif + + trace_irq_exit(regs); } void __init init_IRQ(void) From 6795b85c6a4f690e61e7be31aa150d945c723fb5 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Mon, 26 Oct 2009 18:49:14 +0000 Subject: [PATCH 188/470] powerpc: tracing: Add powerpc tracepoints for timer entry and exit We can monitor the effectiveness of our power management of both the kernel and hypervisor by probing the timer interrupt. For example, on this box we see 10.37s timer interrupts on an idle core: -0 [010] 3900.671297: timer_interrupt_entry: pt_regs=c0000000ce1e7b10 -0 [010] 3900.671302: timer_interrupt_exit: pt_regs=c0000000ce1e7b10 -0 [010] 3911.042963: timer_interrupt_entry: pt_regs=c0000000ce1e7b10 -0 [010] 3911.042968: timer_interrupt_exit: pt_regs=c0000000ce1e7b10 -0 [010] 3921.414630: timer_interrupt_entry: pt_regs=c0000000ce1e7b10 -0 [010] 3921.414635: timer_interrupt_exit: pt_regs=c0000000ce1e7b10 Since we have a 207MHz decrementer it will go negative and fire every 10.37s even if Linux is completely idle. 
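The 10.37s figure follows from the decrementer width: the exception is raised when the 32-bit register goes negative, so an otherwise idle CPU that reloads it with DECREMENTER_MAX sees an interrupt roughly every 2^31 / 207,000,000 ≈ 10.37 seconds.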
Signed-off-by: Anton Blanchard Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/trace.h | 34 ++++++++++++++++++++++++++++++++ arch/powerpc/kernel/time.c | 6 ++++++ 2 files changed, 40 insertions(+) diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h index 187696da5ae..b558c31d409 100644 --- a/arch/powerpc/include/asm/trace.h +++ b/arch/powerpc/include/asm/trace.h @@ -42,6 +42,40 @@ TRACE_EVENT(irq_exit, TP_printk("pt_regs=%p", __entry->regs) ); +TRACE_EVENT(timer_interrupt_entry, + + TP_PROTO(struct pt_regs *regs), + + TP_ARGS(regs), + + TP_STRUCT__entry( + __field(struct pt_regs *, regs) + ), + + TP_fast_assign( + __entry->regs = regs; + ), + + TP_printk("pt_regs=%p", __entry->regs) +); + +TRACE_EVENT(timer_interrupt_exit, + + TP_PROTO(struct pt_regs *regs), + + TP_ARGS(regs), + + TP_STRUCT__entry( + __field(struct pt_regs *, regs) + ), + + TP_fast_assign( + __entry->regs = regs; + ), + + TP_printk("pt_regs=%p", __entry->regs) +); + #endif /* _TRACE_POWERPC_H */ #undef TRACE_INCLUDE_PATH diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 92dc844299b..d6e88df4630 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -54,6 +54,7 @@ #include #include #include +#include #include #include @@ -571,6 +572,8 @@ void timer_interrupt(struct pt_regs * regs) struct clock_event_device *evt = &decrementer->event; u64 now; + trace_timer_interrupt_entry(regs); + /* Ensure a positive value is written to the decrementer, or else * some CPUs will continuue to take decrementer exceptions */ set_dec(DECREMENTER_MAX); @@ -590,6 +593,7 @@ void timer_interrupt(struct pt_regs * regs) now = decrementer->next_tb - now; if (now <= DECREMENTER_MAX) set_dec((int)now); + trace_timer_interrupt_exit(regs); return; } old_regs = set_irq_regs(regs); @@ -620,6 +624,8 @@ void timer_interrupt(struct pt_regs * regs) irq_exit(); set_irq_regs(old_regs); + + trace_timer_interrupt_exit(regs); } void wakeup_decrementer(void) From c8cd093a6e9f96ea6b871576fd4e46d7c818bb89 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Mon, 26 Oct 2009 18:50:29 +0000 Subject: [PATCH 189/470] powerpc: tracing: Add hypervisor call tracepoints Add hcall_entry and hcall_exit tracepoints. This replaces the inline assembly HCALL_STATS code and converts it to use the new tracepoints. To keep the disabled case as quick as possible, we embed a status word in the TOC so we can get at it with a single load. By doing so we keep the overhead at a minimum. Time taken for a null hcall: No tracepoint code: 135.79 cycles Disabled tracepoints: 137.95 cycles For reference, before this patch enabling HCALL_STATS resulted in a null hcall of 201.44 cycles! 
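The disabled fast path added to the hcall macros boils down to the following approximate C. This is a sketch only (the real check is done in assembly before any registers are saved): a single load of a TOC-resident reference count decides whether the tracing call happens at all, which is what keeps the disabled case within a couple of cycles of the original code.

#include <linux/compiler.h>

extern long hcall_tracepoint_refcount;                 /* lives in the TOC */
extern void __trace_hcall_entry(unsigned long opcode); /* defined in lpar.c */

static inline void hcall_trace_entry_sketch(unsigned long opcode)
{
        if (likely(hcall_tracepoint_refcount == 0))
                return;                 /* tracepoints off: one load + one branch */
        __trace_hcall_entry(opcode);    /* on: take the slow path and fire the probe */
}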
Signed-off-by: Anton Blanchard Signed-off-by: Paul Mackerras --- arch/powerpc/Kconfig.debug | 2 +- arch/powerpc/include/asm/hvcall.h | 2 + arch/powerpc/include/asm/trace.h | 45 +++++++++ arch/powerpc/platforms/pseries/hvCall.S | 101 +++++++++++-------- arch/powerpc/platforms/pseries/hvCall_inst.c | 37 +++++++ arch/powerpc/platforms/pseries/lpar.c | 32 ++++++ 6 files changed, 175 insertions(+), 44 deletions(-) diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index 3b100518539..bf3382f1904 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug @@ -46,7 +46,7 @@ config DEBUG_STACK_USAGE config HCALL_STATS bool "Hypervisor call instrumentation" - depends on PPC_PSERIES && DEBUG_FS + depends on PPC_PSERIES && DEBUG_FS && TRACEPOINTS help Adds code to keep track of the number of hypervisor calls made and the amount of time spent in hypervisor calls. Wall time spent in diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 6251a4b10be..c27caac47ad 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -274,6 +274,8 @@ struct hcall_stats { unsigned long num_calls; /* number of calls (on this CPU) */ unsigned long tb_total; /* total wall time (mftb) of calls. */ unsigned long purr_total; /* total cpu time (PURR) of calls. */ + unsigned long tb_start; + unsigned long purr_start; }; #define HCALL_STAT_ARRAY_SIZE ((MAX_HCALL_OPCODE >> 2) + 1) diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h index b558c31d409..9b01c0e43b5 100644 --- a/arch/powerpc/include/asm/trace.h +++ b/arch/powerpc/include/asm/trace.h @@ -76,6 +76,51 @@ TRACE_EVENT(timer_interrupt_exit, TP_printk("pt_regs=%p", __entry->regs) ); +#ifdef CONFIG_PPC_PSERIES +extern void hcall_tracepoint_regfunc(void); +extern void hcall_tracepoint_unregfunc(void); + +TRACE_EVENT_FN(hcall_entry, + + TP_PROTO(unsigned long opcode), + + TP_ARGS(opcode), + + TP_STRUCT__entry( + __field(unsigned long, opcode) + ), + + TP_fast_assign( + __entry->opcode = opcode; + ), + + TP_printk("opcode=%lu", __entry->opcode), + + hcall_tracepoint_regfunc, hcall_tracepoint_unregfunc +); + +TRACE_EVENT_FN(hcall_exit, + + TP_PROTO(unsigned long opcode, unsigned long retval), + + TP_ARGS(opcode, retval), + + TP_STRUCT__entry( + __field(unsigned long, opcode) + __field(unsigned long, retval) + ), + + TP_fast_assign( + __entry->opcode = opcode; + __entry->retval = retval; + ), + + TP_printk("opcode=%lu retval=%lu", __entry->opcode, __entry->retval), + + hcall_tracepoint_regfunc, hcall_tracepoint_unregfunc +); +#endif + #endif /* _TRACE_POWERPC_H */ #undef TRACE_INCLUDE_PATH diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S index c1427b3634e..01e95ab18d3 100644 --- a/arch/powerpc/platforms/pseries/hvCall.S +++ b/arch/powerpc/platforms/pseries/hvCall.S @@ -14,20 +14,54 @@ #define STK_PARM(i) (48 + ((i)-3)*8) -#ifdef CONFIG_HCALL_STATS +#ifdef CONFIG_TRACEPOINTS + + .section ".toc","aw" + + .globl hcall_tracepoint_refcount +hcall_tracepoint_refcount: + .llong 0 + + .section ".text" + /* * precall must preserve all registers. use unused STK_PARM() - * areas to save snapshots and opcode. + * areas to save snapshots and opcode. We branch around this + * in early init (eg when populating the MMU hashtable) by using an + * unconditional cpu feature. 
*/ #define HCALL_INST_PRECALL \ - std r3,STK_PARM(r3)(r1); /* save opcode */ \ - mftb r0; /* get timebase and */ \ - std r0,STK_PARM(r5)(r1); /* save for later */ \ BEGIN_FTR_SECTION; \ - mfspr r0,SPRN_PURR; /* get PURR and */ \ - std r0,STK_PARM(r6)(r1); /* save for later */ \ -END_FTR_SECTION_IFSET(CPU_FTR_PURR); - + b 1f; \ +END_FTR_SECTION(0, 1); \ + ld r12,hcall_tracepoint_refcount@toc(r2); \ + cmpdi r12,0; \ + beq+ 1f; \ + mflr r0; \ + std r3,STK_PARM(r3)(r1); \ + std r4,STK_PARM(r4)(r1); \ + std r5,STK_PARM(r5)(r1); \ + std r6,STK_PARM(r6)(r1); \ + std r7,STK_PARM(r7)(r1); \ + std r8,STK_PARM(r8)(r1); \ + std r9,STK_PARM(r9)(r1); \ + std r10,STK_PARM(r10)(r1); \ + std r0,16(r1); \ + stdu r1,-STACK_FRAME_OVERHEAD(r1); \ + bl .__trace_hcall_entry; \ + addi r1,r1,STACK_FRAME_OVERHEAD; \ + ld r0,16(r1); \ + ld r3,STK_PARM(r3)(r1); \ + ld r4,STK_PARM(r4)(r1); \ + ld r5,STK_PARM(r5)(r1); \ + ld r6,STK_PARM(r6)(r1); \ + ld r7,STK_PARM(r7)(r1); \ + ld r8,STK_PARM(r8)(r1); \ + ld r9,STK_PARM(r9)(r1); \ + ld r10,STK_PARM(r10)(r1); \ + mtlr r0; \ +1: + /* * postcall is performed immediately before function return which * allows liberal use of volatile registers. We branch around this @@ -38,40 +72,21 @@ END_FTR_SECTION_IFSET(CPU_FTR_PURR); BEGIN_FTR_SECTION; \ b 1f; \ END_FTR_SECTION(0, 1); \ - ld r4,STK_PARM(r3)(r1); /* validate opcode */ \ - cmpldi cr7,r4,MAX_HCALL_OPCODE; \ - bgt- cr7,1f; \ - \ - /* get time and PURR snapshots after hcall */ \ - mftb r7; /* timebase after */ \ -BEGIN_FTR_SECTION; \ - mfspr r8,SPRN_PURR; /* PURR after */ \ - ld r6,STK_PARM(r6)(r1); /* PURR before */ \ - subf r6,r6,r8; /* delta */ \ -END_FTR_SECTION_IFSET(CPU_FTR_PURR); \ - ld r5,STK_PARM(r5)(r1); /* timebase before */ \ - subf r5,r5,r7; /* time delta */ \ - \ - /* calculate address of stat structure r4 = opcode */ \ - srdi r4,r4,2; /* index into array */ \ - mulli r4,r4,HCALL_STAT_SIZE; \ - LOAD_REG_ADDR(r7, per_cpu__hcall_stats); \ - add r4,r4,r7; \ - ld r7,PACA_DATA_OFFSET(r13); /* per cpu offset */ \ - add r4,r4,r7; \ - \ - /* update stats */ \ - ld r7,HCALL_STAT_CALLS(r4); /* count */ \ - addi r7,r7,1; \ - std r7,HCALL_STAT_CALLS(r4); \ - ld r7,HCALL_STAT_TB(r4); /* timebase */ \ - add r7,r7,r5; \ - std r7,HCALL_STAT_TB(r4); \ -BEGIN_FTR_SECTION; \ - ld r7,HCALL_STAT_PURR(r4); /* PURR */ \ - add r7,r7,r6; \ - std r7,HCALL_STAT_PURR(r4); \ -END_FTR_SECTION_IFSET(CPU_FTR_PURR); \ + ld r12,hcall_tracepoint_refcount@toc(r2); \ + cmpdi r12,0; \ + beq+ 1f; \ + mflr r0; \ + ld r6,STK_PARM(r3)(r1); \ + std r3,STK_PARM(r3)(r1); \ + mr r4,r3; \ + mr r3,r6; \ + std r0,16(r1); \ + stdu r1,-STACK_FRAME_OVERHEAD(r1); \ + bl .__trace_hcall_exit; \ + addi r1,r1,STACK_FRAME_OVERHEAD; \ + ld r0,16(r1); \ + ld r3,STK_PARM(r3)(r1); \ + mtlr r0; \ 1: #else #define HCALL_INST_PRECALL diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c index 3631a4f277e..e44e1035f13 100644 --- a/arch/powerpc/platforms/pseries/hvCall_inst.c +++ b/arch/powerpc/platforms/pseries/hvCall_inst.c @@ -26,6 +26,7 @@ #include #include #include +#include DEFINE_PER_CPU(struct hcall_stats[HCALL_STAT_ARRAY_SIZE], hcall_stats); @@ -100,6 +101,34 @@ static const struct file_operations hcall_inst_seq_fops = { #define HCALL_ROOT_DIR "hcall_inst" #define CPU_NAME_BUF_SIZE 32 + +static void probe_hcall_entry(unsigned long opcode) +{ + struct hcall_stats *h; + + if (opcode > MAX_HCALL_OPCODE) + return; + + h = &get_cpu_var(hcall_stats)[opcode / 4]; + h->tb_start = mftb(); + h->purr_start = mfspr(SPRN_PURR); +} 
+ +static void probe_hcall_exit(unsigned long opcode, unsigned long retval) +{ + struct hcall_stats *h; + + if (opcode > MAX_HCALL_OPCODE) + return; + + h = &__get_cpu_var(hcall_stats)[opcode / 4]; + h->num_calls++; + h->tb_total = mftb() - h->tb_start; + h->purr_total = mfspr(SPRN_PURR) - h->purr_start; + + put_cpu_var(hcall_stats); +} + static int __init hcall_inst_init(void) { struct dentry *hcall_root; @@ -110,6 +139,14 @@ static int __init hcall_inst_init(void) if (!firmware_has_feature(FW_FEATURE_LPAR)) return 0; + if (register_trace_hcall_entry(probe_hcall_entry)) + return -EINVAL; + + if (register_trace_hcall_exit(probe_hcall_exit)) { + unregister_trace_hcall_entry(probe_hcall_entry); + return -EINVAL; + } + hcall_root = debugfs_create_dir(HCALL_ROOT_DIR, NULL); if (!hcall_root) return -ENOMEM; diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 903eb9eec68..4b7b6e8e32d 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -39,6 +39,7 @@ #include #include #include +#include #include "plpar_wrappers.h" #include "pseries.h" @@ -661,3 +662,34 @@ void arch_free_page(struct page *page, int order) EXPORT_SYMBOL(arch_free_page); #endif + +#ifdef CONFIG_TRACEPOINTS +/* + * We optimise our hcall path by placing hcall_tracepoint_refcount + * directly in the TOC so we can check if the hcall tracepoints are + * enabled via a single load. + */ + +/* NB: reg/unreg are called while guarded with the tracepoints_mutex */ +extern long hcall_tracepoint_refcount; + +void hcall_tracepoint_regfunc(void) +{ + hcall_tracepoint_refcount++; +} + +void hcall_tracepoint_unregfunc(void) +{ + hcall_tracepoint_refcount--; +} + +void __trace_hcall_entry(unsigned long opcode) +{ + trace_hcall_entry(opcode); +} + +void __trace_hcall_exit(long opcode, unsigned long retval) +{ + trace_hcall_exit(opcode, retval); +} +#endif From 6f26353ca29e96475208bce673efb6a2c58b73f2 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Mon, 26 Oct 2009 18:51:09 +0000 Subject: [PATCH 190/470] powerpc: tracing: Give hypervisor call tracepoints access to arguments While most users of the hcall tracepoints will only want the opcode and return code, some will want all the arguments. To avoid the complexity of using varargs we pass a pointer to the register save area, which contains all the arguments. 
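A short sketch of what a probe can now do with the extra pointer; the indexing comment reflects the addi into the saved-parameter area in the precall macro below, and the pr_debug() output is purely illustrative.

#include <linux/kernel.h>

static void probe_hcall_entry(unsigned long opcode, unsigned long *args)
{
        /*
         * args points at the caller's saved parameter registers, so
         * args[0], args[1], ... are the hypervisor call arguments in
         * the order they were passed.
         */
        pr_debug("hcall 0x%lx: arg0=0x%lx arg1=0x%lx\n",
                 opcode, args[0], args[1]);
}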
Signed-off-by: Anton Blanchard Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/trace.h | 9 +++--- arch/powerpc/platforms/pseries/hvCall.S | 31 +++++++++++++------- arch/powerpc/platforms/pseries/hvCall_inst.c | 5 ++-- arch/powerpc/platforms/pseries/lpar.c | 9 +++--- 4 files changed, 34 insertions(+), 20 deletions(-) diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h index 9b01c0e43b5..cbe2297d68b 100644 --- a/arch/powerpc/include/asm/trace.h +++ b/arch/powerpc/include/asm/trace.h @@ -82,9 +82,9 @@ extern void hcall_tracepoint_unregfunc(void); TRACE_EVENT_FN(hcall_entry, - TP_PROTO(unsigned long opcode), + TP_PROTO(unsigned long opcode, unsigned long *args), - TP_ARGS(opcode), + TP_ARGS(opcode, args), TP_STRUCT__entry( __field(unsigned long, opcode) @@ -101,9 +101,10 @@ TRACE_EVENT_FN(hcall_entry, TRACE_EVENT_FN(hcall_exit, - TP_PROTO(unsigned long opcode, unsigned long retval), + TP_PROTO(unsigned long opcode, unsigned long retval, + unsigned long *retbuf), - TP_ARGS(opcode, retval), + TP_ARGS(opcode, retval, retbuf), TP_STRUCT__entry( __field(unsigned long, opcode) diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S index 01e95ab18d3..383a5d0e981 100644 --- a/arch/powerpc/platforms/pseries/hvCall.S +++ b/arch/powerpc/platforms/pseries/hvCall.S @@ -30,7 +30,7 @@ hcall_tracepoint_refcount: * in early init (eg when populating the MMU hashtable) by using an * unconditional cpu feature. */ -#define HCALL_INST_PRECALL \ +#define HCALL_INST_PRECALL(FIRST_REG) \ BEGIN_FTR_SECTION; \ b 1f; \ END_FTR_SECTION(0, 1); \ @@ -47,6 +47,7 @@ END_FTR_SECTION(0, 1); \ std r9,STK_PARM(r9)(r1); \ std r10,STK_PARM(r10)(r1); \ std r0,16(r1); \ + addi r4,r1,STK_PARM(FIRST_REG); \ stdu r1,-STACK_FRAME_OVERHEAD(r1); \ bl .__trace_hcall_entry; \ addi r1,r1,STACK_FRAME_OVERHEAD; \ @@ -68,7 +69,7 @@ END_FTR_SECTION(0, 1); \ * in early init (eg when populating the MMU hashtable) by using an * unconditional cpu feature. 
*/ -#define HCALL_INST_POSTCALL \ +#define __HCALL_INST_POSTCALL \ BEGIN_FTR_SECTION; \ b 1f; \ END_FTR_SECTION(0, 1); \ @@ -88,9 +89,19 @@ END_FTR_SECTION(0, 1); \ ld r3,STK_PARM(r3)(r1); \ mtlr r0; \ 1: + +#define HCALL_INST_POSTCALL_NORETS \ + li r5,0; \ + __HCALL_INST_POSTCALL + +#define HCALL_INST_POSTCALL(BUFREG) \ + mr r5,BUFREG; \ + __HCALL_INST_POSTCALL + #else -#define HCALL_INST_PRECALL -#define HCALL_INST_POSTCALL +#define HCALL_INST_PRECALL(FIRST_ARG) +#define HCALL_INST_POSTCALL_NORETS +#define HCALL_INST_POSTCALL(BUFREG) #endif .text @@ -101,11 +112,11 @@ _GLOBAL(plpar_hcall_norets) mfcr r0 stw r0,8(r1) - HCALL_INST_PRECALL + HCALL_INST_PRECALL(r4) HVSC /* invoke the hypervisor */ - HCALL_INST_POSTCALL + HCALL_INST_POSTCALL_NORETS lwz r0,8(r1) mtcrf 0xff,r0 @@ -117,7 +128,7 @@ _GLOBAL(plpar_hcall) mfcr r0 stw r0,8(r1) - HCALL_INST_PRECALL + HCALL_INST_PRECALL(r5) std r4,STK_PARM(r4)(r1) /* Save ret buffer */ @@ -136,7 +147,7 @@ _GLOBAL(plpar_hcall) std r6, 16(r12) std r7, 24(r12) - HCALL_INST_POSTCALL + HCALL_INST_POSTCALL(r12) lwz r0,8(r1) mtcrf 0xff,r0 @@ -183,7 +194,7 @@ _GLOBAL(plpar_hcall9) mfcr r0 stw r0,8(r1) - HCALL_INST_PRECALL + HCALL_INST_PRECALL(r5) std r4,STK_PARM(r4)(r1) /* Save ret buffer */ @@ -211,7 +222,7 @@ _GLOBAL(plpar_hcall9) std r11,56(r12) std r0, 64(r12) - HCALL_INST_POSTCALL + HCALL_INST_POSTCALL(r12) lwz r0,8(r1) mtcrf 0xff,r0 diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c index e44e1035f13..2f58c71b725 100644 --- a/arch/powerpc/platforms/pseries/hvCall_inst.c +++ b/arch/powerpc/platforms/pseries/hvCall_inst.c @@ -102,7 +102,7 @@ static const struct file_operations hcall_inst_seq_fops = { #define CPU_NAME_BUF_SIZE 32 -static void probe_hcall_entry(unsigned long opcode) +static void probe_hcall_entry(unsigned long opcode, unsigned long *args) { struct hcall_stats *h; @@ -114,7 +114,8 @@ static void probe_hcall_entry(unsigned long opcode) h->purr_start = mfspr(SPRN_PURR); } -static void probe_hcall_exit(unsigned long opcode, unsigned long retval) +static void probe_hcall_exit(unsigned long opcode, unsigned long retval, + unsigned long *retbuf) { struct hcall_stats *h; diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 4b7b6e8e32d..0707653612b 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -683,13 +683,14 @@ void hcall_tracepoint_unregfunc(void) hcall_tracepoint_refcount--; } -void __trace_hcall_entry(unsigned long opcode) +void __trace_hcall_entry(unsigned long opcode, unsigned long *args) { - trace_hcall_entry(opcode); + trace_hcall_entry(opcode, args); } -void __trace_hcall_exit(long opcode, unsigned long retval) +void __trace_hcall_exit(long opcode, unsigned long retval, + unsigned long *retbuf) { - trace_hcall_exit(opcode, retval); + trace_hcall_exit(opcode, retval, retbuf); } #endif From b3c86ee6d128dea7c671380090488887e73fa774 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Mon, 26 Oct 2009 18:51:57 +0000 Subject: [PATCH 191/470] powerpc: Disable HCALL_STATS by default The overhead of HCALL_STATS is quite high and the functionality is very rarely used. Key statistics are also missing (eg min/max). With the new hcall tracepoints much more powerful tracing can be done in a kernel module. Lets disable this by default. 
Signed-off-by: Anton Blanchard Signed-off-by: Paul Mackerras --- arch/powerpc/configs/pseries_defconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index f1889abb89b..c568329723b 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -1683,7 +1683,7 @@ CONFIG_HAVE_ARCH_KGDB=y CONFIG_DEBUG_STACKOVERFLOW=y # CONFIG_DEBUG_STACK_USAGE is not set # CONFIG_DEBUG_PAGEALLOC is not set -CONFIG_HCALL_STATS=y +# CONFIG_HCALL_STATS is not set # CONFIG_CODE_PATCHING_SELFTEST is not set # CONFIG_FTR_FIXUP_SELFTEST is not set # CONFIG_MSI_BITMAP_SELFTEST is not set From 907b1f45d901c956e4bcd3f27c4f1f25d6fb36b2 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Mon, 26 Oct 2009 18:52:24 +0000 Subject: [PATCH 192/470] powerpc: Export powerpc_debugfs_root Kernel modules should be able to place their debug output inside our powerpc debugfs directory. Signed-off-by: Anton Blanchard Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/setup-common.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 4271f7a655a..845c72ab735 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -660,6 +660,7 @@ late_initcall(check_cache_coherency); #ifdef CONFIG_DEBUG_FS struct dentry *powerpc_debugfs_root; +EXPORT_SYMBOL(powerpc_debugfs_root); static int powerpc_debugfs_init(void) { From 3cd980dbc1050889acca7306cbcedf79a4ba2f81 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Sun, 18 Oct 2009 01:23:28 +0000 Subject: [PATCH 193/470] powerpc: perf_event: Cleanup copy_page output by hiding setup symbol A lot of hits in "setup" doesn't make much sense, so hide this symbol and allow all the hits to end up in copy_4k_page. Signed-off-by: Anton Blanchard Signed-off-by: Paul Mackerras --- arch/powerpc/lib/copypage_64.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S index 75f3267fdc3..e68beac0a17 100644 --- a/arch/powerpc/lib/copypage_64.S +++ b/arch/powerpc/lib/copypage_64.S @@ -26,11 +26,11 @@ BEGIN_FTR_SECTION srd r8,r5,r11 mtctr r8 -setup: +.Lsetup: dcbt r9,r4 dcbz r9,r3 add r9,r9,r12 - bdnz setup + bdnz .Lsetup END_FTR_SECTION_IFSET(CPU_FTR_CP_USE_DCBTZ) addi r3,r3,-8 srdi r8,r5,7 /* page is copied in 128 byte strides */ From 917e407c762ba6d91d1a4bc1c804d518585082a3 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Sun, 18 Oct 2009 01:24:29 +0000 Subject: [PATCH 194/470] powerpc: perf_event: Hide iseries_check_pending_irqs If CONFIG_PPC_ISERIES isn't defined we end up with iseries_check_pending_irqs and do_work at the same address. perf ends up picking iseries_check_pending_irqs which creates confusing backtraces. Hide it. 
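The rename works because the assembler treats symbols beginning with ".L" as local labels and omits them from the object file's symbol table, so they can no longer be picked up in place of the real function name; the previous patch applies the same trick to the setup loop in copy_4k_page.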
Signed-off-by: Anton Blanchard Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/entry_64.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 900e0eea009..e722226cabc 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -551,7 +551,7 @@ restore: BEGIN_FW_FTR_SECTION ld r5,SOFTE(r1) FW_FTR_SECTION_ELSE - b iseries_check_pending_irqs + b .Liseries_check_pending_irqs ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES) 2: TRACE_AND_RESTORE_IRQ(r5); @@ -623,7 +623,7 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES) #endif /* CONFIG_PPC_BOOK3E */ -iseries_check_pending_irqs: +.Liseries_check_pending_irqs: #ifdef CONFIG_PPC_ISERIES ld r5,SOFTE(r1) cmpdi 0,r5,0 From c86e2eaded39843e1bf4f07d1adfab4494f20894 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Sun, 18 Oct 2009 01:24:06 +0000 Subject: [PATCH 195/470] powerpc: perf_event: Cleanup output by adding symbols Add some dummy symbols for the branches at 0xf00, 0xf20 and 0xf40, otherwise hits end up in trap_0e which is confusing to the user. Signed-off-by: Anton Blanchard Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/exceptions-64s.S | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 1808876edcc..c7eb4e0eb86 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -185,12 +185,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) * prolog code of the PerformanceMonitor one. A little * trickery is thus necessary */ +performance_monitor_pSeries_1: . = 0xf00 b performance_monitor_pSeries +altivec_unavailable_pSeries_1: . = 0xf20 b altivec_unavailable_pSeries +vsx_unavailable_pSeries_1: . = 0xf40 b vsx_unavailable_pSeries From 66bd8424cc05e800db384053bf7ab967e4658468 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 28 Oct 2009 21:51:21 -0200 Subject: [PATCH 196/470] perf tools: Delay loading symtabs till we hit a map with it So that we can have a quicker start on perf top and even speedups in the other tools, as we can have maps with no hits, so no need to load its symtabs. 
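Note that dso__load() and its variants now set self->loaded before attempting the load, so a DSO whose symbols cannot be read is only tried (and warned about) once rather than on every sample that hits it.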
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith LKML-Reference: <1256773881-4191-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 4 ++-- tools/perf/builtin-report.c | 4 ++-- tools/perf/builtin-top.c | 5 ++--- tools/perf/util/event.h | 3 ++- tools/perf/util/map.c | 38 ++++++++++++++++++++--------------- tools/perf/util/symbol.c | 16 ++++++++------- tools/perf/util/symbol.h | 4 ++-- 7 files changed, 41 insertions(+), 33 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 6d63c2eea2c..8688bfee42a 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -165,7 +165,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) if (map != NULL) { got_map: ip = map->map_ip(map, ip); - sym = map->dso->find_symbol(map->dso, ip); + sym = map__find_symbol(map, ip, symbol_filter); } else { /* * If this is outside of all known maps, @@ -203,7 +203,7 @@ static int process_mmap_event(event_t *event, unsigned long offset, unsigned long head) { struct map *map = map__new(&event->mmap, NULL, 0, - sizeof(struct sym_priv), symbol_filter); + sizeof(struct sym_priv)); struct thread *thread = threads__findnew(event->mmap.pid); dump_printf("%p [%p]: PERF_RECORD_MMAP %d: [%p(%p) @ %p]: %s\n", diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index b3d814b5455..f1bcd35bd22 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -455,7 +455,7 @@ got_map: dump_printf(" ...... map: %Lx -> %Lx\n", *ipp, ip); *ipp = ip; - return map ? map->dso->find_symbol(map->dso, ip) : NULL; + return map ? map__find_symbol(map, ip, NULL) : NULL; } static int call__match(struct symbol *sym) @@ -751,7 +751,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) static int process_mmap_event(event_t *event, unsigned long offset, unsigned long head) { - struct map *map = map__new(&event->mmap, cwd, cwdlen, 0, NULL); + struct map *map = map__new(&event->mmap, cwd, cwdlen, 0); struct thread *thread = threads__findnew(event->mmap.pid); dump_printf("%p [%p]: PERF_RECORD_MMAP %d/%d: [%p(%p) @ %p]: %s\n", diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index a02fc414601..ee87640b335 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -834,7 +834,7 @@ static void event__process_sample(const event_t *self, int counter) map = thread__find_map(thread, ip); if (map != NULL) { ip = map->map_ip(map, ip); - sym = map->dso->find_symbol(map->dso, ip); + sym = map__find_symbol(map, ip, symbol_filter); if (sym == NULL) return; userspace_samples++; @@ -879,8 +879,7 @@ static void event__process_mmap(event_t *self) if (thread != NULL) { struct map *map = map__new(&self->mmap, NULL, 0, - sizeof(struct sym_entry), - symbol_filter); + sizeof(struct sym_entry)); if (map != NULL) thread__insert_map(thread, map); } diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 2ae1177be40..3064a05f0f5 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -106,10 +106,11 @@ struct symbol; typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym); struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen, - unsigned int sym_priv_size, symbol_filter_t filter); + unsigned int sym_priv_size); struct map *map__clone(struct map *self); int map__overlap(struct map *l, struct map *r); size_t map__fprintf(struct map 
*self, FILE *fp); +struct symbol *map__find_symbol(struct map *self, u64 ip, symbol_filter_t filter); int event__synthesize_thread(pid_t pid, int (*process)(event_t *event)); void event__synthesize_threads(int (*process)(event_t *event)); diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index c1c55682534..d302e513e06 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -21,7 +21,7 @@ static int strcommon(const char *pathname, char *cwd, int cwdlen) } struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen, - unsigned int sym_priv_size, symbol_filter_t filter) + unsigned int sym_priv_size) { struct map *self = malloc(sizeof(*self)); @@ -29,7 +29,6 @@ struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen, const char *filename = event->filename; char newfilename[PATH_MAX]; int anon; - bool new_dso; if (cwd) { int n = strcommon(filename, cwd, cwdlen); @@ -52,23 +51,10 @@ struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen, self->end = event->start + event->len; self->pgoff = event->pgoff; - self->dso = dsos__findnew(filename, sym_priv_size, &new_dso); + self->dso = dsos__findnew(filename, sym_priv_size); if (self->dso == NULL) goto out_delete; - if (new_dso) { - int nr = dso__load(self->dso, self, filter); - - if (nr < 0) - pr_warning("Failed to open %s, continuing " - "without symbols\n", - self->dso->long_name); - else if (nr == 0) - pr_warning("No symbols found in %s, maybe " - "install a debug package?\n", - self->dso->long_name); - } - if (self->dso == vdso || anon) self->map_ip = self->unmap_ip = identity__map_ip; else { @@ -82,6 +68,26 @@ out_delete: return NULL; } +struct symbol * +map__find_symbol(struct map *self, u64 ip, symbol_filter_t filter) +{ + if (!self->dso->loaded) { + int nr = dso__load(self->dso, self, filter); + + if (nr < 0) { + pr_warning("Failed to open %s, continuing without symbols\n", + self->dso->long_name); + return NULL; + } else if (nr == 0) { + pr_warning("No symbols found in %s, maybe install a debug package?\n", + self->dso->long_name); + return NULL; + } + } + + return self->dso->find_symbol(self->dso, ip); +} + struct map *map__clone(struct map *self) { struct map *map = malloc(sizeof(*self)); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 8f0208ce237..0273d83f728 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -909,6 +909,8 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) int ret = -1; int fd; + self->loaded = true; + if (!name) return -1; @@ -1019,6 +1021,8 @@ static int dso__load_module_sym(struct dso *self, struct map *map, { int err = 0, fd = open(self->long_name, O_RDONLY); + self->loaded = true; + if (fd < 0) { pr_err("%s: cannot open %s\n", __func__, self->long_name); return err; @@ -1214,6 +1218,8 @@ static int dso__load_vmlinux(struct dso *self, struct map *map, { int err, fd = open(vmlinux, O_RDONLY); + self->loaded = true; + if (fd < 0) return -1; @@ -1312,19 +1318,15 @@ static struct dso *dsos__find(const char *name) return NULL; } -struct dso *dsos__findnew(const char *name, unsigned int sym_priv_size, - bool *is_new) +struct dso *dsos__findnew(const char *name, unsigned int sym_priv_size) { struct dso *dso = dsos__find(name); if (!dso) { dso = dso__new(name, sym_priv_size); - if (dso) { + if (dso != NULL) dsos__add(dso); - *is_new = true; - } - } else - *is_new = false; + } return dso; } diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 77b7b3e4241..432edbca780 100644 --- 
a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -46,6 +46,7 @@ struct dso { unsigned int sym_priv_size; unsigned char adjust_symbols; unsigned char slen_calculated; + bool loaded; unsigned char origin; const char *short_name; char *long_name; @@ -64,8 +65,7 @@ struct symbol *dso__find_symbol(struct dso *self, u64 ip); int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, symbol_filter_t filter, int modules); -struct dso *dsos__findnew(const char *name, unsigned int sym_priv_size, - bool *is_new); +struct dso *dsos__findnew(const char *name, unsigned int sym_priv_size); int dso__load(struct dso *self, struct map *map, symbol_filter_t filter); void dsos__fprintf(FILE *fp); From 7f387d3f2421781610588faa2f49ae5f1737b137 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 27 Oct 2009 16:42:04 -0400 Subject: [PATCH 197/470] x86: Fix SSE opcode map bug Fix superscripts position because some superscripts of SSE opcode are not put in correct position. Signed-off-by: Masami Hiramatsu Cc: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Frederic Weisbecker Cc: Jason Baron Cc: K.Prasad Cc: Peter Zijlstra Cc: Srikar Dronamraju LKML-Reference: <20091027204204.30545.97296.stgit@harusame> Signed-off-by: Ingo Molnar --- arch/x86/lib/x86-opcode-map.txt | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index 701c4678687..efef3cada84 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt @@ -401,9 +401,9 @@ Referrer: 2-byte escape 62: punpckldq Pq,Qd | punpckldq Vdq,Wdq (66) 63: packsswb Pq,Qq | packsswb Vdq,Wdq (66) 64: pcmpgtb Pq,Qq | pcmpgtb Vdq,Wdq (66) -65: pcmpgtw Pq,Qq | pcmpgtw(66) Vdq,Wdq +65: pcmpgtw Pq,Qq | pcmpgtw Vdq,Wdq (66) 66: pcmpgtd Pq,Qq | pcmpgtd Vdq,Wdq (66) -67: packuswb Pq,Qq | packuswb(66) Vdq,Wdq +67: packuswb Pq,Qq | packuswb Vdq,Wdq (66) 68: punpckhbw Pq,Qd | punpckhbw Vdq,Wdq (66) 69: punpckhwd Pq,Qd | punpckhwd Vdq,Wdq (66) 6a: punpckhdq Pq,Qd | punpckhdq Vdq,Wdq (66) @@ -425,8 +425,8 @@ Referrer: 2-byte escape 79: VMWRITE Gd/q,Ed/q 7a: 7b: -7c: haddps(F2) Vps,Wps | haddpd(66) Vpd,Wpd -7d: hsubps(F2) Vps,Wps | hsubpd(66) Vpd,Wpd +7c: haddps Vps,Wps (F2) | haddpd Vpd,Wpd (66) +7d: hsubps Vps,Wps (F2) | hsubpd Vpd,Wpd (66) 7e: movd/q Ed/q,Pd | movd/q Ed/q,Vdq (66) | movq Vq,Wq (F3) 7f: movq Qq,Pq | movdqa Wdq,Vdq (66) | movdqu Wdq,Vdq (F3) # 0x0f 0x80-0x8f @@ -574,7 +574,7 @@ Referrer: 3-byte escape 1 01: phaddw Pq,Qq | phaddw Vdq,Wdq (66) 02: phaddd Pq,Qq | phaddd Vdq,Wdq (66) 03: phaddsw Pq,Qq | phaddsw Vdq,Wdq (66) -04: pmaddubsw Pq,Qq | pmaddubsw (66)Vdq,Wdq +04: pmaddubsw Pq,Qq | pmaddubsw Vdq,Wdq (66) 05: phsubw Pq,Qq | phsubw Vdq,Wdq (66) 06: phsubd Pq,Qq | phsubd Vdq,Wdq (66) 07: phsubsw Pq,Qq | phsubsw Vdq,Wdq (66) From 04d46c1b13b02e1e5c24eb270a01cf3f94ee4d04 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 27 Oct 2009 16:42:11 -0400 Subject: [PATCH 198/470] x86: Merge INAT_REXPFX into INAT_PFX_* Merge INAT_REXPFX into INAT_PFX_* macro and rename it to INAT_PFX_REX. Signed-off-by: Masami Hiramatsu Cc: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Christoph Hellwig Cc: Frank Ch. 
Eigler Cc: Frederic Weisbecker Cc: Jason Baron Cc: K.Prasad Cc: Peter Zijlstra Cc: Srikar Dronamraju LKML-Reference: <20091027204211.30545.58090.stgit@harusame> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/inat.h | 36 +++++++++++++++------------- arch/x86/lib/insn.c | 2 +- arch/x86/tools/gen-insn-attr-x86.awk | 6 ++--- 3 files changed, 24 insertions(+), 20 deletions(-) diff --git a/arch/x86/include/asm/inat.h b/arch/x86/include/asm/inat.h index 2866fddd184..c2487d2aca2 100644 --- a/arch/x86/include/asm/inat.h +++ b/arch/x86/include/asm/inat.h @@ -30,10 +30,11 @@ #define INAT_OPCODE_TABLE_SIZE 256 #define INAT_GROUP_TABLE_SIZE 8 -/* Legacy instruction prefixes */ +/* Legacy last prefixes */ #define INAT_PFX_OPNDSZ 1 /* 0x66 */ /* LPFX1 */ #define INAT_PFX_REPNE 2 /* 0xF2 */ /* LPFX2 */ #define INAT_PFX_REPE 3 /* 0xF3 */ /* LPFX3 */ +/* Other Legacy prefixes */ #define INAT_PFX_LOCK 4 /* 0xF0 */ #define INAT_PFX_CS 5 /* 0x2E */ #define INAT_PFX_DS 6 /* 0x3E */ @@ -42,8 +43,11 @@ #define INAT_PFX_GS 9 /* 0x65 */ #define INAT_PFX_SS 10 /* 0x36 */ #define INAT_PFX_ADDRSZ 11 /* 0x67 */ +/* x86-64 REX prefix */ +#define INAT_PFX_REX 12 /* 0x4X */ -#define INAT_LPREFIX_MAX 3 +#define INAT_LSTPFX_MAX 3 +#define INAT_LGCPFX_MAX 11 /* Immediate size */ #define INAT_IMM_BYTE 1 @@ -75,12 +79,11 @@ #define INAT_IMM_MASK (((1 << INAT_IMM_BITS) - 1) << INAT_IMM_OFFS) /* Flags */ #define INAT_FLAG_OFFS (INAT_IMM_OFFS + INAT_IMM_BITS) -#define INAT_REXPFX (1 << INAT_FLAG_OFFS) -#define INAT_MODRM (1 << (INAT_FLAG_OFFS + 1)) -#define INAT_FORCE64 (1 << (INAT_FLAG_OFFS + 2)) -#define INAT_SCNDIMM (1 << (INAT_FLAG_OFFS + 3)) -#define INAT_MOFFSET (1 << (INAT_FLAG_OFFS + 4)) -#define INAT_VARIANT (1 << (INAT_FLAG_OFFS + 5)) +#define INAT_MODRM (1 << (INAT_FLAG_OFFS)) +#define INAT_FORCE64 (1 << (INAT_FLAG_OFFS + 1)) +#define INAT_SCNDIMM (1 << (INAT_FLAG_OFFS + 2)) +#define INAT_MOFFSET (1 << (INAT_FLAG_OFFS + 3)) +#define INAT_VARIANT (1 << (INAT_FLAG_OFFS + 4)) /* Attribute making macros for attribute tables */ #define INAT_MAKE_PREFIX(pfx) (pfx << INAT_PFX_OFFS) #define INAT_MAKE_ESCAPE(esc) (esc << INAT_ESC_OFFS) @@ -97,9 +100,10 @@ extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm, insn_attr_t esc_attr); /* Attribute checking functions */ -static inline int inat_is_prefix(insn_attr_t attr) +static inline int inat_is_legacy_prefix(insn_attr_t attr) { - return attr & INAT_PFX_MASK; + attr &= INAT_PFX_MASK; + return attr && attr <= INAT_LGCPFX_MAX; } static inline int inat_is_address_size_prefix(insn_attr_t attr) @@ -112,9 +116,14 @@ static inline int inat_is_operand_size_prefix(insn_attr_t attr) return (attr & INAT_PFX_MASK) == INAT_PFX_OPNDSZ; } +static inline int inat_is_rex_prefix(insn_attr_t attr) +{ + return (attr & INAT_PFX_MASK) == INAT_PFX_REX; +} + static inline int inat_last_prefix_id(insn_attr_t attr) { - if ((attr & INAT_PFX_MASK) > INAT_LPREFIX_MAX) + if ((attr & INAT_PFX_MASK) > INAT_LSTPFX_MAX) return 0; else return attr & INAT_PFX_MASK; @@ -155,11 +164,6 @@ static inline int inat_immediate_size(insn_attr_t attr) return (attr & INAT_IMM_MASK) >> INAT_IMM_OFFS; } -static inline int inat_is_rex_prefix(insn_attr_t attr) -{ - return attr & INAT_REXPFX; -} - static inline int inat_has_modrm(insn_attr_t attr) { return attr & INAT_MODRM; diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c index dfd56a30053..9f483179a8a 100644 --- a/arch/x86/lib/insn.c +++ b/arch/x86/lib/insn.c @@ -69,7 +69,7 @@ void insn_get_prefixes(struct insn *insn) lb = 0; b = peek_next(insn_byte_t, insn); 
attr = inat_get_opcode_attribute(b); - while (inat_is_prefix(attr)) { + while (inat_is_legacy_prefix(attr)) { /* Skip if same prefix */ for (i = 0; i < nb; i++) if (prefixes->bytes[i] == b) diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk index 19ba096b7dd..7d5492951e2 100644 --- a/arch/x86/tools/gen-insn-attr-x86.awk +++ b/arch/x86/tools/gen-insn-attr-x86.awk @@ -278,7 +278,7 @@ function convert_operands(opnd, i,imm,mod) # check REX prefix if (match(opcode, rex_expr)) - flags = add_flags(flags, "INAT_REXPFX") + flags = add_flags(flags, "INAT_MAKE_PREFIX(INAT_PFX_REX)") # check coprocessor escape : TODO if (match(opcode, fpu_expr)) @@ -316,7 +316,7 @@ END { # print escape opcode map's array print "/* Escape opcode map array */" print "const insn_attr_t const *inat_escape_tables[INAT_ESC_MAX + 1]" \ - "[INAT_LPREFIX_MAX + 1] = {" + "[INAT_LSTPFX_MAX + 1] = {" for (i = 0; i < geid; i++) for (j = 0; j < max_lprefix; j++) if (etable[i,j]) @@ -325,7 +325,7 @@ END { # print group opcode map's array print "/* Group opcode map array */" print "const insn_attr_t const *inat_group_tables[INAT_GRP_MAX + 1]"\ - "[INAT_LPREFIX_MAX + 1] = {" + "[INAT_LSTPFX_MAX + 1] = {" for (i = 0; i < ggid; i++) for (j = 0; j < max_lprefix; j++) if (gtable[i,j]) From 82cb57028c864822c5a260f806d051e2ce28c86a Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 27 Oct 2009 16:42:19 -0400 Subject: [PATCH 199/470] x86: Add pclmulq to x86 opcode map Add pclmulq opcode to x86 opcode map. Signed-off-by: Masami Hiramatsu Cc: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Frederic Weisbecker Cc: Jason Baron Cc: K.Prasad Cc: Peter Zijlstra Cc: Srikar Dronamraju LKML-Reference: <20091027204219.30545.82039.stgit@harusame> Signed-off-by: Ingo Molnar --- arch/x86/lib/x86-opcode-map.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index efef3cada84..1f41246e6e3 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt @@ -672,6 +672,7 @@ Referrer: 3-byte escape 2 40: dpps Vdq,Wdq,Ib (66) 41: dppd Vdq,Wdq,Ib (66) 42: mpsadbw Vdq,Wdq,Ib (66) +44: pclmulq Vdq,Wdq,Ib (66) 60: pcmpestrm Vdq,Wdq,Ib (66) 61: pcmpestri Vdq,Wdq,Ib (66) 62: pcmpistrm Vdq,Wdq,Ib (66) From e0e492e99b372c6990a5daca9e4683c341f1330e Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 27 Oct 2009 16:42:27 -0400 Subject: [PATCH 200/470] x86: AVX instruction set decoder support Add Intel AVX(Advanced Vector Extensions) instruction set support to x86 instruction decoder. This adds insn.vex_prefix field for storing VEX prefixes, and introduces some original tags for expressing opcodes attributes. Signed-off-by: Masami Hiramatsu Cc: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Christoph Hellwig Cc: Frank Ch. 
Eigler Cc: Frederic Weisbecker Cc: Jason Baron Cc: K.Prasad Cc: Peter Zijlstra Cc: Srikar Dronamraju LKML-Reference: <20091027204226.30545.23451.stgit@harusame> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/inat.h | 32 +- arch/x86/include/asm/insn.h | 43 ++- arch/x86/lib/inat.c | 12 + arch/x86/lib/insn.c | 52 ++++ arch/x86/lib/x86-opcode-map.txt | 431 ++++++++++++++------------- arch/x86/tools/gen-insn-attr-x86.awk | 94 ++++-- 6 files changed, 431 insertions(+), 233 deletions(-) diff --git a/arch/x86/include/asm/inat.h b/arch/x86/include/asm/inat.h index c2487d2aca2..205b063e3e3 100644 --- a/arch/x86/include/asm/inat.h +++ b/arch/x86/include/asm/inat.h @@ -32,8 +32,8 @@ /* Legacy last prefixes */ #define INAT_PFX_OPNDSZ 1 /* 0x66 */ /* LPFX1 */ -#define INAT_PFX_REPNE 2 /* 0xF2 */ /* LPFX2 */ -#define INAT_PFX_REPE 3 /* 0xF3 */ /* LPFX3 */ +#define INAT_PFX_REPE 2 /* 0xF3 */ /* LPFX2 */ +#define INAT_PFX_REPNE 3 /* 0xF2 */ /* LPFX3 */ /* Other Legacy prefixes */ #define INAT_PFX_LOCK 4 /* 0xF0 */ #define INAT_PFX_CS 5 /* 0x2E */ @@ -45,6 +45,9 @@ #define INAT_PFX_ADDRSZ 11 /* 0x67 */ /* x86-64 REX prefix */ #define INAT_PFX_REX 12 /* 0x4X */ +/* AVX VEX prefixes */ +#define INAT_PFX_VEX2 13 /* 2-bytes VEX prefix */ +#define INAT_PFX_VEX3 14 /* 3-bytes VEX prefix */ #define INAT_LSTPFX_MAX 3 #define INAT_LGCPFX_MAX 11 @@ -84,6 +87,8 @@ #define INAT_SCNDIMM (1 << (INAT_FLAG_OFFS + 2)) #define INAT_MOFFSET (1 << (INAT_FLAG_OFFS + 3)) #define INAT_VARIANT (1 << (INAT_FLAG_OFFS + 4)) +#define INAT_VEXOK (1 << (INAT_FLAG_OFFS + 5)) +#define INAT_VEXONLY (1 << (INAT_FLAG_OFFS + 6)) /* Attribute making macros for attribute tables */ #define INAT_MAKE_PREFIX(pfx) (pfx << INAT_PFX_OFFS) #define INAT_MAKE_ESCAPE(esc) (esc << INAT_ESC_OFFS) @@ -98,6 +103,9 @@ extern insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm, insn_byte_t last_pfx, insn_attr_t esc_attr); +extern insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, + insn_byte_t vex_m, + insn_byte_t vex_pp); /* Attribute checking functions */ static inline int inat_is_legacy_prefix(insn_attr_t attr) @@ -129,6 +137,17 @@ static inline int inat_last_prefix_id(insn_attr_t attr) return attr & INAT_PFX_MASK; } +static inline int inat_is_vex_prefix(insn_attr_t attr) +{ + attr &= INAT_PFX_MASK; + return attr == INAT_PFX_VEX2 || attr == INAT_PFX_VEX3; +} + +static inline int inat_is_vex3_prefix(insn_attr_t attr) +{ + return (attr & INAT_PFX_MASK) == INAT_PFX_VEX3; +} + static inline int inat_is_escape(insn_attr_t attr) { return attr & INAT_ESC_MASK; @@ -189,4 +208,13 @@ static inline int inat_has_variant(insn_attr_t attr) return attr & INAT_VARIANT; } +static inline int inat_accept_vex(insn_attr_t attr) +{ + return attr & INAT_VEXOK; +} + +static inline int inat_must_vex(insn_attr_t attr) +{ + return attr & INAT_VEXONLY; +} #endif diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h index 12b4e3751d3..96c2e0ad04c 100644 --- a/arch/x86/include/asm/insn.h +++ b/arch/x86/include/asm/insn.h @@ -39,6 +39,7 @@ struct insn { * prefixes.bytes[3]: last prefix */ struct insn_field rex_prefix; /* REX prefix */ + struct insn_field vex_prefix; /* VEX prefix */ struct insn_field opcode; /* * opcode.bytes[0]: opcode1 * opcode.bytes[1]: opcode2 @@ -80,6 +81,19 @@ struct insn { #define X86_REX_X(rex) ((rex) & 2) #define X86_REX_B(rex) ((rex) & 1) +/* VEX bit flags */ +#define X86_VEX_W(vex) ((vex) & 0x80) /* VEX3 Byte2 */ +#define X86_VEX_R(vex) ((vex) & 0x80) /* VEX2/3 Byte1 
*/ +#define X86_VEX_X(vex) ((vex) & 0x40) /* VEX3 Byte1 */ +#define X86_VEX_B(vex) ((vex) & 0x20) /* VEX3 Byte1 */ +#define X86_VEX_L(vex) ((vex) & 0x04) /* VEX3 Byte2, VEX2 Byte1 */ +/* VEX bit fields */ +#define X86_VEX3_M(vex) ((vex) & 0x1f) /* VEX3 Byte1 */ +#define X86_VEX2_M 1 /* VEX2.M always 1 */ +#define X86_VEX_V(vex) (((vex) & 0x78) >> 3) /* VEX3 Byte2, VEX2 Byte1 */ +#define X86_VEX_P(vex) ((vex) & 0x03) /* VEX3 Byte2, VEX2 Byte1 */ +#define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */ + /* The last prefix is needed for two-byte and three-byte opcodes */ static inline insn_byte_t insn_last_prefix(struct insn *insn) { @@ -114,15 +128,42 @@ static inline void kernel_insn_init(struct insn *insn, const void *kaddr) #endif } +static inline int insn_is_avx(struct insn *insn) +{ + if (!insn->prefixes.got) + insn_get_prefixes(insn); + return (insn->vex_prefix.value != 0); +} + +static inline insn_byte_t insn_vex_m_bits(struct insn *insn) +{ + if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */ + return X86_VEX2_M; + else + return X86_VEX3_M(insn->vex_prefix.bytes[1]); +} + +static inline insn_byte_t insn_vex_p_bits(struct insn *insn) +{ + if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */ + return X86_VEX_P(insn->vex_prefix.bytes[1]); + else + return X86_VEX_P(insn->vex_prefix.bytes[2]); +} + /* Offset of each field from kaddr */ static inline int insn_offset_rex_prefix(struct insn *insn) { return insn->prefixes.nbytes; } -static inline int insn_offset_opcode(struct insn *insn) +static inline int insn_offset_vex_prefix(struct insn *insn) { return insn_offset_rex_prefix(insn) + insn->rex_prefix.nbytes; } +static inline int insn_offset_opcode(struct insn *insn) +{ + return insn_offset_vex_prefix(insn) + insn->vex_prefix.nbytes; +} static inline int insn_offset_modrm(struct insn *insn) { return insn_offset_opcode(insn) + insn->opcode.nbytes; diff --git a/arch/x86/lib/inat.c b/arch/x86/lib/inat.c index 3fb5998b823..46fc4ee09fc 100644 --- a/arch/x86/lib/inat.c +++ b/arch/x86/lib/inat.c @@ -76,3 +76,15 @@ insn_attr_t inat_get_group_attribute(insn_byte_t modrm, insn_byte_t last_pfx, inat_group_common_attribute(grp_attr); } +insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, insn_byte_t vex_m, + insn_byte_t vex_p) +{ + const insn_attr_t *table; + if (vex_m > X86_VEX_M_MAX || vex_p > INAT_LSTPFX_MAX) + return 0; + table = inat_avx_tables[vex_m][vex_p]; + if (!table) + return 0; + return table[opcode]; +} + diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c index 9f483179a8a..9f33b984d0e 100644 --- a/arch/x86/lib/insn.c +++ b/arch/x86/lib/insn.c @@ -28,6 +28,9 @@ #define peek_next(t, insn) \ ({t r; r = *(t*)insn->next_byte; r; }) +#define peek_nbyte_next(t, insn, n) \ + ({t r; r = *(t*)((insn)->next_byte + n); r; }) + /** * insn_init() - initialize struct insn * @insn: &struct insn to be initialized @@ -107,6 +110,7 @@ found: insn->prefixes.bytes[3] = lb; } + /* Decode REX prefix */ if (insn->x86_64) { b = peek_next(insn_byte_t, insn); attr = inat_get_opcode_attribute(b); @@ -120,6 +124,39 @@ found: } } insn->rex_prefix.got = 1; + + /* Decode VEX prefix */ + b = peek_next(insn_byte_t, insn); + attr = inat_get_opcode_attribute(b); + if (inat_is_vex_prefix(attr)) { + insn_byte_t b2 = peek_nbyte_next(insn_byte_t, insn, 1); + if (!insn->x86_64) { + /* + * In 32-bits mode, if the [7:6] bits (mod bits of + * ModRM) on the second byte are not 11b, it is + * LDS or LES. 
+ */ + if (X86_MODRM_MOD(b2) != 3) + goto vex_end; + } + insn->vex_prefix.bytes[0] = b; + insn->vex_prefix.bytes[1] = b2; + if (inat_is_vex3_prefix(attr)) { + b2 = peek_nbyte_next(insn_byte_t, insn, 2); + insn->vex_prefix.bytes[2] = b2; + insn->vex_prefix.nbytes = 3; + insn->next_byte += 3; + if (insn->x86_64 && X86_VEX_W(b2)) + /* VEX.W overrides opnd_size */ + insn->opnd_bytes = 8; + } else { + insn->vex_prefix.nbytes = 2; + insn->next_byte += 2; + } + } +vex_end: + insn->vex_prefix.got = 1; + prefixes->got = 1; return; } @@ -147,6 +184,18 @@ void insn_get_opcode(struct insn *insn) op = get_next(insn_byte_t, insn); opcode->bytes[0] = op; opcode->nbytes = 1; + + /* Check if there is VEX prefix or not */ + if (insn_is_avx(insn)) { + insn_byte_t m, p; + m = insn_vex_m_bits(insn); + p = insn_vex_p_bits(insn); + insn->attr = inat_get_avx_attribute(op, m, p); + if (!inat_accept_vex(insn->attr)) + insn->attr = 0; /* This instruction is bad */ + goto end; /* VEX has only 1 byte for opcode */ + } + insn->attr = inat_get_opcode_attribute(op); while (inat_is_escape(insn->attr)) { /* Get escaped opcode */ @@ -155,6 +204,9 @@ void insn_get_opcode(struct insn *insn) pfx = insn_last_prefix(insn); insn->attr = inat_get_escape_attribute(op, pfx, insn->attr); } + if (inat_must_vex(insn->attr)) + insn->attr = 0; /* This instruction is bad */ +end: opcode->got = 1; } diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index 1f41246e6e3..9887bfeeb2d 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt @@ -3,6 +3,7 @@ # # Table: table-name # Referrer: escaped-name +# AVXcode: avx-code # opcode: mnemonic|GrpXXX [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...] # (or) # opcode: escape # escaped-name @@ -13,9 +14,16 @@ # reg: mnemonic [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...] # EndTable # +# AVX Superscripts +# (VEX): this opcode can accept VEX prefix. +# (oVEX): this opcode requires VEX prefix. +# (o128): this opcode only supports 128bit VEX. +# (o256): this opcode only supports 256bit VEX. +# Table: one byte opcode Referrer: +AVXcode: # 0x00 - 0x0f 00: ADD Eb,Gb 01: ADD Ev,Gv @@ -225,8 +233,8 @@ c0: Grp2 Eb,Ib (1A) c1: Grp2 Ev,Ib (1A) c2: RETN Iw (f64) c3: RETN -c4: LES Gz,Mp (i64) -c5: LDS Gz,Mp (i64) +c4: LES Gz,Mp (i64) | 3bytes-VEX (Prefix) +c5: LDS Gz,Mp (i64) | 2bytes-VEX (Prefix) c6: Grp11 Eb,Ib (1A) c7: Grp11 Ev,Iz (1A) c8: ENTER Iw,Ib @@ -290,8 +298,9 @@ fe: Grp4 (1A) ff: Grp5 (1A) EndTable -Table: 2-byte opcode # First Byte is 0x0f +Table: 2-byte opcode (0x0f) Referrer: 2-byte escape +AVXcode: 1 # 0x0f 0x00-0x0f 00: Grp6 (1A) 01: Grp7 (1A) @@ -311,14 +320,14 @@ Referrer: 2-byte escape # 3DNow! uses the last imm byte as opcode extension. 0f: 3DNow! 
Pq,Qq,Ib # 0x0f 0x10-0x1f -10: movups Vps,Wps | movss Vss,Wss (F3) | movupd Vpd,Wpd (66) | movsd Vsd,Wsd (F2) -11: movups Wps,Vps | movss Wss,Vss (F3) | movupd Wpd,Vpd (66) | movsd Wsd,Vsd (F2) -12: movlps Vq,Mq | movlpd Vq,Mq (66) | movhlps Vq,Uq | movddup Vq,Wq (F2) | movsldup Vq,Wq (F3) -13: mpvlps Mq,Vq | movlpd Mq,Vq (66) -14: unpcklps Vps,Wq | unpcklpd Vpd,Wq (66) -15: unpckhps Vps,Wq | unpckhpd Vpd,Wq (66) -16: movhps Vq,Mq | movhpd Vq,Mq (66) | movlsps Vq,Uq | movshdup Vq,Wq (F3) -17: movhps Mq,Vq | movhpd Mq,Vq (66) +10: movups Vps,Wps (VEX) | movss Vss,Wss (F3),(VEX),(o128) | movupd Vpd,Wpd (66),(VEX) | movsd Vsd,Wsd (F2),(VEX),(o128) +11: movups Wps,Vps (VEX) | movss Wss,Vss (F3),(VEX),(o128) | movupd Wpd,Vpd (66),(VEX) | movsd Wsd,Vsd (F2),(VEX),(o128) +12: movlps Vq,Mq (VEX),(o128) | movlpd Vq,Mq (66),(VEX),(o128) | movhlps Vq,Uq (VEX),(o128) | movddup Vq,Wq (F2),(VEX) | movsldup Vq,Wq (F3),(VEX) +13: mpvlps Mq,Vq (VEX),(o128) | movlpd Mq,Vq (66),(VEX),(o128) +14: unpcklps Vps,Wq (VEX) | unpcklpd Vpd,Wq (66),(VEX) +15: unpckhps Vps,Wq (VEX) | unpckhpd Vpd,Wq (66),(VEX) +16: movhps Vq,Mq (VEX),(o128) | movhpd Vq,Mq (66),(VEX),(o128) | movlsps Vq,Uq (VEX),(o128) | movshdup Vq,Wq (F3),(VEX) +17: movhps Mq,Vq (VEX),(o128) | movhpd Mq,Vq (66),(VEX),(o128) 18: Grp16 (1A) 19: 1a: @@ -336,14 +345,14 @@ Referrer: 2-byte escape 25: 26: 27: -28: movaps Vps,Wps | movapd Vpd,Wpd (66) -29: movaps Wps,Vps | movapd Wpd,Vpd (66) -2a: cvtpi2ps Vps,Qpi | cvtsi2ss Vss,Ed/q (F3) | cvtpi2pd Vpd,Qpi (66) | cvtsi2sd Vsd,Ed/q (F2) -2b: movntps Mps,Vps | movntpd Mpd,Vpd (66) -2c: cvttps2pi Ppi,Wps | cvttss2si Gd/q,Wss (F3) | cvttpd2pi Ppi,Wpd (66) | cvttsd2si Gd/q,Wsd (F2) -2d: cvtps2pi Ppi,Wps | cvtss2si Gd/q,Wss (F3) | cvtpd2pi Qpi,Wpd (66) | cvtsd2si Gd/q,Wsd (F2) -2e: ucomiss Vss,Wss | ucomisd Vsd,Wsd (66) -2f: comiss Vss,Wss | comisd Vsd,Wsd (66) +28: movaps Vps,Wps (VEX) | movapd Vpd,Wpd (66),(VEX) +29: movaps Wps,Vps (VEX) | movapd Wpd,Vpd (66),(VEX) +2a: cvtpi2ps Vps,Qpi | cvtsi2ss Vss,Ed/q (F3),(VEX),(o128) | cvtpi2pd Vpd,Qpi (66) | cvtsi2sd Vsd,Ed/q (F2),(VEX),(o128) +2b: movntps Mps,Vps (VEX) | movntpd Mpd,Vpd (66),(VEX) +2c: cvttps2pi Ppi,Wps | cvttss2si Gd/q,Wss (F3),(VEX),(o128) | cvttpd2pi Ppi,Wpd (66) | cvttsd2si Gd/q,Wsd (F2),(VEX),(o128) +2d: cvtps2pi Ppi,Wps | cvtss2si Gd/q,Wss (F3),(VEX),(o128) | cvtpd2pi Qpi,Wpd (66) | cvtsd2si Gd/q,Wsd (F2),(VEX),(o128) +2e: ucomiss Vss,Wss (VEX),(o128) | ucomisd Vsd,Wsd (66),(VEX),(o128) +2f: comiss Vss,Wss (VEX),(o128) | comisd Vsd,Wsd (66),(VEX),(o128) # 0x0f 0x30-0x3f 30: WRMSR 31: RDTSC @@ -379,56 +388,56 @@ Referrer: 2-byte escape 4e: CMOVLE/NG Gv,Ev 4f: CMOVNLE/G Gv,Ev # 0x0f 0x50-0x5f -50: movmskps Gd/q,Ups | movmskpd Gd/q,Upd (66) -51: sqrtps Vps,Wps | sqrtss Vss,Wss (F3) | sqrtpd Vpd,Wpd (66) | sqrtsd Vsd,Wsd (F2) -52: rsqrtps Vps,Wps | rsqrtss Vss,Wss (F3) -53: rcpps Vps,Wps | rcpss Vss,Wss (F3) -54: andps Vps,Wps | andpd Vpd,Wpd (66) -55: andnps Vps,Wps | andnpd Vpd,Wpd (66) -56: orps Vps,Wps | orpd Vpd,Wpd (66) -57: xorps Vps,Wps | xorpd Vpd,Wpd (66) -58: addps Vps,Wps | addss Vss,Wss (F3) | addpd Vpd,Wpd (66) | addsd Vsd,Wsd (F2) -59: mulps Vps,Wps | mulss Vss,Wss (F3) | mulpd Vpd,Wpd (66) | mulsd Vsd,Wsd (F2) -5a: cvtps2pd Vpd,Wps | cvtss2sd Vsd,Wss (F3) | cvtpd2ps Vps,Wpd (66) | cvtsd2ss Vsd,Wsd (F2) -5b: cvtdq2ps Vps,Wdq | cvtps2dq Vdq,Wps (66) | cvttps2dq Vdq,Wps (F3) -5c: subps Vps,Wps | subss Vss,Wss (F3) | subpd Vpd,Wpd (66) | subsd Vsd,Wsd (F2) -5d: minps Vps,Wps | minss Vss,Wss (F3) | minpd Vpd,Wpd (66) | minsd Vsd,Wsd (F2) 
-5e: divps Vps,Wps | divss Vss,Wss (F3) | divpd Vpd,Wpd (66) | divsd Vsd,Wsd (F2) -5f: maxps Vps,Wps | maxss Vss,Wss (F3) | maxpd Vpd,Wpd (66) | maxsd Vsd,Wsd (F2) +50: movmskps Gd/q,Ups (VEX) | movmskpd Gd/q,Upd (66),(VEX) +51: sqrtps Vps,Wps (VEX) | sqrtss Vss,Wss (F3),(VEX),(o128) | sqrtpd Vpd,Wpd (66),(VEX) | sqrtsd Vsd,Wsd (F2),(VEX),(o128) +52: rsqrtps Vps,Wps (VEX) | rsqrtss Vss,Wss (F3),(VEX),(o128) +53: rcpps Vps,Wps (VEX) | rcpss Vss,Wss (F3),(VEX),(o128) +54: andps Vps,Wps (VEX) | andpd Vpd,Wpd (66),(VEX) +55: andnps Vps,Wps (VEX) | andnpd Vpd,Wpd (66),(VEX) +56: orps Vps,Wps (VEX) | orpd Vpd,Wpd (66),(VEX) +57: xorps Vps,Wps (VEX) | xorpd Vpd,Wpd (66),(VEX) +58: addps Vps,Wps (VEX) | addss Vss,Wss (F3),(VEX),(o128) | addpd Vpd,Wpd (66),(VEX) | addsd Vsd,Wsd (F2),(VEX),(o128) +59: mulps Vps,Wps (VEX) | mulss Vss,Wss (F3),(VEX),(o128) | mulpd Vpd,Wpd (66),(VEX) | mulsd Vsd,Wsd (F2),(VEX),(o128) +5a: cvtps2pd Vpd,Wps (VEX) | cvtss2sd Vsd,Wss (F3),(VEX),(o128) | cvtpd2ps Vps,Wpd (66),(VEX) | cvtsd2ss Vsd,Wsd (F2),(VEX),(o128) +5b: cvtdq2ps Vps,Wdq (VEX) | cvtps2dq Vdq,Wps (66),(VEX) | cvttps2dq Vdq,Wps (F3),(VEX) +5c: subps Vps,Wps (VEX) | subss Vss,Wss (F3),(VEX),(o128) | subpd Vpd,Wpd (66),(VEX) | subsd Vsd,Wsd (F2),(VEX),(o128) +5d: minps Vps,Wps (VEX) | minss Vss,Wss (F3),(VEX),(o128) | minpd Vpd,Wpd (66),(VEX) | minsd Vsd,Wsd (F2),(VEX),(o128) +5e: divps Vps,Wps (VEX) | divss Vss,Wss (F3),(VEX),(o128) | divpd Vpd,Wpd (66),(VEX) | divsd Vsd,Wsd (F2),(VEX),(o128) +5f: maxps Vps,Wps (VEX) | maxss Vss,Wss (F3),(VEX),(o128) | maxpd Vpd,Wpd (66),(VEX) | maxsd Vsd,Wsd (F2),(VEX),(o128) # 0x0f 0x60-0x6f -60: punpcklbw Pq,Qd | punpcklbw Vdq,Wdq (66) -61: punpcklwd Pq,Qd | punpcklwd Vdq,Wdq (66) -62: punpckldq Pq,Qd | punpckldq Vdq,Wdq (66) -63: packsswb Pq,Qq | packsswb Vdq,Wdq (66) -64: pcmpgtb Pq,Qq | pcmpgtb Vdq,Wdq (66) -65: pcmpgtw Pq,Qq | pcmpgtw Vdq,Wdq (66) -66: pcmpgtd Pq,Qq | pcmpgtd Vdq,Wdq (66) -67: packuswb Pq,Qq | packuswb Vdq,Wdq (66) -68: punpckhbw Pq,Qd | punpckhbw Vdq,Wdq (66) -69: punpckhwd Pq,Qd | punpckhwd Vdq,Wdq (66) -6a: punpckhdq Pq,Qd | punpckhdq Vdq,Wdq (66) -6b: packssdw Pq,Qd | packssdw Vdq,Wdq (66) -6c: punpcklqdq Vdq,Wdq (66) -6d: punpckhqdq Vdq,Wdq (66) -6e: movd/q/ Pd,Ed/q | movd/q Vdq,Ed/q (66) -6f: movq Pq,Qq | movdqa Vdq,Wdq (66) | movdqu Vdq,Wdq (F3) +60: punpcklbw Pq,Qd | punpcklbw Vdq,Wdq (66),(VEX),(o128) +61: punpcklwd Pq,Qd | punpcklwd Vdq,Wdq (66),(VEX),(o128) +62: punpckldq Pq,Qd | punpckldq Vdq,Wdq (66),(VEX),(o128) +63: packsswb Pq,Qq | packsswb Vdq,Wdq (66),(VEX),(o128) +64: pcmpgtb Pq,Qq | pcmpgtb Vdq,Wdq (66),(VEX),(o128) +65: pcmpgtw Pq,Qq | pcmpgtw Vdq,Wdq (66),(VEX),(o128) +66: pcmpgtd Pq,Qq | pcmpgtd Vdq,Wdq (66),(VEX),(o128) +67: packuswb Pq,Qq | packuswb Vdq,Wdq (66),(VEX),(o128) +68: punpckhbw Pq,Qd | punpckhbw Vdq,Wdq (66),(VEX),(o128) +69: punpckhwd Pq,Qd | punpckhwd Vdq,Wdq (66),(VEX),(o128) +6a: punpckhdq Pq,Qd | punpckhdq Vdq,Wdq (66),(VEX),(o128) +6b: packssdw Pq,Qd | packssdw Vdq,Wdq (66),(VEX),(o128) +6c: punpcklqdq Vdq,Wdq (66),(VEX),(o128) +6d: punpckhqdq Vdq,Wdq (66),(VEX),(o128) +6e: movd/q/ Pd,Ed/q | movd/q Vdq,Ed/q (66),(VEX),(o128) +6f: movq Pq,Qq | movdqa Vdq,Wdq (66),(VEX) | movdqu Vdq,Wdq (F3),(VEX) # 0x0f 0x70-0x7f -70: pshufw Pq,Qq,Ib | pshufd Vdq,Wdq,Ib (66) | pshufhw Vdq,Wdq,Ib (F3) | pshuflw VdqWdq,Ib (F2) +70: pshufw Pq,Qq,Ib | pshufd Vdq,Wdq,Ib (66),(VEX),(o128) | pshufhw Vdq,Wdq,Ib (F3),(VEX),(o128) | pshuflw VdqWdq,Ib (F2),(VEX),(o128) 71: Grp12 (1A) 72: Grp13 (1A) 73: Grp14 (1A) -74: pcmpeqb Pq,Qq | 
pcmpeqb Vdq,Wdq (66) -75: pcmpeqw Pq,Qq | pcmpeqw Vdq,Wdq (66) -76: pcmpeqd Pq,Qq | pcmpeqd Vdq,Wdq (66) -77: emms +74: pcmpeqb Pq,Qq | pcmpeqb Vdq,Wdq (66),(VEX),(o128) +75: pcmpeqw Pq,Qq | pcmpeqw Vdq,Wdq (66),(VEX),(o128) +76: pcmpeqd Pq,Qq | pcmpeqd Vdq,Wdq (66),(VEX),(o128) +77: emms/vzeroupper/vzeroall (VEX) 78: VMREAD Ed/q,Gd/q 79: VMWRITE Gd/q,Ed/q 7a: 7b: -7c: haddps Vps,Wps (F2) | haddpd Vpd,Wpd (66) -7d: hsubps Vps,Wps (F2) | hsubpd Vpd,Wpd (66) -7e: movd/q Ed/q,Pd | movd/q Ed/q,Vdq (66) | movq Vq,Wq (F3) -7f: movq Qq,Pq | movdqa Wdq,Vdq (66) | movdqu Wdq,Vdq (F3) +7c: haddps Vps,Wps (F2),(VEX) | haddpd Vpd,Wpd (66),(VEX) +7d: hsubps Vps,Wps (F2),(VEX) | hsubpd Vpd,Wpd (66),(VEX) +7e: movd/q Ed/q,Pd | movd/q Ed/q,Vdq (66),(VEX),(o128) | movq Vq,Wq (F3),(VEX),(o128) +7f: movq Qq,Pq | movdqa Wdq,Vdq (66),(VEX) | movdqu Wdq,Vdq (F3),(VEX) # 0x0f 0x80-0x8f 80: JO Jz (f64) 81: JNO Jz (f64) @@ -500,11 +509,11 @@ bf: MOVSX Gv,Ew # 0x0f 0xc0-0xcf c0: XADD Eb,Gb c1: XADD Ev,Gv -c2: cmpps Vps,Wps,Ib | cmpss Vss,Wss,Ib (F3) | cmppd Vpd,Wpd,Ib (66) | cmpsd Vsd,Wsd,Ib (F2) +c2: cmpps Vps,Wps,Ib (VEX) | cmpss Vss,Wss,Ib (F3),(VEX),(o128) | cmppd Vpd,Wpd,Ib (66),(VEX) | cmpsd Vsd,Wsd,Ib (F2),(VEX) c3: movnti Md/q,Gd/q -c4: pinsrw Pq,Rd/q/Mw,Ib | pinsrw Vdq,Rd/q/Mw,Ib (66) -c5: pextrw Gd,Nq,Ib | pextrw Gd,Udq,Ib (66) -c6: shufps Vps,Wps,Ib | shufpd Vpd,Wpd,Ib (66) +c4: pinsrw Pq,Rd/q/Mw,Ib | pinsrw Vdq,Rd/q/Mw,Ib (66),(VEX),(o128) +c5: pextrw Gd,Nq,Ib | pextrw Gd,Udq,Ib (66),(VEX),(o128) +c6: shufps Vps,Wps,Ib (VEX) | shufpd Vpd,Wpd,Ib (66),(VEX) c7: Grp9 (1A) c8: BSWAP RAX/EAX/R8/R8D c9: BSWAP RCX/ECX/R9/R9D @@ -515,77 +524,78 @@ cd: BSWAP RBP/EBP/R13/R13D ce: BSWAP RSI/ESI/R14/R14D cf: BSWAP RDI/EDI/R15/R15D # 0x0f 0xd0-0xdf -d0: addsubps Vps,Wps (F2) | addsubpd Vpd,Wpd (66) -d1: psrlw Pq,Qq | psrlw Vdq,Wdq (66) -d2: psrld Pq,Qq | psrld Vdq,Wdq (66) -d3: psrlq Pq,Qq | psrlq Vdq,Wdq (66) -d4: paddq Pq,Qq | paddq Vdq,Wdq (66) -d5: pmullw Pq,Qq | pmullw Vdq,Wdq (66) -d6: movq Wq,Vq (66) | movq2dq Vdq,Nq (F3) | movdq2q Pq,Uq (F2) -d7: pmovmskb Gd,Nq | pmovmskb Gd,Udq (66) -d8: psubusb Pq,Qq | psubusb Vdq,Wdq (66) -d9: psubusw Pq,Qq | psubusw Vdq,Wdq (66) -da: pminub Pq,Qq | pminub Vdq,Wdq (66) -db: pand Pq,Qq | pand Vdq,Wdq (66) -dc: paddusb Pq,Qq | paddusb Vdq,Wdq (66) -dd: paddusw Pq,Qq | paddusw Vdq,Wdq (66) -de: pmaxub Pq,Qq | pmaxub Vdq,Wdq (66) -df: pandn Pq,Qq | pandn Vdq,Wdq (66) +d0: addsubps Vps,Wps (F2),(VEX) | addsubpd Vpd,Wpd (66),(VEX) +d1: psrlw Pq,Qq | psrlw Vdq,Wdq (66),(VEX),(o128) +d2: psrld Pq,Qq | psrld Vdq,Wdq (66),(VEX),(o128) +d3: psrlq Pq,Qq | psrlq Vdq,Wdq (66),(VEX),(o128) +d4: paddq Pq,Qq | paddq Vdq,Wdq (66),(VEX),(o128) +d5: pmullw Pq,Qq | pmullw Vdq,Wdq (66),(VEX),(o128) +d6: movq Wq,Vq (66),(VEX),(o128) | movq2dq Vdq,Nq (F3) | movdq2q Pq,Uq (F2) +d7: pmovmskb Gd,Nq | pmovmskb Gd,Udq (66),(VEX),(o128) +d8: psubusb Pq,Qq | psubusb Vdq,Wdq (66),(VEX),(o128) +d9: psubusw Pq,Qq | psubusw Vdq,Wdq (66),(VEX),(o128) +da: pminub Pq,Qq | pminub Vdq,Wdq (66),(VEX),(o128) +db: pand Pq,Qq | pand Vdq,Wdq (66),(VEX),(o128) +dc: paddusb Pq,Qq | paddusb Vdq,Wdq (66),(VEX),(o128) +dd: paddusw Pq,Qq | paddusw Vdq,Wdq (66),(VEX),(o128) +de: pmaxub Pq,Qq | pmaxub Vdq,Wdq (66),(VEX),(o128) +df: pandn Pq,Qq | pandn Vdq,Wdq (66),(VEX),(o128) # 0x0f 0xe0-0xef -e0: pavgb Pq,Qq | pavgb Vdq,Wdq (66) -e1: psraw Pq,Qq | psraw Vdq,Wdq (66) -e2: psrad Pq,Qq | psrad Vdq,Wdq (66) -e3: pavgw Pq,Qq | pavgw Vdq,Wdq (66) -e4: pmulhuw Pq,Qq | pmulhuw Vdq,Wdq (66) -e5: pmulhw Pq,Qq | pmulhw Vdq,Wdq (66) 
-e6: cvtpd2dq Vdq,Wpd (F2) | cvttpd2dq Vdq,Wpd (66) | cvtdq2pd Vpd,Wdq (F3) -e7: movntq Mq,Pq | movntdq Mdq,Vdq (66) -e8: psubsb Pq,Qq | psubsb Vdq,Wdq (66) -e9: psubsw Pq,Qq | psubsw Vdq,Wdq (66) -ea: pminsw Pq,Qq | pminsw Vdq,Wdq (66) -eb: por Pq,Qq | por Vdq,Wdq (66) -ec: paddsb Pq,Qq | paddsb Vdq,Wdq (66) -ed: paddsw Pq,Qq | paddsw Vdq,Wdq (66) -ee: pmaxsw Pq,Qq | pmaxsw Vdq,Wdq (66) -ef: pxor Pq,Qq | pxor Vdq,Wdq (66) +e0: pavgb Pq,Qq | pavgb Vdq,Wdq (66),(VEX),(o128) +e1: psraw Pq,Qq | psraw Vdq,Wdq (66),(VEX),(o128) +e2: psrad Pq,Qq | psrad Vdq,Wdq (66),(VEX),(o128) +e3: pavgw Pq,Qq | pavgw Vdq,Wdq (66),(VEX),(o128) +e4: pmulhuw Pq,Qq | pmulhuw Vdq,Wdq (66),(VEX),(o128) +e5: pmulhw Pq,Qq | pmulhw Vdq,Wdq (66),(VEX),(o128) +e6: cvtpd2dq Vdq,Wpd (F2),(VEX) | cvttpd2dq Vdq,Wpd (66),(VEX) | cvtdq2pd Vpd,Wdq (F3),(VEX) +e7: movntq Mq,Pq | movntdq Mdq,Vdq (66),(VEX) +e8: psubsb Pq,Qq | psubsb Vdq,Wdq (66),(VEX),(o128) +e9: psubsw Pq,Qq | psubsw Vdq,Wdq (66),(VEX),(o128) +ea: pminsw Pq,Qq | pminsw Vdq,Wdq (66),(VEX),(o128) +eb: por Pq,Qq | por Vdq,Wdq (66),(VEX),(o128) +ec: paddsb Pq,Qq | paddsb Vdq,Wdq (66),(VEX),(o128) +ed: paddsw Pq,Qq | paddsw Vdq,Wdq (66),(VEX),(o128) +ee: pmaxsw Pq,Qq | pmaxsw Vdq,Wdq (66),(VEX),(o128) +ef: pxor Pq,Qq | pxor Vdq,Wdq (66),(VEX),(o128) # 0x0f 0xf0-0xff -f0: lddqu Vdq,Mdq (F2) -f1: psllw Pq,Qq | psllw Vdq,Wdq (66) -f2: pslld Pq,Qq | pslld Vdq,Wdq (66) -f3: psllq Pq,Qq | psllq Vdq,Wdq (66) -f4: pmuludq Pq,Qq | pmuludq Vdq,Wdq (66) -f5: pmaddwd Pq,Qq | pmaddwd Vdq,Wdq (66) -f6: psadbw Pq,Qq | psadbw Vdq,Wdq (66) -f7: maskmovq Pq,Nq | maskmovdqu Vdq,Udq (66) -f8: psubb Pq,Qq | psubb Vdq,Wdq (66) -f9: psubw Pq,Qq | psubw Vdq,Wdq (66) -fa: psubd Pq,Qq | psubd Vdq,Wdq (66) -fb: psubq Pq,Qq | psubq Vdq,Wdq (66) -fc: paddb Pq,Qq | paddb Vdq,Wdq (66) -fd: paddw Pq,Qq | paddw Vdq,Wdq (66) -fe: paddd Pq,Qq | paddd Vdq,Wdq (66) +f0: lddqu Vdq,Mdq (F2),(VEX) +f1: psllw Pq,Qq | psllw Vdq,Wdq (66),(VEX),(o128) +f2: pslld Pq,Qq | pslld Vdq,Wdq (66),(VEX),(o128) +f3: psllq Pq,Qq | psllq Vdq,Wdq (66),(VEX),(o128) +f4: pmuludq Pq,Qq | pmuludq Vdq,Wdq (66),(VEX),(o128) +f5: pmaddwd Pq,Qq | pmaddwd Vdq,Wdq (66),(VEX),(o128) +f6: psadbw Pq,Qq | psadbw Vdq,Wdq (66),(VEX),(o128) +f7: maskmovq Pq,Nq | maskmovdqu Vdq,Udq (66),(VEX),(o128) +f8: psubb Pq,Qq | psubb Vdq,Wdq (66),(VEX),(o128) +f9: psubw Pq,Qq | psubw Vdq,Wdq (66),(VEX),(o128) +fa: psubd Pq,Qq | psubd Vdq,Wdq (66),(VEX),(o128) +fb: psubq Pq,Qq | psubq Vdq,Wdq (66),(VEX),(o128) +fc: paddb Pq,Qq | paddb Vdq,Wdq (66),(VEX),(o128) +fd: paddw Pq,Qq | paddw Vdq,Wdq (66),(VEX),(o128) +fe: paddd Pq,Qq | paddd Vdq,Wdq (66),(VEX),(o128) ff: EndTable Table: 3-byte opcode 1 (0x0f 0x38) Referrer: 3-byte escape 1 +AVXcode: 2 # 0x0f 0x38 0x00-0x0f -00: pshufb Pq,Qq | pshufb Vdq,Wdq (66) -01: phaddw Pq,Qq | phaddw Vdq,Wdq (66) -02: phaddd Pq,Qq | phaddd Vdq,Wdq (66) -03: phaddsw Pq,Qq | phaddsw Vdq,Wdq (66) -04: pmaddubsw Pq,Qq | pmaddubsw Vdq,Wdq (66) -05: phsubw Pq,Qq | phsubw Vdq,Wdq (66) -06: phsubd Pq,Qq | phsubd Vdq,Wdq (66) -07: phsubsw Pq,Qq | phsubsw Vdq,Wdq (66) -08: psignb Pq,Qq | psignb Vdq,Wdq (66) -09: psignw Pq,Qq | psignw Vdq,Wdq (66) -0a: psignd Pq,Qq | psignd Vdq,Wdq (66) -0b: pmulhrsw Pq,Qq | pmulhrsw Vdq,Wdq (66) -0c: -0d: -0e: -0f: +00: pshufb Pq,Qq | pshufb Vdq,Wdq (66),(VEX),(o128) +01: phaddw Pq,Qq | phaddw Vdq,Wdq (66),(VEX),(o128) +02: phaddd Pq,Qq | phaddd Vdq,Wdq (66),(VEX),(o128) +03: phaddsw Pq,Qq | phaddsw Vdq,Wdq (66),(VEX),(o128) +04: pmaddubsw Pq,Qq | pmaddubsw Vdq,Wdq (66),(VEX),(o128) +05: phsubw 
Pq,Qq | phsubw Vdq,Wdq (66),(VEX),(o128) +06: phsubd Pq,Qq | phsubd Vdq,Wdq (66),(VEX),(o128) +07: phsubsw Pq,Qq | phsubsw Vdq,Wdq (66),(VEX),(o128) +08: psignb Pq,Qq | psignb Vdq,Wdq (66),(VEX),(o128) +09: psignw Pq,Qq | psignw Vdq,Wdq (66),(VEX),(o128) +0a: psignd Pq,Qq | psignd Vdq,Wdq (66),(VEX),(o128) +0b: pmulhrsw Pq,Qq | pmulhrsw Vdq,Wdq (66),(VEX),(o128) +0c: Vpermilps /r (66),(oVEX) +0d: Vpermilpd /r (66),(oVEX) +0e: vtestps /r (66),(oVEX) +0f: vtestpd /r (66),(oVEX) # 0x0f 0x38 0x10-0x1f 10: pblendvb Vdq,Wdq (66) 11: @@ -594,90 +604,99 @@ Referrer: 3-byte escape 1 14: blendvps Vdq,Wdq (66) 15: blendvpd Vdq,Wdq (66) 16: -17: ptest Vdq,Wdq (66) -18: -19: -1a: +17: ptest Vdq,Wdq (66),(VEX) +18: vbroadcastss /r (66),(oVEX) +19: vbroadcastsd /r (66),(oVEX),(o256) +1a: vbroadcastf128 /r (66),(oVEX),(o256) 1b: -1c: pabsb Pq,Qq | pabsb Vdq,Wdq (66) -1d: pabsw Pq,Qq | pabsw Vdq,Wdq (66) -1e: pabsd Pq,Qq | pabsd Vdq,Wdq (66) +1c: pabsb Pq,Qq | pabsb Vdq,Wdq (66),(VEX),(o128) +1d: pabsw Pq,Qq | pabsw Vdq,Wdq (66),(VEX),(o128) +1e: pabsd Pq,Qq | pabsd Vdq,Wdq (66),(VEX),(o128) 1f: # 0x0f 0x38 0x20-0x2f -20: pmovsxbw Vdq,Udq/Mq (66) -21: pmovsxbd Vdq,Udq/Md (66) -22: pmovsxbq Vdq,Udq/Mw (66) -23: pmovsxwd Vdq,Udq/Mq (66) -24: pmovsxwq Vdq,Udq/Md (66) -25: pmovsxdq Vdq,Udq/Mq (66) +20: pmovsxbw Vdq,Udq/Mq (66),(VEX),(o128) +21: pmovsxbd Vdq,Udq/Md (66),(VEX),(o128) +22: pmovsxbq Vdq,Udq/Mw (66),(VEX),(o128) +23: pmovsxwd Vdq,Udq/Mq (66),(VEX),(o128) +24: pmovsxwq Vdq,Udq/Md (66),(VEX),(o128) +25: pmovsxdq Vdq,Udq/Mq (66),(VEX),(o128) 26: 27: -28: pmuldq Vdq,Wdq (66) -29: pcmpeqq Vdq,Wdq (66) -2a: movntdqa Vdq,Mdq (66) -2b: packusdw Vdq,Wdq (66) -2c: -2d: -2e: -2f: +28: pmuldq Vdq,Wdq (66),(VEX),(o128) +29: pcmpeqq Vdq,Wdq (66),(VEX),(o128) +2a: movntdqa Vdq,Mdq (66),(VEX),(o128) +2b: packusdw Vdq,Wdq (66),(VEX),(o128) +2c: vmaskmovps(ld) /r (66),(oVEX) +2d: vmaskmovpd(ld) /r (66),(oVEX) +2e: vmaskmovps(st) /r (66),(oVEX) +2f: vmaskmovpd(st) /r (66),(oVEX) # 0x0f 0x38 0x30-0x3f -30: pmovzxbw Vdq,Udq/Mq (66) -31: pmovzxbd Vdq,Udq/Md (66) -32: pmovzxbq Vdq,Udq/Mw (66) -33: pmovzxwd Vdq,Udq/Mq (66) -34: pmovzxwq Vdq,Udq/Md (66) -35: pmovzxdq Vdq,Udq/Mq (66) +30: pmovzxbw Vdq,Udq/Mq (66),(VEX),(o128) +31: pmovzxbd Vdq,Udq/Md (66),(VEX),(o128) +32: pmovzxbq Vdq,Udq/Mw (66),(VEX),(o128) +33: pmovzxwd Vdq,Udq/Mq (66),(VEX),(o128) +34: pmovzxwq Vdq,Udq/Md (66),(VEX),(o128) +35: pmovzxdq Vdq,Udq/Mq (66),(VEX),(o128) 36: -37: pcmpgtq Vdq,Wdq (66) -38: pminsb Vdq,Wdq (66) -39: pminsd Vdq,Wdq (66) -3a: pminuw Vdq,Wdq (66) -3b: pminud Vdq,Wdq (66) -3c: pmaxsb Vdq,Wdq (66) -3d: pmaxsd Vdq,Wdq (66) -3e: pmaxuw Vdq,Wdq (66) -3f: pmaxud Vdq,Wdq (66) +37: pcmpgtq Vdq,Wdq (66),(VEX),(o128) +38: pminsb Vdq,Wdq (66),(VEX),(o128) +39: pminsd Vdq,Wdq (66),(VEX),(o128) +3a: pminuw Vdq,Wdq (66),(VEX),(o128) +3b: pminud Vdq,Wdq (66),(VEX),(o128) +3c: pmaxsb Vdq,Wdq (66),(VEX),(o128) +3d: pmaxsd Vdq,Wdq (66),(VEX),(o128) +3e: pmaxuw Vdq,Wdq (66),(VEX),(o128) +3f: pmaxud Vdq,Wdq (66),(VEX),(o128) # 0x0f 0x38 0x4f-0xff -40: pmulld Vdq,Wdq (66) -41: phminposuw Vdq,Wdq (66) +40: pmulld Vdq,Wdq (66),(VEX),(o128) +41: phminposuw Vdq,Wdq (66),(VEX),(o128) 80: INVEPT Gd/q,Mdq (66) 81: INVPID Gd/q,Mdq (66) -db: aesimc Vdq,Wdq (66) -dc: aesenc Vdq,Wdq (66) -dd: aesenclast Vdq,Wdq (66) -de: aesdec Vdq,Wdq (66) -df: aesdeclast Vdq,Wdq (66) +db: aesimc Vdq,Wdq (66),(VEX),(o128) +dc: aesenc Vdq,Wdq (66),(VEX),(o128) +dd: aesenclast Vdq,Wdq (66),(VEX),(o128) +de: aesdec Vdq,Wdq (66),(VEX),(o128) +df: aesdeclast Vdq,Wdq 
(66),(VEX),(o128) f0: MOVBE Gv,Mv | CRC32 Gd,Eb (F2) f1: MOVBE Mv,Gv | CRC32 Gd,Ev (F2) EndTable Table: 3-byte opcode 2 (0x0f 0x3a) Referrer: 3-byte escape 2 +AVXcode: 3 # 0x0f 0x3a 0x00-0xff -08: roundps Vdq,Wdq,Ib (66) -09: roundpd Vdq,Wdq,Ib (66) -0a: roundss Vss,Wss,Ib (66) -0b: roundsd Vsd,Wsd,Ib (66) -0c: blendps Vdq,Wdq,Ib (66) -0d: blendpd Vdq,Wdq,Ib (66) -0e: pblendw Vdq,Wdq,Ib (66) -0f: palignr Pq,Qq,Ib | palignr Vdq,Wdq,Ib (66) -14: pextrb Rd/Mb,Vdq,Ib (66) -15: pextrw Rd/Mw,Vdq,Ib (66) -16: pextrd/pextrq Ed/q,Vdq,Ib (66) -17: extractps Ed,Vdq,Ib (66) -20: pinsrb Vdq,Rd/q/Mb,Ib (66) -21: insertps Vdq,Udq/Md,Ib (66) -22: pinsrd/pinsrq Vdq,Ed/q,Ib (66) -40: dpps Vdq,Wdq,Ib (66) -41: dppd Vdq,Wdq,Ib (66) -42: mpsadbw Vdq,Wdq,Ib (66) -44: pclmulq Vdq,Wdq,Ib (66) -60: pcmpestrm Vdq,Wdq,Ib (66) -61: pcmpestri Vdq,Wdq,Ib (66) -62: pcmpistrm Vdq,Wdq,Ib (66) -63: pcmpistri Vdq,Wdq,Ib (66) -df: aeskeygenassist Vdq,Wdq,Ib (66) +04: vpermilps /r,Ib (66),(oVEX) +05: vpermilpd /r,Ib (66),(oVEX) +06: vperm2f128 /r,Ib (66),(oVEX),(o256) +08: roundps Vdq,Wdq,Ib (66),(VEX) +09: roundpd Vdq,Wdq,Ib (66),(VEX) +0a: roundss Vss,Wss,Ib (66),(VEX),(o128) +0b: roundsd Vsd,Wsd,Ib (66),(VEX),(o128) +0c: blendps Vdq,Wdq,Ib (66),(VEX) +0d: blendpd Vdq,Wdq,Ib (66),(VEX) +0e: pblendw Vdq,Wdq,Ib (66),(VEX),(o128) +0f: palignr Pq,Qq,Ib | palignr Vdq,Wdq,Ib (66),(VEX),(o128) +14: pextrb Rd/Mb,Vdq,Ib (66),(VEX),(o128) +15: pextrw Rd/Mw,Vdq,Ib (66),(VEX),(o128) +16: pextrd/pextrq Ed/q,Vdq,Ib (66),(VEX),(o128) +17: extractps Ed,Vdq,Ib (66),(VEX),(o128) +18: vinsertf128 /r,Ib (66),(oVEX),(o256) +19: vextractf128 /r,Ib (66),(oVEX),(o256) +20: pinsrb Vdq,Rd/q/Mb,Ib (66),(VEX),(o128) +21: insertps Vdq,Udq/Md,Ib (66),(VEX),(o128) +22: pinsrd/pinsrq Vdq,Ed/q,Ib (66),(VEX),(o128) +40: dpps Vdq,Wdq,Ib (66),(VEX) +41: dppd Vdq,Wdq,Ib (66),(VEX),(o128) +42: mpsadbw Vdq,Wdq,Ib (66),(VEX),(o128) +44: pclmulq Vdq,Wdq,Ib (66),(VEX),(o128) +4a: vblendvps /r,Ib (66),(oVEX) +4b: vblendvpd /r,Ib (66),(oVEX) +4c: vpblendvb /r,Ib (66),(oVEX),(o128) +60: pcmpestrm Vdq,Wdq,Ib (66),(VEX),(o128) +61: pcmpestri Vdq,Wdq,Ib (66),(VEX),(o128) +62: pcmpistrm Vdq,Wdq,Ib (66),(VEX),(o128) +63: pcmpistri Vdq,Wdq,Ib (66),(VEX),(o128) +df: aeskeygenassist Vdq,Wdq,Ib (66),(VEX),(o128) EndTable GrpTable: Grp1 @@ -785,29 +804,29 @@ GrpTable: Grp11 EndTable GrpTable: Grp12 -2: psrlw Nq,Ib (11B) | psrlw Udq,Ib (66),(11B) -4: psraw Nq,Ib (11B) | psraw Udq,Ib (66),(11B) -6: psllw Nq,Ib (11B) | psllw Udq,Ib (66),(11B) +2: psrlw Nq,Ib (11B) | psrlw Udq,Ib (66),(11B),(VEX),(o128) +4: psraw Nq,Ib (11B) | psraw Udq,Ib (66),(11B),(VEX),(o128) +6: psllw Nq,Ib (11B) | psllw Udq,Ib (66),(11B),(VEX),(o128) EndTable GrpTable: Grp13 -2: psrld Nq,Ib (11B) | psrld Udq,Ib (66),(11B) -4: psrad Nq,Ib (11B) | psrad Udq,Ib (66),(11B) -6: pslld Nq,Ib (11B) | pslld Udq,Ib (66),(11B) +2: psrld Nq,Ib (11B) | psrld Udq,Ib (66),(11B),(VEX),(o128) +4: psrad Nq,Ib (11B) | psrad Udq,Ib (66),(11B),(VEX),(o128) +6: pslld Nq,Ib (11B) | pslld Udq,Ib (66),(11B),(VEX),(o128) EndTable GrpTable: Grp14 -2: psrlq Nq,Ib (11B) | psrlq Udq,Ib (66),(11B) -3: psrldq Udq,Ib (66),(11B) -6: psllq Nq,Ib (11B) | psllq Udq,Ib (66),(11B) -7: pslldq Udq,Ib (66),(11B) +2: psrlq Nq,Ib (11B) | psrlq Udq,Ib (66),(11B),(VEX),(o128) +3: psrldq Udq,Ib (66),(11B),(VEX),(o128) +6: psllq Nq,Ib (11B) | psllq Udq,Ib (66),(11B),(VEX),(o128) +7: pslldq Udq,Ib (66),(11B),(VEX),(o128) EndTable GrpTable: Grp15 0: fxsave 1: fxstor -2: ldmxcsr -3: stmxcsr +2: ldmxcsr (VEX) +3: stmxcsr (VEX) 4: XSAVE 5: XRSTOR | lfence (11B) 
6: mfence (11B) diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk index 7d5492951e2..e34e92a28eb 100644 --- a/arch/x86/tools/gen-insn-attr-x86.awk +++ b/arch/x86/tools/gen-insn-attr-x86.awk @@ -13,6 +13,18 @@ function check_awk_implement() { return "" } +# Clear working vars +function clear_vars() { + delete table + delete lptable2 + delete lptable1 + delete lptable3 + eid = -1 # escape id + gid = -1 # group id + aid = -1 # AVX id + tname = "" +} + BEGIN { # Implementation error checking awkchecked = check_awk_implement() @@ -24,11 +36,15 @@ BEGIN { # Setup generating tables print "/* x86 opcode map generated from x86-opcode-map.txt */" - print "/* Do not change this code. */" + print "/* Do not change this code. */\n" ggid = 1 geid = 1 + gaid = 0 + delete etable + delete gtable + delete atable - opnd_expr = "^[[:alpha:]]" + opnd_expr = "^[[:alpha:]/]" ext_expr = "^\\(" sep_expr = "^\\|$" group_expr = "^Grp[[:alnum:]]+" @@ -46,19 +62,19 @@ BEGIN { imm_flag["Ob"] = "INAT_MOFFSET" imm_flag["Ov"] = "INAT_MOFFSET" - modrm_expr = "^([CDEGMNPQRSUVW][[:lower:]]+|NTA|T[012])" + modrm_expr = "^([CDEGMNPQRSUVW/][[:lower:]]+|NTA|T[012])" force64_expr = "\\([df]64\\)" rex_expr = "^REX(\\.[XRWB]+)*" fpu_expr = "^ESC" # TODO lprefix1_expr = "\\(66\\)" - delete lptable1 - lprefix2_expr = "\\(F2\\)" - delete lptable2 - lprefix3_expr = "\\(F3\\)" - delete lptable3 + lprefix2_expr = "\\(F3\\)" + lprefix3_expr = "\\(F2\\)" max_lprefix = 4 + vexok_expr = "\\(VEX\\)" + vexonly_expr = "\\(oVEX\\)" + prefix_expr = "\\(Prefix\\)" prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ" prefix_num["REPNE"] = "INAT_PFX_REPNE" @@ -71,12 +87,10 @@ BEGIN { prefix_num["SEG=GS"] = "INAT_PFX_GS" prefix_num["SEG=SS"] = "INAT_PFX_SS" prefix_num["Address-Size"] = "INAT_PFX_ADDRSZ" + prefix_num["2bytes-VEX"] = "INAT_PFX_VEX2" + prefix_num["3bytes-VEX"] = "INAT_PFX_VEX3" - delete table - delete etable - delete gtable - eid = -1 - gid = -1 + clear_vars() } function semantic_error(msg) { @@ -97,14 +111,12 @@ function array_size(arr, i,c) { /^Table:/ { print "/* " $0 " */" + if (tname != "") + semantic_error("Hit Table: before EndTable:."); } /^Referrer:/ { - if (NF == 1) { - # primary opcode table - tname = "inat_primary_table" - eid = -1 - } else { + if (NF != 1) { # escape opcode table ref = "" for (i = 2; i <= NF; i++) @@ -114,6 +126,19 @@ function array_size(arr, i,c) { } } +/^AVXcode:/ { + if (NF != 1) { + # AVX/escape opcode table + aid = $2 + if (gaid <= aid) + gaid = aid + 1 + if (tname == "") # AVX only opcode table + tname = sprintf("inat_avx_table_%d", $2) + } + if (aid == -1 && eid == -1) # primary opcode table + tname = "inat_primary_table" +} + /^GrpTable:/ { print "/* " $0 " */" if (!($2 in group)) @@ -162,30 +187,33 @@ function print_table(tbl,name,fmt,n) print_table(table, tname "[INAT_OPCODE_TABLE_SIZE]", "0x%02x", 256) etable[eid,0] = tname + if (aid >= 0) + atable[aid,0] = tname } if (array_size(lptable1) != 0) { print_table(lptable1,tname "_1[INAT_OPCODE_TABLE_SIZE]", "0x%02x", 256) etable[eid,1] = tname "_1" + if (aid >= 0) + atable[aid,1] = tname "_1" } if (array_size(lptable2) != 0) { print_table(lptable2,tname "_2[INAT_OPCODE_TABLE_SIZE]", "0x%02x", 256) etable[eid,2] = tname "_2" + if (aid >= 0) + atable[aid,2] = tname "_2" } if (array_size(lptable3) != 0) { print_table(lptable3,tname "_3[INAT_OPCODE_TABLE_SIZE]", "0x%02x", 256) etable[eid,3] = tname "_3" + if (aid >= 0) + atable[aid,3] = tname "_3" } } print "" - delete table - delete lptable1 - delete lptable2 - delete 
lptable3 - gid = -1 - eid = -1 + clear_vars() } function add_flags(old,new) { @@ -284,6 +312,14 @@ function convert_operands(opnd, i,imm,mod) if (match(opcode, fpu_expr)) flags = add_flags(flags, "INAT_MODRM") + # check VEX only code + if (match(ext, vexonly_expr)) + flags = add_flags(flags, "INAT_VEXOK | INAT_VEXONLY") + + # check VEX only code + if (match(ext, vexok_expr)) + flags = add_flags(flags, "INAT_VEXOK") + # check prefixes if (match(ext, prefix_expr)) { if (!prefix_num[opcode]) @@ -330,5 +366,15 @@ END { for (j = 0; j < max_lprefix; j++) if (gtable[i,j]) print " ["i"]["j"] = "gtable[i,j]"," + print "};\n" + # print AVX opcode map's array + print "/* AVX opcode map array */" + print "const insn_attr_t const *inat_avx_tables[X86_VEX_M_MAX + 1]"\ + "[INAT_LSTPFX_MAX + 1] = {" + for (i = 0; i < gaid; i++) + for (j = 0; j < max_lprefix; j++) + if (atable[i,j]) + print " ["i"]["j"] = "atable[i,j]"," print "};" } + From 3f7e454af1dd8b9cea410d9380d3f71477e94f2b Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 27 Oct 2009 16:42:35 -0400 Subject: [PATCH 201/470] x86: Add Intel FMA instructions to x86 opcode map Add Intel FMA(FUSED-MULTIPLY-ADD) instructions to x86 opcode map for x86 instruction decoder. Signed-off-by: Masami Hiramatsu Cc: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Frederic Weisbecker Cc: Jason Baron Cc: K.Prasad Cc: Peter Zijlstra Cc: Srikar Dronamraju LKML-Reference: <20091027204235.30545.33997.stgit@harusame> Signed-off-by: Ingo Molnar --- arch/x86/lib/x86-opcode-map.txt | 34 ++++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index 9887bfeeb2d..a793da5e560 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt @@ -647,11 +647,43 @@ AVXcode: 2 3d: pmaxsd Vdq,Wdq (66),(VEX),(o128) 3e: pmaxuw Vdq,Wdq (66),(VEX),(o128) 3f: pmaxud Vdq,Wdq (66),(VEX),(o128) -# 0x0f 0x38 0x4f-0xff +# 0x0f 0x38 0x40-0x8f 40: pmulld Vdq,Wdq (66),(VEX),(o128) 41: phminposuw Vdq,Wdq (66),(VEX),(o128) 80: INVEPT Gd/q,Mdq (66) 81: INVPID Gd/q,Mdq (66) +# 0x0f 0x38 0x90-0xbf (FMA) +96: vfmaddsub132pd/ps /r (66),(VEX) +97: vfmsubadd132pd/ps /r (66),(VEX) +98: vfmadd132pd/ps /r (66),(VEX) +99: vfmadd132sd/ss /r (66),(VEX),(o128) +9a: vfmsub132pd/ps /r (66),(VEX) +9b: vfmsub132sd/ss /r (66),(VEX),(o128) +9c: vfnmadd132pd/ps /r (66),(VEX) +9d: vfnmadd132sd/ss /r (66),(VEX),(o128) +9e: vfnmsub132pd/ps /r (66),(VEX) +9f: vfnmsub132sd/ss /r (66),(VEX),(o128) +a6: vfmaddsub213pd/ps /r (66),(VEX) +a7: vfmsubadd213pd/ps /r (66),(VEX) +a8: vfmadd213pd/ps /r (66),(VEX) +a9: vfmadd213sd/ss /r (66),(VEX),(o128) +aa: vfmsub213pd/ps /r (66),(VEX) +ab: vfmsub213sd/ss /r (66),(VEX),(o128) +ac: vfnmadd213pd/ps /r (66),(VEX) +ad: vfnmadd213sd/ss /r (66),(VEX),(o128) +ae: vfnmsub213pd/ps /r (66),(VEX) +af: vfnmsub213sd/ss /r (66),(VEX),(o128) +b6: vfmaddsub231pd/ps /r (66),(VEX) +b7: vfmsubadd231pd/ps /r (66),(VEX) +b8: vfmadd231pd/ps /r (66),(VEX) +b9: vfmadd231sd/ss /r (66),(VEX),(o128) +ba: vfmsub231pd/ps /r (66),(VEX) +bb: vfmsub231sd/ss /r (66),(VEX),(o128) +bc: vfnmadd231pd/ps /r (66),(VEX) +bd: vfnmadd231sd/ss /r (66),(VEX),(o128) +be: vfnmsub231pd/ps /r (66),(VEX) +bf: vfnmsub231sd/ss /r (66),(VEX),(o128) +# 0x0f 0x38 0xc0-0xff db: aesimc Vdq,Wdq (66),(VEX),(o128) dc: aesenc Vdq,Wdq (66),(VEX),(o128) dd: aesenclast Vdq,Wdq (66),(VEX),(o128) From dd004c475cd15a5749b04b0283d41ffdfa57d658 Mon Sep 17 
00:00:00 2001 From: Masami Hiramatsu Date: Tue, 27 Oct 2009 16:42:44 -0400 Subject: [PATCH 202/470] kprobe-tracer: Compare both of event-name and event-group to find probe Fix find_probe_event() to compare both of event-name and event-group. Without this fix, kprobe-tracer overwrites existing same event-name probe even if its group-name is different. Signed-off-by: Masami Hiramatsu Cc: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Frederic Weisbecker Cc: Jason Baron Cc: K.Prasad Cc: Peter Zijlstra Cc: Srikar Dronamraju LKML-Reference: <20091027204244.30545.27516.stgit@harusame> Signed-off-by: Ingo Molnar --- kernel/trace/trace_kprobe.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index b8ef707c84d..a86c3ac0df2 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -353,12 +353,14 @@ static void free_trace_probe(struct trace_probe *tp) kfree(tp); } -static struct trace_probe *find_probe_event(const char *event) +static struct trace_probe *find_probe_event(const char *event, + const char *group) { struct trace_probe *tp; list_for_each_entry(tp, &probe_list, list) - if (!strcmp(tp->call.name, event)) + if (strcmp(tp->call.name, event) == 0 && + strcmp(tp->call.system, group) == 0) return tp; return NULL; } @@ -383,7 +385,7 @@ static int register_trace_probe(struct trace_probe *tp) mutex_lock(&probe_lock); /* register as an event */ - old_tp = find_probe_event(tp->call.name); + old_tp = find_probe_event(tp->call.name, tp->call.system); if (old_tp) { /* delete old event */ unregister_trace_probe(old_tp); From 8030c5f5a57e018fcdeb1f395d7adc123b48ced6 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 27 Oct 2009 16:42:53 -0400 Subject: [PATCH 203/470] perf/probes: Exit searching after finding target function Exit searching after finding real (not-inlined) function, because there should be no same symbol in that CU. Signed-off-by: Masami Hiramatsu Cc: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Frederic Weisbecker Cc: Jason Baron Cc: K.Prasad Cc: Peter Zijlstra Cc: Srikar Dronamraju LKML-Reference: <20091027204252.30545.19251.stgit@harusame> Signed-off-by: Ingo Molnar --- tools/perf/util/probe-finder.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 54e70718530..b98d35ef711 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -585,14 +585,14 @@ static int probefunc_callback(struct die_link *dlink, void *data) DIE_IF(ret != DW_DLV_OK); pr_debug("inline definition offset %lld\n", pf->inl_offs); - return 0; + return 0; /* Continue to search */ } /* Get probe address */ pf->addr = die_get_entrypc(dlink->die); pf->addr += pp->offset; /* TODO: Check the address in this function */ show_probepoint(dlink->die, pp->offset, pf); - /* Continue to search */ + return 1; /* Exit; no same symbol in this CU. */ } } else if (tag == DW_TAG_inlined_subroutine && pf->inl_offs) { if (die_get_abstract_origin(dlink->die) == pf->inl_offs) { From 46ab49267d338eb5056d0077e16346509b9e9284 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 27 Oct 2009 16:43:02 -0400 Subject: [PATCH 204/470] perf/probes: Improve command-line option of perf-probe Change command-line option from -P to --add, and accepting probes without --add too. 
perf probe --add "probe-define" or, just: perf probe "probe-define" Signed-off-by: Masami Hiramatsu Cc: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Frederic Weisbecker Cc: Jason Baron Cc: K.Prasad Cc: Peter Zijlstra Cc: Srikar Dronamraju LKML-Reference: <20091027204301.30545.48600.stgit@harusame> Signed-off-by: Ingo Molnar --- tools/perf/builtin-probe.c | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index dcb406c7f82..3370dabed15 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -65,8 +65,8 @@ static struct { #define semantic_error(msg ...) die("Semantic error :" msg) -static int parse_probepoint(const struct option *opt __used, - const char *str, int unset __used) +/* Parse a probe point. Note that any error must die. */ +static void parse_probepoint(const char *str) { char *argv[MAX_PROBE_ARGS + 2]; /* Event + probe + args */ int argc, i; @@ -75,9 +75,6 @@ static int parse_probepoint(const struct option *opt __used, char **event = &session.events[session.nr_probe]; int retp = 0; - if (!str) /* The end of probe points */ - return 0; - pr_debug("probe-definition(%d): %s\n", session.nr_probe, str); if (++session.nr_probe == MAX_PROBES) semantic_error("Too many probes"); @@ -176,6 +173,13 @@ static int parse_probepoint(const struct option *opt __used, } pr_debug("%d arguments\n", pp->nr_args); +} + +static int opt_add_probepoint(const struct option *opt __used, + const char *str, int unset __used) +{ + if (str) + parse_probepoint(str); return 0; } @@ -211,7 +215,8 @@ static int open_default_vmlinux(void) #endif static const char * const probe_usage[] = { - "perf probe [] -P 'PROBEDEF' [-P 'PROBEDEF' ...]", + "perf probe [] 'PROBEDEF' ['PROBEDEF' ...]", + "perf probe [] --add 'PROBEDEF' [--add 'PROBEDEF' ...]", NULL }; @@ -222,7 +227,7 @@ static const struct option options[] = { OPT_STRING('k', "vmlinux", &session.vmlinux, "file", "vmlinux/module pathname"), #endif - OPT_CALLBACK('P', "probe", NULL, + OPT_CALLBACK('a', "add", NULL, #ifdef NO_LIBDWARF "p|r:[GRP/]NAME FUNC[+OFFS] [ARG ...]", #else @@ -243,7 +248,7 @@ static const struct option options[] = { "\t\tARG:\tProbe argument (local variable name or\n" #endif "\t\t\tkprobe-tracer argument format is supported.)\n", - parse_probepoint), + opt_add_probepoint), OPT_END() }; @@ -296,8 +301,11 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) char buf[MAX_CMDLEN]; argc = parse_options(argc, argv, options, probe_usage, - PARSE_OPT_STOP_AT_NON_OPTION); - if (argc || session.nr_probe == 0) + PARSE_OPT_STOP_AT_NON_OPTION); + for (i = 0; i < argc; i++) + parse_probe_event(argv[i]); + + if (session.nr_probe == 0) usage_with_options(probe_usage, options); #ifdef NO_LIBDWARF From 253977b0d87fbb793f12b1661a763ae264028ccf Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 27 Oct 2009 16:43:10 -0400 Subject: [PATCH 205/470] perf/probes: Improve probe point syntax of perf-probe This changes probe point syntax of perf-probe as below [:ABS_LN] [ARGS] or [+OFFS|%return][@SRC] [ARGS] And event name and event group name are automatically generated based on probe-symbol and offset as below. perfprobes/SYMBOL_OFFSET[_NUM] Where SYMBOL is the probing symbol and OFFSET is the byte offset from the symbol. Signed-off-by: Masami Hiramatsu Cc: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Christoph Hellwig Cc: Frank Ch. 
Eigler Cc: Frederic Weisbecker Cc: Jason Baron Cc: K.Prasad Cc: Peter Zijlstra Cc: Srikar Dronamraju LKML-Reference: <20091027204310.30545.84984.stgit@harusame> Signed-off-by: Ingo Molnar --- tools/perf/builtin-probe.c | 181 ++++++++++++++++++++------------- tools/perf/util/probe-finder.c | 10 ++ tools/perf/util/probe-finder.h | 2 + 3 files changed, 123 insertions(+), 70 deletions(-) diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index 3370dabed15..92b4c491f23 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -52,6 +52,7 @@ const char *default_search_path[NR_SEARCH_PATH] = { #define MAX_PATH_LEN 256 #define MAX_PROBES 128 #define MAX_PROBE_ARGS 128 +#define PERFPROBE_GROUP "perfprobe" /* Session management structure */ static struct { @@ -60,20 +61,100 @@ static struct { int need_dwarf; int nr_probe; struct probe_point probes[MAX_PROBES]; - char *events[MAX_PROBES]; } session; #define semantic_error(msg ...) die("Semantic error :" msg) -/* Parse a probe point. Note that any error must die. */ -static void parse_probepoint(const char *str) +/* Parse probe point. Return 1 if return probe */ +static void parse_probe_point(char *arg, struct probe_point *pp) +{ + char *ptr, *tmp; + char c, nc; + /* + * + * perf probe SRC:LN + * perf probe FUNC[+OFFS|%return][@SRC] + */ + + ptr = strpbrk(arg, ":+@%"); + if (ptr) { + nc = *ptr; + *ptr++ = '\0'; + } + + /* Check arg is function or file and copy it */ + if (strchr(arg, '.')) /* File */ + pp->file = strdup(arg); + else /* Function */ + pp->function = strdup(arg); + DIE_IF(pp->file == NULL && pp->function == NULL); + + /* Parse other options */ + while (ptr) { + arg = ptr; + c = nc; + ptr = strpbrk(arg, ":+@%"); + if (ptr) { + nc = *ptr; + *ptr++ = '\0'; + } + switch (c) { + case ':': /* Line number */ + pp->line = strtoul(arg, &tmp, 0); + if (*tmp != '\0') + semantic_error("There is non-digit charactor" + " in line number."); + break; + case '+': /* Byte offset from a symbol */ + pp->offset = strtoul(arg, &tmp, 0); + if (*tmp != '\0') + semantic_error("There is non-digit charactor" + " in offset."); + break; + case '@': /* File name */ + if (pp->file) + semantic_error("SRC@SRC is not allowed."); + pp->file = strdup(arg); + DIE_IF(pp->file == NULL); + if (ptr) + semantic_error("@SRC must be the last " + "option."); + break; + case '%': /* Probe places */ + if (strcmp(arg, "return") == 0) { + pp->retprobe = 1; + } else /* Others not supported yet */ + semantic_error("%%%s is not supported.", arg); + break; + default: + DIE_IF("Program has a bug."); + break; + } + } + + /* Exclusion check */ + if (pp->line && pp->function) + semantic_error("Function-relative line number is not" + " supported yet."); + if (!pp->line && pp->file && !pp->function) + semantic_error("File always requires line number."); + if (pp->offset && !pp->function) + semantic_error("Offset requires an entry function."); + if (pp->retprobe && !pp->function) + semantic_error("Return probe requires an entry function."); + if (pp->offset && pp->retprobe) + semantic_error("Offset can't be used with return probe."); + + pr_debug("symbol:%s file:%s line:%d offset:%d, return:%d\n", + pp->function, pp->file, pp->line, pp->offset, pp->retprobe); +} + +/* Parse an event definition. Note that any error must die. 
*/ +static void parse_probe_event(const char *str) { char *argv[MAX_PROBE_ARGS + 2]; /* Event + probe + args */ int argc, i; - char *arg, *ptr; struct probe_point *pp = &session.probes[session.nr_probe]; - char **event = &session.events[session.nr_probe]; - int retp = 0; pr_debug("probe-definition(%d): %s\n", session.nr_probe, str); if (++session.nr_probe == MAX_PROBES) @@ -103,70 +184,28 @@ static void parse_probepoint(const char *str) pr_debug("argv[%d]=%s\n", argc, argv[argc - 1]); } } while (*str != '\0'); - if (argc < 2) - semantic_error("Need event-name and probe-point at least."); - - /* Parse the event name */ - if (argv[0][0] == 'r') - retp = 1; - else if (argv[0][0] != 'p') - semantic_error("You must specify 'p'(kprobe) or" - " 'r'(kretprobe) first."); - /* TODO: check event name */ - *event = argv[0]; + if (!argc) + semantic_error("An empty argument."); /* Parse probe point */ - arg = argv[1]; - if (arg[0] == '@') { - /* Source Line */ - arg++; - ptr = strchr(arg, ':'); - if (!ptr || !isdigit(ptr[1])) - semantic_error("Line number is required."); - *ptr++ = '\0'; - if (strlen(arg) == 0) - semantic_error("No file name."); - pp->file = strdup(arg); - pp->line = atoi(ptr); - if (!pp->file || !pp->line) - semantic_error("Failed to parse line."); - pr_debug("file:%s line:%d\n", pp->file, pp->line); - } else { - /* Function name */ - ptr = strchr(arg, '+'); - if (ptr) { - if (!isdigit(ptr[1])) - semantic_error("Offset is required."); - *ptr++ = '\0'; - pp->offset = atoi(ptr); - } else - ptr = arg; - ptr = strchr(ptr, '@'); - if (ptr) { - *ptr++ = '\0'; - pp->file = strdup(ptr); - } - pp->function = strdup(arg); - pr_debug("symbol:%s file:%s offset:%d\n", - pp->function, pp->file, pp->offset); - } - free(argv[1]); + parse_probe_point(argv[0], pp); + free(argv[0]); if (pp->file) session.need_dwarf = 1; /* Copy arguments */ - pp->nr_args = argc - 2; + pp->nr_args = argc - 1; if (pp->nr_args > 0) { pp->args = (char **)malloc(sizeof(char *) * pp->nr_args); if (!pp->args) die("malloc"); - memcpy(pp->args, &argv[2], sizeof(char *) * pp->nr_args); + memcpy(pp->args, &argv[1], sizeof(char *) * pp->nr_args); } /* Ensure return probe has no C argument */ for (i = 0; i < pp->nr_args; i++) if (is_c_varname(pp->args[i])) { - if (retp) + if (pp->retprobe) semantic_error("You can't specify local" " variable for kretprobe"); session.need_dwarf = 1; @@ -175,11 +214,11 @@ static void parse_probepoint(const char *str) pr_debug("%d arguments\n", pp->nr_args); } -static int opt_add_probepoint(const struct option *opt __used, +static int opt_add_probe_event(const struct option *opt __used, const char *str, int unset __used) { if (str) - parse_probepoint(str); + parse_probe_event(str); return 0; } @@ -229,17 +268,16 @@ static const struct option options[] = { #endif OPT_CALLBACK('a', "add", NULL, #ifdef NO_LIBDWARF - "p|r:[GRP/]NAME FUNC[+OFFS] [ARG ...]", + "FUNC[+OFFS|%return] [ARG ...]", #else - "p|r:[GRP/]NAME FUNC[+OFFS][@SRC]|@SRC:LINE [ARG ...]", + "FUNC[+OFFS|%return][@SRC]|SRC:LINE [ARG ...]", #endif "probe point definition, where\n" - "\t\tp:\tkprobe probe\n" - "\t\tr:\tkretprobe probe\n" "\t\tGRP:\tGroup name (optional)\n" "\t\tNAME:\tEvent name\n" "\t\tFUNC:\tFunction name\n" "\t\tOFFS:\tOffset from function entry (in byte)\n" + "\t\t%return:\tPut the probe at function return\n" #ifdef NO_LIBDWARF "\t\tARG:\tProbe argument (only \n" #else @@ -248,7 +286,7 @@ static const struct option options[] = { "\t\tARG:\tProbe argument (local variable name or\n" #endif "\t\t\tkprobe-tracer argument format is 
supported.)\n", - opt_add_probepoint), + opt_add_probe_event), OPT_END() }; @@ -266,7 +304,7 @@ static int write_new_event(int fd, const char *buf) #define MAX_CMDLEN 256 -static int synthesize_probepoint(struct probe_point *pp) +static int synthesize_probe_event(struct probe_point *pp) { char *buf; int i, len, ret; @@ -316,12 +354,12 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) /* Synthesize probes without dwarf */ for (j = 0; j < session.nr_probe; j++) { #ifndef NO_LIBDWARF - if (session.events[j][0] != 'r') { + if (!session.probes[j].retprobe) { session.need_dwarf = 1; continue; } #endif - ret = synthesize_probepoint(&session.probes[j]); + ret = synthesize_probe_event(&session.probes[j]); if (ret == -E2BIG) semantic_error("probe point is too long."); else if (ret < 0) @@ -349,7 +387,6 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) ret = find_probepoint(fd, pp); if (ret <= 0) die("No probe point found.\n"); - pr_debug("probe event %s found\n", session.events[j]); } close(fd); @@ -364,13 +401,17 @@ setup_probes: for (j = 0; j < session.nr_probe; j++) { pp = &session.probes[j]; if (pp->found == 1) { - snprintf(buf, MAX_CMDLEN, "%s %s\n", - session.events[j], pp->probes[0]); + snprintf(buf, MAX_CMDLEN, "%c:%s/%s_%x %s\n", + pp->retprobe ? 'r' : 'p', PERFPROBE_GROUP, + pp->function, pp->offset, pp->probes[0]); write_new_event(fd, buf); } else for (i = 0; i < pp->found; i++) { - snprintf(buf, MAX_CMDLEN, "%s%d %s\n", - session.events[j], i, pp->probes[i]); + snprintf(buf, MAX_CMDLEN, "%c:%s/%s_%x_%d %s\n", + pp->retprobe ? 'r' : 'p', + PERFPROBE_GROUP, + pp->function, pp->offset, i, + pp->probes[0]); write_new_event(fd, buf); } } diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index b98d35ef711..6d3bac9f947 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -483,10 +483,20 @@ static void show_probepoint(Dwarf_Die sp_die, Dwarf_Signed offs, if (ret == DW_DLV_OK) { ret = snprintf(tmp, MAX_PROBE_BUFFER, "%s+%u", name, (unsigned int)offs); + /* Copy the function name if possible */ + if (!pp->function) { + pp->function = strdup(name); + pp->offset = offs; + } dwarf_dealloc(__dw_debug, name, DW_DLA_STRING); } else { /* This function has no name. */ ret = snprintf(tmp, MAX_PROBE_BUFFER, "0x%llx", pf->addr); + if (!pp->function) { + /* TODO: Use _stext */ + pp->function = strdup(""); + pp->offset = (int)pf->addr; + } } DIE_IF(ret < 0); DIE_IF(ret >= MAX_PROBE_BUFFER); diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h index d17fafc2135..240d6cb3cc2 100644 --- a/tools/perf/util/probe-finder.h +++ b/tools/perf/util/probe-finder.h @@ -22,6 +22,8 @@ struct probe_point { int nr_args; /* Number of arguments */ char **args; /* Arguments */ + int retprobe; /* Return probe */ + /* Output */ int found; /* Number of found probe points */ char *probes[MAX_PROBES]; /* Output buffers (will be allocated)*/ From b0ef07324310d66f660a311d4a8d669eda74f801 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 27 Oct 2009 16:43:19 -0400 Subject: [PATCH 206/470] perf/probes: Support function entry relative line number Add function-entry relative line number specifying support to perf-probe. This allows users to define probes by line number from entry of the function. e.g. perf probe schedule:16 Signed-off-by: Masami Hiramatsu Cc: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Christoph Hellwig Cc: Frank Ch. 
Eigler Cc: Frederic Weisbecker Cc: Jason Baron Cc: K.Prasad Cc: Peter Zijlstra Cc: Srikar Dronamraju LKML-Reference: <20091027204319.30545.30678.stgit@harusame> Signed-off-by: Ingo Molnar --- tools/perf/builtin-probe.c | 14 +++--- tools/perf/util/probe-finder.c | 79 ++++++++++++++++++++++++++-------- tools/perf/util/probe-finder.h | 2 + 3 files changed, 70 insertions(+), 25 deletions(-) diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index 92b4c491f23..a99a366230a 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -133,17 +133,16 @@ static void parse_probe_point(char *arg, struct probe_point *pp) } /* Exclusion check */ - if (pp->line && pp->function) - semantic_error("Function-relative line number is not" - " supported yet."); + if (pp->line && pp->offset) + semantic_error("Offset can't be used with line number."); if (!pp->line && pp->file && !pp->function) semantic_error("File always requires line number."); if (pp->offset && !pp->function) semantic_error("Offset requires an entry function."); if (pp->retprobe && !pp->function) semantic_error("Return probe requires an entry function."); - if (pp->offset && pp->retprobe) - semantic_error("Offset can't be used with return probe."); + if ((pp->offset || pp->line) && pp->retprobe) + semantic_error("Offset/Line can't be used with return probe."); pr_debug("symbol:%s file:%s line:%d offset:%d, return:%d\n", pp->function, pp->file, pp->line, pp->offset, pp->retprobe); @@ -270,7 +269,7 @@ static const struct option options[] = { #ifdef NO_LIBDWARF "FUNC[+OFFS|%return] [ARG ...]", #else - "FUNC[+OFFS|%return][@SRC]|SRC:LINE [ARG ...]", + "FUNC[+OFFS|%return|:RLN][@SRC]|SRC:ALN [ARG ...]", #endif "probe point definition, where\n" "\t\tGRP:\tGroup name (optional)\n" @@ -282,7 +281,8 @@ static const struct option options[] = { "\t\tARG:\tProbe argument (only \n" #else "\t\tSRC:\tSource code path\n" - "\t\tLINE:\tLine number\n" + "\t\tRLN:\tRelative line number from function entry.\n" + "\t\tALN:\tAbsolute line number in file.\n" "\t\tARG:\tProbe argument (local variable name or\n" #endif "\t\t\tkprobe-tracer argument format is supported.)\n", diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 6d3bac9f947..db96186e02a 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -114,7 +114,7 @@ static int strtailcmp(const char *s1, const char *s2) } /* Find the fileno of the target file. 
*/ -static Dwarf_Unsigned die_get_fileno(Dwarf_Die cu_die, const char *fname) +static Dwarf_Unsigned cu_find_fileno(Dwarf_Die cu_die, const char *fname) { Dwarf_Signed cnt, i; Dwarf_Unsigned found = 0; @@ -335,6 +335,36 @@ static int attr_get_locdesc(Dwarf_Attribute attr, Dwarf_Locdesc *desc, return ret; } +/* Get decl_file attribute value (file number) */ +static Dwarf_Unsigned die_get_decl_file(Dwarf_Die sp_die) +{ + Dwarf_Attribute attr; + Dwarf_Unsigned fno; + int ret; + + ret = dwarf_attr(sp_die, DW_AT_decl_file, &attr, &__dw_error); + DIE_IF(ret != DW_DLV_OK); + dwarf_formudata(attr, &fno, &__dw_error); + DIE_IF(ret != DW_DLV_OK); + dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR); + return fno; +} + +/* Get decl_line attribute value (line number) */ +static Dwarf_Unsigned die_get_decl_line(Dwarf_Die sp_die) +{ + Dwarf_Attribute attr; + Dwarf_Unsigned lno; + int ret; + + ret = dwarf_attr(sp_die, DW_AT_decl_line, &attr, &__dw_error); + DIE_IF(ret != DW_DLV_OK); + dwarf_formudata(attr, &lno, &__dw_error); + DIE_IF(ret != DW_DLV_OK); + dwarf_dealloc(__dw_debug, attr, DW_DLA_ATTR); + return lno; +} + /* * Probe finder related functions */ @@ -501,6 +531,7 @@ static void show_probepoint(Dwarf_Die sp_die, Dwarf_Signed offs, DIE_IF(ret < 0); DIE_IF(ret >= MAX_PROBE_BUFFER); len = ret; + pr_debug("Probe point found: %s\n", tmp); /* Find each argument */ get_current_frame_base(sp_die, pf); @@ -536,17 +567,16 @@ static int probeaddr_callback(struct die_link *dlink, void *data) } /* Find probe point from its line number */ -static void find_by_line(Dwarf_Die cu_die, struct probe_finder *pf) +static void find_by_line(struct probe_finder *pf) { - struct probe_point *pp = pf->pp; - Dwarf_Signed cnt, i; + Dwarf_Signed cnt, i, clm; Dwarf_Line *lines; Dwarf_Unsigned lineno = 0; Dwarf_Addr addr; Dwarf_Unsigned fno; int ret; - ret = dwarf_srclines(cu_die, &lines, &cnt, &__dw_error); + ret = dwarf_srclines(pf->cu_die, &lines, &cnt, &__dw_error); DIE_IF(ret != DW_DLV_OK); for (i = 0; i < cnt; i++) { @@ -557,15 +587,20 @@ static void find_by_line(Dwarf_Die cu_die, struct probe_finder *pf) ret = dwarf_lineno(lines[i], &lineno, &__dw_error); DIE_IF(ret != DW_DLV_OK); - if (lineno != (Dwarf_Unsigned)pp->line) + if (lineno != pf->lno) continue; + ret = dwarf_lineoff(lines[i], &clm, &__dw_error); + DIE_IF(ret != DW_DLV_OK); + ret = dwarf_lineaddr(lines[i], &addr, &__dw_error); DIE_IF(ret != DW_DLV_OK); - pr_debug("Probe point found: 0x%llx\n", addr); + pr_debug("Probe line found: line[%d]:%u,%d addr:0x%llx\n", + (int)i, (unsigned)lineno, (int)clm, addr); pf->addr = addr; /* Search a real subprogram including this line, */ - ret = search_die_from_children(cu_die, probeaddr_callback, pf); + ret = search_die_from_children(pf->cu_die, + probeaddr_callback, pf); if (ret == 0) die("Probe point is not found in subprograms.\n"); /* Continuing, because target line might be inlined. */ @@ -587,6 +622,13 @@ static int probefunc_callback(struct die_link *dlink, void *data) DIE_IF(ret == DW_DLV_ERROR); if (tag == DW_TAG_subprogram) { if (die_compare_name(dlink->die, pp->function) == 0) { + if (pp->line) { /* Function relative line */ + pf->fno = die_get_decl_file(dlink->die); + pf->lno = die_get_decl_line(dlink->die) + + pp->line; + find_by_line(pf); + return 1; + } if (die_inlined_subprogram(dlink->die)) { /* Inlined function, save it. 
*/ ret = dwarf_die_CU_offset(dlink->die, @@ -631,9 +673,9 @@ found: return 0; } -static void find_by_func(Dwarf_Die cu_die, struct probe_finder *pf) +static void find_by_func(struct probe_finder *pf) { - search_die_from_children(cu_die, probefunc_callback, pf); + search_die_from_children(pf->cu_die, probefunc_callback, pf); } /* Find a probe point */ @@ -641,7 +683,6 @@ int find_probepoint(int fd, struct probe_point *pp) { Dwarf_Half addr_size = 0; Dwarf_Unsigned next_cuh = 0; - Dwarf_Die cu_die = 0; int cu_number = 0, ret; struct probe_finder pf = {.pp = pp}; @@ -659,25 +700,27 @@ int find_probepoint(int fd, struct probe_point *pp) break; /* Get the DIE(Debugging Information Entry) of this CU */ - ret = dwarf_siblingof(__dw_debug, 0, &cu_die, &__dw_error); + ret = dwarf_siblingof(__dw_debug, 0, &pf.cu_die, &__dw_error); DIE_IF(ret != DW_DLV_OK); /* Check if target file is included. */ if (pp->file) - pf.fno = die_get_fileno(cu_die, pp->file); + pf.fno = cu_find_fileno(pf.cu_die, pp->file); if (!pp->file || pf.fno) { /* Save CU base address (for frame_base) */ - ret = dwarf_lowpc(cu_die, &pf.cu_base, &__dw_error); + ret = dwarf_lowpc(pf.cu_die, &pf.cu_base, &__dw_error); DIE_IF(ret == DW_DLV_ERROR); if (ret == DW_DLV_NO_ENTRY) pf.cu_base = 0; - if (pp->line) - find_by_line(cu_die, &pf); if (pp->function) - find_by_func(cu_die, &pf); + find_by_func(&pf); + else { + pf.lno = pp->line; + find_by_line(&pf); + } } - dwarf_dealloc(__dw_debug, cu_die, DW_DLA_DIE); + dwarf_dealloc(__dw_debug, pf.cu_die, DW_DLA_DIE); } ret = dwarf_finish(__dw_debug, &__dw_error); DIE_IF(ret != DW_DLV_OK); diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h index 240d6cb3cc2..bdebca6697d 100644 --- a/tools/perf/util/probe-finder.h +++ b/tools/perf/util/probe-finder.h @@ -41,7 +41,9 @@ struct probe_finder { /* For function searching */ Dwarf_Addr addr; /* Address */ Dwarf_Unsigned fno; /* File number */ + Dwarf_Unsigned lno; /* Line number */ Dwarf_Off inl_offs; /* Inline offset */ + Dwarf_Die cu_die; /* Current CU */ /* For variable searching */ Dwarf_Addr cu_base; /* Current CU base address */ From 3ed67776fc23061180896086a206a02be649dd26 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 28 Oct 2009 17:37:01 +0800 Subject: [PATCH 207/470] tracing/filters: Fix to make system filter work commit fce29d15b59245597f7f320db4a9f2be0f5fb512 ("tracing/filters: Refactor subsystem filter code") broke system filter accidentally. 
Signed-off-by: Li Zefan Cc: Steven Rostedt Cc: Frederic Weisbecker Cc: Tom Zanussi LKML-Reference: <4AE810BD.3070009@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_events_filter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 21d34757b95..50504cb228d 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -1230,12 +1230,12 @@ static int replace_system_preds(struct event_subsystem *system, struct filter_parse_state *ps, char *filter_string) { - struct event_filter *filter = system->filter; struct ftrace_event_call *call; bool fail = true; int err; list_for_each_entry(call, &ftrace_events, list) { + struct event_filter *filter = call->filter; if (!call->define_fields) continue; From afb7b4f08e274cecd8337f9444affa288a9cd4c1 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 30 Oct 2009 16:28:23 -0200 Subject: [PATCH 208/470] perf tools: Factor out the map initialization Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith LKML-Reference: <1256927305-4628-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/event.h | 2 ++ tools/perf/util/map.c | 28 ++++++++++++++++++---------- tools/perf/util/symbol.c | 12 +++--------- 3 files changed, 23 insertions(+), 19 deletions(-) diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 3064a05f0f5..4a158a01bb9 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -105,6 +105,8 @@ struct symbol; typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym); +void map__init(struct map *self, u64 start, u64 end, u64 pgoff, + struct dso *dso); struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen, unsigned int sym_priv_size); struct map *map__clone(struct map *self); diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index d302e513e06..3b7ce1bf9f8 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -20,6 +20,18 @@ static int strcommon(const char *pathname, char *cwd, int cwdlen) return n; } +void map__init(struct map *self, u64 start, u64 end, u64 pgoff, + struct dso *dso) +{ + self->start = start; + self->end = end; + self->pgoff = pgoff; + self->dso = dso; + self->map_ip = map__map_ip; + self->unmap_ip = map__unmap_ip; + RB_CLEAR_NODE(&self->rb_node); +} + struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen, unsigned int sym_priv_size) { @@ -28,6 +40,7 @@ struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen, if (self != NULL) { const char *filename = event->filename; char newfilename[PATH_MAX]; + struct dso *dso; int anon; if (cwd) { @@ -47,20 +60,15 @@ struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen, filename = newfilename; } - self->start = event->start; - self->end = event->start + event->len; - self->pgoff = event->pgoff; - - self->dso = dsos__findnew(filename, sym_priv_size); - if (self->dso == NULL) + dso = dsos__findnew(filename, sym_priv_size); + if (dso == NULL) goto out_delete; + map__init(self, event->start, event->start + event->len, + event->pgoff, dso); + if (self->dso == vdso || anon) self->map_ip = self->unmap_ip = identity__map_ip; - else { - self->map_ip = map__map_ip; - self->unmap_ip = map__unmap_ip; - } } return self; out_delete: diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 0273d83f728..13677b5dbe5 100644 --- 
a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1132,18 +1132,12 @@ static struct map *map__new2(u64 start, struct dso *dso) struct map *self = malloc(sizeof(*self)); if (self != NULL) { - self->start = start; /* - * Will be filled after we load all the symbols + * ->end will be filled after we load all the symbols */ - self->end = 0; - - self->pgoff = 0; - self->dso = dso; - self->map_ip = map__map_ip; - self->unmap_ip = map__unmap_ip; - RB_CLEAR_NODE(&self->rb_node); + map__init(self, start, 0, 0, dso); } + return self; } From 00a192b395b0606ad0265243844b3cd68e73420a Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 30 Oct 2009 16:28:24 -0200 Subject: [PATCH 209/470] perf tools: Simplify the symbol priv area mechanism Before we were storing this in the DSO, but in fact this is a property of the 'symbol' class, not something that will vary among DSOs, so move it to a global variable and initialize it using the existing symbol__init routine. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith LKML-Reference: <1256927305-4628-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 21 +++++----- tools/perf/builtin-report.c | 4 +- tools/perf/builtin-sched.c | 2 +- tools/perf/builtin-timechart.c | 2 +- tools/perf/builtin-top.c | 12 +++--- tools/perf/builtin-trace.c | 2 +- tools/perf/util/data_map.c | 2 +- tools/perf/util/event.h | 3 +- tools/perf/util/map.c | 5 +-- tools/perf/util/symbol.c | 73 ++++++++++++++++------------------ tools/perf/util/symbol.h | 24 +++++------ 11 files changed, 70 insertions(+), 80 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 8688bfee42a..77d50a6d680 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -55,11 +55,11 @@ struct sym_priv { static const char *sym_hist_filter; -static int symbol_filter(struct map *map, struct symbol *sym) +static int symbol_filter(struct map *map __used, struct symbol *sym) { if (sym_hist_filter == NULL || strcmp(sym->name, sym_hist_filter) == 0) { - struct sym_priv *priv = dso__sym_priv(map->dso, sym); + struct sym_priv *priv = symbol__priv(sym); const int size = (sizeof(*priv->hist) + (sym->end - sym->start) * sizeof(u64)); @@ -92,7 +92,7 @@ static void hist_hit(struct hist_entry *he, u64 ip) if (!sym || !he->map) return; - priv = dso__sym_priv(he->map->dso, sym); + priv = symbol__priv(sym); if (!priv->hist) return; @@ -202,8 +202,7 @@ got_map: static int process_mmap_event(event_t *event, unsigned long offset, unsigned long head) { - struct map *map = map__new(&event->mmap, NULL, 0, - sizeof(struct sym_priv)); + struct map *map = map__new(&event->mmap, NULL, 0); struct thread *thread = threads__findnew(event->mmap.pid); dump_printf("%p [%p]: PERF_RECORD_MMAP %d: [%p(%p) @ %p]: %s\n", @@ -355,7 +354,7 @@ static int parse_line(FILE *file, struct hist_entry *he, u64 len) unsigned int hits = 0; double percent = 0.0; const char *color; - struct sym_priv *priv = dso__sym_priv(he->map->dso, sym); + struct sym_priv *priv = symbol__priv(sym); struct sym_ext *sym_ext = priv->ext; struct sym_hist *h = priv->hist; @@ -422,7 +421,7 @@ static void insert_source_line(struct sym_ext *sym_ext) static void free_source_line(struct hist_entry *he, int len) { - struct sym_priv *priv = dso__sym_priv(he->map->dso, he->sym); + struct sym_priv *priv = symbol__priv(he->sym); struct sym_ext *sym_ext = priv->ext; int i; @@ -446,7 +445,7 @@ 
get_source_line(struct hist_entry *he, int len, const char *filename) int i; char cmd[PATH_MAX * 2]; struct sym_ext *sym_ext; - struct sym_priv *priv = dso__sym_priv(he->map->dso, sym); + struct sym_priv *priv = symbol__priv(sym); struct sym_hist *h = priv->hist; if (!h->sum) @@ -589,7 +588,7 @@ static void find_annotations(void) if (he->sym == NULL) continue; - priv = dso__sym_priv(he->map->dso, he->sym); + priv = symbol__priv(he->sym); if (priv->hist == NULL) continue; @@ -637,7 +636,7 @@ static int __cmd_annotate(void) exit(0); } - if (load_kernel(sizeof(struct sym_priv), symbol_filter) < 0) { + if (load_kernel(symbol_filter) < 0) { perror("failed to load kernel symbols"); return EXIT_FAILURE; } @@ -769,7 +768,7 @@ static void setup_sorting(void) int cmd_annotate(int argc, const char **argv, const char *prefix __used) { - symbol__init(); + symbol__init(sizeof(struct sym_priv)); page_size = getpagesize(); diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index f1bcd35bd22..1a806d5f05c 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -751,7 +751,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) static int process_mmap_event(event_t *event, unsigned long offset, unsigned long head) { - struct map *map = map__new(&event->mmap, cwd, cwdlen, 0); + struct map *map = map__new(&event->mmap, cwd, cwdlen); struct thread *thread = threads__findnew(event->mmap.pid); dump_printf("%p [%p]: PERF_RECORD_MMAP %d/%d: [%p(%p) @ %p]: %s\n", @@ -1093,7 +1093,7 @@ static void setup_list(struct strlist **list, const char *list_str, int cmd_report(int argc, const char **argv, const char *prefix __used) { - symbol__init(); + symbol__init(0); argc = parse_options(argc, argv, options, report_usage, 0); diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 9a48d9626be..df44b756cec 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -1937,7 +1937,7 @@ static int __cmd_record(int argc, const char **argv) int cmd_sched(int argc, const char **argv, const char *prefix __used) { - symbol__init(); + symbol__init(0); argc = parse_options(argc, argv, sched_options, sched_usage, PARSE_OPT_STOP_AT_NON_OPTION); diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index 0a2f22261c3..665877e4a94 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c @@ -1266,7 +1266,7 @@ static const struct option options[] = { int cmd_timechart(int argc, const char **argv, const char *prefix __used) { - symbol__init(); + symbol__init(0); page_size = getpagesize(); diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index ee87640b335..2aea913f7eb 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -789,7 +789,7 @@ static int symbol_filter(struct map *map, struct symbol *sym) strstr(name, "_text_end")) return 1; - syme = dso__sym_priv(map->dso, sym); + syme = symbol__priv(sym); syme->map = map; pthread_mutex_init(&syme->source_lock, NULL); if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) @@ -807,8 +807,7 @@ static int symbol_filter(struct map *map, struct symbol *sym) static int parse_symbols(void) { - if (dsos__load_kernel(vmlinux_name, sizeof(struct sym_entry), - symbol_filter, 1) <= 0) + if (dsos__load_kernel(vmlinux_name, symbol_filter, 1) <= 0) return -1; if (dump_symtab) @@ -859,7 +858,7 @@ static void event__process_sample(const event_t *self, int counter) return; } - syme = dso__sym_priv(map->dso, sym); + syme = 
symbol__priv(sym); if (!syme->skip) { syme->count[counter]++; @@ -878,8 +877,7 @@ static void event__process_mmap(event_t *self) struct thread *thread = threads__findnew(self->mmap.pid); if (thread != NULL) { - struct map *map = map__new(&self->mmap, NULL, 0, - sizeof(struct sym_entry)); + struct map *map = map__new(&self->mmap, NULL, 0); if (map != NULL) thread__insert_map(thread, map); } @@ -1176,7 +1174,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) { int counter; - symbol__init(); + symbol__init(sizeof(struct sym_entry)); page_size = sysconf(_SC_PAGE_SIZE); diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index e566bbe3f22..d042d656c56 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -151,7 +151,7 @@ static const struct option options[] = { int cmd_trace(int argc, const char **argv, const char *prefix __used) { - symbol__init(); + symbol__init(0); argc = parse_options(argc, argv, options, annotate_usage, 0); if (argc) { diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c index 18accb8fee4..c458db9ede6 100644 --- a/tools/perf/util/data_map.c +++ b/tools/perf/util/data_map.c @@ -130,7 +130,7 @@ int mmap_dispatch_perf_file(struct perf_header **pheader, if (curr_handler->sample_type_check(sample_type) < 0) exit(-1); - if (load_kernel(0, NULL) < 0) { + if (load_kernel(NULL) < 0) { perror("failed to load kernel symbols"); return EXIT_FAILURE; } diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 4a158a01bb9..0a443bea68d 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -107,8 +107,7 @@ typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym); void map__init(struct map *self, u64 start, u64 end, u64 pgoff, struct dso *dso); -struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen, - unsigned int sym_priv_size); +struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen); struct map *map__clone(struct map *self); int map__overlap(struct map *l, struct map *r); size_t map__fprintf(struct map *self, FILE *fp); diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 3b7ce1bf9f8..679011c1b6d 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -32,8 +32,7 @@ void map__init(struct map *self, u64 start, u64 end, u64 pgoff, RB_CLEAR_NODE(&self->rb_node); } -struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen, - unsigned int sym_priv_size) +struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen) { struct map *self = malloc(sizeof(*self)); @@ -60,7 +59,7 @@ struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen, filename = newfilename; } - dso = dsos__findnew(filename, sym_priv_size); + dso = dsos__findnew(filename); if (dso == NULL) goto out_delete; diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 13677b5dbe5..cf2c7f77886 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -26,6 +26,7 @@ static void dsos__add(struct dso *dso); static struct dso *dsos__find(const char *name); static struct map *map__new2(u64 start, struct dso *dso); static void kernel_maps__insert(struct map *map); +unsigned int symbol__priv_size; static struct rb_root kernel_maps; @@ -75,18 +76,17 @@ static void kernel_maps__fixup_end(void) } } -static struct symbol *symbol__new(u64 start, u64 len, const char *name, - unsigned int priv_size) +static struct symbol *symbol__new(u64 start, u64 len, const char *name) { size_t namelen = strlen(name) + 1; - struct symbol 
*self = calloc(1, priv_size + sizeof(*self) + namelen); - + struct symbol *self = calloc(1, (symbol__priv_size + + sizeof(*self) + namelen)); if (!self) return NULL; - if (priv_size) { - memset(self, 0, priv_size); - self = ((void *)self) + priv_size; + if (symbol__priv_size) { + memset(self, 0, symbol__priv_size); + self = ((void *)self) + symbol__priv_size; } self->start = start; self->end = len ? start + len - 1 : start; @@ -98,9 +98,9 @@ static struct symbol *symbol__new(u64 start, u64 len, const char *name, return self; } -static void symbol__delete(struct symbol *self, unsigned int priv_size) +static void symbol__delete(struct symbol *self) { - free(((void *)self) - priv_size); + free(((void *)self) - symbol__priv_size); } static size_t symbol__fprintf(struct symbol *self, FILE *fp) @@ -109,7 +109,7 @@ static size_t symbol__fprintf(struct symbol *self, FILE *fp) self->start, self->end, self->name); } -struct dso *dso__new(const char *name, unsigned int sym_priv_size) +struct dso *dso__new(const char *name) { struct dso *self = malloc(sizeof(*self) + strlen(name) + 1); @@ -118,7 +118,6 @@ struct dso *dso__new(const char *name, unsigned int sym_priv_size) self->long_name = self->name; self->short_name = self->name; self->syms = RB_ROOT; - self->sym_priv_size = sym_priv_size; self->find_symbol = dso__find_symbol; self->slen_calculated = 0; self->origin = DSO__ORIG_NOT_FOUND; @@ -136,7 +135,7 @@ static void dso__delete_symbols(struct dso *self) pos = rb_entry(next, struct symbol, rb_node); next = rb_next(&pos->rb_node); rb_erase(&pos->rb_node, &self->syms); - symbol__delete(pos, self->sym_priv_size); + symbol__delete(pos); } } @@ -250,8 +249,7 @@ static int kernel_maps__load_all_kallsyms(void) /* * Will fix up the end later, when we have all symbols sorted. 
*/ - sym = symbol__new(start, 0, symbol_name, - kernel_map->dso->sym_priv_size); + sym = symbol__new(start, 0, symbol_name); if (sym == NULL) goto out_delete_line; @@ -317,8 +315,7 @@ static int kernel_maps__split_kallsyms(symbol_filter_t filter, int use_modules) snprintf(dso_name, sizeof(dso_name), "[kernel].%d", kernel_range++); - dso = dso__new(dso_name, - kernel_map->dso->sym_priv_size); + dso = dso__new(dso_name); if (dso == NULL) return -1; @@ -336,7 +333,7 @@ static int kernel_maps__split_kallsyms(symbol_filter_t filter, int use_modules) if (filter && filter(map, pos)) { delete_symbol: rb_erase(&pos->rb_node, &kernel_map->dso->syms); - symbol__delete(pos, kernel_map->dso->sym_priv_size); + symbol__delete(pos); } else { if (map != kernel_map) { rb_erase(&pos->rb_node, &kernel_map->dso->syms); @@ -417,14 +414,13 @@ static int dso__load_perf_map(struct dso *self, struct map *map, if (len + 2 >= line_len) continue; - sym = symbol__new(start, size, line + len, - self->sym_priv_size); + sym = symbol__new(start, size, line + len); if (sym == NULL) goto out_delete_line; if (filter && filter(map, sym)) - symbol__delete(sym, self->sym_priv_size); + symbol__delete(sym); else { dso__insert_symbol(self, sym); nr_syms++; @@ -616,7 +612,7 @@ static int dso__synthesize_plt_symbols(struct dso *self) "%s@plt", elf_sym__name(&sym, symstrs)); f = symbol__new(plt_offset, shdr_plt.sh_entsize, - sympltname, self->sym_priv_size); + sympltname); if (!f) goto out_elf_end; @@ -634,7 +630,7 @@ static int dso__synthesize_plt_symbols(struct dso *self) "%s@plt", elf_sym__name(&sym, symstrs)); f = symbol__new(plt_offset, shdr_plt.sh_entsize, - sympltname, self->sym_priv_size); + sympltname); if (!f) goto out_elf_end; @@ -769,7 +765,7 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, if (kmodule) start += map->start + shdr.sh_offset; - curr_dso = dso__new(dso_name, self->sym_priv_size); + curr_dso = dso__new(dso_name); if (curr_dso == NULL) goto out_elf_end; curr_map = map__new2(start, curr_dso); @@ -803,14 +799,13 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, if (demangled != NULL) elf_name = demangled; new_symbol: - f = symbol__new(sym.st_value, sym.st_size, elf_name, - curr_dso->sym_priv_size); + f = symbol__new(sym.st_value, sym.st_size, elf_name); free(demangled); if (!f) goto out_elf_end; if (filter && filter(curr_map, f)) - symbol__delete(f, curr_dso->sym_priv_size); + symbol__delete(f); else { dso__insert_symbol(curr_dso, f); nr++; @@ -1141,7 +1136,7 @@ static struct map *map__new2(u64 start, struct dso *dso) return self; } -static int dsos__load_modules(unsigned int sym_priv_size) +static int dsos__load_modules(void) { char *line = NULL; size_t n; @@ -1180,7 +1175,7 @@ static int dsos__load_modules(unsigned int sym_priv_size) *sep = '\0'; snprintf(name, sizeof(name), "[%s]", line); - dso = dso__new(name, sym_priv_size); + dso = dso__new(name); if (dso == NULL) goto out_delete_line; @@ -1224,11 +1219,11 @@ static int dso__load_vmlinux(struct dso *self, struct map *map, return err; } -int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, - symbol_filter_t filter, int use_modules) +int dsos__load_kernel(const char *vmlinux, symbol_filter_t filter, + int use_modules) { int err = -1; - struct dso *dso = dso__new(vmlinux, sym_priv_size); + struct dso *dso = dso__new(vmlinux); if (dso == NULL) return -1; @@ -1240,7 +1235,7 @@ int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, kernel_map->map_ip = 
kernel_map->unmap_ip = identity__map_ip; - if (use_modules && dsos__load_modules(sym_priv_size) < 0) { + if (use_modules && dsos__load_modules() < 0) { pr_warning("Failed to load list of modules in use! " "Continuing...\n"); use_modules = 0; @@ -1312,12 +1307,12 @@ static struct dso *dsos__find(const char *name) return NULL; } -struct dso *dsos__findnew(const char *name, unsigned int sym_priv_size) +struct dso *dsos__findnew(const char *name) { struct dso *dso = dsos__find(name); if (!dso) { - dso = dso__new(name, sym_priv_size); + dso = dso__new(name); if (dso != NULL) dsos__add(dso); } @@ -1333,13 +1328,12 @@ void dsos__fprintf(FILE *fp) dso__fprintf(pos, fp); } -int load_kernel(unsigned int sym_priv_size, symbol_filter_t filter) +int load_kernel(symbol_filter_t filter) { - if (dsos__load_kernel(vmlinux_name, sym_priv_size, filter, - modules) <= 0) + if (dsos__load_kernel(vmlinux_name, filter, modules) <= 0) return -1; - vdso = dso__new("[vdso]", 0); + vdso = dso__new("[vdso]"); if (!vdso) return -1; @@ -1348,7 +1342,8 @@ int load_kernel(unsigned int sym_priv_size, symbol_filter_t filter) return 0; } -void symbol__init(void) +void symbol__init(unsigned int priv_size) { elf_version(EV_CURRENT); + symbol__priv_size = priv_size; } diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 432edbca780..a471a384073 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -39,11 +39,17 @@ struct symbol { char name[0]; }; +extern unsigned int symbol__priv_size; + +static inline void *symbol__priv(struct symbol *self) +{ + return ((void *)self) - symbol__priv_size; +} + struct dso { struct list_head node; struct rb_root syms; struct symbol *(*find_symbol)(struct dso *, u64 ip); - unsigned int sym_priv_size; unsigned char adjust_symbols; unsigned char slen_calculated; bool loaded; @@ -53,28 +59,22 @@ struct dso { char name[0]; }; -struct dso *dso__new(const char *name, unsigned int sym_priv_size); +struct dso *dso__new(const char *name); void dso__delete(struct dso *self); -static inline void *dso__sym_priv(struct dso *self, struct symbol *sym) -{ - return ((void *)sym) - self->sym_priv_size; -} - struct symbol *dso__find_symbol(struct dso *self, u64 ip); -int dsos__load_kernel(const char *vmlinux, unsigned int sym_priv_size, - symbol_filter_t filter, int modules); -struct dso *dsos__findnew(const char *name, unsigned int sym_priv_size); +int dsos__load_kernel(const char *vmlinux, symbol_filter_t filter, int modules); +struct dso *dsos__findnew(const char *name); int dso__load(struct dso *self, struct map *map, symbol_filter_t filter); void dsos__fprintf(FILE *fp); size_t dso__fprintf(struct dso *self, FILE *fp); char dso__symtab_origin(const struct dso *self); -int load_kernel(unsigned int sym_priv_size, symbol_filter_t filter); +int load_kernel(symbol_filter_t filter); -void symbol__init(void); +void symbol__init(unsigned int priv_size); extern struct list_head dsos; extern struct map *kernel_map; From d70a5402f9c2e2671b809363616b3508b4c5a565 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 30 Oct 2009 16:28:25 -0200 Subject: [PATCH 210/470] perf tools: Improve message about missing symtabs for deleted DSOs Instead of: no symbols found in /usr/lib/gstreamer-0.10/libgsttypefindfunctions.so (deleted), maybe install a debug package? no symbols found in /usr/lib/gstreamer-0.10/libgstaudioconvert.so (deleted), maybe install a debug package? 
We now emit: /usr/lib/gstreamer-0.10/libgsttypefindfunctions.so was updated, restart the long running apps that use it! /usr/lib/gstreamer-0.10/libgstaudioconvert.so was updated, restart the long running apps that use it! Which is far less misleading about what the cause of the symbol mismatch is. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith LKML-Reference: <1256927305-4628-3-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/map.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 679011c1b6d..f1e21695542 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -75,6 +75,8 @@ out_delete: return NULL; } +#define DSO__DELETED "(deleted)" + struct symbol * map__find_symbol(struct map *self, u64 ip, symbol_filter_t filter) { @@ -86,8 +88,18 @@ map__find_symbol(struct map *self, u64 ip, symbol_filter_t filter) self->dso->long_name); return NULL; } else if (nr == 0) { - pr_warning("No symbols found in %s, maybe install a debug package?\n", - self->dso->long_name); + const char *name = self->dso->long_name; + const size_t len = strlen(name); + const size_t real_len = len - sizeof(DSO__DELETED); + + if (len > sizeof(DSO__DELETED) && + strcmp(name + real_len + 1, DSO__DELETED) == 0) + pr_warning("%.*s was updated, restart the " + "long running apps that use it!\n", + real_len, name); + else + pr_warning("no symbols found in %s, maybe " + "install a debug package?\n", name); return NULL; } } From 900b20d5900045fb9b48f2fb3d80cbdbae3f44c0 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 2 Nov 2009 19:25:25 +0100 Subject: [PATCH 211/470] perf tools: Fix missing symtabs printouts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix: util/map.c: In function ‘map__find_symbol’: util/map.c:97: error: field precision should have type ‘int’, but argument 3 has type ‘size_t’ Also clean up some line wrap damage - we dont line-wrap printk messages. 
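(Editorial aside, not part of the patch: a minimal standalone illustration of the warning being fixed. The '*' field precision in a printf-style format consumes an int argument, so a size_t length has to be cast explicitly; the file name below is made up.)

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *name = "libfoo.so (deleted)";
		size_t real_len = strlen(name) - strlen(" (deleted)");

		/* printf("%.*s\n", real_len, name);  -- precision is size_t, triggers the warning */
		printf("%.*s\n", (int)real_len, name);	/* prints "libfoo.so" */
		return 0;
	}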
Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith LKML-Reference: <1256927305-4628-3-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/map.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index f1e21695542..33f868420d7 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -93,13 +93,12 @@ map__find_symbol(struct map *self, u64 ip, symbol_filter_t filter) const size_t real_len = len - sizeof(DSO__DELETED); if (len > sizeof(DSO__DELETED) && - strcmp(name + real_len + 1, DSO__DELETED) == 0) - pr_warning("%.*s was updated, restart the " - "long running apps that use it!\n", - real_len, name); - else - pr_warning("no symbols found in %s, maybe " - "install a debug package?\n", name); + strcmp(name + real_len + 1, DSO__DELETED) == 0) { + pr_warning("%.*s was updated, restart the long running apps that use it!\n", + (int)real_len, name); + } else { + pr_warning("no symbols found in %s, maybe install a debug package?\n", name); + } return NULL; } } From 12e4db4790b1bd2b7ec70eb2a1386c00fc683740 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 3 Nov 2009 11:29:07 -0200 Subject: [PATCH 212/470] perf probe: Annotate variable initialization MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Annotate away this false positive warning on older GCCs: cc1: warnings being treated as errors builtin-probe.c: In function ‘parse_probe_event’: builtin-probe.c:72: warning: ‘nc’ is used uninitialized in this function Signed-off-by: Arnaldo Carvalho de Melo Acked-by: Masami Hiramatsu LKML-Reference: <1257254947-16789-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-probe.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index a99a366230a..81245238e34 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -69,7 +69,7 @@ static struct { static void parse_probe_point(char *arg, struct probe_point *pp) { char *ptr, *tmp; - char c, nc; + char c, nc = 0; /* * * perf probe SRC:LN From fb0459d75c1d0a4ba3cafdd2c754e7486968a676 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Fri, 25 Sep 2009 12:25:56 +0200 Subject: [PATCH 213/470] perf/core: Provide a kernel-internal interface to get to performance counters There are reasons for kernel code to ask for, and use, performance counters. For example, in CPU freq governors this tends to be a good idea, but there are other examples possible as well of course. This patch adds the needed bits to do enable this functionality; they have been tested in an experimental cpufreq driver that I'm working on, and the changes are all that I needed to access counters properly. [fweisbec@gmail.com: added pid to perf_event_create_kernel_counter so that we can profile a particular task too TODO: Have a better error reporting, don't just return NULL in fail case.] v2: Remove the wrong comment about the fact perf_event_create_kernel_counter must be called from a kernel thread. 
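(Editorial sketch, not part of the patch: roughly how kernel code could use the interface added here. Attribute setup and error handling are simplified, and at this stage of the series the creator still returns NULL rather than an ERR_PTR on failure.)

	static int measure_cycles_on_cpu0(void)
	{
		struct perf_event_attr attr = {
			.type	= PERF_TYPE_HARDWARE,
			.config	= PERF_COUNT_HW_CPU_CYCLES,
			.size	= sizeof(attr),
		};
		struct perf_event *event;

		/* Count cycles on CPU 0, across all tasks (pid == -1) */
		event = perf_event_create_kernel_counter(&attr, 0, -1);
		if (!event)
			return -EINVAL;

		/* ... let the workload of interest run ... */

		pr_info("cycles: %llu\n",
			(unsigned long long)perf_event_read_value(event));
		return perf_event_release_kernel(event);
	}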
Signed-off-by: Arjan van de Ven Acked-by: Peter Zijlstra Cc: "K.Prasad" Cc: Alan Stern Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Ingo Molnar Cc: Jan Kiszka Cc: Jiri Slaby Cc: Li Zefan Cc: Avi Kivity Cc: Paul Mackerras Cc: Mike Galbraith Cc: Masami Hiramatsu Cc: Paul Mundt Cc: Jan Kiszka Cc: Avi Kivity LKML-Reference: <20090925122556.2f8bd939@infradead.org> Signed-off-by: Frederic Weisbecker --- include/linux/perf_event.h | 6 +++ kernel/perf_event.c | 75 +++++++++++++++++++++++++++++++++++++- 2 files changed, 80 insertions(+), 1 deletion(-) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index df9d964c15f..fa151d49a2e 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -744,6 +744,12 @@ extern int hw_perf_group_sched_in(struct perf_event *group_leader, struct perf_cpu_context *cpuctx, struct perf_event_context *ctx, int cpu); extern void perf_event_update_userpage(struct perf_event *event); +extern int perf_event_release_kernel(struct perf_event *event); +extern struct perf_event * +perf_event_create_kernel_counter(struct perf_event_attr *attr, + int cpu, + pid_t pid); +extern u64 perf_event_read_value(struct perf_event *event); struct perf_sample_data { u64 type; diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 12b5ec39bf9..02d4ff041b0 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1725,6 +1725,26 @@ static int perf_release(struct inode *inode, struct file *file) return 0; } +int perf_event_release_kernel(struct perf_event *event) +{ + struct perf_event_context *ctx = event->ctx; + + WARN_ON_ONCE(ctx->parent_ctx); + mutex_lock(&ctx->mutex); + perf_event_remove_from_context(event); + mutex_unlock(&ctx->mutex); + + mutex_lock(&event->owner->perf_event_mutex); + list_del_init(&event->owner_entry); + mutex_unlock(&event->owner->perf_event_mutex); + put_task_struct(event->owner); + + free_event(event); + + return 0; +} +EXPORT_SYMBOL_GPL(perf_event_release_kernel); + static int perf_event_read_size(struct perf_event *event) { int entry = sizeof(u64); /* value */ @@ -1750,7 +1770,7 @@ static int perf_event_read_size(struct perf_event *event) return size; } -static u64 perf_event_read_value(struct perf_event *event) +u64 perf_event_read_value(struct perf_event *event) { struct perf_event *child; u64 total = 0; @@ -1761,6 +1781,7 @@ static u64 perf_event_read_value(struct perf_event *event) return total; } +EXPORT_SYMBOL_GPL(perf_event_read_value); static int perf_event_read_entry(struct perf_event *event, u64 read_format, char __user *buf) @@ -4638,6 +4659,58 @@ err_put_context: return err; } +/** + * perf_event_create_kernel_counter + * + * @attr: attributes of the counter to create + * @cpu: cpu in which the counter is bound + * @pid: task to profile + */ +struct perf_event * +perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, + pid_t pid) +{ + struct perf_event *event; + struct perf_event_context *ctx; + int err; + + /* + * Get the target context (task or percpu): + */ + + ctx = find_get_context(pid, cpu); + if (IS_ERR(ctx)) + return NULL ; + + event = perf_event_alloc(attr, cpu, ctx, NULL, + NULL, GFP_KERNEL); + err = PTR_ERR(event); + if (IS_ERR(event)) + goto err_put_context; + + event->filp = NULL; + WARN_ON_ONCE(ctx->parent_ctx); + mutex_lock(&ctx->mutex); + perf_install_in_context(ctx, event, cpu); + ++ctx->generation; + mutex_unlock(&ctx->mutex); + + event->owner = current; + get_task_struct(current); + mutex_lock(¤t->perf_event_mutex); + list_add_tail(&event->owner_entry, 
¤t->perf_event_list); + mutex_unlock(¤t->perf_event_mutex); + + return event; + +err_put_context: + if (err < 0) + put_ctx(ctx); + + return NULL; +} +EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter); + /* * inherit a event from parent task to child task: */ From 41a48d14f6991020c9bb6b93e289ca5b411ed09a Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 5 Oct 2009 19:23:06 +0900 Subject: [PATCH 214/470] x86/hw-breakpoints: Actually flush thread breakpoints in flush_thread(). flush_thread() tries to do a TIF_DEBUG check before calling in to flush_thread_hw_breakpoint() (which subsequently clears the thread flag), but for some reason, the x86 code is manually clearing TIF_DEBUG immediately before the test, so this path will never be taken. This kills off the erroneous clear_tsk_thread_flag() and lets flush_thread_hw_breakpoint() actually get invoked. Presumably folks were getting lucky with testing and the free_thread_info() -> free_thread_xstate() path was taking care of the flush there. Signed-off-by: Paul Mundt Acked-by: "K.Prasad" Cc: Ingo Molnar Cc: Alan Stern LKML-Reference: <20091005102306.GA7889@linux-sh.org> Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/process.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 2275ce5776d..cf8ee001630 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -107,8 +107,6 @@ void flush_thread(void) } #endif - clear_tsk_thread_flag(tsk, TIF_DEBUG); - if (unlikely(test_tsk_thread_flag(tsk, TIF_DEBUG))) flush_thread_hw_breakpoint(tsk); memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); From c1e530178540df26eb39f10a972d06f96302ceb4 Mon Sep 17 00:00:00 2001 From: Thiago Farina Date: Tue, 3 Nov 2009 08:28:45 -0500 Subject: [PATCH 215/470] perf: Clean up trivial style issues in builtin-help.c Pointed out by checkpatch. Signed-off-by: Thiago Farina Cc: a.p.zijlstra@chello.nl Cc: paulus@samba.org Cc: Valdis.Kletnieks@vt.edu LKML-Reference: <1257254925-5423-1-git-send-email-tfransosi@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-help.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c index 4fb8734a796..768f9c82631 100644 --- a/tools/perf/builtin-help.c +++ b/tools/perf/builtin-help.c @@ -61,8 +61,7 @@ static const char *get_man_viewer_info(const char *name) { struct man_viewer_info_list *viewer; - for (viewer = man_viewer_info_list; viewer; viewer = viewer->next) - { + for (viewer = man_viewer_info_list; viewer; viewer = viewer->next) { if (!strcasecmp(name, viewer->name)) return viewer->info; } @@ -115,7 +114,7 @@ static int check_emacsclient_version(void) return 0; } -static void exec_woman_emacs(const char* path, const char *page) +static void exec_woman_emacs(const char *path, const char *page) { if (!check_emacsclient_version()) { /* This works only with emacsclient version >= 22. 
*/ @@ -129,7 +128,7 @@ static void exec_woman_emacs(const char* path, const char *page) } } -static void exec_man_konqueror(const char* path, const char *page) +static void exec_man_konqueror(const char *path, const char *page) { const char *display = getenv("DISPLAY"); if (display && *display) { @@ -157,7 +156,7 @@ static void exec_man_konqueror(const char* path, const char *page) } } -static void exec_man_man(const char* path, const char *page) +static void exec_man_man(const char *path, const char *page) { if (!path) path = "man"; @@ -364,9 +363,8 @@ static void show_man_page(const char *perf_cmd) setup_man_path(); for (viewer = man_viewer_list; viewer; viewer = viewer->next) - { exec_viewer(viewer->name, page); /* will return when unable */ - } + if (fallback) exec_viewer(fallback, page); exec_viewer("man", page); From 6d7aa9d721c8c640066142fd9534afcdf68d7f9d Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 3 Nov 2009 15:52:18 -0200 Subject: [PATCH 216/470] perf symbols: Initialize dso->loaded Brown paper bag bug introduced in: 66bd8424cc05e800db384053bf7ab967e4658468 ("perf tools: Delay loading symtabs till we hit a map with it") Without this we were not loading any symtabs that happened to be on a DSO for which the allocated memory for ->loaded was !0. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith LKML-Reference: <1257270738-5669-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index cf2c7f77886..93c49f4685f 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -121,6 +121,7 @@ struct dso *dso__new(const char *name) self->find_symbol = dso__find_symbol; self->slen_calculated = 0; self->origin = DSO__ORIG_NOT_FOUND; + self->loaded = false; } return self; From 97eaf5300b9d0cd99c310bf8c4a0f2f3296d88a3 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 18 Oct 2009 15:33:50 +0200 Subject: [PATCH 217/470] perf/core: Add a callback to perf events A simple callback in a perf event can be used for multiple purposes. For example it is useful for triggered based events like hardware breakpoints that need a callback to dispatch a triggered breakpoint event. 
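(Editorial sketch, not part of the patch: a hypothetical caller wiring a callback through the extended creation interface below; the callback name and body are made up.)

	static void my_event_callback(struct perf_event *event, void *data)
	{
		/* dispatch the triggered event, e.g. notify whoever armed it */
	}

	/* ... */
	event = perf_event_create_kernel_counter(&attr, cpu, pid,
						 my_event_callback);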
v2: Simplify a bit the callback attribution as suggested by Paul Mackerras Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: "K.Prasad" Cc: Alan Stern Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Ingo Molnar Cc: Paul Mackerras Cc: Mike Galbraith Cc: Paul Mundt --- include/linux/perf_event.h | 7 ++++++- kernel/perf_event.c | 14 ++++++++++---- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index fa151d49a2e..8d54e6d25ee 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -544,6 +544,8 @@ struct perf_pending_entry { void (*func)(struct perf_pending_entry *); }; +typedef void (*perf_callback_t)(struct perf_event *, void *); + /** * struct perf_event - performance event kernel representation: */ @@ -639,6 +641,8 @@ struct perf_event { struct event_filter *filter; #endif + perf_callback_t callback; + #endif /* CONFIG_PERF_EVENTS */ }; @@ -748,7 +752,8 @@ extern int perf_event_release_kernel(struct perf_event *event); extern struct perf_event * perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, - pid_t pid); + pid_t pid, + perf_callback_t callback); extern u64 perf_event_read_value(struct perf_event *event); struct perf_sample_data { diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 02d4ff041b0..5087125e2a0 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4293,6 +4293,7 @@ perf_event_alloc(struct perf_event_attr *attr, struct perf_event_context *ctx, struct perf_event *group_leader, struct perf_event *parent_event, + perf_callback_t callback, gfp_t gfpflags) { const struct pmu *pmu; @@ -4335,6 +4336,11 @@ perf_event_alloc(struct perf_event_attr *attr, event->state = PERF_EVENT_STATE_INACTIVE; + if (!callback && parent_event) + callback = parent_event->callback; + + event->callback = callback; + if (attr->disabled) event->state = PERF_EVENT_STATE_OFF; @@ -4611,7 +4617,7 @@ SYSCALL_DEFINE5(perf_event_open, } event = perf_event_alloc(&attr, cpu, ctx, group_leader, - NULL, GFP_KERNEL); + NULL, NULL, GFP_KERNEL); err = PTR_ERR(event); if (IS_ERR(event)) goto err_put_context; @@ -4668,7 +4674,7 @@ err_put_context: */ struct perf_event * perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, - pid_t pid) + pid_t pid, perf_callback_t callback) { struct perf_event *event; struct perf_event_context *ctx; @@ -4683,7 +4689,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, return NULL ; event = perf_event_alloc(attr, cpu, ctx, NULL, - NULL, GFP_KERNEL); + NULL, callback, GFP_KERNEL); err = PTR_ERR(event); if (IS_ERR(event)) goto err_put_context; @@ -4736,7 +4742,7 @@ inherit_event(struct perf_event *parent_event, child_event = perf_event_alloc(&parent_event->attr, parent_event->cpu, child_ctx, group_leader, parent_event, - GFP_KERNEL); + NULL, GFP_KERNEL); if (IS_ERR(child_event)) return child_event; get_ctx(child_ctx); From 2643ce11457a99a85c5bed8dd631e35968e6ca5a Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 3 Nov 2009 21:46:10 -0200 Subject: [PATCH 218/470] perf symbols: Factor out buildid reading routine So that we can run it without having a DSO instance. 
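(Editorial sketch, not part of the patch: a hypothetical caller of the new helper, dumping a file's build-id as hex without going through a struct dso; the path is made up and error handling is minimal.)

	unsigned char bid[BUILD_ID_SIZE];
	int i, len;

	len = filename__read_build_id("/usr/lib/debug/vmlinux", bid, sizeof(bid));
	if (len > 0) {
		for (i = 0; i < len; ++i)
			printf("%02x", bid[i]);
		putchar('\n');
	}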
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith LKML-Reference: <1257291970-8208-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 51 ++++++++++++++++++++++++++-------------- tools/perf/util/symbol.h | 2 ++ 2 files changed, 36 insertions(+), 17 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index ac94d7b94f6..e7c7cdb851c 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -825,27 +825,27 @@ out_close: return err; } -#define BUILD_ID_SIZE 128 +#define BUILD_ID_SIZE 20 -static char *dso__read_build_id(struct dso *self) +int filename__read_build_id(const char *filename, void *bf, size_t size) { - int i; + int fd, err = -1; GElf_Ehdr ehdr; GElf_Shdr shdr; Elf_Data *build_id_data; Elf_Scn *sec; - char *build_id = NULL, *bid; - unsigned char *raw; Elf *elf; - int fd = open(self->long_name, O_RDONLY); + if (size < BUILD_ID_SIZE) + goto out; + + fd = open(filename, O_RDONLY); if (fd < 0) goto out; elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); if (elf == NULL) { - pr_err("%s: cannot read %s ELF file.\n", __func__, - self->long_name); + pr_err("%s: cannot read %s ELF file.\n", __func__, filename); goto out_close; } @@ -854,29 +854,46 @@ static char *dso__read_build_id(struct dso *self) goto out_elf_end; } - sec = elf_section_by_name(elf, &ehdr, &shdr, ".note.gnu.build-id", NULL); + sec = elf_section_by_name(elf, &ehdr, &shdr, + ".note.gnu.build-id", NULL); if (sec == NULL) goto out_elf_end; build_id_data = elf_getdata(sec, NULL); if (build_id_data == NULL) goto out_elf_end; - build_id = malloc(BUILD_ID_SIZE); + memcpy(bf, build_id_data->d_buf + 16, BUILD_ID_SIZE); + err = BUILD_ID_SIZE; +out_elf_end: + elf_end(elf); +out_close: + close(fd); +out: + return err; +} + +static char *dso__read_build_id(struct dso *self) +{ + int i, len; + char *build_id = NULL, *bid; + unsigned char rawbf[BUILD_ID_SIZE], *raw; + + len = filename__read_build_id(self->long_name, rawbf, sizeof(rawbf)); + if (len < 0) + goto out; + + build_id = malloc(len * 2 + 1); if (build_id == NULL) - goto out_elf_end; - raw = build_id_data->d_buf + 16; + goto out; bid = build_id; - for (i = 0; i < 20; ++i) { + raw = rawbf; + for (i = 0; i < len; ++i) { sprintf(bid, "%02x", *raw); ++raw; bid += 2; } pr_debug2("%s(%s): %s\n", __func__, self->long_name, build_id); -out_elf_end: - elf_end(elf); -out_close: - close(fd); out: return build_id; } diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 088433062dd..e0d4a583f8d 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -82,6 +82,8 @@ void dsos__fprintf(FILE *fp); size_t dso__fprintf(struct dso *self, FILE *fp); char dso__symtab_origin(const struct dso *self); +int filename__read_build_id(const char *filename, void *bf, size_t size); + int load_kernel(symbol_filter_t filter); void symbol__init(unsigned int priv_size); From c43f9d1e61e265c6bfafdd65c7f07c8d71a7efc3 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 3 Nov 2009 19:12:13 -0500 Subject: [PATCH 219/470] perf/probes: Update Documentation/perf-probe.txt Update Documentation/perf-probe.txt accoding to recent syntax changes. Signed-off-by: Masami Hiramatsu Acked-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Christoph Hellwig Cc: Frank Ch. 
Eigler Cc: Jason Baron Cc: K.Prasad Cc: Peter Zijlstra Cc: Srikar Dronamraju LKML-Reference: <20091104001212.3454.19415.stgit@harusame> Signed-off-by: Ingo Molnar --- tools/perf/Documentation/perf-probe.txt | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt index 6b6c6aecdf1..9270594e6df 100644 --- a/tools/perf/Documentation/perf-probe.txt +++ b/tools/perf/Documentation/perf-probe.txt @@ -8,7 +8,9 @@ perf-probe - Define new dynamic tracepoints SYNOPSIS -------- [verse] -'perf probe' [-k ] -P 'PROBE' [-P 'PROBE' ...] +'perf probe' [options] --add 'PROBE' [--add 'PROBE' ...] +or +'perf probe' [options] 'PROBE' ['PROBE' ...] DESCRIPTION @@ -21,26 +23,25 @@ and C local variables) with debuginfo. OPTIONS ------- -k:: ---vmlinux:: +--vmlinux=PATH:: Specify vmlinux path which has debuginfo (Dwarf binary). -v:: --verbose:: Be more verbose (show parsed arguments, etc). --P:: ---probe:: +-a:: +--add:: Define a probe point (see PROBE SYNTAX for detail) PROBE SYNTAX ------------ Probe points are defined by following syntax. - "TYPE:[GRP/]NAME FUNC[+OFFS][@SRC]|@SRC:LINE [ARG ...]" + "FUNC[+OFFS|:RLN|%return][@SRC]|SRC:ALN [ARG ...]" -'TYPE' specifies the type of probe point, you can use either "p" (kprobe) or "r" (kretprobe) for 'TYPE'. 'GRP' specifies the group name of this probe, and 'NAME' specifies the event name. If 'GRP' is omitted, "kprobes" is used for its group name. -'FUNC' and 'OFFS' specifies function and offset (in byte) where probe will be put. In addition, 'SRC' specifies a source file which has that function (this is mainly for inline functions). -You can specify a probe point by the source line number by using '@SRC:LINE' syntax, where 'SRC' is the source file path and 'LINE' is the line number. +'FUNC' specifies a probed function name, and it may have one of the following options; '+OFFS' is the offset from function entry address in bytes, 'RLN' is the relative-line number from function entry line, and '%return' means that it probes function return. In addition, 'SRC' specifies a source file which has that function. +It is also possible to specify a probe point by the source line number by using 'SRC:ALN' syntax, where 'SRC' is the source file path and 'ALN' is the line number. 'ARG' specifies the arguments of this probe point. You can use the name of local variable, or kprobe-tracer argument format (e.g. $retval, %ax, etc). SEE ALSO From a7f4328b91fb6e71dbe1fa4d46f3597c9555014d Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 3 Nov 2009 19:12:21 -0500 Subject: [PATCH 220/470] perf/probes: Improve error messages Improve error messages in perf-probe so that users can figure out problems easily. Reported-by: Ingo Molnar Signed-off-by: Masami Hiramatsu Acked-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Christoph Hellwig Cc: Frank Ch. 
Eigler Cc: Jason Baron Cc: K.Prasad Cc: Peter Zijlstra Cc: Srikar Dronamraju LKML-Reference: <20091104001221.3454.52030.stgit@harusame> Signed-off-by: Ingo Molnar --- tools/perf/builtin-probe.c | 20 +++++++++++++------- tools/perf/util/probe-finder.c | 2 +- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index 81245238e34..65bcaed0ef4 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -294,10 +294,11 @@ static int write_new_event(int fd, const char *buf) { int ret; - printf("Adding new event: %s\n", buf); ret = write(fd, buf, strlen(buf)); if (ret <= 0) - die("failed to create event."); + die("Failed to create event."); + else + printf("Added new event: %s\n", buf); return ret; } @@ -310,7 +311,7 @@ static int synthesize_probe_event(struct probe_point *pp) int i, len, ret; pp->probes[0] = buf = (char *)calloc(MAX_CMDLEN, sizeof(char)); if (!buf) - die("calloc"); + die("Failed to allocate memory by calloc."); ret = snprintf(buf, MAX_CMDLEN, "%s+%d", pp->function, pp->offset); if (ret <= 0 || ret >= MAX_CMDLEN) goto error; @@ -363,7 +364,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) if (ret == -E2BIG) semantic_error("probe point is too long."); else if (ret < 0) - die("snprintf"); + die("Failed to synthesize a probe point."); } #ifndef NO_LIBDWARF @@ -375,7 +376,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) else fd = open_default_vmlinux(); if (fd < 0) - die("vmlinux/module file open"); + die("Could not open vmlinux/module file."); /* Searching probe points */ for (j = 0; j < session.nr_probe; j++) { @@ -396,8 +397,13 @@ setup_probes: /* Settng up probe points */ snprintf(buf, MAX_CMDLEN, "%s/../kprobe_events", debugfs_path); fd = open(buf, O_WRONLY, O_APPEND); - if (fd < 0) - die("kprobe_events open"); + if (fd < 0) { + if (errno == ENOENT) + die("kprobe_events file does not exist - please rebuild with CONFIG_KPROBE_TRACER."); + else + die("Could not open kprobe_events file: %s", + strerror(errno)); + } for (j = 0; j < session.nr_probe; j++) { pp = &session.probes[j]; if (pp->found == 1) { diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index db96186e02a..35d5a69aaf9 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -688,7 +688,7 @@ int find_probepoint(int fd, struct probe_point *pp) ret = dwarf_init(fd, DW_DLC_READ, 0, 0, &__dw_debug, &__dw_error); if (ret != DW_DLV_OK) - die("Failed to call dwarf_init(). Maybe, not a dwarf file.\n"); + die("No dwarf info found in the vmlinux - please rebuild with CONFIG_DEBUG_INFO.\n"); pp->found = 0; while (++cu_number) { From a225a1d911f0e434dc0407df29fd08e4388f3fa4 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 3 Nov 2009 19:12:30 -0500 Subject: [PATCH 221/470] perf/probes: Fall back to non-dwarf if possible Fall back to non-dwarf probe point if the probe definition may not need dwarf analysis, when perf can't find vmlinux/debuginfo. This might skip some inlined code of target function. Signed-off-by: Masami Hiramatsu Acked-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Christoph Hellwig Cc: Frank Ch. 
Eigler Cc: Jason Baron Cc: K.Prasad Cc: Peter Zijlstra Cc: Srikar Dronamraju LKML-Reference: <20091104001229.3454.63987.stgit@harusame> Signed-off-by: Ingo Molnar --- tools/perf/builtin-probe.c | 64 +++++++++++++++++++--------------- tools/perf/util/probe-finder.c | 6 ++-- 2 files changed, 40 insertions(+), 30 deletions(-) diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index 65bcaed0ef4..d111a93f220 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -189,7 +189,7 @@ static void parse_probe_event(const char *str) /* Parse probe point */ parse_probe_point(argv[0], pp); free(argv[0]); - if (pp->file) + if (pp->file || pp->line) session.need_dwarf = 1; /* Copy arguments */ @@ -347,36 +347,24 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) if (session.nr_probe == 0) usage_with_options(probe_usage, options); -#ifdef NO_LIBDWARF if (session.need_dwarf) - semantic_error("Dwarf-analysis is not supported"); -#endif - - /* Synthesize probes without dwarf */ - for (j = 0; j < session.nr_probe; j++) { -#ifndef NO_LIBDWARF - if (!session.probes[j].retprobe) { - session.need_dwarf = 1; - continue; - } -#endif - ret = synthesize_probe_event(&session.probes[j]); - if (ret == -E2BIG) - semantic_error("probe point is too long."); - else if (ret < 0) - die("Failed to synthesize a probe point."); - } - -#ifndef NO_LIBDWARF - if (!session.need_dwarf) - goto setup_probes; +#ifdef NO_LIBDWARF + semantic_error("Debuginfo-analysis is not supported"); +#else /* !NO_LIBDWARF */ + pr_info("Some probes require debuginfo.\n"); if (session.vmlinux) fd = open(session.vmlinux, O_RDONLY); else fd = open_default_vmlinux(); - if (fd < 0) - die("Could not open vmlinux/module file."); + if (fd < 0) { + if (session.need_dwarf) + die("Could not open vmlinux/module file."); + + pr_warning("Could not open vmlinux/module file." + " Try to use symbols.\n"); + goto end_dwarf; + } /* Searching probe points */ for (j = 0; j < session.nr_probe; j++) { @@ -386,14 +374,34 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) lseek(fd, SEEK_SET, 0); ret = find_probepoint(fd, pp); - if (ret <= 0) - die("No probe point found.\n"); + if (ret < 0) { + if (session.need_dwarf) + die("Could not analyze debuginfo."); + + pr_warning("An error occurred in debuginfo analysis. Try to use symbols.\n"); + break; + } + if (ret == 0) /* No error but failed to find probe point. */ + die("No probe point found."); } close(fd); -setup_probes: +end_dwarf: #endif /* !NO_LIBDWARF */ + /* Synthesize probes without dwarf */ + for (j = 0; j < session.nr_probe; j++) { + pp = &session.probes[j]; + if (pp->found) /* This probe is already found. 
*/ + continue; + + ret = synthesize_probe_event(pp); + if (ret == -E2BIG) + semantic_error("probe point is too long."); + else if (ret < 0) + die("Failed to synthesize a probe point."); + } + /* Settng up probe points */ snprintf(buf, MAX_CMDLEN, "%s/../kprobe_events", debugfs_path); fd = open(buf, O_WRONLY, O_APPEND); diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 35d5a69aaf9..293cdfc1b8c 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -687,8 +687,10 @@ int find_probepoint(int fd, struct probe_point *pp) struct probe_finder pf = {.pp = pp}; ret = dwarf_init(fd, DW_DLC_READ, 0, 0, &__dw_debug, &__dw_error); - if (ret != DW_DLV_OK) - die("No dwarf info found in the vmlinux - please rebuild with CONFIG_DEBUG_INFO.\n"); + if (ret != DW_DLV_OK) { + pr_warning("No dwarf info found in the vmlinux - please rebuild with CONFIG_DEBUG_INFO.\n"); + return -ENOENT; + } pp->found = 0; while (++cu_number) { From 91365bbe4f8c39a821f390f785d606304d6dee3c Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 3 Nov 2009 19:12:38 -0500 Subject: [PATCH 222/470] perf/probes: Rename perf probe events group name Rename the group name of perf probe events to 'probe'. Signed-off-by: Masami Hiramatsu Acked-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Jason Baron Cc: K.Prasad Cc: Peter Zijlstra Cc: Srikar Dronamraju LKML-Reference: <20091104001238.3454.70508.stgit@harusame> Signed-off-by: Ingo Molnar --- tools/perf/builtin-probe.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index d111a93f220..d78a3d94549 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -52,7 +52,7 @@ const char *default_search_path[NR_SEARCH_PATH] = { #define MAX_PATH_LEN 256 #define MAX_PROBES 128 #define MAX_PROBE_ARGS 128 -#define PERFPROBE_GROUP "perfprobe" +#define PERFPROBE_GROUP "probe" /* Session management structure */ static struct { From 77b44d1b7c28360910cdbd427fb62d485c08674c Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 3 Nov 2009 19:12:47 -0500 Subject: [PATCH 223/470] tracing/kprobes: Rename Kprobe-tracer to kprobe-event Rename Kprobes-based event tracer to kprobes-based tracing event (kprobe-event), since it is not a tracer but an extensible tracing event interface. This also changes CONFIG_KPROBE_TRACER to CONFIG_KPROBE_EVENT and sets it y by default. Signed-off-by: Masami Hiramatsu Acked-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Christoph Hellwig Cc: Frank Ch. 
Eigler Cc: Jason Baron Cc: K.Prasad Cc: Peter Zijlstra Cc: Srikar Dronamraju LKML-Reference: <20091104001247.3454.14131.stgit@harusame> Signed-off-by: Ingo Molnar --- Documentation/trace/kprobetrace.txt | 34 ++++++++++++++--------------- kernel/trace/Kconfig | 19 ++++++++++------ kernel/trace/Makefile | 2 +- kernel/trace/trace_kprobe.c | 6 ++--- 4 files changed, 31 insertions(+), 30 deletions(-) diff --git a/Documentation/trace/kprobetrace.txt b/Documentation/trace/kprobetrace.txt index 15415243a9a..47aabeebbdf 100644 --- a/Documentation/trace/kprobetrace.txt +++ b/Documentation/trace/kprobetrace.txt @@ -1,26 +1,23 @@ - Kprobe-based Event Tracer - ========================= + Kprobe-based Event Tracing + ========================== Documentation is written by Masami Hiramatsu Overview -------- -This tracer is similar to the events tracer which is based on Tracepoint -infrastructure. Instead of Tracepoint, this tracer is based on kprobes(kprobe -and kretprobe). It probes anywhere where kprobes can probe(this means, all -functions body except for __kprobes functions). +These events are similar to tracepoint based events. Instead of Tracepoint, +this is based on kprobes (kprobe and kretprobe). So it can probe wherever +kprobes can probe (this means, all functions body except for __kprobes +functions). Unlike the Tracepoint based event, this can be added and removed +dynamically, on the fly. -Unlike the function tracer, this tracer can probe instructions inside of -kernel functions. It allows you to check which instruction has been executed. +To enable this feature, build your kernel with CONFIG_KPROBE_TRACING=y. -Unlike the Tracepoint based events tracer, this tracer can add and remove -probe points on the fly. - -Similar to the events tracer, this tracer doesn't need to be activated via -current_tracer, instead of that, just set probe points via -/sys/kernel/debug/tracing/kprobe_events. And you can set filters on each -probe events via /sys/kernel/debug/tracing/events/kprobes//filter. +Similar to the events tracer, this doesn't need to be activated via +current_tracer. Instead of that, add probe points via +/sys/kernel/debug/tracing/kprobe_events, and enable it via +/sys/kernel/debug/tracing/events/kprobes//enabled. Synopsis of kprobe_events @@ -55,9 +52,9 @@ Per-Probe Event Filtering ------------------------- Per-probe event filtering feature allows you to set different filter on each probe and gives you what arguments will be shown in trace buffer. If an event -name is specified right after 'p:' or 'r:' in kprobe_events, the tracer adds -an event under tracing/events/kprobes/, at the directory you can see -'id', 'enabled', 'format' and 'filter'. +name is specified right after 'p:' or 'r:' in kprobe_events, it adds an event +under tracing/events/kprobes/, at the directory you can see 'id', +'enabled', 'format' and 'filter'. enabled: You can enable/disable the probe by writing 1 or 0 on it. @@ -71,6 +68,7 @@ filter: id: This shows the id of this probe event. + Event Profiling --------------- You can check the total number of probe hits and probe miss-hits via diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 15372a9f239..f05671609a8 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -428,17 +428,22 @@ config BLK_DEV_IO_TRACE If unsure, say N. 
-config KPROBE_TRACER +config KPROBE_EVENT depends on KPROBES depends on X86 - bool "Trace kprobes" + bool "Enable kprobes-based dynamic events" select TRACING - select GENERIC_TRACER + default y help - This tracer probes everywhere where kprobes can probe it, and - records various registers and memories specified by user. - This also allows you to trace kprobe probe points as a dynamic - defined events. It provides per-probe event filtering interface. + This allows the user to add tracing events (similar to tracepoints) on the fly + via the ftrace interface. See Documentation/trace/kprobetrace.txt + for more details. + + Those events can be inserted wherever kprobes can probe, and record + various register and memory values. + + This option is also required by perf-probe subcommand of perf tools. If + you want to use perf tools, this option is strongly recommended. config DYNAMIC_FTRACE bool "enable/disable ftrace tracepoints dynamically" diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index c8cb75d7f28..edc3a3cca1a 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -53,7 +53,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_export.o obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o -obj-$(CONFIG_KPROBE_TRACER) += trace_kprobe.o +obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o obj-$(CONFIG_EVENT_TRACING) += power-traces.o libftrace-y := ftrace.o diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index a86c3ac0df2..cf17a6694f3 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1,5 +1,5 @@ /* - * kprobe based kernel tracer + * Kprobes-based tracing events * * Created by Masami Hiramatsu * @@ -57,8 +57,6 @@ const char *reserved_field_names[] = { FIELD_STRING_FUNC, }; -/* currently, trace_kprobe only supports X86. */ - struct fetch_func { unsigned long (*func)(struct pt_regs *, void *); void *data; @@ -191,7 +189,7 @@ static __kprobes void free_indirect_fetch_data(struct indirect_fetch_data *data) } /** - * Kprobe tracer core functions + * Kprobe event core functions */ struct probe_arg { From 09879b99d44d701c603935ef2549004405d7f8f9 Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Wed, 4 Nov 2009 12:58:15 +0900 Subject: [PATCH 224/470] x86: Gitignore: arch/x86/lib/inat-tables.c Ignore generated file arch/x86/lib/inat-tables.c. Signed-off-by: Hiroshi Shimamoto Acked-by: Masami Hiramatsu LKML-Reference: <4AF0FBD7.7000501@ct.jp.nec.com> Signed-off-by: Ingo Molnar --- arch/x86/lib/.gitignore | 1 + 1 file changed, 1 insertion(+) create mode 100644 arch/x86/lib/.gitignore diff --git a/arch/x86/lib/.gitignore b/arch/x86/lib/.gitignore new file mode 100644 index 00000000000..8df89f0a3fe --- /dev/null +++ b/arch/x86/lib/.gitignore @@ -0,0 +1 @@ +inat-tables.c From 2da3e160cb3d226d87b907fab26850d838ed8d7c Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 5 Nov 2009 23:06:50 +0100 Subject: [PATCH 225/470] hw-breakpoint: Move asm-generic/hw_breakpoint.h to linux/hw_breakpoint.h We plan to make the breakpoints parameters generic among architectures. For that it's better to move the asm-generic header to a generic linux header. 
Signed-off-by: Frederic Weisbecker --- arch/x86/include/asm/hw_breakpoint.h | 2 +- include/{asm-generic => linux}/hw_breakpoint.h | 9 +++------ 2 files changed, 4 insertions(+), 7 deletions(-) rename include/{asm-generic => linux}/hw_breakpoint.h (96%) diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h index 1acb4d45de7..3cfca8e2b5f 100644 --- a/arch/x86/include/asm/hw_breakpoint.h +++ b/arch/x86/include/asm/hw_breakpoint.h @@ -12,7 +12,7 @@ struct arch_hw_breakpoint { }; #include -#include +#include /* Available HW breakpoint length encodings */ #define HW_BREAKPOINT_LEN_1 0x40 diff --git a/include/asm-generic/hw_breakpoint.h b/include/linux/hw_breakpoint.h similarity index 96% rename from include/asm-generic/hw_breakpoint.h rename to include/linux/hw_breakpoint.h index 9bf2d12eb74..61ccc8f17ea 100644 --- a/include/asm-generic/hw_breakpoint.h +++ b/include/linux/hw_breakpoint.h @@ -1,9 +1,6 @@ -#ifndef _ASM_GENERIC_HW_BREAKPOINT_H -#define _ASM_GENERIC_HW_BREAKPOINT_H +#ifndef _LINUX_HW_BREAKPOINT_H +#define _LINUX_HW_BREAKPOINT_H -#ifndef __ARCH_HW_BREAKPOINT_H -#error "Please don't include this file directly" -#endif #ifdef __KERNEL__ #include @@ -136,4 +133,4 @@ extern void unregister_kernel_hw_breakpoint(struct hw_breakpoint *bp); extern unsigned int hbp_kernel_pos; #endif /* __KERNEL__ */ -#endif /* _ASM_GENERIC_HW_BREAKPOINT_H */ +#endif /* _LINUX_HW_BREAKPOINT_H */ From c426bba069e65ea438880a04aa4e7c5b880e1728 Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Thu, 5 Nov 2009 09:31:31 +0900 Subject: [PATCH 226/470] perf bench: Add new directory and header for new subcommand 'bench' This patch adds bench/ directory and bench/bench.h. bench/ directory will contain modules for bench subcommand. bench/bench.h is for listing prototypes of module functions. Signed-off-by: Hitoshi Mitake Cc: Rusty Russell Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Arnaldo Carvalho de Melo Cc: fweisbec@gmail.com Cc: Jiri Kosina LKML-Reference: <1257381097-4743-2-git-send-email-mitake@dcl.info.waseda.ac.jp> Signed-off-by: Ingo Molnar --- tools/perf/bench/bench.h | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 tools/perf/bench/bench.h diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h new file mode 100644 index 00000000000..59adb279cd7 --- /dev/null +++ b/tools/perf/bench/bench.h @@ -0,0 +1,9 @@ +#ifndef BENCH_H +#define BENCH_H + +extern int bench_sched_messaging(int argc, const char **argv, + const char *prefix); +extern int bench_sched_pipe(int argc, const char **argv, + const char *prefix); + +#endif From e27454cc6352c4226ddc76f5e3a5dedd7dff456a Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Thu, 5 Nov 2009 09:31:32 +0900 Subject: [PATCH 227/470] perf bench: Add sched-messaging.c: Benchmark for scheduler and IPC mechanisms based on hackbench This patch adds bench/sched-messaging.c. This benchmark measures performance of scheduler and IPC mechanisms, and is based on hackbench by Rusty Russell. Example of usage: % perf bench sched messaging -g 20 -l 1000 -s 5.432 # in sec % perf bench sched messaging # run with default options (20 sender and receiver processes per group) (10 groups == 400 processes run) Total time:0.308 sec % perf bench sched messaging -t -g 20 # # be multi-thread, with 20 groups (20 sender and receiver threads per group) (20 groups == 800 threads run) Total time:0.582 sec ( Rusty is the original author of hackbench.c and he said the code is and was under the GPLv2 so fine to be merged. 
) Signed-off-by: Hitoshi Mitake Acked-by: Rusty Russell Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Arnaldo Carvalho de Melo Cc: fweisbec@gmail.com Cc: Jiri Kosina LKML-Reference: <1257381097-4743-3-git-send-email-mitake@dcl.info.waseda.ac.jp> Signed-off-by: Ingo Molnar --- tools/perf/bench/sched-messaging.c | 332 +++++++++++++++++++++++++++++ 1 file changed, 332 insertions(+) create mode 100644 tools/perf/bench/sched-messaging.c diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c new file mode 100644 index 00000000000..36b62c549e2 --- /dev/null +++ b/tools/perf/bench/sched-messaging.c @@ -0,0 +1,332 @@ +/* + * + * builtin-bench-messaging.c + * + * messaging: Benchmark for scheduler and IPC mechanisms + * + * Based on hackbench by Rusty Russell + * Ported to perf by Hitoshi Mitake + * + */ + +#include "../perf.h" +#include "../util/util.h" +#include "../util/parse-options.h" +#include "../builtin.h" +#include "bench.h" + +/* Test groups of 20 processes spraying to 20 receivers */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DATASIZE 100 + +static int use_pipes = 0; +static unsigned int loops = 100; +static unsigned int thread_mode = 0; +static unsigned int num_groups = 10; +static int simple = 0; + +struct sender_context { + unsigned int num_fds; + int ready_out; + int wakefd; + int out_fds[0]; +}; + +struct receiver_context { + unsigned int num_packets; + int in_fds[2]; + int ready_out; + int wakefd; +}; + +static void barf(const char *msg) +{ + fprintf(stderr, "%s (error: %s)\n", msg, strerror(errno)); + exit(1); +} + +static void fdpair(int fds[2]) +{ + if (use_pipes) { + if (pipe(fds) == 0) + return; + } else { + if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == 0) + return; + } + + barf(use_pipes ? "pipe()" : "socketpair()"); +} + +/* Block until we're ready to go */ +static void ready(int ready_out, int wakefd) +{ + char dummy; + struct pollfd pollfd = { .fd = wakefd, .events = POLLIN }; + + /* Tell them we're ready. */ + if (write(ready_out, &dummy, 1) != 1) + barf("CLIENT: ready write"); + + /* Wait for "GO" signal */ + if (poll(&pollfd, 1, -1) != 1) + barf("poll"); +} + +/* Sender sprays loops messages down each file descriptor */ +static void *sender(struct sender_context *ctx) +{ + char data[DATASIZE]; + unsigned int i, j; + + ready(ctx->ready_out, ctx->wakefd); + + /* Now pump to every receiver. */ + for (i = 0; i < loops; i++) { + for (j = 0; j < ctx->num_fds; j++) { + int ret, done = 0; + +again: + ret = write(ctx->out_fds[j], data + done, + sizeof(data)-done); + if (ret < 0) + barf("SENDER: write"); + done += ret; + if (done < DATASIZE) + goto again; + } + } + + return NULL; +} + + +/* One receiver per fd */ +static void *receiver(struct receiver_context* ctx) +{ + unsigned int i; + + if (!thread_mode) + close(ctx->in_fds[1]); + + /* Wait for start... */ + ready(ctx->ready_out, ctx->wakefd); + + /* Receive them all */ + for (i = 0; i < ctx->num_packets; i++) { + char data[DATASIZE]; + int ret, done = 0; + +again: + ret = read(ctx->in_fds[0], data + done, DATASIZE - done); + if (ret < 0) + barf("SERVER: read"); + done += ret; + if (done < DATASIZE) + goto again; + } + + return NULL; +} + +static pthread_t create_worker(void *ctx, void *(*func)(void *)) +{ + pthread_attr_t attr; + pthread_t childid; + int err; + + if (!thread_mode) { + /* process mode */ + /* Fork the receiver. 
*/ + switch (fork()) { + case -1: + barf("fork()"); + break; + case 0: + (*func) (ctx); + exit(0); + break; + default: + break; + } + + return (pthread_t)0; + } + + if (pthread_attr_init(&attr) != 0) + barf("pthread_attr_init:"); + +#ifndef __ia64__ + if (pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN) != 0) + barf("pthread_attr_setstacksize"); +#endif + + err = pthread_create(&childid, &attr, func, ctx); + if (err != 0) { + fprintf(stderr, "pthread_create failed: %s (%d)\n", + strerror(err), err); + exit(-1); + } + return childid; +} + +static void reap_worker(pthread_t id) +{ + int proc_status; + void *thread_status; + + if (!thread_mode) { + /* process mode */ + wait(&proc_status); + if (!WIFEXITED(proc_status)) + exit(1); + } else { + pthread_join(id, &thread_status); + } +} + +/* One group of senders and receivers */ +static unsigned int group(pthread_t *pth, + unsigned int num_fds, + int ready_out, + int wakefd) +{ + unsigned int i; + struct sender_context *snd_ctx = malloc(sizeof(struct sender_context) + + num_fds * sizeof(int)); + + if (!snd_ctx) + barf("malloc()"); + + for (i = 0; i < num_fds; i++) { + int fds[2]; + struct receiver_context *ctx = malloc(sizeof(*ctx)); + + if (!ctx) + barf("malloc()"); + + + /* Create the pipe between client and server */ + fdpair(fds); + + ctx->num_packets = num_fds * loops; + ctx->in_fds[0] = fds[0]; + ctx->in_fds[1] = fds[1]; + ctx->ready_out = ready_out; + ctx->wakefd = wakefd; + + pth[i] = create_worker(ctx, (void *)receiver); + + snd_ctx->out_fds[i] = fds[1]; + if (!thread_mode) + close(fds[0]); + } + + /* Now we have all the fds, fork the senders */ + for (i = 0; i < num_fds; i++) { + snd_ctx->ready_out = ready_out; + snd_ctx->wakefd = wakefd; + snd_ctx->num_fds = num_fds; + + pth[num_fds+i] = create_worker(snd_ctx, (void *)sender); + } + + /* Close the fds we have left */ + if (!thread_mode) + for (i = 0; i < num_fds; i++) + close(snd_ctx->out_fds[i]); + + /* Return number of children to reap */ + return num_fds * 2; +} + +static const struct option options[] = { + OPT_BOOLEAN('p', "pipe", &use_pipes, + "Use pipe() instead of socketpair()"), + OPT_BOOLEAN('t', "thread", &thread_mode, + "Be multi thread instead of multi process"), + OPT_INTEGER('g', "group", &num_groups, + "Specify number of groups"), + OPT_INTEGER('l', "loop", &loops, + "Specify number of loops"), + OPT_BOOLEAN('s', "simple-output", &simple, + "Do simple output (this maybe useful for" + "processing by scripts or graph tools like gnuplot)"), + OPT_END() +}; + +static const char * const bench_sched_message_usage[] = { + "perf bench sched messaging ", + NULL +}; + +int bench_sched_messaging(int argc, const char **argv, + const char *prefix __used) +{ + unsigned int i, total_children; + struct timeval start, stop, diff; + unsigned int num_fds = 20; + int readyfds[2], wakefds[2]; + char dummy; + pthread_t *pth_tab; + + argc = parse_options(argc, argv, options, + bench_sched_message_usage, 0); + + pth_tab = malloc(num_fds * 2 * num_groups * sizeof(pthread_t)); + if (!pth_tab) + barf("main:malloc()"); + + fdpair(readyfds); + fdpair(wakefds); + + total_children = 0; + for (i = 0; i < num_groups; i++) + total_children += group(pth_tab+total_children, num_fds, + readyfds[1], wakefds[0]); + + /* Wait for everyone to be ready */ + for (i = 0; i < total_children; i++) + if (read(readyfds[0], &dummy, 1) != 1) + barf("Reading for readyfds"); + + gettimeofday(&start, NULL); + + /* Kick them off */ + if (write(wakefds[1], &dummy, 1) != 1) + barf("Writing to start them"); + + /* Reap 
them all */ + for (i = 0; i < total_children; i++) + reap_worker(pth_tab[i]); + + gettimeofday(&stop, NULL); + + timersub(&stop, &start, &diff); + + if (simple) + printf("%lu.%03lu\n", diff.tv_sec, diff.tv_usec/1000); + else { + printf("(%d sender and receiver %s per group)\n", + num_fds, thread_mode ? "threads" : "processes"); + printf("(%d groups == %d %s run)\n\n", + num_groups, num_groups * 2 * num_fds, + thread_mode ? "threads" : "processes"); + printf("\tTotal time:%lu.%03lu sec\n", + diff.tv_sec, diff.tv_usec/1000); + } + + return 0; +} From c7d9300f367f480aee4663a0e3695c5b48859a1a Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Thu, 5 Nov 2009 09:31:33 +0900 Subject: [PATCH 228/470] perf bench: Add sched-pipe.c: Benchmark for pipe() system call This patch adds bench/sched-pipe.c. bench/sched-pipe.c is a benchmark program to measure performance of pipe() system call. This benchmark is based on pipe-test-1m.c by Ingo Molnar: http://people.redhat.com/mingo/cfs-scheduler/tools/pipe-test-1m.c Example of use: % perf bench sched pipe (executing 1000000 pipe operations between two tasks) Total time:4.499 sec 4.499179 usecs/op 222262 ops/sec % perf bench sched pipe -s -l 1000 0.015 Signed-off-by: Hitoshi Mitake Cc: Rusty Russell Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Arnaldo Carvalho de Melo Cc: fweisbec@gmail.com Cc: Jiri Kosina LKML-Reference: <1257381097-4743-4-git-send-email-mitake@dcl.info.waseda.ac.jp> Signed-off-by: Ingo Molnar --- tools/perf/bench/sched-pipe.c | 113 ++++++++++++++++++++++++++++++++++ 1 file changed, 113 insertions(+) create mode 100644 tools/perf/bench/sched-pipe.c diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c new file mode 100644 index 00000000000..3214ed20b1a --- /dev/null +++ b/tools/perf/bench/sched-pipe.c @@ -0,0 +1,113 @@ +/* + * + * builtin-bench-pipe.c + * + * pipe: Benchmark for pipe() + * + * Based on pipe-test-1m.c by Ingo Molnar + * http://people.redhat.com/mingo/cfs-scheduler/tools/pipe-test-1m.c + * Ported to perf by Hitoshi Mitake + * + */ + +#include "../perf.h" +#include "../util/util.h" +#include "../util/parse-options.h" +#include "../builtin.h" +#include "bench.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define LOOPS_DEFAULT 1000000 +static int loops = LOOPS_DEFAULT; +static int simple = 0; + +static const struct option options[] = { + OPT_INTEGER('l', "loop", &loops, + "Specify number of loops"), + OPT_BOOLEAN('s', "simple-output", &simple, + "Do simple output (this maybe useful for" + "processing by scripts or graph tools like gnuplot)"), + OPT_END() +}; + +static const char * const bench_sched_pipe_usage[] = { + "perf bench sched pipe ", + NULL +}; + +int bench_sched_pipe(int argc, const char **argv, + const char *prefix __used) +{ + int pipe_1[2], pipe_2[2]; + int m = 0, i; + struct timeval start, stop, diff; + unsigned long long result_usec = 0; + + /* + * why does "ret" exist? 
+ * discarding returned value of read(), write() + * causes error in building environment for perf + */ + int ret; + pid_t pid; + + argc = parse_options(argc, argv, options, + bench_sched_pipe_usage, 0); + + assert(!pipe(pipe_1)); + assert(!pipe(pipe_2)); + + pid = fork(); + assert(pid >= 0); + + gettimeofday(&start, NULL); + + if (!pid) { + for (i = 0; i < loops; i++) { + ret = read(pipe_1[0], &m, sizeof(int)); + ret = write(pipe_2[1], &m, sizeof(int)); + } + } else { + for (i = 0; i < loops; i++) { + ret = write(pipe_1[1], &m, sizeof(int)); + ret = read(pipe_2[0], &m, sizeof(int)); + } + } + + gettimeofday(&stop, NULL); + timersub(&stop, &start, &diff); + + if (pid) + return 0; + + if (simple) + printf("%lu.%03lu\n", + diff.tv_sec, diff.tv_usec / 1000); + else { + printf("(executing %d pipe operations between two tasks)\n\n", + loops); + + result_usec = diff.tv_sec * 1000000; + result_usec += diff.tv_usec; + + printf("\tTotal time:%lu.%03lu sec\n", + diff.tv_sec, diff.tv_usec / 1000); + printf("\t\t%lf usecs/op\n", + (double)result_usec / (double)loops); + printf("\t\t%d ops/sec\n", + (int)((double)loops / + ((double)result_usec / (double)1000000))); + } + + return 0; +} From 629cc356653719c206a05f4dee5c5e242edb6546 Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Thu, 5 Nov 2009 09:31:34 +0900 Subject: [PATCH 229/470] perf bench: Add builtin-bench.c: General framework for benchmark suites This patch adds builtin-bench.c builtin-bench.c is a general framework for benchmark suites. Signed-off-by: Hitoshi Mitake Cc: Rusty Russell Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Arnaldo Carvalho de Melo Cc: fweisbec@gmail.com Cc: Jiri Kosina LKML-Reference: <1257381097-4743-5-git-send-email-mitake@dcl.info.waseda.ac.jp> Signed-off-by: Ingo Molnar --- tools/perf/builtin-bench.c | 128 +++++++++++++++++++++++++++++++++++++ 1 file changed, 128 insertions(+) create mode 100644 tools/perf/builtin-bench.c diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c new file mode 100644 index 00000000000..31f41643b0c --- /dev/null +++ b/tools/perf/builtin-bench.c @@ -0,0 +1,128 @@ +/* + * + * builtin-bench.c + * + * General benchmarking subsystem provided by perf + * + * Copyright (C) 2009, Hitoshi Mitake + * + */ + +/* + * + * Available subsystem list: + * sched ... 
scheduler and IPC mechanism + * + */ + +#include "perf.h" +#include "util/util.h" +#include "util/parse-options.h" +#include "builtin.h" +#include "bench/bench.h" + +#include +#include +#include + +struct bench_suite { + const char *name; + const char *summary; + int (*fn)(int, const char **, const char *); +}; + +static struct bench_suite sched_suites[] = { + { "messaging", + "Benchmark for scheduler and IPC mechanisms", + bench_sched_messaging }, + { "pipe", + "Flood of communication over pipe() between two processes", + bench_sched_pipe }, + { NULL, + NULL, + NULL } +}; + +struct bench_subsys { + const char *name; + const char *summary; + struct bench_suite *suites; +}; + +static struct bench_subsys subsystems[] = { + { "sched", + "scheduler and IPC mechanism", + sched_suites }, + { NULL, + NULL, + NULL } +}; + +static void dump_suites(int subsys_index) +{ + int i; + + printf("List of available suites for %s...\n\n", + subsystems[subsys_index].name); + + for (i = 0; subsystems[subsys_index].suites[i].name; i++) + printf("\t%s: %s\n", + subsystems[subsys_index].suites[i].name, + subsystems[subsys_index].suites[i].summary); + + printf("\n"); + return; +} + +int cmd_bench(int argc, const char **argv, const char *prefix __used) +{ + int i, j, status = 0; + + if (argc < 2) { + /* No subsystem specified. */ + printf("Usage: perf bench []\n\n"); + printf("List of available subsystems...\n\n"); + + for (i = 0; subsystems[i].name; i++) + printf("\t%s: %s\n", + subsystems[i].name, subsystems[i].summary); + printf("\n"); + + goto end; + } + + for (i = 0; subsystems[i].name; i++) { + if (strcmp(subsystems[i].name, argv[1])) + continue; + + if (argc < 3) { + /* No suite specified. */ + dump_suites(i); + goto end; + } + + for (j = 0; subsystems[i].suites[j].name; j++) { + if (strcmp(subsystems[i].suites[j].name, argv[2])) + continue; + + status = subsystems[i].suites[j].fn(argc - 2, + argv + 2, prefix); + goto end; + } + + if (!strcmp(argv[2], "-h") || !strcmp(argv[2], "--help")) { + dump_suites(i); + goto end; + } + + printf("Unknown suite:%s for %s\n", argv[2], argv[1]); + status = 1; + goto end; + } + + printf("Unknown subsystem:%s\n", argv[1]); + status = 1; + +end: + return status; +} From 11bd341c043348ecb7462d3bd8e1ad6d00f6892a Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Thu, 5 Nov 2009 09:31:35 +0900 Subject: [PATCH 230/470] perf bench: Modify builtin.h for new prototype This patch modifies builtin.h to add prototype of cmd_bench(). 
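As an illustrative aside (not part of any patch in this series): the prototype this patch adds (diff below) uses the standard perf builtin signature, and builtin-bench.c above dispatches to such functions through plain name/function tables. A minimal, self-contained sketch of that table-driven pattern, with every identifier below invented for the example, might look roughly like this:

/* Sketch of the suite-table dispatch pattern used by builtin-bench.c.
 * All names here are hypothetical; only the function signature matches
 * the prototypes declared in tools/perf/builtin.h. */
#include <stdio.h>
#include <string.h>

static int demo_suite(int argc, const char **argv, const char *prefix)
{
	(void)argc; (void)argv; (void)prefix;
	printf("running demo suite\n");
	return 0;
}

struct suite {
	const char *name;
	int (*fn)(int argc, const char **argv, const char *prefix);
};

static struct suite suites[] = {
	{ "demo", demo_suite },
	{ NULL,   NULL       },
};

int main(int argc, char **argv)
{
	int i;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <suite>\n", argv[0]);
		return 1;
	}

	for (i = 0; suites[i].name; i++)
		if (!strcmp(suites[i].name, argv[1]))
			return suites[i].fn(argc - 1,
					    (const char **)(argv + 1), NULL);

	fprintf(stderr, "unknown suite: %s\n", argv[1]);
	return 1;
}

The real cmd_bench() layers a second level (subsystem, then suite) plus help and usage handling on top of the same idea.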
Signed-off-by: Hitoshi Mitake Cc: Rusty Russell Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Arnaldo Carvalho de Melo Cc: fweisbec@gmail.com Cc: Jiri Kosina LKML-Reference: <1257381097-4743-6-git-send-email-mitake@dcl.info.waseda.ac.jp> Signed-off-by: Ingo Molnar --- tools/perf/builtin.h | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h index e11d8d231c3..f0cd5b139b7 100644 --- a/tools/perf/builtin.h +++ b/tools/perf/builtin.h @@ -15,6 +15,7 @@ extern int read_line_with_nul(char *buf, int size, FILE *file); extern int check_pager_config(const char *cmd); extern int cmd_annotate(int argc, const char **argv, const char *prefix); +extern int cmd_bench(int argc, const char **argv, const char *prefix); extern int cmd_help(int argc, const char **argv, const char *prefix); extern int cmd_sched(int argc, const char **argv, const char *prefix); extern int cmd_list(int argc, const char **argv, const char *prefix); From dcba8848d3bc83ec9ee0858b9ae6e4f1c1fa7fa3 Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Thu, 5 Nov 2009 09:31:36 +0900 Subject: [PATCH 231/470] perf bench: Add new subcommand 'bench' to perf.c This patch modifies perf.c for invoking 'bench' subcommand. Signed-off-by: Hitoshi Mitake Cc: Rusty Russell Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Arnaldo Carvalho de Melo Cc: fweisbec@gmail.com Cc: Jiri Kosina LKML-Reference: <1257381097-4743-7-git-send-email-mitake@dcl.info.waseda.ac.jp> Signed-off-by: Ingo Molnar --- tools/perf/perf.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/perf/perf.c b/tools/perf/perf.c index 624e62d9d1e..f90ca5ec83f 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c @@ -288,6 +288,7 @@ static void handle_internal_command(int argc, const char **argv) { "list", cmd_list, 0 }, { "record", cmd_record, 0 }, { "report", cmd_report, 0 }, + { "bench", cmd_bench, 0 }, { "stat", cmd_stat, 0 }, { "timechart", cmd_timechart, 0 }, { "top", cmd_top, 0 }, From bfde82ef51e3ea6ab8634d0fdbf5adcdd1b429cb Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Thu, 5 Nov 2009 09:31:37 +0900 Subject: [PATCH 232/470] perf bench: Add subcommand 'bench' to the Makefile This patch modifies Makefile for new files related to 'bench' subcommand. The new code is active from this point on. Signed-off-by: Hitoshi Mitake Cc: Rusty Russell Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Arnaldo Carvalho de Melo Cc: fweisbec@gmail.com Cc: Jiri Kosina LKML-Reference: <1257381097-4743-8-git-send-email-mitake@dcl.info.waseda.ac.jp> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 542b29e2e38..0a2542844a6 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -416,6 +416,13 @@ LIB_OBJS += util/hist.o LIB_OBJS += util/data_map.o BUILTIN_OBJS += builtin-annotate.o + +BUILTIN_OBJS += builtin-bench.o + +# Benchmark modules +BUILTIN_OBJS += bench/sched-messaging.o +BUILTIN_OBJS += bench/sched-pipe.o + BUILTIN_OBJS += builtin-help.o BUILTIN_OBJS += builtin-sched.o BUILTIN_OBJS += builtin-list.o From 444a2a3bcd6d5bed5c823136f68fcc93c0fe283f Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 6 Nov 2009 04:13:05 +0100 Subject: [PATCH 233/470] tracing, perf_events: Protect the buffer from recursion in perf While tracing using events with perf, if one enables the lockdep:lock_acquire event, it will infect every other perf trace events. 
Basically, you can enable whatever set of trace events through perf but if this event is part of the set, the only result we can get is a long list of lock_acquire events of rcu read lock, and only that. This is because of a recursion inside perf. 1) When a trace event is triggered, it will fill a per cpu buffer and submit it to perf. 2) Perf will commit this event but will also protect some data using rcu_read_lock 3) A recursion appears: rcu_read_lock triggers a lock_acquire event that will fill the per cpu event and then submit the buffer to perf. 4) Perf detects a recursion and ignores it 5) Perf continues its work on the previous event, but its buffer has been overwritten by the lock_acquire event, it has then been turned into a lock_acquire event of rcu read lock Such scenario also happens with lock_release with rcu_read_unlock(). We could turn the rcu_read_lock() into __rcu_read_lock() to drop the lock debugging from perf fast path, but that would make us lose the rcu debugging and that doesn't prevent from other possible kind of recursion from perf in the future. This patch adds a recursion protection based on a counter on the perf trace per cpu buffers to solve the problem. -v2: Fixed lost whitespace, added reviewed-by tag Signed-off-by: Frederic Weisbecker Reviewed-by: Masami Hiramatsu Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras Cc: Steven Rostedt Cc: Li Zefan Cc: Jason Baron LKML-Reference: <1257477185-7838-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- include/linux/ftrace_event.h | 9 ++++-- include/trace/ftrace.h | 39 +++++++++++++++++------ kernel/trace/trace_event_profile.c | 41 +++++++++++------------- kernel/trace/trace_kprobe.c | 50 +++++++++++++++++++++++++----- kernel/trace/trace_syscalls.c | 44 +++++++++++++++++++++----- 5 files changed, 133 insertions(+), 50 deletions(-) diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index f7b47c33670..43360c1d8f7 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -137,8 +137,13 @@ struct ftrace_event_call { #define FTRACE_MAX_PROFILE_SIZE 2048 -extern char *trace_profile_buf; -extern char *trace_profile_buf_nmi; +struct perf_trace_buf { + char buf[FTRACE_MAX_PROFILE_SIZE]; + int recursion; +}; + +extern struct perf_trace_buf *perf_trace_buf; +extern struct perf_trace_buf *perf_trace_buf_nmi; #define MAX_FILTER_PRED 32 #define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */ diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index a7f94609412..4945d1c9986 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -649,6 +649,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ * struct ftrace_event_call *event_call = &event_; * extern void perf_tp_event(int, u64, u64, void *, int); * struct ftrace_raw_##call *entry; + * struct perf_trace_buf *trace_buf; * u64 __addr = 0, __count = 1; * unsigned long irq_flags; * struct trace_entry *ent; @@ -673,14 +674,25 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ * __cpu = smp_processor_id(); * * if (in_nmi()) - * raw_data = rcu_dereference(trace_profile_buf_nmi); + * trace_buf = rcu_dereference(perf_trace_buf_nmi); * else - * raw_data = rcu_dereference(trace_profile_buf); + * trace_buf = rcu_dereference(perf_trace_buf); * - * if (!raw_data) + * if (!trace_buf) * goto end; * - * raw_data = per_cpu_ptr(raw_data, __cpu); + * trace_buf = per_cpu_ptr(trace_buf, __cpu); + * + * // Avoid recursion from perf that could mess up 
the buffer + * if (trace_buf->recursion++) + * goto end_recursion; + * + * raw_data = trace_buf->buf; + * + * // Make recursion update visible before entering perf_tp_event + * // so that we protect from perf recursions. + * + * barrier(); * * //zero dead bytes from alignment to avoid stack leak to userspace: * *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; @@ -713,8 +725,9 @@ static void ftrace_profile_##call(proto) \ { \ struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ struct ftrace_event_call *event_call = &event_##call; \ - extern void perf_tp_event(int, u64, u64, void *, int); \ + extern void perf_tp_event(int, u64, u64, void *, int); \ struct ftrace_raw_##call *entry; \ + struct perf_trace_buf *trace_buf; \ u64 __addr = 0, __count = 1; \ unsigned long irq_flags; \ struct trace_entry *ent; \ @@ -739,14 +752,20 @@ static void ftrace_profile_##call(proto) \ __cpu = smp_processor_id(); \ \ if (in_nmi()) \ - raw_data = rcu_dereference(trace_profile_buf_nmi); \ + trace_buf = rcu_dereference(perf_trace_buf_nmi); \ else \ - raw_data = rcu_dereference(trace_profile_buf); \ + trace_buf = rcu_dereference(perf_trace_buf); \ \ - if (!raw_data) \ + if (!trace_buf) \ goto end; \ \ - raw_data = per_cpu_ptr(raw_data, __cpu); \ + trace_buf = per_cpu_ptr(trace_buf, __cpu); \ + if (trace_buf->recursion++) \ + goto end_recursion; \ + \ + barrier(); \ + \ + raw_data = trace_buf->buf; \ \ *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \ entry = (struct ftrace_raw_##call *)raw_data; \ @@ -761,6 +780,8 @@ static void ftrace_profile_##call(proto) \ perf_tp_event(event_call->id, __addr, __count, entry, \ __entry_size); \ \ +end_recursion: \ + trace_buf->recursion--; \ end: \ local_irq_restore(irq_flags); \ \ diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c index c9f687ab0d4..e0d351b01f5 100644 --- a/kernel/trace/trace_event_profile.c +++ b/kernel/trace/trace_event_profile.c @@ -8,41 +8,36 @@ #include #include "trace.h" -/* - * We can't use a size but a type in alloc_percpu() - * So let's create a dummy type that matches the desired size - */ -typedef struct {char buf[FTRACE_MAX_PROFILE_SIZE];} profile_buf_t; -char *trace_profile_buf; -EXPORT_SYMBOL_GPL(trace_profile_buf); +struct perf_trace_buf *perf_trace_buf; +EXPORT_SYMBOL_GPL(perf_trace_buf); -char *trace_profile_buf_nmi; -EXPORT_SYMBOL_GPL(trace_profile_buf_nmi); +struct perf_trace_buf *perf_trace_buf_nmi; +EXPORT_SYMBOL_GPL(perf_trace_buf_nmi); /* Count the events in use (per event id, not per instance) */ static int total_profile_count; static int ftrace_profile_enable_event(struct ftrace_event_call *event) { - char *buf; + struct perf_trace_buf *buf; int ret = -ENOMEM; if (atomic_inc_return(&event->profile_count)) return 0; if (!total_profile_count) { - buf = (char *)alloc_percpu(profile_buf_t); + buf = alloc_percpu(struct perf_trace_buf); if (!buf) goto fail_buf; - rcu_assign_pointer(trace_profile_buf, buf); + rcu_assign_pointer(perf_trace_buf, buf); - buf = (char *)alloc_percpu(profile_buf_t); + buf = alloc_percpu(struct perf_trace_buf); if (!buf) goto fail_buf_nmi; - rcu_assign_pointer(trace_profile_buf_nmi, buf); + rcu_assign_pointer(perf_trace_buf_nmi, buf); } ret = event->profile_enable(event); @@ -53,10 +48,10 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event) fail_buf_nmi: if (!total_profile_count) { - free_percpu(trace_profile_buf_nmi); - free_percpu(trace_profile_buf); - trace_profile_buf_nmi = NULL; - trace_profile_buf = NULL; + 
free_percpu(perf_trace_buf_nmi); + free_percpu(perf_trace_buf); + perf_trace_buf_nmi = NULL; + perf_trace_buf = NULL; } fail_buf: atomic_dec(&event->profile_count); @@ -84,7 +79,7 @@ int ftrace_profile_enable(int event_id) static void ftrace_profile_disable_event(struct ftrace_event_call *event) { - char *buf, *nmi_buf; + struct perf_trace_buf *buf, *nmi_buf; if (!atomic_add_negative(-1, &event->profile_count)) return; @@ -92,11 +87,11 @@ static void ftrace_profile_disable_event(struct ftrace_event_call *event) event->profile_disable(event); if (!--total_profile_count) { - buf = trace_profile_buf; - rcu_assign_pointer(trace_profile_buf, NULL); + buf = perf_trace_buf; + rcu_assign_pointer(perf_trace_buf, NULL); - nmi_buf = trace_profile_buf_nmi; - rcu_assign_pointer(trace_profile_buf_nmi, NULL); + nmi_buf = perf_trace_buf_nmi; + rcu_assign_pointer(perf_trace_buf_nmi, NULL); /* * Ensure every events in profiling have finished before diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index cf17a6694f3..3696476f307 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1208,6 +1208,7 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp, struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); struct ftrace_event_call *call = &tp->call; struct kprobe_trace_entry *entry; + struct perf_trace_buf *trace_buf; struct trace_entry *ent; int size, __size, i, pc, __cpu; unsigned long irq_flags; @@ -1229,14 +1230,26 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp, __cpu = smp_processor_id(); if (in_nmi()) - raw_data = rcu_dereference(trace_profile_buf_nmi); + trace_buf = rcu_dereference(perf_trace_buf_nmi); else - raw_data = rcu_dereference(trace_profile_buf); + trace_buf = rcu_dereference(perf_trace_buf); - if (!raw_data) + if (!trace_buf) goto end; - raw_data = per_cpu_ptr(raw_data, __cpu); + trace_buf = per_cpu_ptr(trace_buf, __cpu); + + if (trace_buf->recursion++) + goto end_recursion; + + /* + * Make recursion update visible before entering perf_tp_event + * so that we protect from perf recursions. 
+ */ + barrier(); + + raw_data = trace_buf->buf; + /* Zero dead bytes from alignment to avoid buffer leak to userspace */ *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; entry = (struct kprobe_trace_entry *)raw_data; @@ -1249,8 +1262,12 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp, for (i = 0; i < tp->nr_args; i++) entry->args[i] = call_fetch(&tp->args[i].fetch, regs); perf_tp_event(call->id, entry->ip, 1, entry, size); + +end_recursion: + trace_buf->recursion--; end: local_irq_restore(irq_flags); + return 0; } @@ -1261,6 +1278,7 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri, struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); struct ftrace_event_call *call = &tp->call; struct kretprobe_trace_entry *entry; + struct perf_trace_buf *trace_buf; struct trace_entry *ent; int size, __size, i, pc, __cpu; unsigned long irq_flags; @@ -1282,14 +1300,26 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri, __cpu = smp_processor_id(); if (in_nmi()) - raw_data = rcu_dereference(trace_profile_buf_nmi); + trace_buf = rcu_dereference(perf_trace_buf_nmi); else - raw_data = rcu_dereference(trace_profile_buf); + trace_buf = rcu_dereference(perf_trace_buf); - if (!raw_data) + if (!trace_buf) goto end; - raw_data = per_cpu_ptr(raw_data, __cpu); + trace_buf = per_cpu_ptr(trace_buf, __cpu); + + if (trace_buf->recursion++) + goto end_recursion; + + /* + * Make recursion update visible before entering perf_tp_event + * so that we protect from perf recursions. + */ + barrier(); + + raw_data = trace_buf->buf; + /* Zero dead bytes from alignment to avoid buffer leak to userspace */ *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; entry = (struct kretprobe_trace_entry *)raw_data; @@ -1303,8 +1333,12 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri, for (i = 0; i < tp->nr_args; i++) entry->args[i] = call_fetch(&tp->args[i].fetch, regs); perf_tp_event(call->id, entry->ret_ip, 1, entry, size); + +end_recursion: + trace_buf->recursion--; end: local_irq_restore(irq_flags); + return 0; } diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 58b8e537076..51213b0aa81 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -477,6 +477,7 @@ static int sys_prof_refcount_exit; static void prof_syscall_enter(struct pt_regs *regs, long id) { struct syscall_metadata *sys_data; + struct perf_trace_buf *trace_buf; struct syscall_trace_enter *rec; unsigned long flags; char *raw_data; @@ -507,14 +508,25 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) cpu = smp_processor_id(); if (in_nmi()) - raw_data = rcu_dereference(trace_profile_buf_nmi); + trace_buf = rcu_dereference(perf_trace_buf_nmi); else - raw_data = rcu_dereference(trace_profile_buf); + trace_buf = rcu_dereference(perf_trace_buf); - if (!raw_data) + if (!trace_buf) goto end; - raw_data = per_cpu_ptr(raw_data, cpu); + trace_buf = per_cpu_ptr(trace_buf, cpu); + + if (trace_buf->recursion++) + goto end_recursion; + + /* + * Make recursion update visible before entering perf_tp_event + * so that we protect from perf recursions. 
+ */ + barrier(); + + raw_data = trace_buf->buf; /* zero the dead bytes from align to not leak stack to user */ *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; @@ -527,6 +539,8 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) (unsigned long *)&rec->args); perf_tp_event(sys_data->enter_id, 0, 1, rec, size); +end_recursion: + trace_buf->recursion--; end: local_irq_restore(flags); } @@ -574,6 +588,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) { struct syscall_metadata *sys_data; struct syscall_trace_exit *rec; + struct perf_trace_buf *trace_buf; unsigned long flags; int syscall_nr; char *raw_data; @@ -605,14 +620,25 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) cpu = smp_processor_id(); if (in_nmi()) - raw_data = rcu_dereference(trace_profile_buf_nmi); + trace_buf = rcu_dereference(perf_trace_buf_nmi); else - raw_data = rcu_dereference(trace_profile_buf); + trace_buf = rcu_dereference(perf_trace_buf); - if (!raw_data) + if (!trace_buf) goto end; - raw_data = per_cpu_ptr(raw_data, cpu); + trace_buf = per_cpu_ptr(trace_buf, cpu); + + if (trace_buf->recursion++) + goto end_recursion; + + /* + * Make recursion update visible before entering perf_tp_event + * so that we protect from perf recursions. + */ + barrier(); + + raw_data = trace_buf->buf; /* zero the dead bytes from align to not leak stack to user */ *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; @@ -626,6 +652,8 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) perf_tp_event(sys_data->exit_id, 0, 1, rec, size); +end_recursion: + trace_buf->recursion--; end: local_irq_restore(flags); } From 8d06367fa79c053a4a56a2ce0bb9e840f5da1236 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 4 Nov 2009 18:50:43 -0200 Subject: [PATCH 234/470] perf symbols: Use the buildids if present With this change 'perf record' will intercept PERF_RECORD_MMAP calls, creating a linked list of DSOs, then when the session finishes, it will traverse this list and read the buildids, stashing them at the end of the file and will set up a new feature bit in the header bitmask. 'perf report' will then notice this feature and populate the 'dsos' list and set the build ids. When reading the symtabs it will refuse to load from a file that doesn't have the same build id. This improves the reliability of the profiler output, as symbols and profiling data is more guaranteed to match. Example: [root@doppio ~]# perf report | head /home/acme/bin/perf with build id b1ea544ac3746e7538972548a09aadecc5753868 not found, continuing without symbols # Samples: 2621434559 # # Overhead Command Shared Object Symbol # ........ ............... ............................. ...... # 7.91% init [kernel] [k] read_hpet 7.64% init [kernel] [k] mwait_idle_with_hints 7.60% swapper [kernel] [k] read_hpet 7.60% swapper [kernel] [k] mwait_idle_with_hints 3.65% init [kernel] [k] 0xffffffffa02339d9 [root@doppio ~]# In this case the 'perf' binary was an older one, vanished, so its symbols probably wouldn't match or would cause subtly different (and misleading) output. Next patches will support the kernel as well, reading the build id notes for it and the modules from /sys. Another patch should also introduce a new plumbing command: 'perf list-buildids' that will then be used in porcelain that is distro specific to fetch -debuginfo packages where such buildids are present. This will in turn allow for one to run 'perf record' in one machine and 'perf report' in another. 
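As an illustrative aside (not from the commit itself): each record appended to perf.data is a perf_event_header, the build-id padded to a u64 multiple, and the DSO path padded to a 64-byte multiple, as struct build_id_event and write_buildid_table() below show. A small standalone sketch of the resulting record size, with the constants copied from the patch and a made-up example path:

/* Sketch only: compute the size of one build-id record as written at the
 * end of perf.data.  Local copies of the constants and header struct stand
 * in for the kernel/tools headers; the path in main() is hypothetical. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define BUILD_ID_SIZE	20
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

struct perf_event_header {
	uint32_t type;
	uint16_t misc;
	uint16_t size;
};

static size_t build_id_record_size(const char *filename)
{
	size_t len = ALIGN(strlen(filename) + 1, 64);

	/* header + padded build-id + padded path */
	return sizeof(struct perf_event_header)
		+ ALIGN(BUILD_ID_SIZE, sizeof(uint64_t))
		+ len;
}

int main(void)
{
	printf("%zu bytes\n", build_id_record_size("/lib64/libc.so.6"));
	return 0;
}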
Future work on having the buildid sent directly from the kernel in the PERF_RECORD_MMAP event is needed to close races, as the DSO can be changed during a 'perf record' session, but this patch at least helps with non-corner cases and current/older kernels. Signed-off-by: Arnaldo Carvalho de Melo Cc: Ananth N Mavinakayanahalli Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Frederic Weisbecker Cc: Jason Baron Cc: Jim Keniston Cc: K. Prasad Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Roland McGrath Cc: Srikar Dronamraju Cc: Steven Rostedt LKML-Reference: <1257367843-26224-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 49 +++++++++++++++++++++-- tools/perf/util/data_map.c | 37 ++++++++++++++++++ tools/perf/util/event.h | 7 ++++ tools/perf/util/header.c | 10 +++++ tools/perf/util/header.h | 4 ++ tools/perf/util/map.c | 14 ++++++- tools/perf/util/symbol.c | 78 ++++++++++++++++++++++++++----------- tools/perf/util/symbol.h | 10 +++-- 8 files changed, 179 insertions(+), 30 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 4a73d89ce5d..ab333812ace 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -17,6 +17,7 @@ #include "util/header.h" #include "util/event.h" #include "util/debug.h" +#include "util/symbol.h" #include #include @@ -109,9 +110,21 @@ static void write_output(void *buf, size_t size) } } +static void write_event(event_t *buf, size_t size) +{ + /* + * Add it to the list of DSOs, so that when we finish this + * record session we can pick the available build-ids. + */ + if (buf->header.type == PERF_RECORD_MMAP) + dsos__findnew(buf->mmap.filename); + + write_output(buf, size); +} + static int process_synthesized_event(event_t *event) { - write_output(event, event->header.size); + write_event(event, event->header.size); return 0; } @@ -163,14 +176,14 @@ static void mmap_read(struct mmap_data *md) size = md->mask + 1 - (old & md->mask); old += size; - write_output(buf, size); + write_event(buf, size); } buf = &data[old & md->mask]; size = head - old; old += size; - write_output(buf, size); + write_event(buf, size); md->prev = old; mmap_write_tail(md, old); @@ -365,10 +378,38 @@ static void open_counters(int cpu, pid_t pid) nr_cpu++; } +static bool write_buildid_table(void) +{ + struct dso *pos; + bool have_buildid = false; + + list_for_each_entry(pos, &dsos, node) { + struct build_id_event b; + size_t len; + + if (filename__read_build_id(pos->long_name, + &b.build_id, + sizeof(b.build_id)) < 0) + continue; + have_buildid = true; + memset(&b.header, 0, sizeof(b.header)); + len = strlen(pos->long_name) + 1; + len = ALIGN(len, 64); + b.header.size = sizeof(b) + len; + write_output(&b, sizeof(b)); + write_output(pos->long_name, len); + } + + return have_buildid; +} + static void atexit_header(void) { header->data_size += bytes_written; + if (write_buildid_table()) + perf_header__set_feat(header, HEADER_BUILD_ID); + perf_header__write(header, output); } @@ -572,6 +613,8 @@ int cmd_record(int argc, const char **argv, const char *prefix __used) { int counter; + symbol__init(0); + argc = parse_options(argc, argv, options, record_usage, PARSE_OPT_STOP_AT_NON_OPTION); if (!argc && target_pid == -1 && !system_wide) diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c index c458db9ede6..00a9c114c8d 100644 --- a/tools/perf/util/data_map.c +++ b/tools/perf/util/data_map.c @@ -70,6 +70,39 @@ process_event(event_t *event, unsigned long offset, unsigned long head) } } +static 
int perf_header__read_build_ids(const struct perf_header *self, + int input, off_t file_size) +{ + off_t offset = self->data_offset + self->data_size; + struct build_id_event bev; + char filename[PATH_MAX]; + int err = -1; + + if (lseek(input, offset, SEEK_SET) < 0) + return -1; + + while (offset < file_size) { + struct dso *dso; + ssize_t len; + + if (read(input, &bev, sizeof(bev)) != sizeof(bev)) + goto out; + + len = bev.header.size - sizeof(bev); + if (read(input, filename, len) != len) + goto out; + + dso = dsos__findnew(filename); + if (dso != NULL) + dso__set_build_id(dso, &bev.build_id); + + offset += bev.header.size; + } + err = 0; +out: + return err; +} + int mmap_dispatch_perf_file(struct perf_header **pheader, const char *input_name, int force, @@ -130,6 +163,10 @@ int mmap_dispatch_perf_file(struct perf_header **pheader, if (curr_handler->sample_type_check(sample_type) < 0) exit(-1); + if (perf_header__has_feat(header, HEADER_BUILD_ID) && + perf_header__read_build_ids(header, input, input_stat.st_size)) + pr_debug("failed to read buildids, continuing...\n"); + if (load_kernel(NULL) < 0) { perror("failed to load kernel symbols"); return EXIT_FAILURE; diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 0a443bea68d..34c6fcb82d9 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -61,6 +61,13 @@ struct sample_event{ u64 array[]; }; +#define BUILD_ID_SIZE 20 + +struct build_id_event { + struct perf_event_header header; + u8 build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))]; + char filename[]; +}; typedef union event_union { struct perf_event_header header; diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 7d26659b806..050f543fd96 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -149,6 +149,16 @@ void perf_header__feat_trace_info(struct perf_header *header) set_bit(HEADER_TRACE_INFO, header->adds_features); } +void perf_header__set_feat(struct perf_header *self, int feat) +{ + set_bit(feat, self->adds_features); +} + +bool perf_header__has_feat(const struct perf_header *self, int feat) +{ + return test_bit(feat, self->adds_features); +} + static void do_write(int fd, void *buf, size_t size) { while (size) { diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index 2ea9dfb1236..2f233c5db7e 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -3,6 +3,7 @@ #include "../../../include/linux/perf_event.h" #include +#include #include "types.h" #include @@ -15,6 +16,7 @@ struct perf_header_attr { }; #define HEADER_TRACE_INFO 1 +#define HEADER_BUILD_ID 2 #define HEADER_FEAT_BITS 256 @@ -48,6 +50,8 @@ u64 perf_header__sample_type(struct perf_header *header); struct perf_event_attr * perf_header__find_attr(u64 id, struct perf_header *header); void perf_header__feat_trace_info(struct perf_header *header); +void perf_header__set_feat(struct perf_header *self, int feat); +bool perf_header__has_feat(const struct perf_header *self, int feat); struct perf_header *perf_header__new(void); diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 33f868420d7..94ca95073c4 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -84,8 +84,18 @@ map__find_symbol(struct map *self, u64 ip, symbol_filter_t filter) int nr = dso__load(self->dso, self, filter); if (nr < 0) { - pr_warning("Failed to open %s, continuing without symbols\n", - self->dso->long_name); + if (self->dso->has_build_id) { + char sbuild_id[BUILD_ID_SIZE * 2 + 1]; + + build_id__sprintf(self->dso->build_id, + 
sizeof(self->dso->build_id), + sbuild_id); + pr_warning("%s with build id %s not found", + self->dso->long_name, sbuild_id); + } else + pr_warning("Failed to open %s", + self->dso->long_name); + pr_warning(", continuing without symbols\n"); return NULL; } else if (nr == 0) { const char *name = self->dso->long_name; diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index e7c7cdb851c..a2e95ce1f22 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -121,7 +121,8 @@ struct dso *dso__new(const char *name) self->find_symbol = dso__find_symbol; self->slen_calculated = 0; self->origin = DSO__ORIG_NOT_FOUND; - self->loaded = false; + self->loaded = 0; + self->has_build_id = 0; } return self; @@ -148,6 +149,12 @@ void dso__delete(struct dso *self) free(self); } +void dso__set_build_id(struct dso *self, void *build_id) +{ + memcpy(self->build_id, build_id, sizeof(self->build_id)); + self->has_build_id = 1; +} + static void dso__insert_symbol(struct dso *self, struct symbol *sym) { struct rb_node **p = &self->syms.rb_node; @@ -190,11 +197,30 @@ struct symbol *dso__find_symbol(struct dso *self, u64 ip) return NULL; } +int build_id__sprintf(u8 *self, int len, char *bf) +{ + char *bid = bf; + u8 *raw = self; + int i; + + for (i = 0; i < len; ++i) { + sprintf(bid, "%02x", *raw); + ++raw; + bid += 2; + } + + return raw - self; +} + size_t dso__fprintf(struct dso *self, FILE *fp) { - size_t ret = fprintf(fp, "dso: %s\n", self->short_name); - + char sbuild_id[BUILD_ID_SIZE * 2 + 1]; struct rb_node *nd; + size_t ret; + + build_id__sprintf(self->build_id, sizeof(self->build_id), sbuild_id); + ret = fprintf(fp, "dso: %s (%s)\n", self->short_name, sbuild_id); + for (nd = rb_first(&self->syms); nd; nd = rb_next(nd)) { struct symbol *pos = rb_entry(nd, struct symbol, rb_node); ret += symbol__fprintf(pos, fp); @@ -825,8 +851,6 @@ out_close: return err; } -#define BUILD_ID_SIZE 20 - int filename__read_build_id(const char *filename, void *bf, size_t size) { int fd, err = -1; @@ -845,7 +869,7 @@ int filename__read_build_id(const char *filename, void *bf, size_t size) elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); if (elf == NULL) { - pr_err("%s: cannot read %s ELF file.\n", __func__, filename); + pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename); goto out_close; } @@ -874,9 +898,9 @@ out: static char *dso__read_build_id(struct dso *self) { - int i, len; - char *build_id = NULL, *bid; - unsigned char rawbf[BUILD_ID_SIZE], *raw; + int len; + char *build_id = NULL; + unsigned char rawbf[BUILD_ID_SIZE]; len = filename__read_build_id(self->long_name, rawbf, sizeof(rawbf)); if (len < 0) @@ -885,15 +909,8 @@ static char *dso__read_build_id(struct dso *self) build_id = malloc(len * 2 + 1); if (build_id == NULL) goto out; - bid = build_id; - raw = rawbf; - for (i = 0; i < len; ++i) { - sprintf(bid, "%02x", *raw); - ++raw; - bid += 2; - } - pr_debug2("%s(%s): %s\n", __func__, self->long_name, build_id); + build_id__sprintf(rawbf, len, build_id); out: return build_id; } @@ -922,7 +939,7 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) int ret = -1; int fd; - self->loaded = true; + self->loaded = 1; if (!name) return -1; @@ -940,6 +957,8 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) more: do { + int berr = 0; + self->origin++; switch (self->origin) { case DSO__ORIG_FEDORA: @@ -956,8 +975,7 @@ more: snprintf(name, size, "/usr/lib/debug/.build-id/%.2s/%s.debug", build_id, build_id + 2); - free(build_id); - break; + goto 
compare_build_id; } self->origin++; /* Fall thru */ @@ -969,6 +987,22 @@ more: goto out; } + if (self->has_build_id) { + bool match; + build_id = malloc(BUILD_ID_SIZE); + if (build_id == NULL) + goto more; + berr = filename__read_build_id(name, build_id, + BUILD_ID_SIZE); +compare_build_id: + match = berr > 0 && memcmp(build_id, self->build_id, + sizeof(self->build_id)) == 0; + free(build_id); + build_id = NULL; + if (!match) + goto more; + } + fd = open(name, O_RDONLY); } while (fd < 0); @@ -1034,7 +1068,7 @@ static int dso__load_module_sym(struct dso *self, struct map *map, { int err = 0, fd = open(self->long_name, O_RDONLY); - self->loaded = true; + self->loaded = 1; if (fd < 0) { pr_err("%s: cannot open %s\n", __func__, self->long_name); @@ -1225,7 +1259,7 @@ static int dso__load_vmlinux(struct dso *self, struct map *map, { int err, fd = open(vmlinux, O_RDONLY); - self->loaded = true; + self->loaded = 1; if (fd < 0) return -1; diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index e0d4a583f8d..f8c1899af48 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -60,10 +60,12 @@ struct dso { struct list_head node; struct rb_root syms; struct symbol *(*find_symbol)(struct dso *, u64 ip); - unsigned char adjust_symbols; - unsigned char slen_calculated; - bool loaded; + u8 adjust_symbols:1; + u8 slen_calculated:1; + u8 loaded:1; + u8 has_build_id:1; unsigned char origin; + u8 build_id[BUILD_ID_SIZE]; const char *short_name; char *long_name; char name[0]; @@ -81,8 +83,10 @@ void dsos__fprintf(FILE *fp); size_t dso__fprintf(struct dso *self, FILE *fp); char dso__symtab_origin(const struct dso *self); +void dso__set_build_id(struct dso *self, void *build_id); int filename__read_build_id(const char *filename, void *bf, size_t size); +int build_id__sprintf(u8 *self, int len, char *bf); int load_kernel(symbol_filter_t filter); From 24f1e32c60c45c89a997c73395b69c8af6f0a84e Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 9 Sep 2009 19:22:48 +0200 Subject: [PATCH 235/470] hw-breakpoints: Rewrite the hw-breakpoints layer on top of perf events This patch rebase the implementation of the breakpoints API on top of perf events instances. Each breakpoints are now perf events that handle the register scheduling, thread/cpu attachment, etc.. The new layering is now made as follows: ptrace kgdb ftrace perf syscall \ | / / \ | / / / Core breakpoint API / / | / | / Breakpoints perf events | | Breakpoints PMU ---- Debug Register constraints handling (Part of core breakpoint API) | | Hardware debug registers Reasons of this rewrite: - Use the centralized/optimized pmu registers scheduling, implying an easier arch integration - More powerful register handling: perf attributes (pinned/flexible events, exclusive/non-exclusive, tunable period, etc...) Impact: - New perf ABI: the hardware breakpoints counters - Ptrace breakpoints setting remains tricky and still needs some per thread breakpoints references. Todo (in the order): - Support breakpoints perf counter events for perf tools (ie: implement perf_bpcounter_event()) - Support from perf tools Changes in v2: - Follow the perf "event " rename - The ptrace regression have been fixed (ptrace breakpoint perf events weren't released when a task ended) - Drop the struct hw_breakpoint and store generic fields in perf_event_attr. 
- Separate core and arch specific headers, drop asm-generic/hw_breakpoint.h and create linux/hw_breakpoint.h
- Use the new generic len/type for breakpoints
- Handle the off case: when the breakpoints API is not supported by an arch

Changes in v3:

- Fix broken CONFIG_KVM: we need to propagate the breakpoint API changes to KVM when we exit the guest and restore the bp registers to the host.

Changes in v4:

- Drop the hw_breakpoint_restore() stub as it is only used by KVM
- EXPORT_SYMBOL_GPL hw_breakpoint_restore() as KVM can be built as a module
- Restore the breakpoints unconditionally on kvm guest exit: TIF_DEBUG_THREAD no longer covers every case of running breakpoints and vcpu->arch.switch_db_regs might not always be set when the guest used debug registers. (Waiting for a reliable optimization)

Changes in v5:

- Split up the move of asm-generic/hw_breakpoint.h to linux/hw_breakpoint.h into a separate patch
- Optimize the breakpoint restore while switching from kvm guest to host. We only want to restore the state if we have active breakpoints in the host, otherwise we don't care about messed-up address registers.
- Add asm/hw_breakpoint.h to Kbuild
- Fix bad breakpoint type in trace_selftest.c

Changes in v6:

- Fix wrong header inclusion in trace.h (triggered a build error with CONFIG_FTRACE_SELFTEST)

Signed-off-by: Frederic Weisbecker
Cc: Prasad
Cc: Alan Stern
Cc: Peter Zijlstra
Cc: Arnaldo Carvalho de Melo
Cc: Steven Rostedt
Cc: Ingo Molnar
Cc: Jan Kiszka
Cc: Jiri Slaby
Cc: Li Zefan
Cc: Avi Kivity
Cc: Paul Mackerras
Cc: Mike Galbraith
Cc: Masami Hiramatsu
Cc: Paul Mundt
---
 arch/Kconfig | 3 +
 arch/x86/include/asm/Kbuild | 1 +
 arch/x86/include/asm/debugreg.h | 11 +-
 arch/x86/include/asm/hw_breakpoint.h | 58 ++--
 arch/x86/include/asm/processor.h | 12 +-
 arch/x86/kernel/hw_breakpoint.c | 395 +++++++++++++++--------
 arch/x86/kernel/process.c | 7 +-
 arch/x86/kernel/process_32.c | 26 +-
 arch/x86/kernel/process_64.c | 26 +-
 arch/x86/kernel/ptrace.c | 182 +++++++----
 arch/x86/kernel/smpboot.c | 3 -
 arch/x86/kvm/x86.c | 18 +-
 arch/x86/power/cpu.c | 6 -
 include/linux/hw_breakpoint.h | 243 +++++++-------
 include/linux/perf_event.h | 26 +-
 kernel/exit.c | 5 +
 kernel/hw_breakpoint.c | 452 +++++++++++----------
 kernel/perf_event.c | 53 +++-
 kernel/trace/trace.h | 5 +-
 kernel/trace/trace_entries.h | 6 +-
 kernel/trace/trace_ksym.c | 124 ++++----
 kernel/trace/trace_selftest.c | 3 +-
 22 files changed, 900 insertions(+), 765 deletions(-)

diff --git a/arch/Kconfig b/arch/Kconfig index acb66439794..eef3bbb9707 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -128,6 +128,9 @@ config HAVE_DEFAULT_NO_SPIN_MUTEXES config HAVE_HW_BREAKPOINT bool + depends on HAVE_PERF_EVENTS + select ANON_INODES + select PERF_EVENTS source "kernel/gcov/Kconfig" diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild index 4a8e80cdcfa..9f828f87ca3 100644 --- a/arch/x86/include/asm/Kbuild +++ b/arch/x86/include/asm/Kbuild @@ -10,6 +10,7 @@ header-y += ptrace-abi.h header-y += sigcontext32.h header-y += ucontext.h header-y += processor-flags.h +header-y += hw_breakpoint.h unifdef-y += e820.h unifdef-y += ist.h diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h index 23439fbb1d0..9a3333c91f9 100644 --- a/arch/x86/include/asm/debugreg.h +++ b/arch/x86/include/asm/debugreg.h @@ -75,13 +75,8 @@ */ #ifdef __KERNEL__ -/* For process management */ -extern void flush_thread_hw_breakpoint(struct task_struct *tsk); -extern int copy_thread_hw_breakpoint(struct task_struct *tsk, - struct task_struct
*child, unsigned long clone_flags); +DECLARE_PER_CPU(unsigned long, dr7); -/* For CPU management */ -extern void load_debug_registers(void); static inline void hw_breakpoint_disable(void) { /* Zero the control register for HW Breakpoint */ @@ -94,6 +89,10 @@ static inline void hw_breakpoint_disable(void) set_debugreg(0UL, 3); } +#ifdef CONFIG_KVM +extern void hw_breakpoint_restore(void); +#endif + #endif /* __KERNEL__ */ #endif /* _ASM_X86_DEBUGREG_H */ diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h index 3cfca8e2b5f..0675a7c4c20 100644 --- a/arch/x86/include/asm/hw_breakpoint.h +++ b/arch/x86/include/asm/hw_breakpoint.h @@ -4,6 +4,11 @@ #ifdef __KERNEL__ #define __ARCH_HW_BREAKPOINT_H +/* + * The name should probably be something dealt in + * a higher level. While dealing with the user + * (display/resolving) + */ struct arch_hw_breakpoint { char *name; /* Contains name of the symbol to set bkpt */ unsigned long address; @@ -12,44 +17,57 @@ struct arch_hw_breakpoint { }; #include -#include +#include +#include /* Available HW breakpoint length encodings */ -#define HW_BREAKPOINT_LEN_1 0x40 -#define HW_BREAKPOINT_LEN_2 0x44 -#define HW_BREAKPOINT_LEN_4 0x4c -#define HW_BREAKPOINT_LEN_EXECUTE 0x40 +#define X86_BREAKPOINT_LEN_1 0x40 +#define X86_BREAKPOINT_LEN_2 0x44 +#define X86_BREAKPOINT_LEN_4 0x4c +#define X86_BREAKPOINT_LEN_EXECUTE 0x40 #ifdef CONFIG_X86_64 -#define HW_BREAKPOINT_LEN_8 0x48 +#define X86_BREAKPOINT_LEN_8 0x48 #endif /* Available HW breakpoint type encodings */ /* trigger on instruction execute */ -#define HW_BREAKPOINT_EXECUTE 0x80 +#define X86_BREAKPOINT_EXECUTE 0x80 /* trigger on memory write */ -#define HW_BREAKPOINT_WRITE 0x81 +#define X86_BREAKPOINT_WRITE 0x81 /* trigger on memory read or write */ -#define HW_BREAKPOINT_RW 0x83 +#define X86_BREAKPOINT_RW 0x83 /* Total number of available HW breakpoint registers */ #define HBP_NUM 4 -extern struct hw_breakpoint *hbp_kernel[HBP_NUM]; -DECLARE_PER_CPU(struct hw_breakpoint*, this_hbp_kernel[HBP_NUM]); -extern unsigned int hbp_user_refcount[HBP_NUM]; +struct perf_event; +struct pmu; -extern void arch_install_thread_hw_breakpoint(struct task_struct *tsk); -extern void arch_uninstall_thread_hw_breakpoint(void); extern int arch_check_va_in_userspace(unsigned long va, u8 hbp_len); -extern int arch_validate_hwbkpt_settings(struct hw_breakpoint *bp, - struct task_struct *tsk); -extern void arch_update_user_hw_breakpoint(int pos, struct task_struct *tsk); -extern void arch_flush_thread_hw_breakpoint(struct task_struct *tsk); -extern void arch_update_kernel_hw_breakpoint(void *); +extern int arch_validate_hwbkpt_settings(struct perf_event *bp, + struct task_struct *tsk); extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, - unsigned long val, void *data); + unsigned long val, void *data); + + +int arch_install_hw_breakpoint(struct perf_event *bp); +void arch_uninstall_hw_breakpoint(struct perf_event *bp); +void hw_breakpoint_pmu_read(struct perf_event *bp); +void hw_breakpoint_pmu_unthrottle(struct perf_event *bp); + +extern void +arch_fill_perf_breakpoint(struct perf_event *bp); + +unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type); +int decode_dr7(unsigned long dr7, int bpnum, unsigned *len, unsigned *type); + +extern int arch_bp_generic_fields(int x86_len, int x86_type, + int *gen_len, int *gen_type); + +extern struct pmu perf_ops_bp; + #endif /* __KERNEL__ */ #endif /* _I386_HW_BREAKPOINT_H */ diff --git a/arch/x86/include/asm/processor.h 
b/arch/x86/include/asm/processor.h index 61aafb71c7e..820f3000f73 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -423,6 +423,8 @@ extern unsigned int xstate_size; extern void free_thread_xstate(struct task_struct *); extern struct kmem_cache *task_xstate_cachep; +struct perf_event; + struct thread_struct { /* Cached TLS descriptors: */ struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; @@ -444,12 +446,10 @@ struct thread_struct { unsigned long fs; #endif unsigned long gs; - /* Hardware debugging registers: */ - unsigned long debugreg[HBP_NUM]; - unsigned long debugreg6; - unsigned long debugreg7; - /* Hardware breakpoint info */ - struct hw_breakpoint *hbp[HBP_NUM]; + /* Save middle states of ptrace breakpoints */ + struct perf_event *ptrace_bps[HBP_NUM]; + /* Debug status used for traps, single steps, etc... */ + unsigned long debugreg6; /* Fault info: */ unsigned long cr2; unsigned long trap_no; diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index 9316a9de4de..e622620790b 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -15,6 +15,7 @@ * * Copyright (C) 2007 Alan Stern * Copyright (C) 2009 IBM Corporation + * Copyright (C) 2009 Frederic Weisbecker */ /* @@ -22,6 +23,8 @@ * using the CPU's debug registers. */ +#include +#include #include #include #include @@ -38,26 +41,24 @@ #include #include -/* Unmasked kernel DR7 value */ -static unsigned long kdr7; +/* Per cpu debug control register value */ +DEFINE_PER_CPU(unsigned long, dr7); + +/* Per cpu debug address registers values */ +static DEFINE_PER_CPU(unsigned long, cpu_debugreg[HBP_NUM]); /* - * Masks for the bits corresponding to registers DR0 - DR3 in DR7 register. - * Used to clear and verify the status of bits corresponding to DR0 - DR3 + * Stores the breakpoints currently in use on each breakpoint address + * register for each cpus */ -static const unsigned long dr7_masks[HBP_NUM] = { - 0x000f0003, /* LEN0, R/W0, G0, L0 */ - 0x00f0000c, /* LEN1, R/W1, G1, L1 */ - 0x0f000030, /* LEN2, R/W2, G2, L2 */ - 0xf00000c0 /* LEN3, R/W3, G3, L3 */ -}; +static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]); /* * Encode the length, type, Exact, and Enable bits for a particular breakpoint * as stored in debug register 7. */ -static unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type) +unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type) { unsigned long bp_info; @@ -68,64 +69,89 @@ static unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type) return bp_info; } -void arch_update_kernel_hw_breakpoint(void *unused) +/* + * Decode the length and type bits for a particular breakpoint as + * stored in debug register 7. Return the "enabled" status. + */ +int decode_dr7(unsigned long dr7, int bpnum, unsigned *len, unsigned *type) { - struct hw_breakpoint *bp; - int i, cpu = get_cpu(); - unsigned long temp_kdr7 = 0; + int bp_info = dr7 >> (DR_CONTROL_SHIFT + bpnum * DR_CONTROL_SIZE); - /* Don't allow debug exceptions while we update the registers */ - set_debugreg(0UL, 7); + *len = (bp_info & 0xc) | 0x40; + *type = (bp_info & 0x3) | 0x80; - for (i = hbp_kernel_pos; i < HBP_NUM; i++) { - per_cpu(this_hbp_kernel[i], cpu) = bp = hbp_kernel[i]; - if (bp) { - temp_kdr7 |= encode_dr7(i, bp->info.len, bp->info.type); - set_debugreg(bp->info.address, i); + return (dr7 >> (bpnum * DR_ENABLE_SIZE)) & 0x3; +} + +/* + * Install a perf counter breakpoint. 
+ * + * We seek a free debug address register and use it for this + * breakpoint. Eventually we enable it in the debug control register. + * + * Atomic: we hold the counter->ctx->lock and we only handle variables + * and registers local to this cpu. + */ +int arch_install_hw_breakpoint(struct perf_event *bp) +{ + struct arch_hw_breakpoint *info = counter_arch_bp(bp); + unsigned long *dr7; + int i; + + for (i = 0; i < HBP_NUM; i++) { + struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]); + + if (!*slot) { + *slot = bp; + break; } } - /* No need to set DR6. Update the debug registers with kernel-space - * breakpoint values from kdr7 and user-space requests from the - * current process - */ - kdr7 = temp_kdr7; - set_debugreg(kdr7 | current->thread.debugreg7, 7); - put_cpu(); + if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot")) + return -EBUSY; + + set_debugreg(info->address, i); + __get_cpu_var(cpu_debugreg[i]) = info->address; + + dr7 = &__get_cpu_var(dr7); + *dr7 |= encode_dr7(i, info->len, info->type); + + set_debugreg(*dr7, 7); + + return 0; } /* - * Install the thread breakpoints in their debug registers. + * Uninstall the breakpoint contained in the given counter. + * + * First we search the debug address register it uses and then we disable + * it. + * + * Atomic: we hold the counter->ctx->lock and we only handle variables + * and registers local to this cpu. */ -void arch_install_thread_hw_breakpoint(struct task_struct *tsk) +void arch_uninstall_hw_breakpoint(struct perf_event *bp) { - struct thread_struct *thread = &(tsk->thread); + struct arch_hw_breakpoint *info = counter_arch_bp(bp); + unsigned long *dr7; + int i; - switch (hbp_kernel_pos) { - case 4: - set_debugreg(thread->debugreg[3], 3); - case 3: - set_debugreg(thread->debugreg[2], 2); - case 2: - set_debugreg(thread->debugreg[1], 1); - case 1: - set_debugreg(thread->debugreg[0], 0); - default: - break; + for (i = 0; i < HBP_NUM; i++) { + struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]); + + if (*slot == bp) { + *slot = NULL; + break; + } } - /* No need to set DR6 */ - set_debugreg((kdr7 | thread->debugreg7), 7); -} + if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot")) + return; -/* - * Install the debug register values for just the kernel, no thread. - */ -void arch_uninstall_thread_hw_breakpoint(void) -{ - /* Clear the user-space portion of debugreg7 by setting only kdr7 */ - set_debugreg(kdr7, 7); + dr7 = &__get_cpu_var(dr7); + *dr7 &= ~encode_dr7(i, info->len, info->type); + set_debugreg(*dr7, 7); } static int get_hbp_len(u8 hbp_len) @@ -133,17 +159,17 @@ static int get_hbp_len(u8 hbp_len) unsigned int len_in_bytes = 0; switch (hbp_len) { - case HW_BREAKPOINT_LEN_1: + case X86_BREAKPOINT_LEN_1: len_in_bytes = 1; break; - case HW_BREAKPOINT_LEN_2: + case X86_BREAKPOINT_LEN_2: len_in_bytes = 2; break; - case HW_BREAKPOINT_LEN_4: + case X86_BREAKPOINT_LEN_4: len_in_bytes = 4; break; #ifdef CONFIG_X86_64 - case HW_BREAKPOINT_LEN_8: + case X86_BREAKPOINT_LEN_8: len_in_bytes = 8; break; #endif @@ -178,67 +204,146 @@ static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len) /* * Store a breakpoint's encoded address, length, and type. 
*/ -static int arch_store_info(struct hw_breakpoint *bp, struct task_struct *tsk) +static int arch_store_info(struct perf_event *bp) { - /* - * User-space requests will always have the address field populated - * Symbol names from user-space are rejected - */ - if (tsk && bp->info.name) - return -EINVAL; + struct arch_hw_breakpoint *info = counter_arch_bp(bp); /* * For kernel-addresses, either the address or symbol name can be * specified. */ - if (bp->info.name) - bp->info.address = (unsigned long) - kallsyms_lookup_name(bp->info.name); - if (bp->info.address) + if (info->name) + info->address = (unsigned long) + kallsyms_lookup_name(info->name); + if (info->address) return 0; + return -EINVAL; } -/* - * Validate the arch-specific HW Breakpoint register settings - */ -int arch_validate_hwbkpt_settings(struct hw_breakpoint *bp, - struct task_struct *tsk) +int arch_bp_generic_fields(int x86_len, int x86_type, + int *gen_len, int *gen_type) { - unsigned int align; - int ret = -EINVAL; - - switch (bp->info.type) { - /* - * Ptrace-refactoring code - * For now, we'll allow instruction breakpoint only for user-space - * addresses - */ - case HW_BREAKPOINT_EXECUTE: - if ((!arch_check_va_in_userspace(bp->info.address, - bp->info.len)) && - bp->info.len != HW_BREAKPOINT_LEN_EXECUTE) - return ret; + /* Len */ + switch (x86_len) { + case X86_BREAKPOINT_LEN_1: + *gen_len = HW_BREAKPOINT_LEN_1; break; - case HW_BREAKPOINT_WRITE: + case X86_BREAKPOINT_LEN_2: + *gen_len = HW_BREAKPOINT_LEN_2; break; - case HW_BREAKPOINT_RW: + case X86_BREAKPOINT_LEN_4: + *gen_len = HW_BREAKPOINT_LEN_4; break; +#ifdef CONFIG_X86_64 + case X86_BREAKPOINT_LEN_8: + *gen_len = HW_BREAKPOINT_LEN_8; + break; +#endif default: - return ret; + return -EINVAL; } - switch (bp->info.len) { + /* Type */ + switch (x86_type) { + case X86_BREAKPOINT_EXECUTE: + *gen_type = HW_BREAKPOINT_X; + break; + case X86_BREAKPOINT_WRITE: + *gen_type = HW_BREAKPOINT_W; + break; + case X86_BREAKPOINT_RW: + *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R; + break; + default: + return -EINVAL; + } + + return 0; +} + + +static int arch_build_bp_info(struct perf_event *bp) +{ + struct arch_hw_breakpoint *info = counter_arch_bp(bp); + + info->address = bp->attr.bp_addr; + + /* Len */ + switch (bp->attr.bp_len) { case HW_BREAKPOINT_LEN_1: - align = 0; + info->len = X86_BREAKPOINT_LEN_1; break; case HW_BREAKPOINT_LEN_2: - align = 1; + info->len = X86_BREAKPOINT_LEN_2; break; case HW_BREAKPOINT_LEN_4: - align = 3; + info->len = X86_BREAKPOINT_LEN_4; break; #ifdef CONFIG_X86_64 case HW_BREAKPOINT_LEN_8: + info->len = X86_BREAKPOINT_LEN_8; + break; +#endif + default: + return -EINVAL; + } + + /* Type */ + switch (bp->attr.bp_type) { + case HW_BREAKPOINT_W: + info->type = X86_BREAKPOINT_WRITE; + break; + case HW_BREAKPOINT_W | HW_BREAKPOINT_R: + info->type = X86_BREAKPOINT_RW; + break; + case HW_BREAKPOINT_X: + info->type = X86_BREAKPOINT_EXECUTE; + break; + default: + return -EINVAL; + } + + return 0; +} +/* + * Validate the arch-specific HW Breakpoint register settings + */ +int arch_validate_hwbkpt_settings(struct perf_event *bp, + struct task_struct *tsk) +{ + struct arch_hw_breakpoint *info = counter_arch_bp(bp); + unsigned int align; + int ret; + + + ret = arch_build_bp_info(bp); + if (ret) + return ret; + + ret = -EINVAL; + + if (info->type == X86_BREAKPOINT_EXECUTE) + /* + * Ptrace-refactoring code + * For now, we'll allow instruction breakpoint only for user-space + * addresses + */ + if ((!arch_check_va_in_userspace(info->address, info->len)) && + 
info->len != X86_BREAKPOINT_EXECUTE) + return ret; + + switch (info->len) { + case X86_BREAKPOINT_LEN_1: + align = 0; + break; + case X86_BREAKPOINT_LEN_2: + align = 1; + break; + case X86_BREAKPOINT_LEN_4: + align = 3; + break; +#ifdef CONFIG_X86_64 + case X86_BREAKPOINT_LEN_8: align = 7; break; #endif @@ -246,8 +351,8 @@ int arch_validate_hwbkpt_settings(struct hw_breakpoint *bp, return ret; } - if (bp->triggered) - ret = arch_store_info(bp, tsk); + if (bp->callback) + ret = arch_store_info(bp); if (ret < 0) return ret; @@ -255,45 +360,48 @@ int arch_validate_hwbkpt_settings(struct hw_breakpoint *bp, * Check that the low-order bits of the address are appropriate * for the alignment implied by len. */ - if (bp->info.address & align) + if (info->address & align) return -EINVAL; /* Check that the virtual address is in the proper range */ if (tsk) { - if (!arch_check_va_in_userspace(bp->info.address, bp->info.len)) + if (!arch_check_va_in_userspace(info->address, info->len)) return -EFAULT; } else { - if (!arch_check_va_in_kernelspace(bp->info.address, - bp->info.len)) + if (!arch_check_va_in_kernelspace(info->address, info->len)) return -EFAULT; } + return 0; } -void arch_update_user_hw_breakpoint(int pos, struct task_struct *tsk) -{ - struct thread_struct *thread = &(tsk->thread); - struct hw_breakpoint *bp = thread->hbp[pos]; - - thread->debugreg7 &= ~dr7_masks[pos]; - if (bp) { - thread->debugreg[pos] = bp->info.address; - thread->debugreg7 |= encode_dr7(pos, bp->info.len, - bp->info.type); - } else - thread->debugreg[pos] = 0; -} - -void arch_flush_thread_hw_breakpoint(struct task_struct *tsk) +/* + * Release the user breakpoints used by ptrace + */ +void flush_ptrace_hw_breakpoint(struct task_struct *tsk) { int i; - struct thread_struct *thread = &(tsk->thread); + struct thread_struct *t = &tsk->thread; - thread->debugreg7 = 0; - for (i = 0; i < HBP_NUM; i++) - thread->debugreg[i] = 0; + for (i = 0; i < HBP_NUM; i++) { + unregister_hw_breakpoint(t->ptrace_bps[i]); + t->ptrace_bps[i] = NULL; + } } +#ifdef CONFIG_KVM +void hw_breakpoint_restore(void) +{ + set_debugreg(__get_cpu_var(cpu_debugreg[0]), 0); + set_debugreg(__get_cpu_var(cpu_debugreg[1]), 1); + set_debugreg(__get_cpu_var(cpu_debugreg[2]), 2); + set_debugreg(__get_cpu_var(cpu_debugreg[3]), 3); + set_debugreg(current->thread.debugreg6, 6); + set_debugreg(__get_cpu_var(dr7), 7); +} +EXPORT_SYMBOL_GPL(hw_breakpoint_restore); +#endif + /* * Handle debug exception notifications. * @@ -313,7 +421,7 @@ void arch_flush_thread_hw_breakpoint(struct task_struct *tsk) static int __kprobes hw_breakpoint_handler(struct die_args *args) { int i, cpu, rc = NOTIFY_STOP; - struct hw_breakpoint *bp; + struct perf_event *bp; unsigned long dr7, dr6; unsigned long *dr6_p; @@ -325,10 +433,6 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args) if ((dr6 & DR_TRAP_BITS) == 0) return NOTIFY_DONE; - /* Lazy debug register switching */ - if (!test_tsk_thread_flag(current, TIF_DEBUG)) - arch_uninstall_thread_hw_breakpoint(); - get_debugreg(dr7, 7); /* Disable breakpoints during exception handling */ set_debugreg(0UL, 7); @@ -344,17 +448,18 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args) for (i = 0; i < HBP_NUM; ++i) { if (likely(!(dr6 & (DR_TRAP0 << i)))) continue; + /* - * Find the corresponding hw_breakpoint structure and - * invoke its triggered callback. + * The counter may be concurrently released but that can only + * occur from a call_rcu() path. 
We can then safely fetch + * the breakpoint, use its callback, touch its counter + * while we are in an rcu_read_lock() path. */ - if (i >= hbp_kernel_pos) - bp = per_cpu(this_hbp_kernel[i], cpu); - else { - bp = current->thread.hbp[i]; - if (bp) - rc = NOTIFY_DONE; - } + rcu_read_lock(); + + bp = per_cpu(bp_per_reg[i], cpu); + if (bp) + rc = NOTIFY_DONE; /* * Reset the 'i'th TRAP bit in dr6 to denote completion of * exception handling @@ -362,19 +467,23 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args) (*dr6_p) &= ~(DR_TRAP0 << i); /* * bp can be NULL due to lazy debug register switching - * or due to the delay between updates of hbp_kernel_pos - * and this_hbp_kernel. + * or due to concurrent perf counter removing. */ - if (!bp) - continue; + if (!bp) { + rcu_read_unlock(); + break; + } - (bp->triggered)(bp, args->regs); + (bp->callback)(bp, args->regs); + + rcu_read_unlock(); } if (dr6 & (~DR_TRAP_BITS)) rc = NOTIFY_DONE; set_debugreg(dr7, 7); put_cpu(); + return rc; } @@ -389,3 +498,13 @@ int __kprobes hw_breakpoint_exceptions_notify( return hw_breakpoint_handler(data); } + +void hw_breakpoint_pmu_read(struct perf_event *bp) +{ + /* TODO */ +} + +void hw_breakpoint_pmu_unthrottle(struct perf_event *bp) +{ + /* TODO */ +} diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index cf8ee001630..744508e7cfd 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -18,7 +19,6 @@ #include #include #include -#include unsigned long idle_halt; EXPORT_SYMBOL(idle_halt); @@ -47,8 +47,6 @@ void free_thread_xstate(struct task_struct *tsk) kmem_cache_free(task_xstate_cachep, tsk->thread.xstate); tsk->thread.xstate = NULL; } - if (unlikely(test_tsk_thread_flag(tsk, TIF_DEBUG))) - flush_thread_hw_breakpoint(tsk); WARN(tsk->thread.ds_ctx, "leaking DS context\n"); } @@ -107,8 +105,7 @@ void flush_thread(void) } #endif - if (unlikely(test_tsk_thread_flag(tsk, TIF_DEBUG))) - flush_thread_hw_breakpoint(tsk); + flush_ptrace_hw_breakpoint(tsk); memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); /* * Forget coprocessor state.. 
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 209e7480176..d5bd3132ee7 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -59,7 +59,6 @@ #include #include #include -#include asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); @@ -264,9 +263,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, p->thread.io_bitmap_ptr = NULL; tsk = current; err = -ENOMEM; - if (unlikely(test_tsk_thread_flag(tsk, TIF_DEBUG))) - if (copy_thread_hw_breakpoint(tsk, p, clone_flags)) - goto out; + + memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) { p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr, @@ -287,13 +285,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, err = do_set_thread_area(p, -1, (struct user_desc __user *)childregs->si, 0); -out: if (err && p->thread.io_bitmap_ptr) { kfree(p->thread.io_bitmap_ptr); p->thread.io_bitmap_max = 0; } - if (err) - flush_thread_hw_breakpoint(p); clear_tsk_thread_flag(p, TIF_DS_AREA_MSR); p->thread.ds_ctx = NULL; @@ -437,23 +432,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) lazy_load_gs(next->gs); percpu_write(current_task, next_p); - /* - * There's a problem with moving the arch_install_thread_hw_breakpoint() - * call before current is updated. Suppose a kernel breakpoint is - * triggered in between the two, the hw-breakpoint handler will see that - * the 'current' task does not have TIF_DEBUG flag set and will think it - * is leftover from an old task (lazy switching) and will erase it. Then - * until the next context switch, no user-breakpoints will be installed. - * - * The real problem is that it's impossible to update both current and - * physical debug registers at the same instant, so there will always be - * a window in which they disagree and a breakpoint might get triggered. - * Since we use lazy switching, we are forced to assume that a - * disagreement means that current is correct and the exception is due - * to lazy debug register switching. 
- */ - if (unlikely(test_tsk_thread_flag(next_p, TIF_DEBUG))) - arch_install_thread_hw_breakpoint(next_p); return prev_p; } diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 72edac026a7..5bafdec3444 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -53,7 +53,6 @@ #include #include #include -#include asmlinkage extern void ret_from_fork(void); @@ -244,8 +243,6 @@ void release_thread(struct task_struct *dead_task) BUG(); } } - if (unlikely(dead_task->thread.debugreg7)) - flush_thread_hw_breakpoint(dead_task); } static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr) @@ -309,9 +306,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, savesegment(ds, p->thread.ds); err = -ENOMEM; - if (unlikely(test_tsk_thread_flag(me, TIF_DEBUG))) - if (copy_thread_hw_breakpoint(me, p, clone_flags)) - goto out; + memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) { p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL); @@ -351,8 +346,6 @@ out: kfree(p->thread.io_bitmap_ptr); p->thread.io_bitmap_max = 0; } - if (err) - flush_thread_hw_breakpoint(p); return err; } @@ -508,23 +501,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) */ if (preload_fpu) __math_state_restore(); - /* - * There's a problem with moving the arch_install_thread_hw_breakpoint() - * call before current is updated. Suppose a kernel breakpoint is - * triggered in between the two, the hw-breakpoint handler will see that - * the 'current' task does not have TIF_DEBUG flag set and will think it - * is leftover from an old task (lazy switching) and will erase it. Then - * until the next context switch, no user-breakpoints will be installed. - * - * The real problem is that it's impossible to update both current and - * physical debug registers at the same instant, so there will always be - * a window in which they disagree and a breakpoint might get triggered. - * Since we use lazy switching, we are forced to assume that a - * disagreement means that current is correct and the exception is due - * to lazy debug register switching. - */ - if (unlikely(test_tsk_thread_flag(next_p, TIF_DEBUG))) - arch_install_thread_hw_breakpoint(next_p); return prev_p; } diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 267cb85b479..e79610d9597 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -22,6 +22,8 @@ #include #include #include +#include +#include #include #include @@ -441,54 +443,59 @@ static int genregs_set(struct task_struct *target, return ret; } -/* - * Decode the length and type bits for a particular breakpoint as - * stored in debug register 7. Return the "enabled" status. - */ -static int decode_dr7(unsigned long dr7, int bpnum, unsigned *len, - unsigned *type) +static void ptrace_triggered(struct perf_event *bp, void *data) { - int bp_info = dr7 >> (DR_CONTROL_SHIFT + bpnum * DR_CONTROL_SIZE); - - *len = (bp_info & 0xc) | 0x40; - *type = (bp_info & 0x3) | 0x80; - return (dr7 >> (bpnum * DR_ENABLE_SIZE)) & 0x3; -} - -static void ptrace_triggered(struct hw_breakpoint *bp, struct pt_regs *regs) -{ - struct thread_struct *thread = &(current->thread); int i; + struct thread_struct *thread = &(current->thread); /* * Store in the virtual DR6 register the fact that the breakpoint * was hit so the thread's debugger will see it. 
*/ - for (i = 0; i < hbp_kernel_pos; i++) - /* - * We will check bp->info.address against the address stored in - * thread's hbp structure and not debugreg[i]. This is to ensure - * that the corresponding bit for 'i' in DR7 register is enabled - */ - if (bp->info.address == thread->hbp[i]->info.address) + for (i = 0; i < HBP_NUM; i++) { + if (thread->ptrace_bps[i] == bp) break; + } thread->debugreg6 |= (DR_TRAP0 << i); } +/* + * Walk through every ptrace breakpoints for this thread and + * build the dr7 value on top of their attributes. + * + */ +static unsigned long ptrace_get_dr7(struct perf_event *bp[]) +{ + int i; + int dr7 = 0; + struct arch_hw_breakpoint *info; + + for (i = 0; i < HBP_NUM; i++) { + if (bp[i] && !bp[i]->attr.disabled) { + info = counter_arch_bp(bp[i]); + dr7 |= encode_dr7(i, info->len, info->type); + } + } + + return dr7; +} + /* * Handle ptrace writes to debug register 7. */ static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data) { struct thread_struct *thread = &(tsk->thread); - unsigned long old_dr7 = thread->debugreg7; + unsigned long old_dr7; int i, orig_ret = 0, rc = 0; int enabled, second_pass = 0; unsigned len, type; - struct hw_breakpoint *bp; + int gen_len, gen_type; + struct perf_event *bp; data &= ~DR_CONTROL_RESERVED; + old_dr7 = ptrace_get_dr7(thread->ptrace_bps); restore: /* * Loop through all the hardware breakpoints, making the @@ -496,11 +503,12 @@ restore: */ for (i = 0; i < HBP_NUM; i++) { enabled = decode_dr7(data, i, &len, &type); - bp = thread->hbp[i]; + bp = thread->ptrace_bps[i]; if (!enabled) { if (bp) { - /* Don't unregister the breakpoints right-away, + /* + * Don't unregister the breakpoints right-away, * unless all register_user_hw_breakpoint() * requests have succeeded. This prevents * any window of opportunity for debug @@ -508,27 +516,45 @@ restore: */ if (!second_pass) continue; - unregister_user_hw_breakpoint(tsk, bp); - kfree(bp); + thread->ptrace_bps[i] = NULL; + unregister_hw_breakpoint(bp); } continue; } + + /* + * We shoud have at least an inactive breakpoint at this + * slot. 
It means the user is writing dr7 without having + * written the address register first + */ if (!bp) { - rc = -ENOMEM; - bp = kzalloc(sizeof(struct hw_breakpoint), GFP_KERNEL); - if (bp) { - bp->info.address = thread->debugreg[i]; - bp->triggered = ptrace_triggered; - bp->info.len = len; - bp->info.type = type; - rc = register_user_hw_breakpoint(tsk, bp); - if (rc) - kfree(bp); - } - } else - rc = modify_user_hw_breakpoint(tsk, bp); + rc = -EINVAL; + break; + } + + rc = arch_bp_generic_fields(len, type, &gen_len, &gen_type); if (rc) break; + + /* + * This is a temporary thing as bp is unregistered/registered + * to simulate modification + */ + bp = modify_user_hw_breakpoint(bp, bp->attr.bp_addr, gen_len, + gen_type, bp->callback, + tsk, true); + thread->ptrace_bps[i] = NULL; + + if (!bp) { /* incorrect bp, or we have a bug in bp API */ + rc = -EINVAL; + break; + } + if (IS_ERR(bp)) { + rc = PTR_ERR(bp); + bp = NULL; + break; + } + thread->ptrace_bps[i] = bp; } /* * Make a second pass to free the remaining unused breakpoints @@ -553,15 +579,63 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) struct thread_struct *thread = &(tsk->thread); unsigned long val = 0; - if (n < HBP_NUM) - val = thread->debugreg[n]; - else if (n == 6) + if (n < HBP_NUM) { + struct perf_event *bp; + bp = thread->ptrace_bps[n]; + if (!bp) + return 0; + val = bp->hw.info.address; + } else if (n == 6) { val = thread->debugreg6; - else if (n == 7) - val = thread->debugreg7; + } else if (n == 7) { + val = ptrace_get_dr7(thread->ptrace_bps); + } return val; } +static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, + unsigned long addr) +{ + struct perf_event *bp; + struct thread_struct *t = &tsk->thread; + + if (!t->ptrace_bps[nr]) { + /* + * Put stub len and type to register (reserve) an inactive but + * correct bp + */ + bp = register_user_hw_breakpoint(addr, HW_BREAKPOINT_LEN_1, + HW_BREAKPOINT_W, + ptrace_triggered, tsk, + false); + } else { + bp = t->ptrace_bps[nr]; + t->ptrace_bps[nr] = NULL; + bp = modify_user_hw_breakpoint(bp, addr, bp->attr.bp_len, + bp->attr.bp_type, + bp->callback, + tsk, + bp->attr.disabled); + } + + if (!bp) + return -EIO; + /* + * CHECKME: the previous code returned -EIO if the addr wasn't a + * valid task virtual addr. The new one will return -EINVAL in this + * case. + * -EINVAL may be what we want for in-kernel breakpoints users, but + * -EIO looks better for ptrace, since we refuse a register writing + * for the user. And anyway this is the previous behaviour. + */ + if (IS_ERR(bp)) + return PTR_ERR(bp); + + t->ptrace_bps[nr] = bp; + + return 0; +} + /* * Handle PTRACE_POKEUSR calls for the debug register area. 
*/ @@ -575,19 +649,13 @@ int ptrace_set_debugreg(struct task_struct *tsk, int n, unsigned long val) return -EIO; if (n == 6) { - tsk->thread.debugreg6 = val; + thread->debugreg6 = val; goto ret_path; } if (n < HBP_NUM) { - if (thread->hbp[n]) { - if (arch_check_va_in_userspace(val, - thread->hbp[n]->info.len) == 0) { - rc = -EIO; - goto ret_path; - } - thread->hbp[n]->info.address = val; - } - thread->debugreg[n] = val; + rc = ptrace_set_breakpoint_addr(tsk, n, val); + if (rc) + return rc; } /* All that's left is DR7 */ if (n == 7) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 213a7a3e456..565ebc65920 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -64,7 +64,6 @@ #include #include #include -#include #include #include @@ -328,7 +327,6 @@ notrace static void __cpuinit start_secondary(void *unused) x86_cpuinit.setup_percpu_clockev(); wmb(); - load_debug_registers(); cpu_idle(); } @@ -1269,7 +1267,6 @@ void cpu_disable_common(void) remove_cpu_from_maps(cpu); unlock_vector_lock(); fixup_irqs(); - hw_breakpoint_disable(); } int native_cpu_disable(void) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index fc2974adf9b..22dee7aa781 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -42,6 +42,7 @@ #define CREATE_TRACE_POINTS #include "trace.h" +#include #include #include #include @@ -3643,14 +3644,15 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) trace_kvm_entry(vcpu->vcpu_id); kvm_x86_ops->run(vcpu, kvm_run); - if (unlikely(vcpu->arch.switch_db_regs || test_thread_flag(TIF_DEBUG))) { - set_debugreg(current->thread.debugreg[0], 0); - set_debugreg(current->thread.debugreg[1], 1); - set_debugreg(current->thread.debugreg[2], 2); - set_debugreg(current->thread.debugreg[3], 3); - set_debugreg(current->thread.debugreg6, 6); - set_debugreg(current->thread.debugreg7, 7); - } + /* + * If the guest has used debug registers, at least dr7 + * will be disabled while returning to the host. + * If we don't have active breakpoints in the host, we don't + * care about the messed up debug address registers. But if + * we have some of them active, restore the old state. + */ + if (__get_cpu_var(dr7) & DR_GLOBAL_ENABLE_MASK) + hw_breakpoint_restore(); set_bit(KVM_REQ_KICK, &vcpu->requests); local_irq_enable(); diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index e09a44fc466..0a979f3e5b8 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -105,7 +105,6 @@ static void __save_processor_state(struct saved_context *ctxt) ctxt->cr4 = read_cr4(); ctxt->cr8 = read_cr8(); #endif - hw_breakpoint_disable(); } /* Needed by apm.c */ @@ -144,11 +143,6 @@ static void fix_processor_context(void) #endif load_TR_desc(); /* This does ltr */ load_LDT(¤t->active_mm->context); /* This does lldt */ - - /* - * Now maybe reload the debug registers - */ - load_debug_registers(); } /** diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h index 61ccc8f17ea..7eba9b92e5f 100644 --- a/include/linux/hw_breakpoint.h +++ b/include/linux/hw_breakpoint.h @@ -1,136 +1,131 @@ #ifndef _LINUX_HW_BREAKPOINT_H #define _LINUX_HW_BREAKPOINT_H +#include -#ifdef __KERNEL__ -#include -#include -#include - -/** - * struct hw_breakpoint - unified kernel/user-space hardware breakpoint - * @triggered: callback invoked after target address access - * @info: arch-specific breakpoint info (address, length, and type) - * - * %hw_breakpoint structures are the kernel's way of representing - * hardware breakpoints. 
These are data breakpoints - * (also known as "watchpoints", triggered on data access), and the breakpoint's - * target address can be located in either kernel space or user space. - * - * The breakpoint's address, length, and type are highly - * architecture-specific. The values are encoded in the @info field; you - * specify them when registering the breakpoint. To examine the encoded - * values use hw_breakpoint_get_{kaddress,uaddress,len,type}(), declared - * below. - * - * The address is specified as a regular kernel pointer (for kernel-space - * breakponts) or as an %__user pointer (for user-space breakpoints). - * With register_user_hw_breakpoint(), the address must refer to a - * location in user space. The breakpoint will be active only while the - * requested task is running. Conversely with - * register_kernel_hw_breakpoint(), the address must refer to a location - * in kernel space, and the breakpoint will be active on all CPUs - * regardless of the current task. - * - * The length is the breakpoint's extent in bytes, which is subject to - * certain limitations. include/asm/hw_breakpoint.h contains macros - * defining the available lengths for a specific architecture. Note that - * the address's alignment must match the length. The breakpoint will - * catch accesses to any byte in the range from address to address + - * (length - 1). - * - * The breakpoint's type indicates the sort of access that will cause it - * to trigger. Possible values may include: - * - * %HW_BREAKPOINT_RW (triggered on read or write access), - * %HW_BREAKPOINT_WRITE (triggered on write access), and - * %HW_BREAKPOINT_READ (triggered on read access). - * - * Appropriate macros are defined in include/asm/hw_breakpoint.h; not all - * possibilities are available on all architectures. Execute breakpoints - * must have length equal to the special value %HW_BREAKPOINT_LEN_EXECUTE. - * - * When a breakpoint gets hit, the @triggered callback is - * invoked in_interrupt with a pointer to the %hw_breakpoint structure and the - * processor registers. - * Data breakpoints occur after the memory access has taken place. - * Breakpoints are disabled during execution @triggered, to avoid - * recursive traps and allow unhindered access to breakpointed memory. - * - * This sample code sets a breakpoint on pid_max and registers a callback - * function for writes to that variable. Note that it is not portable - * as written, because not all architectures support HW_BREAKPOINT_LEN_4. - * - * ---------------------------------------------------------------------- - * - * #include - * - * struct hw_breakpoint my_bp; - * - * static void my_triggered(struct hw_breakpoint *bp, struct pt_regs *regs) - * { - * printk(KERN_DEBUG "Inside triggered routine of breakpoint exception\n"); - * dump_stack(); - * ............... - * } - * - * static struct hw_breakpoint my_bp; - * - * static int init_module(void) - * { - * ...................... - * my_bp.info.type = HW_BREAKPOINT_WRITE; - * my_bp.info.len = HW_BREAKPOINT_LEN_4; - * - * my_bp.installed = (void *)my_bp_installed; - * - * rc = register_kernel_hw_breakpoint(&my_bp); - * ...................... - * } - * - * static void cleanup_module(void) - * { - * ...................... - * unregister_kernel_hw_breakpoint(&my_bp); - * ...................... 
- * } - * - * ---------------------------------------------------------------------- - */ -struct hw_breakpoint { - void (*triggered)(struct hw_breakpoint *, struct pt_regs *); - struct arch_hw_breakpoint info; +enum { + HW_BREAKPOINT_LEN_1 = 1, + HW_BREAKPOINT_LEN_2 = 2, + HW_BREAKPOINT_LEN_4 = 4, + HW_BREAKPOINT_LEN_8 = 8, }; -/* - * len and type values are defined in include/asm/hw_breakpoint.h. - * Available values vary according to the architecture. On i386 the - * possibilities are: - * - * HW_BREAKPOINT_LEN_1 - * HW_BREAKPOINT_LEN_2 - * HW_BREAKPOINT_LEN_4 - * HW_BREAKPOINT_RW - * HW_BREAKPOINT_READ - * - * On other architectures HW_BREAKPOINT_LEN_8 may be available, and the - * 1-, 2-, and 4-byte lengths may be unavailable. There also may be - * HW_BREAKPOINT_WRITE. You can use #ifdef to check at compile time. - */ +enum { + HW_BREAKPOINT_R = 1, + HW_BREAKPOINT_W = 2, + HW_BREAKPOINT_X = 4, +}; + +static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp) +{ + return &bp->hw.info; +} + +static inline unsigned long hw_breakpoint_addr(struct perf_event *bp) +{ + return bp->attr.bp_addr; +} + +static inline int hw_breakpoint_type(struct perf_event *bp) +{ + return bp->attr.bp_type; +} + +static inline int hw_breakpoint_len(struct perf_event *bp) +{ + return bp->attr.bp_len; +} + +#ifdef CONFIG_HAVE_HW_BREAKPOINT +extern struct perf_event * +register_user_hw_breakpoint(unsigned long addr, + int len, + int type, + perf_callback_t triggered, + struct task_struct *tsk, + bool active); + +/* FIXME: only change from the attr, and don't unregister */ +extern struct perf_event * +modify_user_hw_breakpoint(struct perf_event *bp, + unsigned long addr, + int len, + int type, + perf_callback_t triggered, + struct task_struct *tsk, + bool active); -extern int register_user_hw_breakpoint(struct task_struct *tsk, - struct hw_breakpoint *bp); -extern int modify_user_hw_breakpoint(struct task_struct *tsk, - struct hw_breakpoint *bp); -extern void unregister_user_hw_breakpoint(struct task_struct *tsk, - struct hw_breakpoint *bp); /* * Kernel breakpoints are not associated with any particular thread. 
*/ -extern int register_kernel_hw_breakpoint(struct hw_breakpoint *bp); -extern void unregister_kernel_hw_breakpoint(struct hw_breakpoint *bp); +extern struct perf_event * +register_wide_hw_breakpoint_cpu(unsigned long addr, + int len, + int type, + perf_callback_t triggered, + int cpu, + bool active); -extern unsigned int hbp_kernel_pos; +extern struct perf_event ** +register_wide_hw_breakpoint(unsigned long addr, + int len, + int type, + perf_callback_t triggered, + bool active); -#endif /* __KERNEL__ */ -#endif /* _LINUX_HW_BREAKPOINT_H */ +extern int register_perf_hw_breakpoint(struct perf_event *bp); +extern int __register_perf_hw_breakpoint(struct perf_event *bp); +extern void unregister_hw_breakpoint(struct perf_event *bp); +extern void unregister_wide_hw_breakpoint(struct perf_event **cpu_events); + +extern int reserve_bp_slot(struct perf_event *bp); +extern void release_bp_slot(struct perf_event *bp); + +extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk); + +#else /* !CONFIG_HAVE_HW_BREAKPOINT */ + +static inline struct perf_event * +register_user_hw_breakpoint(unsigned long addr, + int len, + int type, + perf_callback_t triggered, + struct task_struct *tsk, + bool active) { return NULL; } +static inline struct perf_event * +modify_user_hw_breakpoint(struct perf_event *bp, + unsigned long addr, + int len, + int type, + perf_callback_t triggered, + struct task_struct *tsk, + bool active) { return NULL; } +static inline struct perf_event * +register_wide_hw_breakpoint_cpu(unsigned long addr, + int len, + int type, + perf_callback_t triggered, + int cpu, + bool active) { return NULL; } +static inline struct perf_event ** +register_wide_hw_breakpoint(unsigned long addr, + int len, + int type, + perf_callback_t triggered, + bool active) { return NULL; } +static inline int +register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } +static inline int +__register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } +static inline void unregister_hw_breakpoint(struct perf_event *bp) { } +static inline void +unregister_wide_hw_breakpoint(struct perf_event **cpu_events) { } +static inline int +reserve_bp_slot(struct perf_event *bp) {return -ENOSYS; } +static inline void release_bp_slot(struct perf_event *bp) { } + +static inline void flush_ptrace_hw_breakpoint(struct task_struct *tsk) { } + +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ + +#endif /* _LINUX_HW_BREAKPOINT_H */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 8d54e6d25ee..cead64ea6c1 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -18,6 +18,10 @@ #include #include +#ifdef CONFIG_HAVE_HW_BREAKPOINT +#include +#endif + /* * User-space ABI bits: */ @@ -31,6 +35,7 @@ enum perf_type_id { PERF_TYPE_TRACEPOINT = 2, PERF_TYPE_HW_CACHE = 3, PERF_TYPE_RAW = 4, + PERF_TYPE_BREAKPOINT = 5, PERF_TYPE_MAX, /* non-ABI */ }; @@ -207,6 +212,15 @@ struct perf_event_attr { __u32 wakeup_events; /* wakeup every n events */ __u32 wakeup_watermark; /* bytes before wakeup */ }; + + union { + struct { /* Hardware breakpoint info */ + __u64 bp_addr; + __u32 bp_type; + __u32 bp_len; + }; + }; + __u32 __reserved_2; __u64 __reserved_3; @@ -476,6 +490,11 @@ struct hw_perf_event { atomic64_t count; struct hrtimer hrtimer; }; +#ifdef CONFIG_HAVE_HW_BREAKPOINT + union { /* breakpoint */ + struct arch_hw_breakpoint info; + }; +#endif }; atomic64_t prev_count; u64 sample_period; @@ -588,7 +607,7 @@ struct perf_event { u64 tstamp_running; u64 tstamp_stopped; - struct perf_event_attr 
attr; + struct perf_event_attr attr; struct hw_perf_event hw; struct perf_event_context *ctx; @@ -643,6 +662,8 @@ struct perf_event { perf_callback_t callback; + perf_callback_t event_callback; + #endif /* CONFIG_PERF_EVENTS */ }; @@ -831,6 +852,7 @@ extern int sysctl_perf_event_sample_rate; extern void perf_event_init(void); extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, int entry_size); +extern void perf_bp_event(struct perf_event *event, void *data); #ifndef perf_misc_flags #define perf_misc_flags(regs) (user_mode(regs) ? PERF_RECORD_MISC_USER : \ @@ -865,6 +887,8 @@ static inline int perf_event_task_enable(void) { return -EINVAL; } static inline void perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) { } +static inline void +perf_bp_event(struct perf_event *event, void *data) { } static inline void perf_event_mmap(struct vm_area_struct *vma) { } static inline void perf_event_comm(struct task_struct *tsk) { } diff --git a/kernel/exit.c b/kernel/exit.c index e61891f8012..266f8920628 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include @@ -979,6 +980,10 @@ NORET_TYPE void do_exit(long code) proc_exit_connector(tsk); + /* + * FIXME: do that only when needed, using sched_exit tracepoint + */ + flush_ptrace_hw_breakpoint(tsk); /* * Flush inherited counters to the parent - before the parent * gets woken up by child-exit notifications. diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index c1f64e65a9f..08f6d016320 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c @@ -15,6 +15,7 @@ * * Copyright (C) 2007 Alan Stern * Copyright (C) IBM Corporation, 2009 + * Copyright (C) 2009, Frederic Weisbecker */ /* @@ -35,334 +36,242 @@ #include #include -#include +#include + #include #ifdef CONFIG_X86 #include #endif -/* - * Spinlock that protects all (un)register operations over kernel/user-space - * breakpoint requests - */ -static DEFINE_SPINLOCK(hw_breakpoint_lock); -/* Array of kernel-space breakpoint structures */ -struct hw_breakpoint *hbp_kernel[HBP_NUM]; +static atomic_t bp_slot; -/* - * Per-processor copy of hbp_kernel[]. Used only when hbp_kernel is being - * modified but we need the older copy to handle any hbp exceptions. It will - * sync with hbp_kernel[] value after updation is done through IPIs. - */ -DEFINE_PER_CPU(struct hw_breakpoint*, this_hbp_kernel[HBP_NUM]); - -/* - * Kernel breakpoints grow downwards, starting from HBP_NUM - * 'hbp_kernel_pos' denotes lowest numbered breakpoint register occupied for - * kernel-space request. We will initialise it here and not in an __init - * routine because load_debug_registers(), which uses this variable can be - * called very early during CPU initialisation. - */ -unsigned int hbp_kernel_pos = HBP_NUM; - -/* - * An array containing refcount of threads using a given bkpt register - * Accesses are synchronised by acquiring hw_breakpoint_lock - */ -unsigned int hbp_user_refcount[HBP_NUM]; - -/* - * Load the debug registers during startup of a CPU. 
- */ -void load_debug_registers(void) +int reserve_bp_slot(struct perf_event *bp) { - unsigned long flags; - struct task_struct *tsk = current; + if (atomic_inc_return(&bp_slot) == HBP_NUM) { + atomic_dec(&bp_slot); - spin_lock_bh(&hw_breakpoint_lock); - - /* Prevent IPIs for new kernel breakpoint updates */ - local_irq_save(flags); - arch_update_kernel_hw_breakpoint(NULL); - local_irq_restore(flags); - - if (test_tsk_thread_flag(tsk, TIF_DEBUG)) - arch_install_thread_hw_breakpoint(tsk); - - spin_unlock_bh(&hw_breakpoint_lock); -} - -/* - * Erase all the hardware breakpoint info associated with a thread. - * - * If tsk != current then tsk must not be usable (for example, a - * child being cleaned up from a failed fork). - */ -void flush_thread_hw_breakpoint(struct task_struct *tsk) -{ - int i; - struct thread_struct *thread = &(tsk->thread); - - spin_lock_bh(&hw_breakpoint_lock); - - /* The thread no longer has any breakpoints associated with it */ - clear_tsk_thread_flag(tsk, TIF_DEBUG); - for (i = 0; i < HBP_NUM; i++) { - if (thread->hbp[i]) { - hbp_user_refcount[i]--; - kfree(thread->hbp[i]); - thread->hbp[i] = NULL; - } + return -ENOSPC; } - arch_flush_thread_hw_breakpoint(tsk); - - /* Actually uninstall the breakpoints if necessary */ - if (tsk == current) - arch_uninstall_thread_hw_breakpoint(); - spin_unlock_bh(&hw_breakpoint_lock); -} - -/* - * Copy the hardware breakpoint info from a thread to its cloned child. - */ -int copy_thread_hw_breakpoint(struct task_struct *tsk, - struct task_struct *child, unsigned long clone_flags) -{ - /* - * We will assume that breakpoint settings are not inherited - * and the child starts out with no debug registers set. - * But what about CLONE_PTRACE? - */ - clear_tsk_thread_flag(child, TIF_DEBUG); - - /* We will call flush routine since the debugregs are not inherited */ - arch_flush_thread_hw_breakpoint(child); - return 0; } -static int __register_user_hw_breakpoint(int pos, struct task_struct *tsk, - struct hw_breakpoint *bp) +void release_bp_slot(struct perf_event *bp) { - struct thread_struct *thread = &(tsk->thread); - int rc; + atomic_dec(&bp_slot); +} - /* Do not overcommit. Fail if kernel has used the hbp registers */ - if (pos >= hbp_kernel_pos) - return -ENOSPC; +int __register_perf_hw_breakpoint(struct perf_event *bp) +{ + int ret; - rc = arch_validate_hwbkpt_settings(bp, tsk); - if (rc) - return rc; + ret = reserve_bp_slot(bp); + if (ret) + return ret; - thread->hbp[pos] = bp; - hbp_user_refcount[pos]++; + if (!bp->attr.disabled) + ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task); - arch_update_user_hw_breakpoint(pos, tsk); - /* - * Does it need to be installed right now? - * Otherwise it will get installed the next time tsk runs - */ - if (tsk == current) - arch_install_thread_hw_breakpoint(tsk); + return ret; +} - return rc; +int register_perf_hw_breakpoint(struct perf_event *bp) +{ + bp->callback = perf_bp_event; + + return __register_perf_hw_breakpoint(bp); } /* - * Modify the address of a hbp register already in use by the task - * Do not invoke this in-lieu of a __unregister_user_hw_breakpoint() + * Register a breakpoint bound to a task and a given cpu. + * If cpu is -1, the breakpoint is active for the task in every cpu + * If the task is -1, the breakpoint is active for every tasks in the given + * cpu. 
*/ -static int __modify_user_hw_breakpoint(int pos, struct task_struct *tsk, - struct hw_breakpoint *bp) +static struct perf_event * +register_user_hw_breakpoint_cpu(unsigned long addr, + int len, + int type, + perf_callback_t triggered, + pid_t pid, + int cpu, + bool active) { - struct thread_struct *thread = &(tsk->thread); + struct perf_event_attr *attr; + struct perf_event *bp; - if ((pos >= hbp_kernel_pos) || (arch_validate_hwbkpt_settings(bp, tsk))) - return -EINVAL; + attr = kzalloc(sizeof(*attr), GFP_KERNEL); + if (!attr) + return ERR_PTR(-ENOMEM); - if (thread->hbp[pos] == NULL) - return -EINVAL; - - thread->hbp[pos] = bp; + attr->type = PERF_TYPE_BREAKPOINT; + attr->size = sizeof(*attr); + attr->bp_addr = addr; + attr->bp_len = len; + attr->bp_type = type; /* - * 'pos' must be that of a hbp register already used by 'tsk' - * Otherwise arch_modify_user_hw_breakpoint() will fail + * Such breakpoints are used by debuggers to trigger signals when + * we hit the excepted memory op. We can't miss such events, they + * must be pinned. */ - arch_update_user_hw_breakpoint(pos, tsk); + attr->pinned = 1; - if (tsk == current) - arch_install_thread_hw_breakpoint(tsk); + if (!active) + attr->disabled = 1; - return 0; -} + bp = perf_event_create_kernel_counter(attr, cpu, pid, triggered); + kfree(attr); -static void __unregister_user_hw_breakpoint(int pos, struct task_struct *tsk) -{ - hbp_user_refcount[pos]--; - tsk->thread.hbp[pos] = NULL; - - arch_update_user_hw_breakpoint(pos, tsk); - - if (tsk == current) - arch_install_thread_hw_breakpoint(tsk); + return bp; } /** * register_user_hw_breakpoint - register a hardware breakpoint for user space + * @addr: is the memory address that triggers the breakpoint + * @len: the length of the access to the memory (1 byte, 2 bytes etc...) + * @type: the type of the access to the memory (read/write/exec) + * @triggered: callback to trigger when we hit the breakpoint * @tsk: pointer to 'task_struct' of the process to which the address belongs - * @bp: the breakpoint structure to register - * - * @bp.info->name or @bp.info->address, @bp.info->len, @bp.info->type and - * @bp->triggered must be set properly before invocation + * @active: should we activate it while registering it * */ -int register_user_hw_breakpoint(struct task_struct *tsk, - struct hw_breakpoint *bp) +struct perf_event * +register_user_hw_breakpoint(unsigned long addr, + int len, + int type, + perf_callback_t triggered, + struct task_struct *tsk, + bool active) { - struct thread_struct *thread = &(tsk->thread); - int i, rc = -ENOSPC; - - spin_lock_bh(&hw_breakpoint_lock); - - for (i = 0; i < hbp_kernel_pos; i++) { - if (!thread->hbp[i]) { - rc = __register_user_hw_breakpoint(i, tsk, bp); - break; - } - } - if (!rc) - set_tsk_thread_flag(tsk, TIF_DEBUG); - - spin_unlock_bh(&hw_breakpoint_lock); - return rc; + return register_user_hw_breakpoint_cpu(addr, len, type, triggered, + tsk->pid, -1, active); } EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); /** * modify_user_hw_breakpoint - modify a user-space hardware breakpoint + * @bp: the breakpoint structure to modify + * @addr: is the memory address that triggers the breakpoint + * @len: the length of the access to the memory (1 byte, 2 bytes etc...) 
+ * @type: the type of the access to the memory (read/write/exec) + * @triggered: callback to trigger when we hit the breakpoint * @tsk: pointer to 'task_struct' of the process to which the address belongs - * @bp: the breakpoint structure to unregister - * + * @active: should we activate it while registering it */ -int modify_user_hw_breakpoint(struct task_struct *tsk, struct hw_breakpoint *bp) +struct perf_event * +modify_user_hw_breakpoint(struct perf_event *bp, + unsigned long addr, + int len, + int type, + perf_callback_t triggered, + struct task_struct *tsk, + bool active) { - struct thread_struct *thread = &(tsk->thread); - int i, ret = -ENOENT; + /* + * FIXME: do it without unregistering + * - We don't want to lose our slot + * - If the new bp is incorrect, don't lose the older one + */ + unregister_hw_breakpoint(bp); - spin_lock_bh(&hw_breakpoint_lock); - for (i = 0; i < hbp_kernel_pos; i++) { - if (bp == thread->hbp[i]) { - ret = __modify_user_hw_breakpoint(i, tsk, bp); - break; - } - } - spin_unlock_bh(&hw_breakpoint_lock); - return ret; + return register_user_hw_breakpoint(addr, len, type, triggered, + tsk, active); } EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint); /** - * unregister_user_hw_breakpoint - unregister a user-space hardware breakpoint - * @tsk: pointer to 'task_struct' of the process to which the address belongs + * unregister_hw_breakpoint - unregister a user-space hardware breakpoint * @bp: the breakpoint structure to unregister - * */ -void unregister_user_hw_breakpoint(struct task_struct *tsk, - struct hw_breakpoint *bp) +void unregister_hw_breakpoint(struct perf_event *bp) { - struct thread_struct *thread = &(tsk->thread); - int i, pos = -1, hbp_counter = 0; - - spin_lock_bh(&hw_breakpoint_lock); - for (i = 0; i < hbp_kernel_pos; i++) { - if (thread->hbp[i]) - hbp_counter++; - if (bp == thread->hbp[i]) - pos = i; - } - if (pos >= 0) { - __unregister_user_hw_breakpoint(pos, tsk); - hbp_counter--; - } - if (!hbp_counter) - clear_tsk_thread_flag(tsk, TIF_DEBUG); - - spin_unlock_bh(&hw_breakpoint_lock); -} -EXPORT_SYMBOL_GPL(unregister_user_hw_breakpoint); - -/** - * register_kernel_hw_breakpoint - register a hardware breakpoint for kernel space - * @bp: the breakpoint structure to register - * - * @bp.info->name or @bp.info->address, @bp.info->len, @bp.info->type and - * @bp->triggered must be set properly before invocation - * - */ -int register_kernel_hw_breakpoint(struct hw_breakpoint *bp) -{ - int rc; - - rc = arch_validate_hwbkpt_settings(bp, NULL); - if (rc) - return rc; - - spin_lock_bh(&hw_breakpoint_lock); - - rc = -ENOSPC; - /* Check if we are over-committing */ - if ((hbp_kernel_pos > 0) && (!hbp_user_refcount[hbp_kernel_pos-1])) { - hbp_kernel_pos--; - hbp_kernel[hbp_kernel_pos] = bp; - on_each_cpu(arch_update_kernel_hw_breakpoint, NULL, 1); - rc = 0; - } - - spin_unlock_bh(&hw_breakpoint_lock); - return rc; -} -EXPORT_SYMBOL_GPL(register_kernel_hw_breakpoint); - -/** - * unregister_kernel_hw_breakpoint - unregister a HW breakpoint for kernel space - * @bp: the breakpoint structure to unregister - * - * Uninstalls and unregisters @bp. - */ -void unregister_kernel_hw_breakpoint(struct hw_breakpoint *bp) -{ - int i, j; - - spin_lock_bh(&hw_breakpoint_lock); - - /* Find the 'bp' in our list of breakpoints for kernel */ - for (i = hbp_kernel_pos; i < HBP_NUM; i++) - if (bp == hbp_kernel[i]) - break; - - /* Check if we did not find a match for 'bp'. 
If so return early */ - if (i == HBP_NUM) { - spin_unlock_bh(&hw_breakpoint_lock); + if (!bp) return; + perf_event_release_kernel(bp); +} +EXPORT_SYMBOL_GPL(unregister_hw_breakpoint); + +static struct perf_event * +register_kernel_hw_breakpoint_cpu(unsigned long addr, + int len, + int type, + perf_callback_t triggered, + int cpu, + bool active) +{ + return register_user_hw_breakpoint_cpu(addr, len, type, triggered, + -1, cpu, active); +} + +/** + * register_wide_hw_breakpoint - register a wide breakpoint in the kernel + * @addr: is the memory address that triggers the breakpoint + * @len: the length of the access to the memory (1 byte, 2 bytes etc...) + * @type: the type of the access to the memory (read/write/exec) + * @triggered: callback to trigger when we hit the breakpoint + * @active: should we activate it while registering it + * + * @return a set of per_cpu pointers to perf events + */ +struct perf_event ** +register_wide_hw_breakpoint(unsigned long addr, + int len, + int type, + perf_callback_t triggered, + bool active) +{ + struct perf_event **cpu_events, **pevent, *bp; + long err; + int cpu; + + cpu_events = alloc_percpu(typeof(*cpu_events)); + if (!cpu_events) + return ERR_PTR(-ENOMEM); + + for_each_possible_cpu(cpu) { + pevent = per_cpu_ptr(cpu_events, cpu); + bp = register_kernel_hw_breakpoint_cpu(addr, len, type, + triggered, cpu, active); + + *pevent = bp; + + if (IS_ERR(bp) || !bp) { + err = PTR_ERR(bp); + goto fail; + } } - /* - * We'll shift the breakpoints one-level above to compact if - * unregistration creates a hole - */ - for (j = i; j > hbp_kernel_pos; j--) - hbp_kernel[j] = hbp_kernel[j-1]; + return cpu_events; - hbp_kernel[hbp_kernel_pos] = NULL; - on_each_cpu(arch_update_kernel_hw_breakpoint, NULL, 1); - hbp_kernel_pos++; - - spin_unlock_bh(&hw_breakpoint_lock); +fail: + for_each_possible_cpu(cpu) { + pevent = per_cpu_ptr(cpu_events, cpu); + if (IS_ERR(*pevent) || !*pevent) + break; + unregister_hw_breakpoint(*pevent); + } + free_percpu(cpu_events); + /* return the error if any */ + return ERR_PTR(err); } -EXPORT_SYMBOL_GPL(unregister_kernel_hw_breakpoint); + +/** + * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel + * @cpu_events: the per cpu set of events to unregister + */ +void unregister_wide_hw_breakpoint(struct perf_event **cpu_events) +{ + int cpu; + struct perf_event **pevent; + + for_each_possible_cpu(cpu) { + pevent = per_cpu_ptr(cpu_events, cpu); + unregister_hw_breakpoint(*pevent); + } + free_percpu(cpu_events); +} + static struct notifier_block hw_breakpoint_exceptions_nb = { .notifier_call = hw_breakpoint_exceptions_notify, @@ -374,5 +283,12 @@ static int __init init_hw_breakpoint(void) { return register_die_notifier(&hw_breakpoint_exceptions_nb); } - core_initcall(init_hw_breakpoint); + + +struct pmu perf_ops_bp = { + .enable = arch_install_hw_breakpoint, + .disable = arch_uninstall_hw_breakpoint, + .read = hw_breakpoint_pmu_read, + .unthrottle = hw_breakpoint_pmu_unthrottle +}; diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 5087125e2a0..98dc56b2ebe 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -29,6 +29,7 @@ #include #include #include +#include #include @@ -4229,6 +4230,51 @@ static void perf_event_free_filter(struct perf_event *event) #endif /* CONFIG_EVENT_PROFILE */ +#ifdef CONFIG_HAVE_HW_BREAKPOINT +static void bp_perf_event_destroy(struct perf_event *event) +{ + release_bp_slot(event); +} + +static const struct pmu *bp_perf_event_init(struct perf_event *bp) +{ + int err; + /* + * The 
breakpoint is already filled if we haven't created the counter + * through perf syscall + * FIXME: manage to get trigerred to NULL if it comes from syscalls + */ + if (!bp->callback) + err = register_perf_hw_breakpoint(bp); + else + err = __register_perf_hw_breakpoint(bp); + if (err) + return ERR_PTR(err); + + bp->destroy = bp_perf_event_destroy; + + return &perf_ops_bp; +} + +void perf_bp_event(struct perf_event *bp, void *regs) +{ + /* TODO */ +} +#else +static void bp_perf_event_destroy(struct perf_event *event) +{ +} + +static const struct pmu *bp_perf_event_init(struct perf_event *bp) +{ + return NULL; +} + +void perf_bp_event(struct perf_event *bp, void *regs) +{ +} +#endif + atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; static void sw_perf_event_destroy(struct perf_event *event) @@ -4375,6 +4421,11 @@ perf_event_alloc(struct perf_event_attr *attr, pmu = tp_perf_event_init(event); break; + case PERF_TYPE_BREAKPOINT: + pmu = bp_perf_event_init(event); + break; + + default: break; } @@ -4686,7 +4737,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, ctx = find_get_context(pid, cpu); if (IS_ERR(ctx)) - return NULL ; + return NULL; event = perf_event_alloc(attr, cpu, ctx, NULL, NULL, callback, GFP_KERNEL); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 91c3d0e9a5a..d72f06ff263 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -11,14 +11,11 @@ #include #include #include +#include #include #include -#ifdef CONFIG_KSYM_TRACER -#include -#endif - enum trace_type { __TRACE_FIRST_TYPE = 0, diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h index e19747d4f86..c16a08f399d 100644 --- a/kernel/trace/trace_entries.h +++ b/kernel/trace/trace_entries.h @@ -372,11 +372,11 @@ FTRACE_ENTRY(ksym_trace, ksym_trace_entry, F_STRUCT( __field( unsigned long, ip ) __field( unsigned char, type ) - __array( char , ksym_name, KSYM_NAME_LEN ) __array( char , cmd, TASK_COMM_LEN ) + __field( unsigned long, addr ) ), - F_printk("ip: %pF type: %d ksym_name: %s cmd: %s", + F_printk("ip: %pF type: %d ksym_name: %pS cmd: %s", (void *)__entry->ip, (unsigned int)__entry->type, - __entry->ksym_name, __entry->cmd) + (void *)__entry->addr, __entry->cmd) ); diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c index 6d5609c6737..fea83eeeef0 100644 --- a/kernel/trace/trace_ksym.c +++ b/kernel/trace/trace_ksym.c @@ -29,7 +29,11 @@ #include "trace_stat.h" #include "trace.h" -/* For now, let us restrict the no. of symbols traced simultaneously to number +#include +#include + +/* + * For now, let us restrict the no. of symbols traced simultaneously to number * of available hardware breakpoint registers. 
*/ #define KSYM_TRACER_MAX HBP_NUM @@ -37,8 +41,10 @@ #define KSYM_TRACER_OP_LEN 3 /* rw- */ struct trace_ksym { - struct hw_breakpoint *ksym_hbp; + struct perf_event **ksym_hbp; unsigned long ksym_addr; + int type; + int len; #ifdef CONFIG_PROFILE_KSYM_TRACER unsigned long counter; #endif @@ -75,10 +81,11 @@ void ksym_collect_stats(unsigned long hbp_hit_addr) } #endif /* CONFIG_PROFILE_KSYM_TRACER */ -void ksym_hbp_handler(struct hw_breakpoint *hbp, struct pt_regs *regs) +void ksym_hbp_handler(struct perf_event *hbp, void *data) { struct ring_buffer_event *event; struct ksym_trace_entry *entry; + struct pt_regs *regs = data; struct ring_buffer *buffer; int pc; @@ -96,12 +103,12 @@ void ksym_hbp_handler(struct hw_breakpoint *hbp, struct pt_regs *regs) entry = ring_buffer_event_data(event); entry->ip = instruction_pointer(regs); - entry->type = hbp->info.type; - strlcpy(entry->ksym_name, hbp->info.name, KSYM_SYMBOL_LEN); + entry->type = hw_breakpoint_type(hbp); + entry->addr = hw_breakpoint_addr(hbp); strlcpy(entry->cmd, current->comm, TASK_COMM_LEN); #ifdef CONFIG_PROFILE_KSYM_TRACER - ksym_collect_stats(hbp->info.address); + ksym_collect_stats(hw_breakpoint_addr(hbp)); #endif /* CONFIG_PROFILE_KSYM_TRACER */ trace_buffer_unlock_commit(buffer, event, 0, pc); @@ -120,31 +127,21 @@ static int ksym_trace_get_access_type(char *str) int access = 0; if (str[0] == 'r') - access += 4; - else if (str[0] != '-') - return -EINVAL; + access |= HW_BREAKPOINT_R; if (str[1] == 'w') - access += 2; - else if (str[1] != '-') - return -EINVAL; + access |= HW_BREAKPOINT_W; - if (str[2] != '-') - return -EINVAL; + if (str[2] == 'x') + access |= HW_BREAKPOINT_X; switch (access) { - case 6: - access = HW_BREAKPOINT_RW; - break; - case 4: - access = -EINVAL; - break; - case 2: - access = HW_BREAKPOINT_WRITE; - break; + case HW_BREAKPOINT_W: + case HW_BREAKPOINT_W | HW_BREAKPOINT_R: + return access; + default: + return -EINVAL; } - - return access; } /* @@ -194,36 +191,33 @@ int process_new_ksym_entry(char *ksymname, int op, unsigned long addr) if (!entry) return -ENOMEM; - entry->ksym_hbp = kzalloc(sizeof(struct hw_breakpoint), GFP_KERNEL); - if (!entry->ksym_hbp) - goto err; + entry->type = op; + entry->ksym_addr = addr; + entry->len = HW_BREAKPOINT_LEN_4; - entry->ksym_hbp->info.name = kstrdup(ksymname, GFP_KERNEL); - if (!entry->ksym_hbp->info.name) - goto err; + ret = -EAGAIN; + entry->ksym_hbp = register_wide_hw_breakpoint(entry->ksym_addr, + entry->len, entry->type, + ksym_hbp_handler, true); + if (IS_ERR(entry->ksym_hbp)) { + entry->ksym_hbp = NULL; + ret = PTR_ERR(entry->ksym_hbp); + } - entry->ksym_hbp->info.type = op; - entry->ksym_addr = entry->ksym_hbp->info.address = addr; -#ifdef CONFIG_X86 - entry->ksym_hbp->info.len = HW_BREAKPOINT_LEN_4; -#endif - entry->ksym_hbp->triggered = (void *)ksym_hbp_handler; - - ret = register_kernel_hw_breakpoint(entry->ksym_hbp); - if (ret < 0) { + if (!entry->ksym_hbp) { printk(KERN_INFO "ksym_tracer request failed. 
Try again" " later!!\n"); - ret = -EAGAIN; goto err; } + hlist_add_head_rcu(&(entry->ksym_hlist), &ksym_filter_head); ksym_filter_entry_count++; + return 0; + err: - if (entry->ksym_hbp) - kfree(entry->ksym_hbp->info.name); - kfree(entry->ksym_hbp); kfree(entry); + return ret; } @@ -244,10 +238,10 @@ static ssize_t ksym_trace_filter_read(struct file *filp, char __user *ubuf, mutex_lock(&ksym_tracer_mutex); hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) { - ret = trace_seq_printf(s, "%s:", entry->ksym_hbp->info.name); - if (entry->ksym_hbp->info.type == HW_BREAKPOINT_WRITE) + ret = trace_seq_printf(s, "%pS:", (void *)entry->ksym_addr); + if (entry->type == HW_BREAKPOINT_W) ret = trace_seq_puts(s, "-w-\n"); - else if (entry->ksym_hbp->info.type == HW_BREAKPOINT_RW) + else if (entry->type == (HW_BREAKPOINT_W | HW_BREAKPOINT_R)) ret = trace_seq_puts(s, "rw-\n"); WARN_ON_ONCE(!ret); } @@ -269,12 +263,10 @@ static void __ksym_trace_reset(void) mutex_lock(&ksym_tracer_mutex); hlist_for_each_entry_safe(entry, node, node1, &ksym_filter_head, ksym_hlist) { - unregister_kernel_hw_breakpoint(entry->ksym_hbp); + unregister_wide_hw_breakpoint(entry->ksym_hbp); ksym_filter_entry_count--; hlist_del_rcu(&(entry->ksym_hlist)); synchronize_rcu(); - kfree(entry->ksym_hbp->info.name); - kfree(entry->ksym_hbp); kfree(entry); } mutex_unlock(&ksym_tracer_mutex); @@ -327,7 +319,7 @@ static ssize_t ksym_trace_filter_write(struct file *file, hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) { if (entry->ksym_addr == ksym_addr) { /* Check for malformed request: (6) */ - if (entry->ksym_hbp->info.type != op) + if (entry->type != op) changed = 1; else goto out; @@ -335,18 +327,21 @@ static ssize_t ksym_trace_filter_write(struct file *file, } } if (changed) { - unregister_kernel_hw_breakpoint(entry->ksym_hbp); - entry->ksym_hbp->info.type = op; + unregister_wide_hw_breakpoint(entry->ksym_hbp); + entry->type = op; if (op > 0) { - ret = register_kernel_hw_breakpoint(entry->ksym_hbp); - if (ret == 0) + entry->ksym_hbp = + register_wide_hw_breakpoint(entry->ksym_addr, + entry->len, entry->type, + ksym_hbp_handler, true); + if (IS_ERR(entry->ksym_hbp)) + entry->ksym_hbp = NULL; + if (!entry->ksym_hbp) goto out; } ksym_filter_entry_count--; hlist_del_rcu(&(entry->ksym_hlist)); synchronize_rcu(); - kfree(entry->ksym_hbp->info.name); - kfree(entry->ksym_hbp); kfree(entry); ret = 0; goto out; @@ -413,16 +408,16 @@ static enum print_line_t ksym_trace_output(struct trace_iterator *iter) trace_assign_type(field, entry); - ret = trace_seq_printf(s, "%11s-%-5d [%03d] %-30s ", field->cmd, - entry->pid, iter->cpu, field->ksym_name); + ret = trace_seq_printf(s, "%11s-%-5d [%03d] %pS", field->cmd, + entry->pid, iter->cpu, (char *)field->addr); if (!ret) return TRACE_TYPE_PARTIAL_LINE; switch (field->type) { - case HW_BREAKPOINT_WRITE: + case HW_BREAKPOINT_W: ret = trace_seq_printf(s, " W "); break; - case HW_BREAKPOINT_RW: + case HW_BREAKPOINT_R | HW_BREAKPOINT_W: ret = trace_seq_printf(s, " RW "); break; default: @@ -490,14 +485,13 @@ static int ksym_tracer_stat_show(struct seq_file *m, void *v) entry = hlist_entry(stat, struct trace_ksym, ksym_hlist); - if (entry->ksym_hbp) - access_type = entry->ksym_hbp->info.type; + access_type = entry->type; switch (access_type) { - case HW_BREAKPOINT_WRITE: + case HW_BREAKPOINT_W: seq_puts(m, " W "); break; - case HW_BREAKPOINT_RW: + case HW_BREAKPOINT_R | HW_BREAKPOINT_W: seq_puts(m, " RW "); break; default: diff --git a/kernel/trace/trace_selftest.c 
b/kernel/trace/trace_selftest.c index 7179c12e4f0..27c5072c2e6 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -828,7 +828,8 @@ trace_selftest_startup_ksym(struct tracer *trace, struct trace_array *tr)
 	ksym_selftest_dummy = 0;
 	/* Register the read-write tracing request */
-	ret = process_new_ksym_entry(KSYM_SELFTEST_ENTRY, HW_BREAKPOINT_RW,
+	ret = process_new_ksym_entry(KSYM_SELFTEST_ENTRY,
+			HW_BREAKPOINT_R | HW_BREAKPOINT_W,
 			(unsigned long)(&ksym_selftest_dummy));
 	if (ret < 0) {

From ba1c813a6b9a0ef14d7112daf51270eff326f037 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Thu, 10 Sep 2009 09:26:21 +0200
Subject: [PATCH 236/470] hw-breakpoints: Arbitrate access to pmu following
 registers constraints

Allow or refuse to build a counter using the breakpoints pmu, following
the given constraints.

We keep track of the pmu users by using three per cpu variables:

- nr_cpu_bp_pinned stores the number of pinned cpu breakpoint counters
  in the given cpu

- nr_bp_flexible stores the number of non-pinned breakpoint counters in
  the given cpu.

- task_bp_pinned stores the number of pinned task breakpoints in a cpu

The latter is not a simple counter but gathers the number of tasks that
have n pinned breakpoints.
Considering HBP_NUM the number of available breakpoint address registers:
   task_bp_pinned[0] is the number of tasks having 1 breakpoint
   task_bp_pinned[1] is the number of tasks having 2 breakpoints
   [...]
   task_bp_pinned[HBP_NUM - 1] is the number of tasks having the
   maximum number of registers (HBP_NUM).

When a breakpoint counter is created and wants access to the pmu, we
evaluate the following constraints:

== Non-pinned counter ==

- If attached to a single cpu, check:

    (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
         + max(per_cpu(task_bp_pinned, cpu)))) < HBP_NUM

  -> If there are already non-pinned counters in this cpu, it means
     there is already a free slot for them.
     Otherwise, we check that the maximum number of per task
     breakpoints (for this cpu) plus the number of per cpu breakpoints
     (for this cpu) doesn't cover all registers.

- If attached to every cpu, check:

    (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
         + max(per_cpu(task_bp_pinned, *)))) < HBP_NUM

  -> This is roughly the same, except we check the number of per cpu
     bp for every cpu and we keep the max one. Same for the per-task
     breakpoints.

== Pinned counter ==

- If attached to a single cpu, check:

    ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
         + max(per_cpu(task_bp_pinned, cpu))) < HBP_NUM

  -> Same checks as before. But now the nr_bp_flexible, if any, must
     keep at least one register (or flexible breakpoints will never be
     fed).

- If attached to every cpu, check:

    ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
         + max(per_cpu(task_bp_pinned, *))) < HBP_NUM

Changes in v2:
- Counter -> event rename

Changes in v5:
- Fix unreleased non-pinned task-bound-only counters. We only released
  them on the first cpu. (Thanks to Paul Mackerras for reporting that)

Changes in v6:
- Currently, event scheduling is done in this order: cpu context
  pinned + cpu context non-pinned + task context pinned + task context
  non-pinned events. So our current constraints are theoretically right
  but not in practice, because non-pinned counters may be scheduled
  before we can apply every possible pinned counter. So consider
  non-pinned counters as pinned for now.
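To make the slot arithmetic above concrete, here is a minimal, illustrative
user-space sketch of the single-cpu pinned check. It is not part of the
patch: HBP_NUM is assumed to be 4 as on x86, and the kernel code below uses
per-cpu variables serialized by nr_bp_mutex rather than plain globals.

#include <stdio.h>
#include <stdbool.h>

#define HBP_NUM 4

/* Pinned cpu-wide breakpoints on this cpu */
static unsigned int nr_cpu_bp_pinned;
/* task_bp_pinned[n] == number of tasks holding n + 1 pinned breakpoints */
static unsigned int task_bp_pinned[HBP_NUM];
/* Non-pinned (flexible) breakpoints on this cpu */
static unsigned int nr_bp_flexible;

/* Highest number of pinned breakpoints a single task holds on this cpu */
static unsigned int max_task_bp_pinned(void)
{
	int i;

	for (i = HBP_NUM - 1; i >= 0; i--) {
		if (task_bp_pinned[i] > 0)
			return i + 1;
	}
	return 0;
}

/*
 * Admission test for a new pinned counter: pinned users must not cover
 * every register, and flexible counters (if any) must keep one slot.
 */
static bool pinned_slot_available(void)
{
	unsigned int pinned = nr_cpu_bp_pinned + max_task_bp_pinned();

	return pinned + (nr_bp_flexible ? 1 : 0) < HBP_NUM;
}

int main(void)
{
	nr_cpu_bp_pinned = 2;	/* two cpu-wide pinned breakpoints */
	task_bp_pinned[1] = 1;	/* one task already holds two pinned bps */

	printf("new pinned counter allowed: %s\n",
	       pinned_slot_available() ? "yes" : "no");
	return 0;
}

With the values set in main(), all four debug registers are already pinned,
so an equivalent request would get -ENOSPC from reserve_bp_slot() below.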
Signed-off-by: Frederic Weisbecker Cc: Prasad Cc: Alan Stern Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Ingo Molnar Cc: Jan Kiszka Cc: Jiri Slaby Cc: Li Zefan Cc: Avi Kivity Cc: Paul Mackerras Cc: Mike Galbraith Cc: Masami Hiramatsu Cc: Paul Mundt --- kernel/hw_breakpoint.c | 211 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 205 insertions(+), 6 deletions(-) diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index 08f6d016320..e662dc991c9 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c @@ -16,6 +16,8 @@ * Copyright (C) 2007 Alan Stern * Copyright (C) IBM Corporation, 2009 * Copyright (C) 2009, Frederic Weisbecker + * + * Thanks to Ingo Molnar for his many suggestions. */ /* @@ -44,24 +46,221 @@ #include #endif -static atomic_t bp_slot; +/* + * Constraints data + */ -int reserve_bp_slot(struct perf_event *bp) +/* Number of pinned cpu breakpoints in a cpu */ +static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned); + +/* Number of pinned task breakpoints in a cpu */ +static DEFINE_PER_CPU(unsigned int, task_bp_pinned[HBP_NUM]); + +/* Number of non-pinned cpu/task breakpoints in a cpu */ +static DEFINE_PER_CPU(unsigned int, nr_bp_flexible); + +/* Gather the number of total pinned and un-pinned bp in a cpuset */ +struct bp_busy_slots { + unsigned int pinned; + unsigned int flexible; +}; + +/* Serialize accesses to the above constraints */ +static DEFINE_MUTEX(nr_bp_mutex); + +/* + * Report the maximum number of pinned breakpoints a task + * have in this cpu + */ +static unsigned int max_task_bp_pinned(int cpu) { - if (atomic_inc_return(&bp_slot) == HBP_NUM) { - atomic_dec(&bp_slot); + int i; + unsigned int *tsk_pinned = per_cpu(task_bp_pinned, cpu); - return -ENOSPC; + for (i = HBP_NUM -1; i >= 0; i--) { + if (tsk_pinned[i] > 0) + return i + 1; } return 0; } +/* + * Report the number of pinned/un-pinned breakpoints we have in + * a given cpu (cpu > -1) or in all of them (cpu = -1). 
+ */ +static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu) +{ + if (cpu >= 0) { + slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu); + slots->pinned += max_task_bp_pinned(cpu); + slots->flexible = per_cpu(nr_bp_flexible, cpu); + + return; + } + + for_each_online_cpu(cpu) { + unsigned int nr; + + nr = per_cpu(nr_cpu_bp_pinned, cpu); + nr += max_task_bp_pinned(cpu); + + if (nr > slots->pinned) + slots->pinned = nr; + + nr = per_cpu(nr_bp_flexible, cpu); + + if (nr > slots->flexible) + slots->flexible = nr; + } +} + +/* + * Add a pinned breakpoint for the given task in our constraint table + */ +static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable) +{ + int count = 0; + struct perf_event *bp; + struct perf_event_context *ctx = tsk->perf_event_ctxp; + unsigned int *task_bp_pinned; + struct list_head *list; + unsigned long flags; + + if (WARN_ONCE(!ctx, "No perf context for this task")) + return; + + list = &ctx->event_list; + + spin_lock_irqsave(&ctx->lock, flags); + + /* + * The current breakpoint counter is not included in the list + * at the open() callback time + */ + list_for_each_entry(bp, list, event_entry) { + if (bp->attr.type == PERF_TYPE_BREAKPOINT) + count++; + } + + spin_unlock_irqrestore(&ctx->lock, flags); + + if (WARN_ONCE(count < 0, "No breakpoint counter found in the counter list")) + return; + + task_bp_pinned = per_cpu(task_bp_pinned, cpu); + if (enable) { + task_bp_pinned[count]++; + if (count > 0) + task_bp_pinned[count-1]--; + } else { + task_bp_pinned[count]--; + if (count > 0) + task_bp_pinned[count-1]++; + } +} + +/* + * Add/remove the given breakpoint in our constraint table + */ +static void toggle_bp_slot(struct perf_event *bp, bool enable) +{ + int cpu = bp->cpu; + struct task_struct *tsk = bp->ctx->task; + + /* Pinned counter task profiling */ + if (tsk) { + if (cpu >= 0) { + toggle_bp_task_slot(tsk, cpu, enable); + return; + } + + for_each_online_cpu(cpu) + toggle_bp_task_slot(tsk, cpu, enable); + return; + } + + /* Pinned counter cpu profiling */ + if (enable) + per_cpu(nr_cpu_bp_pinned, bp->cpu)++; + else + per_cpu(nr_cpu_bp_pinned, bp->cpu)--; +} + +/* + * Contraints to check before allowing this new breakpoint counter: + * + * == Non-pinned counter == (Considered as pinned for now) + * + * - If attached to a single cpu, check: + * + * (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu) + * + max(per_cpu(task_bp_pinned, cpu)))) < HBP_NUM + * + * -> If there are already non-pinned counters in this cpu, it means + * there is already a free slot for them. + * Otherwise, we check that the maximum number of per task + * breakpoints (for this cpu) plus the number of per cpu breakpoint + * (for this cpu) doesn't cover every registers. + * + * - If attached to every cpus, check: + * + * (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *)) + * + max(per_cpu(task_bp_pinned, *)))) < HBP_NUM + * + * -> This is roughly the same, except we check the number of per cpu + * bp for every cpu and we keep the max one. Same for the per tasks + * breakpoints. + * + * + * == Pinned counter == + * + * - If attached to a single cpu, check: + * + * ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu) + * + max(per_cpu(task_bp_pinned, cpu))) < HBP_NUM + * + * -> Same checks as before. But now the nr_bp_flexible, if any, must keep + * one register at least (or they will never be fed). 
+ * + * - If attached to every cpus, check: + * + * ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *)) + * + max(per_cpu(task_bp_pinned, *))) < HBP_NUM + */ +int reserve_bp_slot(struct perf_event *bp) +{ + struct bp_busy_slots slots = {0}; + int ret = 0; + + mutex_lock(&nr_bp_mutex); + + fetch_bp_busy_slots(&slots, bp->cpu); + + /* Flexible counters need to keep at least one slot */ + if (slots.pinned + (!!slots.flexible) == HBP_NUM) { + ret = -ENOSPC; + goto end; + } + + toggle_bp_slot(bp, true); + +end: + mutex_unlock(&nr_bp_mutex); + + return ret; +} + void release_bp_slot(struct perf_event *bp) { - atomic_dec(&bp_slot); + mutex_lock(&nr_bp_mutex); + + toggle_bp_slot(bp, false); + + mutex_unlock(&nr_bp_mutex); } + int __register_perf_hw_breakpoint(struct perf_event *bp) { int ret; From 30ff21e31fe5c8b7b1b7d30cc41e32bc4ee9f175 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 10 Sep 2009 09:35:20 +0800 Subject: [PATCH 237/470] ksym_tracer: Remove KSYM_SELFTEST_ENTRY The macro used to be used in both trace_selftest.c and trace_ksym.c, but no longer, so remove it from header file. Signed-off-by: Li Zefan Cc: Prasad Signed-off-by: Frederic Weisbecker --- kernel/trace/trace.h | 1 - kernel/trace/trace_selftest.c | 3 ++- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index d72f06ff263..ee00475742e 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -367,7 +367,6 @@ int register_tracer(struct tracer *type); void unregister_tracer(struct tracer *type); int is_tracing_stopped(void); -#define KSYM_SELFTEST_ENTRY "ksym_selftest_dummy" extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr); extern unsigned long nsecs_to_usecs(unsigned long nsecs); diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 27c5072c2e6..dc98309e839 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -828,7 +828,8 @@ trace_selftest_startup_ksym(struct tracer *trace, struct trace_array *tr) ksym_selftest_dummy = 0; /* Register the read-write tracing request */ - ret = process_new_ksym_entry(KSYM_SELFTEST_ENTRY, + + ret = process_new_ksym_entry("ksym_selftest_dummy", HW_BREAKPOINT_R | HW_BREAKPOINT_W, (unsigned long)(&ksym_selftest_dummy)); From afe61f677866ffc484e69c4ecca2d316d564d78b Mon Sep 17 00:00:00 2001 From: Clark Williams Date: Sun, 8 Nov 2009 09:01:37 -0600 Subject: [PATCH 238/470] perf tools: Add debugfs utility routines for perf Add routines to locate the debugfs mount point and to manage the mounting and unmounting of the debugfs. 
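As an illustration only (not part of the patch), a caller inside the perf
tree could use the new helpers roughly like this; the "tracing/trace_pipe"
element is just an example and error handling is kept minimal:

#include <stdio.h>

#include "debugfs.h"

int main(void)
{
	char path[MAX_PATH + 1];

	/* Reuse an already mounted debugfs, or mount it at the default place */
	if (debugfs_mount(NULL) != 0) {
		fprintf(stderr, "cannot mount debugfs\n");
		return 1;
	}

	/* Build an absolute path to an entry below the mount point */
	if (debugfs_make_path("tracing/trace_pipe", path, sizeof(path)) == 0)
		printf("trace pipe is at %s\n", path);

	/* This is a no-op if debugfs was already mounted before we ran */
	debugfs_umount();

	return 0;
}

debugfs_write() and debugfs_read() follow the same pattern, taking entry
names relative to the discovered mount point.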
Signed-off-by: Clark Williams Cc: Arnaldo Carvalho de Melo Cc: Peter Zijlstra LKML-Reference: <20091101155621.2b3503ee@torg> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 2 + tools/perf/util/debugfs.c | 241 ++++++++++++++++++++++++++++++++++++++ tools/perf/util/debugfs.h | 25 ++++ 3 files changed, 268 insertions(+) create mode 100644 tools/perf/util/debugfs.c create mode 100644 tools/perf/util/debugfs.h diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 542b29e2e38..b9509b1cc32 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -353,6 +353,7 @@ LIB_H += util/include/asm/swab.h LIB_H += util/include/asm/system.h LIB_H += util/include/asm/uaccess.h LIB_H += perf.h +LIB_H += util/debugfs.h LIB_H += util/event.h LIB_H += util/types.h LIB_H += util/levenshtein.h @@ -378,6 +379,7 @@ LIB_OBJS += util/abspath.o LIB_OBJS += util/alias.o LIB_OBJS += util/config.o LIB_OBJS += util/ctype.o +LIB_OBJS += util/debugfs.o LIB_OBJS += util/environment.o LIB_OBJS += util/event.o LIB_OBJS += util/exec_cmd.o diff --git a/tools/perf/util/debugfs.c b/tools/perf/util/debugfs.c new file mode 100644 index 00000000000..06b73ee02c4 --- /dev/null +++ b/tools/perf/util/debugfs.c @@ -0,0 +1,241 @@ +#include "util.h" +#include "debugfs.h" +#include "cache.h" + +static int debugfs_premounted; +static char debugfs_mountpoint[MAX_PATH+1]; + +static const char *debugfs_known_mountpoints[] = { + "/sys/kernel/debug/", + "/debug/", + 0, +}; + +/* use this to force a umount */ +void debugfs_force_cleanup(void) +{ + debugfs_find_mountpoint(); + debugfs_premounted = 0; + debugfs_umount(); +} + +/* construct a full path to a debugfs element */ +int debugfs_make_path(const char *element, char *buffer, int size) +{ + int len; + + if (strlen(debugfs_mountpoint) == 0) { + buffer[0] = '\0'; + return -1; + } + + len = strlen(debugfs_mountpoint) + strlen(element) + 1; + if (len >= size) + return len+1; + + snprintf(buffer, size-1, "%s/%s", debugfs_mountpoint, element); + return 0; +} + +static int debugfs_found; + +/* find the path to the mounted debugfs */ +const char *debugfs_find_mountpoint(void) +{ + const char **ptr; + char type[100]; + FILE *fp; + + if (debugfs_found) + return (const char *) debugfs_mountpoint; + + ptr = debugfs_known_mountpoints; + while (*ptr) { + if (debugfs_valid_mountpoint(*ptr) == 0) { + debugfs_found = 1; + strcpy(debugfs_mountpoint, *ptr); + return debugfs_mountpoint; + } + ptr++; + } + + /* give up and parse /proc/mounts */ + fp = fopen("/proc/mounts", "r"); + if (fp == NULL) + die("Can't open /proc/mounts for read"); + + while (fscanf(fp, "%*s %" + STR(MAX_PATH) + "s %99s %*s %*d %*d\n", + debugfs_mountpoint, type) == 2) { + if (strcmp(type, "debugfs") == 0) + break; + } + fclose(fp); + + if (strcmp(type, "debugfs") != 0) + return NULL; + + debugfs_found = 1; + + return debugfs_mountpoint; +} + +/* verify that a mountpoint is actually a debugfs instance */ + +int debugfs_valid_mountpoint(const char *debugfs) +{ + struct statfs st_fs; + + if (statfs(debugfs, &st_fs) < 0) + return -ENOENT; + else if (st_fs.f_type != (long) DEBUGFS_MAGIC) + return -ENOENT; + + return 0; +} + + +int debugfs_valid_entry(const char *path) +{ + struct stat st; + + if (stat(path, &st)) + return -errno; + + return 0; +} + +/* mount the debugfs somewhere */ + +int debugfs_mount(const char *mountpoint) +{ + char mountcmd[128]; + + /* see if it's already mounted */ + if (debugfs_find_mountpoint()) { + debugfs_premounted = 1; + return 0; + } + + /* if not mounted and no argument */ + if (mountpoint == NULL) 
{ + /* see if environment variable set */ + mountpoint = getenv(PERF_DEBUGFS_ENVIRONMENT); + /* if no environment variable, use default */ + if (mountpoint == NULL) + mountpoint = "/sys/kernel/debug"; + } + + /* save the mountpoint */ + strncpy(debugfs_mountpoint, mountpoint, sizeof(debugfs_mountpoint)); + + /* mount it */ + snprintf(mountcmd, sizeof(mountcmd), + "/bin/mount -t debugfs debugfs %s", mountpoint); + return system(mountcmd); +} + +/* umount the debugfs */ + +int debugfs_umount(void) +{ + char umountcmd[128]; + int ret; + + /* if it was already mounted, leave it */ + if (debugfs_premounted) + return 0; + + /* make sure it's a valid mount point */ + ret = debugfs_valid_mountpoint(debugfs_mountpoint); + if (ret) + return ret; + + snprintf(umountcmd, sizeof(umountcmd), + "/bin/umount %s", debugfs_mountpoint); + return system(umountcmd); +} + +int debugfs_write(const char *entry, const char *value) +{ + char path[MAX_PATH+1]; + int ret, count; + int fd; + + /* construct the path */ + snprintf(path, sizeof(path), "%s/%s", debugfs_mountpoint, entry); + + /* verify that it exists */ + ret = debugfs_valid_entry(path); + if (ret) + return ret; + + /* get how many chars we're going to write */ + count = strlen(value); + + /* open the debugfs entry */ + fd = open(path, O_RDWR); + if (fd < 0) + return -errno; + + while (count > 0) { + /* write it */ + ret = write(fd, value, count); + if (ret <= 0) { + if (ret == EAGAIN) + continue; + close(fd); + return -errno; + } + count -= ret; + } + + /* close it */ + close(fd); + + /* return success */ + return 0; +} + +/* + * read a debugfs entry + * returns the number of chars read or a negative errno + */ +int debugfs_read(const char *entry, char *buffer, size_t size) +{ + char path[MAX_PATH+1]; + int ret; + int fd; + + /* construct the path */ + snprintf(path, sizeof(path), "%s/%s", debugfs_mountpoint, entry); + + /* verify that it exists */ + ret = debugfs_valid_entry(path); + if (ret) + return ret; + + /* open the debugfs entry */ + fd = open(path, O_RDONLY); + if (fd < 0) + return -errno; + + do { + /* read it */ + ret = read(fd, buffer, size); + if (ret == 0) { + close(fd); + return EOF; + } + } while (ret < 0 && errno == EAGAIN); + + /* close it */ + close(fd); + + /* make *sure* there's a null character at the end */ + buffer[ret] = '\0'; + + /* return the number of chars read */ + return ret; +} diff --git a/tools/perf/util/debugfs.h b/tools/perf/util/debugfs.h new file mode 100644 index 00000000000..3cd14f9ae78 --- /dev/null +++ b/tools/perf/util/debugfs.h @@ -0,0 +1,25 @@ +#ifndef __DEBUGFS_H__ +#define __DEBUGFS_H__ + +#include + +#ifndef MAX_PATH +# define MAX_PATH 256 +#endif + +#ifndef STR +# define _STR(x) #x +# define STR(x) _STR(x) +#endif + +extern const char *debugfs_find_mountpoint(void); +extern int debugfs_valid_mountpoint(const char *debugfs); +extern int debugfs_valid_entry(const char *path); +extern int debugfs_mount(const char *mountpoint); +extern int debugfs_umount(void); +extern int debugfs_write(const char *entry, const char *value); +extern int debugfs_read(const char *entry, char *buffer, size_t size); +extern void debugfs_force_cleanup(void); +extern int debugfs_make_path(const char *element, char *buffer, int size); + +#endif /* __DEBUGFS_H__ */ From 549104f22b3cd4761145eb5fba6ee4d59822da61 Mon Sep 17 00:00:00 2001 From: Clark Williams Date: Sun, 8 Nov 2009 09:03:07 -0600 Subject: [PATCH 239/470] perf tools: Modify perf routines to use new debugfs routines modify perf.c get_debugfs_mntpnt() to use the 
util/debugfs.c debugfs_find_mountpoint() modify util/parse-events.c to use debugfs_valid_mountpoint(). Signed-off-by: Clark Williams Cc: Arnaldo Carvalho de Melo Cc: Peter Zijlstra LKML-Reference: <20091101155720.624cc87e@torg> Signed-off-by: Ingo Molnar --- tools/perf/perf.c | 44 +++++----------------------------- tools/perf/util/parse-events.c | 17 ++++--------- 2 files changed, 10 insertions(+), 51 deletions(-) diff --git a/tools/perf/perf.c b/tools/perf/perf.c index 624e62d9d1e..601f403fbda 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c @@ -14,6 +14,7 @@ #include "util/run-command.h" #include "util/parse-events.h" #include "util/string.h" +#include "util/debugfs.h" const char perf_usage_string[] = "perf [--version] [--help] COMMAND [ARGS]"; @@ -382,45 +383,12 @@ static int run_argv(int *argcp, const char ***argv) /* mini /proc/mounts parser: searching for "^blah /mount/point debugfs" */ static void get_debugfs_mntpt(void) { - FILE *file; - char fs_type[100]; - char debugfs[MAXPATHLEN]; + const char *path = debugfs_find_mountpoint(); - /* - * try the standard location - */ - if (valid_debugfs_mount("/sys/kernel/debug/") == 0) { - strcpy(debugfs_mntpt, "/sys/kernel/debug/"); - return; - } - - /* - * try the sane location - */ - if (valid_debugfs_mount("/debug/") == 0) { - strcpy(debugfs_mntpt, "/debug/"); - return; - } - - /* - * give up and parse /proc/mounts - */ - file = fopen("/proc/mounts", "r"); - if (file == NULL) - return; - - while (fscanf(file, "%*s %" - STR(MAXPATHLEN) - "s %99s %*s %*d %*d\n", - debugfs, fs_type) == 2) { - if (strcmp(fs_type, "debugfs") == 0) - break; - } - fclose(file); - if (strcmp(fs_type, "debugfs") == 0) { - strncpy(debugfs_mntpt, debugfs, MAXPATHLEN); - debugfs_mntpt[MAXPATHLEN - 1] = '\0'; - } + if (path) + strncpy(debugfs_mntpt, path, sizeof(debugfs_mntpt)); + else + debugfs_mntpt[0] = '\0'; } int main(int argc, const char **argv) diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 31baa5a6036..097938a96d7 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -7,6 +7,7 @@ #include "string.h" #include "cache.h" #include "header.h" +#include "debugfs.h" int nr_counters; @@ -149,16 +150,6 @@ static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir) #define MAX_EVENT_LENGTH 512 -int valid_debugfs_mount(const char *debugfs) -{ - struct statfs st_fs; - - if (statfs(debugfs, &st_fs) < 0) - return -ENOENT; - else if (st_fs.f_type != (long) DEBUGFS_MAGIC) - return -ENOENT; - return 0; -} struct tracepoint_path *tracepoint_id_to_path(u64 config) { @@ -171,7 +162,7 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config) char evt_path[MAXPATHLEN]; char dir_path[MAXPATHLEN]; - if (valid_debugfs_mount(debugfs_path)) + if (debugfs_valid_mountpoint(debugfs_path)) return NULL; sys_dir = opendir(debugfs_path); @@ -510,7 +501,7 @@ static enum event_result parse_tracepoint_event(const char **strp, char sys_name[MAX_EVENT_LENGTH]; unsigned int sys_length, evt_length; - if (valid_debugfs_mount(debugfs_path)) + if (debugfs_valid_mountpoint(debugfs_path)) return 0; evt_name = strchr(*strp, ':'); @@ -788,7 +779,7 @@ static void print_tracepoint_events(void) char evt_path[MAXPATHLEN]; char dir_path[MAXPATHLEN]; - if (valid_debugfs_mount(debugfs_path)) + if (debugfs_valid_mountpoint(debugfs_path)) return; sys_dir = opendir(debugfs_path); From 5ff0cfc67f00fe0feaa1da0b2359232ea4aa0ee7 Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Mon, 9 Nov 2009 12:31:05 +0900 Subject: [PATCH 240/470] 
perf bench: Fix bench/sched-pipe.c to wait for child process Ingo reported this small 'perf bench sched pipe' output problem: | $ ./perf bench sched pipe | (executing 1000000 pipe operations between two tasks) | | Total time:4.898 sec | $ 4.898586 usecs/op | 204140 ops/sec | | the shell prompt came back before the usecs/op and ops/sec line | was printed. Process teardown race, lack of wait() or so? This caused by lack of calling waitpid() by parent process, so I added it. Signed-off-by: Hitoshi Mitake Cc: Rusty Russell Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Jiri Kosina LKML-Reference: <1257737465-7546-1-git-send-email-mitake@dcl.info.waseda.ac.jp> Signed-off-by: Ingo Molnar --- tools/perf/bench/sched-pipe.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c index 3214ed20b1a..6a29100e928 100644 --- a/tools/perf/bench/sched-pipe.c +++ b/tools/perf/bench/sched-pipe.c @@ -26,6 +26,7 @@ #include #include #include +#include #define LOOPS_DEFAULT 1000000 static int loops = LOOPS_DEFAULT; @@ -58,8 +59,8 @@ int bench_sched_pipe(int argc, const char **argv, * discarding returned value of read(), write() * causes error in building environment for perf */ - int ret; - pid_t pid; + int ret, wait_stat; + pid_t pid, retpid; argc = parse_options(argc, argv, options, bench_sched_pipe_usage, 0); @@ -87,8 +88,11 @@ int bench_sched_pipe(int argc, const char **argv, gettimeofday(&stop, NULL); timersub(&stop, &start, &diff); - if (pid) + if (pid) { + retpid = waitpid(pid, &wait_stat, 0); + assert((retpid == pid) && WIFEXITED(wait_stat)); return 0; + } if (simple) printf("%lu.%03lu\n", From ca2b900f9af1586b9889ccc4b12e453c13268bd5 Mon Sep 17 00:00:00 2001 From: Zeev Tarantov Date: Mon, 9 Nov 2009 13:26:13 +0200 Subject: [PATCH 241/470] perf tools: Fix syntax in documentation Fix trivial syntax in perf-events user-space tools documentation. Signed-off-by: Zeev Tarantov Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <12d7e64c0911081811i7e5b466cu6706ff6ab3e70db4@mail.gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/Documentation/perf-report.txt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt index 59f0b846cd7..9dccb180b7a 100644 --- a/tools/perf/Documentation/perf-report.txt +++ b/tools/perf/Documentation/perf-report.txt @@ -24,11 +24,11 @@ OPTIONS --dsos=:: Only consider symbols in these dsos. CSV that understands file://filename entries. --n ---show-nr-samples +-n:: +--show-nr-samples:: Show the number of samples for each symbol --T ---threads +-T:: +--threads:: Show per-thread event counters -C:: --comms=:: From 242aa14a67f4e19453fc8a51cffc5ac5ee5bcbd1 Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Tue, 10 Nov 2009 08:19:59 +0900 Subject: [PATCH 242/470] perf bench: Add format constants to bench.h for unified output formatting This patch adds some constants and extern declaration to bench.h. These are used for unified output formatting of 'perf bench'. 
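A sketch of how a benchmark suite is expected to consume these constants
(illustrative only, assuming it is compiled inside tools/perf where bench.h
lives; the real call sites are converted by the follow-up patches below, and
bench_format is normally defined and set from builtin-bench.c via --format):

#include <stdio.h>
#include <stdlib.h>

#include "bench.h"

/* normally provided by builtin-bench.c */
int bench_format = BENCH_FORMAT_DEFAULT;

static void report(unsigned long sec, unsigned long msec)
{
	switch (bench_format) {
	case BENCH_FORMAT_DEFAULT:
		printf("\tTotal time:%lu.%03lu sec\n", sec, msec);
		break;
	case BENCH_FORMAT_SIMPLE:
		printf("%lu.%03lu\n", sec, msec);
		break;
	default:
		/* cmd_bench() rejects unknown formats before this point */
		fprintf(stderr, "Unknown format:%d\n", bench_format);
		exit(1);
	}
}

int main(void)
{
	report(1, 431);
	return 0;
}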
Signed-off-by: Hitoshi Mitake Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1257808802-9420-2-git-send-email-mitake@dcl.info.waseda.ac.jp> Signed-off-by: Ingo Molnar --- tools/perf/bench/bench.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h index 59adb279cd7..42167ea4194 100644 --- a/tools/perf/bench/bench.h +++ b/tools/perf/bench/bench.h @@ -6,4 +6,13 @@ extern int bench_sched_messaging(int argc, const char **argv, extern int bench_sched_pipe(int argc, const char **argv, const char *prefix); +#define BENCH_FORMAT_DEFAULT_STR "default" +#define BENCH_FORMAT_DEFAULT 0 +#define BENCH_FORMAT_SIMPLE_STR "simple" +#define BENCH_FORMAT_SIMPLE 1 + +#define BENCH_FORMAT_UNKNOWN -1 + +extern int bench_format; + #endif From 386d7e9e542c2115d5d300747e57f503458a1617 Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Tue, 10 Nov 2009 08:20:00 +0900 Subject: [PATCH 243/470] perf bench: Modify builtin-bench.c for processing common options This patch modifies builtin-bench.c for processing common options. The first option added is "--format". Users of perf bench will be able to specify output style by --format. Usage example: % ./perf bench sched messaging # with no style specify (20 sender and receiver processes per group) (10 groups == 400 processes run) Total time:1.431 sec % ./perf bench --format=simple sched messaging # specified simple 1.431 Signed-off-by: Hitoshi Mitake Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1257808802-9420-3-git-send-email-mitake@dcl.info.waseda.ac.jp> Signed-off-by: Ingo Molnar --- tools/perf/builtin-bench.c | 79 +++++++++++++++++++++++++++++++------- 1 file changed, 65 insertions(+), 14 deletions(-) diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c index 31f41643b0c..c7505eaff84 100644 --- a/tools/perf/builtin-bench.c +++ b/tools/perf/builtin-bench.c @@ -74,53 +74,104 @@ static void dump_suites(int subsys_index) return; } +static char *bench_format_str; +int bench_format = BENCH_FORMAT_DEFAULT; + +static const struct option bench_options[] = { + OPT_STRING('f', "format", &bench_format_str, "default", + "Specify format style"), + OPT_END() +}; + +static const char * const bench_usage[] = { + "perf bench [] []", + NULL +}; + +static void print_usage(void) +{ + int i; + + printf("Usage: \n"); + for (i = 0; bench_usage[i]; i++) + printf("\t%s\n", bench_usage[i]); + printf("\n"); + + printf("List of available subsystems...\n\n"); + + for (i = 0; subsystems[i].name; i++) + printf("\t%s: %s\n", + subsystems[i].name, subsystems[i].summary); + printf("\n"); +} + +static int bench_str2int(char *str) +{ + if (!str) + return BENCH_FORMAT_DEFAULT; + + if (!strcmp(str, BENCH_FORMAT_DEFAULT_STR)) + return BENCH_FORMAT_DEFAULT; + else if (!strcmp(str, BENCH_FORMAT_SIMPLE_STR)) + return BENCH_FORMAT_SIMPLE; + + return BENCH_FORMAT_UNKNOWN; +} + int cmd_bench(int argc, const char **argv, const char *prefix __used) { int i, j, status = 0; if (argc < 2) { /* No subsystem specified. 
*/ - printf("Usage: perf bench []\n\n"); - printf("List of available subsystems...\n\n"); + print_usage(); + goto end; + } - for (i = 0; subsystems[i].name; i++) - printf("\t%s: %s\n", - subsystems[i].name, subsystems[i].summary); - printf("\n"); + argc = parse_options(argc, argv, bench_options, bench_usage, + PARSE_OPT_STOP_AT_NON_OPTION); + bench_format = bench_str2int(bench_format_str); + if (bench_format == BENCH_FORMAT_UNKNOWN) { + printf("Unknown format descriptor:%s\n", bench_format_str); + goto end; + } + + if (argc < 1) { + print_usage(); goto end; } for (i = 0; subsystems[i].name; i++) { - if (strcmp(subsystems[i].name, argv[1])) + if (strcmp(subsystems[i].name, argv[0])) continue; - if (argc < 3) { + if (argc < 2) { /* No suite specified. */ dump_suites(i); goto end; } for (j = 0; subsystems[i].suites[j].name; j++) { - if (strcmp(subsystems[i].suites[j].name, argv[2])) + if (strcmp(subsystems[i].suites[j].name, argv[1])) continue; - status = subsystems[i].suites[j].fn(argc - 2, - argv + 2, prefix); + status = subsystems[i].suites[j].fn(argc - 1, + argv + 1, prefix); goto end; } - if (!strcmp(argv[2], "-h") || !strcmp(argv[2], "--help")) { + if (!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help")) { dump_suites(i); goto end; } - printf("Unknown suite:%s for %s\n", argv[2], argv[1]); + printf("Unknown suite:%s for %s\n", argv[1], argv[0]); status = 1; goto end; } - printf("Unknown subsystem:%s\n", argv[1]); + printf("Unknown subsystem:%s\n", argv[0]); status = 1; end: From cced06c62a9db6bd6d77e3f0a57dbe47a26d881e Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Tue, 10 Nov 2009 08:20:01 +0900 Subject: [PATCH 244/470] perf bench: Modify bench/bench-messaging.c to adopt unified output formatting This patch modifies bench/bench-messaging.c to adopt unified output formatting: --format option. Usage example: % ./perf bench sched messaging # with no style specify (20 sender and receiver processes per group) (10 groups == 400 processes run) Total time:1.431 sec % ./perf bench --format=simple sched messaging # specified simple 1.431 Signed-off-by: Hitoshi Mitake Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1257808802-9420-4-git-send-email-mitake@dcl.info.waseda.ac.jp> Signed-off-by: Ingo Molnar --- tools/perf/bench/sched-messaging.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c index 36b62c549e2..2cc5edcca3d 100644 --- a/tools/perf/bench/sched-messaging.c +++ b/tools/perf/bench/sched-messaging.c @@ -35,7 +35,6 @@ static int use_pipes = 0; static unsigned int loops = 100; static unsigned int thread_mode = 0; static unsigned int num_groups = 10; -static int simple = 0; struct sender_context { unsigned int num_fds; @@ -261,9 +260,6 @@ static const struct option options[] = { "Specify number of groups"), OPT_INTEGER('l', "loop", &loops, "Specify number of loops"), - OPT_BOOLEAN('s', "simple-output", &simple, - "Do simple output (this maybe useful for" - "processing by scripts or graph tools like gnuplot)"), OPT_END() }; @@ -316,9 +312,8 @@ int bench_sched_messaging(int argc, const char **argv, timersub(&stop, &start, &diff); - if (simple) - printf("%lu.%03lu\n", diff.tv_sec, diff.tv_usec/1000); - else { + switch (bench_format) { + case BENCH_FORMAT_DEFAULT: printf("(%d sender and receiver %s per group)\n", num_fds, thread_mode ? 
"threads" : "processes"); printf("(%d groups == %d %s run)\n\n", @@ -326,6 +321,15 @@ int bench_sched_messaging(int argc, const char **argv, thread_mode ? "threads" : "processes"); printf("\tTotal time:%lu.%03lu sec\n", diff.tv_sec, diff.tv_usec/1000); + break; + case BENCH_FORMAT_SIMPLE: + printf("%lu.%03lu\n", diff.tv_sec, diff.tv_usec/1000); + break; + default: + /* reaching here is something disaster */ + fprintf(stderr, "Unknown format:%d\n", bench_format); + exit(1); + break; } return 0; From 158ba827f6deef4102c5247ed4b6a587f0bd6a07 Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Tue, 10 Nov 2009 08:20:02 +0900 Subject: [PATCH 245/470] perf bench: Modify builtin-pipe.c for processing common options This patch modifies builtin-pipe.c for processing common options. The first option added is "--format". Users of perf bench will be able to specify output style by --format. Usage example: % ./perf bench sched pipe # with no style specify (executing 1000000 pipe operations between two tasks) Total time:5.855 sec 5.855061 usecs/op 170792 ops/sec % ./perf bench --format=simple sched pipe # specified simple 5.988 Signed-off-by: Hitoshi Mitake Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1257808802-9420-5-git-send-email-mitake@dcl.info.waseda.ac.jp> Signed-off-by: Ingo Molnar Cc: Peter Zijlstra Cc: Paul Mackerras --- tools/perf/bench/sched-pipe.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c index 6a29100e928..a9ac186714b 100644 --- a/tools/perf/bench/sched-pipe.c +++ b/tools/perf/bench/sched-pipe.c @@ -30,14 +30,10 @@ #define LOOPS_DEFAULT 1000000 static int loops = LOOPS_DEFAULT; -static int simple = 0; static const struct option options[] = { OPT_INTEGER('l', "loop", &loops, "Specify number of loops"), - OPT_BOOLEAN('s', "simple-output", &simple, - "Do simple output (this maybe useful for" - "processing by scripts or graph tools like gnuplot)"), OPT_END() }; @@ -94,10 +90,8 @@ int bench_sched_pipe(int argc, const char **argv, return 0; } - if (simple) - printf("%lu.%03lu\n", - diff.tv_sec, diff.tv_usec / 1000); - else { + switch (bench_format) { + case BENCH_FORMAT_DEFAULT: printf("(executing %d pipe operations between two tasks)\n\n", loops); @@ -111,6 +105,18 @@ int bench_sched_pipe(int argc, const char **argv, printf("\t\t%d ops/sec\n", (int)((double)loops / ((double)result_usec / (double)1000000))); + break; + + case BENCH_FORMAT_SIMPLE: + printf("%lu.%03lu\n", + diff.tv_sec, diff.tv_usec / 1000); + break; + + default: + /* reaching here is something disaster */ + fprintf(stderr, "Unknown format:%d\n", bench_format); + exit(1); + break; } return 0; From a2202aa29289db64ca7988b12343158b67b27f10 Mon Sep 17 00:00:00 2001 From: Yong Wang Date: Tue, 10 Nov 2009 09:38:24 +0800 Subject: [PATCH 246/470] x86: Under BIOS control, restore AP's APIC_LVTTHMR to the BSP value On platforms where the BIOS handles the thermal monitor interrupt, APIC_LVTTHMR on each logical CPU is programmed to generate a SMI and OS must not touch it. Unfortunately AP bringup sequence using INIT-SIPI-SIPI clears all the LVT entries except the mask bit. Essentially this results in all LVT entries including the thermal monitoring interrupt set to masked (clearing the bios programmed value for APIC_LVTTHMR). And this leads to kernel take over the thermal monitoring interrupt on AP's but not on BSP (leaving the bios programmed value only on BSP). 
As a result of this, we have seen system hangs when the thermal monitoring interrupt is generated. Fix this by reading the initial value of thermal LVT entry on BSP and if bios has taken over the control, then program the same value on all AP's and leave the thermal monitoring interrupt control on all the logical cpu's to the bios. Signed-off-by: Yong Wang Reviewed-by: Suresh Siddha Cc: Borislav Petkov Cc: Arjan van de Ven LKML-Reference: <20091110013824.GA24940@ywang-moblin2.bj.intel.com> Signed-off-by: Ingo Molnar Cc: stable@kernel.org --- arch/x86/include/asm/mce.h | 9 ++++++++ arch/x86/kernel/cpu/common.c | 2 -- arch/x86/kernel/cpu/mcheck/mce.c | 5 ++-- arch/x86/kernel/cpu/mcheck/therm_throt.c | 29 +++++++++++++++++++++++- arch/x86/kernel/setup.c | 3 +++ 5 files changed, 43 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 161485da683..858baa061cf 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -120,8 +120,10 @@ extern int mce_disabled; extern int mce_p5_enabled; #ifdef CONFIG_X86_MCE +int mcheck_init(void); void mcheck_cpu_init(struct cpuinfo_x86 *c); #else +static inline int mcheck_init(void) { return 0; } static inline void mcheck_cpu_init(struct cpuinfo_x86 *c) {} #endif @@ -215,5 +217,12 @@ extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); void intel_init_thermal(struct cpuinfo_x86 *c); void mce_log_therm_throt_event(__u64 status); + +#ifdef CONFIG_X86_THERMAL_VECTOR +extern void mcheck_intel_therm_init(void); +#else +static inline void mcheck_intel_therm_init(void) { } +#endif + #endif /* __KERNEL__ */ #endif /* _ASM_X86_MCE_H */ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 4df69a38be5..9053be5d95c 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -837,10 +837,8 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; } -#ifdef CONFIG_X86_MCE /* Init Machine Check Exception if available. */ mcheck_cpu_init(c); -#endif select_idle_routine(c); diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 80801705edd..0d4102031a4 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -1655,13 +1655,14 @@ static int __init mcheck_enable(char *str) } __setup("mce", mcheck_enable); -static int __init mcheck_init(void) +int __init mcheck_init(void) { atomic_notifier_chain_register(&x86_mce_decoder_chain, &mce_dec_nb); + mcheck_intel_therm_init(); + return 0; } -early_initcall(mcheck_init); /* * Sysfs support diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index b3a1dba7533..7f3cf36ed12 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c @@ -49,6 +49,8 @@ static DEFINE_PER_CPU(struct thermal_state, thermal_state); static atomic_t therm_throt_en = ATOMIC_INIT(0); +static u32 lvtthmr_init __read_mostly; + #ifdef CONFIG_SYSFS #define define_therm_throt_sysdev_one_ro(_name) \ static SYSDEV_ATTR(_name, 0444, therm_throt_sysdev_show_##_name, NULL) @@ -254,6 +256,18 @@ asmlinkage void smp_thermal_interrupt(struct pt_regs *regs) ack_APIC_irq(); } +void mcheck_intel_therm_init(void) +{ + /* + * This function is only called on boot CPU. 
Save the init thermal + * LVT value on BSP and use that value to restore APs' thermal LVT + * entry BIOS programmed later + */ + if (cpu_has(&boot_cpu_data, X86_FEATURE_ACPI) && + cpu_has(&boot_cpu_data, X86_FEATURE_ACC)) + lvtthmr_init = apic_read(APIC_LVTTHMR); +} + void intel_init_thermal(struct cpuinfo_x86 *c) { unsigned int cpu = smp_processor_id(); @@ -270,7 +284,20 @@ void intel_init_thermal(struct cpuinfo_x86 *c) * since it might be delivered via SMI already: */ rdmsr(MSR_IA32_MISC_ENABLE, l, h); - h = apic_read(APIC_LVTTHMR); + + /* + * The initial value of thermal LVT entries on all APs always reads + * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI + * sequence to them and LVT registers are reset to 0s except for + * the mask bits which are set to 1s when APs receive INIT IPI. + * Always restore the value that BIOS has programmed on AP based on + * BSP's info we saved since BIOS is always setting the same value + * for all threads/cores + */ + apic_write(APIC_LVTTHMR, lvtthmr_init); + + h = lvtthmr_init; + if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n", cpu); diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index e09f0e2c14b..179c1f2aa45 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -109,6 +109,7 @@ #ifdef CONFIG_X86_64 #include #endif +#include /* * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries. @@ -1024,6 +1025,8 @@ void __init setup_arch(char **cmdline_p) #endif #endif x86_init.oem.banner(); + + mcheck_init(); } #ifdef CONFIG_X86_32 From 676c0dbe6e514fdd8e434a9e623c781aa9b40b15 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 9 Nov 2009 17:37:34 +0900 Subject: [PATCH 247/470] ksym_tracer: Support read accesses independent of read/write. All of the infrastructure already exists to support read accesses for platforms that support a read access independently of read/write (such as in the case of the SuperH UBC). This just trivially hooks up the read case by itself. 
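Usage example (illustrative; it assumes debugfs is mounted at
/sys/kernel/debug, that the ksym tracer's filter file keeps its existing
name, and that the architecture's debug hardware supports pure read
watchpoints, as the SuperH UBC mentioned above does):

  # echo 'pid_max:r--' > /sys/kernel/debug/tracing/ksym_trace_filter
  # cat /sys/kernel/debug/tracing/ksym_trace_filter

With this change a pure read request ("r--") is parsed and reported, where
previously only "-w-" and "rw-" were accepted.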
Signed-off-by: Paul Mundt Cc: Ingo Molnar Cc: Li Zefan Cc: Prasad Cc: Alan Stern Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Jan Kiszka Cc: Jiri Slaby Cc: Avi Kivity Cc: Paul Mackerras Cc: Mike Galbraith Cc: Masami Hiramatsu Cc: Arjan van de Ven LKML-Reference: <20091109083733.GA25848@linux-sh.org> Signed-off-by: Frederic Weisbecker --- kernel/trace/trace_ksym.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c index fea83eeeef0..11935b53a6c 100644 --- a/kernel/trace/trace_ksym.c +++ b/kernel/trace/trace_ksym.c @@ -136,6 +136,7 @@ static int ksym_trace_get_access_type(char *str) access |= HW_BREAKPOINT_X; switch (access) { + case HW_BREAKPOINT_R: case HW_BREAKPOINT_W: case HW_BREAKPOINT_W | HW_BREAKPOINT_R: return access; @@ -239,7 +240,9 @@ static ssize_t ksym_trace_filter_read(struct file *filp, char __user *ubuf, hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) { ret = trace_seq_printf(s, "%pS:", (void *)entry->ksym_addr); - if (entry->type == HW_BREAKPOINT_W) + if (entry->type == HW_BREAKPOINT_R) + ret = trace_seq_puts(s, "r--\n"); + else if (entry->type == HW_BREAKPOINT_W) ret = trace_seq_puts(s, "-w-\n"); else if (entry->type == (HW_BREAKPOINT_W | HW_BREAKPOINT_R)) ret = trace_seq_puts(s, "rw-\n"); @@ -414,6 +417,9 @@ static enum print_line_t ksym_trace_output(struct trace_iterator *iter) return TRACE_TYPE_PARTIAL_LINE; switch (field->type) { + case HW_BREAKPOINT_R: + ret = trace_seq_printf(s, " R "); + break; case HW_BREAKPOINT_W: ret = trace_seq_printf(s, " W "); break; @@ -488,6 +494,9 @@ static int ksym_tracer_stat_show(struct seq_file *m, void *v) access_type = entry->type; switch (access_type) { + case HW_BREAKPOINT_R: + seq_puts(m, " R "); + break; case HW_BREAKPOINT_W: seq_puts(m, " W "); break; From 9f6b3c2c30cfbb1166ce7e74a8f9fd93ae19d2de Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Mon, 9 Nov 2009 21:03:43 +0100 Subject: [PATCH 248/470] hw-breakpoints: Fix broken a.out format dump Fix the broken a.out format dump. For now we only dump the ptrace breakpoints. TODO: Dump every perf breakpoints for the current thread, not only ptrace based ones. Reported-by: Ingo Molnar Signed-off-by: Frederic Weisbecker Cc: "K. 
Prasad" --- arch/x86/include/asm/a.out-core.h | 10 ++------- arch/x86/include/asm/debugreg.h | 2 ++ arch/x86/kernel/hw_breakpoint.c | 35 +++++++++++++++++++++++++++++++ 3 files changed, 39 insertions(+), 8 deletions(-) diff --git a/arch/x86/include/asm/a.out-core.h b/arch/x86/include/asm/a.out-core.h index fc4685dd6e4..7a15588e45d 100644 --- a/arch/x86/include/asm/a.out-core.h +++ b/arch/x86/include/asm/a.out-core.h @@ -17,6 +17,7 @@ #include #include +#include /* * fill in the user structure for an a.out core dump @@ -32,14 +33,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) >> PAGE_SHIFT; dump->u_dsize -= dump->u_tsize; dump->u_ssize = 0; - dump->u_debugreg[0] = current->thread.debugreg[0]; - dump->u_debugreg[1] = current->thread.debugreg[1]; - dump->u_debugreg[2] = current->thread.debugreg[2]; - dump->u_debugreg[3] = current->thread.debugreg[3]; - dump->u_debugreg[4] = 0; - dump->u_debugreg[5] = 0; - dump->u_debugreg[6] = current->thread.debugreg6; - dump->u_debugreg[7] = current->thread.debugreg7; + aout_dump_debugregs(dump); if (dump->start_stack < TASK_SIZE) dump->u_ssize = ((unsigned long)(TASK_SIZE - dump->start_stack)) diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h index 9a3333c91f9..f1b673f0823 100644 --- a/arch/x86/include/asm/debugreg.h +++ b/arch/x86/include/asm/debugreg.h @@ -89,6 +89,8 @@ static inline void hw_breakpoint_disable(void) set_debugreg(0UL, 3); } +extern void aout_dump_debugregs(struct user *dump); + #ifdef CONFIG_KVM extern void hw_breakpoint_restore(void); #endif diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index e622620790b..57dcee5fa95 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -375,6 +375,41 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp, return 0; } +/* + * Dump the debug register contents to the user. + * We can't dump our per cpu values because it + * may contain cpu wide breakpoint, something that + * doesn't belong to the current task. + * + * TODO: include non-ptrace user breakpoints (perf) + */ +void aout_dump_debugregs(struct user *dump) +{ + int i; + int dr7 = 0; + struct perf_event *bp; + struct arch_hw_breakpoint *info; + struct thread_struct *thread = ¤t->thread; + + for (i = 0; i < HBP_NUM; i++) { + bp = thread->ptrace_bps[i]; + + if (bp && !bp->attr.disabled) { + dump->u_debugreg[i] = bp->attr.bp_addr; + info = counter_arch_bp(bp); + dr7 |= encode_dr7(i, info->len, info->type); + } else { + dump->u_debugreg[i] = 0; + } + } + + dump->u_debugreg[4] = 0; + dump->u_debugreg[5] = 0; + dump->u_debugreg[6] = current->thread.debugreg6; + + dump->u_debugreg[7] = dr7; +} + /* * Release the user breakpoints used by ptrace */ From f60d24d2ad04977b0bd9e3eb35dba2d2fa569af9 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 10 Nov 2009 10:17:07 +0100 Subject: [PATCH 249/470] hw-breakpoints: Fix broken hw-breakpoint sample module The hw-breakpoint sample module has been broken during the hw-breakpoint internals refactoring. Propagate the changes to it. Reported-by: "K. 
Prasad" Signed-off-by: Frederic Weisbecker --- kernel/hw_breakpoint.c | 3 +- kernel/kallsyms.c | 1 + samples/hw_breakpoint/data_breakpoint.c | 43 ++++++++++++++----------- 3 files changed, 27 insertions(+), 20 deletions(-) diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index e662dc991c9..9ea9414e0e5 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c @@ -454,6 +454,7 @@ fail: /* return the error if any */ return ERR_PTR(err); } +EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint); /** * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel @@ -470,7 +471,7 @@ void unregister_wide_hw_breakpoint(struct perf_event **cpu_events) } free_percpu(cpu_events); } - +EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint); static struct notifier_block hw_breakpoint_exceptions_nb = { .notifier_call = hw_breakpoint_exceptions_notify, diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index 8b6b8b697c6..8e5288a8a35 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -181,6 +181,7 @@ unsigned long kallsyms_lookup_name(const char *name) } return module_kallsyms_lookup_name(name); } +EXPORT_SYMBOL_GPL(kallsyms_lookup_name); int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *, unsigned long), diff --git a/samples/hw_breakpoint/data_breakpoint.c b/samples/hw_breakpoint/data_breakpoint.c index 9cbdbb871b7..5bc9819a819 100644 --- a/samples/hw_breakpoint/data_breakpoint.c +++ b/samples/hw_breakpoint/data_breakpoint.c @@ -27,18 +27,19 @@ #include /* Needed by all modules */ #include /* Needed for KERN_INFO */ #include /* Needed for the macros */ +#include -#include +#include +#include -struct hw_breakpoint sample_hbp; +struct perf_event **sample_hbp; static char ksym_name[KSYM_NAME_LEN] = "pid_max"; module_param_string(ksym, ksym_name, KSYM_NAME_LEN, S_IRUGO); MODULE_PARM_DESC(ksym, "Kernel symbol to monitor; this module will report any" " write operations on the kernel symbol"); -void sample_hbp_handler(struct hw_breakpoint *temp, struct pt_regs - *temp_regs) +static void sample_hbp_handler(struct perf_event *temp, void *data) { printk(KERN_INFO "%s value is changed\n", ksym_name); dump_stack(); @@ -48,30 +49,34 @@ void sample_hbp_handler(struct hw_breakpoint *temp, struct pt_regs static int __init hw_break_module_init(void) { int ret; + unsigned long addr; -#ifdef CONFIG_X86 - sample_hbp.info.name = ksym_name; - sample_hbp.info.type = HW_BREAKPOINT_WRITE; - sample_hbp.info.len = HW_BREAKPOINT_LEN_4; -#endif /* CONFIG_X86 */ + addr = kallsyms_lookup_name(ksym_name); - sample_hbp.triggered = (void *)sample_hbp_handler; + sample_hbp = register_wide_hw_breakpoint(addr, HW_BREAKPOINT_LEN_4, + HW_BREAKPOINT_W | HW_BREAKPOINT_R, + sample_hbp_handler, true); + if (IS_ERR(sample_hbp)) { + ret = PTR_ERR(sample_hbp); + goto fail; + } else if (!sample_hbp) { + ret = -EINVAL; + goto fail; + } - ret = register_kernel_hw_breakpoint(&sample_hbp); - - if (ret < 0) { - printk(KERN_INFO "Breakpoint registration failed\n"); - return ret; - } else - printk(KERN_INFO "HW Breakpoint for %s write installed\n", - ksym_name); + printk(KERN_INFO "HW Breakpoint for %s write installed\n", ksym_name); return 0; + +fail: + printk(KERN_INFO "Breakpoint registration failed\n"); + + return ret; } static void __exit hw_break_module_exit(void) { - unregister_kernel_hw_breakpoint(&sample_hbp); + unregister_wide_hw_breakpoint(sample_hbp); printk(KERN_INFO "HW Breakpoint for %s write uninstalled\n", ksym_name); } From 59d8eb53ea9947db7cad8ebc31b0fb54f23a9851 Mon Sep 17 00:00:00 2001 
From: Frederic Weisbecker Date: Tue, 10 Nov 2009 11:03:12 +0100 Subject: [PATCH 250/470] hw-breakpoints: Wrap in the KVM breakpoint active state check Wrap in the cpu dr7 check that tells if we have active breakpoints that need to be restored in the cpu. This wrapper makes the check more self-explainable and also reusable for any further other uses. Reported-by: Jan Kiszka Signed-off-by: Frederic Weisbecker Cc: Avi Kivity Cc: "K. Prasad" --- arch/x86/include/asm/debugreg.h | 5 +++++ arch/x86/kvm/x86.c | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h index f1b673f0823..0f6e92af422 100644 --- a/arch/x86/include/asm/debugreg.h +++ b/arch/x86/include/asm/debugreg.h @@ -89,6 +89,11 @@ static inline void hw_breakpoint_disable(void) set_debugreg(0UL, 3); } +static inline int hw_breakpoint_active(void) +{ + return __get_cpu_var(dr7) & DR_GLOBAL_ENABLE_MASK; +} + extern void aout_dump_debugregs(struct user *dump); #ifdef CONFIG_KVM diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 22dee7aa781..3817220cc86 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3651,7 +3651,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) * care about the messed up debug address registers. But if * we have some of them active, restore the old state. */ - if (__get_cpu_var(dr7) & DR_GLOBAL_ENABLE_MASK) + if (hw_breakpoint_active()) hw_breakpoint_restore(); set_bit(KVM_REQ_KICK, &vcpu->requests); From 606bc1e18d346fc7d7fb333909cc95b06b1ca5b1 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 10 Nov 2009 20:50:53 +0900 Subject: [PATCH 251/470] perf bench: Clean up bench/bench.h Clean up initializers in bench.h: - No need to break the line for function prototypes, they are more readable in a single line. (even if checkpatch complains about it - We try to align definitions / structure fields vertically, to make it all a bit more readable. Signed-off-by: Ingo Molnar Signed-off-by: Hitoshi Mitake Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1257853855-28934-2-git-send-email-mitake@dcl.info.waseda.ac.jp> --- tools/perf/bench/bench.h | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h index 42167ea4194..9fbd8d745fa 100644 --- a/tools/perf/bench/bench.h +++ b/tools/perf/bench/bench.h @@ -1,17 +1,15 @@ #ifndef BENCH_H #define BENCH_H -extern int bench_sched_messaging(int argc, const char **argv, - const char *prefix); -extern int bench_sched_pipe(int argc, const char **argv, - const char *prefix); +extern int bench_sched_messaging(int argc, const char **argv, const char *prefix); +extern int bench_sched_pipe(int argc, const char **argv, const char *prefix); -#define BENCH_FORMAT_DEFAULT_STR "default" -#define BENCH_FORMAT_DEFAULT 0 -#define BENCH_FORMAT_SIMPLE_STR "simple" -#define BENCH_FORMAT_SIMPLE 1 +#define BENCH_FORMAT_DEFAULT_STR "default" +#define BENCH_FORMAT_DEFAULT 0 +#define BENCH_FORMAT_SIMPLE_STR "simple" +#define BENCH_FORMAT_SIMPLE 1 -#define BENCH_FORMAT_UNKNOWN -1 +#define BENCH_FORMAT_UNKNOWN -1 extern int bench_format; From 9fbc04f2493929a69fd9e53b5fb53c127d7950d5 Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Tue, 10 Nov 2009 20:50:54 +0900 Subject: [PATCH 252/470] perf bench: Add new document about perf-bench This patch adds new document about perf-bench. Man page and html will be provided for user. 
Signed-off-by: Hitoshi Mitake Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1257853855-28934-3-git-send-email-mitake@dcl.info.waseda.ac.jp> Signed-off-by: Ingo Molnar --- tools/perf/Documentation/perf-bench.txt | 120 ++++++++++++++++++++++++ 1 file changed, 120 insertions(+) create mode 100644 tools/perf/Documentation/perf-bench.txt diff --git a/tools/perf/Documentation/perf-bench.txt b/tools/perf/Documentation/perf-bench.txt new file mode 100644 index 00000000000..ae525ac5a2c --- /dev/null +++ b/tools/perf/Documentation/perf-bench.txt @@ -0,0 +1,120 @@ +perf-bench(1) +============ + +NAME +---- +perf-bench - General framework for benchmark suites + +SYNOPSIS +-------- +[verse] +'perf bench' [] [] + +DESCRIPTION +----------- +This 'perf bench' command is general framework for benchmark suites. + +COMMON OPTIONS +-------------- +-f:: +--format=:: +Specify format style. +Current available format styles are, + +'default':: +Default style. This is mainly for human reading. +--------------------- +% perf bench sched pipe # with no style specify +(executing 1000000 pipe operations between two tasks) + Total time:5.855 sec + 5.855061 usecs/op + 170792 ops/sec +--------------------- + +'simple':: +This simple style is friendly for automated +processing by scripts. +--------------------- +% perf bench --format=simple sched pipe # specified simple +5.988 +--------------------- + +SUBSYSTEM +--------- + +'sched':: + Scheduler and IPC mechanisms. + +SUITES FOR 'sched' +~~~~~~~~~~~~~~~~~~ +*messaging*:: +Suite for evaluating performance of scheduler and IPC mechanisms. +Based on hackbench by Rusty Russell. + +Options of *pipe* +^^^^^^^^^^^^^^^^^ +-p:: +--pipe:: +Use pipe() instead of socketpair() + +-t:: +--thread:: +Be multi thread instead of multi process + +-g:: +--group=:: +Specify number of groups + +-l:: +--loop=:: +Specify number of loops + +Example of *messaging* +^^^^^^^^^^^^^^^^^^^^^^ + +--------------------- +% perf bench sched messaging # run with default +options (20 sender and receiver processes per group) +(10 groups == 400 processes run) + + Total time:0.308 sec + +% perf bench sched messaging -t -g 20 # be multi-thread,with 20 groups +(20 sender and receiver threads per group) +(20 groups == 800 threads run) + + Total time:0.582 sec +--------------------- + +*pipe*:: +Suite for pipe() system call. +Based on pipe-test-1m.c by Ingo Molnar. + +Options of *pipe* +^^^^^^^^^^^^^^^^^ +-l:: +--loop=:: +Specify number of loops. + +Example of *pipe* +^^^^^^^^^^^^^^^^^ + +--------------------- +% perf bench sched pipe +(executing 1000000 pipe operations between two tasks) + + Total time:8.091 sec + 8.091833 usecs/op + 123581 ops/sec + +% perf bench sched pipe -l 1000 # loop 1000 +(executing 1000 pipe operations between two tasks) + + Total time:0.016 sec + 16.948000 usecs/op + 59004 ops/sec +--------------------- + +SEE ALSO +-------- +linkperf:perf[1] From 8d8d61aadb9d8cce07f7dcdb77a4c20a25d36d07 Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Tue, 10 Nov 2009 20:50:55 +0900 Subject: [PATCH 253/470] perf bench: Modify command-list.txt for the entry of perf-bench This patch modifies command-list.txt for the entry of perf-bench. So perf will show 'bench' in command list. 
Example: % perf usage: perf [--version] [--help] COMMAND [ARGS] The most commonly used perf commands are: annotate Read perf.data (created by perf record) and display annotated code bench General framework for benchmark suites ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ list List all symbolic event types probe Define new dynamic tracepoints record Run a command and record its profile into perf.data report Read perf.data (created by perf record) and display the profile sched Tool to trace/measure scheduler properties (latencies) stat Run a command and gather performance counter statistics timechart Tool to visualize total system behavior during a workload top System profiling tool. trace Read perf.data (created by perf record) and display trace output See 'perf help COMMAND' for more information on a specific command. Signed-off-by: Hitoshi Mitake Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1257853855-28934-4-git-send-email-mitake@dcl.info.waseda.ac.jp> Signed-off-by: Ingo Molnar --- tools/perf/command-list.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt index 00326e230d8..981c40b9a5e 100644 --- a/tools/perf/command-list.txt +++ b/tools/perf/command-list.txt @@ -3,6 +3,7 @@ # command name category [deprecated] [common] # perf-annotate mainporcelain common +perf-bench mainporcelain common perf-list mainporcelain common perf-sched mainporcelain common perf-record mainporcelain common From 79e295d4bd0f524257299e7c4e42f643f21abcc2 Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Wed, 11 Nov 2009 00:04:00 +0900 Subject: [PATCH 254/470] perf bench: Improve builtin-bench.c for more friendly output This patch makes output of perf bench more friendly. Current style of putput, keeping user wait and printing everything suddenly when we finish, may confuse users. So I improved it: | % perf bench sched messaging | # Running sched/messaging benchmark... <- printed right after invocation | # 20 sender and receiver processes per group | # 10 groups == 400 processes run | | Total time: 1.476 [sec] Signed-off-by: Hitoshi Mitake Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1257865442-20252-2-git-send-email-mitake@dcl.info.waseda.ac.jp> Signed-off-by: Ingo Molnar --- tools/perf/builtin-bench.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c index c7505eaff84..90c39baae0d 100644 --- a/tools/perf/builtin-bench.c +++ b/tools/perf/builtin-bench.c @@ -156,6 +156,10 @@ int cmd_bench(int argc, const char **argv, const char *prefix __used) if (strcmp(subsystems[i].suites[j].name, argv[1])) continue; + if (bench_format == BENCH_FORMAT_DEFAULT) + printf("# Running %s/%s benchmark...\n", + subsystems[i].name, + subsystems[i].suites[j].name); status = subsystems[i].suites[j].fn(argc - 1, argv + 1, prefix); goto end; From ff676b193a401b23c84a79a7ec06559f3eaae917 Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Wed, 11 Nov 2009 00:04:01 +0900 Subject: [PATCH 255/470] perf bench: Improve sched-pipe.c with more comfortable output This patch improves sched-pipe.c with more comfortable output. Change points are comment style description and formatting numerical values and its units. Example: | % ./perf bench sched pipe | # Running sched/pipe benchmark... 
| # Extecuted 1000000 pipe operations between two tasks | | Total time:5.822 [sec] | | 5.822553 usecs/op | 171745 ops/sec Signed-off-by: Hitoshi Mitake Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1257865442-20252-3-git-send-email-mitake@dcl.info.waseda.ac.jp> Signed-off-by: Ingo Molnar --- tools/perf/bench/sched-pipe.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c index a9ac186714b..238185f9797 100644 --- a/tools/perf/bench/sched-pipe.c +++ b/tools/perf/bench/sched-pipe.c @@ -92,17 +92,18 @@ int bench_sched_pipe(int argc, const char **argv, switch (bench_format) { case BENCH_FORMAT_DEFAULT: - printf("(executing %d pipe operations between two tasks)\n\n", + printf("# Extecuted %d pipe operations between two tasks\n\n", loops); result_usec = diff.tv_sec * 1000000; result_usec += diff.tv_usec; - printf("\tTotal time:%lu.%03lu sec\n", - diff.tv_sec, diff.tv_usec / 1000); - printf("\t\t%lf usecs/op\n", + printf(" %14s: %lu.%03lu [sec]\n\n", "Total time", + diff.tv_sec, diff.tv_usec/1000); + + printf(" %14lf usecs/op\n", (double)result_usec / (double)loops); - printf("\t\t%d ops/sec\n", + printf(" %14d ops/sec\n", (int)((double)loops / ((double)result_usec / (double)1000000))); break; From c5659b74f052150791750234f92dcfb29d27efa5 Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Wed, 11 Nov 2009 00:04:02 +0900 Subject: [PATCH 256/470] perf bench: Improve sched-message.c with more comfortable output This patch improves sched-message.c with more comfortable output. Change points are comment style description and formatting numerical values and its units. Example: | % perf bench sched messaging | # Running sched/messaging benchmark... | # 20 sender and receiver processes per group | # 10 groups == 400 processes run | | Total time: 1.490 [sec] Signed-off-by: Hitoshi Mitake Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1257865442-20252-4-git-send-email-mitake@dcl.info.waseda.ac.jp> Signed-off-by: Ingo Molnar Cc: Peter Zijlstra Cc: Paul Mackerras --- tools/perf/bench/sched-messaging.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c index 2cc5edcca3d..605a2a959aa 100644 --- a/tools/perf/bench/sched-messaging.c +++ b/tools/perf/bench/sched-messaging.c @@ -314,12 +314,12 @@ int bench_sched_messaging(int argc, const char **argv, switch (bench_format) { case BENCH_FORMAT_DEFAULT: - printf("(%d sender and receiver %s per group)\n", + printf("# %d sender and receiver %s per group\n", num_fds, thread_mode ? "threads" : "processes"); - printf("(%d groups == %d %s run)\n\n", + printf("# %d groups == %d %s run\n\n", num_groups, num_groups * 2 * num_fds, thread_mode ? "threads" : "processes"); - printf("\tTotal time:%lu.%03lu sec\n", + printf(" %14s: %lu.%03lu [sec]\n", "Total time", diff.tv_sec, diff.tv_usec/1000); break; case BENCH_FORMAT_SIMPLE: From de8967214d8ce536161a1ad6538ad1cb82e7428d Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 11 Nov 2009 04:51:02 +0100 Subject: [PATCH 257/470] perf tools: Synthetize the targeted process Don't forget to also synthetize the targeted process from perf record or we'll miss its dso in the events and then we won't be able to deal with its build-id. We are missing it because it is created after the existing synthetized tasks but before the counters are enabled and can send its mapping event. 
Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras Cc: Hitoshi Mitake LKML-Reference: <1257911467-28276-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index ab333812ace..9f98b86e747 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -497,13 +497,22 @@ static int __cmd_record(int argc, const char **argv) if (target_pid == -1 && argc) { pid = fork(); if (pid < 0) - perror("failed to fork"); + die("failed to fork"); if (!pid) { if (execvp(argv[0], (char **)argv)) { perror(argv[0]); exit(-1); } + } else { + /* + * Wait a bit for the execv'ed child to appear + * and be updated in /proc + * FIXME: Do you know a less heuristical solution? + */ + usleep(1000); + event__synthesize_thread(pid, + process_synthesized_event); } child_pid = pid; From 8671dab9d5b2f0b444b8d09792384dccbfd43d14 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 11 Nov 2009 04:51:03 +0100 Subject: [PATCH 258/470] perf tools: Move the build-id storage operations to headers So that it makes easier to control it. Especially because we plan to give it a feature section. Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras Cc: Hitoshi Mitake LKML-Reference: <1257911467-28276-2-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 32 ++--------------------------- tools/perf/util/header.c | 41 ++++++++++++++++++++++++++++++++++--- tools/perf/util/header.h | 2 +- 3 files changed, 41 insertions(+), 34 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 9f98b86e747..c35e61b3098 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -378,39 +378,11 @@ static void open_counters(int cpu, pid_t pid) nr_cpu++; } -static bool write_buildid_table(void) -{ - struct dso *pos; - bool have_buildid = false; - - list_for_each_entry(pos, &dsos, node) { - struct build_id_event b; - size_t len; - - if (filename__read_build_id(pos->long_name, - &b.build_id, - sizeof(b.build_id)) < 0) - continue; - have_buildid = true; - memset(&b.header, 0, sizeof(b.header)); - len = strlen(pos->long_name) + 1; - len = ALIGN(len, 64); - b.header.size = sizeof(b) + len; - write_output(&b, sizeof(b)); - write_output(pos->long_name, len); - } - - return have_buildid; -} - static void atexit_header(void) { header->data_size += bytes_written; - if (write_buildid_table()) - perf_header__set_feat(header, HEADER_BUILD_ID); - - perf_header__write(header, output); + perf_header__write(header, output, true); } static int __cmd_record(int argc, const char **argv) @@ -487,7 +459,7 @@ static int __cmd_record(int argc, const char **argv) } if (file_new) - perf_header__write(header, output); + perf_header__write(header, output, false); if (!system_wide) event__synthesize_thread(pid, process_synthesized_event); diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 050f543fd96..a4d0bbef9a4 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -2,11 +2,13 @@ #include #include #include +#include #include "util.h" #include "header.h" #include "../perf.h" #include "trace-event.h" +#include "symbol.h" /* * Create new perf.data header attribute: @@ -172,7 +174,33 @@ static void do_write(int fd, void 
*buf, size_t size) } } -static void perf_header__adds_write(struct perf_header *self, int fd) +static bool write_buildid_table(int fd) +{ + struct dso *pos; + bool have_buildid = false; + + list_for_each_entry(pos, &dsos, node) { + struct build_id_event b; + size_t len; + + if (filename__read_build_id(pos->long_name, + &b.build_id, + sizeof(b.build_id)) < 0) + continue; + have_buildid = true; + memset(&b.header, 0, sizeof(b.header)); + len = strlen(pos->long_name) + 1; + len = ALIGN(len, 64); + b.header.size = sizeof(b) + len; + do_write(fd, &b, sizeof(b)); + do_write(fd, pos->long_name, len); + } + + return have_buildid; +} + +static void +perf_header__adds_write(struct perf_header *self, int fd, bool at_exit) { struct perf_file_section trace_sec; u64 cur_offset = lseek(fd, 0, SEEK_CUR); @@ -196,9 +224,16 @@ static void perf_header__adds_write(struct perf_header *self, int fd) */ cur_offset = lseek(fd, trace_sec.offset + trace_sec.size, SEEK_SET); } + + if (at_exit) { + lseek(fd, self->data_offset + self->data_size, SEEK_SET); + if (write_buildid_table(fd)) + perf_header__set_feat(self, HEADER_BUILD_ID); + lseek(fd, cur_offset, SEEK_SET); + } }; -void perf_header__write(struct perf_header *self, int fd) +void perf_header__write(struct perf_header *self, int fd, bool at_exit) { struct perf_file_header f_header; struct perf_file_attr f_attr; @@ -236,7 +271,7 @@ void perf_header__write(struct perf_header *self, int fd) if (events) do_write(fd, events, self->event_size); - perf_header__adds_write(self, fd); + perf_header__adds_write(self, fd, at_exit); self->data_offset = lseek(fd, 0, SEEK_CUR); diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index 2f233c5db7e..77186c9e605 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -33,7 +33,7 @@ struct perf_header { }; struct perf_header *perf_header__read(int fd); -void perf_header__write(struct perf_header *self, int fd); +void perf_header__write(struct perf_header *self, int fd, bool at_exit); void perf_header__add_attr(struct perf_header *self, struct perf_header_attr *attr); From 57f395a7eabb913d3605d7392be5bdb0837c9f3d Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 11 Nov 2009 04:51:04 +0100 Subject: [PATCH 259/470] perf tools: Split up build id saving into fetch and write We are saving the build id once we stop the profiling. And only after doing that we know if we need to set that feature in the header through the feature bitmap. But if we want a proper feature support in the headers, using a rule of offset/size pairs in sections, we need to know in advance how many features we need to set in the headers, so that we can reserve rooms for their section headers. The current state doesn't allow that, as it forces us to first save the build-ids to the file right after the datas instead of planning any structured layout. That's why this splits up the build-ids processing in two parts: one that fetches the build-ids from the Dso objects, and one that saves them into the file. 
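As a rough illustration, the two-phase flow described above boils down to the following call sequence (the function names are the ones introduced by this patch; the call site and the target offset are simplified and partly hypothetical):

    LIST_HEAD(id_list);

    /* Phase 1: walk the DSO list and collect build-id events in memory */
    if (fetch_build_id_table(&id_list)) {
            /* Only now do we know the feature must be advertised */
            perf_header__set_feat(header, HEADER_BUILD_ID);

            /* Phase 2: seek to wherever the layout places them and write */
            lseek(fd, buildid_offset, SEEK_SET);   /* hypothetical offset */
            write_buildid_table(fd, &id_list);
    }

Because the collection step no longer writes anything, the header code is free to decide later where in the file the table ends up, which is what the follow-up layout change relies on.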
Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras Cc: Hitoshi Mitake LKML-Reference: <1257911467-28276-3-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/util/event.h | 7 +++++++ tools/perf/util/header.c | 37 +++++++++++++++---------------------- tools/perf/util/symbol.c | 34 ++++++++++++++++++++++++++++++++++ tools/perf/util/symbol.h | 1 + 4 files changed, 57 insertions(+), 22 deletions(-) diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 34c6fcb82d9..1f771ce3a95 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -69,6 +69,13 @@ struct build_id_event { char filename[]; }; +struct build_id_list { + struct build_id_event event; + struct list_head list; + const char *dso_name; + int len; +}; + typedef union event_union { struct perf_event_header header; struct ip_event ip; diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index a4d0bbef9a4..2f702c23f71 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -174,29 +174,18 @@ static void do_write(int fd, void *buf, size_t size) } } -static bool write_buildid_table(int fd) +static void write_buildid_table(int fd, struct list_head *id_head) { - struct dso *pos; - bool have_buildid = false; + struct build_id_list *iter, *next; - list_for_each_entry(pos, &dsos, node) { - struct build_id_event b; - size_t len; + list_for_each_entry_safe(iter, next, id_head, list) { + struct build_id_event *b = &iter->event; - if (filename__read_build_id(pos->long_name, - &b.build_id, - sizeof(b.build_id)) < 0) - continue; - have_buildid = true; - memset(&b.header, 0, sizeof(b.header)); - len = strlen(pos->long_name) + 1; - len = ALIGN(len, 64); - b.header.size = sizeof(b) + len; - do_write(fd, &b, sizeof(b)); - do_write(fd, pos->long_name, len); + do_write(fd, b, sizeof(*b)); + do_write(fd, (void *)iter->dso_name, iter->len); + list_del(&iter->list); + free(iter); } - - return have_buildid; } static void @@ -226,10 +215,14 @@ perf_header__adds_write(struct perf_header *self, int fd, bool at_exit) } if (at_exit) { - lseek(fd, self->data_offset + self->data_size, SEEK_SET); - if (write_buildid_table(fd)) + LIST_HEAD(id_list); + + if (fetch_build_id_table(&id_list)) { + lseek(fd, self->data_offset + self->data_size, SEEK_SET); perf_header__set_feat(self, HEADER_BUILD_ID); - lseek(fd, cur_offset, SEEK_SET); + write_buildid_table(fd, &id_list); + lseek(fd, cur_offset, SEEK_SET); + } } }; diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index a2e95ce1f22..9c286db6200 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -851,6 +851,40 @@ out_close: return err; } +bool fetch_build_id_table(struct list_head *head) +{ + bool have_buildid = false; + struct dso *pos; + + list_for_each_entry(pos, &dsos, node) { + struct build_id_list *new; + struct build_id_event b; + size_t len; + + if (filename__read_build_id(pos->long_name, + &b.build_id, + sizeof(b.build_id)) < 0) + continue; + have_buildid = true; + memset(&b.header, 0, sizeof(b.header)); + len = strlen(pos->long_name) + 1; + len = ALIGN(len, 64); + b.header.size = sizeof(b) + len; + + new = malloc(sizeof(*new)); + if (!new) + die("No memory\n"); + + memcpy(&new->event, &b, sizeof(b)); + new->dso_name = pos->long_name; + new->len = len; + + list_add_tail(&new->list, head); + } + + return have_buildid; +} + int filename__read_build_id(const char *filename, void *bf, size_t size) { int fd, err = -1; diff --git 
a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index f8c1899af48..0a34a5493f1 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -86,6 +86,7 @@ char dso__symtab_origin(const struct dso *self); void dso__set_build_id(struct dso *self, void *build_id); int filename__read_build_id(const char *filename, void *bf, size_t size); +bool fetch_build_id_table(struct list_head *head); int build_id__sprintf(u8 *self, int len, char *bf); int load_kernel(symbol_filter_t filter); From 4778d2e4f410c6eea32f594cb2be9590bcb28b84 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 11 Nov 2009 04:51:05 +0100 Subject: [PATCH 260/470] perf tools: Read the build-ids from the header layer Keep the build-ids reading implementation in the data mapping but move its call to the headers so that we have a better control on it (offset seeking, size passing, etc..). Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras Cc: Hitoshi Mitake LKML-Reference: <1257911467-28276-4-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/util/data_map.c | 8 ++------ tools/perf/util/data_map.h | 2 ++ tools/perf/util/header.c | 14 ++++++++++++-- 3 files changed, 16 insertions(+), 8 deletions(-) diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c index 00a9c114c8d..66e58aaecce 100644 --- a/tools/perf/util/data_map.c +++ b/tools/perf/util/data_map.c @@ -70,8 +70,8 @@ process_event(event_t *event, unsigned long offset, unsigned long head) } } -static int perf_header__read_build_ids(const struct perf_header *self, - int input, off_t file_size) +int perf_header__read_build_ids(const struct perf_header *self, + int input, off_t file_size) { off_t offset = self->data_offset + self->data_size; struct build_id_event bev; @@ -163,10 +163,6 @@ int mmap_dispatch_perf_file(struct perf_header **pheader, if (curr_handler->sample_type_check(sample_type) < 0) exit(-1); - if (perf_header__has_feat(header, HEADER_BUILD_ID) && - perf_header__read_build_ids(header, input, input_stat.st_size)) - pr_debug("failed to read buildids, continuing...\n"); - if (load_kernel(NULL) < 0) { perror("failed to load kernel symbols"); return EXIT_FAILURE; diff --git a/tools/perf/util/data_map.h b/tools/perf/util/data_map.h index 716d1053b07..c4122810e48 100644 --- a/tools/perf/util/data_map.h +++ b/tools/perf/util/data_map.h @@ -27,5 +27,7 @@ int mmap_dispatch_perf_file(struct perf_header **pheader, int full_paths, int *cwdlen, char **cwd); +int perf_header__read_build_ids(const struct perf_header *self, + int input, off_t file_size); #endif diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 2f702c23f71..915b56edbf0 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -9,6 +9,8 @@ #include "../perf.h" #include "trace-event.h" #include "symbol.h" +#include "data_map.h" +#include "debug.h" /* * Create new perf.data header attribute: @@ -322,6 +324,14 @@ static void perf_header__adds_read(struct perf_header *self, int fd) trace_report(fd); lseek(fd, trace_sec.offset + trace_sec.size, SEEK_SET); } + + if (perf_header__has_feat(self, HEADER_BUILD_ID)) { + struct stat input_stat; + + fstat(fd, &input_stat); + if (perf_header__read_build_ids(self, fd, input_stat.st_size)) + pr_debug("failed to read buildids, continuing...\n"); + } }; struct perf_header *perf_header__read(int fd) @@ -382,14 +392,14 @@ struct perf_header *perf_header__read(int fd) memcpy(&self->adds_features, &f_header.adds_features, 
sizeof(f_header.adds_features)); - perf_header__adds_read(self, fd); - self->event_offset = f_header.event_types.offset; self->event_size = f_header.event_types.size; self->data_offset = f_header.data.offset; self->data_size = f_header.data.size; + perf_header__adds_read(self, fd); + lseek(fd, self->data_offset, SEEK_SET); self->frozen = 1; From 3e13ab2d83b6867a20663c73c184f29c2fde1558 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 11 Nov 2009 04:51:06 +0100 Subject: [PATCH 261/470] perf tools: Use perf_header__set/has_feat whenever possible And drop the alternate checks/sets using set_bit or other kind of helpers. Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras Cc: Hitoshi Mitake LKML-Reference: <1257911467-28276-5-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 4 ++-- tools/perf/util/header.c | 12 ++---------- tools/perf/util/header.h | 1 - 3 files changed, 4 insertions(+), 13 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index c35e61b3098..326e8a79cab 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -431,11 +431,11 @@ static int __cmd_record(int argc, const char **argv) header = perf_header__new(); if (raw_samples) { - perf_header__feat_trace_info(header); + perf_header__set_feat(header, HEADER_TRACE_INFO); } else { for (i = 0; i < nr_counters; i++) { if (attrs[i].sample_type & PERF_SAMPLE_RAW) { - perf_header__feat_trace_info(header); + perf_header__set_feat(header, HEADER_TRACE_INFO); break; } } diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 915b56edbf0..9709d38113b 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -148,11 +148,6 @@ struct perf_file_header { DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); }; -void perf_header__feat_trace_info(struct perf_header *header) -{ - set_bit(HEADER_TRACE_INFO, header->adds_features); -} - void perf_header__set_feat(struct perf_header *self, int feat) { set_bit(feat, self->adds_features); @@ -195,9 +190,8 @@ perf_header__adds_write(struct perf_header *self, int fd, bool at_exit) { struct perf_file_section trace_sec; u64 cur_offset = lseek(fd, 0, SEEK_CUR); - unsigned long *feat_mask = self->adds_features; - if (test_bit(HEADER_TRACE_INFO, feat_mask)) { + if (perf_header__has_feat(self, HEADER_TRACE_INFO)) { /* Write trace info */ trace_sec.offset = lseek(fd, sizeof(trace_sec), SEEK_CUR); read_tracing_data(fd, attrs, nr_counters); @@ -314,9 +308,7 @@ static void do_read(int fd, void *buf, size_t size) static void perf_header__adds_read(struct perf_header *self, int fd) { - const unsigned long *feat_mask = self->adds_features; - - if (test_bit(HEADER_TRACE_INFO, feat_mask)) { + if (perf_header__has_feat(self, HEADER_TRACE_INFO)) { struct perf_file_section trace_sec; do_read(fd, &trace_sec, sizeof(trace_sec)); diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index 77186c9e605..a22d70b0757 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -49,7 +49,6 @@ void perf_header_attr__add_id(struct perf_header_attr *self, u64 id); u64 perf_header__sample_type(struct perf_header *header); struct perf_event_attr * perf_header__find_attr(u64 id, struct perf_header *header); -void perf_header__feat_trace_info(struct perf_header *header); void perf_header__set_feat(struct perf_header *self, int feat); bool perf_header__has_feat(const struct perf_header *self, int feat); From 
9e827dd00a94136b944a538bede67c944d0b740a Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 11 Nov 2009 04:51:07 +0100 Subject: [PATCH 262/470] perf tools: Bring linear set of section headers for features Build a set of section headers for features right after the datas. Each implemented feature will have one of such section header that provides the offset and the size of the data manipulated by the feature. The trace informations have moved after the data and are recorded on exit time. The new layout is as follows: ----------------------- ___ [ magic ] | [ header size ] | [ attr size ] | [ attr content offset ] | [ attr content size ] | [ data offset ] File Headers [ data size ] | [ event_types offset ] | [ event_types size ] | [ feature bitmap ] v [ attr section ] [ events section ] ___ [ X ] | [ X ] | [ X ] Datas [ X ] | [ X ] v ___ [ Feature 1 offset ] | [ Feature 1 size ] Features headers [ Feature 2 offset ] | [ Feature 2 size ] v [ Feature 1 content ] [ Feature 2 content ] ----------------------- We have as many feature's section headers as we have features in use for the current file. Say Feat 1 and Feat 3 are used by the file, but not Feat 2. Then the feature headers will be like follows: [ Feature 1 offset ] | [ Feature 1 size ] Features headers [ Feature 3 offset ] | [ Feature 3 size ] v There is no hole to cover Feature 2 that is not in use here. We only need to cover the needed headers in order, from the lowest feature bit to the highest. Currently we have two features: HEADER_TRACE_INFO and HEADER_BUILD_ID. Both have their contents that follow the feature headers. Putting the contents right after the feature headers is not mandatory though. While we keep the feature headers right after the data and in order, their offsets can point everywhere. We have just put the two above feature contents in the end of the file for convenience. The purpose of this layout change is to have a file format that scales while keeping it simple: having such linear feature headers is less error prone wrt forward/backward compatibility as the content of a feature can be put anywhere, its location can even change by the time, it's fine because its headers will tell where it is. And we know how to find these headers, following the above rules. 
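To make the layout concrete, here is a simplified reader-side sketch of how the feature headers are located (it mirrors the perf_header__adds_read() change below; error handling is omitted and plain read() stands in for the do_read() helper):

    int nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS);
    struct perf_file_section *feat_sec = calloc(nr_sections, sizeof(*feat_sec));

    /* The feature headers start right after the sample data... */
    lseek(fd, self->data_offset + self->data_size, SEEK_SET);
    read(fd, feat_sec, nr_sections * sizeof(*feat_sec));

    /* ...and feat_sec[0] describes the lowest set feature bit, feat_sec[1]
     * the next one, and so on -- unused features simply have no entry. */
    lseek(fd, feat_sec[0].offset, SEEK_SET);
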
Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras Cc: Hitoshi Mitake LKML-Reference: <1257911467-28276-6-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/util/data_map.c | 11 +-- tools/perf/util/data_map.h | 3 +- tools/perf/util/header.c | 112 +++++++++++++++++-------- tools/perf/util/include/linux/bitmap.h | 1 + tools/perf/util/include/linux/ctype.h | 2 +- tools/perf/util/util.h | 3 + 6 files changed, 86 insertions(+), 46 deletions(-) diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c index 66e58aaecce..aacb814a4ef 100644 --- a/tools/perf/util/data_map.c +++ b/tools/perf/util/data_map.c @@ -70,18 +70,15 @@ process_event(event_t *event, unsigned long offset, unsigned long head) } } -int perf_header__read_build_ids(const struct perf_header *self, - int input, off_t file_size) +int perf_header__read_build_ids(int input, off_t size) { - off_t offset = self->data_offset + self->data_size; struct build_id_event bev; char filename[PATH_MAX]; + off_t offset = lseek(input, 0, SEEK_CUR); + off_t limit = offset + size; int err = -1; - if (lseek(input, offset, SEEK_SET) < 0) - return -1; - - while (offset < file_size) { + while (offset < limit) { struct dso *dso; ssize_t len; diff --git a/tools/perf/util/data_map.h b/tools/perf/util/data_map.h index c4122810e48..20b4037a823 100644 --- a/tools/perf/util/data_map.h +++ b/tools/perf/util/data_map.h @@ -27,7 +27,6 @@ int mmap_dispatch_perf_file(struct perf_header **pheader, int full_paths, int *cwdlen, char **cwd); -int perf_header__read_build_ids(const struct perf_header *self, - int input, off_t file_size); +int perf_header__read_build_ids(int input, off_t file_size); #endif diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 9709d38113b..ebed4f44ed3 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -186,41 +186,58 @@ static void write_buildid_table(int fd, struct list_head *id_head) } static void -perf_header__adds_write(struct perf_header *self, int fd, bool at_exit) +perf_header__adds_write(struct perf_header *self, int fd) { - struct perf_file_section trace_sec; - u64 cur_offset = lseek(fd, 0, SEEK_CUR); + LIST_HEAD(id_list); + int nr_sections; + struct perf_file_section *feat_sec; + int sec_size; + u64 sec_start; + int idx = 0; + + if (fetch_build_id_table(&id_list)) + perf_header__set_feat(self, HEADER_BUILD_ID); + + nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS); + if (!nr_sections) + return; + + feat_sec = calloc(sizeof(*feat_sec), nr_sections); + if (!feat_sec) + die("No memory"); + + sec_size = sizeof(*feat_sec) * nr_sections; + + sec_start = self->data_offset + self->data_size; + lseek(fd, sec_start + sec_size, SEEK_SET); if (perf_header__has_feat(self, HEADER_TRACE_INFO)) { + struct perf_file_section *trace_sec; + + trace_sec = &feat_sec[idx++]; + /* Write trace info */ - trace_sec.offset = lseek(fd, sizeof(trace_sec), SEEK_CUR); + trace_sec->offset = lseek(fd, 0, SEEK_CUR); read_tracing_data(fd, attrs, nr_counters); - trace_sec.size = lseek(fd, 0, SEEK_CUR) - trace_sec.offset; - - /* Write trace info headers */ - lseek(fd, cur_offset, SEEK_SET); - do_write(fd, &trace_sec, sizeof(trace_sec)); - - /* - * Update cur_offset. So that other (future) - * features can set their own infos in this place. But if we are - * the only feature, at least that seeks to the place the data - * should begin. 
- */ - cur_offset = lseek(fd, trace_sec.offset + trace_sec.size, SEEK_SET); + trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset; } - if (at_exit) { - LIST_HEAD(id_list); - if (fetch_build_id_table(&id_list)) { - lseek(fd, self->data_offset + self->data_size, SEEK_SET); - perf_header__set_feat(self, HEADER_BUILD_ID); - write_buildid_table(fd, &id_list); - lseek(fd, cur_offset, SEEK_SET); - } + if (perf_header__has_feat(self, HEADER_BUILD_ID)) { + struct perf_file_section *buildid_sec; + + buildid_sec = &feat_sec[idx++]; + + /* Write build-ids */ + buildid_sec->offset = lseek(fd, 0, SEEK_CUR); + write_buildid_table(fd, &id_list); + buildid_sec->size = lseek(fd, 0, SEEK_CUR) - buildid_sec->offset; } -}; + + lseek(fd, sec_start, SEEK_SET); + do_write(fd, feat_sec, sec_size); + free(feat_sec); +} void perf_header__write(struct perf_header *self, int fd, bool at_exit) { @@ -260,10 +277,11 @@ void perf_header__write(struct perf_header *self, int fd, bool at_exit) if (events) do_write(fd, events, self->event_size); - perf_header__adds_write(self, fd, at_exit); - self->data_offset = lseek(fd, 0, SEEK_CUR); + if (at_exit) + perf_header__adds_write(self, fd); + f_header = (struct perf_file_header){ .magic = PERF_MAGIC, .size = sizeof(f_header), @@ -308,22 +326,44 @@ static void do_read(int fd, void *buf, size_t size) static void perf_header__adds_read(struct perf_header *self, int fd) { - if (perf_header__has_feat(self, HEADER_TRACE_INFO)) { - struct perf_file_section trace_sec; + struct perf_file_section *feat_sec; + int nr_sections; + int sec_size; + int idx = 0; - do_read(fd, &trace_sec, sizeof(trace_sec)); - lseek(fd, trace_sec.offset, SEEK_SET); + + nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS); + if (!nr_sections) + return; + + feat_sec = calloc(sizeof(*feat_sec), nr_sections); + if (!feat_sec) + die("No memory"); + + sec_size = sizeof(*feat_sec) * nr_sections; + + lseek(fd, self->data_offset + self->data_size, SEEK_SET); + + do_read(fd, feat_sec, sec_size); + + if (perf_header__has_feat(self, HEADER_TRACE_INFO)) { + struct perf_file_section *trace_sec; + + trace_sec = &feat_sec[idx++]; + lseek(fd, trace_sec->offset, SEEK_SET); trace_report(fd); - lseek(fd, trace_sec.offset + trace_sec.size, SEEK_SET); } if (perf_header__has_feat(self, HEADER_BUILD_ID)) { - struct stat input_stat; + struct perf_file_section *buildid_sec; - fstat(fd, &input_stat); - if (perf_header__read_build_ids(self, fd, input_stat.st_size)) + buildid_sec = &feat_sec[idx++]; + lseek(fd, buildid_sec->offset, SEEK_SET); + if (perf_header__read_build_ids(fd, buildid_sec->size)) pr_debug("failed to read buildids, continuing...\n"); } + + free(feat_sec); }; struct perf_header *perf_header__read(int fd) diff --git a/tools/perf/util/include/linux/bitmap.h b/tools/perf/util/include/linux/bitmap.h index 821c1033bcc..94507639a8c 100644 --- a/tools/perf/util/include/linux/bitmap.h +++ b/tools/perf/util/include/linux/bitmap.h @@ -1,2 +1,3 @@ #include "../../../../include/linux/bitmap.h" #include "../../../../include/asm-generic/bitops/find.h" +#include diff --git a/tools/perf/util/include/linux/ctype.h b/tools/perf/util/include/linux/ctype.h index bae5783282e..a53d4ee1e0b 100644 --- a/tools/perf/util/include/linux/ctype.h +++ b/tools/perf/util/include/linux/ctype.h @@ -1 +1 @@ -#include "../../../../include/linux/ctype.h" +#include "../util.h" diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index 9de2329dd44..7bd5bdaeb23 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h @@ 
-306,6 +306,7 @@ static inline int has_extension(const char *filename, const char *ext) #undef isascii #undef isspace #undef isdigit +#undef isxdigit #undef isalpha #undef isprint #undef isalnum @@ -323,6 +324,8 @@ extern unsigned char sane_ctype[256]; #define isascii(x) (((x) & ~0x7f) == 0) #define isspace(x) sane_istest(x,GIT_SPACE) #define isdigit(x) sane_istest(x,GIT_DIGIT) +#define isxdigit(x) \ + (sane_istest(toupper(x), GIT_ALPHA | GIT_DIGIT) && toupper(x) < 'G') #define isalpha(x) sane_istest(x,GIT_ALPHA) #define isalnum(x) sane_istest(x,GIT_ALPHA | GIT_DIGIT) #define isprint(x) sane_istest(x,GIT_PRINT) From 5d7bdab75cd56d2bdc0986ae5546be3b09fea70a Mon Sep 17 00:00:00 2001 From: Michael Cree Date: Wed, 11 Nov 2009 20:43:03 +1300 Subject: [PATCH 263/470] perf tools: Test -fstack-protector-all compiler option for inclusion in CFLAGS Some architectures (e.g. Alpha) do not support the -fstack-protector-all compiler option and the use of the option with -Werror causes the compiler to abort and the build fails. Test that the compiler supports -fstack-protector-all before inclusion in CFLAGS. Signed-off-by: Michael Cree Cc: Richard Henderson Cc: Ivan Kokshaysky Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <20091111074302.GA3728@omega> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index b9509b1cc32..e6d42728a33 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -207,7 +207,7 @@ ifndef PERF_DEBUG CFLAGS_OPTIMIZE = -O6 endif -CFLAGS = $(MBITS) -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -fstack-protector-all -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) +CFLAGS = $(MBITS) -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) LDFLAGS = -lpthread -lrt -lelf -lm ALL_CFLAGS = $(CFLAGS) ALL_LDFLAGS = $(LDFLAGS) @@ -259,6 +259,9 @@ PTHREAD_LIBS = -lpthread # explicitly what architecture to check for. Fix this up for yours.. SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__ +ifeq ($(shell sh -c "echo 'int foo(void) {char X[2]; return 3;}' | $(CC) -x c -c -Werror -fstack-protector-all - -o /dev/null >/dev/null 2>&1 && echo y"), y) + CFLAGS := $(CFLAGS) -fstack-protector-all +endif ### --- END CONFIGURATION SECTION --- From ce6b5d768c79b9d5dd6345c033bae781d5ca9b8e Mon Sep 17 00:00:00 2001 From: Yong Wang Date: Wed, 11 Nov 2009 15:51:25 +0800 Subject: [PATCH 264/470] x86: Mark the thermal init functions __init Mark the thermal init functions __init so that the init memory can be freed. Signed-off-by: Yong Wang LKML-Reference: <20091111075125.GA17900@ywang-moblin2.bj.intel.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mcheck/therm_throt.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index 7f3cf36ed12..8a73d5c12a0 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c @@ -256,7 +256,7 @@ asmlinkage void smp_thermal_interrupt(struct pt_regs *regs) ack_APIC_irq(); } -void mcheck_intel_therm_init(void) +void __init mcheck_intel_therm_init(void) { /* * This function is only called on boot CPU. 
Save the init thermal @@ -268,7 +268,7 @@ void mcheck_intel_therm_init(void) lvtthmr_init = apic_read(APIC_LVTTHMR); } -void intel_init_thermal(struct cpuinfo_x86 *c) +void __init intel_init_thermal(struct cpuinfo_x86 *c) { unsigned int cpu = smp_processor_id(); int tm2 = 0; From cffd377e5879ea58522224a785a083f201afd80e Mon Sep 17 00:00:00 2001 From: Hidetoshi Seto Date: Thu, 12 Nov 2009 15:52:40 +0900 Subject: [PATCH 265/470] x86, mce: Fix __init annotations The intel_init_thermal() is called from resume path, so it cannot be marked as __init. OTOH mce_banks_init() is only called from __mcheck_cpu_cap_init() which is marked as __cpuinit, so it can be also marked as __cpuinit. Signed-off-by: Hidetoshi Seto Acked-by: Yong Wang LKML-Reference: <4AFBB0B8.2070501@jp.fujitsu.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mcheck/mce.c | 4 ++-- arch/x86/kernel/cpu/mcheck/therm_throt.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 0d4102031a4..5f277cad2ed 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -1201,7 +1201,7 @@ int mce_notify_irq(void) } EXPORT_SYMBOL_GPL(mce_notify_irq); -static int mce_banks_init(void) +static int __cpuinit __mcheck_cpu_mce_banks_init(void) { int i; @@ -1242,7 +1242,7 @@ static int __cpuinit __mcheck_cpu_cap_init(void) WARN_ON(banks != 0 && b != banks); banks = b; if (!mce_banks) { - int err = mce_banks_init(); + int err = __mcheck_cpu_mce_banks_init(); if (err) return err; diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index 8a73d5c12a0..4fef985fc22 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c @@ -268,7 +268,7 @@ void __init mcheck_intel_therm_init(void) lvtthmr_init = apic_read(APIC_LVTTHMR); } -void __init intel_init_thermal(struct cpuinfo_x86 *c) +void intel_init_thermal(struct cpuinfo_x86 *c) { unsigned int cpu = smp_processor_id(); int tm2 = 0; From db48cccc7c709ccfa7cb4ac702bc27c216bffee7 Mon Sep 17 00:00:00 2001 From: Hiroshi Shimamoto Date: Thu, 12 Nov 2009 11:25:34 +0900 Subject: [PATCH 266/470] perf_event, x86: Annotate init functions and data Annotate init functions and data with __init and __initconst. 
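For reference, the annotation idiom looks like this (a minimal sketch with made-up names, not code taken from the patch itself):

    /* Lookup table only referenced while booting: dropped with init memory */
    static const u64 example_event_ids[] __initconst = { 0x003c, 0x00c0 };

    /* Function only called from the init path: its text is dropped as well */
    static int __init example_pmu_init(void)
    {
            return example_event_ids[0] ? 0 : -1;
    }

Anything still reachable after boot (resume paths, hotplug callbacks, module code) must not carry these annotations, which is exactly the distinction the two thermal-init patches above had to sort out.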
Signed-off-by: Hiroshi Shimamoto Cc: Peter Zijlstra Cc: Stephane Eranian LKML-Reference: <4AFB721E.8070203@ct.jp.nec.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 2e20bca3cca..bd874302420 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -245,7 +245,7 @@ static u64 __read_mostly hw_cache_event_ids [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX]; -static const u64 nehalem_hw_cache_event_ids +static __initconst u64 nehalem_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = @@ -336,7 +336,7 @@ static const u64 nehalem_hw_cache_event_ids }, }; -static const u64 core2_hw_cache_event_ids +static __initconst u64 core2_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = @@ -427,7 +427,7 @@ static const u64 core2_hw_cache_event_ids }, }; -static const u64 atom_hw_cache_event_ids +static __initconst u64 atom_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = @@ -536,7 +536,7 @@ static u64 intel_pmu_raw_event(u64 hw_event) return hw_event & CORE_EVNTSEL_MASK; } -static const u64 amd_hw_cache_event_ids +static __initconst u64 amd_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = @@ -1964,7 +1964,7 @@ static __read_mostly struct notifier_block perf_event_nmi_notifier = { .priority = 1 }; -static struct x86_pmu p6_pmu = { +static __initconst struct x86_pmu p6_pmu = { .name = "p6", .handle_irq = p6_pmu_handle_irq, .disable_all = p6_pmu_disable_all, @@ -1992,7 +1992,7 @@ static struct x86_pmu p6_pmu = { .get_event_idx = intel_get_event_idx, }; -static struct x86_pmu intel_pmu = { +static __initconst struct x86_pmu intel_pmu = { .name = "Intel", .handle_irq = intel_pmu_handle_irq, .disable_all = intel_pmu_disable_all, @@ -2016,7 +2016,7 @@ static struct x86_pmu intel_pmu = { .get_event_idx = intel_get_event_idx, }; -static struct x86_pmu amd_pmu = { +static __initconst struct x86_pmu amd_pmu = { .name = "AMD", .handle_irq = amd_pmu_handle_irq, .disable_all = amd_pmu_disable_all, @@ -2037,7 +2037,7 @@ static struct x86_pmu amd_pmu = { .get_event_idx = gen_get_event_idx, }; -static int p6_pmu_init(void) +static __init int p6_pmu_init(void) { switch (boot_cpu_data.x86_model) { case 1: @@ -2071,7 +2071,7 @@ static int p6_pmu_init(void) return 0; } -static int intel_pmu_init(void) +static __init int intel_pmu_init(void) { union cpuid10_edx edx; union cpuid10_eax eax; @@ -2144,7 +2144,7 @@ static int intel_pmu_init(void) return 0; } -static int amd_pmu_init(void) +static __init int amd_pmu_init(void) { /* Performance-monitoring supported from K7 and later: */ if (boot_cpu_data.x86 < 6) From 67178767b936fb47a3a5e88097cff41ccbda7acb Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 13 Nov 2009 10:06:34 +0100 Subject: [PATCH 267/470] tracing: Rename 'lockdep' event subsystem into 'lock' Lockdep events subsystem gathers various locking related events such as a request, release, contention or acquisition of a lock. 
The name of this event subsystem is a bit of a misnomer since these events are not quite related to lockdep but more generally to locking, ie: these events are not reporting lock dependencies or possible deadlock scenario but pure locking events. Hence this rename. Signed-off-by: Frederic Weisbecker Acked-by: Peter Zijlstra Acked-by: Hitoshi Mitake Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith Cc: Paul Mackerras Cc: Steven Rostedt Cc: Li Zefan LKML-Reference: <1258103194-843-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- include/trace/events/{lockdep.h => lock.h} | 8 ++++---- kernel/lockdep.c | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) rename include/trace/events/{lockdep.h => lock.h} (92%) diff --git a/include/trace/events/lockdep.h b/include/trace/events/lock.h similarity index 92% rename from include/trace/events/lockdep.h rename to include/trace/events/lock.h index bcf1d209a00..a870ba125aa 100644 --- a/include/trace/events/lockdep.h +++ b/include/trace/events/lock.h @@ -1,8 +1,8 @@ #undef TRACE_SYSTEM -#define TRACE_SYSTEM lockdep +#define TRACE_SYSTEM lock -#if !defined(_TRACE_LOCKDEP_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_LOCKDEP_H +#if !defined(_TRACE_LOCK_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_LOCK_H #include #include @@ -90,7 +90,7 @@ TRACE_EVENT(lock_acquired, #endif #endif -#endif /* _TRACE_LOCKDEP_H */ +#endif /* _TRACE_LOCK_H */ /* This part must be outside protection */ #include diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 9af56723c09..f5dcd36d315 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -49,7 +49,7 @@ #include "lockdep_internals.h" #define CREATE_TRACE_POINTS -#include +#include #ifdef CONFIG_PROVE_LOCKING int prove_locking = 1; From 687b16fb617bd446439425a368ad7c7bbd202c73 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 13 Nov 2009 13:16:15 +0100 Subject: [PATCH 268/470] hw-breakpoints: Provide an off-case for counter_arch_bp() If an arch doesn't support the hw breakpoints, counter_arch_bp() has no off case to cover the missing breakpoint info structure from the perf event. The result is a build error in non-x86 configs. 
Reported-by: Ingo Molnar Signed-off-by: Frederic Weisbecker Cc: Prasad LKML-Reference: <1258114575-32655-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar Cc: Prasad --- include/linux/hw_breakpoint.h | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h index 7eba9b92e5f..18710e0c84b 100644 --- a/include/linux/hw_breakpoint.h +++ b/include/linux/hw_breakpoint.h @@ -16,11 +16,6 @@ enum { HW_BREAKPOINT_X = 4, }; -static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp) -{ - return &bp->hw.info; -} - static inline unsigned long hw_breakpoint_addr(struct perf_event *bp) { return bp->attr.bp_addr; @@ -83,6 +78,11 @@ extern void release_bp_slot(struct perf_event *bp); extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk); +static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp) +{ + return &bp->hw.info; +} + #else /* !CONFIG_HAVE_HW_BREAKPOINT */ static inline struct perf_event * @@ -126,6 +126,11 @@ static inline void release_bp_slot(struct perf_event *bp) { } static inline void flush_ptrace_hw_breakpoint(struct task_struct *tsk) { } +static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp) +{ + return NULL; +} + #endif /* CONFIG_HAVE_HW_BREAKPOINT */ #endif /* _LINUX_HW_BREAKPOINT_H */ From 688bcaff291cf2fe2734e43f2793d4d05b850518 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 14 Nov 2009 01:12:47 +0100 Subject: [PATCH 269/470] hw-breakpoints: Fix build on !perf architectures the arch/alpha build fails with: In file included from tip/kernel/exit.c:52: tip/include/linux/hw_breakpoint.h: In function 'hw_breakpoint_addr': tip/include/linux/hw_breakpoint.h:21: error: 'struct perf_event' has no member named 'attr' [...] Move these helper inlines inside the CONFIG_HAVE_HW_BREAKPOINT ifdef. 
Cc: Frederic Weisbecker Cc: Prasad LKML-Reference: <1258114575-32655-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- include/linux/hw_breakpoint.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h index 18710e0c84b..0b98cbf76da 100644 --- a/include/linux/hw_breakpoint.h +++ b/include/linux/hw_breakpoint.h @@ -16,6 +16,8 @@ enum { HW_BREAKPOINT_X = 4, }; +#ifdef CONFIG_HAVE_HW_BREAKPOINT + static inline unsigned long hw_breakpoint_addr(struct perf_event *bp) { return bp->attr.bp_addr; @@ -31,7 +33,6 @@ static inline int hw_breakpoint_len(struct perf_event *bp) return bp->attr.bp_len; } -#ifdef CONFIG_HAVE_HW_BREAKPOINT extern struct perf_event * register_user_hw_breakpoint(unsigned long addr, int len, From 4c49b12853fbb5eff4849b7b6a1e895776f027a1 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Fri, 13 Nov 2009 21:47:33 -0800 Subject: [PATCH 270/470] perf_event: Fix invalid type in ioctl definition u64 is invalid in userspace headers, including ioctl definitions; use __u64 instead Signed-off-by: Arjan van de Ven Cc: LKML-Reference: <20091113214733.7cd76be9@infradead.org> Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 45b56faf5cd..ec3768a8105 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -219,7 +219,7 @@ struct perf_event_attr { #define PERF_EVENT_IOC_DISABLE _IO ('$', 1) #define PERF_EVENT_IOC_REFRESH _IO ('$', 2) #define PERF_EVENT_IOC_RESET _IO ('$', 3) -#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, u64) +#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64) #define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) #define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *) From 68efa37df779b3e04280598e8b5b3a1919b65fee Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 14 Nov 2009 01:35:29 +0100 Subject: [PATCH 271/470] hw-breakpoints, x86: Fix modular KVM build This build error: arch/x86/kvm/x86.c:3655: error: implicit declaration of function 'hw_breakpoint_restore' Happens because in the CONFIG_KVM=m case there's no 'CONFIG_KVM' define in the kernel - it's CONFIG_KVM_MODULE in that case. Make the prototype available unconditionally. 
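The underlying rule is that a tristate Kconfig symbol set to 'm' defines only CONFIG_KVM_MODULE in the generated config headers, never CONFIG_KVM, so an #ifdef CONFIG_KVM guard silently hides declarations from a modular build. Had the prototype needed to stay conditional, the guard would have had to look something like this (hypothetical sketch):

    #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
    extern void hw_breakpoint_restore(void);
    #endif

Later kernels grew the IS_ENABLED() helper to express exactly this built-in-or-module test; here, simply dropping the guard is the smaller fix.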
Cc: Frederic Weisbecker Cc: Prasad LKML-Reference: <1258114575-32655-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/debugreg.h | 2 -- arch/x86/kernel/hw_breakpoint.c | 4 ++-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h index 0f6e92af422..fdabd843576 100644 --- a/arch/x86/include/asm/debugreg.h +++ b/arch/x86/include/asm/debugreg.h @@ -96,9 +96,7 @@ static inline int hw_breakpoint_active(void) extern void aout_dump_debugregs(struct user *dump); -#ifdef CONFIG_KVM extern void hw_breakpoint_restore(void); -#endif #endif /* __KERNEL__ */ diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index 57dcee5fa95..752daebe91c 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -43,6 +43,7 @@ /* Per cpu debug control register value */ DEFINE_PER_CPU(unsigned long, dr7); +EXPORT_PER_CPU_SYMBOL(dr7); /* Per cpu debug address registers values */ static DEFINE_PER_CPU(unsigned long, cpu_debugreg[HBP_NUM]); @@ -409,6 +410,7 @@ void aout_dump_debugregs(struct user *dump) dump->u_debugreg[7] = dr7; } +EXPORT_SYMBOL_GPL(aout_dump_debugregs); /* * Release the user breakpoints used by ptrace @@ -424,7 +426,6 @@ void flush_ptrace_hw_breakpoint(struct task_struct *tsk) } } -#ifdef CONFIG_KVM void hw_breakpoint_restore(void) { set_debugreg(__get_cpu_var(cpu_debugreg[0]), 0); @@ -435,7 +436,6 @@ void hw_breakpoint_restore(void) set_debugreg(__get_cpu_var(dr7), 7); } EXPORT_SYMBOL_GPL(hw_breakpoint_restore); -#endif /* * Handle debug exception notifications. From d2fb8b4151a92223da6a84006f8f248ebeb6677d Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Sun, 15 Nov 2009 20:36:53 +0900 Subject: [PATCH 272/470] perf tools: Add new perf_atoll() function to parse string representing size in bytes This patch modifies util/string.[ch] to add new function: perf_atoll() to parse string representing size in bytes. This function parses (\d+)(b|B|kb|KB|mb|MB|gb|GB) (e.g. "256MB") and returns its numeric value. (e.g. 268435456) Signed-off-by: Hitoshi Mitake Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker LKML-Reference: <1258285013-4759-1-git-send-email-mitake@dcl.info.waseda.ac.jp> Signed-off-by: Ingo Molnar --- tools/perf/util/string.c | 84 ++++++++++++++++++++++++++++++++++++++++ tools/perf/util/string.h | 1 + 2 files changed, 85 insertions(+) diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c index 04743d3e903..227043577e0 100644 --- a/tools/perf/util/string.c +++ b/tools/perf/util/string.c @@ -1,5 +1,7 @@ #include +#include #include "string.h" +#include "util.h" static int hex(char ch) { @@ -43,3 +45,85 @@ char *strxfrchar(char *s, char from, char to) return s; } + +#define K 1024LL +/* + * perf_atoll() + * Parse (\d+)(b|B|kb|KB|mb|MB|gb|GB|tb|TB) (e.g. 
"256MB") + * and return its numeric value + */ +s64 perf_atoll(const char *str) +{ + unsigned int i; + s64 length = -1, unit = 1; + + if (!isdigit(str[0])) + goto out_err; + + for (i = 1; i < strlen(str); i++) { + switch (str[i]) { + case 'B': + case 'b': + break; + case 'K': + if (str[i + 1] != 'B') + goto out_err; + else + goto kilo; + case 'k': + if (str[i + 1] != 'b') + goto out_err; +kilo: + unit = K; + break; + case 'M': + if (str[i + 1] != 'B') + goto out_err; + else + goto mega; + case 'm': + if (str[i + 1] != 'b') + goto out_err; +mega: + unit = K * K; + break; + case 'G': + if (str[i + 1] != 'B') + goto out_err; + else + goto giga; + case 'g': + if (str[i + 1] != 'b') + goto out_err; +giga: + unit = K * K * K; + break; + case 'T': + if (str[i + 1] != 'B') + goto out_err; + else + goto tera; + case 't': + if (str[i + 1] != 'b') + goto out_err; +tera: + unit = K * K * K * K; + break; + case '\0': /* only specified figures */ + unit = 1; + break; + default: + if (!isdigit(str[i])) + goto out_err; + break; + } + } + + length = atoll(str) * unit; + goto out; + +out_err: + length = -1; +out: + return length; +} diff --git a/tools/perf/util/string.h b/tools/perf/util/string.h index 2c84bf65ba0..e50b07f8082 100644 --- a/tools/perf/util/string.h +++ b/tools/perf/util/string.h @@ -5,6 +5,7 @@ int hex2u64(const char *ptr, u64 *val); char *strxfrchar(char *s, char from, char to); +s64 perf_atoll(const char *str); #define _STR(x) #x #define STR(x) _STR(x) From 7255fe2a42c612f2b8fe4c347f0a5f0c97d85a46 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Sun, 15 Nov 2009 12:05:08 -0200 Subject: [PATCH 273/470] perf stat: Do not print ratio when task-clock event is not counted The ratio between the number of events and the time elapsed makes sense only if task-clock event is counted. Otherwise it will be simply a (confusing) # 0.000 M/sec This patch outputs the ratio only if task-clock event is counted. 
Some test examples of before and after: Before: [lucas@skywalker linux.trees.git]$ sudo perf stat -e branch-misses -a -- sleep 1 Performance counter stats for 'sleep 1': 1367818 branch-misses # 0.000 M/sec 1.001494325 seconds time elapsed After (without task-clock): [lucas@skywalker perf]$ sudo ./perf stat -e branch-misses -a -- sleep 1 Performance counter stats for 'sleep 1': 1135044 branch-misses 1.001370775 seconds time elapsed After (with task-clock): [lucas@skywalker perf]$ sudo ./perf stat -e branch-misses -e task-clock -a -- sleep 1 Performance counter stats for 'sleep 1': 1070111 branch-misses # 0.534 M/sec 2002.730893 task-clock-msecs # 1.999 CPUs 1.001640292 seconds time elapsed Signed-off-by: Lucas De Marchi Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091115140507.GB21561@skywalker.lan> Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index c6df3770b87..c70d7200355 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -357,7 +357,8 @@ static void abs_printout(int counter, double avg) ratio = avg / total; fprintf(stderr, " # %10.3f IPC ", ratio); - } else if (MATCH_EVENT(HARDWARE, HW_BRANCH_MISSES, counter)) { + } else if (MATCH_EVENT(HARDWARE, HW_BRANCH_MISSES, counter) && + runtime_branches_stats.n != 0) { total = avg_stats(&runtime_branches_stats); if (total) @@ -365,7 +366,7 @@ static void abs_printout(int counter, double avg) fprintf(stderr, " # %10.3f %% ", ratio); - } else { + } else if (runtime_nsecs_stats.n != 0) { total = avg_stats(&runtime_nsecs_stats); if (total) From 559fdc3c1b624edb1933a875022fe7e27934d11c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 16 Nov 2009 12:45:14 +0100 Subject: [PATCH 274/470] perf_event: Optimize perf_output_lock() The purpose of perf_output_{un,}lock() is to: 1) avoid publishing incomplete data [ possible when publishing a head that is ahead of an entry that is still being written ] 2) guarantee fwd progress [ a simple refcount on pending writers doesn't need to drop to 0, making it so would end up implementing something like forced quiecent states of RCU ] To satisfy the above without undue complexity it serializes between CPUs, this means that a pending writer can only be the same cpu in a nested context, and since (under normal operation) a cpu always makes progress we're good -- if the head is only published when the bottom most writer completes. Now we don't need to disable IRQs in order to serialize between CPUs, disabling preemption ought to be sufficient, esp since we already deal with nesting due to NMIs. This avoids potentially expensive (and needless) local IRQ disable/enable ops. 
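A minimal user-space rendition of the lock this switches to may help; GCC __sync builtins stand in for atomic_cmpxchg(), and the get_cpu()/put_cpu() preemption pinning of the real code is assumed rather than shown (all names below are invented for the sketch):

/*
 * Sketch: recursion-aware cmpxchg lock.  'owner' is -1 when free,
 * otherwise the id of the cpu that holds it.  A nested writer on the
 * same cpu does not take ownership, so only the outermost writer
 * unlocks and publishes the new head.
 */
#include <stdio.h>

static int owner = -1;

/* returns 1 if this call took ownership, 0 for a nested same-cpu writer */
static int output_lock(int cpu)
{
	for (;;) {
		int cur = __sync_val_compare_and_swap(&owner, -1, cpu);

		if (cur == -1)
			return 1;	/* acquired */
		if (cur == cpu)
			return 0;	/* nested (e.g. from NMI context) */
		/* another cpu holds it: spin */
	}
}

static void output_unlock(int locked)
{
	if (!locked)
		return;			/* nested writer: nothing to release */
	__sync_synchronize();		/* order the data before the release */
	owner = -1;
}

int main(void)
{
	int outer = output_lock(0);	/* outermost writer on "cpu 0" */
	int inner = output_lock(0);	/* same-cpu nesting proceeds at once */

	printf("outer took lock: %d, nested took lock: %d\n", outer, inner);
	output_unlock(inner);
	output_unlock(outer);
	return 0;
}

This keeps the two properties the changelog asks for, no half-written head becomes visible and forward progress holds because a given cpu always completes its outermost write, without touching the IRQ flags.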
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Steven Rostedt Cc: Frederic Weisbecker LKML-Reference: <1258373161.26714.254.camel@laptop> Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 1 - kernel/perf_event.c | 21 +++++++++++---------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index df4e73e3377..7f87563c848 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -714,7 +714,6 @@ struct perf_output_handle { int nmi; int sample; int locked; - unsigned long flags; }; #ifdef CONFIG_PERF_EVENTS diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 6f4ed3b4cd7..3256e36ad25 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -2674,20 +2674,21 @@ static void perf_output_wakeup(struct perf_output_handle *handle) static void perf_output_lock(struct perf_output_handle *handle) { struct perf_mmap_data *data = handle->data; - int cpu; + int cur, cpu = get_cpu(); handle->locked = 0; - local_irq_save(handle->flags); - cpu = smp_processor_id(); + for (;;) { + cur = atomic_cmpxchg(&data->lock, -1, cpu); + if (cur == -1) { + handle->locked = 1; + break; + } + if (cur == cpu) + break; - if (in_nmi() && atomic_read(&data->lock) == cpu) - return; - - while (atomic_cmpxchg(&data->lock, -1, cpu) != -1) cpu_relax(); - - handle->locked = 1; + } } static void perf_output_unlock(struct perf_output_handle *handle) @@ -2733,7 +2734,7 @@ again: if (atomic_xchg(&data->wakeup, 0)) perf_output_wakeup(handle); out: - local_irq_restore(handle->flags); + put_cpu(); } void perf_output_copy(struct perf_output_handle *handle, From 82164161679c448f33092945ea97cb547a13683a Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 16 Nov 2009 13:48:11 -0200 Subject: [PATCH 275/470] perf symbols: Call the symbol filter in dso__synthesize_plt_symbols() We need to pass the symbol to the filter so that, for instance, 'perf top' can do filtering and also set the private area it manages, setting the ->map pointer, etc. I found this while running 'perf top' on a machine where hits happened on PLT symbols, where ->map wasn't being set up and segfaults thus happened. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <1258386491-20278-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 9c286db6200..93e4b52ccfe 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -281,6 +281,11 @@ static int kernel_maps__load_all_kallsyms(void) if (sym == NULL) goto out_delete_line; + /* + * We will pass the symbols to the filter later, in + * kernel_maps__split_kallsyms, when we have split the + * maps per module + */ dso__insert_symbol(kernel_map->dso, sym); } @@ -555,7 +560,8 @@ static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, * And always look at the original dso, not at debuginfo packages, that * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS). 
*/ -static int dso__synthesize_plt_symbols(struct dso *self) +static int dso__synthesize_plt_symbols(struct dso *self, struct map *map, + symbol_filter_t filter) { uint32_t nr_rel_entries, idx; GElf_Sym sym; @@ -643,8 +649,12 @@ static int dso__synthesize_plt_symbols(struct dso *self) if (!f) goto out_elf_end; - dso__insert_symbol(self, f); - ++nr; + if (filter && filter(map, f)) + symbol__delete(f); + else { + dso__insert_symbol(self, f); + ++nr; + } } } else if (shdr_rel_plt.sh_type == SHT_REL) { GElf_Rel pos_mem, *pos; @@ -661,8 +671,12 @@ static int dso__synthesize_plt_symbols(struct dso *self) if (!f) goto out_elf_end; - dso__insert_symbol(self, f); - ++nr; + if (filter && filter(map, f)) + symbol__delete(f); + else { + dso__insert_symbol(self, f); + ++nr; + } } } @@ -1050,7 +1064,7 @@ compare_build_id: goto more; if (ret > 0) { - int nr_plt = dso__synthesize_plt_symbols(self); + int nr_plt = dso__synthesize_plt_symbols(self, map, filter); if (nr_plt > 0) ret += nr_plt; } From 84fe8488ade7922afa9f3aa77c22d2d92beb9660 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 16 Nov 2009 16:32:41 -0200 Subject: [PATCH 276/470] perf symbols: Pass the offset to perf_header__read_build_ids() Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <1258396365-29217-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/data_map.c | 3 +-- tools/perf/util/data_map.h | 2 +- tools/perf/util/header.c | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c index aacb814a4ef..14cb8465eb0 100644 --- a/tools/perf/util/data_map.c +++ b/tools/perf/util/data_map.c @@ -70,11 +70,10 @@ process_event(event_t *event, unsigned long offset, unsigned long head) } } -int perf_header__read_build_ids(int input, off_t size) +int perf_header__read_build_ids(int input, off_t offset, off_t size) { struct build_id_event bev; char filename[PATH_MAX]; - off_t offset = lseek(input, 0, SEEK_CUR); off_t limit = offset + size; int err = -1; diff --git a/tools/perf/util/data_map.h b/tools/perf/util/data_map.h index 20b4037a823..ae036ecd762 100644 --- a/tools/perf/util/data_map.h +++ b/tools/perf/util/data_map.h @@ -27,6 +27,6 @@ int mmap_dispatch_perf_file(struct perf_header **pheader, int full_paths, int *cwdlen, char **cwd); -int perf_header__read_build_ids(int input, off_t file_size); +int perf_header__read_build_ids(int input, off_t offset, off_t file_size); #endif diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index ebed4f44ed3..ca0d657eefa 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -359,7 +359,7 @@ static void perf_header__adds_read(struct perf_header *self, int fd) buildid_sec = &feat_sec[idx++]; lseek(fd, buildid_sec->offset, SEEK_SET); - if (perf_header__read_build_ids(fd, buildid_sec->size)) + if (perf_header__read_build_ids(fd, buildid_sec->offset, buildid_sec->size)) pr_debug("failed to read buildids, continuing...\n"); } From 8f41146aedf803856fb6477056e3960cb9ba8f9c Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 16 Nov 2009 16:32:42 -0200 Subject: [PATCH 277/470] perf tools: Debug.h needs to include event.h for event_t Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <1258396365-29217-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/debug.h 
| 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h index e8b18a1f87a..c6c24c522de 100644 --- a/tools/perf/util/debug.h +++ b/tools/perf/util/debug.h @@ -2,6 +2,8 @@ #ifndef __PERF_DEBUG_H #define __PERF_DEBUG_H +#include "event.h" + extern int verbose; extern int dump_trace; From 37562eac3767c7f07bb1a1329708ff6453e47570 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 16 Nov 2009 16:32:43 -0200 Subject: [PATCH 278/470] perf tools: Generalize perf_header__adds_read() Renaming it to perf_header__process_sections() and passing a callback to handle each feature. The next changesets will introduce 'perf buildid-list' that will handle just the HEADER_BUILD_ID table, ignoring all the other features. Signed-off-by: Arnaldo Carvalho de Melo Acked-by: Frederic Weisbecker Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <1258396365-29217-3-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/header.c | 125 ++++++++++++++++++++++----------------- tools/perf/util/header.h | 31 +++++++++- 2 files changed, 99 insertions(+), 57 deletions(-) diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index ca0d657eefa..d8416f01117 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -128,26 +128,11 @@ static const char *__perf_magic = "PERFFILE"; #define PERF_MAGIC (*(u64 *)__perf_magic) -struct perf_file_section { - u64 offset; - u64 size; -}; - struct perf_file_attr { struct perf_event_attr attr; struct perf_file_section ids; }; -struct perf_file_header { - u64 magic; - u64 size; - u64 attr_size; - struct perf_file_section attrs; - struct perf_file_section data; - struct perf_file_section event_types; - DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); -}; - void perf_header__set_feat(struct perf_header *self, int feat) { set_bit(feat, self->adds_features); @@ -324,21 +309,23 @@ static void do_read(int fd, void *buf, size_t size) } } -static void perf_header__adds_read(struct perf_header *self, int fd) +int perf_header__process_sections(struct perf_header *self, int fd, + int (*process)(struct perf_file_section *self, + int feat, int fd)) { struct perf_file_section *feat_sec; int nr_sections; int sec_size; int idx = 0; - + int err = 0, feat = 1; nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS); if (!nr_sections) - return; + return 0; feat_sec = calloc(sizeof(*feat_sec), nr_sections); if (!feat_sec) - die("No memory"); + return -1; sec_size = sizeof(*feat_sec) * nr_sections; @@ -346,49 +333,85 @@ static void perf_header__adds_read(struct perf_header *self, int fd) do_read(fd, feat_sec, sec_size); - if (perf_header__has_feat(self, HEADER_TRACE_INFO)) { - struct perf_file_section *trace_sec; + while (idx < nr_sections && feat < HEADER_LAST_FEATURE) { + if (perf_header__has_feat(self, feat)) { + struct perf_file_section *sec = &feat_sec[idx++]; - trace_sec = &feat_sec[idx++]; - lseek(fd, trace_sec->offset, SEEK_SET); - trace_report(fd); - } - - if (perf_header__has_feat(self, HEADER_BUILD_ID)) { - struct perf_file_section *buildid_sec; - - buildid_sec = &feat_sec[idx++]; - lseek(fd, buildid_sec->offset, SEEK_SET); - if (perf_header__read_build_ids(fd, buildid_sec->offset, buildid_sec->size)) - pr_debug("failed to read buildids, continuing...\n"); + err = process(sec, feat, fd); + if (err < 0) + break; + } + ++feat; } free(feat_sec); + return err; }; +int perf_file_header__read(struct perf_file_header *self, + struct perf_header *ph, int fd) +{ + 
lseek(fd, 0, SEEK_SET); + do_read(fd, self, sizeof(*self)); + + if (self->magic != PERF_MAGIC || + self->attr_size != sizeof(struct perf_file_attr)) + return -1; + + if (self->size != sizeof(*self)) { + /* Support the previous format */ + if (self->size == offsetof(typeof(*self), adds_features)) + bitmap_zero(self->adds_features, HEADER_FEAT_BITS); + else + return -1; + } + + memcpy(&ph->adds_features, &self->adds_features, + sizeof(self->adds_features)); + + ph->event_offset = self->event_types.offset; + ph->event_size = self->event_types.size; + ph->data_offset = self->data.offset; + ph->data_size = self->data.size; + return 0; +} + +static int perf_file_section__process(struct perf_file_section *self, + int feat, int fd) +{ + if (lseek(fd, self->offset, SEEK_SET) < 0) { + pr_debug("Failed to lseek to %Ld offset for feature %d, " + "continuing...\n", self->offset, feat); + return 0; + } + + switch (feat) { + case HEADER_TRACE_INFO: + trace_report(fd); + break; + + case HEADER_BUILD_ID: + if (perf_header__read_build_ids(fd, self->offset, self->size)) + pr_debug("Failed to read buildids, continuing...\n"); + break; + default: + pr_debug("unknown feature %d, continuing...\n", feat); + } + + return 0; +} + struct perf_header *perf_header__read(int fd) { struct perf_header *self = perf_header__new(); struct perf_file_header f_header; struct perf_file_attr f_attr; u64 f_id; - int nr_attrs, nr_ids, i, j; - lseek(fd, 0, SEEK_SET); - do_read(fd, &f_header, sizeof(f_header)); - - if (f_header.magic != PERF_MAGIC || - f_header.attr_size != sizeof(f_attr)) + if (perf_file_header__read(&f_header, self, fd) < 0) die("incompatible file format"); - if (f_header.size != sizeof(f_header)) { - /* Support the previous format */ - if (f_header.size == offsetof(typeof(f_header), adds_features)) - bitmap_zero(f_header.adds_features, HEADER_FEAT_BITS); - else - die("incompatible file format"); - } nr_attrs = f_header.attrs.size / sizeof(f_attr); lseek(fd, f_header.attrs.offset, SEEK_SET); @@ -422,15 +445,7 @@ struct perf_header *perf_header__read(int fd) event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type); } - memcpy(&self->adds_features, &f_header.adds_features, sizeof(f_header.adds_features)); - - self->event_offset = f_header.event_types.offset; - self->event_size = f_header.event_types.size; - - self->data_offset = f_header.data.offset; - self->data_size = f_header.data.size; - - perf_header__adds_read(self, fd); + perf_header__process_sections(self, fd, perf_file_section__process); lseek(fd, self->data_offset, SEEK_SET); diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index a22d70b0757..f1b3bf7bdfc 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -15,11 +15,34 @@ struct perf_header_attr { off_t id_offset; }; -#define HEADER_TRACE_INFO 1 -#define HEADER_BUILD_ID 2 +enum { + HEADER_TRACE_INFO = 1, + HEADER_BUILD_ID, + HEADER_LAST_FEATURE, +}; #define HEADER_FEAT_BITS 256 +struct perf_file_section { + u64 offset; + u64 size; +}; + +struct perf_file_header { + u64 magic; + u64 size; + u64 attr_size; + struct perf_file_section attrs; + struct perf_file_section data; + struct perf_file_section event_types; + DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); +}; + +struct perf_header; + +int perf_file_header__read(struct perf_file_header *self, + struct perf_header *ph, int fd); + struct perf_header { int frozen; int attrs, size; @@ -54,4 +77,8 @@ bool perf_header__has_feat(const struct perf_header *self, int feat); struct perf_header 
*perf_header__new(void); +int perf_header__process_sections(struct perf_header *self, int fd, + int (*process)(struct perf_file_section *self, + int feat, int fd)); + #endif /* __PERF_HEADER_H */ From 9e03eb2d512e7f3a1e562d4b922aa8b1891750b6 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 16 Nov 2009 16:32:44 -0200 Subject: [PATCH 279/470] perf tools: Introduce dsos__fprintf_buildid To print the buildids in the list of dsos. Will be used by 'perf buildid-list' Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <1258396365-29217-4-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 30 ++++++++++++++++++++++++++---- tools/perf/util/symbol.h | 2 ++ 2 files changed, 28 insertions(+), 4 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 93e4b52ccfe..53de9c4488d 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -212,14 +212,21 @@ int build_id__sprintf(u8 *self, int len, char *bf) return raw - self; } -size_t dso__fprintf(struct dso *self, FILE *fp) +size_t dso__fprintf_buildid(struct dso *self, FILE *fp) { char sbuild_id[BUILD_ID_SIZE * 2 + 1]; - struct rb_node *nd; - size_t ret; build_id__sprintf(self->build_id, sizeof(self->build_id), sbuild_id); - ret = fprintf(fp, "dso: %s (%s)\n", self->short_name, sbuild_id); + return fprintf(fp, "%s", sbuild_id); +} + +size_t dso__fprintf(struct dso *self, FILE *fp) +{ + struct rb_node *nd; + size_t ret = fprintf(fp, "dso: %s (", self->short_name); + + ret += dso__fprintf_buildid(self, fp); + ret += fprintf(fp, ")\n"); for (nd = rb_first(&self->syms); nd; nd = rb_next(nd)) { struct symbol *pos = rb_entry(nd, struct symbol, rb_node); @@ -1428,6 +1435,21 @@ void dsos__fprintf(FILE *fp) dso__fprintf(pos, fp); } +size_t dsos__fprintf_buildid(FILE *fp) +{ + struct dso *pos; + size_t ret = 0; + + list_for_each_entry(pos, &dsos, node) { + ret += dso__fprintf_buildid(pos, fp); + if (verbose) + ret += fprintf(fp, " %s\n", pos->long_name); + else + ret += fprintf(fp, "\n"); + } + return ret; +} + int load_kernel(symbol_filter_t filter) { if (dsos__load_kernel(vmlinux_name, filter, modules) <= 0) diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 0a34a5493f1..51c5a4a0813 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -80,7 +80,9 @@ int dsos__load_kernel(const char *vmlinux, symbol_filter_t filter, int modules); struct dso *dsos__findnew(const char *name); int dso__load(struct dso *self, struct map *map, symbol_filter_t filter); void dsos__fprintf(FILE *fp); +size_t dsos__fprintf_buildid(FILE *fp); +size_t dso__fprintf_buildid(struct dso *self, FILE *fp); size_t dso__fprintf(struct dso *self, FILE *fp); char dso__symtab_origin(const struct dso *self); void dso__set_build_id(struct dso *self, void *build_id); From c34984b2bbc77596c97c333539bffc90d2033178 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 16 Nov 2009 16:32:45 -0200 Subject: [PATCH 280/470] perf buildid-list: New plumbing command With this we can list the buildids in a perf.data file so that we can pipe them to other, distro specific tools that from the buildids can figure out separate packages (foo-debuginfo) where we can find the matching symtabs so that perf report can do its job. 
E.g: [acme@doppio linux-2.6-tip]$ perf buildid-list | head -5 8e08b117e5458ad3f85da16d42d0fc5cd21c5869 520c2387a587cc5acfcf881e27dba1caaeab4b1f ec8dd400904ddfcac8b1c343263a790f977159dc 7caedbca5a6d8ab39a7fe44bd28c07d3e14a3f3f 379bb828fd08859dbea73279f04abefabc95a6a3 [acme@doppio linux-2.6-tip]$ perf buildid-list -v | head -5 8e08b117e5458ad3f85da16d42d0fc5cd21c5869 /sbin/init 520c2387a587cc5acfcf881e27dba1caaeab4b1f /lib64/ld-2.10.1.so ec8dd400904ddfcac8b1c343263a790f977159dc /lib64/libc-2.10.1.so 7caedbca5a6d8ab39a7fe44bd28c07d3e14a3f3f /sbin/udevd 379bb828fd08859dbea73279f04abefabc95a6a3 /lib64/libdl-2.10.1.so [acme@doppio linux-2.6-tip]$ Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <1258396365-29217-5-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- .../perf/Documentation/perf-buildid-list.txt | 34 +++++ tools/perf/Makefile | 1 + tools/perf/builtin-buildid-list.c | 116 ++++++++++++++++++ tools/perf/builtin.h | 1 + tools/perf/command-list.txt | 1 + tools/perf/perf.c | 1 + 6 files changed, 154 insertions(+) create mode 100644 tools/perf/Documentation/perf-buildid-list.txt create mode 100644 tools/perf/builtin-buildid-list.c diff --git a/tools/perf/Documentation/perf-buildid-list.txt b/tools/perf/Documentation/perf-buildid-list.txt new file mode 100644 index 00000000000..abab34e0557 --- /dev/null +++ b/tools/perf/Documentation/perf-buildid-list.txt @@ -0,0 +1,34 @@ +perf-buildid-list(1) +==================== + +NAME +---- +perf-buildid-list - List the buildids in a perf.data file + +SYNOPSIS +-------- +[verse] +'perf buildid-list ' + +DESCRIPTION +----------- +This command displays the buildids found in a perf.data file, so that other +tools can be used to fetch packages with matching symbol tables for use by +perf report. + +OPTIONS +------- +-i:: +--input=:: + Input file name. (default: perf.data) +-f:: +--force:: + Don't do ownership validation. +-v:: +--verbose:: + Be more verbose, showing the name of the DSOs after the buildids. + +SEE ALSO +-------- +linkperf:perf-record[1], linkperf:perf-top[1], +linkperf:perf-report[1] diff --git a/tools/perf/Makefile b/tools/perf/Makefile index f7cd89622cf..46a58a81c9a 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -430,6 +430,7 @@ BUILTIN_OBJS += bench/sched-pipe.o BUILTIN_OBJS += builtin-help.o BUILTIN_OBJS += builtin-sched.o +BUILTIN_OBJS += builtin-buildid-list.o BUILTIN_OBJS += builtin-list.o BUILTIN_OBJS += builtin-record.o BUILTIN_OBJS += builtin-report.o diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c new file mode 100644 index 00000000000..2e377e1be43 --- /dev/null +++ b/tools/perf/builtin-buildid-list.c @@ -0,0 +1,116 @@ +/* + * builtin-buildid-list.c + * + * Builtin buildid-list command: list buildids in perf.data + * + * Copyright (C) 2009, Red Hat Inc. 
+ * Copyright (C) 2009, Arnaldo Carvalho de Melo + */ +#include "builtin.h" +#include "perf.h" +#include "util/cache.h" +#include "util/data_map.h" +#include "util/debug.h" +#include "util/header.h" +#include "util/parse-options.h" +#include "util/symbol.h" + +static char const *input_name = "perf.data"; +static int force; + +static const char *const buildid_list_usage[] = { + "perf report []", + NULL +}; + +static const struct option options[] = { + OPT_STRING('i', "input", &input_name, "file", + "input file name"), + OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), + OPT_BOOLEAN('v', "verbose", &verbose, + "be more verbose (show counter open errors, etc)"), + OPT_END() +}; + +static int perf_file_section__process_buildids(struct perf_file_section *self, + int feat, int fd) +{ + if (feat != HEADER_BUILD_ID) + return 0; + + if (lseek(fd, self->offset, SEEK_SET) < 0) { + pr_warning("Failed to lseek to %Ld offset for buildids!\n", + self->offset); + return -1; + } + + if (perf_header__read_build_ids(fd, self->offset, self->size)) { + pr_warning("Failed to read buildids!\n"); + return -1; + } + + return 0; +} + +static int __cmd_buildid_list(void) +{ + int err = -1; + struct perf_header *header; + struct perf_file_header f_header; + struct stat input_stat; + int input = open(input_name, O_RDONLY); + + if (input < 0) { + pr_err("failed to open file: %s", input_name); + if (!strcmp(input_name, "perf.data")) + pr_err(" (try 'perf record' first)"); + pr_err("\n"); + goto out; + } + + err = fstat(input, &input_stat); + if (err < 0) { + perror("failed to stat file"); + goto out_close; + } + + if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) { + pr_err("file %s not owned by current user or root\n", + input_name); + goto out_close; + } + + if (!input_stat.st_size) { + pr_info("zero-sized file, nothing to do!\n"); + goto out_close; + } + + err = -1; + header = perf_header__new(); + if (header == NULL) + goto out_close; + + if (perf_file_header__read(&f_header, header, input) < 0) { + pr_warning("incompatible file format"); + goto out_close; + } + + err = perf_header__process_sections(header, input, + perf_file_section__process_buildids); + + if (err < 0) + goto out_close; + + dsos__fprintf_buildid(stdout); +out_close: + close(input); +out: + return err; +} + +int cmd_buildid_list(int argc, const char **argv, const char *prefix __used) +{ + argc = parse_options(argc, argv, options, buildid_list_usage, 0); + setup_pager(); + return __cmd_buildid_list(); +} diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h index f0cd5b139b7..e97954a0a3d 100644 --- a/tools/perf/builtin.h +++ b/tools/perf/builtin.h @@ -16,6 +16,7 @@ extern int check_pager_config(const char *cmd); extern int cmd_annotate(int argc, const char **argv, const char *prefix); extern int cmd_bench(int argc, const char **argv, const char *prefix); +extern int cmd_buildid_list(int argc, const char **argv, const char *prefix); extern int cmd_help(int argc, const char **argv, const char *prefix); extern int cmd_sched(int argc, const char **argv, const char *prefix); extern int cmd_list(int argc, const char **argv, const char *prefix); diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt index 981c40b9a5e..d37b16cf18f 100644 --- a/tools/perf/command-list.txt +++ b/tools/perf/command-list.txt @@ -4,6 +4,7 @@ # perf-annotate mainporcelain common perf-bench mainporcelain common +perf-buildid-list mainporcelain common perf-list mainporcelain common perf-sched mainporcelain common perf-record 
mainporcelain common diff --git a/tools/perf/perf.c b/tools/perf/perf.c index 8936786b42e..53359ebb369 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c @@ -287,6 +287,7 @@ static void handle_internal_command(int argc, const char **argv) static struct cmd_struct commands[] = { { "help", cmd_help, 0 }, { "list", cmd_list, 0 }, + { "buildid-list", cmd_buildid_list, 0 }, { "record", cmd_record, 0 }, { "report", cmd_report, 0 }, { "bench", cmd_bench, 0 }, From d65ff75fbe6f8ac7c17f18e4108521898468822c Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 16 Nov 2009 18:06:18 -0500 Subject: [PATCH 281/470] x86: Add verbose option to insn decoder test Add verbose option to insn decoder test. This dumps decoded instruction when building kernel with V=1. Signed-off-by: Masami Hiramatsu Cc: systemtap Cc: DLE Cc: Stephen Rothwell Cc: Randy Dunlap Cc: Jim Keniston Cc: Stephen Rothwell LKML-Reference: <20091116230618.5250.18762.stgit@harusame> Signed-off-by: Ingo Molnar --- arch/x86/tools/Makefile | 9 ++++- arch/x86/tools/test_get_len.c | 74 +++++++++++++++++++++++++++++------ 2 files changed, 71 insertions(+), 12 deletions(-) diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile index 5e295d95dc2..4688f90ce5a 100644 --- a/arch/x86/tools/Makefile +++ b/arch/x86/tools/Makefile @@ -1,6 +1,13 @@ PHONY += posttest + +ifeq ($(KBUILD_VERBOSE),1) + postest_verbose = -v +else + postest_verbose = +endif + quiet_cmd_posttest = TEST $@ - cmd_posttest = $(OBJDUMP) -d -j .text $(objtree)/vmlinux | awk -f $(srctree)/arch/x86/tools/distill.awk | $(obj)/test_get_len $(CONFIG_64BIT) + cmd_posttest = $(OBJDUMP) -d -j .text $(objtree)/vmlinux | awk -f $(srctree)/arch/x86/tools/distill.awk | $(obj)/test_get_len -$(CONFIG_64BIT) $(posttest_verbose) posttest: $(obj)/test_get_len vmlinux $(call cmd,posttest) diff --git a/arch/x86/tools/test_get_len.c b/arch/x86/tools/test_get_len.c index 376d3385219..5743e5128d3 100644 --- a/arch/x86/tools/test_get_len.c +++ b/arch/x86/tools/test_get_len.c @@ -20,6 +20,7 @@ #include #include #include +#include #define unlikely(cond) (cond) @@ -36,11 +37,16 @@ */ const char *prog; +static int verbose; +static int x86_64; static void usage(void) { fprintf(stderr, "Usage: objdump -d a.out | awk -f distill.awk |" - " %s [y|n](64bit flag)\n", prog); + " %s [-y|-n] [-v] \n", prog); + fprintf(stderr, "\t-y 64bit mode\n"); + fprintf(stderr, "\t-n 32bit mode\n"); + fprintf(stderr, "\t-v verbose mode\n"); exit(1); } @@ -50,6 +56,56 @@ static void malformed_line(const char *line, int line_nr) exit(3); } +static void dump_field(FILE *fp, const char *name, const char *indent, + struct insn_field *field) +{ + fprintf(fp, "%s.%s = {\n", indent, name); + fprintf(fp, "%s\t.value = %d, bytes[] = {%x, %x, %x, %x},\n", + indent, field->value, field->bytes[0], field->bytes[1], + field->bytes[2], field->bytes[3]); + fprintf(fp, "%s\t.got = %d, .nbytes = %d},\n", indent, + field->got, field->nbytes); +} + +static void dump_insn(FILE *fp, struct insn *insn) +{ + fprintf(fp, "Instruction = { \n"); + dump_field(fp, "prefixes", "\t", &insn->prefixes); + dump_field(fp, "rex_prefix", "\t", &insn->rex_prefix); + dump_field(fp, "vex_prefix", "\t", &insn->vex_prefix); + dump_field(fp, "opcode", "\t", &insn->opcode); + dump_field(fp, "modrm", "\t", &insn->modrm); + dump_field(fp, "sib", "\t", &insn->sib); + dump_field(fp, "displacement", "\t", &insn->displacement); + dump_field(fp, "immediate1", "\t", &insn->immediate1); + dump_field(fp, "immediate2", "\t", &insn->immediate2); + fprintf(fp, "\t.attr = %x, 
.opnd_bytes = %d, .addr_bytes = %d,\n", + insn->attr, insn->opnd_bytes, insn->addr_bytes); + fprintf(fp, "\t.length = %d, .x86_64 = %d, .kaddr = %p}\n", + insn->length, insn->x86_64, insn->kaddr); +} + +static void parse_args(int argc, char **argv) +{ + int c; + prog = argv[0]; + while ((c = getopt(argc, argv, "ynv")) != -1) { + switch (c) { + case 'y': + x86_64 = 1; + break; + case 'n': + x86_64 = 0; + break; + case 'v': + verbose = 1; + break; + default: + usage(); + } + } +} + #define BUFSIZE 256 int main(int argc, char **argv) @@ -57,15 +113,9 @@ int main(int argc, char **argv) char line[BUFSIZE]; unsigned char insn_buf[16]; struct insn insn; - int insns = 0; - int x86_64 = 0; + int insns = 0, c; - prog = argv[0]; - if (argc > 2) - usage(); - - if (argc == 2 && argv[1][0] == 'y') - x86_64 = 1; + parse_args(argc, argv); while (fgets(line, BUFSIZE, stdin)) { char copy[BUFSIZE], *s, *tab1, *tab2; @@ -97,8 +147,10 @@ int main(int argc, char **argv) if (insn.length != nb) { fprintf(stderr, "Error: %s", line); fprintf(stderr, "Error: objdump says %d bytes, but " - "insn_get_length() says %d (attr:%x)\n", nb, - insn.length, insn.attr); + "insn_get_length() says %d\n", nb, + insn.length); + if (verbose) + dump_insn(stderr, &insn); exit(2); } } From 35039eb6b199749943547c8572be6604edf00229 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 16 Nov 2009 18:06:24 -0500 Subject: [PATCH 282/470] x86: Show symbol name if insn decoder test failed Show symbol name if insn decoder test find a difference. This will help us to find out where the issue is. Signed-off-by: Masami Hiramatsu Cc: systemtap Cc: DLE Cc: Stephen Rothwell Cc: Randy Dunlap Cc: Jim Keniston Cc: Stephen Rothwell LKML-Reference: <20091116230624.5250.49813.stgit@harusame> Signed-off-by: Ingo Molnar --- arch/x86/tools/distill.awk | 5 +++++ arch/x86/tools/test_get_len.c | 10 +++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/arch/x86/tools/distill.awk b/arch/x86/tools/distill.awk index d433619bb86..c13c0ee48ab 100644 --- a/arch/x86/tools/distill.awk +++ b/arch/x86/tools/distill.awk @@ -15,6 +15,11 @@ BEGIN { fwait_str="9b\tfwait" } +/^ *[0-9a-f]+ <[^>]*>:/ { + # Symbol entry + printf("%s%s\n", $2, $1) +} + /^ *[0-9a-f]+:/ { if (split($0, field, "\t") < 3) { # This is a continuation of the same insn. 
diff --git a/arch/x86/tools/test_get_len.c b/arch/x86/tools/test_get_len.c index 5743e5128d3..af75e07217b 100644 --- a/arch/x86/tools/test_get_len.c +++ b/arch/x86/tools/test_get_len.c @@ -110,7 +110,7 @@ static void parse_args(int argc, char **argv) int main(int argc, char **argv) { - char line[BUFSIZE]; + char line[BUFSIZE], sym[BUFSIZE] = ""; unsigned char insn_buf[16]; struct insn insn; int insns = 0, c; @@ -122,6 +122,12 @@ int main(int argc, char **argv) int nb = 0; unsigned int b; + if (line[0] == '<') { + /* Symbol line */ + strcpy(sym, line); + continue; + } + insns++; memset(insn_buf, 0, 16); strcpy(copy, line); @@ -145,6 +151,8 @@ int main(int argc, char **argv) insn_init(&insn, insn_buf, x86_64); insn_get_length(&insn); if (insn.length != nb) { + fprintf(stderr, "Error: %s found a difference at %s\n", + prog, sym); fprintf(stderr, "Error: %s", line); fprintf(stderr, "Error: objdump says %d bytes, but " "insn_get_length() says %d\n", nb, From dc79c0fc08a94b857aed446bfb47cdfde529400c Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 16 Nov 2009 19:30:26 -0200 Subject: [PATCH 283/470] perf tools: Don't die in perf_header_attr__new() We really should propagate such kinds of errors so that users of these library functions decide what to do in such cases instead of exiting in random places like now. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <1258407027-384-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 5 ++++- tools/perf/util/header.c | 22 ++++++++++++---------- tools/perf/util/header.h | 4 +--- 3 files changed, 17 insertions(+), 14 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 04f335ef9a8..4c03bb7a4eb 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -220,7 +220,8 @@ static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int n h_attr = header->attr[nr]; } else { h_attr = perf_header_attr__new(a); - perf_header__add_attr(header, h_attr); + if (h_attr != NULL) + perf_header__add_attr(header, h_attr); } return h_attr; @@ -308,6 +309,8 @@ try_again: } h_attr = get_header_attr(attr, counter); + if (h_attr == NULL) + die("nomem\n"); if (!file_new) { if (memcmp(&h_attr->attr, attr, sizeof(*attr))) { diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index d8416f01117..2f07a238ffd 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -19,16 +19,16 @@ struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr) { struct perf_header_attr *self = malloc(sizeof(*self)); - if (!self) - die("nomem"); - - self->attr = *attr; - self->ids = 0; - self->size = 1; - self->id = malloc(sizeof(u64)); - - if (!self->id) - die("nomem"); + if (self != NULL) { + self->attr = *attr; + self->ids = 0; + self->size = 1; + self->id = malloc(sizeof(u64)); + if (self->id == NULL) { + free(self); + self = NULL; + } + } return self; } @@ -423,6 +423,8 @@ struct perf_header *perf_header__read(int fd) tmp = lseek(fd, 0, SEEK_CUR); attr = perf_header_attr__new(&f_attr.attr); + if (attr == NULL) + die("nomem"); nr_ids = f_attr.ids.size / sizeof(u64); lseek(fd, f_attr.ids.offset, SEEK_SET); diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index f1b3bf7bdfc..0cbd4c9e982 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -64,9 +64,7 @@ void perf_header__add_attr(struct perf_header *self, 
void perf_header__push_event(u64 id, const char *name); char *perf_header__find_event(u64 id); - -struct perf_header_attr * -perf_header_attr__new(struct perf_event_attr *attr); +struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr); void perf_header_attr__add_id(struct perf_header_attr *self, u64 id); u64 perf_header__sample_type(struct perf_header *header); From 3b6ed98895b0fccd8c387f3fc44016fb922c0658 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 16 Nov 2009 19:30:27 -0200 Subject: [PATCH 284/470] perf top: Use all the lines in the screen By querying the current number of rows, if the user specifies the number of entries, use that instead. If the user uses the 'e' command to change the number of lines 0 will mean do it automatically, any other number disables the auto resizing. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <1258407027-384-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 42 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 6613f988a33..3af95203208 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -60,7 +60,7 @@ static int system_wide = 0; static int default_interval = 0; static int count_filter = 5; -static int print_entries = 15; +static int print_entries; static int target_pid = -1; static int inherit = 0; @@ -115,6 +115,36 @@ struct sym_entry { * Source functions */ +/* most GUI terminals set LINES (although some don't export it) */ +static int term_rows(void) +{ + char *lines_string = getenv("LINES"); + int n_lines; + + if (lines_string && (n_lines = atoi(lines_string)) > 0) + return n_lines; +#ifdef TIOCGWINSZ + else { + struct winsize ws; + if (!ioctl(1, TIOCGWINSZ, &ws) && ws.ws_row) + return ws.ws_row; + } +#endif + return 25; +} + +static void update_print_entries(void) +{ + print_entries = term_rows(); + if (print_entries > 9) + print_entries -= 9; +} + +static void sig_winch_handler(int sig __used) +{ + update_print_entries(); +} + static void parse_source(struct sym_entry *syme) { struct symbol *sym; @@ -668,6 +698,11 @@ static void handle_keypress(int c) break; case 'e': prompt_integer(&print_entries, "Enter display entries (lines)"); + if (print_entries == 0) { + update_print_entries(); + signal(SIGWINCH, sig_winch_handler); + } else + signal(SIGWINCH, SIG_DFL); break; case 'E': if (nr_counters > 1) { @@ -1228,5 +1263,10 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) if (target_pid != -1 || profile_cpu != -1) nr_cpus = 1; + if (print_entries == 0) { + update_print_entries(); + signal(SIGWINCH, sig_winch_handler); + } + return __cmd_top(); } From 8ffcda17314cfeb698a667567ea63f63362dffbb Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 16 Nov 2009 21:45:24 -0200 Subject: [PATCH 285/470] perf top: Introduce --hide_{user,kernel}_symbols Default continues to be showing all symbols. 'K' and 'U' can be used to toggle showing kernel and user symbols. 
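The K/U toggles work off the cpumode bits every sample carries in its event header. A compressed stand-alone sketch of that classification, using the PERF_RECORD_MISC_* constants from linux/perf_event.h (the helper and the two flags are invented names; the constants are not):

/* Sketch: deciding whether a sample is hidden under the K/U toggles. */
#include <stdio.h>
#include <linux/perf_event.h>

static int hide_kernel_symbols;		/* toggled by 'K' */
static int hide_user_symbols;		/* toggled by 'U' */

/* 'misc' is the misc field of struct perf_event_header */
static int sample_hidden(unsigned short misc)
{
	unsigned short origin = misc & PERF_RECORD_MISC_CPUMODE_MASK;

	if (hide_kernel_symbols && origin == PERF_RECORD_MISC_KERNEL)
		return 1;
	if (hide_user_symbols && origin == PERF_RECORD_MISC_USER)
		return 1;
	return 0;
}

int main(void)
{
	hide_kernel_symbols = 1;	/* as if the user pressed 'K' */

	printf("kernel sample hidden: %d\n",
	       sample_hidden(PERF_RECORD_MISC_KERNEL));
	printf("user sample hidden:   %d\n",
	       sample_hidden(PERF_RECORD_MISC_USER));
	return 0;
}

perf top applies the same test in two places: when a sample arrives, so hidden samples are never accounted, and when the display list is rebuilt, so already-accounted entries of the now-hidden kind drop out.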
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <1258415125-15019-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 42 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 40 insertions(+), 2 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 3af95203208..89b7f68a179 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -76,6 +76,9 @@ static int delay_secs = 2; static int zero = 0; static int dump_symtab = 0; +static bool hide_kernel_symbols = false; +static bool hide_user_symbols = false; + /* * Source */ @@ -104,6 +107,7 @@ struct sym_entry { unsigned long snap_count; double weight; int skip; + u8 origin; struct map *map; struct source_line *source; struct source_line *lines; @@ -430,6 +434,13 @@ static void print_sym_table(void) list_for_each_entry_safe_from(syme, n, &active_symbols, node) { syme->snap_count = syme->count[snap]; if (syme->snap_count != 0) { + if ((hide_user_symbols && + syme->origin == PERF_RECORD_MISC_USER) || + (hide_kernel_symbols && + syme->origin == PERF_RECORD_MISC_KERNEL)) { + list_remove_active_sym(syme); + continue; + } syme->weight = sym_weight(syme); rb_insert_active_sym(&tmp, syme); sum_ksamples += syme->snap_count; @@ -637,6 +648,12 @@ static void print_mapped_keys(void) if (nr_counters > 1) fprintf(stdout, "\t[w] toggle display weighted/count[E]r. \t(%d)\n", display_weighted ? 1 : 0); + fprintf(stdout, + "\t[K] hide kernel_symbols symbols. \t(%s)\n", + hide_kernel_symbols ? "yes" : "no"); + fprintf(stdout, + "\t[U] hide user symbols. \t(%s)\n", + hide_user_symbols ? "yes" : "no"); fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", zero ? 
1 : 0); fprintf(stdout, "\t[qQ] quit.\n"); } @@ -650,6 +667,8 @@ static int key_mapped(int c) case 'z': case 'q': case 'Q': + case 'K': + case 'U': return 1; case 'E': case 'w': @@ -727,6 +746,9 @@ static void handle_keypress(int c) case 'F': prompt_percent(&sym_pcnt_filter, "Enter details display event filter (percent)"); break; + case 'K': + hide_kernel_symbols = !hide_kernel_symbols; + break; case 'q': case 'Q': printf("exiting.\n"); @@ -746,6 +768,9 @@ static void handle_keypress(int c) pthread_mutex_unlock(&syme->source_lock); } break; + case 'U': + hide_user_symbols = !hide_user_symbols; + break; case 'w': display_weighted = ~display_weighted; break; @@ -857,11 +882,16 @@ static void event__process_sample(const event_t *self, int counter) struct map *map; struct sym_entry *syme; struct symbol *sym; + u8 origin = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - switch (self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK) { + switch (origin) { case PERF_RECORD_MISC_USER: { - struct thread *thread = threads__findnew(self->ip.pid); + struct thread *thread; + if (hide_user_symbols) + return; + + thread = threads__findnew(self->ip.pid); if (thread == NULL) return; @@ -885,6 +915,9 @@ static void event__process_sample(const event_t *self, int counter) return; /* Fall thru */ case PERF_RECORD_MISC_KERNEL: + if (hide_kernel_symbols) + return; + sym = kernel_maps__find_symbol(ip, &map); if (sym == NULL) return; @@ -897,6 +930,7 @@ static void event__process_sample(const event_t *self, int counter) if (!syme->skip) { syme->count[counter]++; + syme->origin = origin; record_precise_ip(syme, counter, ip); pthread_mutex_lock(&active_symbols_lock); if (list_empty(&syme->node) || !syme->node.next) @@ -1178,6 +1212,8 @@ static const struct option options[] = { OPT_INTEGER('C', "CPU", &profile_cpu, "CPU to profile on"), OPT_STRING('k', "vmlinux", &vmlinux_name, "file", "vmlinux pathname"), + OPT_BOOLEAN('K', "hide_kernel_symbols", &hide_kernel_symbols, + "hide kernel symbols"), OPT_INTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"), OPT_INTEGER('r', "realtime", &realtime_prio, @@ -1200,6 +1236,8 @@ static const struct option options[] = { "profile at this frequency"), OPT_INTEGER('E', "entries", &print_entries, "display this many functions"), + OPT_BOOLEAN('U', "hide_user_symbols", &hide_user_symbols, + "hide user symbols"), OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose (show counter open errors, etc)"), OPT_END() From 1124ba73be6a758965340bd997593b2996649d60 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 16 Nov 2009 21:45:25 -0200 Subject: [PATCH 286/470] perf buildid-list: Always show the DSO name Porcelain can ignore it, humans can make more sense of it. Suggested-by: Frederic Weisbecker Suggested-by: Ingo Molnar Suggested-by: Peter Zijlstra Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <1258415125-15019-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/Documentation/perf-buildid-list.txt | 2 +- tools/perf/builtin-buildid-list.c | 2 +- tools/perf/util/symbol.c | 5 +---- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/tools/perf/Documentation/perf-buildid-list.txt b/tools/perf/Documentation/perf-buildid-list.txt index abab34e0557..01b642c0bf8 100644 --- a/tools/perf/Documentation/perf-buildid-list.txt +++ b/tools/perf/Documentation/perf-buildid-list.txt @@ -26,7 +26,7 @@ OPTIONS Don't do ownership validation. 
-v:: --verbose:: - Be more verbose, showing the name of the DSOs after the buildids. + Be more verbose. SEE ALSO -------- diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c index 2e377e1be43..7dee9d19ab7 100644 --- a/tools/perf/builtin-buildid-list.c +++ b/tools/perf/builtin-buildid-list.c @@ -28,7 +28,7 @@ static const struct option options[] = { "input file name"), OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), OPT_BOOLEAN('v', "verbose", &verbose, - "be more verbose (show counter open errors, etc)"), + "be more verbose"), OPT_END() }; diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 53de9c4488d..1b77e81b38d 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1442,10 +1442,7 @@ size_t dsos__fprintf_buildid(FILE *fp) list_for_each_entry(pos, &dsos, node) { ret += dso__fprintf_buildid(pos, fp); - if (verbose) - ret += fprintf(fp, " %s\n", pos->long_name); - else - ret += fprintf(fp, "\n"); + ret += fprintf(fp, " %s\n", pos->long_name); } return ret; } From 11deb1f9f6ca6318fa9470e024b9f0634df48b4c Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 17 Nov 2009 01:18:09 -0200 Subject: [PATCH 287/470] perf tools: Don't die() in perf_header__add_attr() Propagate the errors instead, the users are the ones to decide what to do if a library call fails. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <1258427892-16312-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 5 ++++- tools/perf/util/header.c | 30 ++++++++++++++++++++++-------- tools/perf/util/header.h | 6 ++++-- 3 files changed, 30 insertions(+), 11 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 4c03bb7a4eb..5411be4cfd7 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -221,7 +221,10 @@ static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int n } else { h_attr = perf_header_attr__new(a); if (h_attr != NULL) - perf_header__add_attr(header, h_attr); + if (perf_header__add_attr(header, h_attr) < 0) { + perf_header_attr__delete(h_attr); + h_attr = NULL; + } } return h_attr; diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 2f07a238ffd..23ccddae0b0 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -33,6 +33,12 @@ struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr) return self; } +void perf_header_attr__delete(struct perf_header_attr *self) +{ + free(self->id); + free(self); +} + void perf_header_attr__add_id(struct perf_header_attr *self, u64 id) { int pos = self->ids; @@ -66,22 +72,28 @@ struct perf_header *perf_header__new(void) return self; } -void perf_header__add_attr(struct perf_header *self, - struct perf_header_attr *attr) +int perf_header__add_attr(struct perf_header *self, + struct perf_header_attr *attr) { int pos = self->attrs; if (self->frozen) - die("frozen"); + return -1; self->attrs++; if (self->attrs > self->size) { - self->size *= 2; - self->attr = realloc(self->attr, self->size * sizeof(void *)); - if (!self->attr) - die("nomem"); + int nsize = self->size * 2; + struct perf_header_attr **nattr; + + nattr = realloc(self->attr, nsize * sizeof(void *)); + if (nattr == NULL) + return -1; + + self->size = nsize; + self->attr = nattr; } self->attr[pos] = attr; + return 0; } #define MAX_EVENT_NAME 64 @@ -434,7 +446,9 @@ struct perf_header 
*perf_header__read(int fd) perf_header_attr__add_id(attr, f_id); } - perf_header__add_attr(self, attr); + if (perf_header__add_attr(self, attr) < 0) + die("nomem"); + lseek(fd, tmp, SEEK_SET); } diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index 0cbd4c9e982..b0d5cd707a7 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -58,13 +58,15 @@ struct perf_header { struct perf_header *perf_header__read(int fd); void perf_header__write(struct perf_header *self, int fd, bool at_exit); -void perf_header__add_attr(struct perf_header *self, - struct perf_header_attr *attr); +int perf_header__add_attr(struct perf_header *self, + struct perf_header_attr *attr); void perf_header__push_event(u64 id, const char *name); char *perf_header__find_event(u64 id); struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr); +void perf_header_attr__delete(struct perf_header_attr *self); + void perf_header_attr__add_id(struct perf_header_attr *self, u64 id); u64 perf_header__sample_type(struct perf_header *header); From 5875412152ce67fb5087157b86ab6597f91d23e8 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 17 Nov 2009 01:18:10 -0200 Subject: [PATCH 288/470] perf tools: Don't die() in perf_header_attr__add_id() Propagate the errors instead, the users are the ones to decide what to do if a library call fails. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <1258427892-16312-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 5 ++++- tools/perf/util/header.c | 18 ++++++++++++------ tools/perf/util/header.h | 2 +- 3 files changed, 17 insertions(+), 8 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 5411be4cfd7..2a85205ba01 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -327,7 +327,10 @@ try_again: exit(-1); } - perf_header_attr__add_id(h_attr, read_data.id); + if (perf_header_attr__add_id(h_attr, read_data.id) < 0) { + pr_warning("Not enough memory to add id\n"); + exit(-1); + } assert(fd[nr_cpu][counter] >= 0); fcntl(fd[nr_cpu][counter], F_SETFL, O_NONBLOCK); diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 23ccddae0b0..dee1ed2f0d1 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -39,18 +39,23 @@ void perf_header_attr__delete(struct perf_header_attr *self) free(self); } -void perf_header_attr__add_id(struct perf_header_attr *self, u64 id) +int perf_header_attr__add_id(struct perf_header_attr *self, u64 id) { int pos = self->ids; self->ids++; if (self->ids > self->size) { - self->size *= 2; - self->id = realloc(self->id, self->size * sizeof(u64)); - if (!self->id) - die("nomem"); + int nsize = self->size * 2; + u64 *nid = realloc(self->id, nsize * sizeof(u64)); + + if (nid == NULL) + return -1; + + self->size = nsize; + self->id = nid; } self->id[pos] = id; + return 0; } /* @@ -444,7 +449,8 @@ struct perf_header *perf_header__read(int fd) for (j = 0; j < nr_ids; j++) { do_read(fd, &f_id, sizeof(f_id)); - perf_header_attr__add_id(attr, f_id); + if (perf_header_attr__add_id(attr, f_id) < 0) + die("nomem"); } if (perf_header__add_attr(self, attr) < 0) die("nomem"); diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index b0d5cd707a7..f46a94e09ee 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -67,7 +67,7 @@ char *perf_header__find_event(u64 id); struct 
perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr); void perf_header_attr__delete(struct perf_header_attr *self); -void perf_header_attr__add_id(struct perf_header_attr *self, u64 id); +int perf_header_attr__add_id(struct perf_header_attr *self, u64 id); u64 perf_header__sample_type(struct perf_header *header); struct perf_event_attr * From a9a70bbce7ab0bf3b1cba3ac662c4d502da6305c Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 17 Nov 2009 01:18:11 -0200 Subject: [PATCH 289/470] perf tools: Don't die() in perf_header__new() Propagate the errors instead, the users are the ones to decide what to do if a library call fails. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <1258427892-16312-3-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 5 +++++ tools/perf/util/header.c | 18 +++++++++++------- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 2a85205ba01..82260c56db3 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -439,6 +439,11 @@ static int __cmd_record(int argc, const char **argv) else header = perf_header__new(); + if (header == NULL) { + pr_err("Not enough memory for reading perf file header\n"); + return -1; + } + if (raw_samples) { perf_header__set_feat(header, HEADER_TRACE_INFO); } else { diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index dee1ed2f0d1..726a0eb5f19 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -65,14 +65,15 @@ struct perf_header *perf_header__new(void) { struct perf_header *self = calloc(sizeof(*self), 1); - if (!self) - die("nomem"); + if (self != NULL) { + self->size = 1; + self->attr = malloc(sizeof(void *)); - self->size = 1; - self->attr = malloc(sizeof(void *)); - - if (!self->attr) - die("nomem"); + if (self->attr == NULL) { + free(self); + self = NULL; + } + } return self; } @@ -426,6 +427,9 @@ struct perf_header *perf_header__read(int fd) u64 f_id; int nr_attrs, nr_ids, i, j; + if (self == NULL) + die("nomem"); + if (perf_file_header__read(&f_header, self, fd) < 0) die("incompatible file format"); From 3726cc75e581c157202da93bb2333cce25c15c98 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 17 Nov 2009 01:18:12 -0200 Subject: [PATCH 290/470] perf tools: Don't die() in do_write() Propagate the errors instead, the users are the ones to decide what to do if a library call fails. 
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <1258427892-16312-4-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/header.c | 33 ++++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 726a0eb5f19..b01a9537977 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -161,31 +161,36 @@ bool perf_header__has_feat(const struct perf_header *self, int feat) return test_bit(feat, self->adds_features); } -static void do_write(int fd, void *buf, size_t size) +static int do_write(int fd, const void *buf, size_t size) { while (size) { int ret = write(fd, buf, size); if (ret < 0) - die("failed to write"); + return -1; size -= ret; buf += ret; } + + return 0; } -static void write_buildid_table(int fd, struct list_head *id_head) +static int write_buildid_table(int fd, struct list_head *id_head) { struct build_id_list *iter, *next; list_for_each_entry_safe(iter, next, id_head, list) { struct build_id_event *b = &iter->event; - do_write(fd, b, sizeof(*b)); - do_write(fd, (void *)iter->dso_name, iter->len); + if (do_write(fd, b, sizeof(*b)) < 0 || + do_write(fd, iter->dso_name, iter->len) < 0) + return -1; list_del(&iter->list); free(iter); } + + return 0; } static void @@ -233,12 +238,14 @@ perf_header__adds_write(struct perf_header *self, int fd) /* Write build-ids */ buildid_sec->offset = lseek(fd, 0, SEEK_CUR); - write_buildid_table(fd, &id_list); + if (write_buildid_table(fd, &id_list) < 0) + die("failed to write buildid table"); buildid_sec->size = lseek(fd, 0, SEEK_CUR) - buildid_sec->offset; } lseek(fd, sec_start, SEEK_SET); - do_write(fd, feat_sec, sec_size); + if (do_write(fd, feat_sec, sec_size) < 0) + die("failed to write feature section"); free(feat_sec); } @@ -256,7 +263,8 @@ void perf_header__write(struct perf_header *self, int fd, bool at_exit) attr = self->attr[i]; attr->id_offset = lseek(fd, 0, SEEK_CUR); - do_write(fd, attr->id, attr->ids * sizeof(u64)); + if (do_write(fd, attr->id, attr->ids * sizeof(u64)) < 0) + die("failed to write perf header"); } @@ -272,13 +280,15 @@ void perf_header__write(struct perf_header *self, int fd, bool at_exit) .size = attr->ids * sizeof(u64), } }; - do_write(fd, &f_attr, sizeof(f_attr)); + if (do_write(fd, &f_attr, sizeof(f_attr)) < 0) + die("failed to write perf header attribute"); } self->event_offset = lseek(fd, 0, SEEK_CUR); self->event_size = event_count * sizeof(struct perf_trace_event_type); if (events) - do_write(fd, events, self->event_size); + if (do_write(fd, events, self->event_size) < 0) + die("failed to write perf header events"); self->data_offset = lseek(fd, 0, SEEK_CUR); @@ -306,7 +316,8 @@ void perf_header__write(struct perf_header *self, int fd, bool at_exit) memcpy(&f_header.adds_features, &self->adds_features, sizeof(self->adds_features)); lseek(fd, 0, SEEK_SET); - do_write(fd, &f_header, sizeof(f_header)); + if (do_write(fd, &f_header, sizeof(f_header)) < 0) + die("failed to write perf header"); lseek(fd, self->data_offset + self->data_size, SEEK_SET); self->frozen = 1; From 751386507701010831d72c522171753d2cd903d2 Mon Sep 17 00:00:00 2001 From: "Michael S. 
Tsirkin" Date: Thu, 29 Oct 2009 17:20:02 +0200 Subject: [PATCH 291/470] perf tools: Support static build This makes it possible to build perf statically, by performing: make LDFLAGS=-static Since static libraries are only searched in the order they are specified, move library list from LDFLAGS to EXTLIBS, so that they are put at the end of linker command line. Signed-off-by: Michael S. Tsirkin Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091029152002.GA5406@redhat.com> [ v2: resolved conflicts ] Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 3dbb5c5bb8c..5d1a8b0dff8 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -145,6 +145,8 @@ all:: # Define NO_EXTERNAL_GREP if you don't want "perf grep" to ever call # your external grep (e.g., if your system lacks grep, if its grep is # broken, or spawning external process is slower than built-in grep perf has). +# +# Define LDFLAGS=-static to build a static binary. PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE @$(SHELL_PATH) util/PERF-VERSION-GEN @@ -208,7 +210,7 @@ ifndef PERF_DEBUG endif CFLAGS = $(MBITS) -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) -LDFLAGS = -lpthread -lrt -lelf -lm +EXTLIBS = -lpthread -lrt -lelf -lm ALL_CFLAGS = $(CFLAGS) ALL_LDFLAGS = $(LDFLAGS) STRIP ?= strip @@ -470,19 +472,19 @@ ifeq ($(uname_S),Darwin) PTHREAD_LIBS = endif -ifeq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y) -ifneq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { const char * version = gnu_get_libc_version(); return (long)version; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y) +ifeq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) > /dev/null 2>&1 && echo y"), y) +ifneq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { const char * version = gnu_get_libc_version(); return (long)version; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) > /dev/null 2>&1 && echo y"), y) msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]); endif - ifneq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y) + ifneq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) > /dev/null 2>&1 && echo y"), y) BASIC_CFLAGS += -DLIBELF_NO_MMAP endif else msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]); endif -ifneq ($(shell sh -c "(echo '\#include '; echo '\#include '; echo 'int main(void) { 
Dwarf_Debug dbg; Dwarf_Error err; Dwarf_Ranges *rng; dwarf_init(0, DW_DLC_READ, 0, 0, &dbg, &err); dwarf_get_ranges(dbg, 0, &rng, 0, 0, &err); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -ldwarf -lelf -o /dev/null $(ALL_LDFLAGS) > /dev/null 2>&1 && echo y"), y) +ifneq ($(shell sh -c "(echo '\#include '; echo '\#include '; echo 'int main(void) { Dwarf_Debug dbg; Dwarf_Error err; Dwarf_Ranges *rng; dwarf_init(0, DW_DLC_READ, 0, 0, &dbg, &err); dwarf_get_ranges(dbg, 0, &rng, 0, 0, &err); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -ldwarf -lelf -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) > /dev/null 2>&1 && echo y"), y) msg := $(warning No libdwarf.h found or old libdwarf.h found, disables dwarf support. Please install libdwarf-dev/libdwarf-devel >= 20081231); BASIC_CFLAGS += -DNO_LIBDWARF else @@ -494,20 +496,20 @@ endif ifdef NO_DEMANGLE BASIC_CFLAGS += -DNO_DEMANGLE else - has_bfd := $(shell sh -c "(echo '\#include '; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -lbfd > /dev/null 2>&1 && echo y") + has_bfd := $(shell sh -c "(echo '\#include '; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -lbfd > /dev/null 2>&1 && echo y") ifeq ($(has_bfd),y) EXTLIBS += -lbfd else - has_bfd_iberty := $(shell sh -c "(echo '\#include '; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -lbfd -liberty > /dev/null 2>&1 && echo y") + has_bfd_iberty := $(shell sh -c "(echo '\#include '; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty > /dev/null 2>&1 && echo y") ifeq ($(has_bfd_iberty),y) EXTLIBS += -lbfd -liberty else - has_bfd_iberty_z := $(shell sh -c "(echo '\#include '; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -lbfd -liberty -lz > /dev/null 2>&1 && echo y") + has_bfd_iberty_z := $(shell sh -c "(echo '\#include '; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty -lz > /dev/null 2>&1 && echo y") ifeq ($(has_bfd_iberty_z),y) EXTLIBS += -lbfd -liberty -lz else - has_cplus_demangle := $(shell sh -c "(echo 'extern char *cplus_demangle(const char *, int);'; echo 'int main(void) { cplus_demangle(0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) -liberty > /dev/null 2>&1 && echo y") + has_cplus_demangle := $(shell sh -c "(echo 'extern char *cplus_demangle(const char *, int);'; echo 'int main(void) { cplus_demangle(0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -liberty > /dev/null 2>&1 && echo y") ifeq ($(has_cplus_demangle),y) EXTLIBS += -liberty BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE From d62d77fd18cc82e839e49b7ef3360e4411f7d2e5 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Tue, 17 Nov 2009 12:29:38 +0100 Subject: [PATCH 292/470] perf annotate: Allocate history size correctly Symbol offset history table size does not get updated properly when it is being resized. This leads to garbage results in perf annotate. 
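In other words, whenever a symbol's end address is adjusted after the fact, its per-offset hit table has to be reallocated to match, otherwise annotation indexes a table sized for the old range. A stripped-down sketch of that invariant (struct and helper names here are illustrative only):

  #include <stdlib.h>
  #include <stdint.h>

  struct sym_hist {
          uint64_t start, end;    /* address range covered by the symbol */
          uint64_t *hist;         /* one counter per byte offset into the symbol */
  };

  /* Keep the counter array in sync with a (possibly shrunk) address range. */
  static int sym_hist__resize(struct sym_hist *s, uint64_t new_end)
  {
          s->end = new_end;
          free(s->hist);
          s->hist = calloc(s->end - s->start, sizeof(uint64_t));
          return s->hist ? 0 : -1;
  }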
Signed-off-by: Nick Piggin Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo Cc: Mike Galbraith LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 226f44a2357..cbac5754929 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -227,6 +227,10 @@ static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int v) *curr = rb_entry(nd, struct symbol, rb_node); prev->end = curr->start - 1; + if (prev->hist) { + free(prev->hist); + prev->hist = calloc(sizeof(u64), prev->end - prev->start); + } prevnd = nd; } @@ -883,6 +887,10 @@ static inline void dso__fill_symbol_holes(struct dso *self) pos->end = prev->end; else if (hole) pos->end = prev->start - 1; + if (pos->hist) { + free(pos->hist); + pos->hist = calloc(sizeof(u64), pos->end - pos->start); + } } } prev = pos; From 11ada26c78febe4662a8e848f3bff74e3200c920 Mon Sep 17 00:00:00 2001 From: "Luck, Tony" Date: Tue, 17 Nov 2009 09:05:56 -0800 Subject: [PATCH 293/470] perf tools: Add ia64 support for tools/perf/ Compiler on ia64 rejects the "-m64" option. Add arch specific pieces to perf.h Signed-off-by: Tony Luck Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <4b02d7f43514327a@agluck-desktop.sc.intel.com> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 8 +++++--- tools/perf/perf.h | 6 ++++++ 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 5d1a8b0dff8..3f0666af93d 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -166,10 +166,12 @@ ifdef NO_64BIT MBITS := -m32 else # - # If we're on a 64-bit kernel, use -m64: + # If we're on a 64-bit kernel (except ia64), use -m64: # - ifneq ($(patsubst %64,%,$(uname_M)),$(uname_M)) - MBITS := -m64 + ifneq ($(uname_M),ia64) + ifneq ($(patsubst %64,%,$(uname_M)),$(uname_M)) + MBITS := -m64 + endif endif endif diff --git a/tools/perf/perf.h b/tools/perf/perf.h index 216bdb223f6..454d5d55f32 100644 --- a/tools/perf/perf.h +++ b/tools/perf/perf.h @@ -53,6 +53,12 @@ #define cpu_relax() asm volatile("" ::: "memory") #endif +#ifdef __ia64__ +#include "../../arch/ia64/include/asm/unistd.h" +#define rmb() asm volatile ("mf" ::: "memory") +#define cpu_relax() asm volatile ("hint @pause" ::: "memory") +#endif + #include #include #include From cfc10d3bcc50d70f72c0f43d03eee965c726ccc0 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 17 Nov 2009 15:40:53 -0200 Subject: [PATCH 294/470] perf symbols: Add a long_name_len member to struct dso MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Using a two bytes hole we already had and since we also need to calculate this strlen for fetching the buildids. We'll use it in 'perf top' to auto-adjust the output based on the terminal width. 
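The idea is simply to pay for strlen() once, when the name is set, and let every consumer reuse the cached length. A trimmed-down sketch (the struct is reduced to the two relevant fields; names mirror the patch):

  #include <string.h>
  #include <stdint.h>

  struct dso_name {
          uint16_t long_name_len;         /* fits the existing 2-byte padding hole */
          const char *long_name;
  };

  static void dso_name__set_long_name(struct dso_name *d, const char *name)
  {
          d->long_name = name;
          d->long_name_len = strlen(name);        /* cached; callers never re-scan */
  }

perf top then compares long_name_len against the columns left on the terminal to decide between the short and long DSO name.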
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1258479655-28662-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 26 +++++++++++++++++++++----- tools/perf/util/symbol.h | 1 + 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 1b77e81b38d..5cc96c86861 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -109,13 +109,24 @@ static size_t symbol__fprintf(struct symbol *self, FILE *fp) self->start, self->end, self->name); } +static void dso__set_long_name(struct dso *self, char *name) +{ + self->long_name = name; + self->long_name_len = strlen(name); +} + +static void dso__set_basename(struct dso *self) +{ + self->short_name = basename(self->long_name); +} + struct dso *dso__new(const char *name) { struct dso *self = malloc(sizeof(*self) + strlen(name) + 1); if (self != NULL) { strcpy(self->name, name); - self->long_name = self->name; + dso__set_long_name(self, self->name); self->short_name = self->name; self->syms = RB_ROOT; self->find_symbol = dso__find_symbol; @@ -888,7 +899,7 @@ bool fetch_build_id_table(struct list_head *head) continue; have_buildid = true; memset(&b.header, 0, sizeof(b.header)); - len = strlen(pos->long_name) + 1; + len = pos->long_name_len + 1; len = ALIGN(len, 64); b.header.size = sizeof(b) + len; @@ -1165,6 +1176,7 @@ static int dsos__load_modules_sym_dir(char *dirname, symbol_filter_t filter) dso_name[PATH_MAX]; struct map *map; struct rb_node *last; + char *long_name; if (dot == NULL || strcmp(dot, ".ko")) continue; @@ -1179,9 +1191,11 @@ static int dsos__load_modules_sym_dir(char *dirname, symbol_filter_t filter) snprintf(path, sizeof(path), "%s/%s", dirname, dent->d_name); - map->dso->long_name = strdup(path); - if (map->dso->long_name == NULL) + long_name = strdup(path); + if (long_name == NULL) goto failure; + dso__set_long_name(map->dso, long_name); + dso__set_basename(map->dso); err = dso__load_module_sym(map->dso, map, filter); if (err < 0) @@ -1420,8 +1434,10 @@ struct dso *dsos__findnew(const char *name) if (!dso) { dso = dso__new(name); - if (dso != NULL) + if (dso != NULL) { dsos__add(dso); + dso__set_basename(dso); + } } return dso; diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 51c5a4a0813..5ad1019607d 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -66,6 +66,7 @@ struct dso { u8 has_build_id:1; unsigned char origin; u8 build_id[BUILD_ID_SIZE]; + u16 long_name_len; const char *short_name; char *long_name; char name[0]; From 13cc5079f235906e60577dbce8da2f9607e67e93 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 17 Nov 2009 15:40:54 -0200 Subject: [PATCH 295/470] perf top: Auto adjust symbol and dso widths MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We pre-calculate the symbol name length, then after we sort the entries to print, calculate the biggest one and use that for the symbol name width justification, then use the dso->long_name->len to justificate the DSO name, deciding whether using the short or long name depending on how much space we have on the terminal. IOW give as much info to the user as the terminal width allows. 
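Two standard C facilities do the heavy lifting here: TIOCGWINSZ reports the terminal size, and printf's '*' width/precision pads or truncates a column to a width chosen at run time. A small standalone sketch (the 29 reserved columns are taken from the patch; everything else is illustrative):

  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <unistd.h>

  int main(void)
  {
          struct winsize ws = { .ws_col = 80 };   /* fallback if not a tty */
          int width;

          if (isatty(STDOUT_FILENO))
                  ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws);

          width = ws.ws_col - 29;         /* leave room for the count columns */
          if (width < 16)
                  width = 16;

          /* "%-*.*s": left-justify, pad to 'width', truncate to 'width' */
          printf(" %-*.*s DSO\n", width, width, "function");
          return 0;
  }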
Suggested-by: Ingo Molnar Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1258479655-28662-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 90 +++++++++++++++++++++++++++++----------- 1 file changed, 66 insertions(+), 24 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 89b7f68a179..a368978d517 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -78,6 +78,14 @@ static int dump_symtab = 0; static bool hide_kernel_symbols = false; static bool hide_user_symbols = false; +static struct winsize winsize; +static const char *graph_line = + "_____________________________________________________________________" + "_____________________________________________________________________"; +static const char *graph_dotted_line = + "---------------------------------------------------------------------" + "---------------------------------------------------------------------" + "---------------------------------------------------------------------"; /* * Source @@ -107,6 +115,7 @@ struct sym_entry { unsigned long snap_count; double weight; int skip; + u16 name_len; u8 origin; struct map *map; struct source_line *source; @@ -119,34 +128,40 @@ struct sym_entry { * Source functions */ -/* most GUI terminals set LINES (although some don't export it) */ -static int term_rows(void) +static void get_term_dimensions(struct winsize *ws) { - char *lines_string = getenv("LINES"); - int n_lines; + char *s = getenv("LINES"); - if (lines_string && (n_lines = atoi(lines_string)) > 0) - return n_lines; -#ifdef TIOCGWINSZ - else { - struct winsize ws; - if (!ioctl(1, TIOCGWINSZ, &ws) && ws.ws_row) - return ws.ws_row; + if (s != NULL) { + ws->ws_row = atoi(s); + s = getenv("COLUMNS"); + if (s != NULL) { + ws->ws_col = atoi(s); + if (ws->ws_row && ws->ws_col) + return; + } } +#ifdef TIOCGWINSZ + if (ioctl(1, TIOCGWINSZ, ws) == 0 && + ws->ws_row && ws->ws_col) + return; #endif - return 25; + ws->ws_row = 25; + ws->ws_col = 80; } -static void update_print_entries(void) +static void update_print_entries(struct winsize *ws) { - print_entries = term_rows(); + print_entries = ws->ws_row; + if (print_entries > 9) print_entries -= 9; } static void sig_winch_handler(int sig __used) { - update_print_entries(); + get_term_dimensions(&winsize); + update_print_entries(&winsize); } static void parse_source(struct sym_entry *syme) @@ -423,6 +438,8 @@ static void print_sym_table(void) struct sym_entry *syme, *n; struct rb_root tmp = RB_ROOT; struct rb_node *nd; + int sym_width = 0, dso_width; + const int win_width = winsize.ws_col - 1; samples = userspace_samples = 0; @@ -434,6 +451,7 @@ static void print_sym_table(void) list_for_each_entry_safe_from(syme, n, &active_symbols, node) { syme->snap_count = syme->count[snap]; if (syme->snap_count != 0) { + if ((hide_user_symbols && syme->origin == PERF_RECORD_MISC_USER) || (hide_kernel_symbols && @@ -453,8 +471,7 @@ static void print_sym_table(void) puts(CONSOLE_CLEAR); - printf( -"------------------------------------------------------------------------------\n"); + printf("%-*.*s\n", win_width, win_width, graph_dotted_line); printf( " PerfTop:%8.0f irqs/sec kernel:%4.1f%% [", samples_per_sec, 100.0 - (100.0*((samples_per_sec-ksamples_per_sec)/samples_per_sec))); @@ -492,26 +509,44 @@ static void print_sym_table(void) printf(", %d CPUs)\n", nr_cpus); } - 
printf("------------------------------------------------------------------------------\n\n"); + printf("%-*.*s\n\n", win_width, win_width, graph_dotted_line); if (sym_filter_entry) { show_details(sym_filter_entry); return; } + /* + * Find the longest symbol name that will be displayed + */ + for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) { + syme = rb_entry(nd, struct sym_entry, rb_node); + if (++printed > print_entries || + (int)syme->snap_count < count_filter) + continue; + + if (syme->name_len > sym_width) + sym_width = syme->name_len; + } + + printed = 0; + if (nr_counters == 1) printf(" samples pcnt"); else printf(" weight samples pcnt"); + dso_width = winsize.ws_col - sym_width - 29; + if (verbose) printf(" RIP "); - printf(" function DSO\n"); + printf(" %-*.*s DSO\n", sym_width, sym_width, "function"); printf(" %s _______ _____", nr_counters == 1 ? " " : "______"); if (verbose) printf(" ________________"); - printf(" ________________________________ ________________\n\n"); + printf(" %-*.*s %-*.*s\n\n", sym_width, sym_width, graph_line, + dso_width, dso_width, graph_line); for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) { struct symbol *sym; @@ -534,8 +569,11 @@ static void print_sym_table(void) percent_color_fprintf(stdout, "%4.1f%%", pcnt); if (verbose) printf(" %016llx", sym->start); - printf(" %-32s", sym->name); - printf(" %s", syme->map->dso->short_name); + printf(" %-*.*s", sym_width, sym_width, sym->name); + printf(" %-*.*s", dso_width, dso_width, + dso_width >= syme->map->dso->long_name_len ? + syme->map->dso->long_name : + syme->map->dso->short_name); printf("\n"); } } @@ -718,7 +756,7 @@ static void handle_keypress(int c) case 'e': prompt_integer(&print_entries, "Enter display entries (lines)"); if (print_entries == 0) { - update_print_entries(); + sig_winch_handler(SIGWINCH); signal(SIGWINCH, sig_winch_handler); } else signal(SIGWINCH, SIG_DFL); @@ -862,6 +900,9 @@ static int symbol_filter(struct map *map, struct symbol *sym) } } + if (!syme->skip) + syme->name_len = strlen(sym->name); + return 0; } @@ -1301,8 +1342,9 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) if (target_pid != -1 || profile_cpu != -1) nr_cpus = 1; + get_term_dimensions(&winsize); if (print_entries == 0) { - update_print_entries(); + update_print_entries(&winsize); signal(SIGWINCH, sig_winch_handler); } From 1a105f743d9fa5f7b8eeeca0afb789951164a361 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 17 Nov 2009 15:40:55 -0200 Subject: [PATCH 296/470] perf top: Suppress DSO column if only one is present MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit E.g. 
[root@doppio ~]# perf top -U --------------------------------------------------------------------------- PerfTop: 482 irqs/sec kernel:100.0% [1000Hz cycles], (all, 2 CPUs) --------------------------------------------------------------------------- DSO: vmlinux samples pcnt function _______ _____ _________________________ 471.00 47.9% read_hpet 57.00 5.8% acpi_os_read_port 30.00 3.1% hpet_next_event 30.00 3.1% find_busiest_group 22.00 2.2% schedule 18.00 1.8% sched_clock_local 14.00 1.4% _spin_lock_irqsave 14.00 1.4% native_read_tsc 13.00 1.3% trace_hardirqs_off 9.00 0.9% fget_light 9.00 0.9% ioread8 8.00 0.8% do_sys_poll Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1258479655-28662-3-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 43 ++++++++++++++++++++++++++++++---------- 1 file changed, 32 insertions(+), 11 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index a368978d517..6db0e37ee33 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -438,8 +438,9 @@ static void print_sym_table(void) struct sym_entry *syme, *n; struct rb_root tmp = RB_ROOT; struct rb_node *nd; - int sym_width = 0, dso_width; + int sym_width = 0, dso_width = 0; const int win_width = winsize.ws_col - 1; + struct dso *unique_dso = NULL, *first_dso = NULL; samples = userspace_samples = 0; @@ -509,7 +510,7 @@ static void print_sym_table(void) printf(", %d CPUs)\n", nr_cpus); } - printf("%-*.*s\n\n", win_width, win_width, graph_dotted_line); + printf("%-*.*s\n", win_width, win_width, graph_dotted_line); if (sym_filter_entry) { show_details(sym_filter_entry); @@ -525,28 +526,47 @@ static void print_sym_table(void) (int)syme->snap_count < count_filter) continue; + if (first_dso == NULL) + unique_dso = first_dso = syme->map->dso; + else if (syme->map->dso != first_dso) + unique_dso = NULL; + + if (syme->map->dso->long_name_len > dso_width) + dso_width = syme->map->dso->long_name_len; + if (syme->name_len > sym_width) sym_width = syme->name_len; } printed = 0; + if (unique_dso) + printf("DSO: %s\n", unique_dso->long_name); + else { + int max_dso_width = winsize.ws_col - sym_width - 29; + if (dso_width > max_dso_width) + dso_width = max_dso_width; + putchar('\n'); + } if (nr_counters == 1) printf(" samples pcnt"); else printf(" weight samples pcnt"); - dso_width = winsize.ws_col - sym_width - 29; - if (verbose) printf(" RIP "); - printf(" %-*.*s DSO\n", sym_width, sym_width, "function"); + printf(" %-*.*s", sym_width, sym_width, "function"); + if (!unique_dso) + printf(" DSO"); + putchar('\n'); printf(" %s _______ _____", nr_counters == 1 ? " " : "______"); if (verbose) printf(" ________________"); - printf(" %-*.*s %-*.*s\n\n", sym_width, sym_width, graph_line, - dso_width, dso_width, graph_line); + printf(" %-*.*s", sym_width, sym_width, graph_line); + if (!unique_dso) + printf(" %-*.*s", dso_width, dso_width, graph_line); + puts("\n"); for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) { struct symbol *sym; @@ -570,10 +590,11 @@ static void print_sym_table(void) if (verbose) printf(" %016llx", sym->start); printf(" %-*.*s", sym_width, sym_width, sym->name); - printf(" %-*.*s", dso_width, dso_width, - dso_width >= syme->map->dso->long_name_len ? - syme->map->dso->long_name : - syme->map->dso->short_name); + if (!unique_dso) + printf(" %-*.*s", dso_width, dso_width, + dso_width >= syme->map->dso->long_name_len ? 
+ syme->map->dso->long_name : + syme->map->dso->short_name); printf("\n"); } } From 51a472decb845e920137284a5cfef51fb7d61206 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 17 Nov 2009 18:38:00 -0200 Subject: [PATCH 297/470] perf top: Introduce helper function to access symbol from sym_entry MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1258490282-1821-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 6db0e37ee33..0d60c517c0b 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -128,6 +128,11 @@ struct sym_entry { * Source functions */ +static inline struct symbol *sym_entry__symbol(struct sym_entry *self) +{ + return (struct symbol *)(self + 1); +} + static void get_term_dimensions(struct winsize *ws) { char *s = getenv("LINES"); @@ -181,7 +186,7 @@ static void parse_source(struct sym_entry *syme) goto out_assign; } - sym = (struct symbol *)(syme + 1); + sym = sym_entry__symbol(syme); map = syme->map; path = map->dso->long_name; @@ -276,7 +281,7 @@ out_unlock: static void lookup_sym_source(struct sym_entry *syme) { - struct symbol *symbol = (struct symbol *)(syme + 1); + struct symbol *symbol = sym_entry__symbol(syme); struct source_line *line; char pattern[PATH_MAX]; @@ -325,7 +330,7 @@ static void show_details(struct sym_entry *syme) if (!syme->source) return; - symbol = (struct symbol *)(syme + 1); + symbol = sym_entry__symbol(syme); printf("Showing %s for %s\n", event_name(sym_counter), symbol->name); printf(" Events Pcnt (>=%d%%)\n", sym_pcnt_filter); @@ -573,7 +578,7 @@ static void print_sym_table(void) double pcnt; syme = rb_entry(nd, struct sym_entry, rb_node); - sym = (struct symbol *)(syme + 1); + sym = sym_entry__symbol(syme); if (++printed > print_entries || (int)syme->snap_count < count_filter) continue; @@ -661,7 +666,7 @@ static void prompt_symbol(struct sym_entry **target, const char *msg) pthread_mutex_unlock(&active_symbols_lock); list_for_each_entry_safe_from(syme, n, &active_symbols, node) { - struct symbol *sym = (struct symbol *)(syme + 1); + struct symbol *sym = sym_entry__symbol(syme); if (!strcmp(buf, sym->name)) { found = syme; @@ -685,7 +690,7 @@ static void print_mapped_keys(void) char *name = NULL; if (sym_filter_entry) { - struct symbol *sym = (struct symbol *)(sym_filter_entry+1); + struct symbol *sym = sym_entry__symbol(sym_filter_entry); name = sym->name; } From 5a8e5a3065bf04b7673262fd6c46123e4b888d2b Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 17 Nov 2009 18:38:01 -0200 Subject: [PATCH 298/470] perf top: Allocate space only for the number of counters used MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reducing memory consumption on a typical desktop machine: From: 32710 root 20 0 172m 142m 1056 S 0.0 4.7 0:00.37 perf To: 420 root 20 0 47528 16m 1056 R 0.3 0.5 0:00.24 perf Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1258490282-1821-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git 
a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 0d60c517c0b..49cf87680fe 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -111,7 +111,6 @@ static int display_weighted = -1; struct sym_entry { struct rb_node rb_node; struct list_head node; - unsigned long count[MAX_COUNTERS]; unsigned long snap_count; double weight; int skip; @@ -122,6 +121,7 @@ struct sym_entry { struct source_line *lines; struct source_line **lines_tail; pthread_mutex_t source_lock; + unsigned long count[0]; }; /* @@ -130,7 +130,7 @@ struct sym_entry { static inline struct symbol *sym_entry__symbol(struct sym_entry *self) { - return (struct symbol *)(self + 1); + return ((void *)self) + symbol__priv_size; } static void get_term_dimensions(struct winsize *ws) @@ -1314,8 +1314,6 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) { int counter; - symbol__init(sizeof(struct sym_entry)); - page_size = sysconf(_SC_PAGE_SIZE); argc = parse_options(argc, argv, options, top_usage, 0); @@ -1332,6 +1330,9 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) if (!nr_counters) nr_counters = 1; + symbol__init(sizeof(struct sym_entry) + + (nr_counters + 1) * sizeof(unsigned long)); + if (delay_secs < 1) delay_secs = 1; From b269876c8d57fb8c801bea1fc34b461646c5abd0 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 17 Nov 2009 18:38:02 -0200 Subject: [PATCH 299/470] perf top: Don't allocate the source parsing members upfront MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Defer to parse_source() time allocating it. Now we use about this much memory: 1724 root 20 0 42104 10m 940 S 0.0 0.4 0:00.23 perf Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1258490282-1821-3-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 76 ++++++++++++++++++++++++---------------- 1 file changed, 45 insertions(+), 31 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 49cf87680fe..07b92c378ae 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -108,6 +108,13 @@ static int display_weighted = -1; * Symbols */ +struct sym_entry_source { + struct source_line *source; + struct source_line *lines; + struct source_line **lines_tail; + pthread_mutex_t lock; +}; + struct sym_entry { struct rb_node rb_node; struct list_head node; @@ -117,10 +124,7 @@ struct sym_entry { u16 name_len; u8 origin; struct map *map; - struct source_line *source; - struct source_line *lines; - struct source_line **lines_tail; - pthread_mutex_t source_lock; + struct sym_entry_source *src; unsigned long count[0]; }; @@ -172,6 +176,7 @@ static void sig_winch_handler(int sig __used) static void parse_source(struct sym_entry *syme) { struct symbol *sym; + struct sym_entry_source *source; struct map *map; FILE *file; char command[PATH_MAX*2]; @@ -181,8 +186,17 @@ static void parse_source(struct sym_entry *syme) if (!syme) return; - if (syme->lines) { - pthread_mutex_lock(&syme->source_lock); + if (syme->src == NULL) { + syme->src = calloc(1, sizeof(*source)); + if (syme->src == NULL) + return; + pthread_mutex_init(&syme->src->lock, NULL); + } + + source = syme->src; + + if (source->lines) { + pthread_mutex_lock(&source->lock); goto out_assign; } @@ -202,8 +216,8 @@ static void parse_source(struct sym_entry *syme) if (!file) return; - pthread_mutex_lock(&syme->source_lock); - 
syme->lines_tail = &syme->lines; + pthread_mutex_lock(&source->lock); + source->lines_tail = &source->lines; while (!feof(file)) { struct source_line *src; size_t dummy = 0; @@ -223,8 +237,8 @@ static void parse_source(struct sym_entry *syme) *c = 0; src->next = NULL; - *syme->lines_tail = src; - syme->lines_tail = &src->next; + *source->lines_tail = src; + source->lines_tail = &src->next; if (strlen(src->line)>8 && src->line[8] == ':') { src->eip = strtoull(src->line, NULL, 16); @@ -238,7 +252,7 @@ static void parse_source(struct sym_entry *syme) pclose(file); out_assign: sym_filter_entry = syme; - pthread_mutex_unlock(&syme->source_lock); + pthread_mutex_unlock(&source->lock); } static void __zero_source_counters(struct sym_entry *syme) @@ -246,7 +260,7 @@ static void __zero_source_counters(struct sym_entry *syme) int i; struct source_line *line; - line = syme->lines; + line = syme->src->lines; while (line) { for (i = 0; i < nr_counters; i++) line->count[i] = 0; @@ -261,13 +275,13 @@ static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip) if (syme != sym_filter_entry) return; - if (pthread_mutex_trylock(&syme->source_lock)) + if (pthread_mutex_trylock(&syme->src->lock)) return; - if (!syme->source) + if (syme->src == NULL || syme->src->source == NULL) goto out_unlock; - for (line = syme->lines; line; line = line->next) { + for (line = syme->src->lines; line; line = line->next) { if (line->eip == ip) { line->count[counter]++; break; @@ -276,7 +290,7 @@ static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip) break; } out_unlock: - pthread_mutex_unlock(&syme->source_lock); + pthread_mutex_unlock(&syme->src->lock); } static void lookup_sym_source(struct sym_entry *syme) @@ -287,14 +301,14 @@ static void lookup_sym_source(struct sym_entry *syme) sprintf(pattern, "<%s>:", symbol->name); - pthread_mutex_lock(&syme->source_lock); - for (line = syme->lines; line; line = line->next) { + pthread_mutex_lock(&syme->src->lock); + for (line = syme->src->lines; line; line = line->next) { if (strstr(line->line, pattern)) { - syme->source = line; + syme->src->source = line; break; } } - pthread_mutex_unlock(&syme->source_lock); + pthread_mutex_unlock(&syme->src->lock); } static void show_lines(struct source_line *queue, int count, int total) @@ -324,24 +338,24 @@ static void show_details(struct sym_entry *syme) if (!syme) return; - if (!syme->source) + if (!syme->src->source) lookup_sym_source(syme); - if (!syme->source) + if (!syme->src->source) return; symbol = sym_entry__symbol(syme); printf("Showing %s for %s\n", event_name(sym_counter), symbol->name); printf(" Events Pcnt (>=%d%%)\n", sym_pcnt_filter); - pthread_mutex_lock(&syme->source_lock); - line = syme->source; + pthread_mutex_lock(&syme->src->lock); + line = syme->src->source; while (line) { total += line->count[sym_counter]; line = line->next; } - line = syme->source; + line = syme->src->source; while (line) { float pcnt = 0.0; @@ -366,7 +380,7 @@ static void show_details(struct sym_entry *syme) line->count[sym_counter] = zero ? 
0 : line->count[sym_counter] * 7 / 8; line = line->next; } - pthread_mutex_unlock(&syme->source_lock); + pthread_mutex_unlock(&syme->src->lock); if (more) printf("%d lines not displayed, maybe increase display entries [e]\n", more); } @@ -647,10 +661,10 @@ static void prompt_symbol(struct sym_entry **target, const char *msg) /* zero counters of active symbol */ if (syme) { - pthread_mutex_lock(&syme->source_lock); + pthread_mutex_lock(&syme->src->lock); __zero_source_counters(syme); *target = NULL; - pthread_mutex_unlock(&syme->source_lock); + pthread_mutex_unlock(&syme->src->lock); } fprintf(stdout, "\n%s: ", msg); @@ -826,10 +840,10 @@ static void handle_keypress(int c) else { struct sym_entry *syme = sym_filter_entry; - pthread_mutex_lock(&syme->source_lock); + pthread_mutex_lock(&syme->src->lock); sym_filter_entry = NULL; __zero_source_counters(syme); - pthread_mutex_unlock(&syme->source_lock); + pthread_mutex_unlock(&syme->src->lock); } break; case 'U': @@ -915,7 +929,7 @@ static int symbol_filter(struct map *map, struct symbol *sym) syme = symbol__priv(sym); syme->map = map; - pthread_mutex_init(&syme->source_lock, NULL); + syme->src = NULL; if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) sym_filter_entry = syme; From 827f3b4974c5db2968d4979fe6a0ae00ab37bdd8 Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Wed, 18 Nov 2009 00:20:09 +0900 Subject: [PATCH 300/470] perf bench: Add memcpy() benchmark 'perf bench mem memcpy' is a benchmark suite for measuring memcpy() performance. Example on a Intel(R) Core(TM)2 Duo CPU E6850 @ 3.00GHz: | % perf bench mem memcpy -l 1GB | # Running mem/memcpy benchmark... | # Copying 1MB Bytes from 0xb7d98008 to 0xb7e99008 ... | | 726.216412 MB/Sec Signed-off-by: Hitoshi Mitake Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker LKML-Reference: <1258471212-30281-1-git-send-email-mitake@dcl.info.waseda.ac.jp> [ v2: updated changelog, clarified history of builtin-bench.c ] Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 1 + tools/perf/bench/bench.h | 1 + tools/perf/bench/mem-memcpy.c | 186 ++++++++++++++++++++++++++++++++++ tools/perf/builtin-bench.c | 15 ++- 4 files changed, 202 insertions(+), 1 deletion(-) create mode 100644 tools/perf/bench/mem-memcpy.c diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 3f0666af93d..53e663a5fa2 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -432,6 +432,7 @@ BUILTIN_OBJS += builtin-bench.o # Benchmark modules BUILTIN_OBJS += bench/sched-messaging.o BUILTIN_OBJS += bench/sched-pipe.o +BUILTIN_OBJS += bench/mem-memcpy.o BUILTIN_OBJS += builtin-help.o BUILTIN_OBJS += builtin-sched.o diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h index 9fbd8d745fa..f7781c6267c 100644 --- a/tools/perf/bench/bench.h +++ b/tools/perf/bench/bench.h @@ -3,6 +3,7 @@ extern int bench_sched_messaging(int argc, const char **argv, const char *prefix); extern int bench_sched_pipe(int argc, const char **argv, const char *prefix); +extern int bench_mem_memcpy(int argc, const char **argv, const char *prefix __used); #define BENCH_FORMAT_DEFAULT_STR "default" #define BENCH_FORMAT_DEFAULT 0 diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c new file mode 100644 index 00000000000..d4f4f9806ae --- /dev/null +++ b/tools/perf/bench/mem-memcpy.c @@ -0,0 +1,186 @@ +/* + * mem-memcpy.c + * + * memcpy: Simple memory copy in various ways + * + * Written by Hitoshi Mitake + */ +#include + +#include "../perf.h" +#include "../util/util.h" +#include 
"../util/parse-options.h" +#include "../util/string.h" +#include "../util/header.h" +#include "bench.h" + +#include +#include +#include +#include +#include + +#define K 1024 + +static const char *length_str = "1MB"; +static const char *routine = "default"; +static int use_clock = 0; + +static const struct option options[] = { + OPT_STRING('l', "length", &length_str, "1MB", + "Specify length of memory to copy. " + "available unit: B, MB, GB (upper and lower)"), + OPT_STRING('r', "routine", &routine, "default", + "Specify routine to copy"), + OPT_BOOLEAN('c', "clock", &use_clock, + "Use CPU clock for measuring"), + OPT_END() +}; + +struct routine { + const char *name; + const char *desc; + void * (*fn)(void *dst, const void *src, size_t len); +}; + +struct routine routines[] = { + { "default", + "Default memcpy() provided by glibc", + memcpy }, + { NULL, + NULL, + NULL } +}; + +static const char * const bench_mem_memcpy_usage[] = { + "perf bench mem memcpy ", + NULL +}; + +static int clock_fd; + +static struct perf_event_attr clock_attr = { + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES +}; + +static void init_clock(void) +{ + clock_fd = sys_perf_event_open(&clock_attr, getpid(), -1, -1, 0); + BUG_ON(clock_fd < 0); +} + +static u64 get_clock(void) +{ + int ret; + u64 clk; + + ret = read(clock_fd, &clk, sizeof(u64)); + BUG_ON(ret != sizeof(u64)); + + return clk; +} + +static double timeval2double(struct timeval *ts) +{ + return (double)ts->tv_sec + + (double)ts->tv_usec / (double)1000000; +} + +int bench_mem_memcpy(int argc, const char **argv, + const char *prefix __used) +{ + int i; + void *dst, *src; + size_t length; + double bps = 0.0; + struct timeval tv_start, tv_end, tv_diff; + u64 clock_start, clock_end, clock_diff; + + clock_start = clock_end = clock_diff = 0ULL; + argc = parse_options(argc, argv, options, + bench_mem_memcpy_usage, 0); + + tv_diff.tv_sec = 0; + tv_diff.tv_usec = 0; + length = (size_t)perf_atoll((char *)length_str); + if ((long long int)length <= 0) { + fprintf(stderr, "Invalid length:%s\n", length_str); + return 1; + } + + for (i = 0; routines[i].name; i++) { + if (!strcmp(routines[i].name, routine)) + break; + } + if (!routines[i].name) { + printf("Unknown routine:%s\n", routine); + printf("Available routines...\n"); + for (i = 0; routines[i].name; i++) { + printf("\t%s ... 
%s\n", + routines[i].name, routines[i].desc); + } + return 1; + } + + dst = calloc(length, sizeof(char)); + assert(dst); + src = calloc(length, sizeof(char)); + assert(src); + + if (bench_format == BENCH_FORMAT_DEFAULT) { + printf("# Copying %s Bytes from %p to %p ...\n\n", + length_str, src, dst); + } + + if (use_clock) { + init_clock(); + clock_start = get_clock(); + } else + BUG_ON(gettimeofday(&tv_start, NULL)); + + routines[i].fn(dst, src, length); + + if (use_clock) { + clock_end = get_clock(); + clock_diff = clock_end - clock_start; + } else { + BUG_ON(gettimeofday(&tv_end, NULL)); + timersub(&tv_end, &tv_start, &tv_diff); + bps = (double)((double)length / timeval2double(&tv_diff)); + } + + switch (bench_format) { + case BENCH_FORMAT_DEFAULT: + if (use_clock) { + printf(" %14lf Clock/Byte\n", + (double)clock_diff / (double)length); + } else { + if (bps < K) + printf(" %14lf B/Sec\n", bps); + else if (bps < K * K) + printf(" %14lfd KB/Sec\n", bps / 1024); + else if (bps < K * K * K) + printf(" %14lf MB/Sec\n", bps / 1024 / 1024); + else { + printf(" %14lf GB/Sec\n", + bps / 1024 / 1024 / 1024); + } + } + break; + case BENCH_FORMAT_SIMPLE: + if (use_clock) { + printf("%14lf\n", + (double)clock_diff / (double)length); + } else + printf("%lf\n", bps); + break; + default: + /* reaching here is something disaster */ + fprintf(stderr, "Unknown format:%d\n", bench_format); + exit(1); + break; + } + + return 0; +} diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c index 90c39baae0d..e043eb83092 100644 --- a/tools/perf/builtin-bench.c +++ b/tools/perf/builtin-bench.c @@ -12,6 +12,7 @@ * * Available subsystem list: * sched ... scheduler and IPC mechanism + * mem ... memory access performance * */ @@ -43,6 +44,15 @@ static struct bench_suite sched_suites[] = { NULL } }; +static struct bench_suite mem_suites[] = { + { "memcpy", + "Simple memory copy in various ways", + bench_mem_memcpy }, + { NULL, + NULL, + NULL } +}; + struct bench_subsys { const char *name; const char *summary; @@ -53,9 +63,12 @@ static struct bench_subsys subsystems[] = { { "sched", "scheduler and IPC mechanism", sched_suites }, + { "mem", + "memory access performance", + mem_suites }, { NULL, NULL, - NULL } + NULL } }; static void dump_suites(int subsys_index) From d3379ab9050e5522da2aac53d413651fc06be562 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 18 Nov 2009 20:20:50 -0200 Subject: [PATCH 301/470] perf symbols: Fix comparision of build_ids MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When we read the build_id from the DSO name to then index into /usr/lib/debug/.buildid/DSO_BUILD_ID[0:2]/DSO_BUILD_ID[2:], we were jumping directly to the comparision with the buildid we already have in dso->build_id (that came from the perf.data build_id section, collected at perf record time) unconditionally, even if we didn't had recorded it, and furthermore, comparing a formatted buildid with a rawbuildid, yikes. Fix it by deleting the dso__read_build_id() function, that was really misdesigned anyway, and do the necessary checks and correct comparison of raw buildids. 
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1258582853-8579-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 52 +++++++++++++--------------------------- 1 file changed, 16 insertions(+), 36 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 5cc96c86861..594f36a1da8 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -962,25 +962,6 @@ out: return err; } -static char *dso__read_build_id(struct dso *self) -{ - int len; - char *build_id = NULL; - unsigned char rawbf[BUILD_ID_SIZE]; - - len = filename__read_build_id(self->long_name, rawbf, sizeof(rawbf)); - if (len < 0) - goto out; - - build_id = malloc(len * 2 + 1); - if (build_id == NULL) - goto out; - - build_id__sprintf(rawbf, len, build_id); -out: - return build_id; -} - char dso__symtab_origin(const struct dso *self) { static const char origin[] = { @@ -1001,7 +982,8 @@ char dso__symtab_origin(const struct dso *self) int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) { int size = PATH_MAX; - char *name = malloc(size), *build_id = NULL; + char *name = malloc(size); + u8 build_id[BUILD_ID_SIZE]; int ret = -1; int fd; @@ -1023,8 +1005,6 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) more: do { - int berr = 0; - self->origin++; switch (self->origin) { case DSO__ORIG_FEDORA: @@ -1036,12 +1016,18 @@ more: self->long_name); break; case DSO__ORIG_BUILDID: - build_id = dso__read_build_id(self); - if (build_id != NULL) { + if (filename__read_build_id(self->long_name, build_id, + sizeof(build_id))) { + char build_id_hex[BUILD_ID_SIZE * 2 + 1]; + + build_id__sprintf(build_id, sizeof(build_id), + build_id_hex); snprintf(name, size, "/usr/lib/debug/.build-id/%.2s/%s.debug", - build_id, build_id + 2); - goto compare_build_id; + build_id_hex, build_id_hex + 2); + if (self->has_build_id) + goto compare_build_id; + break; } self->origin++; /* Fall thru */ @@ -1054,18 +1040,12 @@ more: } if (self->has_build_id) { - bool match; - build_id = malloc(BUILD_ID_SIZE); - if (build_id == NULL) + if (filename__read_build_id(name, build_id, + sizeof(build_id)) < 0) goto more; - berr = filename__read_build_id(name, build_id, - BUILD_ID_SIZE); compare_build_id: - match = berr > 0 && memcmp(build_id, self->build_id, - sizeof(self->build_id)) == 0; - free(build_id); - build_id = NULL; - if (!match) + if (memcmp(build_id, self->build_id, + sizeof(self->build_id)) != 0) goto more; } From e30a3d12ddf04add3268bfceb0e57ffe47f254c6 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 18 Nov 2009 20:20:51 -0200 Subject: [PATCH 302/470] perf symbols: Kill struct build_id_list and die() another day MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No need for this struct and its allocations, we can just use the ->build_id member we already have in struct dso, then ask for it to be read, and later traverse the dsos list, writing the buildid table to the perf.data file. As a bonus, one more die() function got killed. 
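Each table entry written here is a fixed build_id_event header followed by the DSO name padded out to a multiple of 64 bytes, with the total stored in header.size. The rounding is the usual power-of-two ALIGN idiom; in isolation (the module path string is purely illustrative):

  #include <stdio.h>
  #include <string.h>

  /* Round x up to the next multiple of a; a must be a power of two. */
  #define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

  int main(void)
  {
          const char *name = "/lib/modules/2.6.32/kernel/fs/ext4/ext4.ko";
          size_t len = ALIGN(strlen(name) + 1, 64UL);     /* name + NUL, padded */

          printf("%zu bytes of name are written for a %zu byte string\n",
                 len, strlen(name) + 1);
          return 0;
  }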
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1258582853-8579-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/event.h | 7 ------- tools/perf/util/header.c | 27 ++++++++++++++++----------- tools/perf/util/symbol.c | 37 +++++++++---------------------------- tools/perf/util/symbol.h | 2 +- 4 files changed, 26 insertions(+), 47 deletions(-) diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 1f771ce3a95..34c6fcb82d9 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -69,13 +69,6 @@ struct build_id_event { char filename[]; }; -struct build_id_list { - struct build_id_event event; - struct list_head list; - const char *dso_name; - int len; -}; - typedef union event_union { struct perf_event_header header; struct ip_event ip; diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index b01a9537977..31731f1606b 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -176,18 +176,24 @@ static int do_write(int fd, const void *buf, size_t size) return 0; } -static int write_buildid_table(int fd, struct list_head *id_head) +static int dsos__write_buildid_table(int fd) { - struct build_id_list *iter, *next; + struct dso *pos; - list_for_each_entry_safe(iter, next, id_head, list) { - struct build_id_event *b = &iter->event; + list_for_each_entry(pos, &dsos, node) { + struct build_id_event b; + size_t len; - if (do_write(fd, b, sizeof(*b)) < 0 || - do_write(fd, iter->dso_name, iter->len) < 0) + if (!pos->has_build_id) + continue; + len = pos->long_name_len + 1; + len = ALIGN(len, 64); + memset(&b, 0, sizeof(b)); + memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id)); + b.header.size = sizeof(b) + len; + if (do_write(fd, &b, sizeof(b)) < 0 || + do_write(fd, pos->long_name, len) < 0) return -1; - list_del(&iter->list); - free(iter); } return 0; @@ -196,14 +202,13 @@ static int write_buildid_table(int fd, struct list_head *id_head) static void perf_header__adds_write(struct perf_header *self, int fd) { - LIST_HEAD(id_list); int nr_sections; struct perf_file_section *feat_sec; int sec_size; u64 sec_start; int idx = 0; - if (fetch_build_id_table(&id_list)) + if (dsos__read_build_ids()) perf_header__set_feat(self, HEADER_BUILD_ID); nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS); @@ -238,7 +243,7 @@ perf_header__adds_write(struct perf_header *self, int fd) /* Write build-ids */ buildid_sec->offset = lseek(fd, 0, SEEK_CUR); - if (write_buildid_table(fd, &id_list) < 0) + if (dsos__write_buildid_table(fd) < 0) die("failed to write buildid table"); buildid_sec->size = lseek(fd, 0, SEEK_CUR) - buildid_sec->offset; } diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 594f36a1da8..946ec319568 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -883,38 +883,19 @@ out_close: return err; } -bool fetch_build_id_table(struct list_head *head) +bool dsos__read_build_ids(void) { - bool have_buildid = false; + bool have_build_id = false; struct dso *pos; - list_for_each_entry(pos, &dsos, node) { - struct build_id_list *new; - struct build_id_event b; - size_t len; + list_for_each_entry(pos, &dsos, node) + if (filename__read_build_id(pos->long_name, pos->build_id, + sizeof(pos->build_id)) > 0) { + have_build_id = true; + pos->has_build_id = true; + } - if (filename__read_build_id(pos->long_name, - &b.build_id, - sizeof(b.build_id)) < 0) - continue; - have_buildid = true; - 
memset(&b.header, 0, sizeof(b.header)); - len = pos->long_name_len + 1; - len = ALIGN(len, 64); - b.header.size = sizeof(b) + len; - - new = malloc(sizeof(*new)); - if (!new) - die("No memory\n"); - - memcpy(&new->event, &b, sizeof(b)); - new->dso_name = pos->long_name; - new->len = len; - - list_add_tail(&new->list, head); - } - - return have_buildid; + return have_build_id; } int filename__read_build_id(const char *filename, void *bf, size_t size) diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 5ad1019607d..546eb766d81 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -89,7 +89,7 @@ char dso__symtab_origin(const struct dso *self); void dso__set_build_id(struct dso *self, void *build_id); int filename__read_build_id(const char *filename, void *bf, size_t size); -bool fetch_build_id_table(struct list_head *head); +bool dsos__read_build_ids(void); int build_id__sprintf(u8 *self, int len, char *bf); int load_kernel(symbol_filter_t filter); From f1617b40596cb341ee6602a9d969c5e4cebe9260 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 18 Nov 2009 20:20:52 -0200 Subject: [PATCH 303/470] perf symbols: Record the build_ids of kernel modules too MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [root@doppio linux-2.6-tip]# perf record -a sleep 2s;perf buildid-list|tail [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.162 MB perf.data (~7078 samples) ] 881588fa57b3c1696bc91e5e804a11304f093535 [cfg80211] 4d47ce1da9d16bad00c962c072451b7c681e82df [snd_page_alloc] 5146377e89a7caac617f9782f1a02e46263d3a31 [rfkill] 2153b937bff0d345fea83b63a2e1d3138569f83d [i915] 4e6fb1bb97362e3ee4d306988b9ad6912d5fb9ae [drm_kms_helper] f56ef2bf853e3a798f0d8d51f797622e5dc4420e [drm] b0d157a3b5c4e017329ffc07c64623cd6ad65e95 [i2c_algo_bit] 8125374b905ef9fa8b65d98e166b008ad952f198 [i2c_core] fc875c6e5a90e7b915e9d445d0efc859e1b2678c [video] 4b43c5006589f977e9762fdfc7ac1a92b72fca52 [output] [root@doppio linux-2.6-tip]# elfutils libdwfl/linux-kernel-modules.c was used as reference, as suggested by Roland McGrath. 
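The sysfs file is just a raw stream of ELF notes, so finding the build-id means walking note headers until one of type NT_GNU_BUILD_ID with name "GNU" turns up, with name and descriptor sizes rounded up to 4 bytes. A buffer-based sketch of the same walk (ElfN_Nhdr has the identical layout for 32- and 64-bit ELF, so Elf32_Nhdr covers both; perf's sysfs__read_build_id() in the diff below reads incrementally from the fd instead):

  #include <elf.h>
  #include <string.h>
  #include <stddef.h>

  #define BUILD_ID_SIZE 20

  /* Scan a buffer of ELF notes and copy out the GNU build-id, if any. */
  static int notes__find_build_id(const void *buf, size_t size, unsigned char *bid)
  {
          const char *p = buf, *end = p + size;

          while ((size_t)(end - p) >= sizeof(Elf32_Nhdr)) {
                  const Elf32_Nhdr *nhdr = (const void *)p;
                  size_t namesz = (nhdr->n_namesz + 3) & ~3U;     /* 4-byte padded */
                  size_t descsz = (nhdr->n_descsz + 3) & ~3U;
                  const char *name = p + sizeof(*nhdr);

                  if (namesz + descsz > (size_t)(end - name))
                          break;
                  if (nhdr->n_type == NT_GNU_BUILD_ID &&
                      nhdr->n_namesz == sizeof("GNU") &&
                      memcmp(name, "GNU", sizeof("GNU")) == 0 &&
                      nhdr->n_descsz >= BUILD_ID_SIZE) {
                          memcpy(bid, name + namesz, BUILD_ID_SIZE);
                          return 0;
                  }
                  p = name + namesz + descsz;
          }
          return -1;
  }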
Signed-off-by: Arnaldo Carvalho de Melo Cc: Roland McGrath Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1258582853-8579-3-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/header.c | 5 ++++ tools/perf/util/symbol.c | 53 +++++++++++++++++++++++++++++++++++++++- tools/perf/util/symbol.h | 2 ++ 3 files changed, 59 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 31731f1606b..d3d656f9a62 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -241,6 +241,11 @@ perf_header__adds_write(struct perf_header *self, int fd) buildid_sec = &feat_sec[idx++]; + /* + * Read the list of loaded modules with its build_ids + */ + dsos__load_modules(); + /* Write build-ids */ buildid_sec->offset = lseek(fd, 0, SEEK_CUR); if (dsos__write_buildid_table(fd) < 0) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 946ec319568..7b4cedeb302 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -9,6 +9,7 @@ #include #include #include +#include #include enum dso_origin { @@ -943,6 +944,50 @@ out: return err; } +int sysfs__read_build_id(const char *filename, void *build_id, size_t size) +{ + int fd, err = -1; + + if (size < BUILD_ID_SIZE) + goto out; + + fd = open(filename, O_RDONLY); + if (fd < 0) + goto out; + + while (1) { + char bf[BUFSIZ]; + GElf_Nhdr nhdr; + int namesz, descsz; + + if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr)) + break; + + namesz = (nhdr.n_namesz + 3) & -4U; + descsz = (nhdr.n_descsz + 3) & -4U; + if (nhdr.n_type == NT_GNU_BUILD_ID && + nhdr.n_namesz == sizeof("GNU")) { + if (read(fd, bf, namesz) != namesz) + break; + if (memcmp(bf, "GNU", sizeof("GNU")) == 0) { + if (read(fd, build_id, + BUILD_ID_SIZE) == BUILD_ID_SIZE) { + err = 0; + break; + } + } else if (read(fd, bf, descsz) != descsz) + break; + } else { + int n = namesz + descsz; + if (read(fd, bf, n) != n) + break; + } + } + close(fd); +out: + return err; +} + char dso__symtab_origin(const struct dso *self) { static const char origin[] = { @@ -1218,7 +1263,7 @@ static struct map *map__new2(u64 start, struct dso *dso) return self; } -static int dsos__load_modules(void) +int dsos__load_modules(void) { char *line = NULL; size_t n; @@ -1268,6 +1313,12 @@ static int dsos__load_modules(void) goto out_delete_line; } + snprintf(name, sizeof(name), + "/sys/module/%s/notes/.note.gnu.build-id", line); + if (sysfs__read_build_id(name, dso->build_id, + sizeof(dso->build_id)) == 0) + dso->has_build_id = true; + dso->origin = DSO__ORIG_KMODULE; kernel_maps__insert(map); dsos__add(dso); diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 546eb766d81..da7ec1af255 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -78,6 +78,7 @@ void dso__delete(struct dso *self); struct symbol *dso__find_symbol(struct dso *self, u64 ip); int dsos__load_kernel(const char *vmlinux, symbol_filter_t filter, int modules); +int dsos__load_modules(void); struct dso *dsos__findnew(const char *name); int dso__load(struct dso *self, struct map *map, symbol_filter_t filter); void dsos__fprintf(FILE *fp); @@ -89,6 +90,7 @@ char dso__symtab_origin(const struct dso *self); void dso__set_build_id(struct dso *self, void *build_id); int filename__read_build_id(const char *filename, void *bf, size_t size); +int sysfs__read_build_id(const char *filename, void *bf, size_t size); bool dsos__read_build_ids(void); int build_id__sprintf(u8 *self, int len, char *bf); 
From 2446042c93bfc6eeebfc89e88fdef2435d2bb5c4 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 18 Nov 2009 20:20:53 -0200 Subject: [PATCH 304/470] perf symbols: Capture the running kernel buildid too MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [root@doppio linux-2.6-tip]# perf record -a -f sleep 3s ; perf buildid-list | grep vmlinux [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.171 MB perf.data (~7489 samples) ] 18e7cc53db62a7d35e9d6f6c9ddc23017d38ee9a vmlinux [root@doppio linux-2.6-tip]# Several refactorings were needed so that we can have symmetry between dsos__load_modules() and dsos__load_kernel(), i.e. those functions will respectively create and add to the dsos list the loaded modules and kernel, with its buildids, but not load its symbols. That is something the subcomands that need will have to call dso__load_kernel_sym(), just like we do with modules with dsos__load_module_sym()/dso__load_module_sym(). Next csets will actually use this info to stop producing bogus results using mismatched vmlinux and .ko files. Signed-off-by: Arnaldo Carvalho de Melo Cc: Roland McGrath Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1258582853-8579-4-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 7 ++++- tools/perf/util/header.c | 1 + tools/perf/util/symbol.c | 65 +++++++++++++++++++++++----------------- tools/perf/util/symbol.h | 3 +- 4 files changed, 46 insertions(+), 30 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 07b92c378ae..6d770ac7be0 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -948,7 +948,12 @@ static int symbol_filter(struct map *map, struct symbol *sym) static int parse_symbols(void) { - if (dsos__load_kernel(vmlinux_name, symbol_filter, 1) <= 0) + struct dso *kernel = dsos__load_kernel(); + + if (kernel == NULL) + return -1; + + if (dso__load_kernel_sym(kernel, symbol_filter, 1) <= 0) return -1; if (dump_symtab) diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index d3d656f9a62..425a29ba01a 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -241,6 +241,7 @@ perf_header__adds_write(struct perf_header *self, int fd) buildid_sec = &feat_sec[idx++]; + dsos__load_kernel(); /* * Read the list of loaded modules with its build_ids */ diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 7b4cedeb302..4d75e745288 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1352,17 +1352,11 @@ static int dso__load_vmlinux(struct dso *self, struct map *map, return err; } -int dsos__load_kernel(const char *vmlinux, symbol_filter_t filter, - int use_modules) +int dso__load_kernel_sym(struct dso *self, symbol_filter_t filter, int use_modules) { int err = -1; - struct dso *dso = dso__new(vmlinux); - if (dso == NULL) - return -1; - - dso->short_name = "[kernel]"; - kernel_map = map__new2(0, dso); + kernel_map = map__new2(0, self); if (kernel_map == NULL) goto out_delete_dso; @@ -1374,39 +1368,36 @@ int dsos__load_kernel(const char *vmlinux, symbol_filter_t filter, use_modules = 0; } - if (vmlinux) { - err = dso__load_vmlinux(dso, kernel_map, vmlinux, filter); - if (err > 0 && use_modules) { - int syms = dsos__load_modules_sym(filter); + err = dso__load_vmlinux(self, kernel_map, self->name, filter); + if (err > 0 && use_modules) { + int syms = dsos__load_modules_sym(filter); - if 
(syms < 0) - pr_warning("Failed to read module symbols!" - " Continuing...\n"); - else - err += syms; - } + if (syms < 0) + pr_warning("Failed to read module symbols!" + " Continuing...\n"); + else + err += syms; } if (err <= 0) err = kernel_maps__load_kallsyms(filter, use_modules); if (err > 0) { - struct rb_node *node = rb_first(&dso->syms); + struct rb_node *node = rb_first(&self->syms); struct symbol *sym = rb_entry(node, struct symbol, rb_node); kernel_map->start = sym->start; - node = rb_last(&dso->syms); + node = rb_last(&self->syms); sym = rb_entry(node, struct symbol, rb_node); kernel_map->end = sym->end; - dso->origin = DSO__ORIG_KERNEL; + self->origin = DSO__ORIG_KERNEL; kernel_maps__insert(kernel_map); /* * Now that we have all sorted out, just set the ->end of all * maps: */ kernel_maps__fixup_end(); - dsos__add(dso); if (verbose) kernel_maps__fprintf(stderr); @@ -1415,7 +1406,7 @@ int dsos__load_kernel(const char *vmlinux, symbol_filter_t filter, return err; out_delete_dso: - dso__delete(dso); + dso__delete(self); return -1; } @@ -1475,18 +1466,36 @@ size_t dsos__fprintf_buildid(FILE *fp) return ret; } -int load_kernel(symbol_filter_t filter) +struct dso *dsos__load_kernel(void) { - if (dsos__load_kernel(vmlinux_name, filter, modules) <= 0) - return -1; + struct dso *kernel = dso__new(vmlinux_name); + if (kernel == NULL) + return NULL; + + kernel->short_name = "[kernel]"; vdso = dso__new("[vdso]"); if (!vdso) - return -1; + return NULL; + if (sysfs__read_build_id("/sys/kernel/notes", kernel->build_id, + sizeof(kernel->build_id)) == 0) + kernel->has_build_id = true; + + dsos__add(kernel); dsos__add(vdso); - return 0; + return kernel; +} + +int load_kernel(symbol_filter_t filter) +{ + struct dso *kernel = dsos__load_kernel(); + + if (kernel == NULL) + return -1; + + return dso__load_kernel_sym(kernel, filter, modules); } void symbol__init(unsigned int priv_size) diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index da7ec1af255..f0593a649c3 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -77,10 +77,10 @@ void dso__delete(struct dso *self); struct symbol *dso__find_symbol(struct dso *self, u64 ip); -int dsos__load_kernel(const char *vmlinux, symbol_filter_t filter, int modules); int dsos__load_modules(void); struct dso *dsos__findnew(const char *name); int dso__load(struct dso *self, struct map *map, symbol_filter_t filter); +int dso__load_kernel_sym(struct dso *self, symbol_filter_t filter, int modules); void dsos__fprintf(FILE *fp); size_t dsos__fprintf_buildid(FILE *fp); @@ -94,6 +94,7 @@ int sysfs__read_build_id(const char *filename, void *bf, size_t size); bool dsos__read_build_ids(void); int build_id__sprintf(u8 *self, int len, char *bf); +struct dso *dsos__load_kernel(void); int load_kernel(symbol_filter_t filter); void symbol__init(unsigned int priv_size); From 4dc0a04bb18fe9b80cefa08694f46a3a19ebfe50 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 19 Nov 2009 14:55:55 -0200 Subject: [PATCH 305/470] perf tools: perf_header__read() shouldn't die() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit And also don't call the constructor in it, this way it adheres to the model the other methods follow. 
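With this change the caller owns the header object: allocate it with perf_header__new(), hand it to perf_header__read(), and dispose of it with perf_header__delete() on failure. A condensed fragment of the resulting call pattern, mirroring the builtin-timechart.c hunk below (input is an already-open perf.data file descriptor):

	struct perf_header *header = perf_header__new();
	int err;

	if (header == NULL)
		return -ENOMEM;

	err = perf_header__read(header, input);
	if (err < 0) {
		perf_header__delete(header);
		return err;
	}

	/* ... use the header, e.g. perf_header__sample_type(header) ... */
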
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1258649757-17554-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 16 ++++++----- tools/perf/builtin-timechart.c | 17 ++++++++--- tools/perf/util/data_map.c | 18 ++++++++---- tools/perf/util/header.c | 52 ++++++++++++++++++++-------------- tools/perf/util/header.h | 7 +++-- 5 files changed, 70 insertions(+), 40 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 82260c56db3..c97cb2ca8fa 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -400,7 +400,7 @@ static int __cmd_record(int argc, const char **argv) struct stat st; pid_t pid = 0; int flags; - int ret; + int err; unsigned long waking = 0; page_size = sysconf(_SC_PAGE_SIZE); @@ -434,16 +434,18 @@ static int __cmd_record(int argc, const char **argv) exit(-1); } - if (!file_new) - header = perf_header__read(output); - else - header = perf_header__new(); - + header = perf_header__new(); if (header == NULL) { pr_err("Not enough memory for reading perf file header\n"); return -1; } + if (!file_new) { + err = perf_header__read(header, output); + if (err < 0) + return err; + } + if (raw_samples) { perf_header__set_feat(header, HEADER_TRACE_INFO); } else { @@ -527,7 +529,7 @@ static int __cmd_record(int argc, const char **argv) if (hits == samples) { if (done) break; - ret = poll(event_array, nr_poll, -1); + err = poll(event_array, nr_poll, -1); waking++; } diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index 665877e4a94..dd4d82ac7aa 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c @@ -1093,7 +1093,7 @@ static void process_samples(void) static int __cmd_timechart(void) { - int ret, rc = EXIT_FAILURE; + int err, rc = EXIT_FAILURE; unsigned long offset = 0; unsigned long head, shift; struct stat statbuf; @@ -1111,8 +1111,8 @@ static int __cmd_timechart(void) exit(-1); } - ret = fstat(input, &statbuf); - if (ret < 0) { + err = fstat(input, &statbuf); + if (err < 0) { perror("failed to stat file"); exit(-1); } @@ -1122,7 +1122,16 @@ static int __cmd_timechart(void) exit(0); } - header = perf_header__read(input); + header = perf_header__new(); + if (header == NULL) + return -ENOMEM; + + err = perf_header__read(header, input); + if (err < 0) { + perf_header__delete(header); + return err; + } + head = header->data_offset; sample_type = perf_header__sample_type(header); diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c index 14cb8465eb0..b8fc0fa2f63 100644 --- a/tools/perf/util/data_map.c +++ b/tools/perf/util/data_map.c @@ -106,7 +106,7 @@ int mmap_dispatch_perf_file(struct perf_header **pheader, int *cwdlen, char **cwd) { - int ret, rc = EXIT_FAILURE; + int err, rc = EXIT_FAILURE; struct perf_header *header; unsigned long head, shift; unsigned long offset = 0; @@ -132,8 +132,8 @@ int mmap_dispatch_perf_file(struct perf_header **pheader, exit(-1); } - ret = fstat(input, &input_stat); - if (ret < 0) { + err = fstat(input, &input_stat); + if (err < 0) { perror("failed to stat file"); exit(-1); } @@ -149,8 +149,16 @@ int mmap_dispatch_perf_file(struct perf_header **pheader, exit(0); } - *pheader = perf_header__read(input); - header = *pheader; + header = perf_header__new(); + if (header == NULL) + return -ENOMEM; + + err = perf_header__read(header, input); + if (err < 0) { + perf_header__delete(header); + return err; + } + 
*pheader = header; head = header->data_offset; sample_type = perf_header__sample_type(header); diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 425a29ba01a..e66c7bd4cc8 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -78,16 +78,24 @@ struct perf_header *perf_header__new(void) return self; } +void perf_header__delete(struct perf_header *self) +{ + int i; + + for (i = 0; i < self->attrs; ++i) + perf_header_attr__delete(self->attr[i]); + + free(self->attr); + free(self); +} + int perf_header__add_attr(struct perf_header *self, struct perf_header_attr *attr) { - int pos = self->attrs; - if (self->frozen) return -1; - self->attrs++; - if (self->attrs > self->size) { + if (self->attrs == self->size) { int nsize = self->size * 2; struct perf_header_attr **nattr; @@ -98,7 +106,8 @@ int perf_header__add_attr(struct perf_header *self, self->size = nsize; self->attr = nattr; } - self->attr[pos] = attr; + + self->attr[self->attrs++] = attr; return 0; } @@ -441,19 +450,17 @@ static int perf_file_section__process(struct perf_file_section *self, return 0; } -struct perf_header *perf_header__read(int fd) +int perf_header__read(struct perf_header *self, int fd) { - struct perf_header *self = perf_header__new(); struct perf_file_header f_header; struct perf_file_attr f_attr; u64 f_id; int nr_attrs, nr_ids, i, j; - if (self == NULL) - die("nomem"); - - if (perf_file_header__read(&f_header, self, fd) < 0) - die("incompatible file format"); + if (perf_file_header__read(&f_header, self, fd) < 0) { + pr_debug("incompatible file format\n"); + return -EINVAL; + } nr_attrs = f_header.attrs.size / sizeof(f_attr); lseek(fd, f_header.attrs.offset, SEEK_SET); @@ -467,7 +474,7 @@ struct perf_header *perf_header__read(int fd) attr = perf_header_attr__new(&f_attr.attr); if (attr == NULL) - die("nomem"); + return -ENOMEM; nr_ids = f_attr.ids.size / sizeof(u64); lseek(fd, f_attr.ids.offset, SEEK_SET); @@ -475,11 +482,15 @@ struct perf_header *perf_header__read(int fd) for (j = 0; j < nr_ids; j++) { do_read(fd, &f_id, sizeof(f_id)); - if (perf_header_attr__add_id(attr, f_id) < 0) - die("nomem"); + if (perf_header_attr__add_id(attr, f_id) < 0) { + perf_header_attr__delete(attr); + return -ENOMEM; + } + } + if (perf_header__add_attr(self, attr) < 0) { + perf_header_attr__delete(attr); + return -ENOMEM; } - if (perf_header__add_attr(self, attr) < 0) - die("nomem"); lseek(fd, tmp, SEEK_SET); } @@ -487,8 +498,8 @@ struct perf_header *perf_header__read(int fd) if (f_header.event_types.size) { lseek(fd, f_header.event_types.offset, SEEK_SET); events = malloc(f_header.event_types.size); - if (!events) - die("nomem"); + if (events == NULL) + return -ENOMEM; do_read(fd, events, f_header.event_types.size); event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type); } @@ -498,8 +509,7 @@ struct perf_header *perf_header__read(int fd) lseek(fd, self->data_offset, SEEK_SET); self->frozen = 1; - - return self; + return 0; } u64 perf_header__sample_type(struct perf_header *header) diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index f46a94e09ee..dc8fedb066a 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -55,7 +55,10 @@ struct perf_header { DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); }; -struct perf_header *perf_header__read(int fd); +struct perf_header *perf_header__new(void); +void perf_header__delete(struct perf_header *self); + +int perf_header__read(struct perf_header *self, int fd); void perf_header__write(struct perf_header 
*self, int fd, bool at_exit); int perf_header__add_attr(struct perf_header *self, @@ -75,8 +78,6 @@ perf_header__find_attr(u64 id, struct perf_header *header); void perf_header__set_feat(struct perf_header *self, int feat); bool perf_header__has_feat(const struct perf_header *self, int feat); -struct perf_header *perf_header__new(void); - int perf_header__process_sections(struct perf_header *self, int fd, int (*process)(struct perf_file_section *self, int feat, int fd)); From d5eed904bb6010b429b82c47e7cdb6a32f0c1343 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 19 Nov 2009 14:55:56 -0200 Subject: [PATCH 306/470] perf tools: Eliminate some more die() uses in library functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This time in perf_header__adds_write, propagating the do_write error returns. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1258649757-17554-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 7 +++- tools/perf/util/header.c | 80 ++++++++++++++++++++++++------------- tools/perf/util/header.h | 2 +- 3 files changed, 59 insertions(+), 30 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index c97cb2ca8fa..87f98fdb051 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -474,8 +474,11 @@ static int __cmd_record(int argc, const char **argv) } } - if (file_new) - perf_header__write(header, output, false); + if (file_new) { + err = perf_header__write(header, output, false); + if (err < 0) + return err; + } if (!system_wide) event__synthesize_thread(pid, process_synthesized_event); diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index e66c7bd4cc8..d5c81ebc0a8 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -176,7 +176,7 @@ static int do_write(int fd, const void *buf, size_t size) int ret = write(fd, buf, size); if (ret < 0) - return -1; + return -errno; size -= ret; buf += ret; @@ -190,6 +190,7 @@ static int dsos__write_buildid_table(int fd) struct dso *pos; list_for_each_entry(pos, &dsos, node) { + int err; struct build_id_event b; size_t len; @@ -200,33 +201,35 @@ static int dsos__write_buildid_table(int fd) memset(&b, 0, sizeof(b)); memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id)); b.header.size = sizeof(b) + len; - if (do_write(fd, &b, sizeof(b)) < 0 || - do_write(fd, pos->long_name, len) < 0) - return -1; + err = do_write(fd, &b, sizeof(b)); + if (err < 0) + return err; + err = do_write(fd, pos->long_name, len); + if (err < 0) + return err; } return 0; } -static void -perf_header__adds_write(struct perf_header *self, int fd) +static int perf_header__adds_write(struct perf_header *self, int fd) { int nr_sections; struct perf_file_section *feat_sec; int sec_size; u64 sec_start; - int idx = 0; + int idx = 0, err; if (dsos__read_build_ids()) perf_header__set_feat(self, HEADER_BUILD_ID); nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS); if (!nr_sections) - return; + return 0; feat_sec = calloc(sizeof(*feat_sec), nr_sections); - if (!feat_sec) - die("No memory"); + if (feat_sec == NULL) + return -ENOMEM; sec_size = sizeof(*feat_sec) * nr_sections; @@ -258,23 +261,29 @@ perf_header__adds_write(struct perf_header *self, int fd) /* Write build-ids */ buildid_sec->offset = lseek(fd, 0, SEEK_CUR); - if (dsos__write_buildid_table(fd) < 0) - die("failed to write 
buildid table"); + err = dsos__write_buildid_table(fd); + if (err < 0) { + pr_debug("failed to write buildid table\n"); + goto out_free; + } buildid_sec->size = lseek(fd, 0, SEEK_CUR) - buildid_sec->offset; } lseek(fd, sec_start, SEEK_SET); - if (do_write(fd, feat_sec, sec_size) < 0) - die("failed to write feature section"); + err = do_write(fd, feat_sec, sec_size); + if (err < 0) + pr_debug("failed to write feature section\n"); +out_free: free(feat_sec); + return err; } -void perf_header__write(struct perf_header *self, int fd, bool at_exit) +int perf_header__write(struct perf_header *self, int fd, bool at_exit) { struct perf_file_header f_header; struct perf_file_attr f_attr; struct perf_header_attr *attr; - int i; + int i, err; lseek(fd, sizeof(f_header), SEEK_SET); @@ -283,8 +292,11 @@ void perf_header__write(struct perf_header *self, int fd, bool at_exit) attr = self->attr[i]; attr->id_offset = lseek(fd, 0, SEEK_CUR); - if (do_write(fd, attr->id, attr->ids * sizeof(u64)) < 0) - die("failed to write perf header"); + err = do_write(fd, attr->id, attr->ids * sizeof(u64)); + if (err < 0) { + pr_debug("failed to write perf header\n"); + return err; + } } @@ -300,20 +312,30 @@ void perf_header__write(struct perf_header *self, int fd, bool at_exit) .size = attr->ids * sizeof(u64), } }; - if (do_write(fd, &f_attr, sizeof(f_attr)) < 0) - die("failed to write perf header attribute"); + err = do_write(fd, &f_attr, sizeof(f_attr)); + if (err < 0) { + pr_debug("failed to write perf header attribute\n"); + return err; + } } self->event_offset = lseek(fd, 0, SEEK_CUR); self->event_size = event_count * sizeof(struct perf_trace_event_type); - if (events) - if (do_write(fd, events, self->event_size) < 0) - die("failed to write perf header events"); + if (events) { + err = do_write(fd, events, self->event_size); + if (err < 0) { + pr_debug("failed to write perf header events\n"); + return err; + } + } self->data_offset = lseek(fd, 0, SEEK_CUR); - if (at_exit) - perf_header__adds_write(self, fd); + if (at_exit) { + err = perf_header__adds_write(self, fd); + if (err < 0) + return err; + } f_header = (struct perf_file_header){ .magic = PERF_MAGIC, @@ -336,11 +358,15 @@ void perf_header__write(struct perf_header *self, int fd, bool at_exit) memcpy(&f_header.adds_features, &self->adds_features, sizeof(self->adds_features)); lseek(fd, 0, SEEK_SET); - if (do_write(fd, &f_header, sizeof(f_header)) < 0) - die("failed to write perf header"); + err = do_write(fd, &f_header, sizeof(f_header)); + if (err < 0) { + pr_debug("failed to write perf header\n"); + return err; + } lseek(fd, self->data_offset + self->data_size, SEEK_SET); self->frozen = 1; + return 0; } static void do_read(int fd, void *buf, size_t size) diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index dc8fedb066a..d1dbe2b79c4 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -59,7 +59,7 @@ struct perf_header *perf_header__new(void); void perf_header__delete(struct perf_header *self); int perf_header__read(struct perf_header *self, int fd); -void perf_header__write(struct perf_header *self, int fd, bool at_exit); +int perf_header__write(struct perf_header *self, int fd, bool at_exit); int perf_header__add_attr(struct perf_header *self, struct perf_header_attr *attr); From 6b0cb5f9f7033c72b19697c33deab83f0dd9848d Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 19 Nov 2009 14:55:57 -0200 Subject: [PATCH 307/470] perf tools: Don't die() in mmap_dispatch_perf_file MIME-Version: 1.0 Content-Type: 
text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Propagate the error, that, interestingly, are already handled by all callers :-) Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1258649757-17554-3-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/data_map.c | 80 +++++++++++++++++++++----------------- 1 file changed, 44 insertions(+), 36 deletions(-) diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c index b8fc0fa2f63..5543e7d0487 100644 --- a/tools/perf/util/data_map.c +++ b/tools/perf/util/data_map.c @@ -106,7 +106,7 @@ int mmap_dispatch_perf_file(struct perf_header **pheader, int *cwdlen, char **cwd) { - int err, rc = EXIT_FAILURE; + int err; struct perf_header *header; unsigned long head, shift; unsigned long offset = 0; @@ -118,64 +118,69 @@ int mmap_dispatch_perf_file(struct perf_header **pheader, int input; char *buf; - if (!curr_handler) - die("Forgot to register perf file handler"); + if (curr_handler == NULL) { + pr_debug("Forgot to register perf file handler\n"); + return -EINVAL; + } page_size = getpagesize(); input = open(input_name, O_RDONLY); if (input < 0) { - fprintf(stderr, " failed to open file: %s", input_name); + pr_err("Failed to open file: %s", input_name); if (!strcmp(input_name, "perf.data")) - fprintf(stderr, " (try 'perf record' first)"); - fprintf(stderr, "\n"); - exit(-1); + pr_err(" (try 'perf record' first)"); + pr_err("\n"); + return -errno; } - err = fstat(input, &input_stat); - if (err < 0) { - perror("failed to stat file"); - exit(-1); + if (fstat(input, &input_stat) < 0) { + pr_err("failed to stat file"); + err = -errno; + goto out_close; } + err = -EACCES; if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) { - fprintf(stderr, "file: %s not owned by current user or root\n", + pr_err("file: %s not owned by current user or root\n", input_name); - exit(-1); + goto out_close; } - if (!input_stat.st_size) { - fprintf(stderr, "zero-sized file, nothing to do!\n"); - exit(0); + if (input_stat.st_size == 0) { + pr_info("zero-sized file, nothing to do!\n"); + goto done; } + err = -ENOMEM; header = perf_header__new(); if (header == NULL) - return -ENOMEM; + goto out_close; err = perf_header__read(header, input); - if (err < 0) { - perf_header__delete(header); - return err; - } + if (err < 0) + goto out_delete; *pheader = header; head = header->data_offset; sample_type = perf_header__sample_type(header); - if (curr_handler->sample_type_check) - if (curr_handler->sample_type_check(sample_type) < 0) - exit(-1); + err = -EINVAL; + if (curr_handler->sample_type_check && + curr_handler->sample_type_check(sample_type) < 0) + goto out_delete; + err = -ENOMEM; if (load_kernel(NULL) < 0) { - perror("failed to load kernel symbols"); - return EXIT_FAILURE; + pr_err("failed to load kernel symbols\n"); + goto out_delete; } if (!full_paths) { if (getcwd(__cwd, sizeof(__cwd)) == NULL) { - perror("failed to get the current directory"); - return EXIT_FAILURE; + pr_err("failed to get the current directory\n"); + err = -errno; + goto out_delete; } *cwd = __cwd; *cwdlen = strlen(*cwd); @@ -189,11 +194,12 @@ int mmap_dispatch_perf_file(struct perf_header **pheader, head -= shift; remap: - buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, - MAP_SHARED, input, offset); + buf = mmap(NULL, page_size * mmap_window, PROT_READ, + MAP_SHARED, input, offset); if (buf == MAP_FAILED) { - perror("failed to mmap file"); - 
exit(-1); + pr_err("failed to mmap file\n"); + err = -errno; + goto out_delete; } more: @@ -250,10 +256,12 @@ more: goto more; done: - rc = EXIT_SUCCESS; + err = 0; +out_close: close(input); - return rc; + return err; +out_delete: + perf_header__delete(header); + goto out_close; } - - From ce64c62074d945fe5f8a7f01bdc30125f994ea67 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 16 Nov 2009 18:06:31 -0500 Subject: [PATCH 308/470] x86: Instruction decoder test should generate build warning Since some instructions are not decoded correctly by older versions of objdump, it may cause false positive error in insn decoder posttest. This changes build error of insn decoder test to build warning. Signed-off-by: Masami Hiramatsu Cc: systemtap Cc: DLE Cc: Stephen Rothwell Cc: Randy Dunlap Cc: Jim Keniston Cc: Stephen Rothwell LKML-Reference: <20091116230631.5250.41579.stgit@harusame> Signed-off-by: Ingo Molnar --- arch/x86/tools/test_get_len.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/arch/x86/tools/test_get_len.c b/arch/x86/tools/test_get_len.c index af75e07217b..d8214dc03fa 100644 --- a/arch/x86/tools/test_get_len.c +++ b/arch/x86/tools/test_get_len.c @@ -114,6 +114,7 @@ int main(int argc, char **argv) unsigned char insn_buf[16]; struct insn insn; int insns = 0, c; + int warnings = 0; parse_args(argc, argv); @@ -151,18 +152,22 @@ int main(int argc, char **argv) insn_init(&insn, insn_buf, x86_64); insn_get_length(&insn); if (insn.length != nb) { - fprintf(stderr, "Error: %s found a difference at %s\n", + warnings++; + fprintf(stderr, "Warning: %s found difference at %s\n", prog, sym); - fprintf(stderr, "Error: %s", line); - fprintf(stderr, "Error: objdump says %d bytes, but " + fprintf(stderr, "Warning: %s", line); + fprintf(stderr, "Warning: objdump says %d bytes, but " "insn_get_length() says %d\n", nb, insn.length); if (verbose) dump_insn(stderr, &insn); - exit(2); } } - fprintf(stderr, "Succeed: decoded and checked %d instructions\n", - insns); + if (warnings) + fprintf(stderr, "Warning: decoded and checked %d" + " instructions with %d warnings\n", insns, warnings); + else + fprintf(stderr, "Succeed: decoded and checked %d" + " instructions\n", insns); return 0; } From ba77c9e11111a172c9e8687fe16a6a173a61916f Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Fri, 20 Nov 2009 15:53:25 +0800 Subject: [PATCH 309/470] perf: Add 'perf kmem' tool This tool is mostly a perf version of kmemtrace-user. The following information is provided by this tool: - the total amount of memory allocated and fragmentation per call-site - the total amount of memory allocated and fragmentation per allocation - total memory allocated and fragmentation in the collected dataset - ... 
Sample output: # ./perf kmem record ^C # ./perf kmem --stat caller --stat alloc -l 10 ------------------------------------------------------------------------------ Callsite | Total_alloc/Per | Total_req/Per | Hit | Fragmentation ------------------------------------------------------------------------------ 0xc052f37a | 790528/4096 | 790528/4096 | 193 | 0.000% 0xc0541d70 | 524288/4096 | 524288/4096 | 128 | 0.000% 0xc051cc68 | 481600/200 | 481600/200 | 2408 | 0.000% 0xc0572623 | 297444/676 | 297440/676 | 440 | 0.001% 0xc05399f1 | 73476/164 | 73472/164 | 448 | 0.005% 0xc05243bf | 51456/256 | 51456/256 | 201 | 0.000% 0xc0730d0e | 31844/497 | 31808/497 | 64 | 0.113% 0xc0734c4e | 17152/256 | 17152/256 | 67 | 0.000% 0xc0541a6d | 16384/128 | 16384/128 | 128 | 0.000% 0xc059c217 | 13120/40 | 13120/40 | 328 | 0.000% 0xc0501ee6 | 11264/88 | 11264/88 | 128 | 0.000% 0xc04daef0 | 7504/682 | 7128/648 | 11 | 5.011% 0xc04e14a3 | 4216/191 | 4216/191 | 22 | 0.000% 0xc05041ca | 3524/44 | 3520/44 | 80 | 0.114% 0xc0734fa3 | 2104/701 | 1620/540 | 3 | 23.004% 0xc05ec9f1 | 2024/289 | 2016/288 | 7 | 0.395% 0xc06a1999 | 1792/256 | 1792/256 | 7 | 0.000% 0xc0463b9a | 1584/144 | 1584/144 | 11 | 0.000% 0xc0541eb0 | 1024/16 | 1024/16 | 64 | 0.000% 0xc06a19ac | 896/128 | 896/128 | 7 | 0.000% 0xc05721c0 | 772/12 | 768/12 | 64 | 0.518% 0xc054d1e6 | 288/57 | 280/56 | 5 | 2.778% 0xc04b562e | 157/31 | 154/30 | 5 | 1.911% 0xc04b536f | 80/16 | 80/16 | 5 | 0.000% 0xc05855a0 | 64/64 | 36/36 | 1 | 43.750% ------------------------------------------------------------------------------ ------------------------------------------------------------------------------ Alloc Ptr | Total_alloc/Per | Total_req/Per | Hit | Fragmentation ------------------------------------------------------------------------------ 0xda884000 | 1052672/4096 | 1052672/4096 | 257 | 0.000% 0xda886000 | 262144/4096 | 262144/4096 | 64 | 0.000% 0xf60c7c00 | 16512/128 | 16512/128 | 129 | 0.000% 0xf59a4118 | 13120/40 | 13120/40 | 328 | 0.000% 0xdfd4b2c0 | 11264/88 | 11264/88 | 128 | 0.000% 0xf5274600 | 7680/256 | 7680/256 | 30 | 0.000% 0xe8395000 | 5948/594 | 5464/546 | 10 | 8.137% 0xe59c3c00 | 5748/479 | 5712/476 | 12 | 0.626% 0xf4cd1a80 | 3524/44 | 3520/44 | 80 | 0.114% 0xe5bd1600 | 2892/482 | 2856/476 | 6 | 1.245% ... | ... | ... | ... | ... ------------------------------------------------------------------------------ SUMMARY ======= Total bytes requested: 2333626 Total bytes allocated: 2353712 Total bytes wasted on internal fragmentation: 20086 Internal fragmentation: 0.853375% TODO: - show sym+offset in 'callsite' column - show cross node allocation stats - collect more useful stats? - ... 
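For reference, the Fragmentation column is 100 - 100 * Total_req / Total_alloc, computed per row: the last call-site above was handed a 64-byte object for a 36-byte request, i.e. 100 - 100 * 36/64 = 43.750%, and the summary line applies the same formula to the totals (100 - 100 * 2333626/2353712 ≈ 0.853%).
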
Signed-off-by: Li Zefan Acked-by: Pekka Enberg Acked-by: Peter Zijlstra Cc: Frederic Weisbecker Cc: Steven Rostedt Cc: Eduard - Gabriel Munteanu Cc: linux-mm@kvack.org LKML-Reference: <4B064AF5.9060208@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 1 + tools/perf/builtin-kmem.c | 578 ++++++++++++++++++++++++++++++++++++++ tools/perf/builtin.h | 1 + tools/perf/perf.c | 27 +- 4 files changed, 594 insertions(+), 13 deletions(-) create mode 100644 tools/perf/builtin-kmem.c diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 3f0666af93d..d7198c54bb6 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -444,6 +444,7 @@ BUILTIN_OBJS += builtin-timechart.o BUILTIN_OBJS += builtin-top.o BUILTIN_OBJS += builtin-trace.o BUILTIN_OBJS += builtin-probe.o +BUILTIN_OBJS += builtin-kmem.o PERFLIBS = $(LIB_FILE) diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c new file mode 100644 index 00000000000..f315b052f81 --- /dev/null +++ b/tools/perf/builtin-kmem.c @@ -0,0 +1,578 @@ +#include "builtin.h" +#include "perf.h" + +#include "util/util.h" +#include "util/cache.h" +#include "util/symbol.h" +#include "util/thread.h" +#include "util/header.h" + +#include "util/parse-options.h" +#include "util/trace-event.h" + +#include "util/debug.h" +#include "util/data_map.h" + +#include + +struct alloc_stat; +typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *); + +static char const *input_name = "perf.data"; + +static struct perf_header *header; +static u64 sample_type; + +static int alloc_flag; +static int caller_flag; + +sort_fn_t alloc_sort_fn; +sort_fn_t caller_sort_fn; + +static int alloc_lines = -1; +static int caller_lines = -1; + +static char *cwd; +static int cwdlen; + +struct alloc_stat { + union { + struct { + char *name; + u64 call_site; + }; + u64 ptr; + }; + u64 bytes_req; + u64 bytes_alloc; + u32 hit; + + struct rb_node node; +}; + +static struct rb_root root_alloc_stat; +static struct rb_root root_alloc_sorted; +static struct rb_root root_caller_stat; +static struct rb_root root_caller_sorted; + +static unsigned long total_requested, total_allocated; + +struct raw_event_sample { + u32 size; + char data[0]; +}; + +static int +process_comm_event(event_t *event, unsigned long offset, unsigned long head) +{ + struct thread *thread = threads__findnew(event->comm.pid); + + dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->comm.comm, event->comm.pid); + + if (thread == NULL || + thread__set_comm(thread, event->comm.comm)) { + dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); + return -1; + } + + return 0; +} + +static void insert_alloc_stat(unsigned long ptr, + int bytes_req, int bytes_alloc) +{ + struct rb_node **node = &root_alloc_stat.rb_node; + struct rb_node *parent = NULL; + struct alloc_stat *data = NULL; + + if (!alloc_flag) + return; + + while (*node) { + parent = *node; + data = rb_entry(*node, struct alloc_stat, node); + + if (ptr > data->ptr) + node = &(*node)->rb_right; + else if (ptr < data->ptr) + node = &(*node)->rb_left; + else + break; + } + + if (data && data->ptr == ptr) { + data->hit++; + data->bytes_req += bytes_req; + data->bytes_alloc += bytes_req; + } else { + data = malloc(sizeof(*data)); + data->ptr = ptr; + data->hit = 1; + data->bytes_req = bytes_req; + data->bytes_alloc = bytes_alloc; + + rb_link_node(&data->node, parent, node); + rb_insert_color(&data->node, &root_alloc_stat); + } +} + +static void 
insert_caller_stat(unsigned long call_site, + int bytes_req, int bytes_alloc) +{ + struct rb_node **node = &root_caller_stat.rb_node; + struct rb_node *parent = NULL; + struct alloc_stat *data = NULL; + + if (!caller_flag) + return; + + while (*node) { + parent = *node; + data = rb_entry(*node, struct alloc_stat, node); + + if (call_site > data->call_site) + node = &(*node)->rb_right; + else if (call_site < data->call_site) + node = &(*node)->rb_left; + else + break; + } + + if (data && data->call_site == call_site) { + data->hit++; + data->bytes_req += bytes_req; + data->bytes_alloc += bytes_req; + } else { + data = malloc(sizeof(*data)); + data->call_site = call_site; + data->hit = 1; + data->bytes_req = bytes_req; + data->bytes_alloc = bytes_alloc; + + rb_link_node(&data->node, parent, node); + rb_insert_color(&data->node, &root_caller_stat); + } +} + +static void process_alloc_event(struct raw_event_sample *raw, + struct event *event, + int cpu __used, + u64 timestamp __used, + struct thread *thread __used, + int node __used) +{ + unsigned long call_site; + unsigned long ptr; + int bytes_req; + int bytes_alloc; + + ptr = raw_field_value(event, "ptr", raw->data); + call_site = raw_field_value(event, "call_site", raw->data); + bytes_req = raw_field_value(event, "bytes_req", raw->data); + bytes_alloc = raw_field_value(event, "bytes_alloc", raw->data); + + insert_alloc_stat(ptr, bytes_req, bytes_alloc); + insert_caller_stat(call_site, bytes_req, bytes_alloc); + + total_requested += bytes_req; + total_allocated += bytes_alloc; +} + +static void process_free_event(struct raw_event_sample *raw __used, + struct event *event __used, + int cpu __used, + u64 timestamp __used, + struct thread *thread __used) +{ +} + +static void +process_raw_event(event_t *raw_event __used, void *more_data, + int cpu, u64 timestamp, struct thread *thread) +{ + struct raw_event_sample *raw = more_data; + struct event *event; + int type; + + type = trace_parse_common_type(raw->data); + event = trace_find_event(type); + + if (!strcmp(event->name, "kmalloc") || + !strcmp(event->name, "kmem_cache_alloc")) { + process_alloc_event(raw, event, cpu, timestamp, thread, 0); + return; + } + + if (!strcmp(event->name, "kmalloc_node") || + !strcmp(event->name, "kmem_cache_alloc_node")) { + process_alloc_event(raw, event, cpu, timestamp, thread, 1); + return; + } + + if (!strcmp(event->name, "kfree") || + !strcmp(event->name, "kmem_cache_free")) { + process_free_event(raw, event, cpu, timestamp, thread); + return; + } +} + +static int +process_sample_event(event_t *event, unsigned long offset, unsigned long head) +{ + u64 ip = event->ip.ip; + u64 timestamp = -1; + u32 cpu = -1; + u64 period = 1; + void *more_data = event->ip.__more_data; + struct thread *thread = threads__findnew(event->ip.pid); + + if (sample_type & PERF_SAMPLE_TIME) { + timestamp = *(u64 *)more_data; + more_data += sizeof(u64); + } + + if (sample_type & PERF_SAMPLE_CPU) { + cpu = *(u32 *)more_data; + more_data += sizeof(u32); + more_data += sizeof(u32); /* reserved */ + } + + if (sample_type & PERF_SAMPLE_PERIOD) { + period = *(u64 *)more_data; + more_data += sizeof(u64); + } + + dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->header.misc, + event->ip.pid, event->ip.tid, + (void *)(long)ip, + (long long)period); + + if (thread == NULL) { + pr_debug("problem processing %d event, skipping it.\n", + event->header.type); + return -1; + } + + dump_printf(" ... 
thread: %s:%d\n", thread->comm, thread->pid); + + process_raw_event(event, more_data, cpu, timestamp, thread); + + return 0; +} + +static int sample_type_check(u64 type) +{ + sample_type = type; + + if (!(sample_type & PERF_SAMPLE_RAW)) { + fprintf(stderr, + "No trace sample to read. Did you call perf record " + "without -R?"); + return -1; + } + + return 0; +} + +static struct perf_file_handler file_handler = { + .process_sample_event = process_sample_event, + .process_comm_event = process_comm_event, + .sample_type_check = sample_type_check, +}; + +static int read_events(void) +{ + register_idle_thread(); + register_perf_file_handler(&file_handler); + + return mmap_dispatch_perf_file(&header, input_name, 0, 0, + &cwdlen, &cwd); +} + +static double fragmentation(unsigned long n_req, unsigned long n_alloc) +{ + if (n_alloc == 0) + return 0.0; + else + return 100.0 - (100.0 * n_req / n_alloc); +} + +static void __print_result(struct rb_root *root, int n_lines, int is_caller) +{ + struct rb_node *next; + + printf("\n ------------------------------------------------------------------------------\n"); + if (is_caller) + printf(" Callsite |"); + else + printf(" Alloc Ptr |"); + printf(" Total_alloc/Per | Total_req/Per | Hit | Fragmentation\n"); + printf(" ------------------------------------------------------------------------------\n"); + + next = rb_first(root); + + while (next && n_lines--) { + struct alloc_stat *data; + + data = rb_entry(next, struct alloc_stat, node); + + printf(" %-16p | %8llu/%-6lu | %8llu/%-6lu | %6lu | %8.3f%%\n", + is_caller ? (void *)(unsigned long)data->call_site : + (void *)(unsigned long)data->ptr, + (unsigned long long)data->bytes_alloc, + (unsigned long)data->bytes_alloc / data->hit, + (unsigned long long)data->bytes_req, + (unsigned long)data->bytes_req / data->hit, + (unsigned long)data->hit, + fragmentation(data->bytes_req, data->bytes_alloc)); + + next = rb_next(next); + } + + if (n_lines == -1) + printf(" ... | ... | ... | ... | ... 
\n"); + + printf(" ------------------------------------------------------------------------------\n"); +} + +static void print_summary(void) +{ + printf("\nSUMMARY\n=======\n"); + printf("Total bytes requested: %lu\n", total_requested); + printf("Total bytes allocated: %lu\n", total_allocated); + printf("Total bytes wasted on internal fragmentation: %lu\n", + total_allocated - total_requested); + printf("Internal fragmentation: %f%%\n", + fragmentation(total_requested, total_allocated)); +} + +static void print_result(void) +{ + if (caller_flag) + __print_result(&root_caller_sorted, caller_lines, 1); + if (alloc_flag) + __print_result(&root_alloc_sorted, alloc_lines, 0); + print_summary(); +} + +static void sort_insert(struct rb_root *root, struct alloc_stat *data, + sort_fn_t sort_fn) +{ + struct rb_node **new = &(root->rb_node); + struct rb_node *parent = NULL; + + while (*new) { + struct alloc_stat *this; + int cmp; + + this = rb_entry(*new, struct alloc_stat, node); + parent = *new; + + cmp = sort_fn(data, this); + + if (cmp > 0) + new = &((*new)->rb_left); + else + new = &((*new)->rb_right); + } + + rb_link_node(&data->node, parent, new); + rb_insert_color(&data->node, root); +} + +static void __sort_result(struct rb_root *root, struct rb_root *root_sorted, + sort_fn_t sort_fn) +{ + struct rb_node *node; + struct alloc_stat *data; + + for (;;) { + node = rb_first(root); + if (!node) + break; + + rb_erase(node, root); + data = rb_entry(node, struct alloc_stat, node); + sort_insert(root_sorted, data, sort_fn); + } +} + +static void sort_result(void) +{ + __sort_result(&root_alloc_stat, &root_alloc_sorted, alloc_sort_fn); + __sort_result(&root_caller_stat, &root_caller_sorted, caller_sort_fn); +} + +static int __cmd_kmem(void) +{ + setup_pager(); + read_events(); + sort_result(); + print_result(); + + return 0; +} + +static const char * const kmem_usage[] = { + "perf kmem [] {record}", + NULL +}; + + +static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r) +{ + if (l->ptr < r->ptr) + return -1; + else if (l->ptr > r->ptr) + return 1; + return 0; +} + +static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r) +{ + if (l->call_site < r->call_site) + return -1; + else if (l->call_site > r->call_site) + return 1; + return 0; +} + +static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r) +{ + if (l->bytes_alloc < r->bytes_alloc) + return -1; + else if (l->bytes_alloc > r->bytes_alloc) + return 1; + return 0; +} + +static int parse_sort_opt(const struct option *opt __used, + const char *arg, int unset __used) +{ + sort_fn_t sort_fn; + + if (!arg) + return -1; + + if (strcmp(arg, "ptr") == 0) + sort_fn = ptr_cmp; + else if (strcmp(arg, "call_site") == 0) + sort_fn = callsite_cmp; + else if (strcmp(arg, "bytes") == 0) + sort_fn = bytes_cmp; + else + return -1; + + if (caller_flag > alloc_flag) + caller_sort_fn = sort_fn; + else + alloc_sort_fn = sort_fn; + + return 0; +} + +static int parse_stat_opt(const struct option *opt __used, + const char *arg, int unset __used) +{ + if (!arg) + return -1; + + if (strcmp(arg, "alloc") == 0) + alloc_flag = (caller_flag + 1); + else if (strcmp(arg, "caller") == 0) + caller_flag = (alloc_flag + 1); + else + return -1; + return 0; +} + +static int parse_line_opt(const struct option *opt __used, + const char *arg, int unset __used) +{ + int lines; + + if (!arg) + return -1; + + lines = strtoul(arg, NULL, 10); + + if (caller_flag > alloc_flag) + caller_lines = lines; + else + alloc_lines = lines; + + return 0; +} + +static const struct 
option kmem_options[] = { + OPT_STRING('i', "input", &input_name, "file", + "input file name"), + OPT_CALLBACK(0, "stat", NULL, "|", + "stat selector, Pass 'alloc' or 'caller'.", + parse_stat_opt), + OPT_CALLBACK('s', "sort", NULL, "key", + "sort by key: ptr, call_site, hit, bytes", + parse_sort_opt), + OPT_CALLBACK('l', "line", NULL, "num", + "show n lins", + parse_line_opt), + OPT_END() +}; + +static const char *record_args[] = { + "record", + "-a", + "-R", + "-M", + "-f", + "-c", "1", + "-e", "kmem:kmalloc", + "-e", "kmem:kmalloc_node", + "-e", "kmem:kfree", + "-e", "kmem:kmem_cache_alloc", + "-e", "kmem:kmem_cache_alloc_node", + "-e", "kmem:kmem_cache_free", +}; + +static int __cmd_record(int argc, const char **argv) +{ + unsigned int rec_argc, i, j; + const char **rec_argv; + + rec_argc = ARRAY_SIZE(record_args) + argc - 1; + rec_argv = calloc(rec_argc + 1, sizeof(char *)); + + for (i = 0; i < ARRAY_SIZE(record_args); i++) + rec_argv[i] = strdup(record_args[i]); + + for (j = 1; j < (unsigned int)argc; j++, i++) + rec_argv[i] = argv[j]; + + return cmd_record(i, rec_argv, NULL); +} + +int cmd_kmem(int argc, const char **argv, const char *prefix __used) +{ + symbol__init(0); + + argc = parse_options(argc, argv, kmem_options, kmem_usage, 0); + + if (argc && !strncmp(argv[0], "rec", 3)) + return __cmd_record(argc, argv); + else if (argc) + usage_with_options(kmem_usage, kmem_options); + + if (!alloc_sort_fn) + alloc_sort_fn = bytes_cmp; + if (!caller_sort_fn) + caller_sort_fn = bytes_cmp; + + return __cmd_kmem(); +} + diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h index 9b02d85091f..a3d8bf65f26 100644 --- a/tools/perf/builtin.h +++ b/tools/perf/builtin.h @@ -28,5 +28,6 @@ extern int cmd_top(int argc, const char **argv, const char *prefix); extern int cmd_trace(int argc, const char **argv, const char *prefix); extern int cmd_version(int argc, const char **argv, const char *prefix); extern int cmd_probe(int argc, const char **argv, const char *prefix); +extern int cmd_kmem(int argc, const char **argv, const char *prefix); #endif diff --git a/tools/perf/perf.c b/tools/perf/perf.c index 89b82acac7d..cf64049bc9b 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c @@ -285,20 +285,21 @@ static void handle_internal_command(int argc, const char **argv) { const char *cmd = argv[0]; static struct cmd_struct commands[] = { - { "help", cmd_help, 0 }, - { "list", cmd_list, 0 }, { "buildid-list", cmd_buildid_list, 0 }, - { "record", cmd_record, 0 }, - { "report", cmd_report, 0 }, - { "bench", cmd_bench, 0 }, - { "stat", cmd_stat, 0 }, - { "timechart", cmd_timechart, 0 }, - { "top", cmd_top, 0 }, - { "annotate", cmd_annotate, 0 }, - { "version", cmd_version, 0 }, - { "trace", cmd_trace, 0 }, - { "sched", cmd_sched, 0 }, - { "probe", cmd_probe, 0 }, + { "help", cmd_help, 0 }, + { "list", cmd_list, 0 }, + { "record", cmd_record, 0 }, + { "report", cmd_report, 0 }, + { "bench", cmd_bench, 0 }, + { "stat", cmd_stat, 0 }, + { "timechart", cmd_timechart, 0 }, + { "top", cmd_top, 0 }, + { "annotate", cmd_annotate, 0 }, + { "version", cmd_version, 0 }, + { "trace", cmd_trace, 0 }, + { "sched", cmd_sched, 0 }, + { "probe", cmd_probe, 0 }, + { "kmem", cmd_kmem, 0 }, }; unsigned int i; static const char ext[] = STRIP_EXTENSION; From 80509e27e40d7554e576405ed9f5b7966c567112 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 20 Nov 2009 12:13:08 -0500 Subject: [PATCH 310/470] x86: Fix insn decoder test typos Fix postest_verbose to posttest_verbose, and add posttest_64bit option for CONFIG_64BIT != 
y, since old command just passed '-' instead of '-n' when CONFIG_64BIT is not set. Signed-off-by: Masami Hiramatsu Cc: Ingo Molnar Cc: Stephen Rothwell Cc: Randy Dunlap Cc: Jim Keniston LKML-Reference: <20091120171307.6715.66099.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: H. Peter Anvin --- arch/x86/tools/Makefile | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile index 4688f90ce5a..c80b0792cd8 100644 --- a/arch/x86/tools/Makefile +++ b/arch/x86/tools/Makefile @@ -1,13 +1,19 @@ PHONY += posttest ifeq ($(KBUILD_VERBOSE),1) - postest_verbose = -v + posttest_verbose = -v else - postest_verbose = + posttest_verbose = +endif + +ifeq ($(CONFIG_64BIT),y) + posttest_64bit = -y +else + posttest_64bit = -n endif quiet_cmd_posttest = TEST $@ - cmd_posttest = $(OBJDUMP) -d -j .text $(objtree)/vmlinux | awk -f $(srctree)/arch/x86/tools/distill.awk | $(obj)/test_get_len -$(CONFIG_64BIT) $(posttest_verbose) + cmd_posttest = $(OBJDUMP) -d -j .text $(objtree)/vmlinux | $(AWK) -f $(srctree)/arch/x86/tools/distill.awk | $(obj)/test_get_len $(posttest_64bit) $(posttest_verbose) posttest: $(obj)/test_get_len vmlinux $(call cmd,posttest) From 6f5f67267dc4faecd9cba63894de92ca92a608b8 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 20 Nov 2009 12:13:14 -0500 Subject: [PATCH 311/470] x86: insn decoder test checks objdump version Check objdump version before using it for insn decoder build test, because some older objdump can't decode AVX code correctly. Signed-off-by: Masami Hiramatsu Cc: Ingo Molnar Cc: Stephen Rothwell Cc: Randy Dunlap Cc: Jim Keniston LKML-Reference: <20091120171314.6715.30390.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: H. Peter Anvin --- arch/x86/tools/Makefile | 5 ++++- arch/x86/tools/chkobjdump.awk | 23 +++++++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) create mode 100644 arch/x86/tools/chkobjdump.awk diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile index c80b0792cd8..f8208267733 100644 --- a/arch/x86/tools/Makefile +++ b/arch/x86/tools/Makefile @@ -12,8 +12,11 @@ else posttest_64bit = -n endif +distill_awk = $(srctree)/arch/x86/tools/distill.awk +chkobjdump = $(srctree)/arch/x86/tools/chkobjdump.awk + quiet_cmd_posttest = TEST $@ - cmd_posttest = $(OBJDUMP) -d -j .text $(objtree)/vmlinux | $(AWK) -f $(srctree)/arch/x86/tools/distill.awk | $(obj)/test_get_len $(posttest_64bit) $(posttest_verbose) + cmd_posttest = ($(OBJDUMP) -v | $(AWK) -f $(chkobjdump)) || $(OBJDUMP) -d -j .text $(objtree)/vmlinux | $(AWK) -f $(distill_awk) | $(obj)/test_get_len $(posttest_64bit) $(posttest_verbose) posttest: $(obj)/test_get_len vmlinux $(call cmd,posttest) diff --git a/arch/x86/tools/chkobjdump.awk b/arch/x86/tools/chkobjdump.awk new file mode 100644 index 00000000000..0d13cd9fdcf --- /dev/null +++ b/arch/x86/tools/chkobjdump.awk @@ -0,0 +1,23 @@ +# GNU objdump version checker +# +# Usage: +# objdump -v | awk -f chkobjdump.awk +BEGIN { + # objdump version 2.19 or later is OK for the test. + od_ver = 2; + od_sver = 19; +} + +/^GNU/ { + split($4, ver, "."); + if (ver[1] > od_ver || + (ver[1] == od_ver && ver[2] >= od_sver)) { + exit 1; + } else { + printf("Warning: objdump version %s is older than %d.%d\n", + $4, od_ver, od_sver); + print("Warning: Skipping posttest."); + # Logic is inverted, because we just skip test without error. 
+ exit 0; + } +} From 6671cb1674e69e2aba3d610714bdd3e97a7b51ff Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 20 Nov 2009 20:51:24 -0200 Subject: [PATCH 312/470] perf symbols: Remove unrelated actions from dso__load_kernel_sym MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It should just load kernel symbols, not load the list of modules. There are more stuff to move to other routines, but lets do it in several steps. End goal is to be able to defer symbol table loading till we find a hit for that map address range. So that the kernel & modules are handled just like all the other DSOs in the system. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1258757489-5978-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 5 ++-- tools/perf/builtin-report.c | 3 ++- tools/perf/builtin-top.c | 10 +++++++- tools/perf/util/data_map.c | 2 +- tools/perf/util/symbol.c | 46 +++++++++++++++-------------------- tools/perf/util/symbol.h | 7 +++--- 6 files changed, 38 insertions(+), 35 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 77d50a6d680..b6da1476ab1 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -33,6 +33,7 @@ static int input; static int full_paths; static int print_line; +static bool use_modules; static unsigned long page_size; static unsigned long mmap_window = 32; @@ -636,7 +637,7 @@ static int __cmd_annotate(void) exit(0); } - if (load_kernel(symbol_filter) < 0) { + if (load_kernel(symbol_filter, use_modules) < 0) { perror("failed to load kernel symbols"); return EXIT_FAILURE; } @@ -742,7 +743,7 @@ static const struct option options[] = { OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), OPT_STRING('k', "vmlinux", &vmlinux_name, "file", "vmlinux pathname"), - OPT_BOOLEAN('m', "modules", &modules, + OPT_BOOLEAN('m', "modules", &use_modules, "load module symbols - WARNING: use only with -k and LIVE kernel"), OPT_BOOLEAN('l', "print-line", &print_line, "print matching source lines (may be slow)"), diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 1a806d5f05c..0af48401f08 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -38,6 +38,7 @@ static char *dso_list_str, *comm_list_str, *sym_list_str, static struct strlist *dso_list, *comm_list, *sym_list; static int force; +static bool use_modules; static int full_paths; static int show_nr_samples; @@ -1023,7 +1024,7 @@ static const struct option options[] = { "dump raw trace in ASCII"), OPT_STRING('k', "vmlinux", &vmlinux_name, "file", "vmlinux pathname"), OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), - OPT_BOOLEAN('m', "modules", &modules, + OPT_BOOLEAN('m', "modules", &use_modules, "load module symbols - WARNING: use only with -k and LIVE kernel"), OPT_BOOLEAN('n', "show-nr-samples", &show_nr_samples, "Show a column with the number of samples"), diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 6d770ac7be0..48cc1084bc3 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -953,8 +953,16 @@ static int parse_symbols(void) if (kernel == NULL) return -1; + if (dsos__load_modules() < 0) + pr_debug("Couldn't read the complete list of modules, " + "continuing...\n"); + + if (dsos__load_modules_sym(symbol_filter) < 0) + pr_warning("Failed to read module 
symbols, continuing...\n"); + if (dso__load_kernel_sym(kernel, symbol_filter, 1) <= 0) - return -1; + pr_debug("Couldn't read the complete list of kernel symbols, " + "continuing...\n"); if (dump_symtab) dsos__fprintf(stderr); diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c index 5543e7d0487..a444a2645c8 100644 --- a/tools/perf/util/data_map.c +++ b/tools/perf/util/data_map.c @@ -171,7 +171,7 @@ int mmap_dispatch_perf_file(struct perf_header **pheader, goto out_delete; err = -ENOMEM; - if (load_kernel(NULL) < 0) { + if (load_kernel(NULL, 1) < 0) { pr_err("failed to load kernel symbols\n"); goto out_delete; } diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 4d75e745288..3b23c18cd36 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1230,7 +1230,7 @@ failure: return -1; } -static int dsos__load_modules_sym(symbol_filter_t filter) +int dsos__load_modules_sym(symbol_filter_t filter) { struct utsname uts; char modules_path[PATH_MAX]; @@ -1352,33 +1352,18 @@ static int dso__load_vmlinux(struct dso *self, struct map *map, return err; } -int dso__load_kernel_sym(struct dso *self, symbol_filter_t filter, int use_modules) +int dso__load_kernel_sym(struct dso *self, symbol_filter_t filter, + int use_modules) { - int err = -1; + int err; kernel_map = map__new2(0, self); if (kernel_map == NULL) - goto out_delete_dso; + return -1; kernel_map->map_ip = kernel_map->unmap_ip = identity__map_ip; - if (use_modules && dsos__load_modules() < 0) { - pr_warning("Failed to load list of modules in use! " - "Continuing...\n"); - use_modules = 0; - } - err = dso__load_vmlinux(self, kernel_map, self->name, filter); - if (err > 0 && use_modules) { - int syms = dsos__load_modules_sym(filter); - - if (syms < 0) - pr_warning("Failed to read module symbols!" 
- " Continuing...\n"); - else - err += syms; - } - if (err <= 0) err = kernel_maps__load_kallsyms(filter, use_modules); @@ -1404,17 +1389,12 @@ int dso__load_kernel_sym(struct dso *self, symbol_filter_t filter, int use_modul } return err; - -out_delete_dso: - dso__delete(self); - return -1; } LIST_HEAD(dsos); struct dso *vdso; const char *vmlinux_name = "vmlinux"; -int modules; static void dsos__add(struct dso *dso) { @@ -1488,14 +1468,26 @@ struct dso *dsos__load_kernel(void) return kernel; } -int load_kernel(symbol_filter_t filter) +int load_kernel(symbol_filter_t filter, bool use_modules) { struct dso *kernel = dsos__load_kernel(); if (kernel == NULL) return -1; - return dso__load_kernel_sym(kernel, filter, modules); + if (use_modules) { + if (dsos__load_modules() < 0) + pr_warning("Failed to load list of modules in use, " + "continuing...\n"); + else if (dsos__load_modules_sym(filter) < 0) + pr_warning("Failed to read module symbols, " + "continuing...\n"); + } + + if (dso__load_kernel_sym(kernel, filter, use_modules) < 0) + pr_warning("Failed to read kernel symbols, continuing...\n"); + + return 0; } void symbol__init(unsigned int priv_size) diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index f0593a649c3..3d9d346d101 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -78,9 +78,11 @@ void dso__delete(struct dso *self); struct symbol *dso__find_symbol(struct dso *self, u64 ip); int dsos__load_modules(void); +int dsos__load_modules_sym(symbol_filter_t filter); struct dso *dsos__findnew(const char *name); int dso__load(struct dso *self, struct map *map, symbol_filter_t filter); -int dso__load_kernel_sym(struct dso *self, symbol_filter_t filter, int modules); +int dso__load_kernel_sym(struct dso *self, symbol_filter_t filter, + int use_modules); void dsos__fprintf(FILE *fp); size_t dsos__fprintf_buildid(FILE *fp); @@ -95,7 +97,7 @@ bool dsos__read_build_ids(void); int build_id__sprintf(u8 *self, int len, char *bf); struct dso *dsos__load_kernel(void); -int load_kernel(symbol_filter_t filter); +int load_kernel(symbol_filter_t filter, bool use_modules); void symbol__init(unsigned int priv_size); @@ -103,5 +105,4 @@ extern struct list_head dsos; extern struct map *kernel_map; extern struct dso *vdso; extern const char *vmlinux_name; -extern int modules; #endif /* __PERF_SYMBOL */ From fd7a346ea292074e9f6cdb5232a57c56bf98fdc9 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 20 Nov 2009 20:51:25 -0200 Subject: [PATCH 313/470] perf symbols: Filename__read_build_id should look at .notes section too MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In the kernel we have more than one notes section, so the linker script combines all and puts them into a ".notes" combined section. So we need to look at both sections and also traverse them looking at multiple GElf_Nhdr entries till we find the one we want, with the build_id. 
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1258757489-5978-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 47 ++++++++++++++++++++++++++++++++-------- 1 file changed, 38 insertions(+), 9 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 3b23c18cd36..d22030828c2 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -899,13 +899,19 @@ bool dsos__read_build_ids(void) return have_build_id; } +/* + * Align offset to 4 bytes as needed for note name and descriptor data. + */ +#define NOTE_ALIGN(n) (((n) + 3) & -4U) + int filename__read_build_id(const char *filename, void *bf, size_t size) { int fd, err = -1; GElf_Ehdr ehdr; GElf_Shdr shdr; - Elf_Data *build_id_data; + Elf_Data *data; Elf_Scn *sec; + void *ptr; Elf *elf; if (size < BUILD_ID_SIZE) @@ -928,14 +934,37 @@ int filename__read_build_id(const char *filename, void *bf, size_t size) sec = elf_section_by_name(elf, &ehdr, &shdr, ".note.gnu.build-id", NULL); - if (sec == NULL) + if (sec == NULL) { + sec = elf_section_by_name(elf, &ehdr, &shdr, + ".notes", NULL); + if (sec == NULL) + goto out_elf_end; + } + + data = elf_getdata(sec, NULL); + if (data == NULL) goto out_elf_end; - build_id_data = elf_getdata(sec, NULL); - if (build_id_data == NULL) - goto out_elf_end; - memcpy(bf, build_id_data->d_buf + 16, BUILD_ID_SIZE); - err = BUILD_ID_SIZE; + ptr = data->d_buf; + while (ptr < (data->d_buf + data->d_size)) { + GElf_Nhdr *nhdr = ptr; + int namesz = NOTE_ALIGN(nhdr->n_namesz), + descsz = NOTE_ALIGN(nhdr->n_descsz); + const char *name; + + ptr += sizeof(*nhdr); + name = ptr; + ptr += namesz; + if (nhdr->n_type == NT_GNU_BUILD_ID && + nhdr->n_namesz == sizeof("GNU")) { + if (memcmp(name, "GNU", sizeof("GNU")) == 0) { + memcpy(bf, ptr, BUILD_ID_SIZE); + err = BUILD_ID_SIZE; + break; + } + } + ptr += descsz; + } out_elf_end: elf_end(elf); out_close: @@ -963,8 +992,8 @@ int sysfs__read_build_id(const char *filename, void *build_id, size_t size) if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr)) break; - namesz = (nhdr.n_namesz + 3) & -4U; - descsz = (nhdr.n_descsz + 3) & -4U; + namesz = NOTE_ALIGN(nhdr.n_namesz); + descsz = NOTE_ALIGN(nhdr.n_descsz); if (nhdr.n_type == NT_GNU_BUILD_ID && nhdr.n_namesz == sizeof("GNU")) { if (read(fd, bf, namesz) != namesz) From 78075caad99dc36ec6ef5826b7a5273ea14295fc Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 20 Nov 2009 20:51:26 -0200 Subject: [PATCH 314/470] perf symbols: Introduce dso__build_id_equal MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Will be used in more places. 
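Purely for illustration, the kind of call site this helper enables looks roughly like this (a hypothetical snippet using names from this file, not part of the patch):

	u8 build_id[BUILD_ID_SIZE];

	if (filename__read_build_id(pathname, build_id, sizeof(build_id)) > 0 &&
	    !dso__build_id_equal(dso, build_id))
		pr_debug("%s: build id mismatch, skipping it\n", pathname);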
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1258757489-5978-3-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index d22030828c2..c324bdf8bab 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -884,6 +884,11 @@ out_close: return err; } +static bool dso__build_id_equal(const struct dso *self, u8 *build_id) +{ + return memcmp(self->build_id, build_id, sizeof(self->build_id)) == 0; +} + bool dsos__read_build_ids(void) { bool have_build_id = false; @@ -1099,8 +1104,7 @@ more: sizeof(build_id)) < 0) goto more; compare_build_id: - if (memcmp(build_id, self->build_id, - sizeof(self->build_id)) != 0) + if (!dso__build_id_equal(self, build_id)) goto more; } From c338aee853db197e1855b393e6d6cc667784537f Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 20 Nov 2009 20:51:27 -0200 Subject: [PATCH 315/470] perf symbols: Do lazy symtab loading for the kernel & modules too MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Just like we do with the other DSOs. This also simplifies the kernel_maps setup process, now all that the tools need to do is to call kernel_maps__init and the maps for the modules and kernel will be created, then, later, when kernel_maps__find_symbol() is used, it will also call maps__find_symbol that already checks if the symtab was loaded, loading it if needed. Now if one does 'perf top --hide_kernel_symbols' we won't pay the price of loading the (many) symbols in /proc/kallsyms or vmlinux. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1258757489-5978-4-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 8 +- tools/perf/builtin-report.c | 6 +- tools/perf/builtin-top.c | 35 ++----- tools/perf/util/data_map.c | 4 +- tools/perf/util/event.h | 3 + tools/perf/util/header.c | 6 +- tools/perf/util/map.c | 23 +++++ tools/perf/util/symbol.c | 173 ++++++++++++---------------------- tools/perf/util/symbol.h | 9 +- tools/perf/util/thread.h | 3 +- 10 files changed, 109 insertions(+), 161 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index b6da1476ab1..203152729a6 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -157,7 +157,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) if (event->header.misc & PERF_RECORD_MISC_KERNEL) { level = 'k'; - sym = kernel_maps__find_symbol(ip, &map); + sym = kernel_maps__find_symbol(ip, &map, symbol_filter); dump_printf(" ...... dso: %s\n", map ? map->dso->long_name : ""); } else if (event->header.misc & PERF_RECORD_MISC_USER) { @@ -637,9 +637,9 @@ static int __cmd_annotate(void) exit(0); } - if (load_kernel(symbol_filter, use_modules) < 0) { - perror("failed to load kernel symbols"); - return EXIT_FAILURE; + if (kernel_maps__init(use_modules) < 0) { + pr_err("failed to create kernel maps for symbol resolution\b"); + return -1; } remap: diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 0af48401f08..0d39e805be2 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -449,7 +449,7 @@ got_map: * trick of looking in the whole kernel symbol list. 
*/ if ((long long)ip < 0) - return kernel_maps__find_symbol(ip, mapp); + return kernel_maps__find_symbol(ip, mapp, NULL); } dump_printf(" ...... dso: %s\n", map ? map->dso->long_name : ""); @@ -496,7 +496,7 @@ static struct symbol **resolve_callchain(struct thread *thread, struct map *map, case PERF_CONTEXT_HV: break; case PERF_CONTEXT_KERNEL: - sym = kernel_maps__find_symbol(ip, &map); + sym = kernel_maps__find_symbol(ip, &map, NULL); break; default: sym = resolve_symbol(thread, &map, &ip); @@ -716,7 +716,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) if (cpumode == PERF_RECORD_MISC_KERNEL) { level = 'k'; - sym = kernel_maps__find_symbol(ip, &map); + sym = kernel_maps__find_symbol(ip, &map, NULL); dump_printf(" ...... dso: %s\n", map ? map->dso->long_name : ""); } else if (cpumode == PERF_RECORD_MISC_USER) { diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 48cc1084bc3..ea49c2e9dda 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -830,6 +830,8 @@ static void handle_keypress(int c) case 'q': case 'Q': printf("exiting.\n"); + if (dump_symtab) + dsos__fprintf(stderr); exit(0); case 's': prompt_symbol(&sym_filter_entry, "Enter details symbol"); @@ -946,30 +948,6 @@ static int symbol_filter(struct map *map, struct symbol *sym) return 0; } -static int parse_symbols(void) -{ - struct dso *kernel = dsos__load_kernel(); - - if (kernel == NULL) - return -1; - - if (dsos__load_modules() < 0) - pr_debug("Couldn't read the complete list of modules, " - "continuing...\n"); - - if (dsos__load_modules_sym(symbol_filter) < 0) - pr_warning("Failed to read module symbols, continuing...\n"); - - if (dso__load_kernel_sym(kernel, symbol_filter, 1) <= 0) - pr_debug("Couldn't read the complete list of kernel symbols, " - "continuing...\n"); - - if (dump_symtab) - dsos__fprintf(stderr); - - return 0; -} - static void event__process_sample(const event_t *self, int counter) { u64 ip = self->ip.ip; @@ -1012,7 +990,7 @@ static void event__process_sample(const event_t *self, int counter) if (hide_kernel_symbols) return; - sym = kernel_maps__find_symbol(ip, &map); + sym = kernel_maps__find_symbol(ip, &map, symbol_filter); if (sym == NULL) return; break; @@ -1339,7 +1317,7 @@ static const struct option options[] = { int cmd_top(int argc, const char **argv, const char *prefix __used) { - int counter; + int counter, err; page_size = sysconf(_SC_PAGE_SIZE); @@ -1363,10 +1341,11 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) if (delay_secs < 1) delay_secs = 1; - parse_symbols(); + err = kernel_maps__init(true); + if (err < 0) + return err; parse_source(sym_filter_entry); - /* * User specified count overrides default frequency. 
*/ diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c index a444a2645c8..e7b6c2bea3d 100644 --- a/tools/perf/util/data_map.c +++ b/tools/perf/util/data_map.c @@ -171,8 +171,8 @@ int mmap_dispatch_perf_file(struct perf_header **pheader, goto out_delete; err = -ENOMEM; - if (load_kernel(NULL, 1) < 0) { - pr_err("failed to load kernel symbols\n"); + if (kernel_maps__init(true) < 0) { + pr_err("failed to setup the kernel maps to resolve symbols\n"); goto out_delete; } diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 34c6fcb82d9..f1e39261265 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -115,10 +115,13 @@ typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym); void map__init(struct map *self, u64 start, u64 end, u64 pgoff, struct dso *dso); struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen); +void map__delete(struct map *self); struct map *map__clone(struct map *self); int map__overlap(struct map *l, struct map *r); size_t map__fprintf(struct map *self, FILE *fp); struct symbol *map__find_symbol(struct map *self, u64 ip, symbol_filter_t filter); +void map__fixup_start(struct map *self); +void map__fixup_end(struct map *self); int event__synthesize_thread(pid_t pid, int (*process)(event_t *event)); void event__synthesize_threads(int (*process)(event_t *event)); diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index d5c81ebc0a8..ac3410b8e9e 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -253,11 +253,11 @@ static int perf_header__adds_write(struct perf_header *self, int fd) buildid_sec = &feat_sec[idx++]; - dsos__load_kernel(); /* - * Read the list of loaded modules with its build_ids + * Read the kernel buildid nad the list of loaded modules with + * its build_ids: */ - dsos__load_modules(); + kernel_maps__init(true); /* Write build-ids */ buildid_sec->offset = lseek(fd, 0, SEEK_CUR); diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 94ca95073c4..09412321a80 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -75,6 +75,29 @@ out_delete: return NULL; } +void map__delete(struct map *self) +{ + free(self); +} + +void map__fixup_start(struct map *self) +{ + struct rb_node *nd = rb_first(&self->dso->syms); + if (nd != NULL) { + struct symbol *sym = rb_entry(nd, struct symbol, rb_node); + self->start = sym->start; + } +} + +void map__fixup_end(struct map *self) +{ + struct rb_node *nd = rb_last(&self->dso->syms); + if (nd != NULL) { + struct symbol *sym = rb_entry(nd, struct symbol, rb_node); + self->end = sym->end; + } +} + #define DSO__DELETED "(deleted)" struct symbol * diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index c324bdf8bab..cb086cbe6b1 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -27,6 +27,8 @@ static void dsos__add(struct dso *dso); static struct dso *dsos__find(const char *name); static struct map *map__new2(u64 start, struct dso *dso); static void kernel_maps__insert(struct map *map); +static int dso__load_kernel_sym(struct dso *self, struct map *map, + symbol_filter_t filter); unsigned int symbol__priv_size; static struct rb_root kernel_maps; @@ -69,12 +71,6 @@ static void kernel_maps__fixup_end(void) curr = rb_entry(nd, struct map, rb_node); prev->end = curr->start - 1; } - - nd = rb_last(&curr->dso->syms); - if (nd) { - struct symbol *sym = rb_entry(nd, struct symbol, rb_node); - curr->end = sym->end; - } } static struct symbol *symbol__new(u64 start, u64 len, const char *name) 
@@ -324,7 +320,7 @@ out_failure: * kernel range is broken in several maps, named [kernel].N, as we don't have * the original ELF section names vmlinux have. */ -static int kernel_maps__split_kallsyms(symbol_filter_t filter, int use_modules) +static int kernel_maps__split_kallsyms(symbol_filter_t filter) { struct map *map = kernel_map; struct symbol *pos; @@ -340,9 +336,6 @@ static int kernel_maps__split_kallsyms(symbol_filter_t filter, int use_modules) module = strchr(pos->name, '\t'); if (module) { - if (!use_modules) - goto delete_symbol; - *module++ = '\0'; if (strcmp(map->dso->name, module)) { @@ -382,7 +375,6 @@ static int kernel_maps__split_kallsyms(symbol_filter_t filter, int use_modules) } if (filter && filter(map, pos)) { -delete_symbol: rb_erase(&pos->rb_node, &kernel_map->dso->syms); symbol__delete(pos); } else { @@ -398,17 +390,18 @@ delete_symbol: } -static int kernel_maps__load_kallsyms(symbol_filter_t filter, int use_modules) +static int kernel_maps__load_kallsyms(symbol_filter_t filter) { if (kernel_maps__load_all_kallsyms()) return -1; dso__fixup_sym_end(kernel_map->dso); + kernel_map->dso->origin = DSO__ORIG_KERNEL; - return kernel_maps__split_kallsyms(filter, use_modules); + return kernel_maps__split_kallsyms(filter); } -static size_t kernel_maps__fprintf(FILE *fp) +size_t kernel_maps__fprintf(FILE *fp) { size_t printed = fprintf(fp, "Kernel maps:\n"); struct rb_node *nd; @@ -1042,13 +1035,17 @@ char dso__symtab_origin(const struct dso *self) int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) { int size = PATH_MAX; - char *name = malloc(size); + char *name; u8 build_id[BUILD_ID_SIZE]; int ret = -1; int fd; self->loaded = 1; + if (self->kernel) + return dso__load_kernel_sym(self, map, filter); + + name = malloc(size); if (!name) return -1; @@ -1139,7 +1136,8 @@ static void kernel_maps__insert(struct map *map) maps__insert(&kernel_maps, map); } -struct symbol *kernel_maps__find_symbol(u64 ip, struct map **mapp) +struct symbol *kernel_maps__find_symbol(u64 ip, struct map **mapp, + symbol_filter_t filter) { struct map *map = maps__find(&kernel_maps, ip); @@ -1148,7 +1146,7 @@ struct symbol *kernel_maps__find_symbol(u64 ip, struct map **mapp) if (map) { ip = map->map_ip(map, ip); - return map->dso->find_symbol(map->dso, ip); + return map__find_symbol(map, ip, filter); } return NULL; @@ -1168,28 +1166,9 @@ struct map *kernel_maps__find_by_dso_name(const char *name) return NULL; } -static int dso__load_module_sym(struct dso *self, struct map *map, - symbol_filter_t filter) -{ - int err = 0, fd = open(self->long_name, O_RDONLY); - - self->loaded = 1; - - if (fd < 0) { - pr_err("%s: cannot open %s\n", __func__, self->long_name); - return err; - } - - err = dso__load_sym(self, map, self->long_name, fd, filter, 0, 1); - close(fd); - - return err; -} - -static int dsos__load_modules_sym_dir(char *dirname, symbol_filter_t filter) +static int dsos__set_modules_path_dir(char *dirname) { struct dirent *dent; - int nr_symbols = 0, err; DIR *dir = opendir(dirname); if (!dir) { @@ -1207,14 +1186,12 @@ static int dsos__load_modules_sym_dir(char *dirname, symbol_filter_t filter) snprintf(path, sizeof(path), "%s/%s", dirname, dent->d_name); - err = dsos__load_modules_sym_dir(path, filter); - if (err < 0) + if (dsos__set_modules_path_dir(path) < 0) goto failure; } else { char *dot = strrchr(dent->d_name, '.'), dso_name[PATH_MAX]; struct map *map; - struct rb_node *last; char *long_name; if (dot == NULL || strcmp(dot, ".ko")) @@ -1234,36 +1211,16 @@ static int 
dsos__load_modules_sym_dir(char *dirname, symbol_filter_t filter) if (long_name == NULL) goto failure; dso__set_long_name(map->dso, long_name); - dso__set_basename(map->dso); - - err = dso__load_module_sym(map->dso, map, filter); - if (err < 0) - goto failure; - last = rb_last(&map->dso->syms); - if (last) { - struct symbol *sym; - /* - * We do this here as well, even having the - * symbol size found in the symtab because - * misannotated ASM symbols may have the size - * set to zero. - */ - dso__fixup_sym_end(map->dso); - - sym = rb_entry(last, struct symbol, rb_node); - map->end = map->start + sym->end; - } } - nr_symbols += err; } - return nr_symbols; + return 0; failure: closedir(dir); return -1; } -int dsos__load_modules_sym(symbol_filter_t filter) +static int dsos__set_modules_path(void) { struct utsname uts; char modules_path[PATH_MAX]; @@ -1274,7 +1231,7 @@ int dsos__load_modules_sym(symbol_filter_t filter) snprintf(modules_path, sizeof(modules_path), "/lib/modules/%s/kernel", uts.release); - return dsos__load_modules_sym_dir(modules_path, filter); + return dsos__set_modules_path_dir(modules_path); } /* @@ -1296,7 +1253,7 @@ static struct map *map__new2(u64 start, struct dso *dso) return self; } -int dsos__load_modules(void) +static int kernel_maps__create_module_maps(void) { char *line = NULL; size_t n; @@ -1360,7 +1317,13 @@ int dsos__load_modules(void) free(line); fclose(file); - return 0; + /* + * Now that we have all sorted out, just set the ->end of all + * maps: + */ + kernel_maps__fixup_end(); + + return dsos__set_modules_path(); out_delete_line: free(line); @@ -1385,40 +1348,17 @@ static int dso__load_vmlinux(struct dso *self, struct map *map, return err; } -int dso__load_kernel_sym(struct dso *self, symbol_filter_t filter, - int use_modules) +static int dso__load_kernel_sym(struct dso *self, struct map *map, + symbol_filter_t filter) { - int err; + int err = dso__load_vmlinux(self, map, self->name, filter); - kernel_map = map__new2(0, self); - if (kernel_map == NULL) - return -1; - - kernel_map->map_ip = kernel_map->unmap_ip = identity__map_ip; - - err = dso__load_vmlinux(self, kernel_map, self->name, filter); if (err <= 0) - err = kernel_maps__load_kallsyms(filter, use_modules); + err = kernel_maps__load_kallsyms(filter); if (err > 0) { - struct rb_node *node = rb_first(&self->syms); - struct symbol *sym = rb_entry(node, struct symbol, rb_node); - - kernel_map->start = sym->start; - node = rb_last(&self->syms); - sym = rb_entry(node, struct symbol, rb_node); - kernel_map->end = sym->end; - - self->origin = DSO__ORIG_KERNEL; - kernel_maps__insert(kernel_map); - /* - * Now that we have all sorted out, just set the ->end of all - * maps: - */ - kernel_maps__fixup_end(); - - if (verbose) - kernel_maps__fprintf(stderr); + map__fixup_start(map); + map__fixup_end(map); } return err; @@ -1479,46 +1419,51 @@ size_t dsos__fprintf_buildid(FILE *fp) return ret; } -struct dso *dsos__load_kernel(void) +static int kernel_maps__create_kernel_map(void) { struct dso *kernel = dso__new(vmlinux_name); if (kernel == NULL) - return NULL; + return -1; + + kernel_map = map__new2(0, kernel); + if (kernel_map == NULL) + goto out_delete_kernel_dso; + + kernel_map->map_ip = kernel_map->unmap_ip = identity__map_ip; kernel->short_name = "[kernel]"; + kernel->kernel = 1; vdso = dso__new("[vdso]"); - if (!vdso) - return NULL; + if (vdso == NULL) + goto out_delete_kernel_map; if (sysfs__read_build_id("/sys/kernel/notes", kernel->build_id, sizeof(kernel->build_id)) == 0) kernel->has_build_id = true; + 
kernel_maps__insert(kernel_map); dsos__add(kernel); dsos__add(vdso); - return kernel; + return 0; + +out_delete_kernel_map: + map__delete(kernel_map); + kernel_map = NULL; +out_delete_kernel_dso: + dso__delete(kernel); + return -1; } -int load_kernel(symbol_filter_t filter, bool use_modules) +int kernel_maps__init(bool use_modules) { - struct dso *kernel = dsos__load_kernel(); - - if (kernel == NULL) + if (kernel_maps__create_kernel_map() < 0) return -1; - if (use_modules) { - if (dsos__load_modules() < 0) - pr_warning("Failed to load list of modules in use, " - "continuing...\n"); - else if (dsos__load_modules_sym(filter) < 0) - pr_warning("Failed to read module symbols, " - "continuing...\n"); - } - - if (dso__load_kernel_sym(kernel, filter, use_modules) < 0) - pr_warning("Failed to read kernel symbols, continuing...\n"); + if (use_modules && kernel_maps__create_module_maps() < 0) + pr_warning("Failed to load list of modules in use, " + "continuing...\n"); return 0; } diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 3d9d346d101..7a129047c47 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -64,6 +64,7 @@ struct dso { u8 slen_calculated:1; u8 loaded:1; u8 has_build_id:1; + u8 kernel:1; unsigned char origin; u8 build_id[BUILD_ID_SIZE]; u16 long_name_len; @@ -77,12 +78,8 @@ void dso__delete(struct dso *self); struct symbol *dso__find_symbol(struct dso *self, u64 ip); -int dsos__load_modules(void); -int dsos__load_modules_sym(symbol_filter_t filter); struct dso *dsos__findnew(const char *name); int dso__load(struct dso *self, struct map *map, symbol_filter_t filter); -int dso__load_kernel_sym(struct dso *self, symbol_filter_t filter, - int use_modules); void dsos__fprintf(FILE *fp); size_t dsos__fprintf_buildid(FILE *fp); @@ -96,8 +93,8 @@ int sysfs__read_build_id(const char *filename, void *bf, size_t size); bool dsos__read_build_ids(void); int build_id__sprintf(u8 *self, int len, char *bf); -struct dso *dsos__load_kernel(void); -int load_kernel(symbol_filter_t filter, bool use_modules); +int kernel_maps__init(bool use_modules); +size_t kernel_maps__fprintf(FILE *fp); void symbol__init(unsigned int priv_size); diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 53addd77ce8..e4b8d437725 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -26,7 +26,8 @@ size_t threads__fprintf(FILE *fp); void maps__insert(struct rb_root *maps, struct map *map); struct map *maps__find(struct rb_root *maps, u64 ip); -struct symbol *kernel_maps__find_symbol(const u64 ip, struct map **mapp); +struct symbol *kernel_maps__find_symbol(const u64 ip, struct map **mapp, + symbol_filter_t filter); struct map *kernel_maps__find_by_dso_name(const char *name); static inline struct map *thread__find_map(struct thread *self, u64 ip) From fbd733b815a5a57d7eb0d904edc49d18fd12df5c Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 20 Nov 2009 20:51:28 -0200 Subject: [PATCH 316/470] perf symbols: Check vmlinux buildid MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit E.g.: [root@doppio linux-2.6-tip]# perf top -v --vmlinux ../build/tip/vmlinux > /dev/null build_id in vmlinux is e96699725a47413a50c231864a8e7a8ced40a31b while expected is 18e7cc53db62a7d35e9d6f6c9ddc23017d38ee9a, ignoring it I.e. perf top was told to use a vmlinux file that is not the one currently running on the machine, it ignores it and falls back to using /proc/kallsyms. 
This solves many, at first, mysterious results when people have a stale vmlinux file while keeping the default of trying to use the vmlinux file in the current directory in things like 'perf annotate' where the DWARF info is required and thus we can't use just /proc/kallsyms. Modules buildids are already being checked as of the previous changeset in this series, because we are using the default dso__load routine, that will look at a series of places looking for the best file with a matching buildid, starting in the -debuginfo directories. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1258757489-5978-5-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index cb086cbe6b1..9cf6dbcd158 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1334,13 +1334,37 @@ out_failure: static int dso__load_vmlinux(struct dso *self, struct map *map, const char *vmlinux, symbol_filter_t filter) { - int err, fd = open(vmlinux, O_RDONLY); + int err = -1, fd; - self->loaded = 1; + if (self->has_build_id) { + u8 build_id[BUILD_ID_SIZE]; + if (filename__read_build_id(vmlinux, build_id, + sizeof(build_id)) < 0) { + pr_debug("No build_id in %s, ignoring it\n", vmlinux); + return -1; + } + if (!dso__build_id_equal(self, build_id)) { + char expected_build_id[BUILD_ID_SIZE * 2 + 1], + vmlinux_build_id[BUILD_ID_SIZE * 2 + 1]; + + build_id__sprintf(self->build_id, + sizeof(self->build_id), + expected_build_id); + build_id__sprintf(build_id, sizeof(build_id), + vmlinux_build_id); + pr_debug("build_id in %s is %s while expected is %s, " + "ignoring it\n", vmlinux, vmlinux_build_id, + expected_build_id); + return -1; + } + } + + fd = open(vmlinux, O_RDONLY); if (fd < 0) return -1; + self->loaded = 1; err = dso__load_sym(self, map, self->long_name, fd, filter, 1, 0); close(fd); From ef6ae724253429ac70d81e65d052f6a346d330bd Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 20 Nov 2009 20:51:29 -0200 Subject: [PATCH 317/470] perf symbols: Change the kernel DSO name if it comes from kallsyms MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit So that the user have a clearer indication about the source of the symbols, as we only state buildid mismatches in verbose mode, because 'perf top' would overwrite such warning anyway. 
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1258757489-5978-6-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 9cf6dbcd158..48f87f065a2 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -108,6 +108,8 @@ static size_t symbol__fprintf(struct symbol *self, FILE *fp) static void dso__set_long_name(struct dso *self, char *name) { + if (name == NULL) + return; self->long_name = name; self->long_name_len = strlen(name); } @@ -1377,8 +1379,11 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, { int err = dso__load_vmlinux(self, map, self->name, filter); - if (err <= 0) + if (err <= 0) { err = kernel_maps__load_kallsyms(filter); + if (err > 0) + dso__set_long_name(self, strdup("[kernel.kallsyms]")); + } if (err > 0) { map__fixup_start(map); From 453f19eea7dbad837425e9b07d84568d14898794 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 20 Nov 2009 22:19:43 +0100 Subject: [PATCH 318/470] perf: Allow for custom overflow handlers in-kernel perf users might wish to have custom actions on the sample interrupt. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <20091120212508.222339539@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 6 ++++++ kernel/perf_event.c | 8 +++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index b5cdac0de37..a430ac3074a 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -567,6 +567,8 @@ struct perf_pending_entry { typedef void (*perf_callback_t)(struct perf_event *, void *); +struct perf_sample_data; + /** * struct perf_event - performance event kernel representation: */ @@ -658,6 +660,10 @@ struct perf_event { struct pid_namespace *ns; u64 id; + void (*overflow_handler)(struct perf_event *event, + int nmi, struct perf_sample_data *data, + struct pt_regs *regs); + #ifdef CONFIG_EVENT_PROFILE struct event_filter *filter; #endif diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 3852e2656bb..1dfb6cc4fde 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -3710,7 +3710,11 @@ static int __perf_event_overflow(struct perf_event *event, int nmi, perf_event_disable(event); } - perf_event_output(event, nmi, data, regs); + if (event->overflow_handler) + event->overflow_handler(event, nmi, data, regs); + else + perf_event_output(event, nmi, data, regs); + return ret; } @@ -4836,6 +4840,8 @@ inherit_event(struct perf_event *parent_event, if (parent_event->attr.freq) child_event->hw.sample_period = parent_event->hw.sample_period; + child_event->overflow_handler = parent_event->overflow_handler; + /* * Link it up in the child's context: */ From 0cff784ae41cc125368ae77f1c01328ae2fdc6b3 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 20 Nov 2009 22:19:44 +0100 Subject: [PATCH 319/470] perf: Optimize some swcounter attr.sample_period==1 paths Avoid the rather expensive perf_swevent_set_period() if we know we have to sample every single event anyway. 
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <20091120212508.299508332@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 1dfb6cc4fde..8e55b440e28 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -3759,16 +3759,16 @@ again: return nr; } -static void perf_swevent_overflow(struct perf_event *event, +static void perf_swevent_overflow(struct perf_event *event, u64 overflow, int nmi, struct perf_sample_data *data, struct pt_regs *regs) { struct hw_perf_event *hwc = &event->hw; int throttle = 0; - u64 overflow; data->period = event->hw.last_period; - overflow = perf_swevent_set_period(event); + if (!overflow) + overflow = perf_swevent_set_period(event); if (hwc->interrupts == MAX_INTERRUPTS) return; @@ -3801,14 +3801,19 @@ static void perf_swevent_add(struct perf_event *event, u64 nr, atomic64_add(nr, &event->count); - if (!hwc->sample_period) - return; - if (!regs) return; - if (!atomic64_add_negative(nr, &hwc->period_left)) - perf_swevent_overflow(event, nmi, data, regs); + if (!hwc->sample_period) + return; + + if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) + return perf_swevent_overflow(event, 1, nmi, data, regs); + + if (atomic64_add_negative(nr, &hwc->period_left)) + return; + + perf_swevent_overflow(event, 0, nmi, data, regs); } static int perf_swevent_is_counting(struct perf_event *event) From 81520183878a8813c71c9372de28bb70913ba549 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 20 Nov 2009 22:19:45 +0100 Subject: [PATCH 320/470] perf: Optimize perf_swevent_ctx_event() Remove a rcu_read_{,un}lock() pair and a few conditionals. We can remove the rcu_read_lock() by increasing the scope of one in the calling function. We can do away with the system_state check if the machine still boots after this patch (seems to be the case). We can do away with the list_empty() check because the bare list_for_each_entry_rcu() reduces to that now that we've removed everything else. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <20091120212508.378188589@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 8e55b440e28..cda17acfcaf 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -3886,15 +3886,10 @@ static void perf_swevent_ctx_event(struct perf_event_context *ctx, { struct perf_event *event; - if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) - return; - - rcu_read_lock(); list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { if (perf_swevent_match(event, type, event_id, data, regs)) perf_swevent_add(event, nr, nmi, data, regs); } - rcu_read_unlock(); } static int *perf_swevent_recursion_context(struct perf_cpu_context *cpuctx) @@ -3926,9 +3921,9 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id, (*recursion)++; barrier(); + rcu_read_lock(); perf_swevent_ctx_event(&cpuctx->ctx, type, event_id, nr, nmi, data, regs); - rcu_read_lock(); /* * doesn't really matter which of the child contexts the * events ends up in. From d6ff86cfb50a72df820e7e839836d55d245306fb Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 20 Nov 2009 22:19:46 +0100 Subject: [PATCH 321/470] perf: Optimize perf_event_task_ctx() Remove a rcu_read_{,un}lock() pair and a few conditionals. 
We can remove the rcu_read_lock() by increasing the scope of one in the calling function. We can do away with the system_state check if the machine still boots after this patch (seems to be the case). We can do away with the list_empty() check because the bare list_for_each_entry_rcu() reduces to that now that we've removed everything else. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <20091120212508.452227115@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index cda17acfcaf..2afb305944a 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -3267,15 +3267,10 @@ static void perf_event_task_ctx(struct perf_event_context *ctx, { struct perf_event *event; - if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) - return; - - rcu_read_lock(); list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { if (perf_event_task_match(event)) perf_event_task_output(event, task_event); } - rcu_read_unlock(); } static void perf_event_task_event(struct perf_task_event *task_event) @@ -3283,11 +3278,11 @@ static void perf_event_task_event(struct perf_task_event *task_event) struct perf_cpu_context *cpuctx; struct perf_event_context *ctx = task_event->task_ctx; + rcu_read_lock(); cpuctx = &get_cpu_var(perf_cpu_context); perf_event_task_ctx(&cpuctx->ctx, task_event); put_cpu_var(perf_cpu_context); - rcu_read_lock(); if (!ctx) ctx = rcu_dereference(task_event->task->perf_event_ctxp); if (ctx) From f6595f3a9680c86b6332f881a7ae2cbbcfdc8619 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 20 Nov 2009 22:19:47 +0100 Subject: [PATCH 322/470] perf: Optimize perf_event_comm_ctx() Remove a rcu_read_{,un}lock() pair and a few conditionals. We can remove the rcu_read_lock() by increasing the scope of one in the calling function. We can do away with the system_state check if the machine still boots after this patch (seems to be the case). We can do away with the list_empty() check because the bare list_for_each_entry_rcu() reduces to that now that we've removed everything else. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <20091120212508.527608793@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 2afb305944a..4deefaace90 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -3374,15 +3374,10 @@ static void perf_event_comm_ctx(struct perf_event_context *ctx, { struct perf_event *event; - if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) - return; - - rcu_read_lock(); list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { if (perf_event_comm_match(event)) perf_event_comm_output(event, comm_event); } - rcu_read_unlock(); } static void perf_event_comm_event(struct perf_comm_event *comm_event) @@ -3401,11 +3396,11 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event) comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; + rcu_read_lock(); cpuctx = &get_cpu_var(perf_cpu_context); perf_event_comm_ctx(&cpuctx->ctx, comm_event); put_cpu_var(perf_cpu_context); - rcu_read_lock(); /* * doesn't really matter which of the child contexts the * events ends up in. 
From f6d9dd237da400effb265f3554c64413f8a3e7b4 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 20 Nov 2009 22:19:48 +0100 Subject: [PATCH 323/470] perf: Optimize perf_event_mmap_ctx() Remove a rcu_read_{,un}lock() pair and a few conditionals. We can remove the rcu_read_lock() by increasing the scope of one in the calling function. We can do away with the system_state check if the machine still boots after this patch (seems to be the case). We can do away with the list_empty() check because the bare list_for_each_entry_rcu() reduces to that now that we've removed everything else. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <20091120212508.606459548@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 4deefaace90..68fbf4ff688 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -3493,15 +3493,10 @@ static void perf_event_mmap_ctx(struct perf_event_context *ctx, { struct perf_event *event; - if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) - return; - - rcu_read_lock(); list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { if (perf_event_mmap_match(event, mmap_event)) perf_event_mmap_output(event, mmap_event); } - rcu_read_unlock(); } static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) @@ -3557,11 +3552,11 @@ got_name: mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; + rcu_read_lock(); cpuctx = &get_cpu_var(perf_cpu_context); perf_event_mmap_ctx(&cpuctx->ctx, mmap_event); put_cpu_var(perf_cpu_context); - rcu_read_lock(); /* * doesn't really matter which of the child contexts the * events ends up in. From abf4868b8548cae18d4fe8bbfb4e207443be01be Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 20 Nov 2009 22:19:49 +0100 Subject: [PATCH 324/470] perf: Fix PERF_FORMAT_GROUP scale info As Corey reported, the total_enabled and total_running times could occasionally be 0, even though there were events counted. It turns out this is because we record the times before reading the counter while the latter updates the times. This patch corrects that. While looking at this code I found that there is a lot of locking iffyness around, the following patches correct most of that. 
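For reference, the layout that perf_event_read_group() produces for user space (PERF_FORMAT_GROUP combined with the time and ID bits) can be parsed with a sketch like the one below; the struct-style comment and helper are illustrative user-space code inferred from the kernel routine, not part of the patch:

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Buffer returned by read() for PERF_FORMAT_GROUP |
	 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING |
	 * PERF_FORMAT_ID:
	 *
	 *	u64 nr;					// 1 + number of siblings
	 *	u64 time_enabled;
	 *	u64 time_running;
	 *	struct { u64 value; u64 id; } cnt[nr];	// leader first, then siblings
	 */
	static void dump_group_read(const uint64_t *buf)
	{
		uint64_t nr = buf[0], enabled = buf[1], running = buf[2];
		const uint64_t *cnt = &buf[3];
		uint64_t i;

		printf("nr=%llu enabled=%llu running=%llu\n",
		       (unsigned long long)nr, (unsigned long long)enabled,
		       (unsigned long long)running);
		for (i = 0; i < nr; i++)
			printf("  value=%llu id=%llu\n",
			       (unsigned long long)cnt[2 * i],
			       (unsigned long long)cnt[2 * i + 1]);
	}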
Reported-by: Corey Ashford Signed-off-by: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <20091120212508.685559857@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 52 ++++++++++++++++++--------------------------- 1 file changed, 21 insertions(+), 31 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 68fbf4ff688..9a18ff28ea5 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1784,30 +1784,15 @@ u64 perf_event_read_value(struct perf_event *event) } EXPORT_SYMBOL_GPL(perf_event_read_value); -static int perf_event_read_entry(struct perf_event *event, - u64 read_format, char __user *buf) -{ - int n = 0, count = 0; - u64 values[2]; - - values[n++] = perf_event_read_value(event); - if (read_format & PERF_FORMAT_ID) - values[n++] = primary_event_id(event); - - count = n * sizeof(u64); - - if (copy_to_user(buf, values, count)) - return -EFAULT; - - return count; -} - static int perf_event_read_group(struct perf_event *event, u64 read_format, char __user *buf) { struct perf_event *leader = event->group_leader, *sub; - int n = 0, size = 0, err = -EFAULT; - u64 values[3]; + int n = 0, size = 0, ret = 0; + u64 values[5]; + u64 count; + + count = perf_event_read_value(leader); values[n++] = 1 + leader->nr_siblings; if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { @@ -1818,28 +1803,33 @@ static int perf_event_read_group(struct perf_event *event, values[n++] = leader->total_time_running + atomic64_read(&leader->child_total_time_running); } + values[n++] = count; + if (read_format & PERF_FORMAT_ID) + values[n++] = primary_event_id(leader); size = n * sizeof(u64); if (copy_to_user(buf, values, size)) return -EFAULT; - err = perf_event_read_entry(leader, read_format, buf + size); - if (err < 0) - return err; - - size += err; + ret += size; list_for_each_entry(sub, &leader->sibling_list, group_entry) { - err = perf_event_read_entry(sub, read_format, - buf + size); - if (err < 0) - return err; + n = 0; - size += err; + values[n++] = perf_event_read_value(sub); + if (read_format & PERF_FORMAT_ID) + values[n++] = primary_event_id(sub); + + size = n * sizeof(u64); + + if (copy_to_user(buf + size, values, size)) + return -EFAULT; + + ret += size; } - return size; + return ret; } static int perf_event_read_one(struct perf_event *event, From 02ffdbc866c8b1c8644601e9aa6155700eed4c91 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 20 Nov 2009 22:19:50 +0100 Subject: [PATCH 325/470] perf: Optimize perf_event_task_sched_out Remove an update_context_time() call from the perf_event_task_sched_out() path and into the branch its needed. The call was both superfluous, because __perf_event_sched_out() already does it, and wrong, because it was done without holding ctx->lock. Place it in perf_event_sync_stat(), which is the only place it is needed and which does already hold ctx->lock. 
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <20091120212508.779516394@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 9a18ff28ea5..65f4dab0ce6 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1120,6 +1120,8 @@ static void perf_event_sync_stat(struct perf_event_context *ctx, if (!ctx->nr_stat) return; + update_context_time(ctx); + event = list_first_entry(&ctx->event_list, struct perf_event, event_entry); @@ -1163,8 +1165,6 @@ void perf_event_task_sched_out(struct task_struct *task, if (likely(!ctx || !cpuctx->task_ctx)) return; - update_context_time(ctx); - rcu_read_lock(); parent = rcu_dereference(ctx->parent_ctx); next_ctx = next->perf_event_ctxp; From f6f83785222b0ee037f7be90731f62a649292b5e Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 20 Nov 2009 22:19:51 +0100 Subject: [PATCH 326/470] perf: Optimize __perf_event_read() Both callers actually have IRQs disabled, no need doing so again. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <20091120212508.863685796@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 65f4dab0ce6..e66f6c400d1 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1517,7 +1517,6 @@ static void __perf_event_read(void *info) struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); struct perf_event *event = info; struct perf_event_context *ctx = event->ctx; - unsigned long flags; /* * If this is a task context, we need to check whether it is @@ -1529,12 +1528,10 @@ static void __perf_event_read(void *info) if (ctx->task && cpuctx->task_ctx != ctx) return; - local_irq_save(flags); if (ctx->is_active) update_context_time(ctx); event->pmu->read(event); update_event_times(event); - local_irq_restore(flags); } static u64 perf_event_read(struct perf_event *event) From 3dbebf15c5d3e265f751eec72c1538a00da4be27 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 20 Nov 2009 22:19:52 +0100 Subject: [PATCH 327/470] perf: Simplify __perf_event_sync_stat Removes constraints from __perf_event_read() by leaving it with a single callsite; this callsite had ctx->lock held, the other one does not. Removes some superfluous code from __perf_event_sync_stat(). 
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <20091120212508.918544317@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index e66f6c400d1..af150bbcfc5 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1061,8 +1061,6 @@ static int context_equiv(struct perf_event_context *ctx1, && !ctx1->pin_count && !ctx2->pin_count; } -static void __perf_event_read(void *event); - static void __perf_event_sync_stat(struct perf_event *event, struct perf_event *next_event) { @@ -1080,8 +1078,8 @@ static void __perf_event_sync_stat(struct perf_event *event, */ switch (event->state) { case PERF_EVENT_STATE_ACTIVE: - __perf_event_read(event); - break; + event->pmu->read(event); + /* fall-through */ case PERF_EVENT_STATE_INACTIVE: update_event_times(event); From 58e5ad1de3d6ad931c84f0cc8ef0655c922f30ad Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 20 Nov 2009 22:19:53 +0100 Subject: [PATCH 328/470] perf: Simplify __perf_event_read cpuctx is always active, task context is always active for current the previous condition verifies that if its a task context its for current, hence we can assume ctx->is_active. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <20091120212509.000272254@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index af150bbcfc5..028619dd6d0 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1526,10 +1526,9 @@ static void __perf_event_read(void *info) if (ctx->task && cpuctx->task_ctx != ctx) return; - if (ctx->is_active) - update_context_time(ctx); - event->pmu->read(event); + update_context_time(ctx); update_event_times(event); + event->pmu->read(event); } static u64 perf_event_read(struct perf_event *event) From 2b8988c9f7defe319cffe0cd362a7cd356c86f62 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 20 Nov 2009 22:19:54 +0100 Subject: [PATCH 329/470] perf: Fix time locking Most sites updating ctx->time and event times do so under ctx->lock, make sure they all do. This was made possible by removing the __perf_event_read() call from __perf_event_sync_stat(), which already had this lock taken. 
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <20091120212509.102316434@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 028619dd6d0..fdfae888a67 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1526,8 +1526,11 @@ static void __perf_event_read(void *info) if (ctx->task && cpuctx->task_ctx != ctx) return; + spin_lock(&ctx->lock); update_context_time(ctx); update_event_times(event); + spin_unlock(&ctx->lock); + event->pmu->read(event); } @@ -1541,7 +1544,13 @@ static u64 perf_event_read(struct perf_event *event) smp_call_function_single(event->oncpu, __perf_event_read, event, 1); } else if (event->state == PERF_EVENT_STATE_INACTIVE) { + struct perf_event_context *ctx = event->ctx; + unsigned long flags; + + spin_lock_irqsave(&ctx->lock, flags); + update_context_time(ctx); update_event_times(event); + spin_unlock_irqrestore(&ctx->lock, flags); } return atomic64_read(&event->count); From 59ed446f792cc07d37b1536b9c4664d14e25e425 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 20 Nov 2009 22:19:55 +0100 Subject: [PATCH 330/470] perf: Fix event scaling for inherited counters Properly account the full hierarchy of counters for both the count (we already did so) and the scale times (new). Signed-off-by: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <20091120212509.153379276@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 3 ++- kernel/perf_event.c | 48 +++++++++++++++++++++----------------- 2 files changed, 28 insertions(+), 23 deletions(-) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index a430ac3074a..36fe89f7264 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -782,7 +782,8 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, pid_t pid, perf_callback_t callback); -extern u64 perf_event_read_value(struct perf_event *event); +extern u64 perf_event_read_value(struct perf_event *event, + u64 *enabled, u64 *running); struct perf_sample_data { u64 type; diff --git a/kernel/perf_event.c b/kernel/perf_event.c index fdfae888a67..80f40da9a01 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1774,14 +1774,25 @@ static int perf_event_read_size(struct perf_event *event) return size; } -u64 perf_event_read_value(struct perf_event *event) +u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) { struct perf_event *child; u64 total = 0; + *enabled = 0; + *running = 0; + total += perf_event_read(event); - list_for_each_entry(child, &event->child_list, child_list) + *enabled += event->total_time_enabled + + atomic64_read(&event->child_total_time_enabled); + *running += event->total_time_running + + atomic64_read(&event->child_total_time_running); + + list_for_each_entry(child, &event->child_list, child_list) { total += perf_event_read(child); + *enabled += child->total_time_enabled; + *running += child->total_time_running; + } return total; } @@ -1793,19 +1804,15 @@ static int perf_event_read_group(struct perf_event *event, struct perf_event *leader = event->group_leader, *sub; int n = 0, size = 0, ret = 0; u64 values[5]; - u64 count; + u64 count, enabled, running; - count = perf_event_read_value(leader); + count = perf_event_read_value(leader, &enabled, &running); values[n++] = 1 + leader->nr_siblings; - if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { - values[n++] = leader->total_time_enabled + - 
atomic64_read(&leader->child_total_time_enabled); - } - if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { - values[n++] = leader->total_time_running + - atomic64_read(&leader->child_total_time_running); - } + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) + values[n++] = enabled; + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) + values[n++] = running; values[n++] = count; if (read_format & PERF_FORMAT_ID) values[n++] = primary_event_id(leader); @@ -1820,7 +1827,7 @@ static int perf_event_read_group(struct perf_event *event, list_for_each_entry(sub, &leader->sibling_list, group_entry) { n = 0; - values[n++] = perf_event_read_value(sub); + values[n++] = perf_event_read_value(sub, &enabled, &running); if (read_format & PERF_FORMAT_ID) values[n++] = primary_event_id(sub); @@ -1838,18 +1845,15 @@ static int perf_event_read_group(struct perf_event *event, static int perf_event_read_one(struct perf_event *event, u64 read_format, char __user *buf) { + u64 enabled, running; u64 values[4]; int n = 0; - values[n++] = perf_event_read_value(event); - if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { - values[n++] = event->total_time_enabled + - atomic64_read(&event->child_total_time_enabled); - } - if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { - values[n++] = event->total_time_running + - atomic64_read(&event->child_total_time_running); - } + values[n++] = perf_event_read_value(event, &enabled, &running); + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) + values[n++] = enabled; + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) + values[n++] = running; if (read_format & PERF_FORMAT_ID) values[n++] = primary_event_id(event); From 6f10581aeaa5543a3b7a8c7a87a064375ec357f8 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 20 Nov 2009 22:19:56 +0100 Subject: [PATCH 331/470] perf: Fix locking for PERF_FORMAT_GROUP We should hold event->child_mutex when iterating the inherited counters, we should hold ctx->mutex when iterating siblings. 
Signed-off-by: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <20091120212509.251030114@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 80f40da9a01..3ede0981f4a 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1782,6 +1782,7 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) *enabled = 0; *running = 0; + mutex_lock(&event->child_mutex); total += perf_event_read(event); *enabled += event->total_time_enabled + atomic64_read(&event->child_total_time_enabled); @@ -1793,6 +1794,7 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) *enabled += child->total_time_enabled; *running += child->total_time_running; } + mutex_unlock(&event->child_mutex); return total; } @@ -1802,10 +1804,12 @@ static int perf_event_read_group(struct perf_event *event, u64 read_format, char __user *buf) { struct perf_event *leader = event->group_leader, *sub; - int n = 0, size = 0, ret = 0; + int n = 0, size = 0, ret = -EFAULT; + struct perf_event_context *ctx = leader->ctx; u64 values[5]; u64 count, enabled, running; + mutex_lock(&ctx->mutex); count = perf_event_read_value(leader, &enabled, &running); values[n++] = 1 + leader->nr_siblings; @@ -1820,9 +1824,9 @@ static int perf_event_read_group(struct perf_event *event, size = n * sizeof(u64); if (copy_to_user(buf, values, size)) - return -EFAULT; + goto unlock; - ret += size; + ret = size; list_for_each_entry(sub, &leader->sibling_list, group_entry) { n = 0; @@ -1833,11 +1837,15 @@ static int perf_event_read_group(struct perf_event *event, size = n * sizeof(u64); - if (copy_to_user(buf + size, values, size)) - return -EFAULT; + if (copy_to_user(buf + size, values, size)) { + ret = -EFAULT; + goto unlock; + } ret += size; } +unlock: + mutex_unlock(&ctx->mutex); return ret; } @@ -1884,12 +1892,10 @@ perf_read_hw(struct perf_event *event, char __user *buf, size_t count) return -ENOSPC; WARN_ON_ONCE(event->ctx->parent_ctx); - mutex_lock(&event->child_mutex); if (read_format & PERF_FORMAT_GROUP) ret = perf_event_read_group(event, read_format, buf); else ret = perf_event_read_one(event, read_format, buf); - mutex_unlock(&event->child_mutex); return ret; } From 8904b18046c2f050107f6449e887e7c1142b9ab9 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Fri, 20 Nov 2009 22:19:57 +0100 Subject: [PATCH 332/470] perf_events: Fix default watermark calculation This patch fixes the default watermark value for the sampling buffer. With the existing calculation (watermark = max(PAGE_SIZE, max_size / 2)), no notification was ever received when the buffer was exactly 1 page. This was because you would never cross the threshold (there are no partial samples). In certain configurations, there was no possibility of detecting the problem because there was not enough space left to store the LOST record. In fact, there may be a more generic problem here. The kernel should ensure that there is always enough space to store one LOST record. This patch sets the default watermark to half the buffer size. With such a limit, we are guaranteed to get a notification even with a single page buffer, assuming no sample is bigger than a page.
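A quick worked example of the difference, assuming a 4096-byte PAGE_SIZE and a single data page (numbers are illustrative):

	old default: watermark = max(PAGE_SIZE, max_size / 2) = max(4096, 2048) = 4096
	new default: watermark = max_size / 2                 = 2048

Because samples are never split, the pending data in a one-page buffer cannot exceed a 4096-byte threshold, so no wakeup was ever generated; with a 2048-byte watermark a notification is delivered once roughly half the page is filled.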
Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <20091120212509.344964101@chello.nl> Signed-off-by: Ingo Molnar LKML-Reference: <1256302576-6169-1-git-send-email-eranian@gmail.com> --- kernel/perf_event.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 3ede0981f4a..718fa939b1a 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -2340,7 +2340,7 @@ perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data) } if (!data->watermark) - data->watermark = max_t(long, PAGE_SIZE, max_size / 2); + data->watermark = max_size / 2; rcu_assign_pointer(event->data, data); From 90c83218c32d7c474da810cd3c9973a43ecbcb9b Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sat, 21 Nov 2009 14:31:24 -0200 Subject: [PATCH 333/470] perf symbols: Fixup kernel_maps__fixup_end end map MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We better call this routine after both the kernel and modules are loaded, because as it was if there weren't modules it would not be called, resulting in kernel_map->end remaining at zero, so no map would be found and consequently the kernel symtab wouldn't get loaded, i.e. no kernel symbols would be resolved. Also this fixes another case, that is when we _have_ modules, but the last map would have its ->end address not set before we loaded its symbols, which would never happen because ->end was not set. Reported-by: Ingo Molnar Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1258821086-11521-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 48f87f065a2..e161a51c9fe 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -71,6 +71,12 @@ static void kernel_maps__fixup_end(void) curr = rb_entry(nd, struct map, rb_node); prev->end = curr->start - 1; } + + /* + * We still haven't the actual symbols, so guess the + * last map final address. 
+ */ + curr->end = ~0UL; } static struct symbol *symbol__new(u64 start, u64 len, const char *name) @@ -1319,12 +1325,6 @@ static int kernel_maps__create_module_maps(void) free(line); fclose(file); - /* - * Now that we have all sorted out, just set the ->end of all - * maps: - */ - kernel_maps__fixup_end(); - return dsos__set_modules_path(); out_delete_line: @@ -1493,7 +1493,10 @@ int kernel_maps__init(bool use_modules) if (use_modules && kernel_maps__create_module_maps() < 0) pr_warning("Failed to load list of modules in use, " "continuing...\n"); - + /* + * Now that we have all the maps created, just set the ->end of them: + */ + kernel_maps__fixup_end(); return 0; } From c12e15e71d4b32da045e798ffd21cbb6197d1c65 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sat, 21 Nov 2009 14:31:25 -0200 Subject: [PATCH 334/470] perf symbols: Old versions of elf.h don't have NT_GNU_BUILD_ID MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1258821086-11521-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index e161a51c9fe..86ec6c720f0 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -12,6 +12,10 @@ #include #include +#ifndef NT_GNU_BUILD_ID +#define NT_GNU_BUILD_ID 3 +#endif + enum dso_origin { DSO__ORIG_KERNEL = 0, DSO__ORIG_JAVA_JIT, From e25613683bd5c46d3e8c8ae6416dccc9f357dcdc Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sat, 21 Nov 2009 14:31:26 -0200 Subject: [PATCH 335/470] perf trace: Read_tracing_data should die() another day MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It better propagate errors, also if we do a simple: [root@doppio linux-2.6-tip]# perf record -R -a -f sleep 3s ; perf trace [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.182 MB perf.data (~7972 samples) ] Fatal: not an trace data file [root@doppio linux-2.6-tip]# That is what is expected, right? I.e. as we didn't specify any tracepoint event via -e, it should gracefully bail out and not SEGFAULT. 
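(Aside, not part of the patch: a hedged sketch of how a caller can use the new int return value of read_tracing_data() to skip the tracing data section instead of producing a file that later makes 'perf trace' die; the wrapper name and warning text are assumptions, pr_warning() being the perf tools logging helper.)

  /* Hypothetical caller-side handling of the new return value. */
  static int write_tracing_data(int fd, struct perf_event_attr *attrs,
                                int nb_events)
  {
          if (read_tracing_data(fd, attrs, nb_events) < 0) {
                  pr_warning("no tracepoint events were requested, "
                             "skipping tracing data\n");
                  return -1;
          }
          return 0;
  }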
Signed-off-by: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1258821086-11521-3-git-send-email-acme@infradead.org> [ Fixed the error messages some more ] Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-info.c | 22 +++++++++++++++------- tools/perf/util/trace-event-read.c | 4 ++-- tools/perf/util/trace-event.h | 2 +- 3 files changed, 18 insertions(+), 10 deletions(-) diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c index 831052d4b4f..cace3559553 100644 --- a/tools/perf/util/trace-event-info.c +++ b/tools/perf/util/trace-event-info.c @@ -33,11 +33,11 @@ #include #include #include +#include #include "../perf.h" #include "trace-event.h" - #define VERSION "0.5" #define _STR(x) #x @@ -483,23 +483,31 @@ static struct tracepoint_path * get_tracepoints_path(struct perf_event_attr *pattrs, int nb_events) { struct tracepoint_path path, *ppath = &path; - int i; + int i, nr_tracepoints = 0; for (i = 0; i < nb_events; i++) { if (pattrs[i].type != PERF_TYPE_TRACEPOINT) continue; + ++nr_tracepoints; ppath->next = tracepoint_id_to_path(pattrs[i].config); if (!ppath->next) die("%s\n", "No memory to alloc tracepoints list"); ppath = ppath->next; } - return path.next; + return nr_tracepoints > 0 ? path.next : NULL; } -void read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events) + +int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events) { char buf[BUFSIZ]; - struct tracepoint_path *tps; + struct tracepoint_path *tps = get_tracepoints_path(pattrs, nb_events); + + /* + * What? No tracepoints? No sense writing anything here, bail out. + */ + if (tps == NULL) + return -1; output_fd = fd; @@ -528,11 +536,11 @@ void read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events) page_size = getpagesize(); write_or_die(&page_size, 4); - tps = get_tracepoints_path(pattrs, nb_events); - read_header_files(); read_ftrace_files(tps); read_event_files(tps); read_proc_kallsyms(); read_ftrace_printk(); + + return 0; } diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c index 44292e06cca..342dfdd43f8 100644 --- a/tools/perf/util/trace-event-read.c +++ b/tools/perf/util/trace-event-read.c @@ -471,11 +471,11 @@ void trace_report(int fd) read_or_die(buf, 3); if (memcmp(buf, test, 3) != 0) - die("not an trace data file"); + die("no trace data in the file"); read_or_die(buf, 7); if (memcmp(buf, "tracing", 7) != 0) - die("not a trace file (missing tracing)"); + die("not a trace file (missing 'tracing' tag)"); version = read_string(); if (show_version) diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index f6637c2fa1f..dd51c6872a1 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -248,7 +248,7 @@ unsigned long long raw_field_value(struct event *event, const char *name, void *data); void *raw_field_ptr(struct event *event, const char *name, void *data); -void read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events); +int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events); /* taken from kernel/trace/trace.h */ enum trace_flag_type { From ce71b9df8893ec954e56c5979df6da274f20f65e Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 22 Nov 2009 05:26:55 +0100 Subject: [PATCH 336/470] tracing: Use the perf recursion protection from trace event When we commit a trace to perf, we first check if we are recursing in the 
same buffer so that we don't mess-up the buffer with a recursing trace. But later on, we do the same check from perf to avoid commit recursion. The recursion check is desired early before we touch the buffer but we want to do this check only once. Then export the recursion protection from perf and use it from the trace events before submitting a trace. v2: Put appropriate Reported-by tag Reported-by: Peter Zijlstra Signed-off-by: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo Cc: Paul Mackerras Cc: Steven Rostedt Cc: Masami Hiramatsu Cc: Jason Baron LKML-Reference: <1258864015-10579-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- include/linux/ftrace_event.h | 9 +--- include/linux/perf_event.h | 4 ++ include/trace/ftrace.h | 23 +++++----- kernel/perf_event.c | 68 ++++++++++++++++++++---------- kernel/trace/trace_event_profile.c | 14 +++--- kernel/trace/trace_kprobe.c | 48 ++++++++------------- kernel/trace/trace_syscalls.c | 47 ++++++++------------- 7 files changed, 106 insertions(+), 107 deletions(-) diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 43360c1d8f7..47bbdf9c38d 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -137,13 +137,8 @@ struct ftrace_event_call { #define FTRACE_MAX_PROFILE_SIZE 2048 -struct perf_trace_buf { - char buf[FTRACE_MAX_PROFILE_SIZE]; - int recursion; -}; - -extern struct perf_trace_buf *perf_trace_buf; -extern struct perf_trace_buf *perf_trace_buf_nmi; +extern char *perf_trace_buf; +extern char *perf_trace_buf_nmi; #define MAX_FILTER_PRED 32 #define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 36fe89f7264..74e98b1d339 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -874,6 +874,8 @@ extern int perf_output_begin(struct perf_output_handle *handle, extern void perf_output_end(struct perf_output_handle *handle); extern void perf_output_copy(struct perf_output_handle *handle, const void *buf, unsigned int len); +extern int perf_swevent_get_recursion_context(int **recursion); +extern void perf_swevent_put_recursion_context(int *recursion); #else static inline void perf_event_task_sched_in(struct task_struct *task, int cpu) { } @@ -902,6 +904,8 @@ static inline void perf_event_mmap(struct vm_area_struct *vma) { } static inline void perf_event_comm(struct task_struct *tsk) { } static inline void perf_event_fork(struct task_struct *tsk) { } static inline void perf_event_init(void) { } +static int perf_swevent_get_recursion_context(int **recursion) { return -1; } +static void perf_swevent_put_recursion_context(int *recursion) { } #endif diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 4945d1c9986..c222ef5238b 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -724,16 +724,19 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ static void ftrace_profile_##call(proto) \ { \ struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ + extern int perf_swevent_get_recursion_context(int **recursion); \ + extern void perf_swevent_put_recursion_context(int *recursion); \ struct ftrace_event_call *event_call = &event_##call; \ extern void perf_tp_event(int, u64, u64, void *, int); \ struct ftrace_raw_##call *entry; \ - struct perf_trace_buf *trace_buf; \ u64 __addr = 0, __count = 1; \ unsigned long irq_flags; \ struct trace_entry *ent; \ int __entry_size; \ int __data_size; \ + char *trace_buf; \ char *raw_data; \ + int 
*recursion; \ int __cpu; \ int pc; \ \ @@ -749,6 +752,10 @@ static void ftrace_profile_##call(proto) \ return; \ \ local_irq_save(irq_flags); \ + \ + if (perf_swevent_get_recursion_context(&recursion)) \ + goto end_recursion; \ + \ __cpu = smp_processor_id(); \ \ if (in_nmi()) \ @@ -759,13 +766,7 @@ static void ftrace_profile_##call(proto) \ if (!trace_buf) \ goto end; \ \ - trace_buf = per_cpu_ptr(trace_buf, __cpu); \ - if (trace_buf->recursion++) \ - goto end_recursion; \ - \ - barrier(); \ - \ - raw_data = trace_buf->buf; \ + raw_data = per_cpu_ptr(trace_buf, __cpu); \ \ *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \ entry = (struct ftrace_raw_##call *)raw_data; \ @@ -780,9 +781,9 @@ static void ftrace_profile_##call(proto) \ perf_tp_event(event_call->id, __addr, __count, entry, \ __entry_size); \ \ -end_recursion: \ - trace_buf->recursion--; \ -end: \ +end: \ + perf_swevent_put_recursion_context(recursion); \ +end_recursion: \ local_irq_restore(irq_flags); \ \ } diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 718fa939b1a..aba82272230 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -3880,34 +3880,42 @@ static void perf_swevent_ctx_event(struct perf_event_context *ctx, } } -static int *perf_swevent_recursion_context(struct perf_cpu_context *cpuctx) +/* + * Must be called with preemption disabled + */ +int perf_swevent_get_recursion_context(int **recursion) { + struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); + if (in_nmi()) - return &cpuctx->recursion[3]; + *recursion = &cpuctx->recursion[3]; + else if (in_irq()) + *recursion = &cpuctx->recursion[2]; + else if (in_softirq()) + *recursion = &cpuctx->recursion[1]; + else + *recursion = &cpuctx->recursion[0]; - if (in_irq()) - return &cpuctx->recursion[2]; + if (**recursion) + return -1; - if (in_softirq()) - return &cpuctx->recursion[1]; + (**recursion)++; - return &cpuctx->recursion[0]; + return 0; } -static void do_perf_sw_event(enum perf_type_id type, u32 event_id, - u64 nr, int nmi, - struct perf_sample_data *data, - struct pt_regs *regs) +void perf_swevent_put_recursion_context(int *recursion) +{ + (*recursion)--; +} + +static void __do_perf_sw_event(enum perf_type_id type, u32 event_id, + u64 nr, int nmi, + struct perf_sample_data *data, + struct pt_regs *regs) { - struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); - int *recursion = perf_swevent_recursion_context(cpuctx); struct perf_event_context *ctx; - - if (*recursion) - goto out; - - (*recursion)++; - barrier(); + struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); rcu_read_lock(); perf_swevent_ctx_event(&cpuctx->ctx, type, event_id, @@ -3920,12 +3928,25 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id, if (ctx) perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs); rcu_read_unlock(); +} - barrier(); - (*recursion)--; +static void do_perf_sw_event(enum perf_type_id type, u32 event_id, + u64 nr, int nmi, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + int *recursion; + preempt_disable(); + + if (perf_swevent_get_recursion_context(&recursion)) + goto out; + + __do_perf_sw_event(type, event_id, nr, nmi, data, regs); + + perf_swevent_put_recursion_context(recursion); out: - put_cpu_var(perf_cpu_context); + preempt_enable(); } void __perf_sw_event(u32 event_id, u64 nr, int nmi, @@ -4159,7 +4180,8 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record, if (!regs) regs = task_pt_regs(current); - do_perf_sw_event(PERF_TYPE_TRACEPOINT, 
event_id, count, 1, + /* Trace events already protected against recursion */ + __do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, &data, regs); } EXPORT_SYMBOL_GPL(perf_tp_event); diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c index e0d351b01f5..d9c60f80aa0 100644 --- a/kernel/trace/trace_event_profile.c +++ b/kernel/trace/trace_event_profile.c @@ -9,31 +9,33 @@ #include "trace.h" -struct perf_trace_buf *perf_trace_buf; +char *perf_trace_buf; EXPORT_SYMBOL_GPL(perf_trace_buf); -struct perf_trace_buf *perf_trace_buf_nmi; +char *perf_trace_buf_nmi; EXPORT_SYMBOL_GPL(perf_trace_buf_nmi); +typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ; + /* Count the events in use (per event id, not per instance) */ static int total_profile_count; static int ftrace_profile_enable_event(struct ftrace_event_call *event) { - struct perf_trace_buf *buf; + char *buf; int ret = -ENOMEM; if (atomic_inc_return(&event->profile_count)) return 0; if (!total_profile_count) { - buf = alloc_percpu(struct perf_trace_buf); + buf = (char *)alloc_percpu(perf_trace_t); if (!buf) goto fail_buf; rcu_assign_pointer(perf_trace_buf, buf); - buf = alloc_percpu(struct perf_trace_buf); + buf = (char *)alloc_percpu(perf_trace_t); if (!buf) goto fail_buf_nmi; @@ -79,7 +81,7 @@ int ftrace_profile_enable(int event_id) static void ftrace_profile_disable_event(struct ftrace_event_call *event) { - struct perf_trace_buf *buf, *nmi_buf; + char *buf, *nmi_buf; if (!atomic_add_negative(-1, &event->profile_count)) return; diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 3696476f307..22e6f68b05b 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1208,11 +1208,12 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp, struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); struct ftrace_event_call *call = &tp->call; struct kprobe_trace_entry *entry; - struct perf_trace_buf *trace_buf; struct trace_entry *ent; int size, __size, i, pc, __cpu; unsigned long irq_flags; + char *trace_buf; char *raw_data; + int *recursion; pc = preempt_count(); __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); @@ -1227,6 +1228,10 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp, * This also protects the rcu read side */ local_irq_save(irq_flags); + + if (perf_swevent_get_recursion_context(&recursion)) + goto end_recursion; + __cpu = smp_processor_id(); if (in_nmi()) @@ -1237,18 +1242,7 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp, if (!trace_buf) goto end; - trace_buf = per_cpu_ptr(trace_buf, __cpu); - - if (trace_buf->recursion++) - goto end_recursion; - - /* - * Make recursion update visible before entering perf_tp_event - * so that we protect from perf recursions. 
- */ - barrier(); - - raw_data = trace_buf->buf; + raw_data = per_cpu_ptr(trace_buf, __cpu); /* Zero dead bytes from alignment to avoid buffer leak to userspace */ *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; @@ -1263,9 +1257,9 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp, entry->args[i] = call_fetch(&tp->args[i].fetch, regs); perf_tp_event(call->id, entry->ip, 1, entry, size); -end_recursion: - trace_buf->recursion--; end: + perf_swevent_put_recursion_context(recursion); +end_recursion: local_irq_restore(irq_flags); return 0; @@ -1278,10 +1272,11 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri, struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); struct ftrace_event_call *call = &tp->call; struct kretprobe_trace_entry *entry; - struct perf_trace_buf *trace_buf; struct trace_entry *ent; int size, __size, i, pc, __cpu; unsigned long irq_flags; + char *trace_buf; + int *recursion; char *raw_data; pc = preempt_count(); @@ -1297,6 +1292,10 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri, * This also protects the rcu read side */ local_irq_save(irq_flags); + + if (perf_swevent_get_recursion_context(&recursion)) + goto end_recursion; + __cpu = smp_processor_id(); if (in_nmi()) @@ -1307,18 +1306,7 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri, if (!trace_buf) goto end; - trace_buf = per_cpu_ptr(trace_buf, __cpu); - - if (trace_buf->recursion++) - goto end_recursion; - - /* - * Make recursion update visible before entering perf_tp_event - * so that we protect from perf recursions. - */ - barrier(); - - raw_data = trace_buf->buf; + raw_data = per_cpu_ptr(trace_buf, __cpu); /* Zero dead bytes from alignment to avoid buffer leak to userspace */ *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; @@ -1334,9 +1322,9 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri, entry->args[i] = call_fetch(&tp->args[i].fetch, regs); perf_tp_event(call->id, entry->ret_ip, 1, entry, size); -end_recursion: - trace_buf->recursion--; end: + perf_swevent_put_recursion_context(recursion); +end_recursion: local_irq_restore(irq_flags); return 0; diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 51213b0aa81..0bb93487526 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -477,10 +477,11 @@ static int sys_prof_refcount_exit; static void prof_syscall_enter(struct pt_regs *regs, long id) { struct syscall_metadata *sys_data; - struct perf_trace_buf *trace_buf; struct syscall_trace_enter *rec; unsigned long flags; + char *trace_buf; char *raw_data; + int *recursion; int syscall_nr; int size; int cpu; @@ -505,6 +506,9 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) /* Protect the per cpu buffer, begin the rcu read side */ local_irq_save(flags); + if (perf_swevent_get_recursion_context(&recursion)) + goto end_recursion; + cpu = smp_processor_id(); if (in_nmi()) @@ -515,18 +519,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) if (!trace_buf) goto end; - trace_buf = per_cpu_ptr(trace_buf, cpu); - - if (trace_buf->recursion++) - goto end_recursion; - - /* - * Make recursion update visible before entering perf_tp_event - * so that we protect from perf recursions. 
- */ - barrier(); - - raw_data = trace_buf->buf; + raw_data = per_cpu_ptr(trace_buf, cpu); /* zero the dead bytes from align to not leak stack to user */ *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; @@ -539,9 +532,9 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) (unsigned long *)&rec->args); perf_tp_event(sys_data->enter_id, 0, 1, rec, size); -end_recursion: - trace_buf->recursion--; end: + perf_swevent_put_recursion_context(recursion); +end_recursion: local_irq_restore(flags); } @@ -588,10 +581,11 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) { struct syscall_metadata *sys_data; struct syscall_trace_exit *rec; - struct perf_trace_buf *trace_buf; unsigned long flags; int syscall_nr; + char *trace_buf; char *raw_data; + int *recursion; int size; int cpu; @@ -617,6 +611,10 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) /* Protect the per cpu buffer, begin the rcu read side */ local_irq_save(flags); + + if (perf_swevent_get_recursion_context(&recursion)) + goto end_recursion; + cpu = smp_processor_id(); if (in_nmi()) @@ -627,18 +625,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) if (!trace_buf) goto end; - trace_buf = per_cpu_ptr(trace_buf, cpu); - - if (trace_buf->recursion++) - goto end_recursion; - - /* - * Make recursion update visible before entering perf_tp_event - * so that we protect from perf recursions. - */ - barrier(); - - raw_data = trace_buf->buf; + raw_data = per_cpu_ptr(trace_buf, cpu); /* zero the dead bytes from align to not leak stack to user */ *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; @@ -652,9 +639,9 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) perf_tp_event(sys_data->exit_id, 0, 1, rec, size); -end_recursion: - trace_buf->recursion--; end: + perf_swevent_put_recursion_context(recursion); +end_recursion: local_irq_restore(flags); } From 28889bf9e2db29747d58cd47a92d727f927c3aee Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 22 Nov 2009 05:21:33 +0100 Subject: [PATCH 337/470] tracing: Forget about the NMI buffer for syscall events We are never in an NMI context when we commit a syscall trace to perf. So just forget about the nmi buffer there. 
Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Paul Mackerras Cc: Steven Rostedt Cc: Jason Baron LKML-Reference: <1258863695-10464-2-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_syscalls.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 0bb93487526..41b6dd963da 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -511,10 +511,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) cpu = smp_processor_id(); - if (in_nmi()) - trace_buf = rcu_dereference(perf_trace_buf_nmi); - else - trace_buf = rcu_dereference(perf_trace_buf); + trace_buf = rcu_dereference(perf_trace_buf); if (!trace_buf) goto end; @@ -617,10 +614,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) cpu = smp_processor_id(); - if (in_nmi()) - trace_buf = rcu_dereference(perf_trace_buf_nmi); - else - trace_buf = rcu_dereference(perf_trace_buf); + trace_buf = rcu_dereference(perf_trace_buf); if (!trace_buf) goto end; From b3a75542d329ce4e1c66b293cefeb4429a2af043 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 22 Nov 2009 05:21:34 +0100 Subject: [PATCH 338/470] hw-breakpoints: Remove x86 specific headers from core file Remove asm/processor.h and asm/debugreg.h as these headers are not used anymore in the hw-breakpoints core file. Signed-off-by: Frederic Weisbecker Cc: Prasad LKML-Reference: <1258863695-10464-3-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- kernel/hw_breakpoint.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index 9ea9414e0e5..b6d6fa273ee 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c @@ -40,12 +40,6 @@ #include -#include - -#ifdef CONFIG_X86 -#include -#endif - /* * Constraints data */ From 5093ebad5f2348076fdc3dac7d2358b1ad7f85f7 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 22 Nov 2009 05:21:35 +0100 Subject: [PATCH 339/470] hw-breakpoints: Separate the kernel part from breakpoint headers So that we can include this header from userspace tools, like perf tools, to get the breakpoint types and len definitions. Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Paul Mackerras Cc: Prasad LKML-Reference: <1258863695-10464-4-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- include/linux/hw_breakpoint.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h index 0b98cbf76da..4659e0c55ea 100644 --- a/include/linux/hw_breakpoint.h +++ b/include/linux/hw_breakpoint.h @@ -16,6 +16,7 @@ enum { HW_BREAKPOINT_X = 4, }; +#ifdef __KERNEL__ #ifdef CONFIG_HAVE_HW_BREAKPOINT static inline unsigned long hw_breakpoint_addr(struct perf_event *bp) @@ -133,5 +134,6 @@ static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp) } #endif /* CONFIG_HAVE_HW_BREAKPOINT */ +#endif /* __KERNEL__ */ #endif /* _LINUX_HW_BREAKPOINT_H */ From 12eac0bf0461910ae6dd7f071f156f75461a37cf Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Fri, 20 Nov 2009 12:37:17 +0900 Subject: [PATCH 340/470] perf bench: Make the mem/memcpy tests more user-friendly mem-memcpy.c uses perf event system calls to obtain CPU clocks. And it suddenly dies with BUG_ON() when it running on Linux doesn't support perf event. 
Also fail at calloc() can occur easily when too large length is passed. Fail of calloc() causes sudden death with assert(). These behaviours are not friendly. So I fixed the treating of errors. Signed-off-by: Hitoshi Mitake Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker LKML-Reference: <1258688237-3797-1-git-send-email-mitake@dcl.info.waseda.ac.jp> [ v2: improved a few small details ] Signed-off-by: Ingo Molnar --- tools/perf/bench/mem-memcpy.c | 37 +++++++++++++++++++++-------------- 1 file changed, 22 insertions(+), 15 deletions(-) diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c index d4f4f9806ae..5165fd1d8d2 100644 --- a/tools/perf/bench/mem-memcpy.c +++ b/tools/perf/bench/mem-memcpy.c @@ -22,9 +22,10 @@ #define K 1024 -static const char *length_str = "1MB"; -static const char *routine = "default"; -static int use_clock = 0; +static const char *length_str = "1MB"; +static const char *routine = "default"; +static int use_clock = 0; +static int clock_fd; static const struct option options[] = { OPT_STRING('l', "length", &length_str, "1MB", @@ -57,17 +58,19 @@ static const char * const bench_mem_memcpy_usage[] = { NULL }; -static int clock_fd; - static struct perf_event_attr clock_attr = { - .type = PERF_TYPE_HARDWARE, - .config = PERF_COUNT_HW_CPU_CYCLES + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES }; static void init_clock(void) { clock_fd = sys_perf_event_open(&clock_attr, getpid(), -1, -1, 0); - BUG_ON(clock_fd < 0); + + if (clock_fd < 0 && errno == ENOSYS) + die("No CONFIG_PERF_EVENTS=y kernel support configured?\n"); + else + BUG_ON(clock_fd < 0); } static u64 get_clock(void) @@ -104,7 +107,8 @@ int bench_mem_memcpy(int argc, const char **argv, tv_diff.tv_sec = 0; tv_diff.tv_usec = 0; length = (size_t)perf_atoll((char *)length_str); - if ((long long int)length <= 0) { + + if ((s64)length <= 0) { fprintf(stderr, "Invalid length:%s\n", length_str); return 1; } @@ -124,9 +128,12 @@ int bench_mem_memcpy(int argc, const char **argv, } dst = calloc(length, sizeof(char)); - assert(dst); + if (!dst) + die("memory allocation failed - maybe length is too large?\n"); + src = calloc(length, sizeof(char)); - assert(src); + if (!src) + die("memory allocation failed - maybe length is too large?\n"); if (bench_format == BENCH_FORMAT_DEFAULT) { printf("# Copying %s Bytes from %p to %p ...\n\n", @@ -136,8 +143,9 @@ int bench_mem_memcpy(int argc, const char **argv, if (use_clock) { init_clock(); clock_start = get_clock(); - } else + } else { BUG_ON(gettimeofday(&tv_start, NULL)); + } routines[i].fn(dst, src, length); @@ -176,9 +184,8 @@ int bench_mem_memcpy(int argc, const char **argv, printf("%lf\n", bps); break; default: - /* reaching here is something disaster */ - fprintf(stderr, "Unknown format:%d\n", bench_format); - exit(1); + /* reaching this means there's some disaster: */ + die("unknown format: %d\n", bench_format); break; } From 96b02d78a7e47cd189f6b307c5513fec6b2155dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20N=C3=A9meth?= Date: Sat, 21 Nov 2009 23:10:15 +0100 Subject: [PATCH 341/470] perf_event: Remove redundant zero fill MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The buffer is first zeroed out by memset(). Then strncpy() is used to fill the content. The strncpy() function also pads the string till the end of the specified length, which is redundant. The strncpy() does not ensures that the string will be properly closed with 0. Use strlcpy() instead. 
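(Aside, not part of the patch: a short kernel-context fragment contrasting the two calls; 'src' stands for an arbitrary source string and TASK_COMM_LEN is the usual 16-byte comm size.)

  char comm[TASK_COMM_LEN];

  memset(comm, 0, sizeof(comm));
  /* strncpy() pads the remainder of comm with zeroes (redundant after
   * the memset above) and leaves comm unterminated when src has
   * sizeof(comm) or more characters. */
  strncpy(comm, src, sizeof(comm));

  /* strlcpy() copies at most sizeof(comm) - 1 characters, always
   * NUL-terminates and never pads. */
  strlcpy(comm, src, sizeof(comm));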
The semantic match that finds this kind of pattern is as follows: (http://coccinelle.lip6.fr/) // @@ expression buffer; expression size; expression str; @@ memset(buffer, 0, size); ... - strncpy( + strlcpy( buffer, str, sizeof(buffer) ); @@ expression buffer; expression size; expression str; @@ memset(&buffer, 0, size); ... - strncpy( + strlcpy( &buffer, str, sizeof(buffer)); @@ expression buffer; identifier field; expression size; expression str; @@ memset(buffer, 0, size); ... - strncpy( + strlcpy( buffer->field, str, sizeof(buffer->field) ); @@ expression buffer; identifier field; expression size; expression str; @@ memset(&buffer, 0, size); ... - strncpy( + strlcpy( buffer.field, str, sizeof(buffer.field)); // On strncpy() vs strlcpy() see http://www.gratisoft.us/todd/papers/strlcpy.html . Signed-off-by: Márton Németh Cc: Julia Lawall Cc: cocci@diku.dk Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <4B086547.5040100@freemail.hu> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index aba82272230..b26cb03c191 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -3391,7 +3391,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event) char comm[TASK_COMM_LEN]; memset(comm, 0, sizeof(comm)); - strncpy(comm, comm_event->task->comm, sizeof(comm)); + strlcpy(comm, comm_event->task->comm, sizeof(comm)); size = ALIGN(strlen(comm)+1, sizeof(u64)); comm_event->comm = comm; From f3ced7cdb24e7968a353d828955fa2daf4167e72 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Sun, 22 Nov 2009 11:58:00 +0200 Subject: [PATCH 342/470] perf kmem: Add --sort hit and --sort frag This patch adds support for "--sort hit" and "--sort frag" to the "perf kmem" tool. The former was already mentioned in the help text and the latter is useful for finding call-sites that exhibit worst case behavior for SLAB allocators. 
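(Aside: the exact definition of fragmentation() is not shown in this patch; the sketch below is only an assumption about the kind of metric such a sort keys on, namely the percentage of allocated bytes that were not actually requested.)

  /* Assumed shape of the metric compared by frag_cmp() above. */
  static double fragmentation(unsigned long bytes_req,
                              unsigned long bytes_alloc)
  {
          if (bytes_alloc == 0)
                  return 0.0;

          return 100.0 - (100.0 * bytes_req / bytes_alloc);
  }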
Signed-off-by: Pekka Enberg Cc: Li Zefan Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Steven Rostedt Cc: Eduard - Gabriel Munteanu Cc: linux-mm@kvack.org LKML-Reference: <1258883880-7149-1-git-send-email-penberg@cs.helsinki.fi> Signed-off-by: Ingo Molnar --- tools/perf/builtin-kmem.c | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index f315b052f81..4145049e7bf 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -443,6 +443,15 @@ static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r) return 0; } +static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r) +{ + if (l->hit < r->hit) + return -1; + else if (l->hit > r->hit) + return 1; + return 0; +} + static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r) { if (l->bytes_alloc < r->bytes_alloc) @@ -452,6 +461,20 @@ static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r) return 0; } +static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r) +{ + double x, y; + + x = fragmentation(l->bytes_req, l->bytes_alloc); + y = fragmentation(r->bytes_req, r->bytes_alloc); + + if (x < y) + return -1; + else if (x > y) + return 1; + return 0; +} + static int parse_sort_opt(const struct option *opt __used, const char *arg, int unset __used) { @@ -464,8 +487,12 @@ static int parse_sort_opt(const struct option *opt __used, sort_fn = ptr_cmp; else if (strcmp(arg, "call_site") == 0) sort_fn = callsite_cmp; + else if (strcmp(arg, "hit") == 0) + sort_fn = hit_cmp; else if (strcmp(arg, "bytes") == 0) sort_fn = bytes_cmp; + else if (strcmp(arg, "frag") == 0) + sort_fn = frag_cmp; else return -1; @@ -517,7 +544,7 @@ static const struct option kmem_options[] = { "stat selector, Pass 'alloc' or 'caller'.", parse_stat_opt), OPT_CALLBACK('s', "sort", NULL, "key", - "sort by key: ptr, call_site, hit, bytes", + "sort by key: ptr, call_site, hit, bytes, frag", parse_sort_opt), OPT_CALLBACK('l', "line", NULL, "num", "show n lins", From e57cfcdac6badd846a1cd831de54a1359c2d1eea Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Sun, 22 Nov 2009 12:29:44 +0200 Subject: [PATCH 343/470] perf symbols: Fix ELF header errors during "perf kmem record" The write_event() function in builtin-record.c writes out all mmap()'d DSOs including non-ELF files like GNOME resource files and such. Therefore, check for ELF_K_ELF in filename__read_build_id() before attempting to read the ELF header with gelf_getehdr(). Fixes the following error messages when running "perf kmem record": penberg@penberg-laptop:~/src/linux/tools/perf$ perf kmem record ^C[ perf record: Woken up 2 times to write data ] [ perf record: Captured and wrote 0.753 MB perf.data (~32885 samples) ] filename__read_build_id: cannot get elf header. filename__read_build_id: cannot get elf header. filename__read_build_id: cannot get elf header. filename__read_build_id: cannot get elf header. filename__read_build_id: cannot get elf header. filename__read_build_id: cannot get elf header. filename__read_build_id: cannot get elf header. filename__read_build_id: cannot get elf header. filename__read_build_id: cannot get elf header. 
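(Aside, not part of the patch: a self-contained libelf sketch of the same guard, using the real elf_version()/elf_begin()/elf_kind()/gelf_getehdr() calls; the helper itself is only an illustration.)

  #include <fcntl.h>
  #include <gelf.h>
  #include <libelf.h>
  #include <unistd.h>

  /* Returns 1 if 'filename' is an ELF object whose header can be read. */
  static int file_is_elf(const char *filename)
  {
          GElf_Ehdr ehdr;
          Elf *elf;
          int ret = 0;
          int fd = open(filename, O_RDONLY);

          if (fd < 0)
                  return 0;

          elf_version(EV_CURRENT);
          elf = elf_begin(fd, ELF_C_READ, NULL);
          if (elf == NULL)
                  goto out_close;

          /* Reject non-ELF mmap()'d files (resource files and the like)
           * before touching the ELF header, as the patch above does. */
          if (elf_kind(elf) != ELF_K_ELF)
                  goto out_elf_end;

          if (gelf_getehdr(elf, &ehdr) != NULL)
                  ret = 1;

  out_elf_end:
          elf_end(elf);
  out_close:
          close(fd);
          return ret;
  }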
Signed-off-by: Pekka Enberg Cc: Arnaldo Carvalho de Melo Cc: Li Zefan Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: Steven Rostedt LKML-Reference: <1258885784-11709-1-git-send-email-penberg@cs.helsinki.fi> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 86ec6c720f0..f56158fb4dc 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -921,6 +921,7 @@ int filename__read_build_id(const char *filename, void *bf, size_t size) GElf_Shdr shdr; Elf_Data *data; Elf_Scn *sec; + Elf_Kind ek; void *ptr; Elf *elf; @@ -937,6 +938,10 @@ int filename__read_build_id(const char *filename, void *bf, size_t size) goto out_close; } + ek = elf_kind(elf); + if (ek != ELF_K_ELF) + goto out_elf_end; + if (gelf_getehdr(elf, &ehdr) == NULL) { pr_err("%s: cannot get elf header.\n", __func__); goto out_elf_end; From 645e8cc0c9f01f07f384fd522b782e5e6ae9de18 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 22 Nov 2009 12:20:19 +0100 Subject: [PATCH 344/470] perf_events: Fix modular build Fix: ERROR: "perf_swevent_put_recursion_context" [fs/ext4/ext4.ko] undefined! ERROR: "perf_swevent_get_recursion_context" [fs/ext4/ext4.ko] undefined! Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Paul Mackerras Cc: Steven Rostedt Cc: Masami Hiramatsu Cc: Jason Baron LKML-Reference: <1258864015-10579-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index b26cb03c191..abe1ef47496 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -3903,11 +3903,13 @@ int perf_swevent_get_recursion_context(int **recursion) return 0; } +EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); void perf_swevent_put_recursion_context(int *recursion) { (*recursion)--; } +EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context); static void __do_perf_sw_event(enum perf_type_id type, u32 event_id, u64 nr, int nmi, From 7baed9af4bf0d7850045e36d19a43a2c76872b62 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 22 Nov 2009 13:27:27 +0200 Subject: [PATCH 345/470] perf tools: Add V=2 option to help debug config issues Make standard error show up on console when V=2 is set. Signed-off-by: Michael S. Tsirkin Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091122112726.GC13644@redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index d7198c54bb6..31da6bed46c 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -2,6 +2,7 @@ all:: # Define V=1 to have a more verbose compile. +# Define V=2 to have an even more verbose compile. # # Define SNPRINTF_RETURNS_BOGUS if your are on a system which snprintf() # or vsnprintf() return -1 instead of number of characters which would @@ -263,7 +264,7 @@ PTHREAD_LIBS = -lpthread # explicitly what architecture to check for. Fix this up for yours.. 
SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__ -ifeq ($(shell sh -c "echo 'int foo(void) {char X[2]; return 3;}' | $(CC) -x c -c -Werror -fstack-protector-all - -o /dev/null >/dev/null 2>&1 && echo y"), y) +ifeq ($(shell sh -c "echo 'int foo(void) {char X[2]; return 3;}' | $(CC) -x c -c -Werror -fstack-protector-all - -o /dev/null "$(QUIET_STDERR)" && echo y"), y) CFLAGS := $(CFLAGS) -fstack-protector-all endif @@ -448,6 +449,11 @@ BUILTIN_OBJS += builtin-kmem.o PERFLIBS = $(LIB_FILE) +ifeq ($(V), 2) + QUIET_STDERR = ">/dev/null" +else + QUIET_STDERR = ">/dev/null 2>&1" +endif # # Platform specific tweaks # @@ -475,19 +481,19 @@ ifeq ($(uname_S),Darwin) PTHREAD_LIBS = endif -ifeq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) > /dev/null 2>&1 && echo y"), y) -ifneq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { const char * version = gnu_get_libc_version(); return (long)version; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) > /dev/null 2>&1 && echo y"), y) +ifeq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y) +ifneq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { const char * version = gnu_get_libc_version(); return (long)version; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y) msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]); endif - ifneq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) > /dev/null 2>&1 && echo y"), y) + ifneq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y) BASIC_CFLAGS += -DLIBELF_NO_MMAP endif else msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]); endif -ifneq ($(shell sh -c "(echo '\#include '; echo '\#include '; echo 'int main(void) { Dwarf_Debug dbg; Dwarf_Error err; Dwarf_Ranges *rng; dwarf_init(0, DW_DLC_READ, 0, 0, &dbg, &err); dwarf_get_ranges(dbg, 0, &rng, 0, 0, &err); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -ldwarf -lelf -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) > /dev/null 2>&1 && echo y"), y) +ifneq ($(shell sh -c "(echo '\#include '; echo '\#include '; echo 'int main(void) { Dwarf_Debug dbg; Dwarf_Error err; Dwarf_Ranges *rng; dwarf_init(0, DW_DLC_READ, 0, 0, &dbg, &err); dwarf_get_ranges(dbg, 0, &rng, 0, 0, &err); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -ldwarf -lelf -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y) msg := $(warning No libdwarf.h found or old libdwarf.h found, disables dwarf support. 
Please install libdwarf-dev/libdwarf-devel >= 20081231); BASIC_CFLAGS += -DNO_LIBDWARF else @@ -499,20 +505,20 @@ endif ifdef NO_DEMANGLE BASIC_CFLAGS += -DNO_DEMANGLE else - has_bfd := $(shell sh -c "(echo '\#include '; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -lbfd > /dev/null 2>&1 && echo y") + has_bfd := $(shell sh -c "(echo '\#include '; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -lbfd "$(QUIET_STDERR)" && echo y") ifeq ($(has_bfd),y) EXTLIBS += -lbfd else - has_bfd_iberty := $(shell sh -c "(echo '\#include '; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty > /dev/null 2>&1 && echo y") + has_bfd_iberty := $(shell sh -c "(echo '\#include '; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty "$(QUIET_STDERR)" && echo y") ifeq ($(has_bfd_iberty),y) EXTLIBS += -lbfd -liberty else - has_bfd_iberty_z := $(shell sh -c "(echo '\#include '; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty -lz > /dev/null 2>&1 && echo y") + has_bfd_iberty_z := $(shell sh -c "(echo '\#include '; echo 'int main(void) { bfd_demangle(0, 0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -lbfd -liberty -lz "$(QUIET_STDERR)" && echo y") ifeq ($(has_bfd_iberty_z),y) EXTLIBS += -lbfd -liberty -lz else - has_cplus_demangle := $(shell sh -c "(echo 'extern char *cplus_demangle(const char *, int);'; echo 'int main(void) { cplus_demangle(0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -liberty > /dev/null 2>&1 && echo y") + has_cplus_demangle := $(shell sh -c "(echo 'extern char *cplus_demangle(const char *, int);'; echo 'int main(void) { cplus_demangle(0, 0); return 0; }') | $(CC) -x c - $(ALL_CFLAGS) -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) -liberty "$(QUIET_STDERR)" && echo y") ifeq ($(has_cplus_demangle),y) EXTLIBS += -liberty BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE From b197c7ef7169bd5f11fb9d803b322d0daef7e256 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 22 Nov 2009 15:13:11 +0200 Subject: [PATCH 346/470] perf tools: Suggest static libraries as well On error, suggest installing static libraries along with shared libraries. Signed-off-by: Michael S. 
Tsirkin Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091122131311.GA24318@redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 31da6bed46c..fce4c3f051e 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -483,7 +483,7 @@ endif ifeq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y) ifneq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { const char * version = gnu_get_libc_version(); return (long)version; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y) - msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]); + msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static); endif ifneq ($(shell sh -c "(echo '\#include '; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y) @@ -523,7 +523,7 @@ else EXTLIBS += -liberty BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE else - msg := $(warning No bfd.h/libbfd found, install binutils-dev[el] to gain symbol demangling) + msg := $(warning No bfd.h/libbfd found, install binutils-dev[el]/zlib-static to gain symbol demangling) BASIC_CFLAGS += -DNO_DEMANGLE endif endif From 87f8ea4cd3680ef7f4da4391aed97abb25eae333 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sun, 22 Nov 2009 13:21:41 -0200 Subject: [PATCH 347/470] perf symbols: Show messages about module loading only if verbose >= 1 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Suggested-by: Ingo Molnar Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1258903301-20584-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index f56158fb4dc..74b5b8a1695 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1189,7 +1189,7 @@ static int dsos__set_modules_path_dir(char *dirname) DIR *dir = opendir(dirname); if (!dir) { - pr_err("%s: cannot open %s dir\n", __func__, dirname); + pr_debug("%s: cannot open %s dir\n", __func__, dirname); return -1; } @@ -1500,8 +1500,8 @@ int kernel_maps__init(bool use_modules) return -1; if (use_modules && kernel_maps__create_module_maps() < 0) - pr_warning("Failed to load list of modules in use, " - "continuing...\n"); + pr_debug("Failed to load list of modules in use, " + "continuing...\n"); /* * Now that we have all the maps created, just set the ->end of them: */ From 50e5095afa8c2be0f35e5c0e21d5f7912340e8f2 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sun, 22 Nov 2009 14:59:22 -0200 Subject: [PATCH 348/470] perf report: Do map lookups in resolve_callchain() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bug introduced in 
439d473b4777de510e1322168ac6f2f377ecd5bc, making the initial map be used for all IPs, so that symbols outside this initial map would either be erroneously resolved or not resolve at all. Reported-by: Ingo Molnar Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1258909162-28496-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-report.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 0d39e805be2..7e690f73b51 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -467,7 +467,7 @@ static int call__match(struct symbol *sym) return 0; } -static struct symbol **resolve_callchain(struct thread *thread, struct map *map, +static struct symbol **resolve_callchain(struct thread *thread, struct ip_callchain *chain, struct symbol **parent) { @@ -496,10 +496,10 @@ static struct symbol **resolve_callchain(struct thread *thread, struct map *map, case PERF_CONTEXT_HV: break; case PERF_CONTEXT_KERNEL: - sym = kernel_maps__find_symbol(ip, &map, NULL); + sym = kernel_maps__find_symbol(ip, NULL, NULL); break; default: - sym = resolve_symbol(thread, &map, &ip); + sym = resolve_symbol(thread, NULL, &ip); break; } @@ -529,7 +529,7 @@ hist_entry__add(struct thread *thread, struct map *map, struct hist_entry *he; if ((sort__has_parent || callchain) && chain) - syms = resolve_callchain(thread, map, chain, &parent); + syms = resolve_callchain(thread, chain, &parent); he = __hist_entry__add(thread, map, sym, parent, ip, count, level, &hit); From 81516c5fc83a13a1d12f466aa7e14f5fd62a63ce Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 22 Nov 2009 14:13:35 +0200 Subject: [PATCH 349/470] perf: Use default compiler mode by default gcc with no flags typically is a sane default for systems to use, and looking at the running kernel is probably broken for cross-builds anyway, so let's not do this. Add EXTRA_CFLAGS so that users can override default gcc mode if they want to. Signed-off-by: Michael S. Tsirkin Acked-by: Arjan van de Ven Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091122121335.GA24254@redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 20 +++----------------- 1 file changed, 3 insertions(+), 17 deletions(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index fce4c3f051e..3ef6621bf6c 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -148,6 +148,8 @@ all:: # broken, or spawning external process is slower than built-in grep perf has). # # Define LDFLAGS=-static to build a static binary. +# +# Define EXTRA_CFLAGS=-m64 or EXTRA_CFLAGS=-m32 as appropriate for cross-builds. PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE @$(SHELL_PATH) util/PERF-VERSION-GEN @@ -160,22 +162,6 @@ uname_R := $(shell sh -c 'uname -r 2>/dev/null || echo not') uname_P := $(shell sh -c 'uname -p 2>/dev/null || echo not') uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not') -# -# Add -m32 for cross-builds: -# -ifdef NO_64BIT - MBITS := -m32 -else - # - # If we're on a 64-bit kernel (except ia64), use -m64: - # - ifneq ($(uname_M),ia64) - ifneq ($(patsubst %64,%,$(uname_M)),$(uname_M)) - MBITS := -m64 - endif - endif -endif - # CFLAGS and LDFLAGS are for the users to override from the command line. 
# @@ -212,7 +198,7 @@ ifndef PERF_DEBUG CFLAGS_OPTIMIZE = -O6 endif -CFLAGS = $(MBITS) -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) +CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) EXTLIBS = -lpthread -lrt -lelf -lm ALL_CFLAGS = $(CFLAGS) ALL_LDFLAGS = $(LDFLAGS) From bfd451184d80301d1ae970b1ebffde1e9c6240f9 Mon Sep 17 00:00:00 2001 From: Simon Kaempflein Date: Mon, 16 Nov 2009 15:25:53 +1000 Subject: [PATCH 350/470] perf record, x86: Print more intelligent error message when sampling fails Print more accurate error message when "perf record" fails because there is no APIC support, on x86. Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 87f98fdb051..0e519c667e3 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -307,6 +307,12 @@ try_again: printf("\n"); error("perfcounter syscall returned with %d (%s)\n", fd[nr_cpu][counter], strerror(err)); + +#if defined(__i386__) || defined(__x86_64__) + if (attr->type == PERF_TYPE_HARDWARE && err == EOPNOTSUPP) + die("No hardware sampling interrupt available. No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.\n"); +#endif + die("No CONFIG_PERF_EVENTS=y kernel support configured?\n"); exit(-1); } From 6e3d8330ae2c4b2c11a9577a0130d2ecda1c610d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 23 Nov 2009 10:19:20 +0100 Subject: [PATCH 351/470] perf events: Do not generate function trace entries in perf code Decreases perf overhead when function tracing is enabled, by about 50%. Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/Makefile | 1 + kernel/Makefile | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 68537e957a9..1d2cb383410 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -5,6 +5,7 @@ # Don't trace early stages of a secondary CPU boot ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_common.o = -pg +CFLAGS_REMOVE_perf_event.o = -pg endif # Make sure load_percpu_segment has no stackprotector diff --git a/kernel/Makefile b/kernel/Makefile index 17b575ec7d0..6b7ce8173df 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -21,6 +21,7 @@ CFLAGS_REMOVE_mutex-debug.o = -pg CFLAGS_REMOVE_rtmutex-debug.o = -pg CFLAGS_REMOVE_cgroup-debug.o = -pg CFLAGS_REMOVE_sched_clock.o = -pg +CFLAGS_REMOVE_perf_event.o = -pg endif obj-$(CONFIG_FREEZER) += freezer.o From a4234bfcf4d72a10a99176cdef007345e9c3b4aa Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 23 Nov 2009 10:57:59 +0100 Subject: [PATCH 352/470] perf_events: Optimize the swcounter hotpath The structure init creates a bit memcpy, which shows up big time in perf annotate output: : ffffffff810a859d <__perf_sw_event>: 1.68 : ffffffff810a859d: 55 push %rbp 1.69 : ffffffff810a859e: 41 89 fa mov %edi,%r10d 0.01 : ffffffff810a85a1: 49 89 c9 mov %rcx,%r9 0.00 : ffffffff810a85a4: 31 c0 xor %eax,%eax 1.71 : ffffffff810a85a6: b9 16 00 00 00 mov $0x16,%ecx 0.00 : ffffffff810a85ab: 48 89 e5 mov %rsp,%rbp 0.00 : ffffffff810a85ae: 48 83 ec 60 sub $0x60,%rsp 1.52 : ffffffff810a85b2: 48 8d 7d a0 lea -0x60(%rbp),%rdi 85.20 : ffffffff810a85b6: f3 ab rep stos %eax,%es:(%rdi) 
None of the callees depends on the structure being pre-initialized, so only initialize ->addr. This gets rid of the memcpy overhead. Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index abe1ef47496..20df8aba8da 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -3954,12 +3954,12 @@ out: void __perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) { - struct perf_sample_data data = { - .addr = addr, - }; + struct perf_sample_data data; - do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, - &data, regs); + data.addr = addr; + data.raw = NULL; + + do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs); } static void perf_swevent_read(struct perf_event *event) From a66a3052e2d4c5815d7ad26887b1d4193206e691 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 23 Nov 2009 11:37:23 +0100 Subject: [PATCH 353/470] perf_events: Undo copy/paste damage We had two almost identical functions, avoid the duplication. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Arnaldo Carvalho de Melo LKML-Reference: <20091123103819.537537928@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 37 ++++++++++++------------------------- 1 file changed, 12 insertions(+), 25 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 20df8aba8da..e2daa10bb5c 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1704,31 +1704,6 @@ static void free_event(struct perf_event *event) call_rcu(&event->rcu_head, free_event_rcu); } -/* - * Called when the last reference to the file is gone. - */ -static int perf_release(struct inode *inode, struct file *file) -{ - struct perf_event *event = file->private_data; - struct perf_event_context *ctx = event->ctx; - - file->private_data = NULL; - - WARN_ON_ONCE(ctx->parent_ctx); - mutex_lock(&ctx->mutex); - perf_event_remove_from_context(event); - mutex_unlock(&ctx->mutex); - - mutex_lock(&event->owner->perf_event_mutex); - list_del_init(&event->owner_entry); - mutex_unlock(&event->owner->perf_event_mutex); - put_task_struct(event->owner); - - free_event(event); - - return 0; -} - int perf_event_release_kernel(struct perf_event *event) { struct perf_event_context *ctx = event->ctx; @@ -1749,6 +1724,18 @@ int perf_event_release_kernel(struct perf_event *event) } EXPORT_SYMBOL_GPL(perf_event_release_kernel); +/* + * Called when the last reference to the file is gone. 
+ */ +static int perf_release(struct inode *inode, struct file *file) +{ + struct perf_event *event = file->private_data; + + file->private_data = NULL; + + return perf_event_release_kernel(event); +} + static int perf_event_read_size(struct perf_event *event) { int entry = sizeof(u64); /* value */ From 6c2bfcbe58e0dd39554be88940149f5aa11e17d1 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 23 Nov 2009 11:37:24 +0100 Subject: [PATCH 354/470] perf_events: Fix style nits Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker LKML-Reference: <20091123103819.613427378@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index e2daa10bb5c..1f14481c233 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -447,9 +447,8 @@ retry: * can remove the event safely, if the call above did not * succeed. */ - if (!list_empty(&event->group_entry)) { + if (!list_empty(&event->group_entry)) list_del_event(event, ctx); - } spin_unlock_irq(&ctx->lock); } @@ -1033,10 +1032,10 @@ void __perf_event_sched_out(struct perf_event_context *ctx, update_context_time(ctx); perf_disable(); - if (ctx->nr_active) + if (ctx->nr_active) { list_for_each_entry(event, &ctx->group_list, group_entry) group_sched_out(event, cpuctx, ctx); - + } perf_enable(); out: spin_unlock(&ctx->lock); From 2e2af50b1fab3c40636839a7f439c167ae559533 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 23 Nov 2009 11:37:25 +0100 Subject: [PATCH 355/470] perf_events: Disable events when we detach them If we leave the event in STATE_INACTIVE, any read of the event after the detach will increase the running count but not the enabled count and cause funny scaling artefacts. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker LKML-Reference: <20091123103819.689055515@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 1f14481c233..fb851ec3446 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -294,6 +294,8 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx) if (event->group_leader != event) event->group_leader->nr_siblings--; + event->state = PERF_EVENT_STATE_OFF; + /* * If this was a group event with sibling events then * upgrade the siblings to singleton events by adding them From 5e942bb33371254a474653123cd9e13a4c89ee44 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 23 Nov 2009 11:37:26 +0100 Subject: [PATCH 356/470] perf_events: Update the context time on exit It appeared we did call update_event_times() on exit, but we failed to update the context time, which renders the former moot. Locking is a bit iffy, we call update_event_times under ctx->mutex instead of ctx->lock - the next patch fixes this. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker LKML-Reference: <20091123103819.764207355@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index fb851ec3446..8be2574b89b 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4983,6 +4983,7 @@ void perf_event_exit_task(struct task_struct *child) * the events from it. 
*/ unclone_ctx(child_ctx); + update_context_time(child_ctx); spin_unlock_irqrestore(&child_ctx->lock, flags); /* From f67218c3e93abaf0f480bb94b53d234853ffe4de Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 23 Nov 2009 11:37:27 +0100 Subject: [PATCH 357/470] perf_events: Fix __perf_event_exit_task() vs. update_event_times() locking Move the update_event_times() call in __perf_event_exit_task() into list_del_event() because that holds the proper lock (ctx->lock) and seems a more natural place to do the last time update. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker LKML-Reference: <20091123103819.842455480@chello.nl> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 78 ++++++++++++++++++++++----------------------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 8be2574b89b..50f11b5f8c3 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -246,6 +246,44 @@ static void perf_unpin_context(struct perf_event_context *ctx) put_ctx(ctx); } +static inline u64 perf_clock(void) +{ + return cpu_clock(smp_processor_id()); +} + +/* + * Update the record of the current time in a context. + */ +static void update_context_time(struct perf_event_context *ctx) +{ + u64 now = perf_clock(); + + ctx->time += now - ctx->timestamp; + ctx->timestamp = now; +} + +/* + * Update the total_time_enabled and total_time_running fields for a event. + */ +static void update_event_times(struct perf_event *event) +{ + struct perf_event_context *ctx = event->ctx; + u64 run_end; + + if (event->state < PERF_EVENT_STATE_INACTIVE || + event->group_leader->state < PERF_EVENT_STATE_INACTIVE) + return; + + event->total_time_enabled = ctx->time - event->tstamp_enabled; + + if (event->state == PERF_EVENT_STATE_INACTIVE) + run_end = event->tstamp_stopped; + else + run_end = ctx->time; + + event->total_time_running = run_end - event->tstamp_running; +} + /* * Add a event from the lists for its context. * Must be called with ctx->mutex and ctx->lock held. @@ -294,6 +332,7 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx) if (event->group_leader != event) event->group_leader->nr_siblings--; + update_event_times(event); event->state = PERF_EVENT_STATE_OFF; /* @@ -454,44 +493,6 @@ retry: spin_unlock_irq(&ctx->lock); } -static inline u64 perf_clock(void) -{ - return cpu_clock(smp_processor_id()); -} - -/* - * Update the record of the current time in a context. - */ -static void update_context_time(struct perf_event_context *ctx) -{ - u64 now = perf_clock(); - - ctx->time += now - ctx->timestamp; - ctx->timestamp = now; -} - -/* - * Update the total_time_enabled and total_time_running fields for a event. - */ -static void update_event_times(struct perf_event *event) -{ - struct perf_event_context *ctx = event->ctx; - u64 run_end; - - if (event->state < PERF_EVENT_STATE_INACTIVE || - event->group_leader->state < PERF_EVENT_STATE_INACTIVE) - return; - - event->total_time_enabled = ctx->time - event->tstamp_enabled; - - if (event->state == PERF_EVENT_STATE_INACTIVE) - run_end = event->tstamp_stopped; - else - run_end = ctx->time; - - event->total_time_running = run_end - event->tstamp_running; -} - /* * Update total_time_enabled and total_time_running for all events in a group. 
*/ @@ -4931,7 +4932,6 @@ __perf_event_exit_task(struct perf_event *child_event, { struct perf_event *parent_event; - update_event_times(child_event); perf_event_remove_from_context(child_event); parent_event = child_event->parent; From 4ed7c92d68a5387ba5f7030dc76eab03558e27f5 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 23 Nov 2009 11:37:29 +0100 Subject: [PATCH 358/470] perf_events: Undo some recursion damage Make perf_swevent_get_recursion_context return a context number and disable preemption. This could be used to remove the IRQ disable from the trace bit and index the per-cpu buffer with. Signed-off-by: Peter Zijlstra Cc: Frederic Weisbecker Cc: Paul Mackerras LKML-Reference: <20091123103819.993226816@chello.nl> Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 8 ++-- include/trace/ftrace.h | 17 +++++---- kernel/perf_event.c | 71 ++++++++++++++++------------------- kernel/trace/trace_kprobe.c | 14 ++++--- kernel/trace/trace_syscalls.c | 14 ++++--- 5 files changed, 61 insertions(+), 63 deletions(-) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 74e98b1d339..43adbd7f001 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -874,8 +874,8 @@ extern int perf_output_begin(struct perf_output_handle *handle, extern void perf_output_end(struct perf_output_handle *handle); extern void perf_output_copy(struct perf_output_handle *handle, const void *buf, unsigned int len); -extern int perf_swevent_get_recursion_context(int **recursion); -extern void perf_swevent_put_recursion_context(int *recursion); +extern int perf_swevent_get_recursion_context(void); +extern void perf_swevent_put_recursion_context(int rctx); #else static inline void perf_event_task_sched_in(struct task_struct *task, int cpu) { } @@ -904,8 +904,8 @@ static inline void perf_event_mmap(struct vm_area_struct *vma) { } static inline void perf_event_comm(struct task_struct *tsk) { } static inline void perf_event_fork(struct task_struct *tsk) { } static inline void perf_event_init(void) { } -static int perf_swevent_get_recursion_context(int **recursion) { return -1; } -static void perf_swevent_put_recursion_context(int *recursion) { } +static inline int perf_swevent_get_recursion_context(void) { return -1; } +static inline void perf_swevent_put_recursion_context(int rctx) { } #endif diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index c222ef5238b..c3417c13e3e 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -724,8 +724,8 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ static void ftrace_profile_##call(proto) \ { \ struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ - extern int perf_swevent_get_recursion_context(int **recursion); \ - extern void perf_swevent_put_recursion_context(int *recursion); \ + extern int perf_swevent_get_recursion_context(void); \ + extern void perf_swevent_put_recursion_context(int rctx); \ struct ftrace_event_call *event_call = &event_##call; \ extern void perf_tp_event(int, u64, u64, void *, int); \ struct ftrace_raw_##call *entry; \ @@ -736,8 +736,8 @@ static void ftrace_profile_##call(proto) \ int __data_size; \ char *trace_buf; \ char *raw_data; \ - int *recursion; \ int __cpu; \ + int rctx; \ int pc; \ \ pc = preempt_count(); \ @@ -753,8 +753,9 @@ static void ftrace_profile_##call(proto) \ \ local_irq_save(irq_flags); \ \ - if (perf_swevent_get_recursion_context(&recursion)) \ - goto end_recursion; \ + rctx = perf_swevent_get_recursion_context(); \ + if (rctx < 0) \ + 
goto end_recursion; \ \ __cpu = smp_processor_id(); \ \ @@ -781,9 +782,9 @@ static void ftrace_profile_##call(proto) \ perf_tp_event(event_call->id, __addr, __count, entry, \ __entry_size); \ \ -end: \ - perf_swevent_put_recursion_context(recursion); \ -end_recursion: \ +end: \ + perf_swevent_put_recursion_context(rctx); \ +end_recursion: \ local_irq_restore(irq_flags); \ \ } diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 50f11b5f8c3..0b0d5f72fe7 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -3869,45 +3869,50 @@ static void perf_swevent_ctx_event(struct perf_event_context *ctx, } } -/* - * Must be called with preemption disabled - */ -int perf_swevent_get_recursion_context(int **recursion) +int perf_swevent_get_recursion_context(void) { - struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); + struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); + int rctx; if (in_nmi()) - *recursion = &cpuctx->recursion[3]; + rctx = 3; else if (in_irq()) - *recursion = &cpuctx->recursion[2]; + rctx = 2; else if (in_softirq()) - *recursion = &cpuctx->recursion[1]; + rctx = 1; else - *recursion = &cpuctx->recursion[0]; + rctx = 0; - if (**recursion) + if (cpuctx->recursion[rctx]) { + put_cpu_var(perf_cpu_context); return -1; + } - (**recursion)++; + cpuctx->recursion[rctx]++; + barrier(); - return 0; + return rctx; } EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); -void perf_swevent_put_recursion_context(int *recursion) +void perf_swevent_put_recursion_context(int rctx) { - (*recursion)--; + struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); + barrier(); + cpuctx->recursion[rctx]++; + put_cpu_var(perf_cpu_context); } EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context); -static void __do_perf_sw_event(enum perf_type_id type, u32 event_id, - u64 nr, int nmi, - struct perf_sample_data *data, - struct pt_regs *regs) +static void do_perf_sw_event(enum perf_type_id type, u32 event_id, + u64 nr, int nmi, + struct perf_sample_data *data, + struct pt_regs *regs) { + struct perf_cpu_context *cpuctx; struct perf_event_context *ctx; - struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); + cpuctx = &__get_cpu_var(perf_cpu_context); rcu_read_lock(); perf_swevent_ctx_event(&cpuctx->ctx, type, event_id, nr, nmi, data, regs); @@ -3921,34 +3926,22 @@ static void __do_perf_sw_event(enum perf_type_id type, u32 event_id, rcu_read_unlock(); } -static void do_perf_sw_event(enum perf_type_id type, u32 event_id, - u64 nr, int nmi, - struct perf_sample_data *data, - struct pt_regs *regs) -{ - int *recursion; - - preempt_disable(); - - if (perf_swevent_get_recursion_context(&recursion)) - goto out; - - __do_perf_sw_event(type, event_id, nr, nmi, data, regs); - - perf_swevent_put_recursion_context(recursion); -out: - preempt_enable(); -} - void __perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) { struct perf_sample_data data; + int rctx; + + rctx = perf_swevent_get_recursion_context(); + if (rctx < 0) + return; data.addr = addr; data.raw = NULL; do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs); + + perf_swevent_put_recursion_context(rctx); } static void perf_swevent_read(struct perf_event *event) @@ -4172,7 +4165,7 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record, regs = task_pt_regs(current); /* Trace events already protected against recursion */ - __do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, + do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, 
&data, regs); } EXPORT_SYMBOL_GPL(perf_tp_event); diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 22e6f68b05b..79ce6a2bd74 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1213,7 +1213,7 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp, unsigned long irq_flags; char *trace_buf; char *raw_data; - int *recursion; + int rctx; pc = preempt_count(); __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); @@ -1229,7 +1229,8 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp, */ local_irq_save(irq_flags); - if (perf_swevent_get_recursion_context(&recursion)) + rctx = perf_swevent_get_recursion_context(); + if (rctx < 0) goto end_recursion; __cpu = smp_processor_id(); @@ -1258,7 +1259,7 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp, perf_tp_event(call->id, entry->ip, 1, entry, size); end: - perf_swevent_put_recursion_context(recursion); + perf_swevent_put_recursion_context(rctx); end_recursion: local_irq_restore(irq_flags); @@ -1276,8 +1277,8 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri, int size, __size, i, pc, __cpu; unsigned long irq_flags; char *trace_buf; - int *recursion; char *raw_data; + int rctx; pc = preempt_count(); __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args); @@ -1293,7 +1294,8 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri, */ local_irq_save(irq_flags); - if (perf_swevent_get_recursion_context(&recursion)) + rctx = perf_swevent_get_recursion_context(); + if (rctx < 0) goto end_recursion; __cpu = smp_processor_id(); @@ -1323,7 +1325,7 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri, perf_tp_event(call->id, entry->ret_ip, 1, entry, size); end: - perf_swevent_put_recursion_context(recursion); + perf_swevent_put_recursion_context(rctx); end_recursion: local_irq_restore(irq_flags); diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 41b6dd963da..9189cbe8607 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -481,8 +481,8 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) unsigned long flags; char *trace_buf; char *raw_data; - int *recursion; int syscall_nr; + int rctx; int size; int cpu; @@ -506,7 +506,8 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) /* Protect the per cpu buffer, begin the rcu read side */ local_irq_save(flags); - if (perf_swevent_get_recursion_context(&recursion)) + rctx = perf_swevent_get_recursion_context(); + if (rctx < 0) goto end_recursion; cpu = smp_processor_id(); @@ -530,7 +531,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) perf_tp_event(sys_data->enter_id, 0, 1, rec, size); end: - perf_swevent_put_recursion_context(recursion); + perf_swevent_put_recursion_context(rctx); end_recursion: local_irq_restore(flags); } @@ -582,7 +583,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) int syscall_nr; char *trace_buf; char *raw_data; - int *recursion; + int rctx; int size; int cpu; @@ -609,7 +610,8 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) /* Protect the per cpu buffer, begin the rcu read side */ local_irq_save(flags); - if (perf_swevent_get_recursion_context(&recursion)) + rctx = perf_swevent_get_recursion_context(); + if (rctx < 0) goto end_recursion; cpu = smp_processor_id(); @@ -634,7 +636,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) perf_tp_event(sys_data->exit_id, 0, 1, rec, size); end: - 
perf_swevent_put_recursion_context(recursion); + perf_swevent_put_recursion_context(rctx); end_recursion: local_irq_restore(flags); } From acd1d7c1f8f3d848a3c5327dc09f8c1efb971678 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 23 Nov 2009 15:00:36 +0100 Subject: [PATCH 359/470] perf_events: Restore sanity to scaling land It is quite possible to call update_event_times() on a context that isn't actually running and thereby confuse the thing. perf stat was reporting !100% scale values for software counters (2e2af50b perf_events: Disable events when we detach them, solved the worst of that, but there was still some left). The thing that happens is that because we are not self-reaping (we have a caring parent) there is a time between the last schedule (out) and having do_exit() called which will detach the events. This period would be accounted as enabled,!running because the event->state==INACTIVE, even though !event->ctx->is_active. Similar issues could have been observed by calling read() on a event while the attached task was not scheduled in. Solve this by teaching update_event_times() about ctx->is_active. Signed-off-by: Peter Zijlstra Cc: Paul Mackerras Cc: Mike Galbraith Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <1258984836.4531.480.camel@laptop> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 0b0d5f72fe7..0aafe85362f 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -274,7 +274,12 @@ static void update_event_times(struct perf_event *event) event->group_leader->state < PERF_EVENT_STATE_INACTIVE) return; - event->total_time_enabled = ctx->time - event->tstamp_enabled; + if (ctx->is_active) + run_end = ctx->time; + else + run_end = event->tstamp_stopped; + + event->total_time_enabled = run_end - event->tstamp_enabled; if (event->state == PERF_EVENT_STATE_INACTIVE) run_end = event->tstamp_stopped; From ba6909b719a5ccc0c8100d2895bb7ff557b2eeae Mon Sep 17 00:00:00 2001 From: "K.Prasad" Date: Mon, 23 Nov 2009 21:17:13 +0530 Subject: [PATCH 360/470] hw-breakpoint: Attribute authorship of hw-breakpoint related files Attribute authorship to developers of hw-breakpoint related files. Signed-off-by: K.Prasad Cc: Alan Stern Cc: Frederic Weisbecker LKML-Reference: <20091123154713.GA5593@in.ibm.com> [ v2: moved it to latest -tip ] Signed-off-by: Ingo Molnar --- arch/x86/kernel/hw_breakpoint.c | 4 ++++ kernel/hw_breakpoint.c | 4 ++++ samples/hw_breakpoint/data_breakpoint.c | 2 ++ 3 files changed, 10 insertions(+) diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index 752daebe91c..4d267fb7782 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -16,6 +16,10 @@ * Copyright (C) 2007 Alan Stern * Copyright (C) 2009 IBM Corporation * Copyright (C) 2009 Frederic Weisbecker + * + * Authors: Alan Stern + * K.Prasad + * Frederic Weisbecker */ /* diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index b6d6fa273ee..c16662268d7 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c @@ -18,6 +18,10 @@ * Copyright (C) 2009, Frederic Weisbecker * * Thanks to Ingo Molnar for his many suggestions. 
+ * + * Authors: Alan Stern + * K.Prasad + * Frederic Weisbecker */ /* diff --git a/samples/hw_breakpoint/data_breakpoint.c b/samples/hw_breakpoint/data_breakpoint.c index 5bc9819a819..95063818bcf 100644 --- a/samples/hw_breakpoint/data_breakpoint.c +++ b/samples/hw_breakpoint/data_breakpoint.c @@ -23,6 +23,8 @@ * that variable. * * Copyright (C) IBM Corporation, 2009 + * + * Author: K.Prasad */ #include /* Needed by all modules */ #include /* Needed for KERN_INFO */ From e6db4876575f3fdd5b1df2cbff826df95ab9af6a Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Mon, 23 Nov 2009 15:42:32 +0100 Subject: [PATCH 361/470] hw-breakpoints: Include only linux/perf_event.h from kernel part of bp headers As userspace only needs the breakpoints enum types from the breakpoints headers. Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Paul Mackerras Cc: Prasad LKML-Reference: <1258987355-8751-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- include/linux/hw_breakpoint.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h index 4659e0c55ea..76a48ab9a81 100644 --- a/include/linux/hw_breakpoint.h +++ b/include/linux/hw_breakpoint.h @@ -1,8 +1,6 @@ #ifndef _LINUX_HW_BREAKPOINT_H #define _LINUX_HW_BREAKPOINT_H -#include - enum { HW_BREAKPOINT_LEN_1 = 1, HW_BREAKPOINT_LEN_2 = 2, @@ -19,6 +17,8 @@ enum { #ifdef __KERNEL__ #ifdef CONFIG_HAVE_HW_BREAKPOINT +#include + static inline unsigned long hw_breakpoint_addr(struct perf_event *bp) { return bp->attr.bp_addr; From fdf6bc95229821e3d9405eba28925b76e92b74d0 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Mon, 23 Nov 2009 15:42:33 +0100 Subject: [PATCH 362/470] hw-breakpoints: Check the breakpoint params from perf tools Perf tools create perf events as disabled in the beginning. Breakpoints are then considered like ptrace temporary breakpoints, only meant to reserve a breakpoint slot until we get all the necessary informations from the user. In this case, we don't check the address that is breakpointed as it is NULL in the ptrace case. But perf tools don't have the same purpose, events are created disabled to wait for all events to be created before enabling all of them. We want to check the breakpoint parameters in this case. Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Paul Mackerras Cc: Prasad LKML-Reference: <1258987355-8751-2-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- kernel/hw_breakpoint.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index c16662268d7..06d372fc026 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c @@ -267,7 +267,16 @@ int __register_perf_hw_breakpoint(struct perf_event *bp) if (ret) return ret; - if (!bp->attr.disabled) + /* + * Ptrace breakpoints can be temporary perf events only + * meant to reserve a slot. In this case, it is created disabled and + * we don't want to check the params right now (as we put a null addr) + * But perf tools create events as disabled and we want to check + * the params for them. 
+ * This is a quick hack that will be removed soon, once we remove + * the tmp breakpoints from ptrace + */ + if (!bp->attr.disabled || bp->callback == perf_bp_event) ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task); return ret; From f5ffe02e5046003ae7e2ce70d3d1c2a73331268b Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Mon, 23 Nov 2009 15:42:34 +0100 Subject: [PATCH 363/470] perf: Add kernel side syscall events support for breakpoints Add the remaining necessary bits to support breakpoints created through perf syscall. We don't use the software counter interface as: - We don't need to check against recursion, this is already done in hardware breakpoints arch level. - We already know the perf event we are dealing with when the event is to be committed. Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Paul Mackerras Cc: Prasad LKML-Reference: <1258987355-8751-3-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 34 +++++++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 0aafe85362f..9425c9600c8 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -3831,6 +3831,20 @@ static int perf_swevent_is_counting(struct perf_event *event) static int perf_tp_event_match(struct perf_event *event, struct perf_sample_data *data); +static int perf_exclude_event(struct perf_event *event, + struct pt_regs *regs) +{ + if (regs) { + if (event->attr.exclude_user && user_mode(regs)) + return 1; + + if (event->attr.exclude_kernel && !user_mode(regs)) + return 1; + } + + return 0; +} + static int perf_swevent_match(struct perf_event *event, enum perf_type_id type, u32 event_id, @@ -3842,16 +3856,12 @@ static int perf_swevent_match(struct perf_event *event, if (event->attr.type != type) return 0; + if (event->attr.config != event_id) return 0; - if (regs) { - if (event->attr.exclude_user && user_mode(regs)) - return 0; - - if (event->attr.exclude_kernel && !user_mode(regs)) - return 0; - } + if (perf_exclude_event(event, regs)) + return 0; if (event->attr.type == PERF_TYPE_TRACEPOINT && !perf_tp_event_match(event, data)) @@ -4282,9 +4292,15 @@ static const struct pmu *bp_perf_event_init(struct perf_event *bp) return &perf_ops_bp; } -void perf_bp_event(struct perf_event *bp, void *regs) +void perf_bp_event(struct perf_event *bp, void *data) { - /* TODO */ + struct perf_sample_data sample; + struct pt_regs *regs = data; + + sample.addr = bp->attr.bp_addr; + + if (!perf_exclude_event(bp, regs)) + perf_swevent_add(bp, 1, 1, &sample, regs); } #else static void bp_perf_event_destroy(struct perf_event *event) From 1b290d670ffa883b7e062177463a8efd00eaa2c1 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Mon, 23 Nov 2009 15:42:35 +0100 Subject: [PATCH 364/470] perf tools: Add support for breakpoint events in perf tools Add the breakpoint events support with this new sysnopsis: mem:addr[:access] Where addr is a raw addr value in the kernel and access can be either [r][w][x] Example to profile tasklist_lock: $ grep tasklist_lock /proc/kallsyms ffffffff8189c000 D tasklist_lock $ perf record -e mem:0xffffffff8189c000:rw -a -f -c 1 $ perf report # Samples: 62 # # Overhead Command Shared Object Symbol # ........ ............... ............. ...... 
# 29.03% swapper [kernel] [k] _raw_read_trylock 29.03% swapper [kernel] [k] _raw_read_unlock 19.35% init [kernel] [k] _raw_read_trylock 19.35% init [kernel] [k] _raw_read_unlock 1.61% events/0 [kernel] [k] _raw_read_trylock 1.61% events/0 [kernel] [k] _raw_read_unlock Coming soon: - Support for symbols in the event definition. - Default period to 1 for breakpoint events because these are not high frequency events. The same thing is needed for trace events. Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Paul Mackerras Cc: Prasad LKML-Reference: <1258987355-8751-4-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Paul Mackerras Cc: Prasad --- tools/perf/Documentation/perf-record.txt | 16 +++-- tools/perf/util/parse-events.c | 84 +++++++++++++++++++++++- 2 files changed, 95 insertions(+), 5 deletions(-) diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt index 0ff23de9e45..fc46c0b40f6 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt @@ -26,11 +26,19 @@ OPTIONS -e:: --event=:: - Select the PMU event. Selection can be a symbolic event name - (use 'perf list' to list all events) or a raw PMU - event (eventsel+umask) in the form of rNNN where NNN is a - hexadecimal event descriptor. + Select the PMU event. Selection can be: + - a symbolic event name (use 'perf list' to list all events) + + - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a + hexadecimal event descriptor. + + - a hardware breakpoint event in the form of '\mem:addr[:access]' + where addr is the address in memory you want to break in. + Access is the memory access type (read, write, execute) it can + be passed as follows: '\mem:addr[:[r][w][x]]'. + If you want to profile read-write accesses in 0x1000, just set + 'mem:0x1000:rw'. -a:: System-wide collection. 
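A quick usage sketch of the syntax documented above, reusing the tasklist_lock address from the changelog (any kernel data address looked up in /proc/kallsyms works the same way):

    # read/write breakpoint, one sample per hit, system wide
    $ perf record -e mem:0xffffffff8189c000:rw -a -c 1
    # write-only breakpoint on the same address
    $ perf record -e mem:0xffffffff8189c000:w -a -c 1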
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 0faf4f2bb5c..07002746927 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -1,4 +1,4 @@ - +#include "../../../include/linux/hw_breakpoint.h" #include "util.h" #include "../perf.h" #include "parse-options.h" @@ -540,6 +540,81 @@ static enum event_result parse_tracepoint_event(const char **strp, attr, strp); } +static enum event_result +parse_breakpoint_type(const char *type, const char **strp, + struct perf_event_attr *attr) +{ + int i; + + for (i = 0; i < 3; i++) { + if (!type[i]) + break; + + switch (type[i]) { + case 'r': + attr->bp_type |= HW_BREAKPOINT_R; + break; + case 'w': + attr->bp_type |= HW_BREAKPOINT_W; + break; + case 'x': + attr->bp_type |= HW_BREAKPOINT_X; + break; + default: + return EVT_FAILED; + } + } + if (!attr->bp_type) /* Default */ + attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W; + + *strp = type + i; + + return EVT_HANDLED; +} + +static enum event_result +parse_breakpoint_event(const char **strp, struct perf_event_attr *attr) +{ + const char *target; + const char *type; + char *endaddr; + u64 addr; + enum event_result err; + + target = strchr(*strp, ':'); + if (!target) + return EVT_FAILED; + + if (strncmp(*strp, "mem", target - *strp) != 0) + return EVT_FAILED; + + target++; + + addr = strtoull(target, &endaddr, 0); + if (target == endaddr) + return EVT_FAILED; + + attr->bp_addr = addr; + *strp = endaddr; + + type = strchr(target, ':'); + + /* If no type is defined, just rw as default */ + if (!type) { + attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W; + } else { + err = parse_breakpoint_type(++type, strp, attr); + if (err == EVT_FAILED) + return EVT_FAILED; + } + + /* We should find a nice way to override the access type */ + attr->bp_len = HW_BREAKPOINT_LEN_4; + attr->type = PERF_TYPE_BREAKPOINT; + + return EVT_HANDLED; +} + static int check_events(const char *str, unsigned int i) { int n; @@ -673,6 +748,10 @@ parse_event_symbols(const char **str, struct perf_event_attr *attr) if (ret != EVT_FAILED) goto modifier; + ret = parse_breakpoint_event(str, attr); + if (ret != EVT_FAILED) + goto modifier; + fprintf(stderr, "invalid or unsupported event: '%s'\n", *str); fprintf(stderr, "Run 'perf list' for a list of valid events\n"); return EVT_FAILED; @@ -859,6 +938,9 @@ void print_events(void) "rNNN"); printf("\n"); + printf(" %-42s [hardware breakpoint]\n", "mem:[:access]"); + printf("\n"); + print_tracepoint_events(); exit(129); From 0444c9bd0cf4e0eb946a7fcaf34765accfa9404a Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Fri, 20 Nov 2009 14:03:05 +0000 Subject: [PATCH 365/470] x86: Tighten conditionals on MCE related statistics irq_thermal_count is only being maintained when X86_THERMAL_VECTOR, and both X86_THERMAL_VECTOR and X86_MCE_THRESHOLD don't need extra wrapping in X86_MCE conditionals. 
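Put differently, after this change the two counters are guarded independently instead of being nested under CONFIG_X86_MCE; a minimal sketch of the resulting layout in irq_cpustat_t (the real hunk follows below):

    #ifdef CONFIG_X86_THERMAL_VECTOR
    	unsigned int irq_thermal_count;
    #endif
    #ifdef CONFIG_X86_MCE_THRESHOLD
    	unsigned int irq_threshold_count;
    #endif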
Signed-off-by: Jan Beulich Cc: Hidetoshi Seto Cc: Yong Wang Cc: Suresh Siddha Cc: Andi Kleen Cc: Borislav Petkov Cc: Arjan van de Ven LKML-Reference: <4B06AFA902000078000211F8@vpn.id2.novell.com> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/hardirq.h | 6 +++--- arch/x86/kernel/irq.c | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h index 82e3e8f0104..108eb6fd1ae 100644 --- a/arch/x86/include/asm/hardirq.h +++ b/arch/x86/include/asm/hardirq.h @@ -20,11 +20,11 @@ typedef struct { unsigned int irq_call_count; unsigned int irq_tlb_count; #endif -#ifdef CONFIG_X86_MCE +#ifdef CONFIG_X86_THERMAL_VECTOR unsigned int irq_thermal_count; -# ifdef CONFIG_X86_MCE_THRESHOLD +#endif +#ifdef CONFIG_X86_MCE_THRESHOLD unsigned int irq_threshold_count; -# endif #endif } ____cacheline_aligned irq_cpustat_t; diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 04bbd527856..19212cb0155 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -92,17 +92,17 @@ static int show_other_interrupts(struct seq_file *p, int prec) seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count); seq_printf(p, " TLB shootdowns\n"); #endif -#ifdef CONFIG_X86_MCE +#ifdef CONFIG_X86_THERMAL_VECTOR seq_printf(p, "%*s: ", prec, "TRM"); for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count); seq_printf(p, " Thermal event interrupts\n"); -# ifdef CONFIG_X86_MCE_THRESHOLD +#endif +#ifdef CONFIG_X86_MCE_THRESHOLD seq_printf(p, "%*s: ", prec, "THR"); for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count); seq_printf(p, " Threshold APIC interrupts\n"); -# endif #endif #ifdef CONFIG_X86_MCE seq_printf(p, "%*s: ", prec, "MCE"); @@ -194,11 +194,11 @@ u64 arch_irq_stat_cpu(unsigned int cpu) sum += irq_stats(cpu)->irq_call_count; sum += irq_stats(cpu)->irq_tlb_count; #endif -#ifdef CONFIG_X86_MCE +#ifdef CONFIG_X86_THERMAL_VECTOR sum += irq_stats(cpu)->irq_thermal_count; -# ifdef CONFIG_X86_MCE_THRESHOLD +#endif +#ifdef CONFIG_X86_MCE_THRESHOLD sum += irq_stats(cpu)->irq_threshold_count; -# endif #endif #ifdef CONFIG_X86_MCE sum += per_cpu(mce_exception_count, cpu); From cc612d8199089413719397c9d92e5823da578eac Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 23 Nov 2009 16:39:10 -0200 Subject: [PATCH 366/470] perf symbols: Look for vmlinux in more places MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that we can check the buildid to see if it really matches, this can be done safely: vmlinux /boot/vmlinux /boot/vmlinux- /lib/modules//build/vmlinux /usr/lib/debug/lib/modules/%s/vmlinux More can be added - if you know about distros that put the vmlinux somewhere else please let us know. 
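The search order, written out as the code below builds it (a sketch only; %s stands for uts.release, the running kernel version, and the first two entries take no release suffix):

    /* Candidate vmlinux locations, tried first to last (sketch). */
    static const char *vmlinux_path_fmt[] = {
    	"vmlinux",
    	"/boot/vmlinux",
    	"/boot/vmlinux-%s",
    	"/lib/modules/%s/build/vmlinux",
    	"/usr/lib/debug/lib/modules/%s/vmlinux",
    };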
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1259001550-8194-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 3 +- tools/perf/builtin-kmem.c | 2 +- tools/perf/builtin-report.c | 6 +- tools/perf/builtin-sched.c | 3 +- tools/perf/builtin-top.c | 3 +- tools/perf/builtin-trace.c | 3 +- tools/perf/util/data_map.c | 4 +- tools/perf/util/data_map.h | 2 + tools/perf/util/header.c | 2 +- tools/perf/util/symbol.c | 113 ++++++++++++++++++++++++++++++---- tools/perf/util/symbol.h | 4 +- 11 files changed, 122 insertions(+), 23 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 203152729a6..6b13a1ecf1e 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -37,6 +37,7 @@ static bool use_modules; static unsigned long page_size; static unsigned long mmap_window = 32; +const char *vmlinux_name; struct sym_hist { u64 sum; @@ -637,7 +638,7 @@ static int __cmd_annotate(void) exit(0); } - if (kernel_maps__init(use_modules) < 0) { + if (kernel_maps__init(vmlinux_name, true, use_modules) < 0) { pr_err("failed to create kernel maps for symbol resolution\b"); return -1; } diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index 4145049e7bf..5d8aeae5000 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -291,7 +291,7 @@ static int read_events(void) register_idle_thread(); register_perf_file_handler(&file_handler); - return mmap_dispatch_perf_file(&header, input_name, 0, 0, + return mmap_dispatch_perf_file(&header, input_name, NULL, false, 0, 0, &cwdlen, &cwd); } diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 7e690f73b51..fe474b7f8ad 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -52,6 +52,7 @@ static char *pretty_printing_style = default_pretty_printing_style; static int exclude_other = 1; static char callchain_default_opt[] = "fractal,0.5"; +const char *vmlinux_name; static char *cwd; static int cwdlen; @@ -925,8 +926,9 @@ static int __cmd_report(void) register_perf_file_handler(&file_handler); - ret = mmap_dispatch_perf_file(&header, input_name, force, full_paths, - &cwdlen, &cwd); + ret = mmap_dispatch_perf_file(&header, input_name, vmlinux_name, + !vmlinux_name, force, + full_paths, &cwdlen, &cwd); if (ret) return ret; diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index df44b756cec..260f57a72ee 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -1718,7 +1718,8 @@ static int read_events(void) register_idle_thread(); register_perf_file_handler(&file_handler); - return mmap_dispatch_perf_file(&header, input_name, 0, 0, &cwdlen, &cwd); + return mmap_dispatch_perf_file(&header, input_name, NULL, false, 0, 0, + &cwdlen, &cwd); } static void print_bad_events(void) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index ea49c2e9dda..eef9caab6ee 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -79,6 +79,7 @@ static int dump_symtab = 0; static bool hide_kernel_symbols = false; static bool hide_user_symbols = false; static struct winsize winsize; +const char *vmlinux_name; static const char *graph_line = "_____________________________________________________________________" "_____________________________________________________________________"; @@ -1341,7 +1342,7 @@ int cmd_top(int argc, const char **argv, const char *prefix 
__used) if (delay_secs < 1) delay_secs = 1; - err = kernel_maps__init(true); + err = kernel_maps__init(vmlinux_name, !vmlinux_name, true); if (err < 0) return err; parse_source(sym_filter_entry); diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index d042d656c56..b71198e5dc1 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -131,7 +131,8 @@ static int __cmd_trace(void) register_idle_thread(); register_perf_file_handler(&file_handler); - return mmap_dispatch_perf_file(&header, input_name, 0, 0, &cwdlen, &cwd); + return mmap_dispatch_perf_file(&header, input_name, NULL, false, + 0, 0, &cwdlen, &cwd); } static const char * const annotate_usage[] = { diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c index e7b6c2bea3d..f318d19b256 100644 --- a/tools/perf/util/data_map.c +++ b/tools/perf/util/data_map.c @@ -101,6 +101,8 @@ out: int mmap_dispatch_perf_file(struct perf_header **pheader, const char *input_name, + const char *vmlinux_name, + bool try_vmlinux_path, int force, int full_paths, int *cwdlen, @@ -171,7 +173,7 @@ int mmap_dispatch_perf_file(struct perf_header **pheader, goto out_delete; err = -ENOMEM; - if (kernel_maps__init(true) < 0) { + if (kernel_maps__init(vmlinux_name, try_vmlinux_path, true) < 0) { pr_err("failed to setup the kernel maps to resolve symbols\n"); goto out_delete; } diff --git a/tools/perf/util/data_map.h b/tools/perf/util/data_map.h index ae036ecd762..3f0d21b3819 100644 --- a/tools/perf/util/data_map.h +++ b/tools/perf/util/data_map.h @@ -23,6 +23,8 @@ struct perf_file_handler { void register_perf_file_handler(struct perf_file_handler *handler); int mmap_dispatch_perf_file(struct perf_header **pheader, const char *input_name, + const char *vmlinux_name, + bool try_vmlinux_path, int force, int full_paths, int *cwdlen, diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index ac3410b8e9e..1332f8ec04a 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -257,7 +257,7 @@ static int perf_header__adds_write(struct perf_header *self, int fd) * Read the kernel buildid nad the list of loaded modules with * its build_ids: */ - kernel_maps__init(true); + kernel_maps__init(NULL, false, true); /* Write build-ids */ buildid_sec->offset = lseek(fd, 0, SEEK_CUR); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 74b5b8a1695..44d81d5ae8c 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -34,6 +34,8 @@ static void kernel_maps__insert(struct map *map); static int dso__load_kernel_sym(struct dso *self, struct map *map, symbol_filter_t filter); unsigned int symbol__priv_size; +static int vmlinux_path__nr_entries; +static char **vmlinux_path; static struct rb_root kernel_maps; @@ -1386,15 +1388,43 @@ static int dso__load_vmlinux(struct dso *self, struct map *map, static int dso__load_kernel_sym(struct dso *self, struct map *map, symbol_filter_t filter) { - int err = dso__load_vmlinux(self, map, self->name, filter); + int err; + bool is_kallsyms; + if (vmlinux_path != NULL) { + int i; + pr_debug("Looking at the vmlinux_path (%d entries long)\n", + vmlinux_path__nr_entries); + for (i = 0; i < vmlinux_path__nr_entries; ++i) { + err = dso__load_vmlinux(self, map, vmlinux_path[i], + filter); + if (err > 0) { + pr_debug("Using %s for symbols\n", + vmlinux_path[i]); + dso__set_long_name(self, + strdup(vmlinux_path[i])); + goto out_fixup; + } + } + } + + is_kallsyms = self->long_name[0] == '['; + if (is_kallsyms) + goto do_kallsyms; + + err = 
dso__load_vmlinux(self, map, self->long_name, filter); if (err <= 0) { + pr_info("The file %s cannot be used, " + "trying to use /proc/kallsyms...", self->long_name); + sleep(2); +do_kallsyms: err = kernel_maps__load_kallsyms(filter); - if (err > 0) + if (err > 0 && !is_kallsyms) dso__set_long_name(self, strdup("[kernel.kallsyms]")); } if (err > 0) { +out_fixup: map__fixup_start(map); map__fixup_end(map); } @@ -1403,9 +1433,7 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, } LIST_HEAD(dsos); -struct dso *vdso; - -const char *vmlinux_name = "vmlinux"; +struct dso *vdso; static void dsos__add(struct dso *dso) { @@ -1457,9 +1485,9 @@ size_t dsos__fprintf_buildid(FILE *fp) return ret; } -static int kernel_maps__create_kernel_map(void) +static int kernel_maps__create_kernel_map(const char *vmlinux_name) { - struct dso *kernel = dso__new(vmlinux_name); + struct dso *kernel = dso__new(vmlinux_name ?: "[kernel.kallsyms]"); if (kernel == NULL) return -1; @@ -1468,10 +1496,10 @@ static int kernel_maps__create_kernel_map(void) if (kernel_map == NULL) goto out_delete_kernel_dso; - kernel_map->map_ip = kernel_map->unmap_ip = identity__map_ip; + kernel_map->map_ip = kernel_map->unmap_ip = identity__map_ip; + kernel->short_name = "[kernel]"; + kernel->kernel = 1; - kernel->short_name = "[kernel]"; - kernel->kernel = 1; vdso = dso__new("[vdso]"); if (vdso == NULL) goto out_delete_kernel_map; @@ -1494,11 +1522,72 @@ out_delete_kernel_dso: return -1; } -int kernel_maps__init(bool use_modules) +static void vmlinux_path__exit(void) { - if (kernel_maps__create_kernel_map() < 0) + while (--vmlinux_path__nr_entries >= 0) { + free(vmlinux_path[vmlinux_path__nr_entries]); + vmlinux_path[vmlinux_path__nr_entries] = NULL; + } + + free(vmlinux_path); + vmlinux_path = NULL; +} + +static int vmlinux_path__init(void) +{ + struct utsname uts; + char bf[PATH_MAX]; + + if (uname(&uts) < 0) return -1; + vmlinux_path = malloc(sizeof(char *) * 5); + if (vmlinux_path == NULL) + return -1; + + vmlinux_path[vmlinux_path__nr_entries] = strdup("vmlinux"); + if (vmlinux_path[vmlinux_path__nr_entries] == NULL) + goto out_fail; + ++vmlinux_path__nr_entries; + vmlinux_path[vmlinux_path__nr_entries] = strdup("/boot/vmlinux"); + if (vmlinux_path[vmlinux_path__nr_entries] == NULL) + goto out_fail; + ++vmlinux_path__nr_entries; + snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", uts.release); + vmlinux_path[vmlinux_path__nr_entries] = strdup(bf); + if (vmlinux_path[vmlinux_path__nr_entries] == NULL) + goto out_fail; + ++vmlinux_path__nr_entries; + snprintf(bf, sizeof(bf), "/lib/modules/%s/build/vmlinux", uts.release); + vmlinux_path[vmlinux_path__nr_entries] = strdup(bf); + if (vmlinux_path[vmlinux_path__nr_entries] == NULL) + goto out_fail; + ++vmlinux_path__nr_entries; + snprintf(bf, sizeof(bf), "/usr/lib/debug/lib/modules/%s/vmlinux", + uts.release); + vmlinux_path[vmlinux_path__nr_entries] = strdup(bf); + if (vmlinux_path[vmlinux_path__nr_entries] == NULL) + goto out_fail; + ++vmlinux_path__nr_entries; + + return 0; + +out_fail: + vmlinux_path__exit(); + return -1; +} + +int kernel_maps__init(const char *vmlinux_name, bool try_vmlinux_path, + bool use_modules) +{ + if (try_vmlinux_path && vmlinux_path__init() < 0) + return -1; + + if (kernel_maps__create_kernel_map(vmlinux_name) < 0) { + vmlinux_path__exit(); + return -1; + } + if (use_modules && kernel_maps__create_module_maps() < 0) pr_debug("Failed to load list of modules in use, " "continuing...\n"); diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h 
index 7a129047c47..8c4d026e067 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -93,7 +93,8 @@ int sysfs__read_build_id(const char *filename, void *bf, size_t size); bool dsos__read_build_ids(void); int build_id__sprintf(u8 *self, int len, char *bf); -int kernel_maps__init(bool use_modules); +int kernel_maps__init(const char *vmlinux_name, bool try_vmlinux_path, + bool use_modules); size_t kernel_maps__fprintf(FILE *fp); void symbol__init(unsigned int priv_size); @@ -101,5 +102,4 @@ void symbol__init(unsigned int priv_size); extern struct list_head dsos; extern struct map *kernel_map; extern struct dso *vdso; -extern const char *vmlinux_name; #endif /* __PERF_SYMBOL */ From 2890284bcf5c13c10fae8a0c20ad2f575118a092 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 23 Nov 2009 17:51:08 -0200 Subject: [PATCH 367/470] perf tools: Move graph_line and graph_dotted_line from top MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit So that they can be used in other tools. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1259005869-13487-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 7 ------- tools/perf/util/ctype.c | 8 ++++++++ tools/perf/util/util.h | 3 +++ 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index eef9caab6ee..6a5de90e9b8 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -80,13 +80,6 @@ static bool hide_kernel_symbols = false; static bool hide_user_symbols = false; static struct winsize winsize; const char *vmlinux_name; -static const char *graph_line = - "_____________________________________________________________________" - "_____________________________________________________________________"; -static const char *graph_dotted_line = - "---------------------------------------------------------------------" - "---------------------------------------------------------------------" - "---------------------------------------------------------------------"; /* * Source diff --git a/tools/perf/util/ctype.c b/tools/perf/util/ctype.c index 0b791bd346b..35073621e5d 100644 --- a/tools/perf/util/ctype.c +++ b/tools/perf/util/ctype.c @@ -29,3 +29,11 @@ unsigned char sane_ctype[256] = { A, A, A, A, A, A, A, A, A, A, A, R, R, P, P, 0, /* 112..127 */ /* Nothing in the 128.. range */ }; + +const char *graph_line = + "_____________________________________________________________________" + "_____________________________________________________________________"; +const char *graph_dotted_line = + "---------------------------------------------------------------------" + "---------------------------------------------------------------------" + "---------------------------------------------------------------------"; diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index f2203a0946b..e1c623e0c99 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h @@ -84,6 +84,9 @@ #include #endif +extern const char *graph_line; +extern const char *graph_dotted_line; + /* On most systems would have given us this, but * not on some systems (e.g. GNU/Hurd). 
*/ From 1b145ae58035f30353d78d25bea665091df9b438 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 23 Nov 2009 17:51:09 -0200 Subject: [PATCH 368/470] perf kmem: Resolve symbols MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit E.g.: [root@doppio linux-2.6-tip]# perf kmem record sleep 3s [ perf record: Woken up 2 times to write data ] [ perf record: Captured and wrote 0.804 MB perf.data (~35105 samples) ] [root@doppio linux-2.6-tip]# perf kmem --stat caller | head -10 ------------------------------------------------------------------------------ Callsite |Total_alloc/Per | Total_req/Per | Hit | Frag ------------------------------------------------------------------------------ getname/40 | 1519616/4096 | 1519616/4096 | 371| 0.000% seq_read/a2 | 987136/4096 | 987136/4096 | 241| 0.000% __netdev_alloc_skb/43 | 260368/1049 | 259968/1048 | 248| 0.154% __alloc_skb/5a | 77312/256 | 77312/256 | 302| 0.000% proc_alloc_inode/33 | 76480/632 | 76472/632 | 121| 0.010% get_empty_filp/8d | 70272/192 | 70272/192 | 366| 0.000% split_vma/8e | 42064/176 | 42064/176 | 239| 0.000% [root@doppio linux-2.6-tip]# Signed-off-by: Arnaldo Carvalho de Melo Acked-by: Pekka Enberg Cc: Eduard - Gabriel Munteanu Cc: Frédéric Weisbecker Cc: linux-mm@kvack.org Cc: Li Zefan Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Steven Rostedt LKML-Reference: <1259005869-13487-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-kmem.c | 35 ++++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index 5d8aeae5000..256d18fa047 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -307,25 +307,34 @@ static void __print_result(struct rb_root *root, int n_lines, int is_caller) { struct rb_node *next; - printf("\n ------------------------------------------------------------------------------\n"); - if (is_caller) - printf(" Callsite |"); - else - printf(" Alloc Ptr |"); - printf(" Total_alloc/Per | Total_req/Per | Hit | Fragmentation\n"); - printf(" ------------------------------------------------------------------------------\n"); + printf("%.78s\n", graph_dotted_line); + printf("%-28s|", is_caller ? "Callsite": "Alloc Ptr"); + printf("Total_alloc/Per | Total_req/Per | Hit | Frag\n"); + printf("%.78s\n", graph_dotted_line); next = rb_first(root); while (next && n_lines--) { - struct alloc_stat *data; + struct alloc_stat *data = rb_entry(next, struct alloc_stat, + node); + struct symbol *sym = NULL; + char bf[BUFSIZ]; + u64 addr; - data = rb_entry(next, struct alloc_stat, node); + if (is_caller) { + addr = data->call_site; + sym = kernel_maps__find_symbol(addr, NULL, NULL); + } else + addr = data->ptr; - printf(" %-16p | %8llu/%-6lu | %8llu/%-6lu | %6lu | %8.3f%%\n", - is_caller ? 
(void *)(unsigned long)data->call_site : - (void *)(unsigned long)data->ptr, - (unsigned long long)data->bytes_alloc, + if (sym != NULL) + snprintf(bf, sizeof(bf), "%s/%Lx", sym->name, + addr - sym->start); + else + snprintf(bf, sizeof(bf), "%#Lx", addr); + + printf("%-28s|%8llu/%-6lu |%8llu/%-6lu|%6lu|%8.3f%%\n", + bf, (unsigned long long)data->bytes_alloc, (unsigned long)data->bytes_alloc / data->hit, (unsigned long long)data->bytes_req, (unsigned long)data->bytes_req / data->hit, From fa7c27ee9394fc0d52404b2a89882e95868a60b9 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Mon, 23 Nov 2009 22:30:12 +0100 Subject: [PATCH 369/470] hw-breakpoints: Fix misordered ifdef Fix a misplaced ifdef. We need the perf event headers also in off-case to avoid the following build error: include/linux/hw_breakpoint.h:94: error: expected declaration specifiers or '...' before 'perf_callback_t' include/linux/hw_breakpoint.h:102: error: expected declaration specifiers or '...' before 'perf_callback_t' include/linux/hw_breakpoint.h:109: error: expected declaration specifiers or '...' before 'perf_callback_t' include/linux/hw_breakpoint.h:116: error: expected declaration specifiers or '...' before 'perf_callback_t' Reported-by: Kisskb-bot by Michael Ellerman Signed-off-by: Frederic Weisbecker Cc: Prasad LKML-Reference: <1259011812-8093-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- include/linux/hw_breakpoint.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h index 76a48ab9a81..c9f7f7c7b0e 100644 --- a/include/linux/hw_breakpoint.h +++ b/include/linux/hw_breakpoint.h @@ -15,10 +15,11 @@ enum { }; #ifdef __KERNEL__ -#ifdef CONFIG_HAVE_HW_BREAKPOINT #include +#ifdef CONFIG_HAVE_HW_BREAKPOINT + static inline unsigned long hw_breakpoint_addr(struct perf_event *bp) { return bp->attr.bp_addr; From ee3d250446f1c1be4eceab48f3a23794d9a6564c Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Tue, 24 Nov 2009 15:19:43 +1100 Subject: [PATCH 370/470] perf tools: Fix compilation on powerpc Currently, perf fails to compile on powerpc with this error: CC util/header.o In file included from util/../perf.h:17, from util/header.c:9: util/../../../arch/powerpc/include/asm/unistd.h:360:27: error: linux/linkage.h: No such file or directory make: *** [util/header.o] Error 1 The reason is that we still have a #define __KERNEL__ in effect at the point where gets included, which means we get extra stuff that we don't need or want. This fixes the problem by undefining __KERNEL__ once we have included the file for which we need __KERNEL__ defined. 
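The shape of the fix, sketched out (only the #undef line is new; the __KERNEL__ define is assumed to sit near the top of the same wrapper header):

    #define __KERNEL__                      /* needed while pulling in the kernel bitops */
    #define CONFIG_GENERIC_FIND_FIRST_BIT
    #include "../../../../include/linux/bitops.h"
    #undef __KERNEL__                       /* don't leak it into later includes such as asm/unistd.h */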
Signed-off-by: Paul Mackerras Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo Cc: Peter Zijlstra LKML-Reference: <19211.24287.453183.78836@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- tools/perf/util/include/linux/bitops.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h index ace57c36d1d..8d63116e943 100644 --- a/tools/perf/util/include/linux/bitops.h +++ b/tools/perf/util/include/linux/bitops.h @@ -7,6 +7,8 @@ #define CONFIG_GENERIC_FIND_FIRST_BIT #include "../../../../include/linux/bitops.h" +#undef __KERNEL__ + static inline void set_bit(int nr, unsigned long *addr) { addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG); From 7707b6b6f8d9188b612f9fc88c65411264b1ed57 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 24 Nov 2009 13:25:48 +0800 Subject: [PATCH 371/470] perf kmem: Add new option to show raw ip Add option "--raw-ip" to show raw ip instead of symbols: # ./perf kmem --stat caller --raw-ip ------------------------------------------------------------------------------ Callsite |Total_alloc/Per | Total_req/Per | Hit | Frag ------------------------------------------------------------------------------ 0xc05301aa | 733184/4096 | 733184/4096 | 179| 0.000% 0xc0542ba0 | 483328/4096 | 483328/4096 | 118| 0.000% ... Also show symbols with format sym+offset instead of sym/offset. Signed-off-by: Li Zefan Acked-by: Pekka Enberg Cc: Eduard - Gabriel Munteanu Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: linux-mm@kvack.org LKML-Reference: <4B0B6E5C.4080900@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-kmem.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index 256d18fa047..1ef43c212d9 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -32,15 +32,14 @@ sort_fn_t caller_sort_fn; static int alloc_lines = -1; static int caller_lines = -1; +static bool raw_ip; + static char *cwd; static int cwdlen; struct alloc_stat { union { - struct { - char *name; - u64 call_site; - }; + u64 call_site; u64 ptr; }; u64 bytes_req; @@ -323,12 +322,14 @@ static void __print_result(struct rb_root *root, int n_lines, int is_caller) if (is_caller) { addr = data->call_site; - sym = kernel_maps__find_symbol(addr, NULL, NULL); + if (!raw_ip) + sym = kernel_maps__find_symbol(addr, + NULL, NULL); } else addr = data->ptr; if (sym != NULL) - snprintf(bf, sizeof(bf), "%s/%Lx", sym->name, + snprintf(bf, sizeof(bf), "%s+%Lx", sym->name, addr - sym->start); else snprintf(bf, sizeof(bf), "%#Lx", addr); @@ -345,9 +346,9 @@ static void __print_result(struct rb_root *root, int n_lines, int is_caller) } if (n_lines == -1) - printf(" ... | ... | ... | ... | ... \n"); + printf(" ... | ... | ... | ... | ... \n"); - printf(" ------------------------------------------------------------------------------\n"); + printf("%.78s\n", graph_dotted_line); } static void print_summary(void) @@ -558,6 +559,7 @@ static const struct option kmem_options[] = { OPT_CALLBACK('l', "line", NULL, "num", "show n lins", parse_line_opt), + OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"), OPT_END() }; From 29b3e15289eb66788a0bf5ea4903f9fbeb1ec751 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 24 Nov 2009 13:26:10 +0800 Subject: [PATCH 372/470] perf kmem: Default to sort by fragmentation Make the output sort by fragmentation by default. 
Also make the usage of "--sort" option consistent with other perf tools. That is, we support multi keys: "--sort key1[,key2]...". # ./perf kmem --stat caller ------------------------------------------------------------------------------ Callsite |Total_alloc/Per | Total_req/Per | Hit | Frag ------------------------------------------------------------------------------ __netdev_alloc_skb+23 | 5048/1682 | 4564/1521 | 3| 9.588% perf_event_alloc.clone.0+0 | 7504/682 | 7128/648 | 11| 5.011% tracepoint_add_probe+32e | 157/31 | 154/30 | 5| 1.911% alloc_buffer_head+16 | 456/57 | 448/56 | 8| 1.754% radix_tree_preload+51 | 584/292 | 576/288 | 2| 1.370% ... TODO: - Extract duplicate code in builtin-kmem.c and builtin-sched.c into util/sort.c. Signed-off-by: Li Zefan Acked-by: Pekka Enberg Cc: Eduard - Gabriel Munteanu Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: linux-mm@kvack.org LKML-Reference: <4B0B6E72.7010200@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-kmem.c | 142 +++++++++++++++++++++++++++++--------- 1 file changed, 108 insertions(+), 34 deletions(-) diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index 1ef43c212d9..dc86f1e64b6 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -26,14 +26,13 @@ static u64 sample_type; static int alloc_flag; static int caller_flag; -sort_fn_t alloc_sort_fn; -sort_fn_t caller_sort_fn; - static int alloc_lines = -1; static int caller_lines = -1; static bool raw_ip; +static char default_sort_order[] = "frag,hit,bytes"; + static char *cwd; static int cwdlen; @@ -371,20 +370,34 @@ static void print_result(void) print_summary(); } +struct sort_dimension { + const char name[20]; + sort_fn_t cmp; + struct list_head list; +}; + +static LIST_HEAD(caller_sort); +static LIST_HEAD(alloc_sort); + static void sort_insert(struct rb_root *root, struct alloc_stat *data, - sort_fn_t sort_fn) + struct list_head *sort_list) { struct rb_node **new = &(root->rb_node); struct rb_node *parent = NULL; + struct sort_dimension *sort; while (*new) { struct alloc_stat *this; - int cmp; + int cmp = 0; this = rb_entry(*new, struct alloc_stat, node); parent = *new; - cmp = sort_fn(data, this); + list_for_each_entry(sort, sort_list, list) { + cmp = sort->cmp(data, this); + if (cmp) + break; + } if (cmp > 0) new = &((*new)->rb_left); @@ -397,7 +410,7 @@ static void sort_insert(struct rb_root *root, struct alloc_stat *data, } static void __sort_result(struct rb_root *root, struct rb_root *root_sorted, - sort_fn_t sort_fn) + struct list_head *sort_list) { struct rb_node *node; struct alloc_stat *data; @@ -409,14 +422,14 @@ static void __sort_result(struct rb_root *root, struct rb_root *root_sorted, rb_erase(node, root); data = rb_entry(node, struct alloc_stat, node); - sort_insert(root_sorted, data, sort_fn); + sort_insert(root_sorted, data, sort_list); } } static void sort_result(void) { - __sort_result(&root_alloc_stat, &root_alloc_sorted, alloc_sort_fn); - __sort_result(&root_caller_stat, &root_caller_sorted, caller_sort_fn); + __sort_result(&root_alloc_stat, &root_alloc_sorted, &alloc_sort); + __sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort); } static int __cmd_kmem(void) @@ -434,7 +447,6 @@ static const char * const kmem_usage[] = { NULL }; - static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r) { if (l->ptr < r->ptr) @@ -444,6 +456,11 @@ static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r) return 0; } +static struct sort_dimension ptr_sort_dimension = { + .name = "ptr", + .cmp = ptr_cmp, 
+}; + static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r) { if (l->call_site < r->call_site) @@ -453,6 +470,11 @@ static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r) return 0; } +static struct sort_dimension callsite_sort_dimension = { + .name = "callsite", + .cmp = callsite_cmp, +}; + static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r) { if (l->hit < r->hit) @@ -462,6 +484,11 @@ static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r) return 0; } +static struct sort_dimension hit_sort_dimension = { + .name = "hit", + .cmp = hit_cmp, +}; + static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r) { if (l->bytes_alloc < r->bytes_alloc) @@ -471,6 +498,11 @@ static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r) return 0; } +static struct sort_dimension bytes_sort_dimension = { + .name = "bytes", + .cmp = bytes_cmp, +}; + static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r) { double x, y; @@ -485,31 +517,73 @@ static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r) return 0; } +static struct sort_dimension frag_sort_dimension = { + .name = "frag", + .cmp = frag_cmp, +}; + +static struct sort_dimension *avail_sorts[] = { + &ptr_sort_dimension, + &callsite_sort_dimension, + &hit_sort_dimension, + &bytes_sort_dimension, + &frag_sort_dimension, +}; + +#define NUM_AVAIL_SORTS \ + (int)(sizeof(avail_sorts) / sizeof(struct sort_dimension *)) + +static int sort_dimension__add(const char *tok, struct list_head *list) +{ + struct sort_dimension *sort; + int i; + + for (i = 0; i < NUM_AVAIL_SORTS; i++) { + if (!strcmp(avail_sorts[i]->name, tok)) { + sort = malloc(sizeof(*sort)); + if (!sort) + die("malloc"); + memcpy(sort, avail_sorts[i], sizeof(*sort)); + list_add_tail(&sort->list, list); + return 0; + } + } + + return -1; +} + +static int setup_sorting(struct list_head *sort_list, const char *arg) +{ + char *tok; + char *str = strdup(arg); + + if (!str) + die("strdup"); + + while (true) { + tok = strsep(&str, ","); + if (!tok) + break; + if (sort_dimension__add(tok, sort_list) < 0) { + error("Unknown --sort key: '%s'", tok); + return -1; + } + } + + free(str); + return 0; +} + static int parse_sort_opt(const struct option *opt __used, const char *arg, int unset __used) { - sort_fn_t sort_fn; - if (!arg) return -1; - if (strcmp(arg, "ptr") == 0) - sort_fn = ptr_cmp; - else if (strcmp(arg, "call_site") == 0) - sort_fn = callsite_cmp; - else if (strcmp(arg, "hit") == 0) - sort_fn = hit_cmp; - else if (strcmp(arg, "bytes") == 0) - sort_fn = bytes_cmp; - else if (strcmp(arg, "frag") == 0) - sort_fn = frag_cmp; - else - return -1; - if (caller_flag > alloc_flag) - caller_sort_fn = sort_fn; + return setup_sorting(&caller_sort, arg); else - alloc_sort_fn = sort_fn; + return setup_sorting(&alloc_sort, arg); return 0; } @@ -553,8 +627,8 @@ static const struct option kmem_options[] = { OPT_CALLBACK(0, "stat", NULL, "|", "stat selector, Pass 'alloc' or 'caller'.", parse_stat_opt), - OPT_CALLBACK('s', "sort", NULL, "key", - "sort by key: ptr, call_site, hit, bytes, frag", + OPT_CALLBACK('s', "sort", NULL, "key[,key2...]", + "sort by key(s): ptr, call_site, bytes, hit, frag", parse_sort_opt), OPT_CALLBACK('l', "line", NULL, "num", "show n lins", @@ -606,10 +680,10 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __used) else if (argc) usage_with_options(kmem_usage, kmem_options); - if (!alloc_sort_fn) - alloc_sort_fn = bytes_cmp; - if (!caller_sort_fn) - caller_sort_fn = bytes_cmp; + if (list_empty(&caller_sort)) + 
setup_sorting(&caller_sort, default_sort_order); + if (list_empty(&alloc_sort)) + setup_sorting(&alloc_sort, default_sort_order); return __cmd_kmem(); } From 7d0d39459dab20bf60cac30a1a7d50b286c60cc1 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 24 Nov 2009 13:26:31 +0800 Subject: [PATCH 373/470] perf kmem: Collect cross node allocation statistics Show cross node memory allocations: # ./perf kmem SUMMARY ======= ... Cross node allocations: 0/3633 Signed-off-by: Li Zefan Acked-by: Pekka Enberg Cc: Eduard - Gabriel Munteanu Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: linux-mm@kvack.org LKML-Reference: <4B0B6E87.10906@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-kmem.c | 81 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 79 insertions(+), 2 deletions(-) diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index dc86f1e64b6..1ecf3f4415c 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -36,6 +36,9 @@ static char default_sort_order[] = "frag,hit,bytes"; static char *cwd; static int cwdlen; +static int *cpunode_map; +static int max_cpu_num; + struct alloc_stat { union { u64 call_site; @@ -54,12 +57,74 @@ static struct rb_root root_caller_stat; static struct rb_root root_caller_sorted; static unsigned long total_requested, total_allocated; +static unsigned long nr_allocs, nr_cross_allocs; struct raw_event_sample { u32 size; char data[0]; }; +#define PATH_SYS_NODE "/sys/devices/system/node" + +static void init_cpunode_map(void) +{ + FILE *fp; + int i; + + fp = fopen("/sys/devices/system/cpu/kernel_max", "r"); + if (!fp) { + max_cpu_num = 4096; + return; + } + + if (fscanf(fp, "%d", &max_cpu_num) < 1) + die("Failed to read 'kernel_max' from sysfs"); + max_cpu_num++; + + cpunode_map = calloc(max_cpu_num, sizeof(int)); + if (!cpunode_map) + die("calloc"); + for (i = 0; i < max_cpu_num; i++) + cpunode_map[i] = -1; + fclose(fp); +} + +static void setup_cpunode_map(void) +{ + struct dirent *dent1, *dent2; + DIR *dir1, *dir2; + unsigned int cpu, mem; + char buf[PATH_MAX]; + + init_cpunode_map(); + + dir1 = opendir(PATH_SYS_NODE); + if (!dir1) + return; + + while (true) { + dent1 = readdir(dir1); + if (!dent1) + break; + + if (sscanf(dent1->d_name, "node%u", &mem) < 1) + continue; + + snprintf(buf, PATH_MAX, "%s/%s", PATH_SYS_NODE, dent1->d_name); + dir2 = opendir(buf); + if (!dir2) + continue; + while (true) { + dent2 = readdir(dir2); + if (!dent2) + break; + if (sscanf(dent2->d_name, "cpu%u", &cpu) < 1) + continue; + cpunode_map[cpu] = mem; + } + } +} + static int process_comm_event(event_t *event, unsigned long offset, unsigned long head) { @@ -157,15 +222,16 @@ static void insert_caller_stat(unsigned long call_site, static void process_alloc_event(struct raw_event_sample *raw, struct event *event, - int cpu __used, + int cpu, u64 timestamp __used, struct thread *thread __used, - int node __used) + int node) { unsigned long call_site; unsigned long ptr; int bytes_req; int bytes_alloc; + int node1, node2; ptr = raw_field_value(event, "ptr", raw->data); call_site = raw_field_value(event, "call_site", raw->data); @@ -177,6 +243,14 @@ static void process_alloc_event(struct raw_event_sample *raw, total_requested += bytes_req; total_allocated += bytes_alloc; + + if (node) { + node1 = cpunode_map[cpu]; + node2 = raw_field_value(event, "node", raw->data); + if (node1 != node2) + nr_cross_allocs++; + } + nr_allocs++; } static void process_free_event(struct raw_event_sample *raw __used, @@ -359,6 +433,7 @@ static void print_summary(void) 
total_allocated - total_requested); printf("Internal fragmentation: %f%%\n", fragmentation(total_requested, total_allocated)); + printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs); } static void print_result(void) @@ -685,6 +760,8 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __used) if (list_empty(&alloc_sort)) setup_sorting(&alloc_sort, default_sort_order); + setup_cpunode_map(); + return __cmd_kmem(); } From 079d3f653134e2f2ac99dae28b08c0cc64268103 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 24 Nov 2009 13:26:55 +0800 Subject: [PATCH 374/470] perf kmem: Measure kmalloc/kfree CPU ping-pong call-sites Show statistics for allocations and frees on different cpus: ------------------------------------------------------------------------------------------------------ Callsite | Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag ------------------------------------------------------------------------------------------------------ perf_event_alloc.clone.0+0 | 7504/682 | 7128/648 | 11 | 0 | 5.011% alloc_buffer_head+16 | 288/57 | 280/56 | 5 | 0 | 2.778% radix_tree_preload+51 | 296/296 | 288/288 | 1 | 0 | 2.703% tracepoint_add_probe+32e | 157/31 | 154/30 | 5 | 0 | 1.911% do_maps_open+0 | 796/12 | 792/12 | 66 | 0 | 0.503% sock_alloc_send_pskb+16e | 23780/495 | 23744/494 | 48 | 38 | 0.151% anon_vma_prepare+9a | 3744/44 | 3740/44 | 85 | 0 | 0.107% d_alloc+21 | 64948/164 | 64944/164 | 396 | 0 | 0.006% proc_alloc_inode+23 | 262292/676 | 262288/676 | 388 | 0 | 0.002% create_object+28 | 459600/200 | 459600/200 | 2298 | 71 | 0.000% journal_start+67 | 14440/40 | 14440/40 | 361 | 0 | 0.000% get_empty_filp+df | 53504/256 | 53504/256 | 209 | 0 | 0.000% getname+2a | 823296/4096 | 823296/4096 | 201 | 0 | 0.000% seq_read+2b0 | 544768/4096 | 544768/4096 | 133 | 0 | 0.000% seq_open+6d | 17024/128 | 17024/128 | 133 | 0 | 0.000% mmap_region+2e6 | 11704/88 | 11704/88 | 133 | 0 | 0.000% single_open+0 | 1072/16 | 1072/16 | 67 | 0 | 0.000% __alloc_skb+2e | 12544/256 | 12544/256 | 49 | 38 | 0.000% __sigqueue_alloc+4a | 1296/144 | 1296/144 | 9 | 8 | 0.000% tracepoint_add_probe+6f | 80/16 | 80/16 | 5 | 0 | 0.000% ------------------------------------------------------------------------------------------------------ ... 
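The "Ping-pong" column counts objects that were freed on a different CPU than the one they were allocated on, which usually means the object bounced between CPU caches. A minimal standalone sketch of that bookkeeping follows (hypothetical struct and helpers, not the actual builtin-kmem.c code, which keeps these counters in its rbtrees of struct alloc_stat):

#include <stdio.h>

/* Hypothetical per-callsite counters; the real struct alloc_stat also
 * tracks bytes_req, bytes_alloc, the rbtree linkage, etc. */
struct callsite_stat {
	unsigned long hit;		/* allocations seen */
	unsigned long pingpong;		/* freed on a CPU != allocating CPU */
};

static void account_alloc(struct callsite_stat *s, int *alloc_cpu, int cpu)
{
	s->hit++;
	*alloc_cpu = cpu;		/* remember the allocating CPU */
}

static void account_free(struct callsite_stat *s, int alloc_cpu, int cpu)
{
	if (cpu != alloc_cpu)		/* object migrated before the free */
		s->pingpong++;
}

int main(void)
{
	struct callsite_stat s = { 0, 0 };
	int alloc_cpu;

	account_alloc(&s, &alloc_cpu, 0);	/* allocation seen on CPU 0 */
	account_free(&s, alloc_cpu, 1);		/* free seen on CPU 1 */
	printf("hit=%lu pingpong=%lu\n", s.hit, s.pingpong);
	return 0;
}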
Signed-off-by: Li Zefan Acked-by: Pekka Enberg Cc: Eduard - Gabriel Munteanu Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: linux-mm@kvack.org LKML-Reference: <4B0B6E9F.6020309@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-kmem.c | 122 +++++++++++++++++++++++++++++--------- 1 file changed, 94 insertions(+), 28 deletions(-) diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index 1ecf3f4415c..173d6db42ec 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -40,13 +40,14 @@ static int *cpunode_map; static int max_cpu_num; struct alloc_stat { - union { - u64 call_site; - u64 ptr; - }; + u64 call_site; + u64 ptr; u64 bytes_req; u64 bytes_alloc; u32 hit; + u32 pingpong; + + short alloc_cpu; struct rb_node node; }; @@ -144,16 +145,13 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head) return 0; } -static void insert_alloc_stat(unsigned long ptr, - int bytes_req, int bytes_alloc) +static void insert_alloc_stat(unsigned long call_site, unsigned long ptr, + int bytes_req, int bytes_alloc, int cpu) { struct rb_node **node = &root_alloc_stat.rb_node; struct rb_node *parent = NULL; struct alloc_stat *data = NULL; - if (!alloc_flag) - return; - while (*node) { parent = *node; data = rb_entry(*node, struct alloc_stat, node); @@ -172,7 +170,10 @@ static void insert_alloc_stat(unsigned long ptr, data->bytes_alloc += bytes_req; } else { data = malloc(sizeof(*data)); + if (!data) + die("malloc"); data->ptr = ptr; + data->pingpong = 0; data->hit = 1; data->bytes_req = bytes_req; data->bytes_alloc = bytes_alloc; @@ -180,6 +181,8 @@ static void insert_alloc_stat(unsigned long ptr, rb_link_node(&data->node, parent, node); rb_insert_color(&data->node, &root_alloc_stat); } + data->call_site = call_site; + data->alloc_cpu = cpu; } static void insert_caller_stat(unsigned long call_site, @@ -189,9 +192,6 @@ static void insert_caller_stat(unsigned long call_site, struct rb_node *parent = NULL; struct alloc_stat *data = NULL; - if (!caller_flag) - return; - while (*node) { parent = *node; data = rb_entry(*node, struct alloc_stat, node); @@ -210,7 +210,10 @@ static void insert_caller_stat(unsigned long call_site, data->bytes_alloc += bytes_req; } else { data = malloc(sizeof(*data)); + if (!data) + die("malloc"); data->call_site = call_site; + data->pingpong = 0; data->hit = 1; data->bytes_req = bytes_req; data->bytes_alloc = bytes_alloc; @@ -238,7 +241,7 @@ static void process_alloc_event(struct raw_event_sample *raw, bytes_req = raw_field_value(event, "bytes_req", raw->data); bytes_alloc = raw_field_value(event, "bytes_alloc", raw->data); - insert_alloc_stat(ptr, bytes_req, bytes_alloc); + insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu); insert_caller_stat(call_site, bytes_req, bytes_alloc); total_requested += bytes_req; @@ -253,12 +256,58 @@ static void process_alloc_event(struct raw_event_sample *raw, nr_allocs++; } -static void process_free_event(struct raw_event_sample *raw __used, - struct event *event __used, - int cpu __used, +static int ptr_cmp(struct alloc_stat *, struct alloc_stat *); +static int callsite_cmp(struct alloc_stat *, struct alloc_stat *); + +static struct alloc_stat *search_alloc_stat(unsigned long ptr, + unsigned long call_site, + struct rb_root *root, + sort_fn_t sort_fn) +{ + struct rb_node *node = root->rb_node; + struct alloc_stat key = { .ptr = ptr, .call_site = call_site }; + + while (node) { + struct alloc_stat *data; + int cmp; + + data = rb_entry(node, struct alloc_stat, node); + + cmp = 
sort_fn(&key, data); + if (cmp < 0) + node = node->rb_left; + else if (cmp > 0) + node = node->rb_right; + else + return data; + } + return NULL; +} + +static void process_free_event(struct raw_event_sample *raw, + struct event *event, + int cpu, u64 timestamp __used, struct thread *thread __used) { + unsigned long ptr; + struct alloc_stat *s_alloc, *s_caller; + + ptr = raw_field_value(event, "ptr", raw->data); + + s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp); + if (!s_alloc) + return; + + if (cpu != s_alloc->alloc_cpu) { + s_alloc->pingpong++; + + s_caller = search_alloc_stat(0, s_alloc->call_site, + &root_caller_stat, callsite_cmp); + assert(s_caller); + s_caller->pingpong++; + } + s_alloc->alloc_cpu = -1; } static void @@ -379,10 +428,10 @@ static void __print_result(struct rb_root *root, int n_lines, int is_caller) { struct rb_node *next; - printf("%.78s\n", graph_dotted_line); - printf("%-28s|", is_caller ? "Callsite": "Alloc Ptr"); - printf("Total_alloc/Per | Total_req/Per | Hit | Frag\n"); - printf("%.78s\n", graph_dotted_line); + printf("%.102s\n", graph_dotted_line); + printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr"); + printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n"); + printf("%.102s\n", graph_dotted_line); next = rb_first(root); @@ -390,7 +439,7 @@ static void __print_result(struct rb_root *root, int n_lines, int is_caller) struct alloc_stat *data = rb_entry(next, struct alloc_stat, node); struct symbol *sym = NULL; - char bf[BUFSIZ]; + char buf[BUFSIZ]; u64 addr; if (is_caller) { @@ -402,26 +451,28 @@ static void __print_result(struct rb_root *root, int n_lines, int is_caller) addr = data->ptr; if (sym != NULL) - snprintf(bf, sizeof(bf), "%s+%Lx", sym->name, + snprintf(buf, sizeof(buf), "%s+%Lx", sym->name, addr - sym->start); else - snprintf(bf, sizeof(bf), "%#Lx", addr); + snprintf(buf, sizeof(buf), "%#Lx", addr); + printf(" %-34s |", buf); - printf("%-28s|%8llu/%-6lu |%8llu/%-6lu|%6lu|%8.3f%%\n", - bf, (unsigned long long)data->bytes_alloc, + printf(" %9llu/%-5lu | %9llu/%-5lu | %6lu | %8lu | %6.3f%%\n", + (unsigned long long)data->bytes_alloc, (unsigned long)data->bytes_alloc / data->hit, (unsigned long long)data->bytes_req, (unsigned long)data->bytes_req / data->hit, (unsigned long)data->hit, + (unsigned long)data->pingpong, fragmentation(data->bytes_req, data->bytes_alloc)); next = rb_next(next); } if (n_lines == -1) - printf(" ... | ... | ... | ... | ... \n"); + printf(" ... | ... | ... | ... | ... | ... 
\n"); - printf("%.78s\n", graph_dotted_line); + printf("%.102s\n", graph_dotted_line); } static void print_summary(void) @@ -597,12 +648,27 @@ static struct sort_dimension frag_sort_dimension = { .cmp = frag_cmp, }; +static int pingpong_cmp(struct alloc_stat *l, struct alloc_stat *r) +{ + if (l->pingpong < r->pingpong) + return -1; + else if (l->pingpong > r->pingpong) + return 1; + return 0; +} + +static struct sort_dimension pingpong_sort_dimension = { + .name = "pingpong", + .cmp = pingpong_cmp, +}; + static struct sort_dimension *avail_sorts[] = { &ptr_sort_dimension, &callsite_sort_dimension, &hit_sort_dimension, &bytes_sort_dimension, &frag_sort_dimension, + &pingpong_sort_dimension, }; #define NUM_AVAIL_SORTS \ @@ -703,7 +769,7 @@ static const struct option kmem_options[] = { "stat selector, Pass 'alloc' or 'caller'.", parse_stat_opt), OPT_CALLBACK('s', "sort", NULL, "key[,key2...]", - "sort by key(s): ptr, call_site, bytes, hit, frag", + "sort by keys: ptr, call_site, bytes, hit, pingpong, frag", parse_sort_opt), OPT_CALLBACK('l', "line", NULL, "num", "show n lins", From b23d5767a5818caec8547d0bce1588b02bdecd30 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 24 Nov 2009 13:27:11 +0800 Subject: [PATCH 375/470] perf kmem: Add help file Add Documentation/perf-kmem.txt Signed-off-by: Li Zefan Acked-by: Pekka Enberg Cc: Eduard - Gabriel Munteanu Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: linux-mm@kvack.org LKML-Reference: <4B0B6EAF.80802@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- tools/perf/Documentation/perf-kmem.txt | 44 ++++++++++++++++++++++++++ tools/perf/command-list.txt | 1 + 2 files changed, 45 insertions(+) create mode 100644 tools/perf/Documentation/perf-kmem.txt diff --git a/tools/perf/Documentation/perf-kmem.txt b/tools/perf/Documentation/perf-kmem.txt new file mode 100644 index 00000000000..44b0ce35c28 --- /dev/null +++ b/tools/perf/Documentation/perf-kmem.txt @@ -0,0 +1,44 @@ +perf-kmem(1) +============== + +NAME +---- +perf-kmem - Tool to trace/measure kernel memory(slab) properties + +SYNOPSIS +-------- +[verse] +'perf kmem' {record} [] + +DESCRIPTION +----------- +There's two variants of perf kmem: + + 'perf kmem record ' to record the kmem events + of an arbitrary workload. + + 'perf kmem' to report kernel memory statistics. + +OPTIONS +------- +-i :: +--input=:: + Select the input file (default: perf.data) + +--stat=:: + Select per callsite or per allocation statistics + +-s :: +--sort=:: + Sort the output (default: frag,hit,bytes) + +-l :: +--line=:: + Print n lines only + +--raw-ip:: + Print raw ip instead of symbol + +SEE ALSO +-------- +linkperf:perf-record[1] diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt index d3a6e18e4a5..02b09ea17a3 100644 --- a/tools/perf/command-list.txt +++ b/tools/perf/command-list.txt @@ -14,3 +14,4 @@ perf-timechart mainporcelain common perf-top mainporcelain common perf-trace mainporcelain common perf-probe mainporcelain common +perf-kmem mainporcelain common From 184d3da8ef0ca552dffa0fdd35c046e058a2cf9a Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Mon, 23 Nov 2009 21:40:49 -0800 Subject: [PATCH 376/470] perf_events: Fix bogus copy_to_user() in perf_event_read_group() When using an event group, the value and id for non leaders events were wrong due to invalid offset into the outgoing buffer. 
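The offset into the outgoing buffer has to be the running count of bytes already copied out, not the size of the chunk currently being copied. A simplified user-space illustration of the difference (a hypothetical packing routine, not the perf_event_read_group() code itself):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pack several {value, id} pairs into one outgoing buffer. */
static size_t pack_values(uint64_t *buf, const uint64_t chunks[][2], int n)
{
	size_t ret = 0;				/* bytes written so far */
	int i;

	for (i = 0; i < n; i++) {
		size_t size = 2 * sizeof(uint64_t);

		/*
		 * Correct: advance by the running total 'ret'.  Using 'size'
		 * as the offset instead would place every chunk after the
		 * first at the same fixed position, overwriting the previous
		 * one and leaving the rest of the buffer stale.
		 */
		memcpy((char *)buf + ret, chunks[i], size);
		ret += size;
	}
	return ret;
}

int main(void)
{
	uint64_t out[8] = { 0 };
	const uint64_t chunks[3][2] = { {10, 1}, {20, 2}, {30, 3} };

	pack_values(out, chunks, 3);
	printf("values: %llu %llu %llu\n",
	       (unsigned long long)out[0],
	       (unsigned long long)out[2],
	       (unsigned long long)out[4]);
	return 0;
}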
Signed-off-by: Stephane Eranian Acked-by: Peter Zijlstra Cc: paulus@samba.org Cc: perfmon2-devel@lists.sourceforge.net LKML-Reference: <4b0b71e1.0508d00a.075e.ffff84a3@mx.google.com> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 9425c9600c8..accfd7bfe38 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1831,7 +1831,7 @@ static int perf_event_read_group(struct perf_event *event, size = n * sizeof(u64); - if (copy_to_user(buf + size, values, size)) { + if (copy_to_user(buf + ret, values, size)) { ret = -EFAULT; goto unlock; } From c9c7ccaf3a2686ed3a44d69bb1f8b55eeead8a4e Mon Sep 17 00:00:00 2001 From: John Kacur Date: Tue, 24 Nov 2009 15:35:00 +0100 Subject: [PATCH 377/470] perf tools: Add perf.data to .gitignore Signed-off-by: John Kacur Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: acme@redhat.com LKML-Reference: <1259073301-11506-2-git-send-email-jkacur@redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore index 0854f110bf7..fe08660ce0b 100644 --- a/tools/perf/.gitignore +++ b/tools/perf/.gitignore @@ -12,6 +12,7 @@ perf*.1 perf*.xml perf*.html common-cmds.h +perf.data tags TAGS cscope* From e74328d3a17ed75ffdf72b86f289965823a47240 Mon Sep 17 00:00:00 2001 From: John Kacur Date: Tue, 24 Nov 2009 15:35:01 +0100 Subject: [PATCH 378/470] perf tools: Use common process_event functions for annotate and report Prevent bit-rot in perf-annotate by using common functions where possible. Here we create process_events.[ch] to hold the common functions. Signed-off-by: John Kacur Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: acme@redhat.com LKML-Reference: <1259073301-11506-3-git-send-email-jkacur@redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 2 + tools/perf/builtin-annotate.c | 56 +--------------------------- tools/perf/builtin-report.c | 63 +------------------------------ tools/perf/util/process_event.c | 53 ++++++++++++++++++++++++++ tools/perf/util/process_event.h | 29 +++++++++++++++ tools/perf/util/process_events.c | 64 ++++++++++++++++++++++++++++++++ tools/perf/util/process_events.h | 35 +++++++++++++++++ 7 files changed, 186 insertions(+), 116 deletions(-) create mode 100644 tools/perf/util/process_event.c create mode 100644 tools/perf/util/process_event.h create mode 100644 tools/perf/util/process_events.c create mode 100644 tools/perf/util/process_events.h diff --git a/tools/perf/Makefile b/tools/perf/Makefile index f1537a94a05..de37d492e10 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -369,6 +369,7 @@ LIB_H += util/sort.h LIB_H += util/hist.h LIB_H += util/thread.h LIB_H += util/data_map.h +LIB_H += util/process_events.h LIB_OBJS += util/abspath.o LIB_OBJS += util/alias.o @@ -411,6 +412,7 @@ LIB_OBJS += util/svghelper.o LIB_OBJS += util/sort.o LIB_OBJS += util/hist.o LIB_OBJS += util/data_map.o +LIB_OBJS += util/process_events.o BUILTIN_OBJS += builtin-annotate.o diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 6b13a1ecf1e..59b6123abec 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -24,6 +24,7 @@ #include "util/thread.h" #include "util/sort.h" #include "util/hist.h" +#include "util/process_events.h" static char const *input_name = "perf.data"; @@ -201,32 +202,6 @@ got_map: return 0; } -static int -process_mmap_event(event_t *event, unsigned long offset, 
unsigned long head) -{ - struct map *map = map__new(&event->mmap, NULL, 0); - struct thread *thread = threads__findnew(event->mmap.pid); - - dump_printf("%p [%p]: PERF_RECORD_MMAP %d: [%p(%p) @ %p]: %s\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->mmap.pid, - (void *)(long)event->mmap.start, - (void *)(long)event->mmap.len, - (void *)(long)event->mmap.pgoff, - event->mmap.filename); - - if (thread == NULL || map == NULL) { - dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); - return 0; - } - - thread__insert_map(thread, map); - total_mmap++; - - return 0; -} - static int process_comm_event(event_t *event, unsigned long offset, unsigned long head) { @@ -247,33 +222,6 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head) return 0; } -static int -process_fork_event(event_t *event, unsigned long offset, unsigned long head) -{ - struct thread *thread = threads__findnew(event->fork.pid); - struct thread *parent = threads__findnew(event->fork.ppid); - - dump_printf("%p [%p]: PERF_RECORD_FORK: %d:%d\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->fork.pid, event->fork.ppid); - - /* - * A thread clone will have the same PID for both - * parent and child. - */ - if (thread == parent) - return 0; - - if (!thread || !parent || thread__fork(thread, parent)) { - dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n"); - return -1; - } - total_fork++; - - return 0; -} - static int process_event(event_t *event, unsigned long offset, unsigned long head) { @@ -288,7 +236,7 @@ process_event(event_t *event, unsigned long offset, unsigned long head) return process_comm_event(event, offset, head); case PERF_RECORD_FORK: - return process_fork_event(event, offset, head); + return process_task_event(event, offset, head); /* * We dont process them right now but they are fine: */ diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index fe474b7f8ad..1826be719b5 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -30,6 +30,7 @@ #include "util/thread.h" #include "util/sort.h" #include "util/hist.h" +#include "util/process_events.h" static char const *input_name = "perf.data"; @@ -54,9 +55,6 @@ static int exclude_other = 1; static char callchain_default_opt[] = "fractal,0.5"; const char *vmlinux_name; -static char *cwd; -static int cwdlen; - static struct perf_header *header; static u64 sample_type; @@ -750,33 +748,6 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) return 0; } -static int -process_mmap_event(event_t *event, unsigned long offset, unsigned long head) -{ - struct map *map = map__new(&event->mmap, cwd, cwdlen); - struct thread *thread = threads__findnew(event->mmap.pid); - - dump_printf("%p [%p]: PERF_RECORD_MMAP %d/%d: [%p(%p) @ %p]: %s\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->mmap.pid, - event->mmap.tid, - (void *)(long)event->mmap.start, - (void *)(long)event->mmap.len, - (void *)(long)event->mmap.pgoff, - event->mmap.filename); - - if (thread == NULL || map == NULL) { - dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); - return 0; - } - - thread__insert_map(thread, map); - total_mmap++; - - return 0; -} - static int process_comm_event(event_t *event, unsigned long offset, unsigned long head) { @@ -797,38 +768,6 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head) return 0; } -static int -process_task_event(event_t *event, 
unsigned long offset, unsigned long head) -{ - struct thread *thread = threads__findnew(event->fork.pid); - struct thread *parent = threads__findnew(event->fork.ppid); - - dump_printf("%p [%p]: PERF_RECORD_%s: (%d:%d):(%d:%d)\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->header.type == PERF_RECORD_FORK ? "FORK" : "EXIT", - event->fork.pid, event->fork.tid, - event->fork.ppid, event->fork.ptid); - - /* - * A thread clone will have the same PID for both - * parent and child. - */ - if (thread == parent) - return 0; - - if (event->header.type == PERF_RECORD_EXIT) - return 0; - - if (!thread || !parent || thread__fork(thread, parent)) { - dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n"); - return -1; - } - total_fork++; - - return 0; -} - static int process_lost_event(event_t *event, unsigned long offset, unsigned long head) { diff --git a/tools/perf/util/process_event.c b/tools/perf/util/process_event.c new file mode 100644 index 00000000000..a970789581a --- /dev/null +++ b/tools/perf/util/process_event.c @@ -0,0 +1,53 @@ +#include "process_event.h" + +char *cwd; +int cwdlen; + +int +process_mmap_event(event_t *event, unsigned long offset, unsigned long head) +{ + struct map *map = map__new(&event->mmap, cwd, cwdlen); + struct thread *thread = threads__findnew(event->mmap.pid); + + dump_printf("%p [%p]: PERF_RECORD_MMAP %d/%d: [%p(%p) @ %p]: %s\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->mmap.pid, + event->mmap.tid, + (void *)(long)event->mmap.start, + (void *)(long)event->mmap.len, + (void *)(long)event->mmap.pgoff, + event->mmap.filename); + + if (thread == NULL || map == NULL) { + dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); + return 0; + } + + thread__insert_map(thread, map); + total_mmap++; + + return 0; + +} + +int +process_comm_event(event_t *event, unsigned long offset, unsigned long head) +{ + struct thread *thread = threads__findnew(event->comm.pid); + + dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->comm.comm, event->comm.pid); + + if (thread == NULL || + thread__set_comm_adjust(thread, event->comm.comm)) { + dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); + return -1; + } + total_comm++; + + return 0; +} + diff --git a/tools/perf/util/process_event.h b/tools/perf/util/process_event.h new file mode 100644 index 00000000000..6f68c69736c --- /dev/null +++ b/tools/perf/util/process_event.h @@ -0,0 +1,29 @@ +#ifndef __PROCESS_EVENT_H +#define __PROCESS_EVENT_H + +#include "../builtin.h" +#include "util.h" + +#include "color.h" +#include +#include "cache.h" +#include +#include "symbol.h" +#include "string.h" + +#include "../perf.h" +#include "debug.h" + +#include "parse-options.h" +#include "parse-events.h" + +#include "thread.h" +#include "sort.h" +#include "hist.h" + +extern char *cwd; +extern int cwdlen; +extern int process_mmap_event(event_t *, unsigned long, unsigned long); +extern int process_comm_event(event_t *, unsigned long , unsigned long); + +#endif /* __PROCESS_H */ diff --git a/tools/perf/util/process_events.c b/tools/perf/util/process_events.c new file mode 100644 index 00000000000..a9204363efd --- /dev/null +++ b/tools/perf/util/process_events.c @@ -0,0 +1,64 @@ +#include "process_events.h" + +char *cwd; +int cwdlen; + +int +process_mmap_event(event_t *event, unsigned long offset, unsigned long head) +{ + struct map *map = map__new(&event->mmap, cwd, cwdlen); + 
struct thread *thread = threads__findnew(event->mmap.pid); + + dump_printf("%p [%p]: PERF_RECORD_MMAP %d/%d: [%p(%p) @ %p]: %s\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->mmap.pid, + event->mmap.tid, + (void *)(long)event->mmap.start, + (void *)(long)event->mmap.len, + (void *)(long)event->mmap.pgoff, + event->mmap.filename); + + if (thread == NULL || map == NULL) { + dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); + return 0; + } + + thread__insert_map(thread, map); + total_mmap++; + + return 0; +} + +int +process_task_event(event_t *event, unsigned long offset, unsigned long head) +{ + struct thread *thread = threads__findnew(event->fork.pid); + struct thread *parent = threads__findnew(event->fork.ppid); + + dump_printf("%p [%p]: PERF_RECORD_%s: (%d:%d):(%d:%d)\n", + (void *)(offset + head), + (void *)(long)(event->header.size), + event->header.type == PERF_RECORD_FORK ? "FORK" : "EXIT", + event->fork.pid, event->fork.tid, + event->fork.ppid, event->fork.ptid); + + /* + * A thread clone will have the same PID for both + * parent and child. + */ + if (thread == parent) + return 0; + + if (event->header.type == PERF_RECORD_EXIT) + return 0; + + if (!thread || !parent || thread__fork(thread, parent)) { + dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n"); + return -1; + } + total_fork++; + + return 0; +} + diff --git a/tools/perf/util/process_events.h b/tools/perf/util/process_events.h new file mode 100644 index 00000000000..73d092f8328 --- /dev/null +++ b/tools/perf/util/process_events.h @@ -0,0 +1,35 @@ +#ifndef __PROCESS_EVENTS_H +#define __PROCESS_EVENTS_H + +#include "../builtin.h" + +#include "util.h" +#include "color.h" +#include +#include "cache.h" +#include +#include "symbol.h" +#include "string.h" +#include "callchain.h" +#include "strlist.h" +#include "values.h" + +#include "../perf.h" +#include "debug.h" +#include "header.h" + +#include "parse-options.h" +#include "parse-events.h" + +#include "data_map.h" +#include "thread.h" +#include "sort.h" +#include "hist.h" + +extern char *cwd; +extern int cwdlen; + +extern int process_mmap_event(event_t *, unsigned long , unsigned long); +extern int process_task_event(event_t *, unsigned long, unsigned long); + +#endif /* __PROCESS_EVENTS_H */ From 7cc017edb9459193d3b581155a14029e4bef0c49 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 24 Nov 2009 12:05:14 -0200 Subject: [PATCH 379/470] perf top: Always show the DSO column, even if its all the same MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Ingo found it confusing, and I agree with that, for 'perf report' its OK because it is static, but for a tool refreshing it the eventual switch from column to summary at the top may seem confusing. 
Suggested-by: Ingo Molnar Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1259071517-3242-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 38 +++++++++++--------------------------- 1 file changed, 11 insertions(+), 27 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 6a5de90e9b8..b9a321fd184 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -451,9 +451,8 @@ static void print_sym_table(void) struct sym_entry *syme, *n; struct rb_root tmp = RB_ROOT; struct rb_node *nd; - int sym_width = 0, dso_width = 0; + int sym_width = 0, dso_width = 0, max_dso_width; const int win_width = winsize.ws_col - 1; - struct dso *unique_dso = NULL, *first_dso = NULL; samples = userspace_samples = 0; @@ -539,11 +538,6 @@ static void print_sym_table(void) (int)syme->snap_count < count_filter) continue; - if (first_dso == NULL) - unique_dso = first_dso = syme->map->dso; - else if (syme->map->dso != first_dso) - unique_dso = NULL; - if (syme->map->dso->long_name_len > dso_width) dso_width = syme->map->dso->long_name_len; @@ -553,14 +547,10 @@ static void print_sym_table(void) printed = 0; - if (unique_dso) - printf("DSO: %s\n", unique_dso->long_name); - else { - int max_dso_width = winsize.ws_col - sym_width - 29; - if (dso_width > max_dso_width) - dso_width = max_dso_width; - putchar('\n'); - } + max_dso_width = winsize.ws_col - sym_width - 29; + if (dso_width > max_dso_width) + dso_width = max_dso_width; + putchar('\n'); if (nr_counters == 1) printf(" samples pcnt"); else @@ -568,17 +558,13 @@ static void print_sym_table(void) if (verbose) printf(" RIP "); - printf(" %-*.*s", sym_width, sym_width, "function"); - if (!unique_dso) - printf(" DSO"); - putchar('\n'); + printf(" %-*.*s DSO\n", sym_width, sym_width, "function"); printf(" %s _______ _____", nr_counters == 1 ? " " : "______"); if (verbose) printf(" ________________"); printf(" %-*.*s", sym_width, sym_width, graph_line); - if (!unique_dso) - printf(" %-*.*s", dso_width, dso_width, graph_line); + printf(" %-*.*s", dso_width, dso_width, graph_line); puts("\n"); for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) { @@ -603,12 +589,10 @@ static void print_sym_table(void) if (verbose) printf(" %016llx", sym->start); printf(" %-*.*s", sym_width, sym_width, sym->name); - if (!unique_dso) - printf(" %-*.*s", dso_width, dso_width, - dso_width >= syme->map->dso->long_name_len ? - syme->map->dso->long_name : - syme->map->dso->short_name); - printf("\n"); + printf(" %-*.*s\n", dso_width, dso_width, + dso_width >= syme->map->dso->long_name_len ? + syme->map->dso->long_name : + syme->map->dso->short_name); } } From b32d133aec5dc882cf783a293f393bfb3f4379e1 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 24 Nov 2009 12:05:15 -0200 Subject: [PATCH 380/470] perf symbols: Simplify symbol machinery setup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit And also express its configuration toggles via a struct. Now all one has to do is to call symbol__init(NULL) if the defaults are OK, or pass a struct symbol_conf pointer with the desired configuration. If a tool uses kernel_maps__find_symbol() to look at the kernel and modules mappings for a symbol but didn't call symbol__init() first, that will generate a one time warning too, alerting the subcommand developer that symbol__init() must be called. 
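A sketch of what a subcommand's setup looks like after this change, assuming the usual perf includes (util/symbol.h and friends); the per-symbol private struct and the command name are illustrative only:

/* Per-symbol private area the tool wants reserved by symbol__new(). */
struct my_sym_priv {
	u64 hits;
};

static struct symbol_conf symbol_conf = {
	.priv_size        = sizeof(struct my_sym_priv),
	.try_vmlinux_path = true,	/* search the known vmlinux locations */
	.use_modules      = false,	/* don't load module symbol tables */
};

int cmd_example(int argc, const char **argv, const char *prefix __used)
{
	/* Passing NULL instead selects the built-in defaults
	 * (try_vmlinux_path and use_modules both enabled). */
	if (symbol__init(&symbol_conf) < 0)
		return -1;

	/* ... parse options, dispatch the perf.data file, etc ... */
	return 0;
}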
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1259071517-3242-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 20 ++++++++++---------- tools/perf/builtin-kmem.c | 2 +- tools/perf/builtin-report.c | 15 ++++++++------- tools/perf/builtin-sched.c | 2 +- tools/perf/builtin-top.c | 24 +++++++++++++----------- tools/perf/builtin-trace.c | 2 +- tools/perf/util/data_map.c | 8 -------- tools/perf/util/data_map.h | 2 -- tools/perf/util/header.c | 6 ------ tools/perf/util/include/asm/bug.h | 22 ++++++++++++++++++++++ tools/perf/util/symbol.c | 31 +++++++++++++++++++++---------- tools/perf/util/symbol.h | 11 ++++++++--- 12 files changed, 85 insertions(+), 60 deletions(-) create mode 100644 tools/perf/util/include/asm/bug.h diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 59b6123abec..cd97c2b1cc3 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -34,11 +34,9 @@ static int input; static int full_paths; static int print_line; -static bool use_modules; static unsigned long page_size; static unsigned long mmap_window = 32; -const char *vmlinux_name; struct sym_hist { u64 sum; @@ -56,6 +54,11 @@ struct sym_priv { struct sym_ext *ext; }; +static struct symbol_conf symbol_conf = { + .priv_size = sizeof(struct sym_priv), + .try_vmlinux_path = true, +}; + static const char *sym_hist_filter; static int symbol_filter(struct map *map __used, struct symbol *sym) @@ -586,11 +589,6 @@ static int __cmd_annotate(void) exit(0); } - if (kernel_maps__init(vmlinux_name, true, use_modules) < 0) { - pr_err("failed to create kernel maps for symbol resolution\b"); - return -1; - } - remap: buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, MAP_SHARED, input, offset); @@ -691,8 +689,9 @@ static const struct option options[] = { "be more verbose (show symbol address, etc)"), OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), - OPT_STRING('k', "vmlinux", &vmlinux_name, "file", "vmlinux pathname"), - OPT_BOOLEAN('m', "modules", &use_modules, + OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, + "file", "vmlinux pathname"), + OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, "load module symbols - WARNING: use only with -k and LIVE kernel"), OPT_BOOLEAN('l', "print-line", &print_line, "print matching source lines (may be slow)"), @@ -718,7 +717,8 @@ static void setup_sorting(void) int cmd_annotate(int argc, const char **argv, const char *prefix __used) { - symbol__init(sizeof(struct sym_priv)); + if (symbol__init(&symbol_conf) < 0) + return -1; page_size = getpagesize(); diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index 173d6db42ec..330dbc762f9 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -412,7 +412,7 @@ static int read_events(void) register_idle_thread(); register_perf_file_handler(&file_handler); - return mmap_dispatch_perf_file(&header, input_name, NULL, false, 0, 0, + return mmap_dispatch_perf_file(&header, input_name, 0, 0, &cwdlen, &cwd); } diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 1826be719b5..0ee3d05a040 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -39,7 +39,6 @@ static char *dso_list_str, *comm_list_str, *sym_list_str, static struct strlist *dso_list, *comm_list, *sym_list; static int force; -static bool use_modules; static int full_paths; static int 
show_nr_samples; @@ -53,12 +52,13 @@ static char *pretty_printing_style = default_pretty_printing_style; static int exclude_other = 1; static char callchain_default_opt[] = "fractal,0.5"; -const char *vmlinux_name; static struct perf_header *header; static u64 sample_type; +struct symbol_conf symbol_conf; + static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin) @@ -865,8 +865,7 @@ static int __cmd_report(void) register_perf_file_handler(&file_handler); - ret = mmap_dispatch_perf_file(&header, input_name, vmlinux_name, - !vmlinux_name, force, + ret = mmap_dispatch_perf_file(&header, input_name, force, full_paths, &cwdlen, &cwd); if (ret) return ret; @@ -963,9 +962,10 @@ static const struct option options[] = { "be more verbose (show symbol address, etc)"), OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), - OPT_STRING('k', "vmlinux", &vmlinux_name, "file", "vmlinux pathname"), + OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, + "file", "vmlinux pathname"), OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), - OPT_BOOLEAN('m', "modules", &use_modules, + OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, "load module symbols - WARNING: use only with -k and LIVE kernel"), OPT_BOOLEAN('n', "show-nr-samples", &show_nr_samples, "Show a column with the number of samples"), @@ -1035,7 +1035,8 @@ static void setup_list(struct strlist **list, const char *list_str, int cmd_report(int argc, const char **argv, const char *prefix __used) { - symbol__init(0); + if (symbol__init(&symbol_conf) < 0) + return -1; argc = parse_options(argc, argv, options, report_usage, 0); diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 260f57a72ee..dbf089b12de 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -1718,7 +1718,7 @@ static int read_events(void) register_idle_thread(); register_perf_file_handler(&file_handler); - return mmap_dispatch_perf_file(&header, input_name, NULL, false, 0, 0, + return mmap_dispatch_perf_file(&header, input_name, 0, 0, &cwdlen, &cwd); } diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index b9a321fd184..a21247543fc 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -79,7 +79,7 @@ static int dump_symtab = 0; static bool hide_kernel_symbols = false; static bool hide_user_symbols = false; static struct winsize winsize; -const char *vmlinux_name; +struct symbol_conf symbol_conf; /* * Source @@ -128,7 +128,7 @@ struct sym_entry { static inline struct symbol *sym_entry__symbol(struct sym_entry *self) { - return ((void *)self) + symbol__priv_size; + return ((void *)self) + symbol_conf.priv_size; } static void get_term_dimensions(struct winsize *ws) @@ -695,7 +695,7 @@ static void print_mapped_keys(void) fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", count_filter); - if (vmlinux_name) { + if (symbol_conf.vmlinux_name) { fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter); fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL"); fprintf(stdout, "\t[S] stop annotation.\n"); @@ -732,7 +732,7 @@ static int key_mapped(int c) case 'F': case 's': case 'S': - return vmlinux_name ? 1 : 0; + return symbol_conf.vmlinux_name ? 
1 : 0; default: break; } @@ -1261,7 +1261,8 @@ static const struct option options[] = { "system-wide collection from all CPUs"), OPT_INTEGER('C', "CPU", &profile_cpu, "CPU to profile on"), - OPT_STRING('k', "vmlinux", &vmlinux_name, "file", "vmlinux pathname"), + OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, + "file", "vmlinux pathname"), OPT_BOOLEAN('K', "hide_kernel_symbols", &hide_kernel_symbols, "hide kernel symbols"), OPT_INTEGER('m', "mmap-pages", &mmap_pages, @@ -1295,7 +1296,7 @@ static const struct option options[] = { int cmd_top(int argc, const char **argv, const char *prefix __used) { - int counter, err; + int counter; page_size = sysconf(_SC_PAGE_SIZE); @@ -1313,15 +1314,16 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) if (!nr_counters) nr_counters = 1; - symbol__init(sizeof(struct sym_entry) + - (nr_counters + 1) * sizeof(unsigned long)); + symbol_conf.priv_size = (sizeof(struct sym_entry) + + (nr_counters + 1) * sizeof(unsigned long)); + if (symbol_conf.vmlinux_name == NULL) + symbol_conf.try_vmlinux_path = true; + if (symbol__init(&symbol_conf) < 0) + return -1; if (delay_secs < 1) delay_secs = 1; - err = kernel_maps__init(vmlinux_name, !vmlinux_name, true); - if (err < 0) - return err; parse_source(sym_filter_entry); /* diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index b71198e5dc1..75972fd073d 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -131,7 +131,7 @@ static int __cmd_trace(void) register_idle_thread(); register_perf_file_handler(&file_handler); - return mmap_dispatch_perf_file(&header, input_name, NULL, false, + return mmap_dispatch_perf_file(&header, input_name, 0, 0, &cwdlen, &cwd); } diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c index f318d19b256..b238462b898 100644 --- a/tools/perf/util/data_map.c +++ b/tools/perf/util/data_map.c @@ -101,8 +101,6 @@ out: int mmap_dispatch_perf_file(struct perf_header **pheader, const char *input_name, - const char *vmlinux_name, - bool try_vmlinux_path, int force, int full_paths, int *cwdlen, @@ -172,12 +170,6 @@ int mmap_dispatch_perf_file(struct perf_header **pheader, curr_handler->sample_type_check(sample_type) < 0) goto out_delete; - err = -ENOMEM; - if (kernel_maps__init(vmlinux_name, try_vmlinux_path, true) < 0) { - pr_err("failed to setup the kernel maps to resolve symbols\n"); - goto out_delete; - } - if (!full_paths) { if (getcwd(__cwd, sizeof(__cwd)) == NULL) { pr_err("failed to get the current directory\n"); diff --git a/tools/perf/util/data_map.h b/tools/perf/util/data_map.h index 3f0d21b3819..ae036ecd762 100644 --- a/tools/perf/util/data_map.h +++ b/tools/perf/util/data_map.h @@ -23,8 +23,6 @@ struct perf_file_handler { void register_perf_file_handler(struct perf_file_handler *handler); int mmap_dispatch_perf_file(struct perf_header **pheader, const char *input_name, - const char *vmlinux_name, - bool try_vmlinux_path, int force, int full_paths, int *cwdlen, diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 1332f8ec04a..271a1600e6f 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -253,12 +253,6 @@ static int perf_header__adds_write(struct perf_header *self, int fd) buildid_sec = &feat_sec[idx++]; - /* - * Read the kernel buildid nad the list of loaded modules with - * its build_ids: - */ - kernel_maps__init(NULL, false, true); - /* Write build-ids */ buildid_sec->offset = lseek(fd, 0, SEEK_CUR); err = dsos__write_buildid_table(fd); diff --git 
a/tools/perf/util/include/asm/bug.h b/tools/perf/util/include/asm/bug.h new file mode 100644 index 00000000000..7fcc6810adc --- /dev/null +++ b/tools/perf/util/include/asm/bug.h @@ -0,0 +1,22 @@ +#ifndef _PERF_ASM_GENERIC_BUG_H +#define _PERF_ASM_GENERIC_BUG_H + +#define __WARN_printf(arg...) do { fprintf(stderr, arg); } while (0) + +#define WARN(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_printf(format); \ + unlikely(__ret_warn_on); \ +}) + +#define WARN_ONCE(condition, format...) ({ \ + static int __warned; \ + int __ret_warn_once = !!(condition); \ + \ + if (unlikely(__ret_warn_once)) \ + if (WARN(!__warned, format)) \ + __warned = 1; \ + unlikely(__ret_warn_once); \ +}) +#endif diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 44d81d5ae8c..c4ca974b36e 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -6,6 +6,7 @@ #include "debug.h" +#include #include #include #include @@ -37,6 +38,11 @@ unsigned int symbol__priv_size; static int vmlinux_path__nr_entries; static char **vmlinux_path; +static struct symbol_conf symbol_conf__defaults = { + .use_modules = true, + .try_vmlinux_path = true, +}; + static struct rb_root kernel_maps; static void dso__fixup_sym_end(struct dso *self) @@ -1166,7 +1172,9 @@ struct symbol *kernel_maps__find_symbol(u64 ip, struct map **mapp, if (map) { ip = map->map_ip(map, ip); return map__find_symbol(map, ip, filter); - } + } else + WARN_ONCE(RB_EMPTY_ROOT(&kernel_maps), + "Empty kernel_maps, was symbol__init() called?\n"); return NULL; } @@ -1485,9 +1493,9 @@ size_t dsos__fprintf_buildid(FILE *fp) return ret; } -static int kernel_maps__create_kernel_map(const char *vmlinux_name) +static int kernel_maps__create_kernel_map(const struct symbol_conf *conf) { - struct dso *kernel = dso__new(vmlinux_name ?: "[kernel.kallsyms]"); + struct dso *kernel = dso__new(conf->vmlinux_name ?: "[kernel.kallsyms]"); if (kernel == NULL) return -1; @@ -1577,18 +1585,21 @@ out_fail: return -1; } -int kernel_maps__init(const char *vmlinux_name, bool try_vmlinux_path, - bool use_modules) +static int kernel_maps__init(const struct symbol_conf *conf) { - if (try_vmlinux_path && vmlinux_path__init() < 0) + const struct symbol_conf *pconf = conf ?: &symbol_conf__defaults; + + symbol__priv_size = pconf->priv_size; + + if (pconf->try_vmlinux_path && vmlinux_path__init() < 0) return -1; - if (kernel_maps__create_kernel_map(vmlinux_name) < 0) { + if (kernel_maps__create_kernel_map(pconf) < 0) { vmlinux_path__exit(); return -1; } - if (use_modules && kernel_maps__create_module_maps() < 0) + if (pconf->use_modules && kernel_maps__create_module_maps() < 0) pr_debug("Failed to load list of modules in use, " "continuing...\n"); /* @@ -1598,8 +1609,8 @@ int kernel_maps__init(const char *vmlinux_name, bool try_vmlinux_path, return 0; } -void symbol__init(unsigned int priv_size) +int symbol__init(struct symbol_conf *conf) { elf_version(EV_CURRENT); - symbol__priv_size = priv_size; + return kernel_maps__init(conf); } diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 8c4d026e067..5538691494a 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -49,6 +49,13 @@ struct symbol { char name[0]; }; +struct symbol_conf { + unsigned short priv_size; + bool try_vmlinux_path, + use_modules; + const char *vmlinux_name; +}; + extern unsigned int symbol__priv_size; static inline void *symbol__priv(struct symbol *self) @@ -93,11 +100,9 @@ int sysfs__read_build_id(const char 
*filename, void *bf, size_t size); bool dsos__read_build_ids(void); int build_id__sprintf(u8 *self, int len, char *bf); -int kernel_maps__init(const char *vmlinux_name, bool try_vmlinux_path, - bool use_modules); size_t kernel_maps__fprintf(FILE *fp); -void symbol__init(unsigned int priv_size); +int symbol__init(struct symbol_conf *conf); extern struct list_head dsos; extern struct map *kernel_map; From 364794845cbc49e638b83d7ef739524291e1e961 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 24 Nov 2009 12:05:16 -0200 Subject: [PATCH 381/470] perf tools: Introduce zalloc() for the common calloc(1, N) case MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This way we type less characters and it looks more like the kzalloc kernel counterpart. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1259071517-3242-3-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/bench/mem-memcpy.c | 4 ++-- tools/perf/builtin-help.c | 4 ++-- tools/perf/builtin-probe.c | 4 ++-- tools/perf/builtin-sched.c | 14 +++++--------- tools/perf/builtin-top.c | 2 +- tools/perf/util/header.c | 2 +- tools/perf/util/parse-events.c | 2 +- tools/perf/util/symbol.c | 11 +++++------ tools/perf/util/thread.c | 2 +- tools/perf/util/util.h | 5 +++++ 10 files changed, 25 insertions(+), 25 deletions(-) diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c index 5165fd1d8d2..89773178e89 100644 --- a/tools/perf/bench/mem-memcpy.c +++ b/tools/perf/bench/mem-memcpy.c @@ -127,11 +127,11 @@ int bench_mem_memcpy(int argc, const char **argv, return 1; } - dst = calloc(length, sizeof(char)); + dst = zalloc(length); if (!dst) die("memory allocation failed - maybe length is too large?\n"); - src = calloc(length, sizeof(char)); + src = zalloc(length); if (!src) die("memory allocation failed - maybe length is too large?\n"); diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c index 768f9c82631..9f810b17c25 100644 --- a/tools/perf/builtin-help.c +++ b/tools/perf/builtin-help.c @@ -179,7 +179,7 @@ static void add_man_viewer(const char *name) while (*p) p = &((*p)->next); - *p = calloc(1, (sizeof(**p) + len + 1)); + *p = zalloc(sizeof(**p) + len + 1); strncpy((*p)->name, name, len); } @@ -194,7 +194,7 @@ static void do_add_man_viewer_info(const char *name, size_t len, const char *value) { - struct man_viewer_info_list *new = calloc(1, sizeof(*new) + len + 1); + struct man_viewer_info_list *new = zalloc(sizeof(*new) + len + 1); strncpy(new->name, name, len); new->info = strdup(value); diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index d78a3d94549..a2f6daf01ec 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -309,9 +309,9 @@ static int synthesize_probe_event(struct probe_point *pp) { char *buf; int i, len, ret; - pp->probes[0] = buf = (char *)calloc(MAX_CMDLEN, sizeof(char)); + pp->probes[0] = buf = zalloc(MAX_CMDLEN); if (!buf) - die("Failed to allocate memory by calloc."); + die("Failed to allocate memory by zalloc."); ret = snprintf(buf, MAX_CMDLEN, "%s+%d", pp->function, pp->offset); if (ret <= 0 || ret >= MAX_CMDLEN) goto error; diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index dbf089b12de..19eb708a706 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -225,7 +225,7 @@ static void calibrate_sleep_measurement_overhead(void) static struct 
sched_atom * get_new_event(struct task_desc *task, u64 timestamp) { - struct sched_atom *event = calloc(1, sizeof(*event)); + struct sched_atom *event = zalloc(sizeof(*event)); unsigned long idx = task->nr_events; size_t size; @@ -293,7 +293,7 @@ add_sched_event_wakeup(struct task_desc *task, u64 timestamp, return; } - wakee_event->wait_sem = calloc(1, sizeof(*wakee_event->wait_sem)); + wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem)); sem_init(wakee_event->wait_sem, 0, 0); wakee_event->specific_wait = 1; event->wait_sem = wakee_event->wait_sem; @@ -323,7 +323,7 @@ static struct task_desc *register_pid(unsigned long pid, const char *comm) if (task) return task; - task = calloc(1, sizeof(*task)); + task = zalloc(sizeof(*task)); task->pid = pid; task->nr = nr_tasks; strcpy(task->comm, comm); @@ -962,9 +962,7 @@ __thread_latency_insert(struct rb_root *root, struct work_atoms *data, static void thread_atoms_insert(struct thread *thread) { - struct work_atoms *atoms; - - atoms = calloc(sizeof(*atoms), 1); + struct work_atoms *atoms = zalloc(sizeof(*atoms)); if (!atoms) die("No memory"); @@ -996,9 +994,7 @@ add_sched_out_event(struct work_atoms *atoms, char run_state, u64 timestamp) { - struct work_atom *atom; - - atom = calloc(sizeof(*atom), 1); + struct work_atom *atom = zalloc(sizeof(*atom)); if (!atom) die("Non memory"); diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index a21247543fc..4c8653a86aa 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -181,7 +181,7 @@ static void parse_source(struct sym_entry *syme) return; if (syme->src == NULL) { - syme->src = calloc(1, sizeof(*source)); + syme->src = zalloc(sizeof(*source)); if (syme->src == NULL) return; pthread_mutex_init(&syme->src->lock, NULL); diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 271a1600e6f..4b586569bb0 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -63,7 +63,7 @@ int perf_header_attr__add_id(struct perf_header_attr *self, u64 id) */ struct perf_header *perf_header__new(void) { - struct perf_header *self = calloc(sizeof(*self), 1); + struct perf_header *self = zalloc(sizeof(*self)); if (self != NULL) { self->size = 1; diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 07002746927..9e5dbd66d34 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -197,7 +197,7 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config) if (id == config) { closedir(evt_dir); closedir(sys_dir); - path = calloc(1, sizeof(path)); + path = zalloc(sizeof(path)); path->system = malloc(MAX_EVENT_LENGTH); if (!path->system) { free(path); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index c4ca974b36e..8db85b4f553 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -94,15 +94,14 @@ static void kernel_maps__fixup_end(void) static struct symbol *symbol__new(u64 start, u64 len, const char *name) { size_t namelen = strlen(name) + 1; - struct symbol *self = calloc(1, (symbol__priv_size + - sizeof(*self) + namelen)); - if (!self) + struct symbol *self = zalloc(symbol__priv_size + + sizeof(*self) + namelen); + if (self == NULL) return NULL; - if (symbol__priv_size) { - memset(self, 0, symbol__priv_size); + if (symbol__priv_size) self = ((void *)self) + symbol__priv_size; - } + self->start = start; self->end = len ? 
start + len - 1 : start; diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 0f6d78c9863..1796625f778 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -11,7 +11,7 @@ static struct thread *last_match; static struct thread *thread__new(pid_t pid) { - struct thread *self = calloc(1, sizeof(*self)); + struct thread *self = zalloc(sizeof(*self)); if (self != NULL) { self->pid = pid; diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index e1c623e0c99..30c5517f2f9 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h @@ -302,6 +302,11 @@ extern int xdup(int fd); extern FILE *xfdopen(int fd, const char *mode); extern int xmkstemp(char *template); +static inline void *zalloc(size_t size) +{ + return calloc(1, size); +} + static inline size_t xsize_t(off_t len) { return (size_t)len; From 727dad10c17cbaade3cb6a56bd4863a4630f4d13 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 24 Nov 2009 12:05:17 -0200 Subject: [PATCH 382/470] perf tools: Remove unused wrapper routines MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit And also make xrealloc and xmalloc weak symbols so that we don't have this problem: /usr/lib/gcc/x86_64-redhat-linux/4.4.1/../../../../lib64/libiberty.a(xmalloc.o): In function `xrealloc': (.text+0xc0): multiple definition of `xrealloc' libperf.a(wrapper.o):/home/acme_unencrypted/git/linux-2.6-tip/tools/perf/util/wrapper.c:67: first defined here collect2: ld returned 1 exit status Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1259071517-3242-4-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/util.h | 11 ++----- tools/perf/util/wrapper.c | 61 ++------------------------------------- 2 files changed, 4 insertions(+), 68 deletions(-) diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index 30c5517f2f9..c673d882588 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h @@ -290,17 +290,10 @@ static inline char *gitstrchrnul(const char *s, int c) * Wrappers: */ extern char *xstrdup(const char *str); -extern void *xmalloc(size_t size); +extern void *xmalloc(size_t size) __attribute__((weak)); extern void *xmemdupz(const void *data, size_t len); extern char *xstrndup(const char *str, size_t len); -extern void *xrealloc(void *ptr, size_t size); -extern void *xcalloc(size_t nmemb, size_t size); -extern void *xmmap(void *start, size_t length, int prot, int flags, int fd, off_t offset); -extern ssize_t xread(int fd, void *buf, size_t len); -extern ssize_t xwrite(int fd, const void *buf, size_t len); -extern int xdup(int fd); -extern FILE *xfdopen(int fd, const char *mode); -extern int xmkstemp(char *template); +extern void *xrealloc(void *ptr, size_t size) __attribute__((weak)); static inline void *zalloc(size_t size) { diff --git a/tools/perf/util/wrapper.c b/tools/perf/util/wrapper.c index 4574ac28396..bf44ca85d23 100644 --- a/tools/perf/util/wrapper.c +++ b/tools/perf/util/wrapper.c @@ -79,43 +79,12 @@ void *xrealloc(void *ptr, size_t size) return ret; } -void *xcalloc(size_t nmemb, size_t size) -{ - void *ret = calloc(nmemb, size); - if (!ret && (!nmemb || !size)) - ret = calloc(1, 1); - if (!ret) { - release_pack_memory(nmemb * size, -1); - ret = calloc(nmemb, size); - if (!ret && (!nmemb || !size)) - ret = calloc(1, 1); - if (!ret) - die("Out of memory, calloc failed"); - } - return ret; -} - -void *xmmap(void *start, size_t 
length, - int prot, int flags, int fd, off_t offset) -{ - void *ret = mmap(start, length, prot, flags, fd, offset); - if (ret == MAP_FAILED) { - if (!length) - return NULL; - release_pack_memory(length, fd); - ret = mmap(start, length, prot, flags, fd, offset); - if (ret == MAP_FAILED) - die("Out of memory? mmap failed: %s", strerror(errno)); - } - return ret; -} - /* * xread() is the same a read(), but it automatically restarts read() * operations with a recoverable error (EAGAIN and EINTR). xread() * DOES NOT GUARANTEE that "len" bytes is read even if the data is available. */ -ssize_t xread(int fd, void *buf, size_t len) +static ssize_t xread(int fd, void *buf, size_t len) { ssize_t nr; while (1) { @@ -131,7 +100,7 @@ ssize_t xread(int fd, void *buf, size_t len) * operations with a recoverable error (EAGAIN and EINTR). xwrite() DOES NOT * GUARANTEE that "len" bytes is written even if the operation is successful. */ -ssize_t xwrite(int fd, const void *buf, size_t len) +static ssize_t xwrite(int fd, const void *buf, size_t len) { ssize_t nr; while (1) { @@ -179,29 +148,3 @@ ssize_t write_in_full(int fd, const void *buf, size_t count) return total; } - -int xdup(int fd) -{ - int ret = dup(fd); - if (ret < 0) - die("dup failed: %s", strerror(errno)); - return ret; -} - -FILE *xfdopen(int fd, const char *mode) -{ - FILE *stream = fdopen(fd, mode); - if (stream == NULL) - die("Out of memory? fdopen failed: %s", strerror(errno)); - return stream; -} - -int xmkstemp(char *template) -{ - int fd; - - fd = mkstemp(template); - if (fd < 0) - die("Unable to create temporary file: %s", strerror(errno)); - return fd; -} From fcf1203a919c3a3d212c0ed01f5240fd592bf5ae Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 24 Nov 2009 13:01:52 -0200 Subject: [PATCH 383/470] perf symbols: Rename find_symbol routines to find_function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Paving the way for supporting variable in adition to function symbols. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1259074912-5924-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 4 +-- tools/perf/builtin-kmem.c | 3 +- tools/perf/builtin-report.c | 8 ++--- tools/perf/builtin-top.c | 4 +-- tools/perf/util/event.h | 7 ++-- tools/perf/util/map.c | 14 ++++---- tools/perf/util/symbol.c | 68 +++++++++++++++++++---------------- tools/perf/util/symbol.h | 6 ++-- tools/perf/util/thread.h | 4 +-- 9 files changed, 62 insertions(+), 56 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index cd97c2b1cc3..18ac5eaefc3 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -162,7 +162,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) if (event->header.misc & PERF_RECORD_MISC_KERNEL) { level = 'k'; - sym = kernel_maps__find_symbol(ip, &map, symbol_filter); + sym = kernel_maps__find_function(ip, &map, symbol_filter); dump_printf(" ...... dso: %s\n", map ? 
map->dso->long_name : ""); } else if (event->header.misc & PERF_RECORD_MISC_USER) { @@ -171,7 +171,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) if (map != NULL) { got_map: ip = map->map_ip(map, ip); - sym = map__find_symbol(map, ip, symbol_filter); + sym = map__find_function(map, ip, symbol_filter); } else { /* * If this is outside of all known maps, diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index 330dbc762f9..35722fafc4d 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -445,8 +445,7 @@ static void __print_result(struct rb_root *root, int n_lines, int is_caller) if (is_caller) { addr = data->call_site; if (!raw_ip) - sym = kernel_maps__find_symbol(addr, - NULL, NULL); + sym = kernel_maps__find_function(addr, NULL, NULL); } else addr = data->ptr; diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 0ee3d05a040..e4b1004e76e 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -448,14 +448,14 @@ got_map: * trick of looking in the whole kernel symbol list. */ if ((long long)ip < 0) - return kernel_maps__find_symbol(ip, mapp, NULL); + return kernel_maps__find_function(ip, mapp, NULL); } dump_printf(" ...... dso: %s\n", map ? map->dso->long_name : ""); dump_printf(" ...... map: %Lx -> %Lx\n", *ipp, ip); *ipp = ip; - return map ? map__find_symbol(map, ip, NULL) : NULL; + return map ? map__find_function(map, ip, NULL) : NULL; } static int call__match(struct symbol *sym) @@ -495,7 +495,7 @@ static struct symbol **resolve_callchain(struct thread *thread, case PERF_CONTEXT_HV: break; case PERF_CONTEXT_KERNEL: - sym = kernel_maps__find_symbol(ip, NULL, NULL); + sym = kernel_maps__find_function(ip, NULL, NULL); break; default: sym = resolve_symbol(thread, NULL, &ip); @@ -715,7 +715,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) if (cpumode == PERF_RECORD_MISC_KERNEL) { level = 'k'; - sym = kernel_maps__find_symbol(ip, &map, NULL); + sym = kernel_maps__find_function(ip, &map, NULL); dump_printf(" ...... dso: %s\n", map ? 
map->dso->long_name : ""); } else if (cpumode == PERF_RECORD_MISC_USER) { diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 4c8653a86aa..ded6cf65ad9 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -948,7 +948,7 @@ static void event__process_sample(const event_t *self, int counter) map = thread__find_map(thread, ip); if (map != NULL) { ip = map->map_ip(map, ip); - sym = map__find_symbol(map, ip, symbol_filter); + sym = map__find_function(map, ip, symbol_filter); if (sym == NULL) return; userspace_samples++; @@ -968,7 +968,7 @@ static void event__process_sample(const event_t *self, int counter) if (hide_kernel_symbols) return; - sym = kernel_maps__find_symbol(ip, &map, symbol_filter); + sym = kernel_maps__find_function(ip, &map, symbol_filter); if (sym == NULL) return; break; diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index f1e39261265..882a9531db9 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -119,9 +119,10 @@ void map__delete(struct map *self); struct map *map__clone(struct map *self); int map__overlap(struct map *l, struct map *r); size_t map__fprintf(struct map *self, FILE *fp); -struct symbol *map__find_symbol(struct map *self, u64 ip, symbol_filter_t filter); -void map__fixup_start(struct map *self); -void map__fixup_end(struct map *self); +struct symbol *map__find_function(struct map *self, u64 ip, + symbol_filter_t filter); +void map__fixup_start(struct map *self, struct rb_root *symbols); +void map__fixup_end(struct map *self, struct rb_root *symbols); int event__synthesize_thread(pid_t pid, int (*process)(event_t *event)); void event__synthesize_threads(int (*process)(event_t *event)); diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 09412321a80..41c5c4a2001 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -80,18 +80,18 @@ void map__delete(struct map *self) free(self); } -void map__fixup_start(struct map *self) +void map__fixup_start(struct map *self, struct rb_root *symbols) { - struct rb_node *nd = rb_first(&self->dso->syms); + struct rb_node *nd = rb_first(symbols); if (nd != NULL) { struct symbol *sym = rb_entry(nd, struct symbol, rb_node); self->start = sym->start; } } -void map__fixup_end(struct map *self) +void map__fixup_end(struct map *self, struct rb_root *symbols) { - struct rb_node *nd = rb_last(&self->dso->syms); + struct rb_node *nd = rb_last(symbols); if (nd != NULL) { struct symbol *sym = rb_entry(nd, struct symbol, rb_node); self->end = sym->end; @@ -100,8 +100,8 @@ void map__fixup_end(struct map *self) #define DSO__DELETED "(deleted)" -struct symbol * -map__find_symbol(struct map *self, u64 ip, symbol_filter_t filter) +struct symbol *map__find_function(struct map *self, u64 ip, + symbol_filter_t filter) { if (!self->dso->loaded) { int nr = dso__load(self->dso, self, filter); @@ -136,7 +136,7 @@ map__find_symbol(struct map *self, u64 ip, symbol_filter_t filter) } } - return self->dso->find_symbol(self->dso, ip); + return self->dso->find_function(self->dso, ip); } struct map *map__clone(struct map *self) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 8db85b4f553..4ed379b915f 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -45,9 +45,9 @@ static struct symbol_conf symbol_conf__defaults = { static struct rb_root kernel_maps; -static void dso__fixup_sym_end(struct dso *self) +static void symbols__fixup_end(struct rb_root *self) { - struct rb_node *nd, *prevnd = rb_first(&self->syms); + struct rb_node 
*nd, *prevnd = rb_first(self); struct symbol *curr, *prev; if (prevnd == NULL) @@ -144,8 +144,8 @@ struct dso *dso__new(const char *name) strcpy(self->name, name); dso__set_long_name(self, self->name); self->short_name = self->name; - self->syms = RB_ROOT; - self->find_symbol = dso__find_symbol; + self->functions = RB_ROOT; + self->find_function = dso__find_function; self->slen_calculated = 0; self->origin = DSO__ORIG_NOT_FOUND; self->loaded = 0; @@ -155,22 +155,22 @@ struct dso *dso__new(const char *name) return self; } -static void dso__delete_symbols(struct dso *self) +static void symbols__delete(struct rb_root *self) { struct symbol *pos; - struct rb_node *next = rb_first(&self->syms); + struct rb_node *next = rb_first(self); while (next) { pos = rb_entry(next, struct symbol, rb_node); next = rb_next(&pos->rb_node); - rb_erase(&pos->rb_node, &self->syms); + rb_erase(&pos->rb_node, self); symbol__delete(pos); } } void dso__delete(struct dso *self) { - dso__delete_symbols(self); + symbols__delete(&self->functions); if (self->long_name != self->name) free(self->long_name); free(self); @@ -182,9 +182,9 @@ void dso__set_build_id(struct dso *self, void *build_id) self->has_build_id = 1; } -static void dso__insert_symbol(struct dso *self, struct symbol *sym) +static void symbols__insert(struct rb_root *self, struct symbol *sym) { - struct rb_node **p = &self->syms.rb_node; + struct rb_node **p = &self->rb_node; struct rb_node *parent = NULL; const u64 ip = sym->start; struct symbol *s; @@ -198,17 +198,17 @@ static void dso__insert_symbol(struct dso *self, struct symbol *sym) p = &(*p)->rb_right; } rb_link_node(&sym->rb_node, parent, p); - rb_insert_color(&sym->rb_node, &self->syms); + rb_insert_color(&sym->rb_node, self); } -struct symbol *dso__find_symbol(struct dso *self, u64 ip) +static struct symbol *symbols__find(struct rb_root *self, u64 ip) { struct rb_node *n; if (self == NULL) return NULL; - n = self->syms.rb_node; + n = self->rb_node; while (n) { struct symbol *s = rb_entry(n, struct symbol, rb_node); @@ -224,6 +224,11 @@ struct symbol *dso__find_symbol(struct dso *self, u64 ip) return NULL; } +struct symbol *dso__find_function(struct dso *self, u64 ip) +{ + return symbols__find(&self->functions, ip); +} + int build_id__sprintf(u8 *self, int len, char *bf) { char *bid = bf; @@ -253,9 +258,9 @@ size_t dso__fprintf(struct dso *self, FILE *fp) size_t ret = fprintf(fp, "dso: %s (", self->short_name); ret += dso__fprintf_buildid(self, fp); - ret += fprintf(fp, ")\n"); + ret += fprintf(fp, ")\nFunctions:\n"); - for (nd = rb_first(&self->syms); nd; nd = rb_next(nd)) { + for (nd = rb_first(&self->functions); nd; nd = rb_next(nd)) { struct symbol *pos = rb_entry(nd, struct symbol, rb_node); ret += symbol__fprintf(pos, fp); } @@ -320,7 +325,7 @@ static int kernel_maps__load_all_kallsyms(void) * kernel_maps__split_kallsyms, when we have split the * maps per module */ - dso__insert_symbol(kernel_map->dso, sym); + symbols__insert(&kernel_map->dso->functions, sym); } free(line); @@ -344,7 +349,7 @@ static int kernel_maps__split_kallsyms(symbol_filter_t filter) struct map *map = kernel_map; struct symbol *pos; int count = 0; - struct rb_node *next = rb_first(&kernel_map->dso->syms); + struct rb_node *next = rb_first(&kernel_map->dso->functions); int kernel_range = 0; while (next) { @@ -394,12 +399,13 @@ static int kernel_maps__split_kallsyms(symbol_filter_t filter) } if (filter && filter(map, pos)) { - rb_erase(&pos->rb_node, &kernel_map->dso->syms); + rb_erase(&pos->rb_node, 
&kernel_map->dso->functions); symbol__delete(pos); } else { if (map != kernel_map) { - rb_erase(&pos->rb_node, &kernel_map->dso->syms); - dso__insert_symbol(map->dso, pos); + rb_erase(&pos->rb_node, + &kernel_map->dso->functions); + symbols__insert(&map->dso->functions, pos); } count++; } @@ -414,7 +420,7 @@ static int kernel_maps__load_kallsyms(symbol_filter_t filter) if (kernel_maps__load_all_kallsyms()) return -1; - dso__fixup_sym_end(kernel_map->dso); + symbols__fixup_end(&kernel_map->dso->functions); kernel_map->dso->origin = DSO__ORIG_KERNEL; return kernel_maps__split_kallsyms(filter); @@ -485,7 +491,7 @@ static int dso__load_perf_map(struct dso *self, struct map *map, if (filter && filter(map, sym)) symbol__delete(sym); else { - dso__insert_symbol(self, sym); + symbols__insert(&self->functions, sym); nr_syms++; } } @@ -683,7 +689,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, struct map *map, if (filter && filter(map, f)) symbol__delete(f); else { - dso__insert_symbol(self, f); + symbols__insert(&self->functions, f); ++nr; } } @@ -705,7 +711,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, struct map *map, if (filter && filter(map, f)) symbol__delete(f); else { - dso__insert_symbol(self, f); + symbols__insert(&self->functions, f); ++nr; } } @@ -879,7 +885,7 @@ new_symbol: if (filter && filter(curr_map, f)) symbol__delete(f); else { - dso__insert_symbol(curr_dso, f); + symbols__insert(&curr_dso->functions, f); nr++; } } @@ -888,7 +894,7 @@ new_symbol: * For misannotated, zeroed, ASM function sizes. */ if (nr > 0) - dso__fixup_sym_end(self); + symbols__fixup_end(&self->functions); err = nr; out_elf_end: elf_end(elf); @@ -1160,8 +1166,8 @@ static void kernel_maps__insert(struct map *map) maps__insert(&kernel_maps, map); } -struct symbol *kernel_maps__find_symbol(u64 ip, struct map **mapp, - symbol_filter_t filter) +struct symbol *kernel_maps__find_function(u64 ip, struct map **mapp, + symbol_filter_t filter) { struct map *map = maps__find(&kernel_maps, ip); @@ -1170,7 +1176,7 @@ struct symbol *kernel_maps__find_symbol(u64 ip, struct map **mapp, if (map) { ip = map->map_ip(map, ip); - return map__find_symbol(map, ip, filter); + return map__find_function(map, ip, filter); } else WARN_ONCE(RB_EMPTY_ROOT(&kernel_maps), "Empty kernel_maps, was symbol__init() called?\n"); @@ -1432,8 +1438,8 @@ do_kallsyms: if (err > 0) { out_fixup: - map__fixup_start(map); - map__fixup_end(map); + map__fixup_start(map, &map->dso->functions); + map__fixup_end(map, &map->dso->functions); } return err; diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 5538691494a..65846d0c5df 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -65,8 +65,8 @@ static inline void *symbol__priv(struct symbol *self) struct dso { struct list_head node; - struct rb_root syms; - struct symbol *(*find_symbol)(struct dso *, u64 ip); + struct rb_root functions; + struct symbol *(*find_function)(struct dso *, u64 ip); u8 adjust_symbols:1; u8 slen_calculated:1; u8 loaded:1; @@ -83,7 +83,7 @@ struct dso { struct dso *dso__new(const char *name); void dso__delete(struct dso *self); -struct symbol *dso__find_symbol(struct dso *self, u64 ip); +struct symbol *dso__find_function(struct dso *self, u64 ip); struct dso *dsos__findnew(const char *name); int dso__load(struct dso *self, struct map *map, symbol_filter_t filter); diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index e4b8d437725..74cba6487ed 100644 --- a/tools/perf/util/thread.h +++ 
b/tools/perf/util/thread.h @@ -26,8 +26,8 @@ size_t threads__fprintf(FILE *fp); void maps__insert(struct rb_root *maps, struct map *map); struct map *maps__find(struct rb_root *maps, u64 ip); -struct symbol *kernel_maps__find_symbol(const u64 ip, struct map **mapp, - symbol_filter_t filter); +struct symbol *kernel_maps__find_function(const u64 ip, struct map **mapp, + symbol_filter_t filter); struct map *kernel_maps__find_by_dso_name(const char *name); static inline struct map *thread__find_map(struct thread *self, u64 ip) From 1261a02a0c0ab8e643125705f0d1d83e5090e4d1 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Tue, 24 Nov 2009 05:27:18 -0800 Subject: [PATCH 384/470] perf_events, x86: Fix validate_event bug The validate_event() was failing on valid event combinations. The function was assuming that if x86_schedule_event() returned 0, it meant error. But x86_schedule_event() returns the counter index and 0 is a perfectly valid value. An error is returned if the function returns a negative value. Furthermore, validate_event() was also failing for event groups because the event->pmu was not set until after hw_perf_event_init(). Signed-off-by: Stephane Eranian Cc: peterz@infradead.org Cc: paulus@samba.org Cc: perfmon2-devel@lists.sourceforge.net Cc: eranian@gmail.com LKML-Reference: <4b0bdf36.1818d00a.07cc.25ae@mx.google.com> Signed-off-by: Ingo Molnar -- arch/x86/kernel/cpu/perf_event.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) --- arch/x86/kernel/cpu/perf_event.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index bd874302420..c1bbed1021d 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -2229,10 +2229,10 @@ validate_event(struct cpu_hw_events *cpuc, struct perf_event *event) { struct hw_perf_event fake_event = event->hw; - if (event->pmu != &pmu) + if (event->pmu && event->pmu != &pmu) return 0; - return x86_schedule_event(cpuc, &fake_event); + return x86_schedule_event(cpuc, &fake_event) >= 0; } static int validate_group(struct perf_event *event) From fe6126722718e51fba4879517c11ac12d9775bcc Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 24 Nov 2009 20:38:22 +0100 Subject: [PATCH 385/470] perf_events: Fix bad software/trace event recursion counting Commit 4ed7c92d68a5387ba5f7030dc76eab03558e27f5 (perf_events: Undo some recursion damage) has introduced a bad reference counting of the recursion context. putting the context behaves like getting it, dropping every software/trace events after the first one in a context. 
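
For illustration, a minimal sketch (made-up names, not the kernel implementation) of the intended pairing: the "get" side claims a recursion slot by incrementing it, and the "put" side must decrement that same slot; if put increments again, the counter never returns to zero and every later software/trace event in that context is treated as recursive and dropped:

    static int recursion[4];                /* one counter per context level */

    static int sketch_get_recursion_context(int rctx)
    {
            if (recursion[rctx])
                    return -1;              /* already inside this context: drop */
            recursion[rctx]++;
            return rctx;
    }

    static void sketch_put_recursion_context(int rctx)
    {
            recursion[rctx]--;              /* undo the get; incrementing here is the bug */
    }
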
Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Paul Mackerras Cc: Arjan van de Ven Cc: Li Zefan Cc: Steven Rostedt LKML-Reference: <1259091502-5171-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index accfd7bfe38..35df94e344f 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -3914,7 +3914,7 @@ void perf_swevent_put_recursion_context(int rctx) { struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); barrier(); - cpuctx->recursion[rctx]++; + cpuctx->recursion[rctx]--; put_cpu_var(perf_cpu_context); } EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context); From ff038f5c37c2070829004a0678372766c2b32180 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 18 Nov 2009 20:27:27 -0500 Subject: [PATCH 386/470] tracing: Create new TRACE_EVENT_TEMPLATE There are some places in the kernel that define several tracepoints and they are all identical besides the name. The code to enable, disable and record is created for every trace point even if most of the code is identical. This patch adds TRACE_EVENT_TEMPLATE that lets the developer create a template TRACE_EVENT and create trace points with DEFINE_EVENT, which is based off of a given template. Each trace point used by this will share most of the code, and bring down the size of the kernel when there are several duplicate events. Usage is: TRACE_EVENT_TEMPLATE(name, proto, args, tstruct, assign, print); Which would be the same as defining a normal TRACE_EVENT. To create the trace events that the trace points will use: DEFINE_EVENT(template, name, proto, args) is done. The template is the name of the TRACE_EVENT_TEMPLATE to use. The name is the name of the trace point. The parameters proto and args must be the same as the proto and args of the template. If they are not the same, then a compile error will result. I tried hard removing this duplication but the C preprocessor is not powerful enough (or my CPP magic experience points is not at a high enough level) to not need them. A lot of trace events are coming in with new XFS development. Most of the trace points are identical except for the name. The following shows the advantage of having TRACE_EVENT_TEMPLATE: $ size fs/xfs/xfs.o.* text data bss dec hex filename 452114 2788 3520 458422 6feb6 fs/xfs/xfs.o.old 638482 38116 3744 680342 a6196 fs/xfs/xfs.o.template 996954 38116 4480 1039550 fdcbe fs/xfs/xfs.o.trace xfs.o.old is without any tracepoints. xfs.o.template uses the new TRACE_EVENT_TEMPLATE. xfs.o.trace uses the current TRACE_EVENT macros. Requested-by: Christoph Hellwig Signed-off-by: Steven Rostedt --- include/linux/tracepoint.h | 4 + include/trace/define_trace.h | 6 ++ include/trace/ftrace.h | 149 +++++++++++++++++++++++++---------- 3 files changed, 117 insertions(+), 42 deletions(-) diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 2aac8a83e89..88a5b5a809e 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -280,6 +280,10 @@ static inline void tracepoint_synchronize_unregister(void) * TRACE_EVENT_FN to perform any (un)registration work. 
*/ +#define TRACE_EVENT_TEMPLATE(name, proto, args, tstruct, assign, print) +#define DEFINE_EVENT(template, name, proto, args) \ + DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) + #define TRACE_EVENT(name, proto, args, struct, assign, print) \ DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) #define TRACE_EVENT_FN(name, proto, args, struct, \ diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h index 2a4b3bf7403..244985814a4 100644 --- a/include/trace/define_trace.h +++ b/include/trace/define_trace.h @@ -31,6 +31,10 @@ assign, print, reg, unreg) \ DEFINE_TRACE_FN(name, reg, unreg) +#undef DEFINE_EVENT +#define DEFINE_EVENT(template, name, proto, args) \ + DEFINE_TRACE(name) + #undef DECLARE_TRACE #define DECLARE_TRACE(name, proto, args) \ DEFINE_TRACE(name) @@ -63,6 +67,8 @@ #undef TRACE_EVENT #undef TRACE_EVENT_FN +#undef TRACE_EVENT_TEMPLATE +#undef DEFINE_EVENT #undef TRACE_HEADER_MULTI_READ /* Only undef what we defined in this file */ diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index c3417c13e3e..2969f65d800 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -18,6 +18,26 @@ #include +/* + * TRACE_EVENT_TEMPLATE can be used to add a generic function + * handlers for events. That is, if all events have the same + * parameters and just have distinct trace points. + * Each tracepoint can be defined with DEFINE_EVENT and that + * will map the TRACE_EVENT_TEMPLATE to the tracepoint. + * + * TRACE_EVENT is a one to one mapping between tracepoint and template. + */ +#undef TRACE_EVENT +#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ + TRACE_EVENT_TEMPLATE(name, \ + PARAMS(proto), \ + PARAMS(args), \ + PARAMS(tstruct), \ + PARAMS(assign), \ + PARAMS(print)); \ + DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args)); + + #undef __field #define __field(type, item) type item; @@ -36,13 +56,15 @@ #undef TP_STRUCT__entry #define TP_STRUCT__entry(args...) args -#undef TRACE_EVENT -#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ - struct ftrace_raw_##name { \ - struct trace_entry ent; \ - tstruct \ - char __data[0]; \ - }; \ +#undef TRACE_EVENT_TEMPLATE +#define TRACE_EVENT_TEMPLATE(name, proto, args, tstruct, assign, print) \ + struct ftrace_raw_##name { \ + struct trace_entry ent; \ + tstruct \ + char __data[0]; \ + }; +#undef DEFINE_EVENT +#define DEFINE_EVENT(template, name, proto, args) \ static struct ftrace_event_call event_##name #undef __cpparg @@ -89,12 +111,15 @@ #undef __string #define __string(item, src) __dynamic_array(char, item, -1) -#undef TRACE_EVENT -#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ +#undef TRACE_EVENT_TEMPLATE +#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print) \ struct ftrace_data_offsets_##call { \ tstruct; \ }; +#undef DEFINE_EVENT +#define DEFINE_EVENT(template, name, proto, args) + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) /* @@ -170,8 +195,8 @@ #undef TP_perf_assign #define TP_perf_assign(args...) 
-#undef TRACE_EVENT -#define TRACE_EVENT(call, proto, args, tstruct, func, print) \ +#undef TRACE_EVENT_TEMPLATE +#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, func, print) \ static int \ ftrace_format_##call(struct ftrace_event_call *unused, \ struct trace_seq *s) \ @@ -186,6 +211,9 @@ ftrace_format_##call(struct ftrace_event_call *unused, \ return ret; \ } +#undef DEFINE_EVENT +#define DEFINE_EVENT(template, name, proto, args) + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) /* @@ -255,10 +283,11 @@ ftrace_format_##call(struct ftrace_event_call *unused, \ ftrace_print_symbols_seq(p, value, symbols); \ }) -#undef TRACE_EVENT -#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ +#undef TRACE_EVENT_TEMPLATE +#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print) \ static enum print_line_t \ -ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ +ftrace_raw_output_id_##call(int event_id, const char *name, \ + struct trace_iterator *iter, int flags) \ { \ struct trace_seq *s = &iter->seq; \ struct ftrace_raw_##call *field; \ @@ -268,7 +297,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ \ entry = iter->ent; \ \ - if (entry->type != event_##call.id) { \ + if (entry->type != event_id) { \ WARN_ON_ONCE(1); \ return TRACE_TYPE_UNHANDLED; \ } \ @@ -277,14 +306,25 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ \ p = &get_cpu_var(ftrace_event_seq); \ trace_seq_init(p); \ - ret = trace_seq_printf(s, #call ": " print); \ + ret = trace_seq_printf(s, "%s: ", name); \ + if (ret) \ + ret = trace_seq_printf(s, print); \ put_cpu(); \ if (!ret) \ return TRACE_TYPE_PARTIAL_LINE; \ \ return TRACE_TYPE_HANDLED; \ } - + +#undef DEFINE_EVENT +#define DEFINE_EVENT(template, name, proto, args) \ +static enum print_line_t \ +ftrace_raw_output_##name(struct trace_iterator *iter, int flags) \ +{ \ + return ftrace_raw_output_id_##template(event_##name.id, \ + #name, iter, flags); \ +} + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) #undef __field_ext @@ -318,8 +358,8 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ #undef __string #define __string(item, src) __dynamic_array(char, item, -1) -#undef TRACE_EVENT -#define TRACE_EVENT(call, proto, args, tstruct, func, print) \ +#undef TRACE_EVENT_TEMPLATE +#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, func, print) \ static int \ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \ { \ @@ -335,6 +375,9 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \ return ret; \ } +#undef DEFINE_EVENT +#define DEFINE_EVENT(template, name, proto, args) + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) /* @@ -361,10 +404,10 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \ __data_size += (len) * sizeof(type); #undef __string -#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1) \ +#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1) -#undef TRACE_EVENT -#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ +#undef TRACE_EVENT_TEMPLATE +#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print) \ static inline int ftrace_get_offsets_##call( \ struct ftrace_data_offsets_##call *__data_offsets, proto) \ { \ @@ -376,6 +419,9 @@ static inline int ftrace_get_offsets_##call( \ return __data_size; \ } +#undef DEFINE_EVENT +#define DEFINE_EVENT(template, name, proto, args) + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) #ifdef CONFIG_EVENT_PROFILE @@ -397,19 
+443,22 @@ static inline int ftrace_get_offsets_##call( \ * */ -#undef TRACE_EVENT -#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ +#undef TRACE_EVENT_TEMPLATE +#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print) + +#undef DEFINE_EVENT +#define DEFINE_EVENT(template, name, proto, args) \ \ -static void ftrace_profile_##call(proto); \ +static void ftrace_profile_##name(proto); \ \ -static int ftrace_profile_enable_##call(struct ftrace_event_call *unused)\ +static int ftrace_profile_enable_##name(struct ftrace_event_call *unused)\ { \ - return register_trace_##call(ftrace_profile_##call); \ + return register_trace_##name(ftrace_profile_##name); \ } \ \ -static void ftrace_profile_disable_##call(struct ftrace_event_call *unused)\ +static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\ { \ - unregister_trace_##call(ftrace_profile_##call); \ + unregister_trace_##name(ftrace_profile_##name); \ } #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) @@ -550,15 +599,13 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *unused)\ #define __assign_str(dst, src) \ strcpy(__get_str(dst), src); -#undef TRACE_EVENT -#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ +#undef TRACE_EVENT_TEMPLATE +#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print) \ \ -static struct ftrace_event_call event_##call; \ - \ -static void ftrace_raw_event_##call(proto) \ +static void ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \ + proto) \ { \ struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ - struct ftrace_event_call *event_call = &event_##call; \ struct ring_buffer_event *event; \ struct ftrace_raw_##call *entry; \ struct ring_buffer *buffer; \ @@ -572,7 +619,7 @@ static void ftrace_raw_event_##call(proto) \ __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ \ event = trace_current_buffer_lock_reserve(&buffer, \ - event_##call.id, \ + event_call->id, \ sizeof(*entry) + __data_size, \ irq_flags, pc); \ if (!event) \ @@ -587,6 +634,14 @@ static void ftrace_raw_event_##call(proto) \ if (!filter_current_check_discard(buffer, event_call, entry, event)) \ trace_nowake_buffer_unlock_commit(buffer, \ event, irq_flags, pc); \ +} + +#undef DEFINE_EVENT +#define DEFINE_EVENT(template, call, proto, args) \ + \ +static void ftrace_raw_event_##call(proto) \ +{ \ + ftrace_raw_event_id_##template(&event_##call, args); \ } \ \ static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\ @@ -630,8 +685,8 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ .raw_init = ftrace_raw_init_event_##call, \ .regfunc = ftrace_raw_reg_event_##call, \ .unregfunc = ftrace_raw_unreg_event_##call, \ - .show_format = ftrace_format_##call, \ - .define_fields = ftrace_define_fields_##call, \ + .show_format = ftrace_format_##template, \ + .define_fields = ftrace_define_fields_##template, \ _TRACE_PROFILE_INIT(call) \ } @@ -719,14 +774,15 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ #undef __perf_count #define __perf_count(c) __count = (c) -#undef TRACE_EVENT -#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ -static void ftrace_profile_##call(proto) \ +#undef TRACE_EVENT_TEMPLATE +#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print) \ +static void \ +ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \ + proto) \ { \ struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ extern int 
perf_swevent_get_recursion_context(void); \ extern void perf_swevent_put_recursion_context(int rctx); \ - struct ftrace_event_call *event_call = &event_##call; \ extern void perf_tp_event(int, u64, u64, void *, int); \ struct ftrace_raw_##call *entry; \ u64 __addr = 0, __count = 1; \ @@ -789,6 +845,15 @@ end_recursion: \ \ } +#undef DEFINE_EVENT +#define DEFINE_EVENT(template, call, proto, args) \ +static void ftrace_profile_##call(proto) \ +{ \ + struct ftrace_event_call *event_call = &event_##call; \ + \ + ftrace_profile_templ_##template(event_call, args); \ +} + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) #endif /* CONFIG_EVENT_PROFILE */ From e5bc9721684e9412f3e0465222f317c362a8ab47 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 18 Nov 2009 20:36:26 -0500 Subject: [PATCH 387/470] tracing: Create new DEFINE_EVENT_PRINT After creating the TRACE_EVENT_TEMPLATE I started to look at other trace points to see what duplication was made. I noticed that there are several trace points where they are almost identical except for the name and the output format. Since TRACE_EVENT_TEMPLATE was successful in bringing down the size of trace events, I added a DEFINE_EVENT_PRINT. DEFINE_EVENT_PRINT is used just like DEFINE_EVENT is. That is, the DEFINE_EVENT_PRINT also uses a TRACE_EVENT_TEMPLATE, but it allows the developer to overwrite the print format. If there are two or more TRACE_EVENTS that are identical except for the name and print, then they can be converted to use a TRACE_EVENT_TEMPLATE. Since the TRACE_EVENT_TEMPLATE already does the print output, the first trace event would have its print format held in the TRACE_EVENT_TEMPLATE and be defined with a DEFINE_EVENT. The rest will use the DEFINE_EVENT_PRINT and override the print format. Converting the sched trace points to both DEFINE_EVENT and DEFINE_EVENT_PRINT. Five were converted to DEFINE_EVENT and two were converted to DEFINE_EVENT_PRINT. I was able to get the following: $ size kernel/sched.o-* text data bss dec hex filename 79299 6776 2520 88595 15a13 kernel/sched.o-notrace 101941 11896 2584 116421 1c6c5 kernel/sched.o-templ 104779 11896 2584 119259 1d1db kernel/sched.o-trace sched.o-notrace is the scheduler compiled with no trace points. sched.o-templ is with the use of DEFINE_EVENT and DEFINE_EVENT_PRINT sched.o-trace is the current trace events. 
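
As a usage sketch (hypothetical event names, not taken from the kernel tree), two events sharing one template: the first inherits the template's print format through DEFINE_EVENT, the second keeps the same fields but overrides the print format through DEFINE_EVENT_PRINT:

    TRACE_EVENT_TEMPLATE(foo_template,

            TP_PROTO(struct task_struct *tsk, u64 delay),

            TP_ARGS(tsk, delay),

            TP_STRUCT__entry(
                    __field( pid_t, pid   )
                    __field( u64,   delay )
            ),

            TP_fast_assign(
                    __entry->pid   = tsk->pid;
                    __entry->delay = delay;
            ),

            TP_printk("pid=%d delay=%Lu [ns]",
                      __entry->pid, (unsigned long long)__entry->delay)
    );

    /* keeps the template's print format */
    DEFINE_EVENT(foo_template, foo_wait,
            TP_PROTO(struct task_struct *tsk, u64 delay),
            TP_ARGS(tsk, delay));

    /* same template and fields, but overrides the print format */
    DEFINE_EVENT_PRINT(foo_template, foo_sleep,
            TP_PROTO(struct task_struct *tsk, u64 delay),
            TP_ARGS(tsk, delay),
            TP_printk("pid=%d slept %Lu ns",
                      __entry->pid, (unsigned long long)__entry->delay));
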
Signed-off-by: Steven Rostedt --- include/linux/tracepoint.h | 2 + include/trace/define_trace.h | 5 ++ include/trace/ftrace.h | 123 +++++++++++++++++++++++++++++++++-- 3 files changed, 126 insertions(+), 4 deletions(-) diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 88a5b5a809e..7063383cca1 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -283,6 +283,8 @@ static inline void tracepoint_synchronize_unregister(void) #define TRACE_EVENT_TEMPLATE(name, proto, args, tstruct, assign, print) #define DEFINE_EVENT(template, name, proto, args) \ DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) +#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ + DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) #define TRACE_EVENT(name, proto, args, struct, assign, print) \ DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h index 244985814a4..5d7d855ae21 100644 --- a/include/trace/define_trace.h +++ b/include/trace/define_trace.h @@ -35,6 +35,10 @@ #define DEFINE_EVENT(template, name, proto, args) \ DEFINE_TRACE(name) +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ + DEFINE_TRACE(name) + #undef DECLARE_TRACE #define DECLARE_TRACE(name, proto, args) \ DEFINE_TRACE(name) @@ -69,6 +73,7 @@ #undef TRACE_EVENT_FN #undef TRACE_EVENT_TEMPLATE #undef DEFINE_EVENT +#undef DEFINE_EVENT_PRINT #undef TRACE_HEADER_MULTI_READ /* Only undef what we defined in this file */ diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 2969f65d800..b0461772bc8 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -67,6 +67,10 @@ #define DEFINE_EVENT(template, name, proto, args) \ static struct ftrace_event_call event_##name +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ + DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) + #undef __cpparg #define __cpparg(arg...) 
arg @@ -120,6 +124,10 @@ #undef DEFINE_EVENT #define DEFINE_EVENT(template, name, proto, args) +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ + DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) /* @@ -198,15 +206,28 @@ #undef TRACE_EVENT_TEMPLATE #define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, func, print) \ static int \ -ftrace_format_##call(struct ftrace_event_call *unused, \ - struct trace_seq *s) \ +ftrace_format_setup_##call(struct ftrace_event_call *unused, \ + struct trace_seq *s) \ { \ struct ftrace_raw_##call field __attribute__((unused)); \ int ret = 0; \ \ tstruct; \ \ - trace_seq_printf(s, "\nprint fmt: " print); \ + return ret; \ +} \ + \ +static int \ +ftrace_format_##call(struct ftrace_event_call *unused, \ + struct trace_seq *s) \ +{ \ + int ret = 0; \ + \ + ret = ftrace_format_setup_##call(unused, s); \ + if (!ret) \ + return ret; \ + \ + ret = trace_seq_printf(s, "\nprint fmt: " print); \ \ return ret; \ } @@ -214,6 +235,23 @@ ftrace_format_##call(struct ftrace_event_call *unused, \ #undef DEFINE_EVENT #define DEFINE_EVENT(template, name, proto, args) +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ +static int \ +ftrace_format_##name(struct ftrace_event_call *unused, \ + struct trace_seq *s) \ +{ \ + int ret = 0; \ + \ + ret = ftrace_format_setup_##template(unused, s); \ + if (!ret) \ + return ret; \ + \ + trace_seq_printf(s, "\nprint fmt: " print); \ + \ + return ret; \ +} + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) /* @@ -325,6 +363,38 @@ ftrace_raw_output_##name(struct trace_iterator *iter, int flags) \ #name, iter, flags); \ } +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ +static enum print_line_t \ +ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ +{ \ + struct trace_seq *s = &iter->seq; \ + struct ftrace_raw_##template *field; \ + struct trace_entry *entry; \ + struct trace_seq *p; \ + int ret; \ + \ + entry = iter->ent; \ + \ + if (entry->type != event_##call.id) { \ + WARN_ON_ONCE(1); \ + return TRACE_TYPE_UNHANDLED; \ + } \ + \ + field = (typeof(field))entry; \ + \ + p = &get_cpu_var(ftrace_event_seq); \ + trace_seq_init(p); \ + ret = trace_seq_printf(s, "%s: ", #call); \ + if (ret) \ + ret = trace_seq_printf(s, print); \ + put_cpu(); \ + if (!ret) \ + return TRACE_TYPE_PARTIAL_LINE; \ + \ + return TRACE_TYPE_HANDLED; \ +} + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) #undef __field_ext @@ -378,6 +448,10 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \ #undef DEFINE_EVENT #define DEFINE_EVENT(template, name, proto, args) +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ + DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) /* @@ -422,6 +496,10 @@ static inline int ftrace_get_offsets_##call( \ #undef DEFINE_EVENT #define DEFINE_EVENT(template, name, proto, args) +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ + DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) #ifdef CONFIG_EVENT_PROFILE @@ -461,6 +539,10 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\ unregister_trace_##name(ftrace_profile_##name); \ } +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ + 
DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) #endif @@ -674,7 +756,19 @@ static int ftrace_raw_init_event_##call(struct ftrace_event_call *unused)\ event_##call.id = id; \ INIT_LIST_HEAD(&event_##call.fields); \ return 0; \ -} \ +} + +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ + DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) + +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) + +#undef TRACE_EVENT_TEMPLATE +#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print) + +#undef DEFINE_EVENT +#define DEFINE_EVENT(template, call, proto, args) \ \ static struct ftrace_event_call __used \ __attribute__((__aligned__(4))) \ @@ -690,6 +784,23 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ _TRACE_PROFILE_INIT(call) \ } +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ + \ +static struct ftrace_event_call __used \ +__attribute__((__aligned__(4))) \ +__attribute__((section("_ftrace_events"))) event_##call = { \ + .name = #call, \ + .system = __stringify(TRACE_SYSTEM), \ + .event = &ftrace_event_type_##call, \ + .raw_init = ftrace_raw_init_event_##call, \ + .regfunc = ftrace_raw_reg_event_##call, \ + .unregfunc = ftrace_raw_unreg_event_##call, \ + .show_format = ftrace_format_##call, \ + .define_fields = ftrace_define_fields_##template, \ + _TRACE_PROFILE_INIT(call) \ +} + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) /* @@ -854,6 +965,10 @@ static void ftrace_profile_##call(proto) \ ftrace_profile_templ_##template(event_call, args); \ } +#undef DEFINE_EVENT_PRINT +#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ + DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) #endif /* CONFIG_EVENT_PROFILE */ From 75ec29ab848a7e92a41aaafaeb33d1afbc839be4 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 18 Nov 2009 20:48:08 -0500 Subject: [PATCH 388/470] tracing: Convert some sched trace events to DEFINE_EVENT and _PRINT Converting some of the scheduler trace events to use the TRACE_EVENT_TEMPLATE, DEFINE_EVENT and DEFINE_EVENT_PRINT helped to save some space: $ size kernel/sched.o-* text data bss dec hex filename 79299 6776 2520 88595 15a13 kernel/sched.o-notrace 101941 11896 2584 116421 1c6c5 kernel/sched.o-templ 104779 11896 2584 119259 1d1db kernel/sched.o-trace sched.o-notrace is without any tracepoints compiled sched.o-templ is with this patch sched.o-trace is the tracepoints before this patch The trace events converted to DEFINE_EVENT: sched_wakeup, sched_wakeup_new, sched_process_free, sched_process_exit, and sched_stat_wait. The trace events converted to DEFINE_EVENT_PRINT: sched_stat_sleep and sched_stat_iowait. Note, since the TRACE_EVENT_TEMPLATE always uses a print, the sched_stat_wait print format is defined in the template and this template is used by sched_stat_sleep and sched_stat_iowait. But the later two override the print format. Signed-off-by: Steven Rostedt --- include/trace/events/sched.h | 170 +++++++++++------------------------ 1 file changed, 52 insertions(+), 118 deletions(-) diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index b50b9856c59..238f74b5848 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -83,7 +83,7 @@ TRACE_EVENT(sched_wait_task, * (NOTE: the 'rq' argument is not used by generic trace events, * but used by the latency tracer plugin. 
) */ -TRACE_EVENT(sched_wakeup, +TRACE_EVENT_TEMPLATE(sched_wakeup_template, TP_PROTO(struct rq *rq, struct task_struct *p, int success), @@ -110,38 +110,19 @@ TRACE_EVENT(sched_wakeup, __entry->success, __entry->target_cpu) ); +DEFINE_EVENT(sched_wakeup_template, sched_wakeup, + TP_PROTO(struct rq *rq, struct task_struct *p, int success), + TP_ARGS(rq, p, success)); + /* * Tracepoint for waking up a new task: * * (NOTE: the 'rq' argument is not used by generic trace events, * but used by the latency tracer plugin. ) */ -TRACE_EVENT(sched_wakeup_new, - - TP_PROTO(struct rq *rq, struct task_struct *p, int success), - - TP_ARGS(rq, p, success), - - TP_STRUCT__entry( - __array( char, comm, TASK_COMM_LEN ) - __field( pid_t, pid ) - __field( int, prio ) - __field( int, success ) - __field( int, target_cpu ) - ), - - TP_fast_assign( - memcpy(__entry->comm, p->comm, TASK_COMM_LEN); - __entry->pid = p->pid; - __entry->prio = p->prio; - __entry->success = success; - __entry->target_cpu = task_cpu(p); - ), - - TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d", - __entry->comm, __entry->pid, __entry->prio, - __entry->success, __entry->target_cpu) -); +DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new, + TP_PROTO(struct rq *rq, struct task_struct *p, int success), + TP_ARGS(rq, p, success)); /* * Tracepoint for task switches, performed by the scheduler: @@ -216,10 +197,7 @@ TRACE_EVENT(sched_migrate_task, __entry->orig_cpu, __entry->dest_cpu) ); -/* - * Tracepoint for freeing a task: - */ -TRACE_EVENT(sched_process_free, +TRACE_EVENT_TEMPLATE(sched_process_template, TP_PROTO(struct task_struct *p), @@ -241,30 +219,20 @@ TRACE_EVENT(sched_process_free, __entry->comm, __entry->pid, __entry->prio) ); +/* + * Tracepoint for freeing a task: + */ +DEFINE_EVENT(sched_process_template, sched_process_free, + TP_PROTO(struct task_struct *p), + TP_ARGS(p)); + + /* * Tracepoint for a task exiting: */ -TRACE_EVENT(sched_process_exit, - - TP_PROTO(struct task_struct *p), - - TP_ARGS(p), - - TP_STRUCT__entry( - __array( char, comm, TASK_COMM_LEN ) - __field( pid_t, pid ) - __field( int, prio ) - ), - - TP_fast_assign( - memcpy(__entry->comm, p->comm, TASK_COMM_LEN); - __entry->pid = p->pid; - __entry->prio = p->prio; - ), - - TP_printk("comm=%s pid=%d prio=%d", - __entry->comm, __entry->pid, __entry->prio) -); +DEFINE_EVENT(sched_process_template, sched_process_exit, + TP_PROTO(struct task_struct *p), + TP_ARGS(p)); /* * Tracepoint for a waiting task: @@ -348,12 +316,7 @@ TRACE_EVENT(sched_signal_send, * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE * adding sched_stat support to SCHED_FIFO/RR would be welcome. */ - -/* - * Tracepoint for accounting wait time (time the task is runnable - * but not actually running due to scheduler contention). - */ -TRACE_EVENT(sched_stat_wait, +TRACE_EVENT_TEMPLATE(sched_stat_template, TP_PROTO(struct task_struct *tsk, u64 delay), @@ -379,6 +342,37 @@ TRACE_EVENT(sched_stat_wait, (unsigned long long)__entry->delay) ); + +/* + * Tracepoint for accounting wait time (time the task is runnable + * but not actually running due to scheduler contention). + */ +DEFINE_EVENT(sched_stat_template, sched_stat_wait, + TP_PROTO(struct task_struct *tsk, u64 delay), + TP_ARGS(tsk, delay)); + +/* + * Tracepoint for accounting sleep time (time the task is not runnable, + * including iowait, see below). 
+ */ +DEFINE_EVENT_PRINT(sched_stat_template, sched_stat_sleep, + TP_PROTO(struct task_struct *tsk, u64 delay), + TP_ARGS(tsk, delay), + TP_printk("task: %s:%d sleep: %Lu [ns]", + __entry->comm, __entry->pid, + (unsigned long long)__entry->delay)); + +/* + * Tracepoint for accounting iowait time (time the task is not runnable + * due to waiting on IO to complete). + */ +DEFINE_EVENT_PRINT(sched_stat_template, sched_stat_iowait, + TP_PROTO(struct task_struct *tsk, u64 delay), + TP_ARGS(tsk, delay), + TP_printk("task: %s:%d iowait: %Lu [ns]", + __entry->comm, __entry->pid, + (unsigned long long)__entry->delay)); + /* * Tracepoint for accounting runtime (time the task is executing * on a CPU). @@ -412,66 +406,6 @@ TRACE_EVENT(sched_stat_runtime, (unsigned long long)__entry->vruntime) ); -/* - * Tracepoint for accounting sleep time (time the task is not runnable, - * including iowait, see below). - */ -TRACE_EVENT(sched_stat_sleep, - - TP_PROTO(struct task_struct *tsk, u64 delay), - - TP_ARGS(tsk, delay), - - TP_STRUCT__entry( - __array( char, comm, TASK_COMM_LEN ) - __field( pid_t, pid ) - __field( u64, delay ) - ), - - TP_fast_assign( - memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); - __entry->pid = tsk->pid; - __entry->delay = delay; - ) - TP_perf_assign( - __perf_count(delay); - ), - - TP_printk("comm=%s pid=%d delay=%Lu [ns]", - __entry->comm, __entry->pid, - (unsigned long long)__entry->delay) -); - -/* - * Tracepoint for accounting iowait time (time the task is not runnable - * due to waiting on IO to complete). - */ -TRACE_EVENT(sched_stat_iowait, - - TP_PROTO(struct task_struct *tsk, u64 delay), - - TP_ARGS(tsk, delay), - - TP_STRUCT__entry( - __array( char, comm, TASK_COMM_LEN ) - __field( pid_t, pid ) - __field( u64, delay ) - ), - - TP_fast_assign( - memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); - __entry->pid = tsk->pid; - __entry->delay = delay; - ) - TP_perf_assign( - __perf_count(delay); - ), - - TP_printk("comm=%s pid=%d delay=%Lu [ns]", - __entry->comm, __entry->pid, - (unsigned long long)__entry->delay) -); - #endif /* _TRACE_SCHED_H */ /* This part must be outside protection */ From 0d0bea5ea4a0e91feff22ac5e32e14ff3a682247 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 25 Nov 2009 01:14:58 -0600 Subject: [PATCH 389/470] perf tools: Add 'signed' flag setting back into trace-event-parse.c Commit 13999e59343b042b0807be2df6ae5895d29782a0 (perf tools: Handle the case with and without the "signed" trace field) removed code to set the FIELD_IS_SIGNED flag that was originally added by commit 26a50744b21fff65bd754874072857bee8967f4d (tracing/events: Add 'signed' field to format files). This adds it back. 
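
For context, a rough sketch (simplified, with an arbitrary flag value chosen only for this example) of what the restored code does: each field line in an event's "format" file ends with a "signed:" attribute, and a non-zero value marks the field as signed:

    /*
     * A format file line looks roughly like:
     *
     *   field:int nr;  offset:8;  size:4;  signed:1;
     *
     * token below points at the digits after "signed:".
     */
    #include <stdlib.h>

    #define FIELD_IS_SIGNED 1       /* arbitrary bit for this sketch */

    static void sketch_parse_signed(const char *token, unsigned long *flags)
    {
            if (strtoul(token, NULL, 0))
                    *flags |= FIELD_IS_SIGNED;
    }
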
Signed-off-by: Tom Zanussi Cc: Steven Rostedt Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <1259133299-23594-2-git-send-email-tzanussi@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index eae56050308..7021dc1b0ca 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -941,7 +941,8 @@ static int event_read_fields(struct event *event, struct format_field **fields) if (read_expect_type(EVENT_ITEM, &token)) goto fail; - /* add signed type */ + if (strtoul(token, NULL, 0)) + field->flags |= FIELD_IS_SIGNED; free_token(token); if (read_expected(EVENT_OP, ";") < 0) From 99df5a6a215f026e62287083de2b44b22edd3623 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 25 Nov 2009 01:14:59 -0600 Subject: [PATCH 390/470] trace/syscalls: Change ret param in struct syscall_trace_exit to long Commit ee949a86b3aef15845ea677aa60231008de62672 ("tracing/syscalls: Use long for syscall ret format and field definitions") changed the syscall exit return type to long, but forgot to change it in the struct. Signed-off-by: Tom Zanussi Cc: Steven Rostedt Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <1259133299-23594-3-git-send-email-tzanussi@gmail.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 4da6ede7440..1d7f4830a80 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -100,7 +100,7 @@ struct syscall_trace_enter { struct syscall_trace_exit { struct trace_entry ent; int nr; - unsigned long ret; + long ret; }; struct kprobe_trace_entry { From 28b4e0d86acf59ae3bc422921138a4958458326e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 25 Nov 2009 22:24:44 +0900 Subject: [PATCH 391/470] x86: Rename global percpu symbol dr7 to cpu_dr7 Percpu symbols now occupy the same namespace as other global symbols and as such short global symbols without subsystem prefix tend to collide with local variables. dr7 percpu variable used by x86 was hit by this. Rename it to cpu_dr7. The rename also makes it more consistent with its fellow cpu_debugreg percpu variable. 
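
A short sketch of the collision class being avoided (illustrative code, not a quote from the tree): with per-cpu symbols living in the ordinary global namespace, a per-cpu variable simply named dr7 can clash with an unrelated identifier of the same name, while the prefixed name cannot:

    #include <linux/percpu.h>

    DEFINE_PER_CPU(unsigned long, cpu_dr7);         /* prefixed: no clash */

    static void sketch_use_dr7(void)
    {
            unsigned long dr7;                      /* plain local, now harmless */

            dr7 = __get_cpu_var(cpu_dr7);           /* this CPU's copy */
            (void)dr7;
    }
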
Signed-off-by: Tejun Heo Cc: Frederic Weisbecker Cc: Peter Zijlstra Cc: Rusty Russell Cc: Christoph Lameter Cc: Linus Torvalds , Cc: Andrew Morton LKML-Reference: <20091125115856.GA17856@elte.hu> Signed-off-by: Ingo Molnar Reported-by: Stephen Rothwell --- arch/x86/include/asm/debugreg.h | 4 ++-- arch/x86/kernel/hw_breakpoint.c | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h index fdabd843576..8240f76b531 100644 --- a/arch/x86/include/asm/debugreg.h +++ b/arch/x86/include/asm/debugreg.h @@ -75,7 +75,7 @@ */ #ifdef __KERNEL__ -DECLARE_PER_CPU(unsigned long, dr7); +DECLARE_PER_CPU(unsigned long, cpu_dr7); static inline void hw_breakpoint_disable(void) { @@ -91,7 +91,7 @@ static inline void hw_breakpoint_disable(void) static inline int hw_breakpoint_active(void) { - return __get_cpu_var(dr7) & DR_GLOBAL_ENABLE_MASK; + return __get_cpu_var(cpu_dr7) & DR_GLOBAL_ENABLE_MASK; } extern void aout_dump_debugregs(struct user *dump); diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index 4d267fb7782..92ea5aad0b5 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -46,8 +46,8 @@ #include /* Per cpu debug control register value */ -DEFINE_PER_CPU(unsigned long, dr7); -EXPORT_PER_CPU_SYMBOL(dr7); +DEFINE_PER_CPU(unsigned long, cpu_dr7); +EXPORT_PER_CPU_SYMBOL(cpu_dr7); /* Per cpu debug address registers values */ static DEFINE_PER_CPU(unsigned long, cpu_debugreg[HBP_NUM]); @@ -118,7 +118,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp) set_debugreg(info->address, i); __get_cpu_var(cpu_debugreg[i]) = info->address; - dr7 = &__get_cpu_var(dr7); + dr7 = &__get_cpu_var(cpu_dr7); *dr7 |= encode_dr7(i, info->len, info->type); set_debugreg(*dr7, 7); @@ -153,7 +153,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot")) return; - dr7 = &__get_cpu_var(dr7); + dr7 = &__get_cpu_var(cpu_dr7); *dr7 &= ~encode_dr7(i, info->len, info->type); set_debugreg(*dr7, 7); @@ -437,7 +437,7 @@ void hw_breakpoint_restore(void) set_debugreg(__get_cpu_var(cpu_debugreg[2]), 2); set_debugreg(__get_cpu_var(cpu_debugreg[3]), 3); set_debugreg(current->thread.debugreg6, 6); - set_debugreg(__get_cpu_var(dr7), 7); + set_debugreg(__get_cpu_var(cpu_dr7), 7); } EXPORT_SYMBOL_GPL(hw_breakpoint_restore); From 091ad3658e3c76c5fb05f65bfb64a0246f8f31b5 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 26 Nov 2009 09:04:55 +0100 Subject: [PATCH 392/470] events: Rename TRACE_EVENT_TEMPLATE() to DECLARE_EVENT_CLASS() It is not quite obvious at first sight what TRACE_EVENT_TEMPLATE does: does it define an event as well beyond defining a template? To clarify this, rename it to DECLARE_EVENT_CLASS, which follows the various 'DECLARE_*()' idioms we already have in the kernel: DECLARE_EVENT_CLASS(class) DEFINE_EVENT(class, event1) DEFINE_EVENT(class, event2) DEFINE_EVENT(class, event3) To complete this logic we should also rename TRACE_EVENT() to: DEFINE_SINGLE_EVENT(single_event) ... but in a more quiet moment of the kernel cycle. 
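
Whichever name the class macro ends up with, call sites are unaffected; as an illustrative sketch (the surrounding function is invented, only the tracepoint call mirrors the sched example converted earlier), an event created with DEFINE_EVENT is still invoked through the trace_<name>() inline it generates:

    #include <trace/events/sched.h>

    static void sketch_report_wakeup(struct rq *rq, struct task_struct *p,
                                     int success)
    {
            /* generated by DEFINE_EVENT(sched_wakeup_template, sched_wakeup, ...) */
            trace_sched_wakeup(rq, p, success);
    }
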
Cc: Pekka Enberg Cc: Steven Rostedt Cc: Frederic Weisbecker LKML-Reference: <4B0E286A.2000405@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- include/linux/tracepoint.h | 2 +- include/trace/define_trace.h | 2 +- include/trace/events/sched.h | 6 ++--- include/trace/ftrace.h | 46 ++++++++++++++++++------------------ 4 files changed, 28 insertions(+), 28 deletions(-) diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 7063383cca1..f59604ed0ec 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -280,7 +280,7 @@ static inline void tracepoint_synchronize_unregister(void) * TRACE_EVENT_FN to perform any (un)registration work. */ -#define TRACE_EVENT_TEMPLATE(name, proto, args, tstruct, assign, print) +#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) #define DEFINE_EVENT(template, name, proto, args) \ DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h index 5d7d855ae21..5acfb1eb4df 100644 --- a/include/trace/define_trace.h +++ b/include/trace/define_trace.h @@ -71,7 +71,7 @@ #undef TRACE_EVENT #undef TRACE_EVENT_FN -#undef TRACE_EVENT_TEMPLATE +#undef DECLARE_EVENT_CLASS #undef DEFINE_EVENT #undef DEFINE_EVENT_PRINT #undef TRACE_HEADER_MULTI_READ diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 238f74b5848..5ce79502185 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -83,7 +83,7 @@ TRACE_EVENT(sched_wait_task, * (NOTE: the 'rq' argument is not used by generic trace events, * but used by the latency tracer plugin. ) */ -TRACE_EVENT_TEMPLATE(sched_wakeup_template, +DECLARE_EVENT_CLASS(sched_wakeup_template, TP_PROTO(struct rq *rq, struct task_struct *p, int success), @@ -197,7 +197,7 @@ TRACE_EVENT(sched_migrate_task, __entry->orig_cpu, __entry->dest_cpu) ); -TRACE_EVENT_TEMPLATE(sched_process_template, +DECLARE_EVENT_CLASS(sched_process_template, TP_PROTO(struct task_struct *p), @@ -316,7 +316,7 @@ TRACE_EVENT(sched_signal_send, * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE * adding sched_stat support to SCHED_FIFO/RR would be welcome. */ -TRACE_EVENT_TEMPLATE(sched_stat_template, +DECLARE_EVENT_CLASS(sched_stat_template, TP_PROTO(struct task_struct *tsk, u64 delay), diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index b0461772bc8..2c9c073e45a 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -19,17 +19,17 @@ #include /* - * TRACE_EVENT_TEMPLATE can be used to add a generic function + * DECLARE_EVENT_CLASS can be used to add a generic function * handlers for events. That is, if all events have the same * parameters and just have distinct trace points. * Each tracepoint can be defined with DEFINE_EVENT and that - * will map the TRACE_EVENT_TEMPLATE to the tracepoint. + * will map the DECLARE_EVENT_CLASS to the tracepoint. * * TRACE_EVENT is a one to one mapping between tracepoint and template. */ #undef TRACE_EVENT #define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ - TRACE_EVENT_TEMPLATE(name, \ + DECLARE_EVENT_CLASS(name, \ PARAMS(proto), \ PARAMS(args), \ PARAMS(tstruct), \ @@ -56,8 +56,8 @@ #undef TP_STRUCT__entry #define TP_STRUCT__entry(args...) 
args -#undef TRACE_EVENT_TEMPLATE -#define TRACE_EVENT_TEMPLATE(name, proto, args, tstruct, assign, print) \ +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \ struct ftrace_raw_##name { \ struct trace_entry ent; \ tstruct \ @@ -115,8 +115,8 @@ #undef __string #define __string(item, src) __dynamic_array(char, item, -1) -#undef TRACE_EVENT_TEMPLATE -#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print) \ +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ struct ftrace_data_offsets_##call { \ tstruct; \ }; @@ -203,8 +203,8 @@ #undef TP_perf_assign #define TP_perf_assign(args...) -#undef TRACE_EVENT_TEMPLATE -#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, func, print) \ +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \ static int \ ftrace_format_setup_##call(struct ftrace_event_call *unused, \ struct trace_seq *s) \ @@ -321,8 +321,8 @@ ftrace_format_##name(struct ftrace_event_call *unused, \ ftrace_print_symbols_seq(p, value, symbols); \ }) -#undef TRACE_EVENT_TEMPLATE -#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print) \ +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ static enum print_line_t \ ftrace_raw_output_id_##call(int event_id, const char *name, \ struct trace_iterator *iter, int flags) \ @@ -428,8 +428,8 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ #undef __string #define __string(item, src) __dynamic_array(char, item, -1) -#undef TRACE_EVENT_TEMPLATE -#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, func, print) \ +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \ static int \ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \ { \ @@ -480,8 +480,8 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \ #undef __string #define __string(item, src) __dynamic_array(char, item, strlen(src) + 1) -#undef TRACE_EVENT_TEMPLATE -#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print) \ +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ static inline int ftrace_get_offsets_##call( \ struct ftrace_data_offsets_##call *__data_offsets, proto) \ { \ @@ -521,8 +521,8 @@ static inline int ftrace_get_offsets_##call( \ * */ -#undef TRACE_EVENT_TEMPLATE -#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print) +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) #undef DEFINE_EVENT #define DEFINE_EVENT(template, name, proto, args) \ @@ -681,8 +681,8 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\ #define __assign_str(dst, src) \ strcpy(__get_str(dst), src); -#undef TRACE_EVENT_TEMPLATE -#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print) \ +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ \ static void ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \ proto) \ @@ -764,8 +764,8 @@ static int ftrace_raw_init_event_##call(struct ftrace_event_call *unused)\ #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) -#undef TRACE_EVENT_TEMPLATE -#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print) +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) 
#undef DEFINE_EVENT #define DEFINE_EVENT(template, call, proto, args) \ @@ -885,8 +885,8 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ #undef __perf_count #define __perf_count(c) __count = (c) -#undef TRACE_EVENT_TEMPLATE -#define TRACE_EVENT_TEMPLATE(call, proto, args, tstruct, assign, print) \ +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ static void \ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \ proto) \ From 925684d6d589e40e41007edf47c69e729d911263 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 26 Nov 2009 15:03:23 +0800 Subject: [PATCH 393/470] tracing: Convert module refcnt events to DEFINE_EVENT Use DECLARE_EVENT_CLASS to remove duplicate code: text data bss dec hex filename 29854 1980 128 31962 7cda kernel/module.o.old 28750 1980 128 30858 788a kernel/module.o Two events are converted: module_refcnt: module_get, module_put No change in functionality. Signed-off-by: Li Zefan Cc: Rusty Russell Cc: Steven Rostedt Cc: Frederic Weisbecker LKML-Reference: <4B0E283B.3010508@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- include/trace/events/module.h | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/include/trace/events/module.h b/include/trace/events/module.h index 84160fb1847..4b0f48ba16a 100644 --- a/include/trace/events/module.h +++ b/include/trace/events/module.h @@ -51,7 +51,7 @@ TRACE_EVENT(module_free, TP_printk("%s", __get_str(name)) ); -TRACE_EVENT(module_get, +DECLARE_EVENT_CLASS(module_refcnt, TP_PROTO(struct module *mod, unsigned long ip, int refcnt), @@ -73,26 +73,18 @@ TRACE_EVENT(module_get, __get_str(name), (void *)__entry->ip, __entry->refcnt) ); -TRACE_EVENT(module_put, +DEFINE_EVENT(module_refcnt, module_get, TP_PROTO(struct module *mod, unsigned long ip, int refcnt), - TP_ARGS(mod, ip, refcnt), + TP_ARGS(mod, ip, refcnt) +); - TP_STRUCT__entry( - __field( unsigned long, ip ) - __field( int, refcnt ) - __string( name, mod->name ) - ), +DEFINE_EVENT(module_refcnt, module_put, - TP_fast_assign( - __entry->ip = ip; - __entry->refcnt = refcnt; - __assign_str(name, mod->name); - ), + TP_PROTO(struct module *mod, unsigned long ip, int refcnt), - TP_printk("%s call_site=%pf refcnt=%d", - __get_str(name), (void *)__entry->ip, __entry->refcnt) + TP_ARGS(mod, ip, refcnt) ); TRACE_EVENT(module_request, From 53d0422c2d10808fddb2c30859193bfea164c7e3 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 26 Nov 2009 15:04:10 +0800 Subject: [PATCH 394/470] tracing: Convert some kmem events to DEFINE_EVENT Use DECLARE_EVENT_CLASS to remove duplicate code: text data bss dec hex filename 333987 69800 27228 431015 693a7 mm/built-in.o.old 330030 69800 27228 427058 68432 mm/built-in.o 8 events are converted: kmem_alloc: kmalloc, kmem_cache_alloc kmem_alloc_node: kmalloc_node, kmem_cache_alloc_node kmem_free: kfree, kmem_cache_free mm_page: mm_page_alloc_zone_locked, mm_page_pcpu_drain No change in functionality. 
Signed-off-by: Li Zefan Acked-by: Pekka Enberg Cc: Steven Rostedt Cc: Frederic Weisbecker Cc: Mel Gorman LKML-Reference: <4B0E286A.2000405@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- include/trace/events/kmem.h | 144 ++++++++++++------------------------ mm/page_alloc.c | 4 +- mm/util.c | 3 - 3 files changed, 50 insertions(+), 101 deletions(-) diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index eaf46bdd18a..3adca0ca9db 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -44,7 +44,7 @@ {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"} \ ) : "GFP_NOWAIT" -TRACE_EVENT(kmalloc, +DECLARE_EVENT_CLASS(kmem_alloc, TP_PROTO(unsigned long call_site, const void *ptr, @@ -78,41 +78,23 @@ TRACE_EVENT(kmalloc, show_gfp_flags(__entry->gfp_flags)) ); -TRACE_EVENT(kmem_cache_alloc, +DEFINE_EVENT(kmem_alloc, kmalloc, - TP_PROTO(unsigned long call_site, - const void *ptr, - size_t bytes_req, - size_t bytes_alloc, - gfp_t gfp_flags), + TP_PROTO(unsigned long call_site, const void *ptr, + size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags), - TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags), - - TP_STRUCT__entry( - __field( unsigned long, call_site ) - __field( const void *, ptr ) - __field( size_t, bytes_req ) - __field( size_t, bytes_alloc ) - __field( gfp_t, gfp_flags ) - ), - - TP_fast_assign( - __entry->call_site = call_site; - __entry->ptr = ptr; - __entry->bytes_req = bytes_req; - __entry->bytes_alloc = bytes_alloc; - __entry->gfp_flags = gfp_flags; - ), - - TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s", - __entry->call_site, - __entry->ptr, - __entry->bytes_req, - __entry->bytes_alloc, - show_gfp_flags(__entry->gfp_flags)) + TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags) ); -TRACE_EVENT(kmalloc_node, +DEFINE_EVENT(kmem_alloc, kmem_cache_alloc, + + TP_PROTO(unsigned long call_site, const void *ptr, + size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags), + + TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags) +); + +DECLARE_EVENT_CLASS(kmem_alloc_node, TP_PROTO(unsigned long call_site, const void *ptr, @@ -150,45 +132,25 @@ TRACE_EVENT(kmalloc_node, __entry->node) ); -TRACE_EVENT(kmem_cache_alloc_node, +DEFINE_EVENT(kmem_alloc_node, kmalloc_node, - TP_PROTO(unsigned long call_site, - const void *ptr, - size_t bytes_req, - size_t bytes_alloc, - gfp_t gfp_flags, - int node), + TP_PROTO(unsigned long call_site, const void *ptr, + size_t bytes_req, size_t bytes_alloc, + gfp_t gfp_flags, int node), - TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node), - - TP_STRUCT__entry( - __field( unsigned long, call_site ) - __field( const void *, ptr ) - __field( size_t, bytes_req ) - __field( size_t, bytes_alloc ) - __field( gfp_t, gfp_flags ) - __field( int, node ) - ), - - TP_fast_assign( - __entry->call_site = call_site; - __entry->ptr = ptr; - __entry->bytes_req = bytes_req; - __entry->bytes_alloc = bytes_alloc; - __entry->gfp_flags = gfp_flags; - __entry->node = node; - ), - - TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d", - __entry->call_site, - __entry->ptr, - __entry->bytes_req, - __entry->bytes_alloc, - show_gfp_flags(__entry->gfp_flags), - __entry->node) + TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node) ); -TRACE_EVENT(kfree, +DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node, + + TP_PROTO(unsigned long call_site, const void *ptr, + size_t bytes_req, size_t bytes_alloc, + gfp_t gfp_flags, int node), + + TP_ARGS(call_site, ptr, 
bytes_req, bytes_alloc, gfp_flags, node) +); + +DECLARE_EVENT_CLASS(kmem_free, TP_PROTO(unsigned long call_site, const void *ptr), @@ -207,23 +169,18 @@ TRACE_EVENT(kfree, TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr) ); -TRACE_EVENT(kmem_cache_free, +DEFINE_EVENT(kmem_free, kfree, TP_PROTO(unsigned long call_site, const void *ptr), - TP_ARGS(call_site, ptr), + TP_ARGS(call_site, ptr) +); - TP_STRUCT__entry( - __field( unsigned long, call_site ) - __field( const void *, ptr ) - ), +DEFINE_EVENT(kmem_free, kmem_cache_free, - TP_fast_assign( - __entry->call_site = call_site; - __entry->ptr = ptr; - ), + TP_PROTO(unsigned long call_site, const void *ptr), - TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr) + TP_ARGS(call_site, ptr) ); TRACE_EVENT(mm_page_free_direct, @@ -299,7 +256,7 @@ TRACE_EVENT(mm_page_alloc, show_gfp_flags(__entry->gfp_flags)) ); -TRACE_EVENT(mm_page_alloc_zone_locked, +DECLARE_EVENT_CLASS(mm_page, TP_PROTO(struct page *page, unsigned int order, int migratetype), @@ -325,29 +282,22 @@ TRACE_EVENT(mm_page_alloc_zone_locked, __entry->order == 0) ); -TRACE_EVENT(mm_page_pcpu_drain, +DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked, - TP_PROTO(struct page *page, int order, int migratetype), + TP_PROTO(struct page *page, unsigned int order, int migratetype), + + TP_ARGS(page, order, migratetype) +); + +DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain, + + TP_PROTO(struct page *page, unsigned int order, int migratetype), TP_ARGS(page, order, migratetype), - TP_STRUCT__entry( - __field( struct page *, page ) - __field( int, order ) - __field( int, migratetype ) - ), - - TP_fast_assign( - __entry->page = page; - __entry->order = order; - __entry->migratetype = migratetype; - ), - TP_printk("page=%p pfn=%lu order=%d migratetype=%d", - __entry->page, - page_to_pfn(__entry->page), - __entry->order, - __entry->migratetype) + __entry->page, page_to_pfn(__entry->page), + __entry->order, __entry->migratetype) ); TRACE_EVENT(mm_page_alloc_extfrag, diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2bc2ac63f41..bdb22f55d00 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -48,12 +48,14 @@ #include #include #include -#include #include #include #include "internal.h" +#define CREATE_TRACE_POINTS +#include + /* * Array of node states. */ diff --git a/mm/util.c b/mm/util.c index 7c35ad95f92..15d197571b4 100644 --- a/mm/util.c +++ b/mm/util.c @@ -6,9 +6,6 @@ #include #include -#define CREATE_TRACE_POINTS -#include - /** * kstrdup - allocate space for and copy an existing string * @s: the string to duplicate From c467307c1a812c3150b27a68c2b2d3397bb40a4f Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 26 Nov 2009 15:04:31 +0800 Subject: [PATCH 395/470] tracing: Convert softirq events to DEFINE_EVENT Use DECLARE_EVENT_CLASS to remove duplicate code: text data bss dec hex filename 12781 952 36 13769 35c9 kernel/softirq.o.old 11981 952 32 12965 32a5 kernel/softirq.o Two events are converted: softirq: softirq_entry, softirq_exit No change in functionality. 
Signed-off-by: Li Zefan Cc: Steven Rostedt Cc: Frederic Weisbecker LKML-Reference: <4B0E287F.4030708@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- include/trace/events/irq.h | 46 +++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 25 deletions(-) diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h index dcfcd440762..0e4cfb694fe 100644 --- a/include/trace/events/irq.h +++ b/include/trace/events/irq.h @@ -82,18 +82,7 @@ TRACE_EVENT(irq_handler_exit, __entry->irq, __entry->ret ? "handled" : "unhandled") ); -/** - * softirq_entry - called immediately before the softirq handler - * @h: pointer to struct softirq_action - * @vec: pointer to first struct softirq_action in softirq_vec array - * - * The @h parameter, contains a pointer to the struct softirq_action - * which has a pointer to the action handler that is called. By subtracting - * the @vec pointer from the @h pointer, we can determine the softirq - * number. Also, when used in combination with the softirq_exit tracepoint - * we can determine the softirq latency. - */ -TRACE_EVENT(softirq_entry, +DECLARE_EVENT_CLASS(softirq, TP_PROTO(struct softirq_action *h, struct softirq_action *vec), @@ -111,6 +100,24 @@ TRACE_EVENT(softirq_entry, show_softirq_name(__entry->vec)) ); +/** + * softirq_entry - called immediately before the softirq handler + * @h: pointer to struct softirq_action + * @vec: pointer to first struct softirq_action in softirq_vec array + * + * The @h parameter, contains a pointer to the struct softirq_action + * which has a pointer to the action handler that is called. By subtracting + * the @vec pointer from the @h pointer, we can determine the softirq + * number. Also, when used in combination with the softirq_exit tracepoint + * we can determine the softirq latency. + */ +DEFINE_EVENT(softirq, softirq_entry, + + TP_PROTO(struct softirq_action *h, struct softirq_action *vec), + + TP_ARGS(h, vec) +); + /** * softirq_exit - called immediately after the softirq handler returns * @h: pointer to struct softirq_action @@ -122,22 +129,11 @@ TRACE_EVENT(softirq_entry, * combination with the softirq_entry tracepoint we can determine the softirq * latency. */ -TRACE_EVENT(softirq_exit, +DEFINE_EVENT(softirq, softirq_exit, TP_PROTO(struct softirq_action *h, struct softirq_action *vec), - TP_ARGS(h, vec), - - TP_STRUCT__entry( - __field( int, vec ) - ), - - TP_fast_assign( - __entry->vec = (int)(h - vec); - ), - - TP_printk("vec=%d [action=%s]", __entry->vec, - show_softirq_name(__entry->vec)) + TP_ARGS(h, vec) ); #endif /* _TRACE_IRQ_H */ From 382ece710bf88b08440b598731361e5a47582b62 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 26 Nov 2009 15:05:03 +0800 Subject: [PATCH 396/470] tracing: Convert some workqueue events to DEFINE_EVENT Use DECLARE_EVENT_CLASS to remove duplicate code: text data bss dec hex filename 13171 800 72 14043 36db kernel/workqueue.o.old 12243 800 68 13111 3337 kernel/workqueue.o Two events are converted: workqueue: workqueue_insertion, workqueue_execution No change in functionality. 
Signed-off-by: Li Zefan Cc: Steven Rostedt Cc: Frederic Weisbecker LKML-Reference: <4B0E289F.5010104@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- include/trace/events/workqueue.h | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h index e4612dbd7ba..d6c974474e7 100644 --- a/include/trace/events/workqueue.h +++ b/include/trace/events/workqueue.h @@ -8,7 +8,7 @@ #include #include -TRACE_EVENT(workqueue_insertion, +DECLARE_EVENT_CLASS(workqueue, TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), @@ -30,26 +30,18 @@ TRACE_EVENT(workqueue_insertion, __entry->thread_pid, __entry->func) ); -TRACE_EVENT(workqueue_execution, +DEFINE_EVENT(workqueue, workqueue_insertion, TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), - TP_ARGS(wq_thread, work), + TP_ARGS(wq_thread, work) +); - TP_STRUCT__entry( - __array(char, thread_comm, TASK_COMM_LEN) - __field(pid_t, thread_pid) - __field(work_func_t, func) - ), +DEFINE_EVENT(workqueue, workqueue_execution, - TP_fast_assign( - memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN); - __entry->thread_pid = wq_thread->pid; - __entry->func = work->func; - ), + TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), - TP_printk("thread=%s:%d func=%pf", __entry->thread_comm, - __entry->thread_pid, __entry->func) + TP_ARGS(wq_thread, work) ); /* Trace the creation of one workqueue thread on a cpu */ From 7703466b4c0a21b88d701882bef0d45bcb0a0281 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 26 Nov 2009 15:05:38 +0800 Subject: [PATCH 397/470] tracing: Convert some power events to DEFINE_EVENT Use DECLARE_EVENT_CLASS to remove duplicate code: text data bss dec hex filename 4312 524 12 4848 12f0 kernel/trace/power-traces.o.old 3455 524 8 3987 f93 kernel/trace/power-traces.o Two events are converted: power: power_start, power_frequency No change in functionality. 
Signed-off-by: Li Zefan Cc: Steven Rostedt Cc: Frederic Weisbecker Cc: Arjan van de Ven LKML-Reference: <4B0E28C2.1090906@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- include/trace/events/power.h | 36 +++++++++++++++--------------------- 1 file changed, 15 insertions(+), 21 deletions(-) diff --git a/include/trace/events/power.h b/include/trace/events/power.h index 9bb96e5a284..c4efe9b8280 100644 --- a/include/trace/events/power.h +++ b/include/trace/events/power.h @@ -16,7 +16,7 @@ enum { }; #endif -TRACE_EVENT(power_start, +DECLARE_EVENT_CLASS(power, TP_PROTO(unsigned int type, unsigned int state), @@ -35,6 +35,20 @@ TRACE_EVENT(power_start, TP_printk("type=%lu state=%lu", (unsigned long)__entry->type, (unsigned long)__entry->state) ); +DEFINE_EVENT(power, power_start, + + TP_PROTO(unsigned int type, unsigned int state), + + TP_ARGS(type, state) +); + +DEFINE_EVENT(power, power_frequency, + + TP_PROTO(unsigned int type, unsigned int state), + + TP_ARGS(type, state) +); + TRACE_EVENT(power_end, TP_PROTO(int dummy), @@ -53,26 +67,6 @@ TRACE_EVENT(power_end, ); - -TRACE_EVENT(power_frequency, - - TP_PROTO(unsigned int type, unsigned int state), - - TP_ARGS(type, state), - - TP_STRUCT__entry( - __field( u64, type ) - __field( u64, state ) - ), - - TP_fast_assign( - __entry->type = type; - __entry->state = state; - ), - - TP_printk("type=%lu state=%lu", (unsigned long)__entry->type, (unsigned long) __entry->state) -); - #endif /* _TRACE_POWER_H */ /* This part must be outside protection */ From 77ca1e0294f25fc26053ba14353e703158acef26 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 26 Nov 2009 15:06:14 +0800 Subject: [PATCH 398/470] tracing: Convert some block events to DEFINE_EVENT use DECLARE_EVENT_CLASS to remove duplicate code: text data bss dec hex filename 53570 3284 184 57038 dece block/blk-core.o.old 43702 3284 144 47130 b81a block/blk-core.o 12 events are converted: block_rq: block_rq_insert, block_rq_issue block_rq_with_error: block_rq_{abort, requeue, complete} block_bio: block_bio_{backmerge, frontmerge, queue} block_get_rq: block_getrq, block_sleeprq block_unplug: block_unplug_timer, block_unplug_io No change in functionality. 
Signed-off-by: Li Zefan Cc: Jens Axboe Cc: Steven Rostedt Cc: Frederic Weisbecker LKML-Reference: <4B0E28E6.7060609@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- include/trace/events/block.h | 224 +++++++++-------------------------- 1 file changed, 53 insertions(+), 171 deletions(-) diff --git a/include/trace/events/block.h b/include/trace/events/block.h index 00405b5f624..5fb72733331 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h @@ -8,7 +8,7 @@ #include #include -TRACE_EVENT(block_rq_abort, +DECLARE_EVENT_CLASS(block_rq_with_error, TP_PROTO(struct request_queue *q, struct request *rq), @@ -40,7 +40,28 @@ TRACE_EVENT(block_rq_abort, __entry->nr_sector, __entry->errors) ); -TRACE_EVENT(block_rq_insert, +DEFINE_EVENT(block_rq_with_error, block_rq_abort, + + TP_PROTO(struct request_queue *q, struct request *rq), + + TP_ARGS(q, rq) +); + +DEFINE_EVENT(block_rq_with_error, block_rq_requeue, + + TP_PROTO(struct request_queue *q, struct request *rq), + + TP_ARGS(q, rq) +); + +DEFINE_EVENT(block_rq_with_error, block_rq_complete, + + TP_PROTO(struct request_queue *q, struct request *rq), + + TP_ARGS(q, rq) +); + +DECLARE_EVENT_CLASS(block_rq, TP_PROTO(struct request_queue *q, struct request *rq), @@ -74,102 +95,18 @@ TRACE_EVENT(block_rq_insert, __entry->nr_sector, __entry->comm) ); -TRACE_EVENT(block_rq_issue, +DEFINE_EVENT(block_rq, block_rq_insert, TP_PROTO(struct request_queue *q, struct request *rq), - TP_ARGS(q, rq), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( sector_t, sector ) - __field( unsigned int, nr_sector ) - __field( unsigned int, bytes ) - __array( char, rwbs, 6 ) - __array( char, comm, TASK_COMM_LEN ) - __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) - ), - - TP_fast_assign( - __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; - __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); - __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); - __entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0; - - blk_fill_rwbs_rq(__entry->rwbs, rq); - blk_dump_cmd(__get_str(cmd), rq); - memcpy(__entry->comm, current->comm, TASK_COMM_LEN); - ), - - TP_printk("%d,%d %s %u (%s) %llu + %u [%s]", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->rwbs, __entry->bytes, __get_str(cmd), - (unsigned long long)__entry->sector, - __entry->nr_sector, __entry->comm) + TP_ARGS(q, rq) ); -TRACE_EVENT(block_rq_requeue, +DEFINE_EVENT(block_rq, block_rq_issue, TP_PROTO(struct request_queue *q, struct request *rq), - TP_ARGS(q, rq), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( sector_t, sector ) - __field( unsigned int, nr_sector ) - __field( int, errors ) - __array( char, rwbs, 6 ) - __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) - ), - - TP_fast_assign( - __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; - __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); - __entry->nr_sector = blk_pc_request(rq) ? 
0 : blk_rq_sectors(rq); - __entry->errors = rq->errors; - - blk_fill_rwbs_rq(__entry->rwbs, rq); - blk_dump_cmd(__get_str(cmd), rq); - ), - - TP_printk("%d,%d %s (%s) %llu + %u [%d]", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->rwbs, __get_str(cmd), - (unsigned long long)__entry->sector, - __entry->nr_sector, __entry->errors) -); - -TRACE_EVENT(block_rq_complete, - - TP_PROTO(struct request_queue *q, struct request *rq), - - TP_ARGS(q, rq), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( sector_t, sector ) - __field( unsigned int, nr_sector ) - __field( int, errors ) - __array( char, rwbs, 6 ) - __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) - ), - - TP_fast_assign( - __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; - __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); - __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); - __entry->errors = rq->errors; - - blk_fill_rwbs_rq(__entry->rwbs, rq); - blk_dump_cmd(__get_str(cmd), rq); - ), - - TP_printk("%d,%d %s (%s) %llu + %u [%d]", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->rwbs, __get_str(cmd), - (unsigned long long)__entry->sector, - __entry->nr_sector, __entry->errors) + TP_ARGS(q, rq) ); TRACE_EVENT(block_bio_bounce, @@ -228,7 +165,7 @@ TRACE_EVENT(block_bio_complete, __entry->nr_sector, __entry->error) ); -TRACE_EVENT(block_bio_backmerge, +DECLARE_EVENT_CLASS(block_bio, TP_PROTO(struct request_queue *q, struct bio *bio), @@ -256,63 +193,28 @@ TRACE_EVENT(block_bio_backmerge, __entry->nr_sector, __entry->comm) ); -TRACE_EVENT(block_bio_frontmerge, +DEFINE_EVENT(block_bio, block_bio_backmerge, TP_PROTO(struct request_queue *q, struct bio *bio), - TP_ARGS(q, bio), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( sector_t, sector ) - __field( unsigned, nr_sector ) - __array( char, rwbs, 6 ) - __array( char, comm, TASK_COMM_LEN ) - ), - - TP_fast_assign( - __entry->dev = bio->bi_bdev->bd_dev; - __entry->sector = bio->bi_sector; - __entry->nr_sector = bio->bi_size >> 9; - blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); - memcpy(__entry->comm, current->comm, TASK_COMM_LEN); - ), - - TP_printk("%d,%d %s %llu + %u [%s]", - MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, - (unsigned long long)__entry->sector, - __entry->nr_sector, __entry->comm) + TP_ARGS(q, bio) ); -TRACE_EVENT(block_bio_queue, +DEFINE_EVENT(block_bio, block_bio_frontmerge, TP_PROTO(struct request_queue *q, struct bio *bio), - TP_ARGS(q, bio), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( sector_t, sector ) - __field( unsigned int, nr_sector ) - __array( char, rwbs, 6 ) - __array( char, comm, TASK_COMM_LEN ) - ), - - TP_fast_assign( - __entry->dev = bio->bi_bdev->bd_dev; - __entry->sector = bio->bi_sector; - __entry->nr_sector = bio->bi_size >> 9; - blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); - memcpy(__entry->comm, current->comm, TASK_COMM_LEN); - ), - - TP_printk("%d,%d %s %llu + %u [%s]", - MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, - (unsigned long long)__entry->sector, - __entry->nr_sector, __entry->comm) + TP_ARGS(q, bio) ); -TRACE_EVENT(block_getrq, +DEFINE_EVENT(block_bio, block_bio_queue, + + TP_PROTO(struct request_queue *q, struct bio *bio), + + TP_ARGS(q, bio) +); + +DECLARE_EVENT_CLASS(block_get_rq, TP_PROTO(struct request_queue *q, struct bio *bio, int rw), @@ -341,33 +243,18 @@ TRACE_EVENT(block_getrq, __entry->nr_sector, __entry->comm) ); -TRACE_EVENT(block_sleeprq, +DEFINE_EVENT(block_get_rq, block_getrq, TP_PROTO(struct request_queue *q, 
struct bio *bio, int rw), - TP_ARGS(q, bio, rw), + TP_ARGS(q, bio, rw) +); - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( sector_t, sector ) - __field( unsigned int, nr_sector ) - __array( char, rwbs, 6 ) - __array( char, comm, TASK_COMM_LEN ) - ), +DEFINE_EVENT(block_get_rq, block_sleeprq, - TP_fast_assign( - __entry->dev = bio ? bio->bi_bdev->bd_dev : 0; - __entry->sector = bio ? bio->bi_sector : 0; - __entry->nr_sector = bio ? bio->bi_size >> 9 : 0; - blk_fill_rwbs(__entry->rwbs, - bio ? bio->bi_rw : 0, __entry->nr_sector); - memcpy(__entry->comm, current->comm, TASK_COMM_LEN); - ), + TP_PROTO(struct request_queue *q, struct bio *bio, int rw), - TP_printk("%d,%d %s %llu + %u [%s]", - MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, - (unsigned long long)__entry->sector, - __entry->nr_sector, __entry->comm) + TP_ARGS(q, bio, rw) ); TRACE_EVENT(block_plug, @@ -387,7 +274,7 @@ TRACE_EVENT(block_plug, TP_printk("[%s]", __entry->comm) ); -TRACE_EVENT(block_unplug_timer, +DECLARE_EVENT_CLASS(block_unplug, TP_PROTO(struct request_queue *q), @@ -406,23 +293,18 @@ TRACE_EVENT(block_unplug_timer, TP_printk("[%s] %d", __entry->comm, __entry->nr_rq) ); -TRACE_EVENT(block_unplug_io, +DEFINE_EVENT(block_unplug, block_unplug_timer, TP_PROTO(struct request_queue *q), - TP_ARGS(q), + TP_ARGS(q) +); - TP_STRUCT__entry( - __field( int, nr_rq ) - __array( char, comm, TASK_COMM_LEN ) - ), +DEFINE_EVENT(block_unplug, block_unplug_io, - TP_fast_assign( - __entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE]; - memcpy(__entry->comm, current->comm, TASK_COMM_LEN); - ), + TP_PROTO(struct request_queue *q), - TP_printk("[%s] %d", __entry->comm, __entry->nr_rq) + TP_ARGS(q) ); TRACE_EVENT(block_split, From 071688f36e7eba3e37b2fc48e35bfdab99b80b4d Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 26 Nov 2009 15:06:55 +0800 Subject: [PATCH 399/470] tracing: Convert some jbd2 events to DEFINE_EVENT Use DECLARE_EVENT_CLASS to remove duplicate code: text data bss dec hex filename 34903 1693 448 37044 90b4 fs/jbd2/journal.o.old 31931 1693 416 34040 84f8 fs/jbd2/journal.o Four events are converted: jbd2_commit: jbd2_start_commit, jbd2_commit_{locking, flushing, logging} No change in functionality. 
Signed-off-by: Li Zefan Cc: Theodore Ts'o Cc: Steven Rostedt Cc: Frederic Weisbecker LKML-Reference: <4B0E290F.7030909@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- include/trace/events/jbd2.h | 63 +++++++------------------------------ 1 file changed, 11 insertions(+), 52 deletions(-) diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h index 3c60b75adb9..96b370a050d 100644 --- a/include/trace/events/jbd2.h +++ b/include/trace/events/jbd2.h @@ -30,7 +30,7 @@ TRACE_EVENT(jbd2_checkpoint, jbd2_dev_to_name(__entry->dev), __entry->result) ); -TRACE_EVENT(jbd2_start_commit, +DECLARE_EVENT_CLASS(jbd2_commit, TP_PROTO(journal_t *journal, transaction_t *commit_transaction), @@ -53,73 +53,32 @@ TRACE_EVENT(jbd2_start_commit, __entry->sync_commit) ); -TRACE_EVENT(jbd2_commit_locking, +DEFINE_EVENT(jbd2_commit, jbd2_start_commit, TP_PROTO(journal_t *journal, transaction_t *commit_transaction), - TP_ARGS(journal, commit_transaction), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( char, sync_commit ) - __field( int, transaction ) - ), - - TP_fast_assign( - __entry->dev = journal->j_fs_dev->bd_dev; - __entry->sync_commit = commit_transaction->t_synchronous_commit; - __entry->transaction = commit_transaction->t_tid; - ), - - TP_printk("dev %s transaction %d sync %d", - jbd2_dev_to_name(__entry->dev), __entry->transaction, - __entry->sync_commit) + TP_ARGS(journal, commit_transaction) ); -TRACE_EVENT(jbd2_commit_flushing, +DEFINE_EVENT(jbd2_commit, jbd2_commit_locking, TP_PROTO(journal_t *journal, transaction_t *commit_transaction), - TP_ARGS(journal, commit_transaction), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( char, sync_commit ) - __field( int, transaction ) - ), - - TP_fast_assign( - __entry->dev = journal->j_fs_dev->bd_dev; - __entry->sync_commit = commit_transaction->t_synchronous_commit; - __entry->transaction = commit_transaction->t_tid; - ), - - TP_printk("dev %s transaction %d sync %d", - jbd2_dev_to_name(__entry->dev), __entry->transaction, - __entry->sync_commit) + TP_ARGS(journal, commit_transaction) ); -TRACE_EVENT(jbd2_commit_logging, +DEFINE_EVENT(jbd2_commit, jbd2_commit_flushing, TP_PROTO(journal_t *journal, transaction_t *commit_transaction), - TP_ARGS(journal, commit_transaction), + TP_ARGS(journal, commit_transaction) +); - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( char, sync_commit ) - __field( int, transaction ) - ), +DEFINE_EVENT(jbd2_commit, jbd2_commit_logging, - TP_fast_assign( - __entry->dev = journal->j_fs_dev->bd_dev; - __entry->sync_commit = commit_transaction->t_synchronous_commit; - __entry->transaction = commit_transaction->t_tid; - ), + TP_PROTO(journal_t *journal, transaction_t *commit_transaction), - TP_printk("dev %s transaction %d sync %d", - jbd2_dev_to_name(__entry->dev), __entry->transaction, - __entry->sync_commit) + TP_ARGS(journal, commit_transaction) ); TRACE_EVENT(jbd2_end_commit, From b5eb34c3592545c756e50d882c08417eb60740a7 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 26 Nov 2009 15:07:36 +0800 Subject: [PATCH 400/470] tracing: Convert some ext4 events to DEFINE_TRACE Use DECLARE_EVENT_CLASS to remove duplicate code: text data bss dec hex filename 294695 6104 340 301139 49853 fs/ext4/ext4.o.old 289983 6104 324 296411 485db fs/ext4/ext4.o 5 events are convertd: ext4__write_begin: ext4_write_begin, ext4_da_write_begin ext4__write_end: ext4_{ordered, writeback, journalled}_write_end No change in functionality. 
Signed-off-by: Li Zefan Cc: Theodore Ts'o Cc: Steven Rostedt Cc: Frederic Weisbecker LKML-Reference: <4B0E2938.2040708@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- include/trace/events/ext4.h | 135 ++++++++++-------------------------- 1 file changed, 38 insertions(+), 97 deletions(-) diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index d09550bf3f9..318f76535bd 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -90,7 +90,7 @@ TRACE_EVENT(ext4_allocate_inode, (unsigned long) __entry->dir, __entry->mode) ); -TRACE_EVENT(ext4_write_begin, +DECLARE_EVENT_CLASS(ext4__write_begin, TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, unsigned int flags), @@ -118,7 +118,23 @@ TRACE_EVENT(ext4_write_begin, __entry->pos, __entry->len, __entry->flags) ); -TRACE_EVENT(ext4_ordered_write_end, +DEFINE_EVENT(ext4__write_begin, ext4_write_begin, + + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int flags), + + TP_ARGS(inode, pos, len, flags) +); + +DEFINE_EVENT(ext4__write_begin, ext4_da_write_begin, + + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int flags), + + TP_ARGS(inode, pos, len, flags) +); + +DECLARE_EVENT_CLASS(ext4__write_end, TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, unsigned int copied), @@ -145,57 +161,36 @@ TRACE_EVENT(ext4_ordered_write_end, __entry->pos, __entry->len, __entry->copied) ); -TRACE_EVENT(ext4_writeback_write_end, +DEFINE_EVENT(ext4__write_end, ext4_ordered_write_end, + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, unsigned int copied), - TP_ARGS(inode, pos, len, copied), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( ino_t, ino ) - __field( loff_t, pos ) - __field( unsigned int, len ) - __field( unsigned int, copied ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->ino = inode->i_ino; - __entry->pos = pos; - __entry->len = len; - __entry->copied = copied; - ), - - TP_printk("dev %s ino %lu pos %llu len %u copied %u", - jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, - __entry->pos, __entry->len, __entry->copied) + TP_ARGS(inode, pos, len, copied) ); -TRACE_EVENT(ext4_journalled_write_end, +DEFINE_EVENT(ext4__write_end, ext4_writeback_write_end, + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, unsigned int copied), - TP_ARGS(inode, pos, len, copied), - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( ino_t, ino ) - __field( loff_t, pos ) - __field( unsigned int, len ) - __field( unsigned int, copied ) - ), + TP_ARGS(inode, pos, len, copied) +); - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->ino = inode->i_ino; - __entry->pos = pos; - __entry->len = len; - __entry->copied = copied; - ), +DEFINE_EVENT(ext4__write_end, ext4_journalled_write_end, - TP_printk("dev %s ino %lu pos %llu len %u copied %u", - jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, - __entry->pos, __entry->len, __entry->copied) + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int copied), + + TP_ARGS(inode, pos, len, copied) +); + +DEFINE_EVENT(ext4__write_end, ext4_da_write_end, + + TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, + unsigned int copied), + + TP_ARGS(inode, pos, len, copied) ); TRACE_EVENT(ext4_writepage, @@ -337,60 +332,6 @@ TRACE_EVENT(ext4_da_writepages_result, (unsigned long) __entry->writeback_index) ); -TRACE_EVENT(ext4_da_write_begin, - TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, - unsigned int 
flags), - - TP_ARGS(inode, pos, len, flags), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( ino_t, ino ) - __field( loff_t, pos ) - __field( unsigned int, len ) - __field( unsigned int, flags ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->ino = inode->i_ino; - __entry->pos = pos; - __entry->len = len; - __entry->flags = flags; - ), - - TP_printk("dev %s ino %lu pos %llu len %u flags %u", - jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, - __entry->pos, __entry->len, __entry->flags) -); - -TRACE_EVENT(ext4_da_write_end, - TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, - unsigned int copied), - - TP_ARGS(inode, pos, len, copied), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( ino_t, ino ) - __field( loff_t, pos ) - __field( unsigned int, len ) - __field( unsigned int, copied ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->ino = inode->i_ino; - __entry->pos = pos; - __entry->len = len; - __entry->copied = copied; - ), - - TP_printk("dev %s ino %lu pos %llu len %u copied %u", - jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, - __entry->pos, __entry->len, __entry->copied) -); - TRACE_EVENT(ext4_discard_blocks, TP_PROTO(struct super_block *sb, unsigned long long blk, unsigned long long count), From 470dda7417f284b9cfc96560b2acd98df63798a2 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 26 Nov 2009 15:08:01 +0800 Subject: [PATCH 401/470] tracing: Restore original format of sched events The original format for sched_stat_iowait and sched_stat_sleep: $ cat events/sched/sched_stat_iowait/format ... print fmt: "comm=%s pid=%d delay=%Lu [ns]", ... $ cat events/sched/sched_stat_sleep/format ... print fmt: "comm=%s pid=%d delay=%Lu [ns]", ... But commit commit 75ec29ab848a7e92a41aaafaeb33d1afbc839be4 ("tracing: Convert some sched trace events to DEFINE_EVENT and _PRINT") broke the format: $ cat events/sched/sched_stat_iowait/format print fmt: "task: %s:%d iowait: %Lu [ns]", ... $ cat events/sched/sched_stat_sleep/format print fmt: "task: %s:%d sleep: %Lu [ns]", ... No change in functionality. Signed-off-by: Li Zefan Cc: Steven Rostedt Cc: Frederic Weisbecker LKML-Reference: <4B0E2951.9050800@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- include/trace/events/sched.h | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 5ce79502185..9d316b22388 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -355,23 +355,17 @@ DEFINE_EVENT(sched_stat_template, sched_stat_wait, * Tracepoint for accounting sleep time (time the task is not runnable, * including iowait, see below). */ -DEFINE_EVENT_PRINT(sched_stat_template, sched_stat_sleep, - TP_PROTO(struct task_struct *tsk, u64 delay), - TP_ARGS(tsk, delay), - TP_printk("task: %s:%d sleep: %Lu [ns]", - __entry->comm, __entry->pid, - (unsigned long long)__entry->delay)); +DEFINE_EVENT(sched_stat_template, sched_stat_sleep, + TP_PROTO(struct task_struct *tsk, u64 delay), + TP_ARGS(tsk, delay)); /* * Tracepoint for accounting iowait time (time the task is not runnable * due to waiting on IO to complete). 
*/ -DEFINE_EVENT_PRINT(sched_stat_template, sched_stat_iowait, - TP_PROTO(struct task_struct *tsk, u64 delay), - TP_ARGS(tsk, delay), - TP_printk("task: %s:%d iowait: %Lu [ns]", - __entry->comm, __entry->pid, - (unsigned long long)__entry->delay)); +DEFINE_EVENT(sched_stat_template, sched_stat_iowait, + TP_PROTO(struct task_struct *tsk, u64 delay), + TP_ARGS(tsk, delay)); /* * Tracepoint for accounting runtime (time the task is executing From d99be40aff88722ab03ee295e4f6c13a4cca9a3d Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 26 Nov 2009 05:35:40 +0100 Subject: [PATCH 402/470] ksym_tracer: Fix breakpoint removal after modification The error path of a breakpoint modification is broken in the ksym tracer. A modified breakpoint hlist node is immediately released after its removal. Also we leak a breakpoint in this case. Fix the path. Signed-off-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Prasad LKML-Reference: <1259210142-5714-1-git-send-regression-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_ksym.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c index 11935b53a6c..9f040e42f51 100644 --- a/kernel/trace/trace_ksym.c +++ b/kernel/trace/trace_ksym.c @@ -339,14 +339,20 @@ static ssize_t ksym_trace_filter_write(struct file *file, ksym_hbp_handler, true); if (IS_ERR(entry->ksym_hbp)) entry->ksym_hbp = NULL; - if (!entry->ksym_hbp) + + /* modified without problem */ + if (entry->ksym_hbp) { + ret = 0; goto out; + } + } else { + ret = 0; } + /* Error or "symbol:---" case: drop it */ ksym_filter_entry_count--; hlist_del_rcu(&(entry->ksym_hlist)); synchronize_rcu(); kfree(entry); - ret = 0; goto out; } else { /* Check for malformed request: (4) */ From c6567f642e20bcc79abed030f44be5b0d6da2ded Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 26 Nov 2009 05:35:41 +0100 Subject: [PATCH 403/470] hw-breakpoints: Improve in-kernel event creation error granularity In fail case, perf_event_create_kernel_counter() returns NULL instead of an error, which doesn't help us to inform the user about the origin of the problem from the outer most callers. Often we can just return -EINVAL, which doesn't help anyone when it's eventually about a memory allocation failure. Then, this patch makes perf_event_create_kernel_counter() always return a detailed error code. 
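To make the change in calling convention concrete, here is a generic sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() idiom the function is being moved to (illustration only — "foo" and its allocator are made up and are not part of this patch):

#include <linux/err.h>
#include <linux/slab.h>

struct foo {
	int id;
};

/* Producer side: encode the precise errno in the returned pointer. */
static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return ERR_PTR(-ENOMEM);	/* not NULL: the reason travels with the pointer */

	return f;
}

/* Consumer side: recover that errno instead of guessing -EINVAL. */
static int foo_setup(void)
{
	struct foo *f = foo_create();

	if (IS_ERR(f))
		return PTR_ERR(f);	/* -ENOMEM here, some other -E... elsewhere */

	/* use f ... */
	kfree(f);
	return 0;
}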
Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Paul Mackerras Cc: Prasad LKML-Reference: <1259210142-5714-2-git-send-regression-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 35df94e344f..34a1b9d7633 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4780,14 +4780,17 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, */ ctx = find_get_context(pid, cpu); - if (IS_ERR(ctx)) - return NULL; + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto err_exit; + } event = perf_event_alloc(attr, cpu, ctx, NULL, NULL, callback, GFP_KERNEL); - err = PTR_ERR(event); - if (IS_ERR(event)) + if (IS_ERR(event)) { + err = PTR_ERR(event); goto err_put_context; + } event->filp = NULL; WARN_ON_ONCE(ctx->parent_ctx); @@ -4804,11 +4807,10 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, return event; -err_put_context: - if (err < 0) - put_ctx(ctx); - - return NULL; + err_put_context: + put_ctx(ctx); + err_exit: + return ERR_PTR(err); } EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter); From 605bfaee9078cd0b01d83402315389839ee4bb5c Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 26 Nov 2009 05:35:42 +0100 Subject: [PATCH 404/470] hw-breakpoints: Simplify error handling in breakpoint creation requests This simplifies the error handling when we create a breakpoint. We don't need to check the NULL return value corner case anymore since we have improved perf_event_create_kernel_counter() to always return an error code in the failure case. Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Paul Mackerras Cc: Steven Rostedt Cc: Prasad LKML-Reference: <1259210142-5714-3-git-send-regression-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/ptrace.c | 8 +------- kernel/hw_breakpoint.c | 4 ++-- kernel/trace/trace_ksym.c | 18 +++++------------- samples/hw_breakpoint/data_breakpoint.c | 3 --- 4 files changed, 8 insertions(+), 25 deletions(-) diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index b25f8947ed7..75e0cd847bd 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -657,10 +657,7 @@ restore: tsk, true); thread->ptrace_bps[i] = NULL; - if (!bp) { /* incorrect bp, or we have a bug in bp API */ - rc = -EINVAL; - break; - } + /* Incorrect bp, or we have a bug in bp API */ if (IS_ERR(bp)) { rc = PTR_ERR(bp); bp = NULL; @@ -729,9 +726,6 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, tsk, bp->attr.disabled); } - - if (!bp) - return -EIO; /* * CHECKME: the previous code returned -EIO if the addr wasn't a * valid task virtual addr. 
The new one will return -EINVAL in this diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index 06d372fc026..dd3fb4a999d 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c @@ -442,7 +442,7 @@ register_wide_hw_breakpoint(unsigned long addr, *pevent = bp; - if (IS_ERR(bp) || !bp) { + if (IS_ERR(bp)) { err = PTR_ERR(bp); goto fail; } @@ -453,7 +453,7 @@ register_wide_hw_breakpoint(unsigned long addr, fail: for_each_possible_cpu(cpu) { pevent = per_cpu_ptr(cpu_events, cpu); - if (IS_ERR(*pevent) || !*pevent) + if (IS_ERR(*pevent)) break; unregister_hw_breakpoint(*pevent); } diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c index 9f040e42f51..c538b15b95d 100644 --- a/kernel/trace/trace_ksym.c +++ b/kernel/trace/trace_ksym.c @@ -200,12 +200,9 @@ int process_new_ksym_entry(char *ksymname, int op, unsigned long addr) entry->ksym_hbp = register_wide_hw_breakpoint(entry->ksym_addr, entry->len, entry->type, ksym_hbp_handler, true); - if (IS_ERR(entry->ksym_hbp)) { - entry->ksym_hbp = NULL; - ret = PTR_ERR(entry->ksym_hbp); - } - if (!entry->ksym_hbp) { + if (IS_ERR(entry->ksym_hbp)) { + ret = PTR_ERR(entry->ksym_hbp); printk(KERN_INFO "ksym_tracer request failed. Try again" " later!!\n"); goto err; @@ -332,21 +329,16 @@ static ssize_t ksym_trace_filter_write(struct file *file, if (changed) { unregister_wide_hw_breakpoint(entry->ksym_hbp); entry->type = op; + ret = 0; if (op > 0) { entry->ksym_hbp = register_wide_hw_breakpoint(entry->ksym_addr, entry->len, entry->type, ksym_hbp_handler, true); if (IS_ERR(entry->ksym_hbp)) - entry->ksym_hbp = NULL; - - /* modified without problem */ - if (entry->ksym_hbp) { - ret = 0; + ret = PTR_ERR(entry->ksym_hbp); + else goto out; - } - } else { - ret = 0; } /* Error or "symbol:---" case: drop it */ ksym_filter_entry_count--; diff --git a/samples/hw_breakpoint/data_breakpoint.c b/samples/hw_breakpoint/data_breakpoint.c index 95063818bcf..ee7f9fbaffb 100644 --- a/samples/hw_breakpoint/data_breakpoint.c +++ b/samples/hw_breakpoint/data_breakpoint.c @@ -61,9 +61,6 @@ static int __init hw_break_module_init(void) if (IS_ERR(sample_hbp)) { ret = PTR_ERR(sample_hbp); goto fail; - } else if (!sample_hbp) { - ret = -EINVAL; - goto fail; } printk(KERN_INFO "HW Breakpoint for %s write installed\n", ksym_name); From 2c31b7958fd21df9fa04e5c36cda0f063ac70b27 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 26 Nov 2009 06:04:38 +0100 Subject: [PATCH 405/470] x86/hw-breakpoints: Don't lose GE flag while disabling a breakpoint When we schedule out a breakpoint from the cpu, we also incidentally remove the "Global exact breakpoint" flag from the breakpoint control register. It makes us losing the fine grained precision about the origin of the instructions that may trigger breakpoint exceptions for the other breakpoints running in this cpu. 
Reported-by: Prasad Signed-off-by: Frederic Weisbecker LKML-Reference: <1259211878-6013-1-git-send-regression-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/hw_breakpoint.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index 92ea5aad0b5..d42f65ac492 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -59,19 +59,25 @@ static DEFINE_PER_CPU(unsigned long, cpu_debugreg[HBP_NUM]); static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]); +static inline unsigned long +__encode_dr7(int drnum, unsigned int len, unsigned int type) +{ + unsigned long bp_info; + + bp_info = (len | type) & 0xf; + bp_info <<= (DR_CONTROL_SHIFT + drnum * DR_CONTROL_SIZE); + bp_info |= (DR_GLOBAL_ENABLE << (drnum * DR_ENABLE_SIZE)); + + return bp_info; +} + /* * Encode the length, type, Exact, and Enable bits for a particular breakpoint * as stored in debug register 7. */ unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type) { - unsigned long bp_info; - - bp_info = (len | type) & 0xf; - bp_info <<= (DR_CONTROL_SHIFT + drnum * DR_CONTROL_SIZE); - bp_info |= (DR_GLOBAL_ENABLE << (drnum * DR_ENABLE_SIZE)) | - DR_GLOBAL_SLOWDOWN; - return bp_info; + return __encode_dr7(drnum, len, type) | DR_GLOBAL_SLOWDOWN; } /* @@ -154,7 +160,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) return; dr7 = &__get_cpu_var(cpu_dr7); - *dr7 &= ~encode_dr7(i, info->len, info->type); + *dr7 &= ~__encode_dr7(i, info->len, info->type); set_debugreg(*dr7, 7); } From 11e6635763bdc0e24b39a38876574660755acffc Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 25 Nov 2009 23:01:50 -0800 Subject: [PATCH 406/470] kernel/hw_breakpoint.c: Fix local/global shadowing If the new percpu tree is combined with the perf events tree the following new warning triggers: kernel/hw_breakpoint.c: In function 'toggle_bp_task_slot': kernel/hw_breakpoint.c:151: warning: 'task_bp_pinned' is used uninitialized in this function Because it's not valid anymore to define a local variable and a percpu variable (even if it's file scope local) with the same name. Rename the local variable to resolve this. 
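A minimal sketch of the clash being fixed (names invented, not taken from this patch): once the per-cpu variable is referenced by its plain symbol name, a local variable of the same name shadows it inside the function, so per_cpu() ends up operating on the uninitialized local — the warning quoted above.

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned int, hits[4]);

static void bump_broken(int cpu, int slot)
{
	unsigned int *hits;		/* shadows the per-cpu symbol 'hits' ... */

	hits = per_cpu(hits, cpu);	/* ... so this now reads the uninitialized local */
	hits[slot]++;
}

static void bump_fixed(int cpu, int slot)
{
	unsigned int *h;		/* distinct local name, as the patch does */

	h = per_cpu(hits, cpu);		/* refers to the per-cpu data again */
	h[slot]++;
}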
Signed-off-by: Andrew Morton Cc: Frederic Weisbecker Cc: K.Prasad Cc: Tejun Heo Cc: Linus Torvalds LKML-Reference: <200911260701.nAQ71owx016356@imap1.linux-foundation.org> [ v2: added changelog ] Signed-off-by: Ingo Molnar --- kernel/hw_breakpoint.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index dd3fb4a999d..32e1018191b 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c @@ -121,7 +121,7 @@ static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable) int count = 0; struct perf_event *bp; struct perf_event_context *ctx = tsk->perf_event_ctxp; - unsigned int *task_bp_pinned; + unsigned int *tsk_pinned; struct list_head *list; unsigned long flags; @@ -146,15 +146,15 @@ static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable) if (WARN_ONCE(count < 0, "No breakpoint counter found in the counter list")) return; - task_bp_pinned = per_cpu(task_bp_pinned, cpu); + tsk_pinned = per_cpu(task_bp_pinned, cpu); if (enable) { - task_bp_pinned[count]++; + tsk_pinned[count]++; if (count > 0) - task_bp_pinned[count-1]--; + tsk_pinned[count-1]--; } else { - task_bp_pinned[count]--; + tsk_pinned[count]--; if (count > 0) - task_bp_pinned[count-1]++; + tsk_pinned[count-1]++; } } From 767df1bdd8cbff2c8c40c9ac8295bbdaa5fb24c4 Mon Sep 17 00:00:00 2001 From: Hidetoshi Seto Date: Thu, 26 Nov 2009 17:29:02 +0900 Subject: [PATCH 407/470] x86, mce: Add __cpuinit to hotplug callback functions The mce_disable_cpu() and mce_reenable_cpu() are called only from mce_cpu_callback() which is marked as __cpuinit. So these functions can be __cpuinit too. Signed-off-by: Hidetoshi Seto Cc: Andi Kleen LKML-Reference: <4B0E3C4E.4090809@jp.fujitsu.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mcheck/mce.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 5f277cad2ed..0bcaa387586 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -1953,13 +1953,14 @@ static __cpuinit void mce_remove_device(unsigned int cpu) } /* Make sure there are no machine checks on offlined CPUs. 
*/ -static void mce_disable_cpu(void *h) +static void __cpuinit mce_disable_cpu(void *h) { unsigned long action = *(unsigned long *)h; int i; if (!mce_available(¤t_cpu_data)) return; + if (!(action & CPU_TASKS_FROZEN)) cmci_clear(); for (i = 0; i < banks; i++) { @@ -1970,7 +1971,7 @@ static void mce_disable_cpu(void *h) } } -static void mce_reenable_cpu(void *h) +static void __cpuinit mce_reenable_cpu(void *h) { unsigned long action = *(unsigned long *)h; int i; From 80bbf6b641c8843b9d751a1f299aa7ee073ab9d4 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 25 Nov 2009 21:20:53 +0100 Subject: [PATCH 408/470] hw-breakpoints: Fix unused function in off-case bp_perf_event_destroy() is unused in its off-case version, let's remove it to fix the following warning reported by Stephen Rothwell in linux-next: kernel/perf_event.c:4306: warning: 'bp_perf_event_destroy' defined but not used Reported-by: Stephen Rothwell Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra LKML-Reference: <1259180453-5813-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 34a1b9d7633..f8c7939c57c 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4303,10 +4303,6 @@ void perf_bp_event(struct perf_event *bp, void *data) perf_swevent_add(bp, 1, 1, &sample, regs); } #else -static void bp_perf_event_destroy(struct perf_event *event) -{ -} - static const struct pmu *bp_perf_event_init(struct perf_event *bp) { return NULL; From d1eb650ff4130972fa21462fa49cd35a2865403b Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 24 Nov 2009 16:56:45 -0500 Subject: [PATCH 409/470] tracepoint: Move signal sending tracepoint to events/signal.h Move signal sending event to events/signal.h. This patch also renames sched_signal_send event to signal_generate. Changes in v4: - Fix a typo of task_struct pointer. 
Changes in v3: - Add docbook style comments Changes in v2: - Add siginfo argument - Add siginfo storing macro Signed-off-by: Masami Hiramatsu Reviewed-by: Jason Baron Acked-by: Roland McGrath Cc: systemtap Cc: DLE Cc: Oleg Nesterov LKML-Reference: <20091124215645.30449.60208.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- Documentation/DocBook/tracepoint.tmpl | 5 ++ include/trace/events/sched.h | 25 ---------- include/trace/events/signal.h | 66 +++++++++++++++++++++++++++ kernel/signal.c | 5 +- 4 files changed, 74 insertions(+), 27 deletions(-) create mode 100644 include/trace/events/signal.h diff --git a/Documentation/DocBook/tracepoint.tmpl b/Documentation/DocBook/tracepoint.tmpl index b0756d0fd57..8bca1d5cec0 100644 --- a/Documentation/DocBook/tracepoint.tmpl +++ b/Documentation/DocBook/tracepoint.tmpl @@ -86,4 +86,9 @@ !Iinclude/trace/events/irq.h + + SIGNAL +!Iinclude/trace/events/signal.h + + diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 9d316b22388..cfceb0b73e2 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -287,31 +287,6 @@ TRACE_EVENT(sched_process_fork, __entry->child_comm, __entry->child_pid) ); -/* - * Tracepoint for sending a signal: - */ -TRACE_EVENT(sched_signal_send, - - TP_PROTO(int sig, struct task_struct *p), - - TP_ARGS(sig, p), - - TP_STRUCT__entry( - __field( int, sig ) - __array( char, comm, TASK_COMM_LEN ) - __field( pid_t, pid ) - ), - - TP_fast_assign( - memcpy(__entry->comm, p->comm, TASK_COMM_LEN); - __entry->pid = p->pid; - __entry->sig = sig; - ), - - TP_printk("sig=%d comm=%s pid=%d", - __entry->sig, __entry->comm, __entry->pid) -); - /* * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE * adding sched_stat support to SCHED_FIFO/RR would be welcome. diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h new file mode 100644 index 00000000000..ef51756a801 --- /dev/null +++ b/include/trace/events/signal.h @@ -0,0 +1,66 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM signal + +#if !defined(_TRACE_SIGNAL_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_SIGNAL_H + +#include +#include +#include + +#define TP_STORE_SIGINFO(__entry, info) \ + do { \ + if (info == SEND_SIG_NOINFO) { \ + __entry->errno = 0; \ + __entry->code = SI_USER; \ + } else if (info == SEND_SIG_PRIV) { \ + __entry->errno = 0; \ + __entry->code = SI_KERNEL; \ + } else { \ + __entry->errno = info->si_errno; \ + __entry->code = info->si_code; \ + } \ + } while (0) + +/** + * signal_generate - called when a signal is generated + * @sig: signal number + * @info: pointer to struct siginfo + * @task: pointer to struct task_struct + * + * Current process sends a 'sig' signal to 'task' process with + * 'info' siginfo. If 'info' is SEND_SIG_NOINFO or SEND_SIG_PRIV, + * 'info' is not a pointer and you can't access its field. Instead, + * SEND_SIG_NOINFO means that si_code is SI_USER, and SEND_SIG_PRIV + * means that si_code is SI_KERNEL. 
+ */ +TRACE_EVENT(signal_generate, + + TP_PROTO(int sig, struct siginfo *info, struct task_struct *task), + + TP_ARGS(sig, info, task), + + TP_STRUCT__entry( + __field( int, sig ) + __field( int, errno ) + __field( int, code ) + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + ), + + TP_fast_assign( + __entry->sig = sig; + TP_STORE_SIGINFO(__entry, info); + memcpy(__entry->comm, task->comm, TASK_COMM_LEN); + __entry->pid = task->pid; + ), + + TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d", + __entry->sig, __entry->errno, __entry->code, + __entry->comm, __entry->pid) +); + +#endif /* _TRACE_SIGNAL_H */ + +/* This part must be outside protection */ +#include diff --git a/kernel/signal.c b/kernel/signal.c index 6705320784f..a1e0cc6b32c 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -27,7 +27,8 @@ #include #include #include -#include +#define CREATE_TRACE_POINTS +#include #include #include @@ -834,7 +835,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, struct sigqueue *q; int override_rlimit; - trace_sched_signal_send(sig, t); + trace_signal_generate(sig, info, t); assert_spin_locked(&t->sighand->siglock); From f9d4257e01d266e67420cc99d456b6d4c8464f54 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 24 Nov 2009 16:56:51 -0500 Subject: [PATCH 410/470] tracepoint: Add signal deliver event Add a tracepoint where a process gets a signal. This tracepoint shows signal-number, sa-handler and sa-flag. Changes in v3: - Add docbook style comments Changes in v2: - Add siginfo argument - Fix comment Signed-off-by: Masami Hiramatsu Reviewed-by: Jason Baron Acked-by: Roland McGrath Cc: systemtap Cc: DLE Cc: Oleg Nesterov LKML-Reference: <20091124215651.30449.20926.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- include/trace/events/signal.h | 39 +++++++++++++++++++++++++++++++++++ kernel/signal.c | 3 +++ 2 files changed, 42 insertions(+) diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h index ef51756a801..a6d71de0dc0 100644 --- a/include/trace/events/signal.h +++ b/include/trace/events/signal.h @@ -60,6 +60,45 @@ TRACE_EVENT(signal_generate, __entry->comm, __entry->pid) ); +/** + * signal_deliver - called when a signal is delivered + * @sig: signal number + * @info: pointer to struct siginfo + * @ka: pointer to struct k_sigaction + * + * A 'sig' signal is delivered to current process with 'info' siginfo, + * and it will be handled by 'ka'. ka->sa.sa_handler can be SIG_IGN or + * SIG_DFL. + * Note that some signals reported by signal_generate tracepoint can be + * lost, ignored or modified (by debugger) before hitting this tracepoint. + * This means, this can show which signals are actually delivered, but + * matching generated signals and delivered signals may not be correct. 
+ */ +TRACE_EVENT(signal_deliver, + + TP_PROTO(int sig, struct siginfo *info, struct k_sigaction *ka), + + TP_ARGS(sig, info, ka), + + TP_STRUCT__entry( + __field( int, sig ) + __field( int, errno ) + __field( int, code ) + __field( unsigned long, sa_handler ) + __field( unsigned long, sa_flags ) + ), + + TP_fast_assign( + __entry->sig = sig; + TP_STORE_SIGINFO(__entry, info); + __entry->sa_handler = (unsigned long)ka->sa.sa_handler; + __entry->sa_flags = ka->sa.sa_flags; + ), + + TP_printk("sig=%d errno=%d code=%d sa_handler=%lx sa_flags=%lx", + __entry->sig, __entry->errno, __entry->code, + __entry->sa_handler, __entry->sa_flags) +); #endif /* _TRACE_SIGNAL_H */ /* This part must be outside protection */ diff --git a/kernel/signal.c b/kernel/signal.c index a1e0cc6b32c..349d4493740 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1840,6 +1840,9 @@ relock: ka = &sighand->action[signr-1]; } + /* Trace actually delivered signals. */ + trace_signal_deliver(signr, info, ka); + if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ continue; if (ka->sa.sa_handler != SIG_DFL) { From ba005e1f417295d28cd1563ab82bc33af07fb16a Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 24 Nov 2009 16:56:58 -0500 Subject: [PATCH 411/470] tracepoint: Add signal loss events Add signal_overflow_fail and signal_lose_info tracepoints for signal-lost events. Changes in v3: - Add docbook style comments Changes in v2: - Use siginfo string macro Suggested-by: Roland McGrath Reviewed-by: Jason Baron Signed-off-by: Masami Hiramatsu Acked-by: Roland McGrath Cc: systemtap Cc: DLE Cc: Oleg Nesterov LKML-Reference: <20091124215658.30449.9934.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Ingo Molnar --- include/trace/events/signal.h | 68 +++++++++++++++++++++++++++++++++++ kernel/signal.c | 19 +++++++--- 2 files changed, 82 insertions(+), 5 deletions(-) diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h index a6d71de0dc0..a510b75ac30 100644 --- a/include/trace/events/signal.h +++ b/include/trace/events/signal.h @@ -99,6 +99,74 @@ TRACE_EVENT(signal_deliver, __entry->sig, __entry->errno, __entry->code, __entry->sa_handler, __entry->sa_flags) ); + +/** + * signal_overflow_fail - called when signal queue is overflow + * @sig: signal number + * @group: signal to process group or not (bool) + * @info: pointer to struct siginfo + * + * Kernel fails to generate 'sig' signal with 'info' siginfo, because + * siginfo queue is overflow, and the signal is dropped. + * 'group' is not 0 if the signal will be sent to a process group. + * 'sig' is always one of RT signals. + */ +TRACE_EVENT(signal_overflow_fail, + + TP_PROTO(int sig, int group, struct siginfo *info), + + TP_ARGS(sig, group, info), + + TP_STRUCT__entry( + __field( int, sig ) + __field( int, group ) + __field( int, errno ) + __field( int, code ) + ), + + TP_fast_assign( + __entry->sig = sig; + __entry->group = group; + TP_STORE_SIGINFO(__entry, info); + ), + + TP_printk("sig=%d group=%d errno=%d code=%d", + __entry->sig, __entry->group, __entry->errno, __entry->code) +); + +/** + * signal_lose_info - called when siginfo is lost + * @sig: signal number + * @group: signal to process group or not (bool) + * @info: pointer to struct siginfo + * + * Kernel generates 'sig' signal but loses 'info' siginfo, because siginfo + * queue is overflow. + * 'group' is not 0 if the signal will be sent to a process group. + * 'sig' is always one of non-RT signals. 
+ */ +TRACE_EVENT(signal_lose_info, + + TP_PROTO(int sig, int group, struct siginfo *info), + + TP_ARGS(sig, group, info), + + TP_STRUCT__entry( + __field( int, sig ) + __field( int, group ) + __field( int, errno ) + __field( int, code ) + ), + + TP_fast_assign( + __entry->sig = sig; + __entry->group = group; + TP_STORE_SIGINFO(__entry, info); + ), + + TP_printk("sig=%d group=%d errno=%d code=%d", + __entry->sig, __entry->group, __entry->errno, __entry->code) +); #endif /* _TRACE_SIGNAL_H */ /* This part must be outside protection */ diff --git a/kernel/signal.c b/kernel/signal.c index 349d4493740..93e72e5feae 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -897,12 +897,21 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, break; } } else if (!is_si_special(info)) { - if (sig >= SIGRTMIN && info->si_code != SI_USER) - /* - * Queue overflow, abort. We may abort if the signal was rt - * and sent by user using something other than kill(). - */ + if (sig >= SIGRTMIN && info->si_code != SI_USER) { + /* + * Queue overflow, abort. We may abort if the + * signal was rt and sent by user using something + * other than kill(). + */ + trace_signal_overflow_fail(sig, group, info); return -EAGAIN; + } else { + /* + * This is a silent loss of information. We still + * send the signal, but the *info bits are lost. + */ + trace_signal_lose_info(sig, group, info); + } } out_set: From 4d795fb17a02a87e35782773b88b7a63acfbeaae Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 26 Nov 2009 13:11:46 +0100 Subject: [PATCH 412/470] tracing: Fix kmem event exports Commit 53d0422 ("tracing: Convert some kmem events to DEFINE_EVENT") moved the kmem tracepoint creation from util.c to page_alloc.c, but forgot to move the exports. Move them back. Cc: Li Zefan Cc: Pekka Enberg Cc: Steven Rostedt Cc: Frederic Weisbecker Cc: Mel Gorman LKML-Reference: <4B0E286A.2000405@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- mm/page_alloc.c | 4 +--- mm/util.c | 3 +++ 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index bdb22f55d00..2bc2ac63f41 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -48,14 +48,12 @@ #include #include #include +#include #include #include #include "internal.h" -#define CREATE_TRACE_POINTS -#include - /* * Array of node states. */ diff --git a/mm/util.c b/mm/util.c index 15d197571b4..7c35ad95f92 100644 --- a/mm/util.c +++ b/mm/util.c @@ -6,6 +6,9 @@ #include #include +#define CREATE_TRACE_POINTS +#include + /** * kstrdup - allocate space for and copy an existing string * @s: the string to duplicate From b2e74a265ded1a185f762ebaab967e9e0d008dd8 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Thu, 26 Nov 2009 09:24:30 -0800 Subject: [PATCH 413/470] perf_events: Fix read() bogus counts when in error state When a pinned group cannot be scheduled it goes into error state. Normally a group cannot go out of error state without being explicitly re-enabled or disabled. There was a bug in per-thread mode, whereby upon termination of the thread, the group would transition from error to off leading to bogus counts and timing information returned by read(). Fix it by clearing the error state. 
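An illustrative user-space sketch of the symptom being fixed, not part of the patch: the kernel's read() path reports end-of-file, a zero-byte read, for an event left in error state, so a pinned event that could not be scheduled should keep reading as EOF rather than handing back a stale count. The raw syscall wrapper below is assumed because glibc provides none.

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdio.h>
    #include <stdint.h>

    static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                               int cpu, int group_fd, unsigned long flags)
    {
            return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
            struct perf_event_attr attr;
            uint64_t count;
            ssize_t n;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.type   = PERF_TYPE_HARDWARE;
            attr.size   = sizeof(attr);
            attr.config = PERF_COUNT_HW_CPU_CYCLES;
            attr.pinned = 1;        /* pinned events fall into error state when they cannot be scheduled */

            fd = perf_event_open(&attr, 0, -1, -1, 0);
            if (fd < 0)
                    return 1;

            n = read(fd, &count, sizeof(count));
            if (n == 0)             /* EOF: the pinned event is in error state */
                    fprintf(stderr, "event could not be scheduled\n");
            else if (n == (ssize_t)sizeof(count))
                    printf("cycles: %llu\n", (unsigned long long)count);

            close(fd);
            return 0;
    }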
Signed-off-by: Stephane Eranian Acked-by: Peter Zijlstra Cc: Paul Mackerras Cc: perfmon2-devel@lists.sourceforge.net LKML-Reference: <4b0eb9ce.0508d00a.573b.ffffeab6@mx.google.com> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index f8c7939c57c..0b9ca2d834d 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -338,7 +338,16 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx) event->group_leader->nr_siblings--; update_event_times(event); - event->state = PERF_EVENT_STATE_OFF; + + /* + * If event was in error state, then keep it + * that way, otherwise bogus counts will be + * returned on read(). The only way to get out + * of error state is by explicit re-enabling + * of the event + */ + if (event->state > PERF_EVENT_STATE_OFF) + event->state = PERF_EVENT_STATE_OFF; /* * If this was a group event with sibling events then From 5fa10b28e57f94a90535cfeafe89dcee9f47d540 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 27 Nov 2009 04:55:53 +0100 Subject: [PATCH 414/470] hw-breakpoints: Use struct perf_event_attr to define user breakpoints In-kernel user breakpoints are created using functions in which we pass breakpoint parameters as individual variables: address, length and type. Although it fits well for x86, this just does not scale across archictectures that may support this api later as these may have more or different needs. Pass in a perf_event_attr structure instead because it is meant to evolve as much as possible into a generic hardware breakpoint parameter structure. Reported-by: K.Prasad Signed-off-by: Frederic Weisbecker LKML-Reference: <1259294154-5197-1-git-send-regression-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/ptrace.c | 74 ++++++++++++++++------------- include/linux/hw_breakpoint.h | 36 +++++++-------- kernel/hw_breakpoint.c | 87 ++++++++--------------------------- 3 files changed, 75 insertions(+), 122 deletions(-) diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 75e0cd847bd..2941b32ea66 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -593,6 +593,34 @@ static unsigned long ptrace_get_dr7(struct perf_event *bp[]) return dr7; } +static struct perf_event * +ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, + struct task_struct *tsk) +{ + int err; + int gen_len, gen_type; + DEFINE_BREAKPOINT_ATTR(attr); + + /* + * We shoud have at least an inactive breakpoint at this + * slot. It means the user is writing dr7 without having + * written the address register first + */ + if (!bp) + return ERR_PTR(-EINVAL); + + err = arch_bp_generic_fields(len, type, &gen_len, &gen_type); + if (err) + return ERR_PTR(err); + + attr = bp->attr; + attr.bp_len = gen_len; + attr.bp_type = gen_type; + attr.disabled = 0; + + return modify_user_hw_breakpoint(bp, &attr, bp->callback, tsk); +} + /* * Handle ptrace writes to debug register 7. */ @@ -603,7 +631,6 @@ static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data) int i, orig_ret = 0, rc = 0; int enabled, second_pass = 0; unsigned len, type; - int gen_len, gen_type; struct perf_event *bp; data &= ~DR_CONTROL_RESERVED; @@ -634,33 +661,12 @@ restore: continue; } - /* - * We shoud have at least an inactive breakpoint at this - * slot. 
It means the user is writing dr7 without having - * written the address register first - */ - if (!bp) { - rc = -EINVAL; - break; - } - - rc = arch_bp_generic_fields(len, type, &gen_len, &gen_type); - if (rc) - break; - - /* - * This is a temporary thing as bp is unregistered/registered - * to simulate modification - */ - bp = modify_user_hw_breakpoint(bp, bp->attr.bp_addr, gen_len, - gen_type, bp->callback, - tsk, true); - thread->ptrace_bps[i] = NULL; + bp = ptrace_modify_breakpoint(bp, len, type, tsk); /* Incorrect bp, or we have a bug in bp API */ if (IS_ERR(bp)) { rc = PTR_ERR(bp); - bp = NULL; + thread->ptrace_bps[i] = NULL; break; } thread->ptrace_bps[i] = bp; @@ -707,24 +713,26 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, { struct perf_event *bp; struct thread_struct *t = &tsk->thread; + DEFINE_BREAKPOINT_ATTR(attr); if (!t->ptrace_bps[nr]) { /* * Put stub len and type to register (reserve) an inactive but * correct bp */ - bp = register_user_hw_breakpoint(addr, HW_BREAKPOINT_LEN_1, - HW_BREAKPOINT_W, - ptrace_triggered, tsk, - false); + attr.bp_addr = addr; + attr.bp_len = HW_BREAKPOINT_LEN_1; + attr.bp_type = HW_BREAKPOINT_W; + attr.disabled = 1; + + bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk); } else { bp = t->ptrace_bps[nr]; t->ptrace_bps[nr] = NULL; - bp = modify_user_hw_breakpoint(bp, addr, bp->attr.bp_len, - bp->attr.bp_type, - bp->callback, - tsk, - bp->attr.disabled); + + attr = bp->attr; + attr.bp_addr = addr; + bp = modify_user_hw_breakpoint(bp, &attr, bp->callback, tsk); } /* * CHECKME: the previous code returned -EIO if the addr wasn't a diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h index c9f7f7c7b0e..5da472e434b 100644 --- a/include/linux/hw_breakpoint.h +++ b/include/linux/hw_breakpoint.h @@ -20,6 +20,14 @@ enum { #ifdef CONFIG_HAVE_HW_BREAKPOINT +/* As it's for in-kernel or ptrace use, we want it to be pinned */ +#define DEFINE_BREAKPOINT_ATTR(name) \ +struct perf_event_attr name = { \ + .type = PERF_TYPE_BREAKPOINT, \ + .size = sizeof(name), \ + .pinned = 1, \ +}; + static inline unsigned long hw_breakpoint_addr(struct perf_event *bp) { return bp->attr.bp_addr; @@ -36,22 +44,16 @@ static inline int hw_breakpoint_len(struct perf_event *bp) } extern struct perf_event * -register_user_hw_breakpoint(unsigned long addr, - int len, - int type, +register_user_hw_breakpoint(struct perf_event_attr *attr, perf_callback_t triggered, - struct task_struct *tsk, - bool active); + struct task_struct *tsk); /* FIXME: only change from the attr, and don't unregister */ extern struct perf_event * modify_user_hw_breakpoint(struct perf_event *bp, - unsigned long addr, - int len, - int type, + struct perf_event_attr *attr, perf_callback_t triggered, - struct task_struct *tsk, - bool active); + struct task_struct *tsk); /* * Kernel breakpoints are not associated with any particular thread. 
@@ -89,20 +91,14 @@ static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp) #else /* !CONFIG_HAVE_HW_BREAKPOINT */ static inline struct perf_event * -register_user_hw_breakpoint(unsigned long addr, - int len, - int type, +register_user_hw_breakpoint(struct perf_event_attr *attr, perf_callback_t triggered, - struct task_struct *tsk, - bool active) { return NULL; } + struct task_struct *tsk) { return NULL; } static inline struct perf_event * modify_user_hw_breakpoint(struct perf_event *bp, - unsigned long addr, - int len, - int type, + struct perf_event_attr *attr, perf_callback_t triggered, - struct task_struct *tsk, - bool active) { return NULL; } + struct task_struct *tsk) { return NULL; } static inline struct perf_event * register_wide_hw_breakpoint_cpu(unsigned long addr, int len, diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index 32e1018191b..2a47514f12f 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c @@ -289,90 +289,32 @@ int register_perf_hw_breakpoint(struct perf_event *bp) return __register_perf_hw_breakpoint(bp); } -/* - * Register a breakpoint bound to a task and a given cpu. - * If cpu is -1, the breakpoint is active for the task in every cpu - * If the task is -1, the breakpoint is active for every tasks in the given - * cpu. - */ -static struct perf_event * -register_user_hw_breakpoint_cpu(unsigned long addr, - int len, - int type, - perf_callback_t triggered, - pid_t pid, - int cpu, - bool active) -{ - struct perf_event_attr *attr; - struct perf_event *bp; - - attr = kzalloc(sizeof(*attr), GFP_KERNEL); - if (!attr) - return ERR_PTR(-ENOMEM); - - attr->type = PERF_TYPE_BREAKPOINT; - attr->size = sizeof(*attr); - attr->bp_addr = addr; - attr->bp_len = len; - attr->bp_type = type; - /* - * Such breakpoints are used by debuggers to trigger signals when - * we hit the excepted memory op. We can't miss such events, they - * must be pinned. - */ - attr->pinned = 1; - - if (!active) - attr->disabled = 1; - - bp = perf_event_create_kernel_counter(attr, cpu, pid, triggered); - kfree(attr); - - return bp; -} - /** * register_user_hw_breakpoint - register a hardware breakpoint for user space - * @addr: is the memory address that triggers the breakpoint - * @len: the length of the access to the memory (1 byte, 2 bytes etc...) - * @type: the type of the access to the memory (read/write/exec) + * @attr: breakpoint attributes * @triggered: callback to trigger when we hit the breakpoint * @tsk: pointer to 'task_struct' of the process to which the address belongs - * @active: should we activate it while registering it - * */ struct perf_event * -register_user_hw_breakpoint(unsigned long addr, - int len, - int type, +register_user_hw_breakpoint(struct perf_event_attr *attr, perf_callback_t triggered, - struct task_struct *tsk, - bool active) + struct task_struct *tsk) { - return register_user_hw_breakpoint_cpu(addr, len, type, triggered, - tsk->pid, -1, active); + return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered); } EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); /** * modify_user_hw_breakpoint - modify a user-space hardware breakpoint * @bp: the breakpoint structure to modify - * @addr: is the memory address that triggers the breakpoint - * @len: the length of the access to the memory (1 byte, 2 bytes etc...) 
- * @type: the type of the access to the memory (read/write/exec) + * @attr: new breakpoint attributes * @triggered: callback to trigger when we hit the breakpoint * @tsk: pointer to 'task_struct' of the process to which the address belongs - * @active: should we activate it while registering it */ struct perf_event * -modify_user_hw_breakpoint(struct perf_event *bp, - unsigned long addr, - int len, - int type, +modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr, perf_callback_t triggered, - struct task_struct *tsk, - bool active) + struct task_struct *tsk) { /* * FIXME: do it without unregistering @@ -381,8 +323,7 @@ modify_user_hw_breakpoint(struct perf_event *bp, */ unregister_hw_breakpoint(bp); - return register_user_hw_breakpoint(addr, len, type, triggered, - tsk, active); + return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered); } EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint); @@ -406,8 +347,16 @@ register_kernel_hw_breakpoint_cpu(unsigned long addr, int cpu, bool active) { - return register_user_hw_breakpoint_cpu(addr, len, type, triggered, - -1, cpu, active); + DEFINE_BREAKPOINT_ATTR(attr); + + attr.bp_addr = addr; + attr.bp_len = len; + attr.bp_type = type; + + if (!active) + attr.disabled = 1; + + return perf_event_create_kernel_counter(&attr, cpu, -1, triggered); } /** From dd1853c3f493f6d22d9e5390b192a07b73d2ac0a Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 27 Nov 2009 04:55:54 +0100 Subject: [PATCH 415/470] hw-breakpoints: Use struct perf_event_attr to define kernel breakpoints Kernel breakpoints are created using functions in which we pass breakpoint parameters as individual variables: address, length and type. Although it fits well for x86, this just does not scale across architectures that may support this api later as these may have more or different needs. Pass in a perf_event_attr structure instead because it is meant to evolve as much as possible into a generic hardware breakpoint parameter structure. Reported-by: K.Prasad Signed-off-by: Frederic Weisbecker LKML-Reference: <1259294154-5197-2-git-send-regression-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- include/linux/hw_breakpoint.h | 35 +++++++++------------ kernel/hw_breakpoint.c | 35 +++------------------ kernel/trace/trace_ksym.c | 42 ++++++++++++------------- samples/hw_breakpoint/data_breakpoint.c | 10 +++--- 4 files changed, 44 insertions(+), 78 deletions(-) diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h index 5da472e434b..a03daed08c5 100644 --- a/include/linux/hw_breakpoint.h +++ b/include/linux/hw_breakpoint.h @@ -28,6 +28,13 @@ struct perf_event_attr name = { \ .pinned = 1, \ }; +static inline void hw_breakpoint_init(struct perf_event_attr *attr) +{ + attr->type = PERF_TYPE_BREAKPOINT; + attr->size = sizeof(*attr); + attr->pinned = 1; +} + static inline unsigned long hw_breakpoint_addr(struct perf_event *bp) { return bp->attr.bp_addr; @@ -59,19 +66,13 @@ modify_user_hw_breakpoint(struct perf_event *bp, * Kernel breakpoints are not associated with any particular thread. 
*/ extern struct perf_event * -register_wide_hw_breakpoint_cpu(unsigned long addr, - int len, - int type, +register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, perf_callback_t triggered, - int cpu, - bool active); + int cpu); extern struct perf_event ** -register_wide_hw_breakpoint(unsigned long addr, - int len, - int type, - perf_callback_t triggered, - bool active); +register_wide_hw_breakpoint(struct perf_event_attr *attr, + perf_callback_t triggered); extern int register_perf_hw_breakpoint(struct perf_event *bp); extern int __register_perf_hw_breakpoint(struct perf_event *bp); @@ -100,18 +101,12 @@ modify_user_hw_breakpoint(struct perf_event *bp, perf_callback_t triggered, struct task_struct *tsk) { return NULL; } static inline struct perf_event * -register_wide_hw_breakpoint_cpu(unsigned long addr, - int len, - int type, +register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, perf_callback_t triggered, - int cpu, - bool active) { return NULL; } + int cpu) { return NULL; } static inline struct perf_event ** -register_wide_hw_breakpoint(unsigned long addr, - int len, - int type, - perf_callback_t triggered, - bool active) { return NULL; } +register_wide_hw_breakpoint(struct perf_event_attr *attr, + perf_callback_t triggered) { return NULL; } static inline int register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } static inline int diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index 2a47514f12f..cf5ee162841 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c @@ -339,42 +339,16 @@ void unregister_hw_breakpoint(struct perf_event *bp) } EXPORT_SYMBOL_GPL(unregister_hw_breakpoint); -static struct perf_event * -register_kernel_hw_breakpoint_cpu(unsigned long addr, - int len, - int type, - perf_callback_t triggered, - int cpu, - bool active) -{ - DEFINE_BREAKPOINT_ATTR(attr); - - attr.bp_addr = addr; - attr.bp_len = len; - attr.bp_type = type; - - if (!active) - attr.disabled = 1; - - return perf_event_create_kernel_counter(&attr, cpu, -1, triggered); -} - /** * register_wide_hw_breakpoint - register a wide breakpoint in the kernel - * @addr: is the memory address that triggers the breakpoint - * @len: the length of the access to the memory (1 byte, 2 bytes etc...) 
- * @type: the type of the access to the memory (read/write/exec) + * @attr: breakpoint attributes * @triggered: callback to trigger when we hit the breakpoint - * @active: should we activate it while registering it * * @return a set of per_cpu pointers to perf events */ struct perf_event ** -register_wide_hw_breakpoint(unsigned long addr, - int len, - int type, - perf_callback_t triggered, - bool active) +register_wide_hw_breakpoint(struct perf_event_attr *attr, + perf_callback_t triggered) { struct perf_event **cpu_events, **pevent, *bp; long err; @@ -386,8 +360,7 @@ register_wide_hw_breakpoint(unsigned long addr, for_each_possible_cpu(cpu) { pevent = per_cpu_ptr(cpu_events, cpu); - bp = register_kernel_hw_breakpoint_cpu(addr, len, type, - triggered, cpu, active); + bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered); *pevent = bp; diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c index c538b15b95d..ddfa0fd43bc 100644 --- a/kernel/trace/trace_ksym.c +++ b/kernel/trace/trace_ksym.c @@ -42,9 +42,7 @@ struct trace_ksym { struct perf_event **ksym_hbp; - unsigned long ksym_addr; - int type; - int len; + struct perf_event_attr attr; #ifdef CONFIG_PROFILE_KSYM_TRACER unsigned long counter; #endif @@ -71,7 +69,7 @@ void ksym_collect_stats(unsigned long hbp_hit_addr) rcu_read_lock(); hlist_for_each_entry_rcu(entry, node, &ksym_filter_head, ksym_hlist) { - if ((entry->ksym_addr == hbp_hit_addr) && + if ((entry->attr.bp_addr == hbp_hit_addr) && (entry->counter <= MAX_UL_INT)) { entry->counter++; break; @@ -192,14 +190,15 @@ int process_new_ksym_entry(char *ksymname, int op, unsigned long addr) if (!entry) return -ENOMEM; - entry->type = op; - entry->ksym_addr = addr; - entry->len = HW_BREAKPOINT_LEN_4; + hw_breakpoint_init(&entry->attr); + + entry->attr.bp_type = op; + entry->attr.bp_addr = addr; + entry->attr.bp_len = HW_BREAKPOINT_LEN_4; ret = -EAGAIN; - entry->ksym_hbp = register_wide_hw_breakpoint(entry->ksym_addr, - entry->len, entry->type, - ksym_hbp_handler, true); + entry->ksym_hbp = register_wide_hw_breakpoint(&entry->attr, + ksym_hbp_handler); if (IS_ERR(entry->ksym_hbp)) { ret = PTR_ERR(entry->ksym_hbp); @@ -236,12 +235,12 @@ static ssize_t ksym_trace_filter_read(struct file *filp, char __user *ubuf, mutex_lock(&ksym_tracer_mutex); hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) { - ret = trace_seq_printf(s, "%pS:", (void *)entry->ksym_addr); - if (entry->type == HW_BREAKPOINT_R) + ret = trace_seq_printf(s, "%pS:", (void *)entry->attr.bp_addr); + if (entry->attr.bp_type == HW_BREAKPOINT_R) ret = trace_seq_puts(s, "r--\n"); - else if (entry->type == HW_BREAKPOINT_W) + else if (entry->attr.bp_type == HW_BREAKPOINT_W) ret = trace_seq_puts(s, "-w-\n"); - else if (entry->type == (HW_BREAKPOINT_W | HW_BREAKPOINT_R)) + else if (entry->attr.bp_type == (HW_BREAKPOINT_W | HW_BREAKPOINT_R)) ret = trace_seq_puts(s, "rw-\n"); WARN_ON_ONCE(!ret); } @@ -317,9 +316,9 @@ static ssize_t ksym_trace_filter_write(struct file *file, ret = -EINVAL; hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) { - if (entry->ksym_addr == ksym_addr) { + if (entry->attr.bp_addr == ksym_addr) { /* Check for malformed request: (6) */ - if (entry->type != op) + if (entry->attr.bp_type != op) changed = 1; else goto out; @@ -328,13 +327,12 @@ static ssize_t ksym_trace_filter_write(struct file *file, } if (changed) { unregister_wide_hw_breakpoint(entry->ksym_hbp); - entry->type = op; + entry->attr.bp_type = op; ret = 0; if (op > 0) { entry->ksym_hbp = - 
register_wide_hw_breakpoint(entry->ksym_addr, - entry->len, entry->type, - ksym_hbp_handler, true); + register_wide_hw_breakpoint(&entry->attr, + ksym_hbp_handler); if (IS_ERR(entry->ksym_hbp)) ret = PTR_ERR(entry->ksym_hbp); else @@ -489,7 +487,7 @@ static int ksym_tracer_stat_show(struct seq_file *m, void *v) entry = hlist_entry(stat, struct trace_ksym, ksym_hlist); - access_type = entry->type; + access_type = entry->attr.bp_type; switch (access_type) { case HW_BREAKPOINT_R: @@ -505,7 +503,7 @@ static int ksym_tracer_stat_show(struct seq_file *m, void *v) seq_puts(m, " NA "); } - if (lookup_symbol_name(entry->ksym_addr, fn_name) >= 0) + if (lookup_symbol_name(entry->attr.bp_addr, fn_name) >= 0) seq_printf(m, " %-36s", fn_name); else seq_printf(m, " %-36s", ""); diff --git a/samples/hw_breakpoint/data_breakpoint.c b/samples/hw_breakpoint/data_breakpoint.c index ee7f9fbaffb..29525500df0 100644 --- a/samples/hw_breakpoint/data_breakpoint.c +++ b/samples/hw_breakpoint/data_breakpoint.c @@ -51,13 +51,13 @@ static void sample_hbp_handler(struct perf_event *temp, void *data) static int __init hw_break_module_init(void) { int ret; - unsigned long addr; + DEFINE_BREAKPOINT_ATTR(attr); - addr = kallsyms_lookup_name(ksym_name); + attr.bp_addr = kallsyms_lookup_name(ksym_name); + attr.bp_len = HW_BREAKPOINT_LEN_4; + attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R; - sample_hbp = register_wide_hw_breakpoint(addr, HW_BREAKPOINT_LEN_4, - HW_BREAKPOINT_W | HW_BREAKPOINT_R, - sample_hbp_handler, true); + sample_hbp = register_wide_hw_breakpoint(&attr, sample_hbp_handler); if (IS_ERR(sample_hbp)) { ret = PTR_ERR(sample_hbp); goto fail; From 0f1ef51d244809f417bdf45cdb00109fb6005672 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 26 Nov 2009 15:49:33 +0800 Subject: [PATCH 416/470] trace_syscalls: Add syscall nr field Field syscall number is missed in syscall_enter_define_fields()/ syscall_exit_define_fields(). Syscall number is also needed for event filter or other users. Signed-off-by: Lai Jiangshan Acked-by: Frederic Weisbecker Cc: Jason Baron Cc: Steven Rostedt LKML-Reference: <4B0E330D.1070206@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_syscalls.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 9189cbe8607..63aa8070365 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -261,6 +261,10 @@ int syscall_enter_define_fields(struct ftrace_event_call *call) if (ret) return ret; + ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER); + if (ret) + return ret; + for (i = 0; i < meta->nb_args; i++) { ret = trace_define_field(call, meta->types[i], meta->args[i], offset, @@ -281,6 +285,10 @@ int syscall_exit_define_fields(struct ftrace_event_call *call) if (ret) return ret; + ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER); + if (ret) + return ret; + ret = trace_define_field(call, SYSCALL_FIELD(long, ret), FILTER_OTHER); From abab9d37d2a826fcf588c5f30152dbe05c40111c Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Wed, 25 Nov 2009 16:32:21 +0800 Subject: [PATCH 417/470] trace_kprobes: Fix memory leak tp->nr_args is not set before we "goto error", it causes memory leak for free_trace_probe() use tp->nr_args to free memory of args. 
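The pattern behind the fix, shown as a self-contained sketch rather than the real trace_kprobe code (probe_stub, parse_one() and the helpers below are made up for illustration): bump the element count only after an entry is fully initialized, so the shared error path frees exactly what was set up.

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    /* Stand-in for the real per-argument parser. */
    static int parse_one(const char *arg)
    {
            return *arg ? 0 : -EINVAL;
    }

    struct probe_stub {
            int nr_args;
            char *name[8];
    };

    static void free_stub_args(struct probe_stub *p)
    {
            int i;

            /* Frees exactly the entries counted in parse_stub_args(). */
            for (i = 0; i < p->nr_args; i++)
                    kfree(p->name[i]);
    }

    static int parse_stub_args(struct probe_stub *p, int argc, char **argv)
    {
            int i;

            for (i = 0; i < argc && i < 8; i++) {
                    p->name[i] = kstrdup(argv[i], GFP_KERNEL);
                    if (!p->name[i])
                            goto error;
                    if (parse_one(p->name[i]) < 0) {
                            kfree(p->name[i]);      /* not counted yet, free it here */
                            goto error;
                    }
                    p->nr_args++;                   /* count only fully initialized entries */
            }
            return 0;

    error:
            free_stub_args(p);
            return -EINVAL;
    }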
Signed-off-by: Lai Jiangshan Acked-by: Masami Hiramatsu Cc: Steven Rostedt Cc: Frederic Weisbecker LKML-Reference: <4B0CEB95.2060107@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_kprobe.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 79ce6a2bd74..82e85836d05 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -704,10 +704,12 @@ static int create_trace_probe(int argc, char **argv) ret = parse_probe_arg(arg, &tp->args[i].fetch, is_return); if (ret) { pr_info("Parse error at argument%d. (%d)\n", i, ret); + kfree(tp->args[i].name); goto error; } + + tp->nr_args++; } - tp->nr_args = i; ret = register_trace_probe(tp); if (ret) From 3d9b2e1ddf42dd3df38af7794fa5e39cce760f3b Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Wed, 25 Nov 2009 16:32:47 +0800 Subject: [PATCH 418/470] trace_kprobes: Always show group name Sometimes the group name is not "kprobes", It'll be better if we can read it from tracing/kprobe_events. # echo 'r:laijs/vfs_read vfs_read %ax' > kprobe_events # cat kprobe_events r:laijs/vfs_read vfs_read %ax=%ax Signed-off-by: Lai Jiangshan Acked-by: Masami Hiramatsu Cc: Steven Rostedt Cc: Frederic Weisbecker LKML-Reference: <4B0CEBAF.6000104@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_kprobe.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 82e85836d05..96e1944229b 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -760,7 +760,7 @@ static int probes_seq_show(struct seq_file *m, void *v) char buf[MAX_ARGSTR_LEN + 1]; seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p'); - seq_printf(m, ":%s", tp->call.name); + seq_printf(m, ":%s/%s", tp->call.system, tp->call.name); if (tp->symbol) seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset); From 52a11f354970e7301e1d1a029b87535be45abec9 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Wed, 25 Nov 2009 16:33:15 +0800 Subject: [PATCH 419/470] trace_kprobes: Don't output zero offset "symbol_name+0" is not so friendly. It makes the output longer. Signed-off-by: Lai Jiangshan Acked-by: Masami Hiramatsu Cc: Steven Rostedt Cc: Frederic Weisbecker LKML-Reference: <4B0CEBCB.7080309@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_kprobe.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 96e1944229b..72d0c65c867 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -243,7 +243,11 @@ static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff) ret = snprintf(buf, n, "@0x%p", ff->data); else if (ff->func == fetch_symbol) { struct symbol_cache *sc = ff->data; - ret = snprintf(buf, n, "@%s%+ld", sc->symbol, sc->offset); + if (sc->offset) + ret = snprintf(buf, n, "@%s%+ld", sc->symbol, + sc->offset); + else + ret = snprintf(buf, n, "@%s", sc->symbol); } else if (ff->func == fetch_retvalue) ret = snprintf(buf, n, "$retval"); else if (ff->func == fetch_stack_address) @@ -762,10 +766,12 @@ static int probes_seq_show(struct seq_file *m, void *v) seq_printf(m, "%c", probe_is_return(tp) ? 
'r' : 'p'); seq_printf(m, ":%s/%s", tp->call.system, tp->call.name); - if (tp->symbol) + if (!tp->symbol) + seq_printf(m, " 0x%p", tp->rp.kp.addr); + else if (tp->rp.kp.offset) seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset); else - seq_printf(m, " 0x%p", tp->rp.kp.addr); + seq_printf(m, " %s", probe_symbol(tp)); for (i = 0; i < tp->nr_args; i++) { ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i].fetch); From 3f5ee186f615a720fe78eb33662ae4da57a1eee3 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 27 Nov 2009 16:29:12 -0200 Subject: [PATCH 420/470] perf symbols: Avoid annoying message about loading symbols MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This should be properly fixed when we remove the XXX comment in 'perf report', function resolve_symbol. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1259346563-12568-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 4ed379b915f..8aab89b2248 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1516,6 +1516,7 @@ static int kernel_maps__create_kernel_map(const struct symbol_conf *conf) vdso = dso__new("[vdso]"); if (vdso == NULL) goto out_delete_kernel_map; + vdso->loaded = 1; if (sysfs__read_build_id("/sys/kernel/notes", kernel->build_id, sizeof(kernel->build_id)) == 0) From 61f37a824d6782503ff66bf653f2e07902b641a1 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 27 Nov 2009 16:29:13 -0200 Subject: [PATCH 421/470] perf symbols: Rename kernel_mapto kernel_map[s]__functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As we'll have kernel_map[s]__variables too. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1259346563-12568-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 2 +- tools/perf/util/symbol.c | 46 +++++++++++++++++------------------ tools/perf/util/symbol.h | 2 +- 3 files changed, 25 insertions(+), 25 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 18ac5eaefc3..377cb7c9bdd 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -184,7 +184,7 @@ got_map: * trick of looking in the whole kernel symbol list. 
*/ if ((long long)ip < 0) { - map = kernel_map; + map = kernel_map__functions; goto got_map; } } diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 8aab89b2248..687fb7f8cc0 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -43,7 +43,7 @@ static struct symbol_conf symbol_conf__defaults = { .try_vmlinux_path = true, }; -static struct rb_root kernel_maps; +static struct rb_root kernel_maps__functions; static void symbols__fixup_end(struct rb_root *self) { @@ -71,7 +71,7 @@ static void symbols__fixup_end(struct rb_root *self) static void kernel_maps__fixup_end(void) { struct map *prev, *curr; - struct rb_node *nd, *prevnd = rb_first(&kernel_maps); + struct rb_node *nd, *prevnd = rb_first(&kernel_maps__functions); if (prevnd == NULL) return; @@ -325,7 +325,7 @@ static int kernel_maps__load_all_kallsyms(void) * kernel_maps__split_kallsyms, when we have split the * maps per module */ - symbols__insert(&kernel_map->dso->functions, sym); + symbols__insert(&kernel_map__functions->dso->functions, sym); } free(line); @@ -346,10 +346,10 @@ out_failure: */ static int kernel_maps__split_kallsyms(symbol_filter_t filter) { - struct map *map = kernel_map; + struct map *map = kernel_map__functions; struct symbol *pos; int count = 0; - struct rb_node *next = rb_first(&kernel_map->dso->functions); + struct rb_node *next = rb_first(&kernel_map__functions->dso->functions); int kernel_range = 0; while (next) { @@ -376,7 +376,7 @@ static int kernel_maps__split_kallsyms(symbol_filter_t filter) */ pos->start = map->map_ip(map, pos->start); pos->end = map->map_ip(map, pos->end); - } else if (map != kernel_map) { + } else if (map != kernel_map__functions) { char dso_name[PATH_MAX]; struct dso *dso; @@ -399,12 +399,12 @@ static int kernel_maps__split_kallsyms(symbol_filter_t filter) } if (filter && filter(map, pos)) { - rb_erase(&pos->rb_node, &kernel_map->dso->functions); + rb_erase(&pos->rb_node, &kernel_map__functions->dso->functions); symbol__delete(pos); } else { - if (map != kernel_map) { + if (map != kernel_map__functions) { rb_erase(&pos->rb_node, - &kernel_map->dso->functions); + &kernel_map__functions->dso->functions); symbols__insert(&map->dso->functions, pos); } count++; @@ -420,8 +420,8 @@ static int kernel_maps__load_kallsyms(symbol_filter_t filter) if (kernel_maps__load_all_kallsyms()) return -1; - symbols__fixup_end(&kernel_map->dso->functions); - kernel_map->dso->origin = DSO__ORIG_KERNEL; + symbols__fixup_end(&kernel_map__functions->dso->functions); + kernel_map__functions->dso->origin = DSO__ORIG_KERNEL; return kernel_maps__split_kallsyms(filter); } @@ -431,7 +431,7 @@ size_t kernel_maps__fprintf(FILE *fp) size_t printed = fprintf(fp, "Kernel maps:\n"); struct rb_node *nd; - for (nd = rb_first(&kernel_maps); nd; nd = rb_next(nd)) { + for (nd = rb_first(&kernel_maps__functions); nd; nd = rb_next(nd)) { struct map *pos = rb_entry(nd, struct map, rb_node); printed += fprintf(fp, "Map:"); @@ -1159,17 +1159,17 @@ out: return ret; } -struct map *kernel_map; +struct map *kernel_map__functions; static void kernel_maps__insert(struct map *map) { - maps__insert(&kernel_maps, map); + maps__insert(&kernel_maps__functions, map); } struct symbol *kernel_maps__find_function(u64 ip, struct map **mapp, symbol_filter_t filter) { - struct map *map = maps__find(&kernel_maps, ip); + struct map *map = maps__find(&kernel_maps__functions, ip); if (mapp) *mapp = map; @@ -1178,7 +1178,7 @@ struct symbol *kernel_maps__find_function(u64 ip, struct map **mapp, ip = map->map_ip(map, ip); 
return map__find_function(map, ip, filter); } else - WARN_ONCE(RB_EMPTY_ROOT(&kernel_maps), + WARN_ONCE(RB_EMPTY_ROOT(&kernel_maps__functions), "Empty kernel_maps, was symbol__init() called?\n"); return NULL; @@ -1188,7 +1188,7 @@ struct map *kernel_maps__find_by_dso_name(const char *name) { struct rb_node *nd; - for (nd = rb_first(&kernel_maps); nd; nd = rb_next(nd)) { + for (nd = rb_first(&kernel_maps__functions); nd; nd = rb_next(nd)) { struct map *map = rb_entry(nd, struct map, rb_node); if (map->dso && strcmp(map->dso->name, name) == 0) @@ -1505,11 +1505,11 @@ static int kernel_maps__create_kernel_map(const struct symbol_conf *conf) if (kernel == NULL) return -1; - kernel_map = map__new2(0, kernel); - if (kernel_map == NULL) + kernel_map__functions = map__new2(0, kernel); + if (kernel_map__functions == NULL) goto out_delete_kernel_dso; - kernel_map->map_ip = kernel_map->unmap_ip = identity__map_ip; + kernel_map__functions->map_ip = kernel_map__functions->unmap_ip = identity__map_ip; kernel->short_name = "[kernel]"; kernel->kernel = 1; @@ -1522,15 +1522,15 @@ static int kernel_maps__create_kernel_map(const struct symbol_conf *conf) sizeof(kernel->build_id)) == 0) kernel->has_build_id = true; - kernel_maps__insert(kernel_map); + kernel_maps__insert(kernel_map__functions); dsos__add(kernel); dsos__add(vdso); return 0; out_delete_kernel_map: - map__delete(kernel_map); - kernel_map = NULL; + map__delete(kernel_map__functions); + kernel_map__functions = NULL; out_delete_kernel_dso: dso__delete(kernel); return -1; diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 65846d0c5df..b42d196dd8e 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -105,6 +105,6 @@ size_t kernel_maps__fprintf(FILE *fp); int symbol__init(struct symbol_conf *conf); extern struct list_head dsos; -extern struct map *kernel_map; +extern struct map *kernel_map__functions; extern struct dso *vdso; #endif /* __PERF_SYMBOL */ From b0da954a4759ac19fb80a959e53b613fe376bc12 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 27 Nov 2009 16:29:14 -0200 Subject: [PATCH 422/470] perf symbols: Split the dsos list into kernel and user parts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We don't need to look at modules in dsos__findnew because the kernel events come only with user DSOs. Also we need a way to list just the module DSOs so that we can create multiple sets of maps, now that we will support maps for the variables in a symtab. 
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1259346563-12568-3-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/header.c | 12 +++++++-- tools/perf/util/symbol.c | 56 ++++++++++++++++++++++++++-------------- tools/perf/util/symbol.h | 2 +- 3 files changed, 48 insertions(+), 22 deletions(-) diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 4b586569bb0..4805e6dfd23 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -185,11 +185,11 @@ static int do_write(int fd, const void *buf, size_t size) return 0; } -static int dsos__write_buildid_table(int fd) +static int __dsos__write_buildid_table(struct list_head *head, int fd) { struct dso *pos; - list_for_each_entry(pos, &dsos, node) { + list_for_each_entry(pos, head, node) { int err; struct build_id_event b; size_t len; @@ -212,6 +212,14 @@ static int dsos__write_buildid_table(int fd) return 0; } +static int dsos__write_buildid_table(int fd) +{ + int err = __dsos__write_buildid_table(&dsos__kernel, fd); + if (err == 0) + err = __dsos__write_buildid_table(&dsos__user, fd); + return err; +} + static int perf_header__adds_write(struct perf_header *self, int fd) { int nr_sections; diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 687fb7f8cc0..dc25231813e 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -28,8 +28,7 @@ enum dso_origin { DSO__ORIG_NOT_FOUND, }; -static void dsos__add(struct dso *dso); -static struct dso *dsos__find(const char *name); +static void dsos__add(struct list_head *head, struct dso *dso); static struct map *map__new2(u64 start, struct dso *dso); static void kernel_maps__insert(struct map *map); static int dso__load_kernel_sym(struct dso *self, struct map *map, @@ -855,7 +854,7 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, curr_map->unmap_ip = identity__map_ip; curr_dso->origin = DSO__ORIG_KERNEL; kernel_maps__insert(curr_map); - dsos__add(curr_dso); + dsos__add(&dsos__kernel, curr_dso); } else curr_dso = curr_map->dso; @@ -907,12 +906,12 @@ static bool dso__build_id_equal(const struct dso *self, u8 *build_id) return memcmp(self->build_id, build_id, sizeof(self->build_id)) == 0; } -bool dsos__read_build_ids(void) +static bool __dsos__read_build_ids(struct list_head *head) { bool have_build_id = false; struct dso *pos; - list_for_each_entry(pos, &dsos, node) + list_for_each_entry(pos, head, node) if (filename__read_build_id(pos->long_name, pos->build_id, sizeof(pos->build_id)) > 0) { have_build_id = true; @@ -922,6 +921,12 @@ bool dsos__read_build_ids(void) return have_build_id; } +bool dsos__read_build_ids(void) +{ + return __dsos__read_build_ids(&dsos__kernel) || + __dsos__read_build_ids(&dsos__user); +} + /* * Align offset to 4 bytes as needed for note name and descriptor data. 
*/ @@ -1343,7 +1348,7 @@ static int kernel_maps__create_module_maps(void) dso->origin = DSO__ORIG_KMODULE; kernel_maps__insert(map); - dsos__add(dso); + dsos__add(&dsos__kernel, dso); } free(line); @@ -1445,19 +1450,20 @@ out_fixup: return err; } -LIST_HEAD(dsos); +LIST_HEAD(dsos__user); +LIST_HEAD(dsos__kernel); struct dso *vdso; -static void dsos__add(struct dso *dso) +static void dsos__add(struct list_head *head, struct dso *dso) { - list_add_tail(&dso->node, &dsos); + list_add_tail(&dso->node, head); } -static struct dso *dsos__find(const char *name) +static struct dso *dsos__find(struct list_head *head, const char *name) { struct dso *pos; - list_for_each_entry(pos, &dsos, node) + list_for_each_entry(pos, head, node) if (strcmp(pos->name, name) == 0) return pos; return NULL; @@ -1465,12 +1471,12 @@ static struct dso *dsos__find(const char *name) struct dso *dsos__findnew(const char *name) { - struct dso *dso = dsos__find(name); + struct dso *dso = dsos__find(&dsos__user, name); if (!dso) { dso = dso__new(name); if (dso != NULL) { - dsos__add(dso); + dsos__add(&dsos__user, dso); dso__set_basename(dso); } } @@ -1478,26 +1484,38 @@ struct dso *dsos__findnew(const char *name) return dso; } -void dsos__fprintf(FILE *fp) +static void __dsos__fprintf(struct list_head *head, FILE *fp) { struct dso *pos; - list_for_each_entry(pos, &dsos, node) + list_for_each_entry(pos, head, node) dso__fprintf(pos, fp); } -size_t dsos__fprintf_buildid(FILE *fp) +void dsos__fprintf(FILE *fp) +{ + __dsos__fprintf(&dsos__kernel, fp); + __dsos__fprintf(&dsos__user, fp); +} + +static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp) { struct dso *pos; size_t ret = 0; - list_for_each_entry(pos, &dsos, node) { + list_for_each_entry(pos, head, node) { ret += dso__fprintf_buildid(pos, fp); ret += fprintf(fp, " %s\n", pos->long_name); } return ret; } +size_t dsos__fprintf_buildid(FILE *fp) +{ + return (__dsos__fprintf_buildid(&dsos__kernel, fp) + + __dsos__fprintf_buildid(&dsos__user, fp)); +} + static int kernel_maps__create_kernel_map(const struct symbol_conf *conf) { struct dso *kernel = dso__new(conf->vmlinux_name ?: "[kernel.kallsyms]"); @@ -1523,8 +1541,8 @@ static int kernel_maps__create_kernel_map(const struct symbol_conf *conf) kernel->has_build_id = true; kernel_maps__insert(kernel_map__functions); - dsos__add(kernel); - dsos__add(vdso); + dsos__add(&dsos__kernel, kernel); + dsos__add(&dsos__user, vdso); return 0; diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index b42d196dd8e..5d0371fe8a0 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -104,7 +104,7 @@ size_t kernel_maps__fprintf(FILE *fp); int symbol__init(struct symbol_conf *conf); -extern struct list_head dsos; +extern struct list_head dsos__user, dsos__kernel; extern struct map *kernel_map__functions; extern struct dso *vdso; #endif /* __PERF_SYMBOL */ From 605ca4ba017455d39ac6991c58eb1e80fb8af48d Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 27 Nov 2009 16:29:15 -0200 Subject: [PATCH 423/470] perf symbols: Unexport kernel_map__functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit perf annotate was the only user, and it doesn't really need it. 
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1259346563-12568-4-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 8 +++----- tools/perf/util/symbol.c | 3 +-- tools/perf/util/symbol.h | 1 - 3 files changed, 4 insertions(+), 8 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 377cb7c9bdd..0846c8a155e 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -169,7 +169,6 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) level = '.'; map = thread__find_map(thread, ip); if (map != NULL) { -got_map: ip = map->map_ip(map, ip); sym = map__find_function(map, ip, symbol_filter); } else { @@ -183,10 +182,9 @@ got_map: * the "[vdso]" dso, but for now lets use the old * trick of looking in the whole kernel symbol list. */ - if ((long long)ip < 0) { - map = kernel_map__functions; - goto got_map; - } + if ((long long)ip < 0) + sym = kernel_maps__find_function(ip, &map, + symbol_filter); } dump_printf(" ...... dso: %s\n", map ? map->dso->long_name : ""); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index dc25231813e..0b8a298d41a 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -36,6 +36,7 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, unsigned int symbol__priv_size; static int vmlinux_path__nr_entries; static char **vmlinux_path; +static struct map *kernel_map__functions; static struct symbol_conf symbol_conf__defaults = { .use_modules = true, @@ -1164,8 +1165,6 @@ out: return ret; } -struct map *kernel_map__functions; - static void kernel_maps__insert(struct map *map) { maps__insert(&kernel_maps__functions, map); diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 5d0371fe8a0..fb0be9e92bf 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -105,6 +105,5 @@ size_t kernel_maps__fprintf(FILE *fp); int symbol__init(struct symbol_conf *conf); extern struct list_head dsos__user, dsos__kernel; -extern struct map *kernel_map__functions; extern struct dso *vdso; #endif /* __PERF_SYMBOL */ From 3610583c29563e23dd038d2870f59c88438bf7a3 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 27 Nov 2009 16:29:16 -0200 Subject: [PATCH 424/470] perf symbols: Add a 'type' field to struct map MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit That way we will be able to check if the right symtab is loaded in the underlying DSO. 
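A condensed sketch of the bookkeeping this enables, with the structures renamed to make clear it is illustrative; MAP__VARIABLE is assumed here only to show where the series is heading, at this point only MAP__FUNCTION exists.

    enum map_type {
            MAP__FUNCTION = 0,
            MAP__VARIABLE,          /* assumed future type, see the following patches */
            MAP__NR_TYPES,
    };

    struct dso_stub {
            unsigned char loaded;   /* one bit per enum map_type */
    };

    static int stub_loaded(const struct dso_stub *d, enum map_type type)
    {
            return d->loaded & (1 << type);
    }

    static void stub_set_loaded(struct dso_stub *d, enum map_type type)
    {
            d->loaded |= 1 << type;
    }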
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1259346563-12568-5-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-top.c | 2 +- tools/perf/util/event.h | 12 +++++++++--- tools/perf/util/map.c | 12 +++++++----- tools/perf/util/process_events.c | 2 +- tools/perf/util/symbol.c | 31 +++++++++++++++++++++---------- tools/perf/util/symbol.h | 4 +++- 6 files changed, 42 insertions(+), 21 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index ded6cf65ad9..a0168f260d0 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -996,7 +996,7 @@ static void event__process_mmap(event_t *self) struct thread *thread = threads__findnew(self->mmap.pid); if (thread != NULL) { - struct map *map = map__new(&self->mmap, NULL, 0); + struct map *map = map__new(&self->mmap, MAP__FUNCTION, NULL, 0); if (map != NULL) thread__insert_map(thread, map); } diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 882a9531db9..29543bd8800 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -80,6 +80,10 @@ typedef union event_union { struct sample_event sample; } event_t; +enum map_type { + MAP__FUNCTION, +}; + struct map { union { struct rb_node rb_node; @@ -87,6 +91,7 @@ struct map { }; u64 start; u64 end; + enum map_type type; u64 pgoff; u64 (*map_ip)(struct map *, u64); u64 (*unmap_ip)(struct map *, u64); @@ -112,9 +117,10 @@ struct symbol; typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym); -void map__init(struct map *self, u64 start, u64 end, u64 pgoff, - struct dso *dso); -struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen); +void map__init(struct map *self, enum map_type type, + u64 start, u64 end, u64 pgoff, struct dso *dso); +struct map *map__new(struct mmap_event *event, enum map_type, + char *cwd, int cwdlen); void map__delete(struct map *self); struct map *map__clone(struct map *self); int map__overlap(struct map *l, struct map *r); diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 41c5c4a2001..52bb4c6cf74 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -20,9 +20,10 @@ static int strcommon(const char *pathname, char *cwd, int cwdlen) return n; } -void map__init(struct map *self, u64 start, u64 end, u64 pgoff, - struct dso *dso) +void map__init(struct map *self, enum map_type type, + u64 start, u64 end, u64 pgoff, struct dso *dso) { + self->type = type; self->start = start; self->end = end; self->pgoff = pgoff; @@ -32,7 +33,8 @@ void map__init(struct map *self, u64 start, u64 end, u64 pgoff, RB_CLEAR_NODE(&self->rb_node); } -struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen) +struct map *map__new(struct mmap_event *event, enum map_type type, + char *cwd, int cwdlen) { struct map *self = malloc(sizeof(*self)); @@ -63,7 +65,7 @@ struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen) if (dso == NULL) goto out_delete; - map__init(self, event->start, event->start + event->len, + map__init(self, type, event->start, event->start + event->len, event->pgoff, dso); if (self->dso == vdso || anon) @@ -103,7 +105,7 @@ void map__fixup_end(struct map *self, struct rb_root *symbols) struct symbol *map__find_function(struct map *self, u64 ip, symbol_filter_t filter) { - if (!self->dso->loaded) { + if (!dso__loaded(self->dso, self->type)) { int nr = dso__load(self->dso, self, filter); if (nr < 0) { diff --git 
a/tools/perf/util/process_events.c b/tools/perf/util/process_events.c index a9204363efd..53778684641 100644 --- a/tools/perf/util/process_events.c +++ b/tools/perf/util/process_events.c @@ -6,7 +6,7 @@ int cwdlen; int process_mmap_event(event_t *event, unsigned long offset, unsigned long head) { - struct map *map = map__new(&event->mmap, cwd, cwdlen); + struct map *map = map__new(&event->mmap, MAP__FUNCTION, cwd, cwdlen); struct thread *thread = threads__findnew(event->mmap.pid); dump_printf("%p [%p]: PERF_RECORD_MMAP %d/%d: [%p(%p) @ %p]: %s\n", diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 0b8a298d41a..45a4a9a7618 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -29,7 +29,7 @@ enum dso_origin { }; static void dsos__add(struct list_head *head, struct dso *dso); -static struct map *map__new2(u64 start, struct dso *dso); +static struct map *map__new2(u64 start, struct dso *dso, enum map_type type); static void kernel_maps__insert(struct map *map); static int dso__load_kernel_sym(struct dso *self, struct map *map, symbol_filter_t filter); @@ -45,6 +45,16 @@ static struct symbol_conf symbol_conf__defaults = { static struct rb_root kernel_maps__functions; +bool dso__loaded(const struct dso *self, enum map_type type) +{ + return self->loaded & (1 << type); +} + +static void dso__set_loaded(struct dso *self, enum map_type type) +{ + self->loaded |= (1 << type); +} + static void symbols__fixup_end(struct rb_root *self) { struct rb_node *nd, *prevnd = rb_first(self); @@ -387,7 +397,7 @@ static int kernel_maps__split_kallsyms(symbol_filter_t filter) if (dso == NULL) return -1; - map = map__new2(pos->start, dso); + map = map__new2(pos->start, dso, MAP__FUNCTION); if (map == NULL) { dso__delete(dso); return -1; @@ -846,7 +856,8 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, curr_dso = dso__new(dso_name); if (curr_dso == NULL) goto out_elf_end; - curr_map = map__new2(start, curr_dso); + curr_map = map__new2(start, curr_dso, + MAP__FUNCTION); if (curr_map == NULL) { dso__delete(curr_dso); goto out_elf_end; @@ -1076,7 +1087,7 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) int ret = -1; int fd; - self->loaded = 1; + dso__set_loaded(self, map->type); if (self->kernel) return dso__load_kernel_sym(self, map, filter); @@ -1275,7 +1286,7 @@ static int dsos__set_modules_path(void) * they are loaded) and for vmlinux, where only after we load all the * symbols we'll know where it starts and ends. 
*/ -static struct map *map__new2(u64 start, struct dso *dso) +static struct map *map__new2(u64 start, struct dso *dso, enum map_type type) { struct map *self = malloc(sizeof(*self)); @@ -1283,7 +1294,7 @@ static struct map *map__new2(u64 start, struct dso *dso) /* * ->end will be filled after we load all the symbols */ - map__init(self, start, 0, 0, dso); + map__init(self, type, start, 0, 0, dso); } return self; @@ -1333,7 +1344,7 @@ static int kernel_maps__create_module_maps(void) if (dso == NULL) goto out_delete_line; - map = map__new2(start, dso); + map = map__new2(start, dso, MAP__FUNCTION); if (map == NULL) { dso__delete(dso); goto out_delete_line; @@ -1394,7 +1405,7 @@ static int dso__load_vmlinux(struct dso *self, struct map *map, if (fd < 0) return -1; - self->loaded = 1; + dso__set_loaded(self, map->type); err = dso__load_sym(self, map, self->long_name, fd, filter, 1, 0); close(fd); @@ -1522,7 +1533,7 @@ static int kernel_maps__create_kernel_map(const struct symbol_conf *conf) if (kernel == NULL) return -1; - kernel_map__functions = map__new2(0, kernel); + kernel_map__functions = map__new2(0, kernel, MAP__FUNCTION); if (kernel_map__functions == NULL) goto out_delete_kernel_dso; @@ -1533,7 +1544,7 @@ static int kernel_maps__create_kernel_map(const struct symbol_conf *conf) vdso = dso__new("[vdso]"); if (vdso == NULL) goto out_delete_kernel_map; - vdso->loaded = 1; + dso__set_loaded(vdso, MAP__FUNCTION); if (sysfs__read_build_id("/sys/kernel/notes", kernel->build_id, sizeof(kernel->build_id)) == 0) diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index fb0be9e92bf..11d41952ce0 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -69,10 +69,10 @@ struct dso { struct symbol *(*find_function)(struct dso *, u64 ip); u8 adjust_symbols:1; u8 slen_calculated:1; - u8 loaded:1; u8 has_build_id:1; u8 kernel:1; unsigned char origin; + u8 loaded; u8 build_id[BUILD_ID_SIZE]; u16 long_name_len; const char *short_name; @@ -85,6 +85,8 @@ void dso__delete(struct dso *self); struct symbol *dso__find_function(struct dso *self, u64 ip); +bool dso__loaded(const struct dso *self, enum map_type type); + struct dso *dsos__findnew(const char *name); int dso__load(struct dso *self, struct map *map, symbol_filter_t filter); void dsos__fprintf(FILE *fp); From 6a4694a433a218c729d336b348a01bfc720da095 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 27 Nov 2009 16:29:17 -0200 Subject: [PATCH 425/470] perf symbols: Better support for multiple symbol tables per dso MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit By using an array of rb_roots in struct dso we can, from a struct map instance, get the right symbol rb_tree more easily. This way we can have just one symbol lookup method for struct map instances, map__find_symbol, instead of one per symtab type (functions, variables).
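In rough outline, the lookup path this gives us looks like the sketch below. It is illustrative only: the names follow the patch, but the real code dispatches through the dso->find_symbol pointer set up in dso__new and checks the dso__load return value.

	/* One tree of symbols per map type, instead of a single 'functions' tree. */
	struct dso {
		/* ... */
		struct rb_root symbols[MAP__NR_TYPES];
	};

	struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr)
	{
		return symbols__find(&self->symbols[type], addr);
	}

	/* A map knows its type, so one routine serves every symtab type. */
	struct symbol *map__find_symbol(struct map *self, u64 addr,
					symbol_filter_t filter)
	{
		if (!dso__loaded(self->dso, self->type))
			dso__load(self->dso, self, filter);	/* lazy load */
		return dso__find_symbol(self->dso, self->type, addr);
	}

Adding another symtab type then only means adding an enum map_type value; the lookup path stays the same.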
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1259346563-12568-6-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 2 +- tools/perf/builtin-report.c | 2 +- tools/perf/builtin-top.c | 2 +- tools/perf/util/event.h | 12 ++++--- tools/perf/util/map.c | 12 ++++--- tools/perf/util/symbol.c | 59 +++++++++++++++++++++-------------- tools/perf/util/symbol.h | 7 ++--- 7 files changed, 56 insertions(+), 40 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 0846c8a155e..c32e7609b77 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -170,7 +170,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) map = thread__find_map(thread, ip); if (map != NULL) { ip = map->map_ip(map, ip); - sym = map__find_function(map, ip, symbol_filter); + sym = map__find_symbol(map, ip, symbol_filter); } else { /* * If this is outside of all known maps, diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index e4b1004e76e..400bef981c6 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -455,7 +455,7 @@ got_map: dump_printf(" ...... map: %Lx -> %Lx\n", *ipp, ip); *ipp = ip; - return map ? map__find_function(map, ip, NULL) : NULL; + return map ? map__find_symbol(map, ip, NULL) : NULL; } static int call__match(struct symbol *sym) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index a0168f260d0..abe78bbd154 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -948,7 +948,7 @@ static void event__process_sample(const event_t *self, int counter) map = thread__find_map(thread, ip); if (map != NULL) { ip = map->map_ip(map, ip); - sym = map__find_function(map, ip, symbol_filter); + sym = map__find_symbol(map, ip, symbol_filter); if (sym == NULL) return; userspace_samples++; diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 29543bd8800..3ae3c964c90 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -81,7 +81,9 @@ typedef union event_union { } event_t; enum map_type { - MAP__FUNCTION, + MAP__FUNCTION = 0, + + MAP__NR_TYPES, }; struct map { @@ -125,10 +127,10 @@ void map__delete(struct map *self); struct map *map__clone(struct map *self); int map__overlap(struct map *l, struct map *r); size_t map__fprintf(struct map *self, FILE *fp); -struct symbol *map__find_function(struct map *self, u64 ip, - symbol_filter_t filter); -void map__fixup_start(struct map *self, struct rb_root *symbols); -void map__fixup_end(struct map *self, struct rb_root *symbols); +struct symbol *map__find_symbol(struct map *self, u64 addr, + symbol_filter_t filter); +void map__fixup_start(struct map *self); +void map__fixup_end(struct map *self); int event__synthesize_thread(pid_t pid, int (*process)(event_t *event)); void event__synthesize_threads(int (*process)(event_t *event)); diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 52bb4c6cf74..69f94fe9db2 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -82,8 +82,9 @@ void map__delete(struct map *self) free(self); } -void map__fixup_start(struct map *self, struct rb_root *symbols) +void map__fixup_start(struct map *self) { + struct rb_root *symbols = &self->dso->symbols[self->type]; struct rb_node *nd = rb_first(symbols); if (nd != NULL) { struct symbol *sym = rb_entry(nd, struct symbol, rb_node); @@ -91,8 +92,9 @@ void 
map__fixup_start(struct map *self, struct rb_root *symbols) } } -void map__fixup_end(struct map *self, struct rb_root *symbols) +void map__fixup_end(struct map *self) { + struct rb_root *symbols = &self->dso->symbols[self->type]; struct rb_node *nd = rb_last(symbols); if (nd != NULL) { struct symbol *sym = rb_entry(nd, struct symbol, rb_node); @@ -102,8 +104,8 @@ void map__fixup_end(struct map *self, struct rb_root *symbols) #define DSO__DELETED "(deleted)" -struct symbol *map__find_function(struct map *self, u64 ip, - symbol_filter_t filter) +struct symbol *map__find_symbol(struct map *self, u64 addr, + symbol_filter_t filter) { if (!dso__loaded(self->dso, self->type)) { int nr = dso__load(self->dso, self, filter); @@ -138,7 +140,7 @@ struct symbol *map__find_function(struct map *self, u64 ip, } } - return self->dso->find_function(self->dso, ip); + return self->dso->find_symbol(self->dso, self->type, addr); } struct map *map__clone(struct map *self) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 45a4a9a7618..9a2dd819dee 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -31,6 +31,7 @@ enum dso_origin { static void dsos__add(struct list_head *head, struct dso *dso); static struct map *map__new2(u64 start, struct dso *dso, enum map_type type); static void kernel_maps__insert(struct map *map); +struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr); static int dso__load_kernel_sym(struct dso *self, struct map *map, symbol_filter_t filter); unsigned int symbol__priv_size; @@ -151,11 +152,13 @@ struct dso *dso__new(const char *name) struct dso *self = malloc(sizeof(*self) + strlen(name) + 1); if (self != NULL) { + int i; strcpy(self->name, name); dso__set_long_name(self, self->name); self->short_name = self->name; - self->functions = RB_ROOT; - self->find_function = dso__find_function; + for (i = 0; i < MAP__NR_TYPES; ++i) + self->symbols[i] = RB_ROOT; + self->find_symbol = dso__find_symbol; self->slen_calculated = 0; self->origin = DSO__ORIG_NOT_FOUND; self->loaded = 0; @@ -180,7 +183,9 @@ static void symbols__delete(struct rb_root *self) void dso__delete(struct dso *self) { - symbols__delete(&self->functions); + int i; + for (i = 0; i < MAP__NR_TYPES; ++i) + symbols__delete(&self->symbols[i]); if (self->long_name != self->name) free(self->long_name); free(self); @@ -234,9 +239,9 @@ static struct symbol *symbols__find(struct rb_root *self, u64 ip) return NULL; } -struct symbol *dso__find_function(struct dso *self, u64 ip) +struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr) { - return symbols__find(&self->functions, ip); + return symbols__find(&self->symbols[type], addr); } int build_id__sprintf(u8 *self, int len, char *bf) @@ -262,17 +267,25 @@ size_t dso__fprintf_buildid(struct dso *self, FILE *fp) return fprintf(fp, "%s", sbuild_id); } +static const char * map_type__name[MAP__NR_TYPES] = { + [MAP__FUNCTION] = "Functions", +}; + size_t dso__fprintf(struct dso *self, FILE *fp) { + int i; struct rb_node *nd; size_t ret = fprintf(fp, "dso: %s (", self->short_name); ret += dso__fprintf_buildid(self, fp); - ret += fprintf(fp, ")\nFunctions:\n"); + ret += fprintf(fp, ")\n"); + for (i = 0; i < MAP__NR_TYPES; ++i) { + ret += fprintf(fp, "%s:\n", map_type__name[i]); - for (nd = rb_first(&self->functions); nd; nd = rb_next(nd)) { - struct symbol *pos = rb_entry(nd, struct symbol, rb_node); - ret += symbol__fprintf(pos, fp); + for (nd = rb_first(&self->symbols[i]); nd; nd = rb_next(nd)) { + struct symbol 
*pos = rb_entry(nd, struct symbol, rb_node); + ret += symbol__fprintf(pos, fp); + } } return ret; @@ -335,7 +348,7 @@ static int kernel_maps__load_all_kallsyms(void) * kernel_maps__split_kallsyms, when we have split the * maps per module */ - symbols__insert(&kernel_map__functions->dso->functions, sym); + symbols__insert(&kernel_map__functions->dso->symbols[MAP__FUNCTION], sym); } free(line); @@ -359,7 +372,7 @@ static int kernel_maps__split_kallsyms(symbol_filter_t filter) struct map *map = kernel_map__functions; struct symbol *pos; int count = 0; - struct rb_node *next = rb_first(&kernel_map__functions->dso->functions); + struct rb_node *next = rb_first(&kernel_map__functions->dso->symbols[map->type]); int kernel_range = 0; while (next) { @@ -409,13 +422,13 @@ static int kernel_maps__split_kallsyms(symbol_filter_t filter) } if (filter && filter(map, pos)) { - rb_erase(&pos->rb_node, &kernel_map__functions->dso->functions); + rb_erase(&pos->rb_node, &kernel_map__functions->dso->symbols[map->type]); symbol__delete(pos); } else { if (map != kernel_map__functions) { rb_erase(&pos->rb_node, - &kernel_map__functions->dso->functions); - symbols__insert(&map->dso->functions, pos); + &kernel_map__functions->dso->symbols[map->type]); + symbols__insert(&map->dso->symbols[map->type], pos); } count++; } @@ -430,7 +443,7 @@ static int kernel_maps__load_kallsyms(symbol_filter_t filter) if (kernel_maps__load_all_kallsyms()) return -1; - symbols__fixup_end(&kernel_map__functions->dso->functions); + symbols__fixup_end(&kernel_map__functions->dso->symbols[MAP__FUNCTION]); kernel_map__functions->dso->origin = DSO__ORIG_KERNEL; return kernel_maps__split_kallsyms(filter); @@ -501,7 +514,7 @@ static int dso__load_perf_map(struct dso *self, struct map *map, if (filter && filter(map, sym)) symbol__delete(sym); else { - symbols__insert(&self->functions, sym); + symbols__insert(&self->symbols[map->type], sym); nr_syms++; } } @@ -699,7 +712,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, struct map *map, if (filter && filter(map, f)) symbol__delete(f); else { - symbols__insert(&self->functions, f); + symbols__insert(&self->symbols[map->type], f); ++nr; } } @@ -721,7 +734,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, struct map *map, if (filter && filter(map, f)) symbol__delete(f); else { - symbols__insert(&self->functions, f); + symbols__insert(&self->symbols[map->type], f); ++nr; } } @@ -896,7 +909,7 @@ new_symbol: if (filter && filter(curr_map, f)) symbol__delete(f); else { - symbols__insert(&curr_dso->functions, f); + symbols__insert(&curr_dso->symbols[curr_map->type], f); nr++; } } @@ -905,7 +918,7 @@ new_symbol: * For misannotated, zeroed, ASM function sizes. 
*/ if (nr > 0) - symbols__fixup_end(&self->functions); + symbols__fixup_end(&self->symbols[map->type]); err = nr; out_elf_end: elf_end(elf); @@ -1191,7 +1204,7 @@ struct symbol *kernel_maps__find_function(u64 ip, struct map **mapp, if (map) { ip = map->map_ip(map, ip); - return map__find_function(map, ip, filter); + return map__find_symbol(map, ip, filter); } else WARN_ONCE(RB_EMPTY_ROOT(&kernel_maps__functions), "Empty kernel_maps, was symbol__init() called?\n"); @@ -1453,8 +1466,8 @@ do_kallsyms: if (err > 0) { out_fixup: - map__fixup_start(map, &map->dso->functions); - map__fixup_end(map, &map->dso->functions); + map__fixup_start(map); + map__fixup_end(map); } return err; diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 11d41952ce0..8934814d5a6 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -65,8 +65,9 @@ static inline void *symbol__priv(struct symbol *self) struct dso { struct list_head node; - struct rb_root functions; - struct symbol *(*find_function)(struct dso *, u64 ip); + struct rb_root symbols[MAP__NR_TYPES]; + struct symbol *(*find_symbol)(struct dso *self, + enum map_type type, u64 addr); u8 adjust_symbols:1; u8 slen_calculated:1; u8 has_build_id:1; @@ -83,8 +84,6 @@ struct dso { struct dso *dso__new(const char *name); void dso__delete(struct dso *self); -struct symbol *dso__find_function(struct dso *self, u64 ip); - bool dso__loaded(const struct dso *self, enum map_type type); struct dso *dsos__findnew(const char *name); From 4e06255f5cf2acf6a5abfe7df8c9690463259dea Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 27 Nov 2009 16:29:18 -0200 Subject: [PATCH 426/470] perf symbols: Make the kallsyms loading routines part of the dso class MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit So that the kallsyms loading routines are the direct counterpart of the vmlinux loading ones, i.e. dso__load_kallsyms is the counterpart of dso__load_vmlinux. In the process make them also use the symbols rb tree indexed by map->type, paving the way for supporting other types of symtabs, such as the next one to be supported: variables. This also allowed removal of yet another global variable: kernel_map__functions. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1259346563-12568-7-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 77 ++++++++++++++++++++-------------------- 1 file changed, 38 insertions(+), 39 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 9a2dd819dee..956656fcaab 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -37,7 +37,6 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, unsigned int symbol__priv_size; static int vmlinux_path__nr_entries; static char **vmlinux_path; -static struct map *kernel_map__functions; static struct symbol_conf symbol_conf__defaults = { .use_modules = true, @@ -296,10 +295,11 @@ size_t dso__fprintf(struct dso *self, FILE *fp) * so that we can in the next step set the symbol ->end address and then * call kernel_maps__split_kallsyms. 
*/ -static int kernel_maps__load_all_kallsyms(void) +static int dso__load_all_kallsyms(struct dso *self, struct map *map) { char *line = NULL; size_t n; + struct rb_root *root = &self->symbols[map->type]; FILE *file = fopen("/proc/kallsyms", "r"); if (file == NULL) @@ -342,13 +342,11 @@ static int kernel_maps__load_all_kallsyms(void) if (sym == NULL) goto out_delete_line; - /* * We will pass the symbols to the filter later, in - * kernel_maps__split_kallsyms, when we have split the - * maps per module + * map__split_kallsyms, when we have split the maps per module */ - symbols__insert(&kernel_map__functions->dso->symbols[MAP__FUNCTION], sym); + symbols__insert(root, sym); } free(line); @@ -367,12 +365,14 @@ out_failure: * kernel range is broken in several maps, named [kernel].N, as we don't have * the original ELF section names vmlinux have. */ -static int kernel_maps__split_kallsyms(symbol_filter_t filter) +static int dso__split_kallsyms(struct dso *self, struct map *map, + symbol_filter_t filter) { - struct map *map = kernel_map__functions; + struct map *curr_map = map; struct symbol *pos; int count = 0; - struct rb_node *next = rb_first(&kernel_map__functions->dso->symbols[map->type]); + struct rb_root *root = &self->symbols[map->type]; + struct rb_node *next = rb_first(root); int kernel_range = 0; while (next) { @@ -385,9 +385,9 @@ static int kernel_maps__split_kallsyms(symbol_filter_t filter) if (module) { *module++ = '\0'; - if (strcmp(map->dso->name, module)) { - map = kernel_maps__find_by_dso_name(module); - if (!map) { + if (strcmp(self->name, module)) { + curr_map = kernel_maps__find_by_dso_name(module); + if (curr_map == NULL) { pr_err("/proc/{kallsyms,modules} " "inconsistency!\n"); return -1; @@ -397,9 +397,9 @@ static int kernel_maps__split_kallsyms(symbol_filter_t filter) * So that we look just like we get from .ko files, * i.e. not prelinked, relative to map->start. 
*/ - pos->start = map->map_ip(map, pos->start); - pos->end = map->map_ip(map, pos->end); - } else if (map != kernel_map__functions) { + pos->start = curr_map->map_ip(curr_map, pos->start); + pos->end = curr_map->map_ip(curr_map, pos->end); + } else if (curr_map != map) { char dso_name[PATH_MAX]; struct dso *dso; @@ -410,25 +410,24 @@ static int kernel_maps__split_kallsyms(symbol_filter_t filter) if (dso == NULL) return -1; - map = map__new2(pos->start, dso, MAP__FUNCTION); + curr_map = map__new2(pos->start, dso, map->type); if (map == NULL) { dso__delete(dso); return -1; } - map->map_ip = map->unmap_ip = identity__map_ip; - kernel_maps__insert(map); + curr_map->map_ip = curr_map->unmap_ip = identity__map_ip; + kernel_maps__insert(curr_map); ++kernel_range; } - if (filter && filter(map, pos)) { - rb_erase(&pos->rb_node, &kernel_map__functions->dso->symbols[map->type]); + if (filter && filter(curr_map, pos)) { + rb_erase(&pos->rb_node, root); symbol__delete(pos); } else { - if (map != kernel_map__functions) { - rb_erase(&pos->rb_node, - &kernel_map__functions->dso->symbols[map->type]); - symbols__insert(&map->dso->symbols[map->type], pos); + if (curr_map != map) { + rb_erase(&pos->rb_node, root); + symbols__insert(&curr_map->dso->symbols[curr_map->type], pos); } count++; } @@ -438,15 +437,16 @@ static int kernel_maps__split_kallsyms(symbol_filter_t filter) } -static int kernel_maps__load_kallsyms(symbol_filter_t filter) +static int dso__load_kallsyms(struct dso *self, struct map *map, + symbol_filter_t filter) { - if (kernel_maps__load_all_kallsyms()) + if (dso__load_all_kallsyms(self, map) < 0) return -1; - symbols__fixup_end(&kernel_map__functions->dso->symbols[MAP__FUNCTION]); - kernel_map__functions->dso->origin = DSO__ORIG_KERNEL; + symbols__fixup_end(&self->symbols[map->type]); + self->origin = DSO__ORIG_KERNEL; - return kernel_maps__split_kallsyms(filter); + return dso__split_kallsyms(self, map, filter); } size_t kernel_maps__fprintf(FILE *fp) @@ -1457,9 +1457,8 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, if (err <= 0) { pr_info("The file %s cannot be used, " "trying to use /proc/kallsyms...", self->long_name); - sleep(2); do_kallsyms: - err = kernel_maps__load_kallsyms(filter); + err = dso__load_kallsyms(self, map, filter); if (err > 0 && !is_kallsyms) dso__set_long_name(self, strdup("[kernel.kallsyms]")); } @@ -1541,18 +1540,19 @@ size_t dsos__fprintf_buildid(FILE *fp) static int kernel_maps__create_kernel_map(const struct symbol_conf *conf) { + struct map *kmap; struct dso *kernel = dso__new(conf->vmlinux_name ?: "[kernel.kallsyms]"); if (kernel == NULL) return -1; - kernel_map__functions = map__new2(0, kernel, MAP__FUNCTION); - if (kernel_map__functions == NULL) + kmap = map__new2(0, kernel, MAP__FUNCTION); + if (kmap == NULL) goto out_delete_kernel_dso; - kernel_map__functions->map_ip = kernel_map__functions->unmap_ip = identity__map_ip; - kernel->short_name = "[kernel]"; - kernel->kernel = 1; + kmap->map_ip = kmap->unmap_ip = identity__map_ip; + kernel->short_name = "[kernel]"; + kernel->kernel = 1; vdso = dso__new("[vdso]"); if (vdso == NULL) @@ -1563,15 +1563,14 @@ static int kernel_maps__create_kernel_map(const struct symbol_conf *conf) sizeof(kernel->build_id)) == 0) kernel->has_build_id = true; - kernel_maps__insert(kernel_map__functions); + kernel_maps__insert(kmap); dsos__add(&dsos__kernel, kernel); dsos__add(&dsos__user, vdso); return 0; out_delete_kernel_map: - map__delete(kernel_map__functions); - kernel_map__functions = NULL; + 
map__delete(kmap); out_delete_kernel_dso: dso__delete(kernel); return -1; From 23ea4a3fadc6b1692dec935397ea15e2affc1cba Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 27 Nov 2009 16:29:19 -0200 Subject: [PATCH 427/470] perf symbols: Kernel_maps should be an array of MAP__NR_TYPES entries MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit So that we can support multiple symbol table types. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1259346563-12568-8-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 51 ++++++++++++++++++++++++++++++---------- tools/perf/util/thread.h | 1 - 2 files changed, 38 insertions(+), 14 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 956656fcaab..581db4c4325 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -29,6 +29,7 @@ enum dso_origin { }; static void dsos__add(struct list_head *head, struct dso *dso); +static struct map *kernel_maps__find_by_dso_name(const char *name); static struct map *map__new2(u64 start, struct dso *dso, enum map_type type); static void kernel_maps__insert(struct map *map); struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr); @@ -43,7 +44,7 @@ static struct symbol_conf symbol_conf__defaults = { .try_vmlinux_path = true, }; -static struct rb_root kernel_maps__functions; +static struct rb_root kernel_maps[MAP__NR_TYPES]; bool dso__loaded(const struct dso *self, enum map_type type) { @@ -78,10 +79,10 @@ static void symbols__fixup_end(struct rb_root *self) curr->end = roundup(curr->start, 4096); } -static void kernel_maps__fixup_end(void) +static void __kernel_maps__fixup_end(struct rb_root *root) { struct map *prev, *curr; - struct rb_node *nd, *prevnd = rb_first(&kernel_maps__functions); + struct rb_node *nd, *prevnd = rb_first(root); if (prevnd == NULL) return; @@ -101,6 +102,13 @@ static void kernel_maps__fixup_end(void) curr->end = ~0UL; } +static void kernel_maps__fixup_end(void) +{ + int i; + for (i = 0; i < MAP__NR_TYPES; ++i) + __kernel_maps__fixup_end(&kernel_maps[i]); +} + static struct symbol *symbol__new(u64 start, u64 len, const char *name) { size_t namelen = strlen(name) + 1; @@ -449,12 +457,12 @@ static int dso__load_kallsyms(struct dso *self, struct map *map, return dso__split_kallsyms(self, map, filter); } -size_t kernel_maps__fprintf(FILE *fp) +static size_t __kernel_maps__fprintf(enum map_type type, FILE *fp) { - size_t printed = fprintf(fp, "Kernel maps:\n"); + size_t printed = fprintf(fp, "%s:\n", map_type__name[type]); struct rb_node *nd; - for (nd = rb_first(&kernel_maps__functions); nd; nd = rb_next(nd)) { + for (nd = rb_first(&kernel_maps[type]); nd; nd = rb_next(nd)) { struct map *pos = rb_entry(nd, struct map, rb_node); printed += fprintf(fp, "Map:"); @@ -465,6 +473,16 @@ size_t kernel_maps__fprintf(FILE *fp) } } + return printed; +} + +size_t kernel_maps__fprintf(FILE *fp) +{ + size_t printed = fprintf(fp, "Kernel maps:\n"); + int i; + for (i = 0; i < MAP__NR_TYPES; ++i) + printed += __kernel_maps__fprintf(i, fp); + return printed + fprintf(fp, "END kernel maps\n"); } @@ -1191,13 +1209,14 @@ out: static void kernel_maps__insert(struct map *map) { - maps__insert(&kernel_maps__functions, map); + maps__insert(&kernel_maps[map->type], map); } -struct symbol *kernel_maps__find_function(u64 ip, struct map **mapp, - symbol_filter_t filter) +static 
struct symbol *kernel_maps__find_symbol(u64 ip, enum map_type type, + struct map **mapp, + symbol_filter_t filter) { - struct map *map = maps__find(&kernel_maps__functions, ip); + struct map *map = maps__find(&kernel_maps[type], ip); if (mapp) *mapp = map; @@ -1206,17 +1225,23 @@ struct symbol *kernel_maps__find_function(u64 ip, struct map **mapp, ip = map->map_ip(map, ip); return map__find_symbol(map, ip, filter); } else - WARN_ONCE(RB_EMPTY_ROOT(&kernel_maps__functions), + WARN_ONCE(RB_EMPTY_ROOT(&kernel_maps[type]), "Empty kernel_maps, was symbol__init() called?\n"); return NULL; } -struct map *kernel_maps__find_by_dso_name(const char *name) +struct symbol *kernel_maps__find_function(u64 ip, struct map **mapp, + symbol_filter_t filter) +{ + return kernel_maps__find_symbol(ip, MAP__FUNCTION, mapp, filter); +} + +static struct map *kernel_maps__find_by_dso_name(const char *name) { struct rb_node *nd; - for (nd = rb_first(&kernel_maps__functions); nd; nd = rb_next(nd)) { + for (nd = rb_first(&kernel_maps[MAP__FUNCTION]); nd; nd = rb_next(nd)) { struct map *map = rb_entry(nd, struct map, rb_node); if (map->dso && strcmp(map->dso->name, name) == 0) diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 74cba6487ed..54580bb8000 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -28,7 +28,6 @@ struct map *maps__find(struct rb_root *maps, u64 ip); struct symbol *kernel_maps__find_function(const u64 ip, struct map **mapp, symbol_filter_t filter); -struct map *kernel_maps__find_by_dso_name(const char *name); static inline struct map *thread__find_map(struct thread *self, u64 ip) { From 95011c600740837288a3b34b411244a4d9157c4e Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 27 Nov 2009 16:29:20 -0200 Subject: [PATCH 428/470] perf symbols: Support multiple symtabs in struct thread MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Making the routines that were so far specific to the kernel maps useful for all threads. This is done by making the kernel maps be contained in a kernel "thread". This gets the kernel specific routines closer to the userspace counterparts, which will help in reducing the boilerplate for resolving a symbol, as will be demonstrated in the next patches. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1259346563-12568-9-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 2 +- tools/perf/builtin-report.c | 2 +- tools/perf/builtin-top.c | 2 +- tools/perf/util/symbol.c | 157 +++++++++++++--------------------- tools/perf/util/symbol.h | 2 +- tools/perf/util/thread.c | 119 ++++++++++++++++++++------ tools/perf/util/thread.h | 15 +++- 7 files changed, 164 insertions(+), 135 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index c32e7609b77..3ebd70b1ef9 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -167,7 +167,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) map ? 
map->dso->long_name : ""); } else if (event->header.misc & PERF_RECORD_MISC_USER) { level = '.'; - map = thread__find_map(thread, ip); + map = thread__find_map(thread, MAP__FUNCTION, ip); if (map != NULL) { ip = map->map_ip(map, ip); sym = map__find_symbol(map, ip, symbol_filter); diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 400bef981c6..9bd20c2ee3d 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -422,7 +422,7 @@ resolve_symbol(struct thread *thread, struct map **mapp, u64 *ipp) if (!thread) return NULL; - map = thread__find_map(thread, ip); + map = thread__find_map(thread, MAP__FUNCTION, ip); if (map != NULL) { /* * We have to do this here as we may have a dso diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index abe78bbd154..bf6730c7603 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -945,7 +945,7 @@ static void event__process_sample(const event_t *self, int counter) if (thread == NULL) return; - map = thread__find_map(thread, ip); + map = thread__find_map(thread, MAP__FUNCTION, ip); if (map != NULL) { ip = map->map_ip(map, ip); sym = map__find_symbol(map, ip, symbol_filter); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 581db4c4325..b6a2941e778 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -29,12 +29,11 @@ enum dso_origin { }; static void dsos__add(struct list_head *head, struct dso *dso); -static struct map *kernel_maps__find_by_dso_name(const char *name); +static struct map *thread__find_map_by_name(struct thread *self, char *name); static struct map *map__new2(u64 start, struct dso *dso, enum map_type type); -static void kernel_maps__insert(struct map *map); struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr); static int dso__load_kernel_sym(struct dso *self, struct map *map, - symbol_filter_t filter); + struct thread *thread, symbol_filter_t filter); unsigned int symbol__priv_size; static int vmlinux_path__nr_entries; static char **vmlinux_path; @@ -44,7 +43,7 @@ static struct symbol_conf symbol_conf__defaults = { .try_vmlinux_path = true, }; -static struct rb_root kernel_maps[MAP__NR_TYPES]; +static struct thread kthread_mem, *kthread = &kthread_mem; bool dso__loaded(const struct dso *self, enum map_type type) { @@ -79,10 +78,10 @@ static void symbols__fixup_end(struct rb_root *self) curr->end = roundup(curr->start, 4096); } -static void __kernel_maps__fixup_end(struct rb_root *root) +static void __thread__fixup_maps_end(struct thread *self, enum map_type type) { struct map *prev, *curr; - struct rb_node *nd, *prevnd = rb_first(root); + struct rb_node *nd, *prevnd = rb_first(&self->maps[type]); if (prevnd == NULL) return; @@ -102,11 +101,11 @@ static void __kernel_maps__fixup_end(struct rb_root *root) curr->end = ~0UL; } -static void kernel_maps__fixup_end(void) +static void thread__fixup_maps_end(struct thread *self) { int i; for (i = 0; i < MAP__NR_TYPES; ++i) - __kernel_maps__fixup_end(&kernel_maps[i]); + __thread__fixup_maps_end(self, i); } static struct symbol *symbol__new(u64 start, u64 len, const char *name) @@ -274,25 +273,16 @@ size_t dso__fprintf_buildid(struct dso *self, FILE *fp) return fprintf(fp, "%s", sbuild_id); } -static const char * map_type__name[MAP__NR_TYPES] = { - [MAP__FUNCTION] = "Functions", -}; - -size_t dso__fprintf(struct dso *self, FILE *fp) +size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp) { - int i; struct rb_node *nd; size_t ret = fprintf(fp, "dso: %s 
(", self->short_name); ret += dso__fprintf_buildid(self, fp); ret += fprintf(fp, ")\n"); - for (i = 0; i < MAP__NR_TYPES; ++i) { - ret += fprintf(fp, "%s:\n", map_type__name[i]); - - for (nd = rb_first(&self->symbols[i]); nd; nd = rb_next(nd)) { - struct symbol *pos = rb_entry(nd, struct symbol, rb_node); - ret += symbol__fprintf(pos, fp); - } + for (nd = rb_first(&self->symbols[type]); nd; nd = rb_next(nd)) { + struct symbol *pos = rb_entry(nd, struct symbol, rb_node); + ret += symbol__fprintf(pos, fp); } return ret; @@ -373,7 +363,7 @@ out_failure: * kernel range is broken in several maps, named [kernel].N, as we don't have * the original ELF section names vmlinux have. */ -static int dso__split_kallsyms(struct dso *self, struct map *map, +static int dso__split_kallsyms(struct dso *self, struct map *map, struct thread *thread, symbol_filter_t filter) { struct map *curr_map = map; @@ -394,10 +384,10 @@ static int dso__split_kallsyms(struct dso *self, struct map *map, *module++ = '\0'; if (strcmp(self->name, module)) { - curr_map = kernel_maps__find_by_dso_name(module); + curr_map = thread__find_map_by_name(thread, module); if (curr_map == NULL) { - pr_err("/proc/{kallsyms,modules} " - "inconsistency!\n"); + pr_debug("/proc/{kallsyms,modules} " + "inconsistency!\n"); return -1; } } @@ -425,7 +415,7 @@ static int dso__split_kallsyms(struct dso *self, struct map *map, } curr_map->map_ip = curr_map->unmap_ip = identity__map_ip; - kernel_maps__insert(curr_map); + __thread__insert_map(thread, curr_map); ++kernel_range; } @@ -446,7 +436,7 @@ static int dso__split_kallsyms(struct dso *self, struct map *map, static int dso__load_kallsyms(struct dso *self, struct map *map, - symbol_filter_t filter) + struct thread *thread, symbol_filter_t filter) { if (dso__load_all_kallsyms(self, map) < 0) return -1; @@ -454,35 +444,13 @@ static int dso__load_kallsyms(struct dso *self, struct map *map, symbols__fixup_end(&self->symbols[map->type]); self->origin = DSO__ORIG_KERNEL; - return dso__split_kallsyms(self, map, filter); -} - -static size_t __kernel_maps__fprintf(enum map_type type, FILE *fp) -{ - size_t printed = fprintf(fp, "%s:\n", map_type__name[type]); - struct rb_node *nd; - - for (nd = rb_first(&kernel_maps[type]); nd; nd = rb_next(nd)) { - struct map *pos = rb_entry(nd, struct map, rb_node); - - printed += fprintf(fp, "Map:"); - printed += map__fprintf(pos, fp); - if (verbose > 1) { - printed += dso__fprintf(pos->dso, fp); - printed += fprintf(fp, "--\n"); - } - } - - return printed; + return dso__split_kallsyms(self, map, thread, filter); } size_t kernel_maps__fprintf(FILE *fp) { size_t printed = fprintf(fp, "Kernel maps:\n"); - int i; - for (i = 0; i < MAP__NR_TYPES; ++i) - printed += __kernel_maps__fprintf(i, fp); - + printed += thread__fprintf_maps(kthread, fp); return printed + fprintf(fp, "END kernel maps\n"); } @@ -772,9 +740,9 @@ out: return 0; } -static int dso__load_sym(struct dso *self, struct map *map, const char *name, - int fd, symbol_filter_t filter, int kernel, - int kmodule) +static int dso__load_sym(struct dso *self, struct map *map, + struct thread *thread, const char *name, int fd, + symbol_filter_t filter, int kernel, int kmodule) { struct map *curr_map = map; struct dso *curr_dso = self; @@ -877,7 +845,7 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, snprintf(dso_name, sizeof(dso_name), "%s%s", self->short_name, section_name); - curr_map = kernel_maps__find_by_dso_name(dso_name); + curr_map = thread__find_map_by_name(thread, dso_name); if 
(curr_map == NULL) { u64 start = sym.st_value; @@ -896,7 +864,7 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, curr_map->map_ip = identity__map_ip; curr_map->unmap_ip = identity__map_ip; curr_dso->origin = DSO__ORIG_KERNEL; - kernel_maps__insert(curr_map); + __thread__insert_map(kthread, curr_map); dsos__add(&dsos__kernel, curr_dso); } else curr_dso = curr_map->dso; @@ -1121,7 +1089,7 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) dso__set_loaded(self, map->type); if (self->kernel) - return dso__load_kernel_sym(self, map, filter); + return dso__load_kernel_sym(self, map, kthread, filter); name = malloc(size); if (!name) @@ -1186,7 +1154,7 @@ compare_build_id: fd = open(name, O_RDONLY); } while (fd < 0); - ret = dso__load_sym(self, map, name, fd, filter, 0, 0); + ret = dso__load_sym(self, map, NULL, name, fd, filter, 0, 0); close(fd); /* @@ -1207,16 +1175,11 @@ out: return ret; } -static void kernel_maps__insert(struct map *map) +static struct symbol *thread__find_symbol(struct thread *self, u64 ip, + enum map_type type, struct map **mapp, + symbol_filter_t filter) { - maps__insert(&kernel_maps[map->type], map); -} - -static struct symbol *kernel_maps__find_symbol(u64 ip, enum map_type type, - struct map **mapp, - symbol_filter_t filter) -{ - struct map *map = maps__find(&kernel_maps[type], ip); + struct map *map = thread__find_map(self, type, ip); if (mapp) *mapp = map; @@ -1224,9 +1187,7 @@ static struct symbol *kernel_maps__find_symbol(u64 ip, enum map_type type, if (map) { ip = map->map_ip(map, ip); return map__find_symbol(map, ip, filter); - } else - WARN_ONCE(RB_EMPTY_ROOT(&kernel_maps[type]), - "Empty kernel_maps, was symbol__init() called?\n"); + } return NULL; } @@ -1234,14 +1195,14 @@ static struct symbol *kernel_maps__find_symbol(u64 ip, enum map_type type, struct symbol *kernel_maps__find_function(u64 ip, struct map **mapp, symbol_filter_t filter) { - return kernel_maps__find_symbol(ip, MAP__FUNCTION, mapp, filter); + return thread__find_symbol(kthread, ip, MAP__FUNCTION, mapp, filter); } -static struct map *kernel_maps__find_by_dso_name(const char *name) +static struct map *thread__find_map_by_name(struct thread *self, char *name) { struct rb_node *nd; - for (nd = rb_first(&kernel_maps[MAP__FUNCTION]); nd; nd = rb_next(nd)) { + for (nd = rb_first(&self->maps[MAP__FUNCTION]); nd; nd = rb_next(nd)) { struct map *map = rb_entry(nd, struct map, rb_node); if (map->dso && strcmp(map->dso->name, name) == 0) @@ -1285,7 +1246,7 @@ static int dsos__set_modules_path_dir(char *dirname) (int)(dot - dent->d_name), dent->d_name); strxfrchar(dso_name, '-', '_'); - map = kernel_maps__find_by_dso_name(dso_name); + map = thread__find_map_by_name(kthread, dso_name); if (map == NULL) continue; @@ -1338,7 +1299,7 @@ static struct map *map__new2(u64 start, struct dso *dso, enum map_type type) return self; } -static int kernel_maps__create_module_maps(void) +static int thread__create_module_maps(struct thread *self) { char *line = NULL; size_t n; @@ -1395,7 +1356,7 @@ static int kernel_maps__create_module_maps(void) dso->has_build_id = true; dso->origin = DSO__ORIG_KMODULE; - kernel_maps__insert(map); + __thread__insert_map(self, map); dsos__add(&dsos__kernel, dso); } @@ -1410,7 +1371,7 @@ out_failure: return -1; } -static int dso__load_vmlinux(struct dso *self, struct map *map, +static int dso__load_vmlinux(struct dso *self, struct map *map, struct thread *thread, const char *vmlinux, symbol_filter_t filter) { int err = -1, fd; @@ 
-1444,15 +1405,14 @@ static int dso__load_vmlinux(struct dso *self, struct map *map, return -1; dso__set_loaded(self, map->type); - err = dso__load_sym(self, map, self->long_name, fd, filter, 1, 0); - + err = dso__load_sym(self, map, thread, self->long_name, fd, filter, 1, 0); close(fd); return err; } static int dso__load_kernel_sym(struct dso *self, struct map *map, - symbol_filter_t filter) + struct thread *thread, symbol_filter_t filter) { int err; bool is_kallsyms; @@ -1462,8 +1422,8 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, pr_debug("Looking at the vmlinux_path (%d entries long)\n", vmlinux_path__nr_entries); for (i = 0; i < vmlinux_path__nr_entries; ++i) { - err = dso__load_vmlinux(self, map, vmlinux_path[i], - filter); + err = dso__load_vmlinux(self, map, thread, + vmlinux_path[i], filter); if (err > 0) { pr_debug("Using %s for symbols\n", vmlinux_path[i]); @@ -1478,12 +1438,12 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, if (is_kallsyms) goto do_kallsyms; - err = dso__load_vmlinux(self, map, self->long_name, filter); + err = dso__load_vmlinux(self, map, thread, self->long_name, filter); if (err <= 0) { pr_info("The file %s cannot be used, " "trying to use /proc/kallsyms...", self->long_name); do_kallsyms: - err = dso__load_kallsyms(self, map, filter); + err = dso__load_kallsyms(self, map, thread, filter); if (err > 0 && !is_kallsyms) dso__set_long_name(self, strdup("[kernel.kallsyms]")); } @@ -1535,8 +1495,11 @@ static void __dsos__fprintf(struct list_head *head, FILE *fp) { struct dso *pos; - list_for_each_entry(pos, head, node) - dso__fprintf(pos, fp); + list_for_each_entry(pos, head, node) { + int i; + for (i = 0; i < MAP__NR_TYPES; ++i) + dso__fprintf(pos, i, fp); + } } void dsos__fprintf(FILE *fp) @@ -1563,10 +1526,10 @@ size_t dsos__fprintf_buildid(FILE *fp) __dsos__fprintf_buildid(&dsos__user, fp)); } -static int kernel_maps__create_kernel_map(const struct symbol_conf *conf) +static int thread__create_kernel_map(struct thread *self, const char *vmlinux) { struct map *kmap; - struct dso *kernel = dso__new(conf->vmlinux_name ?: "[kernel.kallsyms]"); + struct dso *kernel = dso__new(vmlinux ?: "[kernel.kallsyms]"); if (kernel == NULL) return -1; @@ -1588,7 +1551,7 @@ static int kernel_maps__create_kernel_map(const struct symbol_conf *conf) sizeof(kernel->build_id)) == 0) kernel->has_build_id = true; - kernel_maps__insert(kmap); + __thread__insert_map(self, kmap); dsos__add(&dsos__kernel, kernel); dsos__add(&dsos__user, vdso); @@ -1656,32 +1619,28 @@ out_fail: return -1; } -static int kernel_maps__init(const struct symbol_conf *conf) +int symbol__init(struct symbol_conf *conf) { const struct symbol_conf *pconf = conf ?: &symbol_conf__defaults; + elf_version(EV_CURRENT); symbol__priv_size = pconf->priv_size; + thread__init(kthread, 0); if (pconf->try_vmlinux_path && vmlinux_path__init() < 0) return -1; - if (kernel_maps__create_kernel_map(pconf) < 0) { + if (thread__create_kernel_map(kthread, pconf->vmlinux_name) < 0) { vmlinux_path__exit(); return -1; } - if (pconf->use_modules && kernel_maps__create_module_maps() < 0) + if (pconf->use_modules && thread__create_module_maps(kthread) < 0) pr_debug("Failed to load list of modules in use, " "continuing...\n"); /* * Now that we have all the maps created, just set the ->end of them: */ - kernel_maps__fixup_end(); + thread__fixup_maps_end(kthread); return 0; } - -int symbol__init(struct symbol_conf *conf) -{ - elf_version(EV_CURRENT); - return kernel_maps__init(conf); -} diff --git 
a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 8934814d5a6..3f9e4a4d83d 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -92,7 +92,7 @@ void dsos__fprintf(FILE *fp); size_t dsos__fprintf_buildid(FILE *fp); size_t dso__fprintf_buildid(struct dso *self, FILE *fp); -size_t dso__fprintf(struct dso *self, FILE *fp); +size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp); char dso__symtab_origin(const struct dso *self); void dso__set_build_id(struct dso *self, void *build_id); diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 1796625f778..2229f82cd63 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -9,17 +9,26 @@ static struct rb_root threads; static struct thread *last_match; +void thread__init(struct thread *self, pid_t pid) +{ + int i; + self->pid = pid; + self->comm = NULL; + for (i = 0; i < MAP__NR_TYPES; ++i) { + self->maps[i] = RB_ROOT; + INIT_LIST_HEAD(&self->removed_maps[i]); + } +} + static struct thread *thread__new(pid_t pid) { struct thread *self = zalloc(sizeof(*self)); if (self != NULL) { - self->pid = pid; + thread__init(self, pid); self->comm = malloc(32); if (self->comm) snprintf(self->comm, 32, ":%d", self->pid); - self->maps = RB_ROOT; - INIT_LIST_HEAD(&self->removed_maps); } return self; @@ -44,24 +53,68 @@ int thread__comm_len(struct thread *self) return self->comm_len; } -static size_t thread__fprintf(struct thread *self, FILE *fp) -{ - struct rb_node *nd; - struct map *pos; - size_t ret = fprintf(fp, "Thread %d %s\nCurrent maps:\n", - self->pid, self->comm); +static const char *map_type__name[MAP__NR_TYPES] = { + [MAP__FUNCTION] = "Functions", +}; - for (nd = rb_first(&self->maps); nd; nd = rb_next(nd)) { - pos = rb_entry(nd, struct map, rb_node); - ret += map__fprintf(pos, fp); +static size_t __thread__fprintf_maps(struct thread *self, + enum map_type type, FILE *fp) +{ + size_t printed = fprintf(fp, "%s:\n", map_type__name[type]); + struct rb_node *nd; + + for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) { + struct map *pos = rb_entry(nd, struct map, rb_node); + printed += fprintf(fp, "Map:"); + printed += map__fprintf(pos, fp); + if (verbose > 1) { + printed += dso__fprintf(pos->dso, type, fp); + printed += fprintf(fp, "--\n"); + } } - ret = fprintf(fp, "Removed maps:\n"); + return printed; +} - list_for_each_entry(pos, &self->removed_maps, node) - ret += map__fprintf(pos, fp); +size_t thread__fprintf_maps(struct thread *self, FILE *fp) +{ + size_t printed = 0, i; + for (i = 0; i < MAP__NR_TYPES; ++i) + printed += __thread__fprintf_maps(self, i, fp); + return printed; +} - return ret; +static size_t __thread__fprintf_removed_maps(struct thread *self, + enum map_type type, FILE *fp) +{ + struct map *pos; + size_t printed = 0; + + list_for_each_entry(pos, &self->removed_maps[type], node) { + printed += fprintf(fp, "Map:"); + printed += map__fprintf(pos, fp); + if (verbose > 1) { + printed += dso__fprintf(pos->dso, type, fp); + printed += fprintf(fp, "--\n"); + } + } + return printed; +} + +static size_t thread__fprintf_removed_maps(struct thread *self, FILE *fp) +{ + size_t printed = 0, i; + for (i = 0; i < MAP__NR_TYPES; ++i) + printed += __thread__fprintf_removed_maps(self, i, fp); + return printed; +} + +static size_t thread__fprintf(struct thread *self, FILE *fp) +{ + size_t printed = fprintf(fp, "Thread %d %s\n", self->pid, self->comm); + printed += thread__fprintf_removed_maps(self, fp); + printed += fprintf(fp, "Removed maps:\n"); + return printed + 
thread__fprintf_removed_maps(self, fp); } struct thread *threads__findnew(pid_t pid) @@ -117,7 +170,8 @@ struct thread *register_idle_thread(void) static void thread__remove_overlappings(struct thread *self, struct map *map) { - struct rb_node *next = rb_first(&self->maps); + struct rb_root *root = &self->maps[map->type]; + struct rb_node *next = rb_first(root); while (next) { struct map *pos = rb_entry(next, struct map, rb_node); @@ -132,13 +186,13 @@ static void thread__remove_overlappings(struct thread *self, struct map *map) map__fprintf(pos, stderr); } - rb_erase(&pos->rb_node, &self->maps); + rb_erase(&pos->rb_node, root); /* * We may have references to this map, for instance in some * hist_entry instances, so just move them to a separate * list. */ - list_add_tail(&pos->node, &self->removed_maps); + list_add_tail(&pos->node, &self->removed_maps[map->type]); } } @@ -185,12 +239,26 @@ struct map *maps__find(struct rb_root *maps, u64 ip) void thread__insert_map(struct thread *self, struct map *map) { thread__remove_overlappings(self, map); - maps__insert(&self->maps, map); + maps__insert(&self->maps[map->type], map); +} + +static int thread__clone_maps(struct thread *self, struct thread *parent, + enum map_type type) +{ + struct rb_node *nd; + for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) { + struct map *map = rb_entry(nd, struct map, rb_node); + struct map *new = map__clone(map); + if (new == NULL) + return -ENOMEM; + thread__insert_map(self, new); + } + return 0; } int thread__fork(struct thread *self, struct thread *parent) { - struct rb_node *nd; + int i; if (self->comm) free(self->comm); @@ -198,14 +266,9 @@ int thread__fork(struct thread *self, struct thread *parent) if (!self->comm) return -ENOMEM; - for (nd = rb_first(&parent->maps); nd; nd = rb_next(nd)) { - struct map *map = rb_entry(nd, struct map, rb_node); - struct map *new = map__clone(map); - if (!new) + for (i = 0; i < MAP__NR_TYPES; ++i) + if (thread__clone_maps(self, parent, i) < 0) return -ENOMEM; - thread__insert_map(self, new); - } - return 0; } diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 54580bb8000..3bdd9b2276f 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -7,20 +7,22 @@ struct thread { struct rb_node rb_node; - struct rb_root maps; - struct list_head removed_maps; + struct rb_root maps[MAP__NR_TYPES]; + struct list_head removed_maps[MAP__NR_TYPES]; pid_t pid; char shortname[3]; char *comm; int comm_len; }; +void thread__init(struct thread *self, pid_t pid); int thread__set_comm(struct thread *self, const char *comm); int thread__comm_len(struct thread *self); struct thread *threads__findnew(pid_t pid); struct thread *register_idle_thread(void); void thread__insert_map(struct thread *self, struct map *map); int thread__fork(struct thread *self, struct thread *parent); +size_t thread__fprintf_maps(struct thread *self, FILE *fp); size_t threads__fprintf(FILE *fp); void maps__insert(struct rb_root *maps, struct map *map); @@ -29,9 +31,14 @@ struct map *maps__find(struct rb_root *maps, u64 ip); struct symbol *kernel_maps__find_function(const u64 ip, struct map **mapp, symbol_filter_t filter); -static inline struct map *thread__find_map(struct thread *self, u64 ip) +static inline struct map *thread__find_map(struct thread *self, + enum map_type type, u64 ip) { - return self ? maps__find(&self->maps, ip) : NULL; + return self ? 
maps__find(&self->maps[type], ip) : NULL; } +static inline void __thread__insert_map(struct thread *self, struct map *map) +{ + maps__insert(&self->maps[map->type], map); +} #endif /* __PERF_THREAD_H */ From 1de8e24520ffdcf2a90c842eed937f59079a2abd Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 27 Nov 2009 16:29:21 -0200 Subject: [PATCH 429/470] perf symbols: When not using modules, discard its symbols MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1259346563-12568-10-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/symbol.c | 6 +++++- tools/perf/util/thread.h | 1 + 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index b6a2941e778..b788c2f5d67 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -381,6 +381,9 @@ static int dso__split_kallsyms(struct dso *self, struct map *map, struct thread module = strchr(pos->name, '\t'); if (module) { + if (!thread->use_modules) + goto discard_symbol; + *module++ = '\0'; if (strcmp(self->name, module)) { @@ -420,7 +423,7 @@ static int dso__split_kallsyms(struct dso *self, struct map *map, struct thread } if (filter && filter(curr_map, pos)) { - rb_erase(&pos->rb_node, root); +discard_symbol: rb_erase(&pos->rb_node, root); symbol__delete(pos); } else { if (curr_map != map) { @@ -1635,6 +1638,7 @@ int symbol__init(struct symbol_conf *conf) return -1; } + kthread->use_modules = pconf->use_modules; if (pconf->use_modules && thread__create_module_maps(kthread) < 0) pr_debug("Failed to load list of modules in use, " "continuing...\n"); diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 3bdd9b2276f..59b0d9b577d 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -10,6 +10,7 @@ struct thread { struct rb_root maps[MAP__NR_TYPES]; struct list_head removed_maps[MAP__NR_TYPES]; pid_t pid; + bool use_modules; char shortname[3]; char *comm; int comm_len; From 62daacb51a2bf8480e6f6b3696b03f102fc15eb0 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 27 Nov 2009 16:29:22 -0200 Subject: [PATCH 430/470] perf tools: Reorganize event processing routines, lotsa dups killed MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit While implementing event__preprocess_sample, that will do all of the symbol lookup in one convenient function, I noticed that util/process_event.[ch] were not being used at all, then started looking if there were other functions that could be shared and... All those functions really don't need to receive offset + head, the only thing they did was common to all of them, so do it at one place instead. Stats about number of each type of event processed now is done in a central place. 
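Taking the builtin-annotate conversion in the diff below as the example, the per-tool dispatch boils down to roughly the following (abridged sketch; the remaining record types and the per-type stats bookkeeping are omitted):

	/* One switch per tool; the shared event__process_* helpers no longer
	 * take offset + head, that dump is now done once, centrally. */
	static int event__process(event_t *self)
	{
		switch (self->header.type) {
		case PERF_RECORD_SAMPLE:
			return process_sample_event(self);	/* stays per tool */
		case PERF_RECORD_MMAP:
			return event__process_mmap(self);	/* shared helper */
		case PERF_RECORD_COMM:
			return event__process_comm(self);	/* shared helper */
		case PERF_RECORD_FORK:
			return event__process_task(self);	/* shared helper */
		default:
			return 0;
		}
	}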
Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: John Kacur Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1259346563-12568-11-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 2 - tools/perf/builtin-annotate.c | 63 +++++++-------------------- tools/perf/builtin-kmem.c | 33 ++------------ tools/perf/builtin-report.c | 72 ++++++++----------------------- tools/perf/builtin-sched.c | 42 +++--------------- tools/perf/builtin-top.c | 19 -------- tools/perf/builtin-trace.c | 40 +++-------------- tools/perf/util/data_map.c | 56 ++++++++++++++++++------ tools/perf/util/data_map.h | 2 +- tools/perf/util/event.c | 74 ++++++++++++++++++++++++++++++++ tools/perf/util/event.h | 17 ++++++++ tools/perf/util/hist.c | 7 --- tools/perf/util/process_event.c | 53 ----------------------- tools/perf/util/process_event.h | 29 ------------- tools/perf/util/process_events.c | 64 --------------------------- tools/perf/util/process_events.h | 35 --------------- 16 files changed, 183 insertions(+), 425 deletions(-) delete mode 100644 tools/perf/util/process_event.c delete mode 100644 tools/perf/util/process_event.h delete mode 100644 tools/perf/util/process_events.c delete mode 100644 tools/perf/util/process_events.h diff --git a/tools/perf/Makefile b/tools/perf/Makefile index de37d492e10..f1537a94a05 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -369,7 +369,6 @@ LIB_H += util/sort.h LIB_H += util/hist.h LIB_H += util/thread.h LIB_H += util/data_map.h -LIB_H += util/process_events.h LIB_OBJS += util/abspath.o LIB_OBJS += util/alias.o @@ -412,7 +411,6 @@ LIB_OBJS += util/svghelper.o LIB_OBJS += util/sort.o LIB_OBJS += util/hist.o LIB_OBJS += util/data_map.o -LIB_OBJS += util/process_events.o BUILTIN_OBJS += builtin-annotate.o diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 3ebd70b1ef9..7d39bd2b19b 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -19,12 +19,12 @@ #include "perf.h" #include "util/debug.h" +#include "util/event.h" #include "util/parse-options.h" #include "util/parse-events.h" #include "util/thread.h" #include "util/sort.h" #include "util/hist.h" -#include "util/process_events.h" static char const *input_name = "perf.data"; @@ -136,8 +136,7 @@ static int hist_entry__add(struct thread *thread, struct map *map, return 0; } -static int -process_sample_event(event_t *event, unsigned long offset, unsigned long head) +static int process_sample_event(event_t *event) { char level; u64 ip = event->ip.ip; @@ -145,12 +144,8 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) struct symbol *sym = NULL; struct thread *thread = threads__findnew(event->ip.pid); - dump_printf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->header.misc, - event->ip.pid, - (void *)(long)ip); + dump_printf("(IP, %d): %d: %p\n", event->header.misc, + event->ip.pid, (void *)(long)ip); if (thread == NULL) { fprintf(stderr, "problem processing %d event, skipping it.\n", @@ -198,46 +193,24 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) "skipping event\n"); return -1; } - total++; return 0; } -static int -process_comm_event(event_t *event, unsigned long offset, unsigned long head) +static int event__process(event_t *self) { - struct thread *thread = threads__findnew(event->comm.pid); - - dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n", - (void 
*)(offset + head), - (void *)(long)(event->header.size), - event->comm.comm, event->comm.pid); - - if (thread == NULL || - thread__set_comm(thread, event->comm.comm)) { - dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); - return -1; - } - total_comm++; - - return 0; -} - -static int -process_event(event_t *event, unsigned long offset, unsigned long head) -{ - switch (event->header.type) { + switch (self->header.type) { case PERF_RECORD_SAMPLE: - return process_sample_event(event, offset, head); + return process_sample_event(self); case PERF_RECORD_MMAP: - return process_mmap_event(event, offset, head); + return event__process_mmap(self); case PERF_RECORD_COMM: - return process_comm_event(event, offset, head); + return event__process_comm(self); case PERF_RECORD_FORK: - return process_task_event(event, offset, head); + return event__process_task(self); /* * We dont process them right now but they are fine: */ @@ -621,15 +594,12 @@ more: (void *)(long)event->header.size, event->header.type); - if (!size || process_event(event, offset, head) < 0) { + if (!size || event__process(event) < 0) { dump_printf("%p [%p]: skipping unknown header type: %d\n", (void *)(offset + head), (void *)(long)(event->header.size), event->header.type); - - total_unknown++; - /* * assume we lost track of the stream, check alignment, and * increment a single u64 in the hope to catch on again 'soon'. @@ -649,14 +619,11 @@ more: rc = EXIT_SUCCESS; close(input); - dump_printf(" IP events: %10ld\n", total); - dump_printf(" mmap events: %10ld\n", total_mmap); - dump_printf(" comm events: %10ld\n", total_comm); - dump_printf(" fork events: %10ld\n", total_fork); - dump_printf(" unknown events: %10ld\n", total_unknown); - if (dump_trace) + if (dump_trace) { + event__print_totals(); return 0; + } if (verbose > 3) threads__fprintf(stdout); @@ -665,7 +632,7 @@ more: dsos__fprintf(stdout); collapse__resort(); - output__resort(total); + output__resort(event__total[0]); find_annotations(); diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index 35722fafc4d..e7294c8fc62 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -33,9 +33,6 @@ static bool raw_ip; static char default_sort_order[] = "frag,hit,bytes"; -static char *cwd; -static int cwdlen; - static int *cpunode_map; static int max_cpu_num; @@ -126,25 +123,6 @@ static void setup_cpunode_map(void) } } -static int -process_comm_event(event_t *event, unsigned long offset, unsigned long head) -{ - struct thread *thread = threads__findnew(event->comm.pid); - - dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->comm.comm, event->comm.pid); - - if (thread == NULL || - thread__set_comm(thread, event->comm.comm)) { - dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); - return -1; - } - - return 0; -} - static void insert_alloc_stat(unsigned long call_site, unsigned long ptr, int bytes_req, int bytes_alloc, int cpu) { @@ -340,8 +318,7 @@ process_raw_event(event_t *raw_event __used, void *more_data, } } -static int -process_sample_event(event_t *event, unsigned long offset, unsigned long head) +static int process_sample_event(event_t *event) { u64 ip = event->ip.ip; u64 timestamp = -1; @@ -366,9 +343,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) more_data += sizeof(u64); } - dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", - (void *)(offset + head), - (void 
*)(long)(event->header.size), + dump_printf("(IP, %d): %d/%d: %p period: %Ld\n", event->header.misc, event->ip.pid, event->ip.tid, (void *)(long)ip, @@ -403,7 +378,7 @@ static int sample_type_check(u64 type) static struct perf_file_handler file_handler = { .process_sample_event = process_sample_event, - .process_comm_event = process_comm_event, + .process_comm_event = event__process_comm, .sample_type_check = sample_type_check, }; @@ -413,7 +388,7 @@ static int read_events(void) register_perf_file_handler(&file_handler); return mmap_dispatch_perf_file(&header, input_name, 0, 0, - &cwdlen, &cwd); + &event__cwdlen, &event__cwd); } static double fragmentation(unsigned long n_req, unsigned long n_alloc) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 9bd20c2ee3d..01ef35cac5f 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -30,7 +30,6 @@ #include "util/thread.h" #include "util/sort.h" #include "util/hist.h" -#include "util/process_events.h" static char const *input_name = "perf.data"; @@ -655,8 +654,7 @@ static int validate_chain(struct ip_callchain *chain, event_t *event) return 0; } -static int -process_sample_event(event_t *event, unsigned long offset, unsigned long head) +static int process_sample_event(event_t *event) { char level; struct symbol *sym = NULL; @@ -673,9 +671,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) more_data += sizeof(u64); } - dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", - (void *)(offset + head), - (void *)(long)(event->header.size), + dump_printf("(IP, %d): %d/%d: %p period: %Ld\n", event->header.misc, event->ip.pid, event->ip.tid, (void *)(long)ip, @@ -743,47 +739,27 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) return -1; } - total += period; + event__stats.total += period; return 0; } -static int -process_comm_event(event_t *event, unsigned long offset, unsigned long head) +static int process_comm_event(event_t *event) { struct thread *thread = threads__findnew(event->comm.pid); - dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->comm.comm, event->comm.pid); + dump_printf(": %s:%d\n", event->comm.comm, event->comm.pid); if (thread == NULL || thread__set_comm_adjust(thread, event->comm.comm)) { dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); return -1; } - total_comm++; return 0; } -static int -process_lost_event(event_t *event, unsigned long offset, unsigned long head) -{ - dump_printf("%p [%p]: PERF_RECORD_LOST: id:%Ld: lost:%Ld\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->lost.id, - event->lost.lost); - - total_lost += event->lost.lost; - - return 0; -} - -static int -process_read_event(event_t *event, unsigned long offset, unsigned long head) +static int process_read_event(event_t *event) { struct perf_event_attr *attr; @@ -799,14 +775,9 @@ process_read_event(event_t *event, unsigned long offset, unsigned long head) event->read.value); } - dump_printf("%p [%p]: PERF_RECORD_READ: %d %d %s %Lu\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->read.pid, - event->read.tid, - attr ? __event_name(attr->type, attr->config) - : "FAIL", - event->read.value); + dump_printf(": %d %d %s %Lu\n", event->read.pid, event->read.tid, + attr ? 
__event_name(attr->type, attr->config) : "FAIL", + event->read.value); return 0; } @@ -842,11 +813,11 @@ static int sample_type_check(u64 type) static struct perf_file_handler file_handler = { .process_sample_event = process_sample_event, - .process_mmap_event = process_mmap_event, + .process_mmap_event = event__process_mmap, .process_comm_event = process_comm_event, - .process_exit_event = process_task_event, - .process_fork_event = process_task_event, - .process_lost_event = process_lost_event, + .process_exit_event = event__process_task, + .process_fork_event = event__process_task, + .process_lost_event = event__process_lost, .process_read_event = process_read_event, .sample_type_check = sample_type_check, }; @@ -866,19 +837,14 @@ static int __cmd_report(void) register_perf_file_handler(&file_handler); ret = mmap_dispatch_perf_file(&header, input_name, force, - full_paths, &cwdlen, &cwd); + full_paths, &event__cwdlen, &event__cwd); if (ret) return ret; - dump_printf(" IP events: %10ld\n", total); - dump_printf(" mmap events: %10ld\n", total_mmap); - dump_printf(" comm events: %10ld\n", total_comm); - dump_printf(" fork events: %10ld\n", total_fork); - dump_printf(" lost events: %10ld\n", total_lost); - dump_printf(" unknown events: %10ld\n", file_handler.total_unknown); - - if (dump_trace) + if (dump_trace) { + event__print_totals(); return 0; + } if (verbose > 3) threads__fprintf(stdout); @@ -887,8 +853,8 @@ static int __cmd_report(void) dsos__fprintf(stdout); collapse__resort(); - output__resort(total); - output__fprintf(stdout, total); + output__resort(event__stats.total); + output__fprintf(stdout, event__stats.total); if (show_threads) perf_read_values_destroy(&show_threads_values); diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 19eb708a706..26b782f26ee 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -22,8 +22,6 @@ static char const *input_name = "perf.data"; -static unsigned long total_comm = 0; - static struct perf_header *header; static u64 sample_type; @@ -32,9 +30,6 @@ static char *sort_order = default_sort_order; static int profile_cpu = -1; -static char *cwd; -static int cwdlen; - #define PR_SET_NAME 15 /* Set process name */ #define MAX_CPUS 4096 @@ -633,27 +628,6 @@ static void test_calibrations(void) printf("the sleep test took %Ld nsecs\n", T1-T0); } -static int -process_comm_event(event_t *event, unsigned long offset, unsigned long head) -{ - struct thread *thread = threads__findnew(event->comm.tid); - - dump_printf("%p [%p]: perf_event_comm: %s:%d\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->comm.comm, event->comm.pid); - - if (thread == NULL || - thread__set_comm(thread, event->comm.comm)) { - dump_printf("problem processing perf_event_comm, skipping event.\n"); - return -1; - } - total_comm++; - - return 0; -} - - struct raw_event_sample { u32 size; char data[0]; @@ -1622,8 +1596,7 @@ process_raw_event(event_t *raw_event __used, void *more_data, process_sched_migrate_task_event(raw, event, cpu, timestamp, thread); } -static int -process_sample_event(event_t *event, unsigned long offset, unsigned long head) +static int process_sample_event(event_t *event) { struct thread *thread; u64 ip = event->ip.ip; @@ -1653,9 +1626,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) more_data += sizeof(u64); } - dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", - (void *)(offset + head), - (void *)(long)(event->header.size), + 
dump_printf("(IP, %d): %d/%d: %p period: %Ld\n", event->header.misc, event->ip.pid, event->ip.tid, (void *)(long)ip, @@ -1677,10 +1648,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) return 0; } -static int -process_lost_event(event_t *event __used, - unsigned long offset __used, - unsigned long head __used) +static int process_lost_event(event_t *event __used) { nr_lost_chunks++; nr_lost_events += event->lost.lost; @@ -1704,7 +1672,7 @@ static int sample_type_check(u64 type) static struct perf_file_handler file_handler = { .process_sample_event = process_sample_event, - .process_comm_event = process_comm_event, + .process_comm_event = event__process_comm, .process_lost_event = process_lost_event, .sample_type_check = sample_type_check, }; @@ -1715,7 +1683,7 @@ static int read_events(void) register_perf_file_handler(&file_handler); return mmap_dispatch_perf_file(&header, input_name, 0, 0, - &cwdlen, &cwd); + &event__cwdlen, &event__cwd); } static void print_bad_events(void) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index bf6730c7603..7a3c0c7aad3 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -991,25 +991,6 @@ static void event__process_sample(const event_t *self, int counter) } } -static void event__process_mmap(event_t *self) -{ - struct thread *thread = threads__findnew(self->mmap.pid); - - if (thread != NULL) { - struct map *map = map__new(&self->mmap, MAP__FUNCTION, NULL, 0); - if (map != NULL) - thread__insert_map(thread, map); - } -} - -static void event__process_comm(event_t *self) -{ - struct thread *thread = threads__findnew(self->comm.pid); - - if (thread != NULL) - thread__set_comm(thread, self->comm.comm); -} - static int event__process(event_t *event) { switch (event->header.type) { diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 75972fd073d..a7750256c40 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -16,38 +16,10 @@ static char const *input_name = "perf.data"; -static unsigned long total = 0; -static unsigned long total_comm = 0; - static struct perf_header *header; static u64 sample_type; -static char *cwd; -static int cwdlen; - - -static int -process_comm_event(event_t *event, unsigned long offset, unsigned long head) -{ - struct thread *thread = threads__findnew(event->comm.pid); - - dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->comm.comm, event->comm.pid); - - if (thread == NULL || - thread__set_comm(thread, event->comm.comm)) { - dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); - return -1; - } - total_comm++; - - return 0; -} - -static int -process_sample_event(event_t *event, unsigned long offset, unsigned long head) +static int process_sample_event(event_t *event) { u64 ip = event->ip.ip; u64 timestamp = -1; @@ -72,9 +44,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) more_data += sizeof(u64); } - dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", - (void *)(offset + head), - (void *)(long)(event->header.size), + dump_printf("(IP, %d): %d/%d: %p period: %Ld\n", event->header.misc, event->ip.pid, event->ip.tid, (void *)(long)ip, @@ -101,7 +71,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head) */ print_event(cpu, raw->data, raw->size, timestamp, thread->comm); } - total += period; + event__stats.total += period; return 0; } @@ -122,7 +92,7 
@@ static int sample_type_check(u64 type) static struct perf_file_handler file_handler = { .process_sample_event = process_sample_event, - .process_comm_event = process_comm_event, + .process_comm_event = event__process_comm, .sample_type_check = sample_type_check, }; @@ -132,7 +102,7 @@ static int __cmd_trace(void) register_perf_file_handler(&file_handler); return mmap_dispatch_perf_file(&header, input_name, - 0, 0, &cwdlen, &cwd); + 0, 0, &event__cwdlen, &event__cwd); } static const char * const annotate_usage[] = { diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c index b238462b898..ca0bedf637c 100644 --- a/tools/perf/util/data_map.c +++ b/tools/perf/util/data_map.c @@ -8,11 +8,9 @@ static struct perf_file_handler *curr_handler; static unsigned long mmap_window = 32; static char __cwd[PATH_MAX]; -static int -process_event_stub(event_t *event __used, - unsigned long offset __used, - unsigned long head __used) +static int process_event_stub(event_t *event __used) { + dump_printf(": unhandled!\n"); return 0; } @@ -40,30 +38,62 @@ void register_perf_file_handler(struct perf_file_handler *handler) curr_handler = handler; } +static const char *event__name[] = { + [0] = "TOTAL", + [PERF_RECORD_MMAP] = "MMAP", + [PERF_RECORD_LOST] = "LOST", + [PERF_RECORD_COMM] = "COMM", + [PERF_RECORD_EXIT] = "EXIT", + [PERF_RECORD_THROTTLE] = "THROTTLE", + [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE", + [PERF_RECORD_FORK] = "FORK", + [PERF_RECORD_READ] = "READ", + [PERF_RECORD_SAMPLE] = "SAMPLE", +}; + +unsigned long event__total[PERF_RECORD_MAX]; + +void event__print_totals(void) +{ + int i; + for (i = 0; i < PERF_RECORD_MAX; ++i) + pr_info("%10s events: %10ld\n", + event__name[i], event__total[i]); +} + static int process_event(event_t *event, unsigned long offset, unsigned long head) { trace_event(event); + if (event->header.type < PERF_RECORD_MAX) { + dump_printf("%p [%p]: PERF_RECORD_%s", + (void *)(offset + head), + (void *)(long)(event->header.size), + event__name[event->header.type]); + ++event__total[0]; + ++event__total[event->header.type]; + } + switch (event->header.type) { case PERF_RECORD_SAMPLE: - return curr_handler->process_sample_event(event, offset, head); + return curr_handler->process_sample_event(event); case PERF_RECORD_MMAP: - return curr_handler->process_mmap_event(event, offset, head); + return curr_handler->process_mmap_event(event); case PERF_RECORD_COMM: - return curr_handler->process_comm_event(event, offset, head); + return curr_handler->process_comm_event(event); case PERF_RECORD_FORK: - return curr_handler->process_fork_event(event, offset, head); + return curr_handler->process_fork_event(event); case PERF_RECORD_EXIT: - return curr_handler->process_exit_event(event, offset, head); + return curr_handler->process_exit_event(event); case PERF_RECORD_LOST: - return curr_handler->process_lost_event(event, offset, head); + return curr_handler->process_lost_event(event); case PERF_RECORD_READ: - return curr_handler->process_read_event(event, offset, head); + return curr_handler->process_read_event(event); case PERF_RECORD_THROTTLE: - return curr_handler->process_throttle_event(event, offset, head); + return curr_handler->process_throttle_event(event); case PERF_RECORD_UNTHROTTLE: - return curr_handler->process_unthrottle_event(event, offset, head); + return curr_handler->process_unthrottle_event(event); default: curr_handler->total_unknown++; return -1; diff --git a/tools/perf/util/data_map.h b/tools/perf/util/data_map.h index ae036ecd762..3180ff7e363 100644 --- 
a/tools/perf/util/data_map.h +++ b/tools/perf/util/data_map.h @@ -4,7 +4,7 @@ #include "event.h" #include "header.h" -typedef int (*event_type_handler_t)(event_t *, unsigned long, unsigned long); +typedef int (*event_type_handler_t)(event_t *); struct perf_file_handler { event_type_handler_t process_sample_event; diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 1dae7e3b400..70b4aa03b47 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -2,6 +2,7 @@ #include "event.h" #include "debug.h" #include "string.h" +#include "thread.h" static pid_t event__synthesize_comm(pid_t pid, int full, int (*process)(event_t *event)) @@ -175,3 +176,76 @@ void event__synthesize_threads(int (*process)(event_t *event)) closedir(proc); } + +char *event__cwd; +int event__cwdlen; + +struct events_stats event__stats; + +int event__process_comm(event_t *self) +{ + struct thread *thread = threads__findnew(self->comm.pid); + + dump_printf("PERF_RECORD_COMM: %s:%d\n", + self->comm.comm, self->comm.pid); + + if (thread == NULL || thread__set_comm(thread, self->comm.comm)) { + dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); + return -1; + } + + return 0; +} + +int event__process_lost(event_t *self) +{ + dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost); + event__stats.lost += self->lost.lost; + return 0; +} + +int event__process_mmap(event_t *self) +{ + struct thread *thread = threads__findnew(self->mmap.pid); + struct map *map = map__new(&self->mmap, MAP__FUNCTION, + event__cwd, event__cwdlen); + + dump_printf(" %d/%d: [%p(%p) @ %p]: %s\n", + self->mmap.pid, self->mmap.tid, + (void *)(long)self->mmap.start, + (void *)(long)self->mmap.len, + (void *)(long)self->mmap.pgoff, + self->mmap.filename); + + if (thread == NULL || map == NULL) + dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); + else + thread__insert_map(thread, map); + + return 0; +} + +int event__process_task(event_t *self) +{ + struct thread *thread = threads__findnew(self->fork.pid); + struct thread *parent = threads__findnew(self->fork.ppid); + + dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid, + self->fork.ppid, self->fork.ptid); + /* + * A thread clone will have the same PID for both parent and child. 
+ */ + if (thread == parent) + return 0; + + if (self->header.type == PERF_RECORD_EXIT) + return 0; + + if (thread == NULL || parent == NULL || + thread__fork(thread, parent) < 0) { + dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n"); + return -1; + } + + return 0; +} diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 3ae3c964c90..13c12c75f97 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -80,6 +80,13 @@ typedef union event_union { struct sample_event sample; } event_t; +struct events_stats { + unsigned long total; + unsigned long lost; +}; + +void event__print_totals(void); + enum map_type { MAP__FUNCTION = 0, @@ -135,4 +142,14 @@ void map__fixup_end(struct map *self); int event__synthesize_thread(pid_t pid, int (*process)(event_t *event)); void event__synthesize_threads(int (*process)(event_t *event)); +extern char *event__cwd; +extern int event__cwdlen; +extern struct events_stats event__stats; +extern unsigned long event__total[PERF_RECORD_MAX]; + +int event__process_comm(event_t *self); +int event__process_lost(event_t *self); +int event__process_mmap(event_t *self); +int event__process_task(event_t *self); + #endif /* __PERF_RECORD_H */ diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 7393a02fd8d..f26cd9ba00f 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -10,13 +10,6 @@ struct callchain_param callchain_param = { .min_percent = 0.5 }; -unsigned long total; -unsigned long total_mmap; -unsigned long total_comm; -unsigned long total_fork; -unsigned long total_unknown; -unsigned long total_lost; - /* * histogram, sorted on item, collects counts */ diff --git a/tools/perf/util/process_event.c b/tools/perf/util/process_event.c deleted file mode 100644 index a970789581a..00000000000 --- a/tools/perf/util/process_event.c +++ /dev/null @@ -1,53 +0,0 @@ -#include "process_event.h" - -char *cwd; -int cwdlen; - -int -process_mmap_event(event_t *event, unsigned long offset, unsigned long head) -{ - struct map *map = map__new(&event->mmap, cwd, cwdlen); - struct thread *thread = threads__findnew(event->mmap.pid); - - dump_printf("%p [%p]: PERF_RECORD_MMAP %d/%d: [%p(%p) @ %p]: %s\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->mmap.pid, - event->mmap.tid, - (void *)(long)event->mmap.start, - (void *)(long)event->mmap.len, - (void *)(long)event->mmap.pgoff, - event->mmap.filename); - - if (thread == NULL || map == NULL) { - dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); - return 0; - } - - thread__insert_map(thread, map); - total_mmap++; - - return 0; - -} - -int -process_comm_event(event_t *event, unsigned long offset, unsigned long head) -{ - struct thread *thread = threads__findnew(event->comm.pid); - - dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->comm.comm, event->comm.pid); - - if (thread == NULL || - thread__set_comm_adjust(thread, event->comm.comm)) { - dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); - return -1; - } - total_comm++; - - return 0; -} - diff --git a/tools/perf/util/process_event.h b/tools/perf/util/process_event.h deleted file mode 100644 index 6f68c69736c..00000000000 --- a/tools/perf/util/process_event.h +++ /dev/null @@ -1,29 +0,0 @@ -#ifndef __PROCESS_EVENT_H -#define __PROCESS_EVENT_H - -#include "../builtin.h" -#include "util.h" - -#include "color.h" -#include -#include "cache.h" -#include -#include "symbol.h" -#include 
"string.h" - -#include "../perf.h" -#include "debug.h" - -#include "parse-options.h" -#include "parse-events.h" - -#include "thread.h" -#include "sort.h" -#include "hist.h" - -extern char *cwd; -extern int cwdlen; -extern int process_mmap_event(event_t *, unsigned long, unsigned long); -extern int process_comm_event(event_t *, unsigned long , unsigned long); - -#endif /* __PROCESS_H */ diff --git a/tools/perf/util/process_events.c b/tools/perf/util/process_events.c deleted file mode 100644 index 53778684641..00000000000 --- a/tools/perf/util/process_events.c +++ /dev/null @@ -1,64 +0,0 @@ -#include "process_events.h" - -char *cwd; -int cwdlen; - -int -process_mmap_event(event_t *event, unsigned long offset, unsigned long head) -{ - struct map *map = map__new(&event->mmap, MAP__FUNCTION, cwd, cwdlen); - struct thread *thread = threads__findnew(event->mmap.pid); - - dump_printf("%p [%p]: PERF_RECORD_MMAP %d/%d: [%p(%p) @ %p]: %s\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->mmap.pid, - event->mmap.tid, - (void *)(long)event->mmap.start, - (void *)(long)event->mmap.len, - (void *)(long)event->mmap.pgoff, - event->mmap.filename); - - if (thread == NULL || map == NULL) { - dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); - return 0; - } - - thread__insert_map(thread, map); - total_mmap++; - - return 0; -} - -int -process_task_event(event_t *event, unsigned long offset, unsigned long head) -{ - struct thread *thread = threads__findnew(event->fork.pid); - struct thread *parent = threads__findnew(event->fork.ppid); - - dump_printf("%p [%p]: PERF_RECORD_%s: (%d:%d):(%d:%d)\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->header.type == PERF_RECORD_FORK ? "FORK" : "EXIT", - event->fork.pid, event->fork.tid, - event->fork.ppid, event->fork.ptid); - - /* - * A thread clone will have the same PID for both - * parent and child. 
- */ - if (thread == parent) - return 0; - - if (event->header.type == PERF_RECORD_EXIT) - return 0; - - if (!thread || !parent || thread__fork(thread, parent)) { - dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n"); - return -1; - } - total_fork++; - - return 0; -} - diff --git a/tools/perf/util/process_events.h b/tools/perf/util/process_events.h deleted file mode 100644 index 73d092f8328..00000000000 --- a/tools/perf/util/process_events.h +++ /dev/null @@ -1,35 +0,0 @@ -#ifndef __PROCESS_EVENTS_H -#define __PROCESS_EVENTS_H - -#include "../builtin.h" - -#include "util.h" -#include "color.h" -#include -#include "cache.h" -#include -#include "symbol.h" -#include "string.h" -#include "callchain.h" -#include "strlist.h" -#include "values.h" - -#include "../perf.h" -#include "debug.h" -#include "header.h" - -#include "parse-options.h" -#include "parse-events.h" - -#include "data_map.h" -#include "thread.h" -#include "sort.h" -#include "hist.h" - -extern char *cwd; -extern int cwdlen; - -extern int process_mmap_event(event_t *, unsigned long , unsigned long); -extern int process_task_event(event_t *, unsigned long, unsigned long); - -#endif /* __PROCESS_EVENTS_H */ From 1ed091c45ae33b2179d387573c3fe3f3b4adf60a Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 27 Nov 2009 16:29:23 -0200 Subject: [PATCH 431/470] perf tools: Consolidate symbol resolving across all tools MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now we have a very high level routine for simple tools to process IP sample events: int event__preprocess_sample(const event_t *self, struct addr_location *al, symbol_filter_t filter) It receives the event itself and will insert new threads in the global threads list and resolve the map and symbol, filling all this info into the new addr_location struct, so that tools like annotate and report can further process the event by creating hist_entries in their specific way (with or without callgraphs, etc). It in turn uses the new next layer function: void thread__find_addr_location(struct thread *self, u8 cpumode, enum map_type type, u64 addr, struct addr_location *al, symbol_filter_t filter) This one will, given a thread (userspace or the kernel kthread one), will find the given type (MAP__FUNCTION now, MAP__VARIABLE too in the near future) at the given cpumode, taking vdsos into account (userspace hit, but kernel symbol) and will fill all these details in the addr_location given. Tools that need a more compact API for plain function resolution, like 'kmem', can use this other one: struct symbol *thread__find_function(struct thread *self, u64 addr, symbol_filter_t filter) So, to resolve a kernel symbol, that is all the 'kmem' tool needs, its just a matter of calling: sym = thread__find_function(kthread, addr, NULL); The 'filter' parameter is needed because we do lazy parsing/loading of ELF symtabs or /proc/kallsyms. With this we remove more code duplication all around, which is always good, huh? 
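To make the flow concrete, a minimal consumer of the new helpers could look like the sketch below (illustrative only, not part of this patch; it assumes the declarations added here in util/event.h, util/symbol.h and util/thread.h):

    #include <stdio.h>
    #include "util/event.h"    /* event_t, event__preprocess_sample()  */
    #include "util/symbol.h"   /* struct addr_location, struct symbol  */
    #include "util/thread.h"   /* kthread, thread__find_function()     */

    /* Per-sample callback of a hypothetical simple tool. */
    static int process_sample_event(event_t *event)
    {
            struct addr_location al;

            /* Finds/creates the thread and resolves map + symbol for the IP. */
            if (event__preprocess_sample(event, &al, NULL) < 0)
                    return -1;

            if (al.sym != NULL)
                    printf("%c %s\n", al.level, al.sym->name);

            return 0;
    }

    /* Compact kernel-only lookup, in the style now used by 'perf kmem': */
    static const char *kernel_sym_name(u64 addr)
    {
            struct symbol *sym = thread__find_function(kthread, addr, NULL);

            return sym ? sym->name : "[unknown]";
    }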
:-) Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: John Kacur Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1259346563-12568-12-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 55 ++------------ tools/perf/builtin-kmem.c | 2 +- tools/perf/builtin-report.c | 135 ++++++++++------------------------ tools/perf/builtin-top.c | 44 +++-------- tools/perf/util/event.c | 62 ++++++++++++++++ tools/perf/util/event.h | 4 + tools/perf/util/hist.c | 15 ++-- tools/perf/util/hist.h | 6 +- tools/perf/util/symbol.c | 26 +------ tools/perf/util/symbol.h | 10 +++ tools/perf/util/thread.c | 12 +++ tools/perf/util/thread.h | 23 ++++-- 12 files changed, 172 insertions(+), 222 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 7d39bd2b19b..7f85c6e159a 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -124,71 +124,30 @@ static void hist_hit(struct hist_entry *he, u64 ip) h->ip[offset]); } -static int hist_entry__add(struct thread *thread, struct map *map, - struct symbol *sym, u64 ip, u64 count, char level) +static int hist_entry__add(struct addr_location *al, u64 count) { bool hit; - struct hist_entry *he = __hist_entry__add(thread, map, sym, NULL, ip, - count, level, &hit); + struct hist_entry *he = __hist_entry__add(al, NULL, count, &hit); if (he == NULL) return -ENOMEM; - hist_hit(he, ip); + hist_hit(he, al->addr); return 0; } static int process_sample_event(event_t *event) { - char level; - u64 ip = event->ip.ip; - struct map *map = NULL; - struct symbol *sym = NULL; - struct thread *thread = threads__findnew(event->ip.pid); + struct addr_location al; dump_printf("(IP, %d): %d: %p\n", event->header.misc, - event->ip.pid, (void *)(long)ip); + event->ip.pid, (void *)(long)event->ip.ip); - if (thread == NULL) { + if (event__preprocess_sample(event, &al, symbol_filter) < 0) { fprintf(stderr, "problem processing %d event, skipping it.\n", event->header.type); return -1; } - dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); - - if (event->header.misc & PERF_RECORD_MISC_KERNEL) { - level = 'k'; - sym = kernel_maps__find_function(ip, &map, symbol_filter); - dump_printf(" ...... dso: %s\n", - map ? map->dso->long_name : ""); - } else if (event->header.misc & PERF_RECORD_MISC_USER) { - level = '.'; - map = thread__find_map(thread, MAP__FUNCTION, ip); - if (map != NULL) { - ip = map->map_ip(map, ip); - sym = map__find_symbol(map, ip, symbol_filter); - } else { - /* - * If this is outside of all known maps, - * and is a negative address, try to look it - * up in the kernel dso, as it might be a - * vsyscall or vdso (which executes in user-mode). - * - * XXX This is nasty, we should have a symbol list in - * the "[vdso]" dso, but for now lets use the old - * trick of looking in the whole kernel symbol list. - */ - if ((long long)ip < 0) - sym = kernel_maps__find_function(ip, &map, - symbol_filter); - } - dump_printf(" ...... dso: %s\n", - map ? map->dso->long_name : ""); - } else { - level = 'H'; - dump_printf(" ...... 
dso: [hypervisor]\n"); - } - - if (hist_entry__add(thread, map, sym, ip, 1, level)) { + if (hist_entry__add(&al, 1)) { fprintf(stderr, "problem incrementing symbol count, " "skipping event\n"); return -1; diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index e7294c8fc62..047fef74bd5 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -420,7 +420,7 @@ static void __print_result(struct rb_root *root, int n_lines, int is_caller) if (is_caller) { addr = data->call_site; if (!raw_ip) - sym = kernel_maps__find_function(addr, NULL, NULL); + sym = thread__find_function(kthread, addr, NULL); } else addr = data->ptr; diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 01ef35cac5f..383c4ab4f9a 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -408,55 +408,6 @@ static int thread__set_comm_adjust(struct thread *self, const char *comm) return 0; } - -static struct symbol * -resolve_symbol(struct thread *thread, struct map **mapp, u64 *ipp) -{ - struct map *map = mapp ? *mapp : NULL; - u64 ip = *ipp; - - if (map) - goto got_map; - - if (!thread) - return NULL; - - map = thread__find_map(thread, MAP__FUNCTION, ip); - if (map != NULL) { - /* - * We have to do this here as we may have a dso - * with no symbol hit that has a name longer than - * the ones with symbols sampled. - */ - if (!sort_dso.elide && !map->dso->slen_calculated) - dso__calc_col_width(map->dso); - - if (mapp) - *mapp = map; -got_map: - ip = map->map_ip(map, ip); - } else { - /* - * If this is outside of all known maps, - * and is a negative address, try to look it - * up in the kernel dso, as it might be a - * vsyscall or vdso (which executes in user-mode). - * - * XXX This is nasty, we should have a symbol list in - * the "[vdso]" dso, but for now lets use the old - * trick of looking in the whole kernel symbol list. - */ - if ((long long)ip < 0) - return kernel_maps__find_function(ip, mapp, NULL); - } - dump_printf(" ...... dso: %s\n", - map ? map->dso->long_name : ""); - dump_printf(" ...... map: %Lx -> %Lx\n", *ipp, ip); - *ipp = ip; - - return map ? 
map__find_symbol(map, ip, NULL) : NULL; -} - static int call__match(struct symbol *sym) { if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0)) @@ -469,7 +420,7 @@ static struct symbol **resolve_callchain(struct thread *thread, struct ip_callchain *chain, struct symbol **parent) { - u64 context = PERF_CONTEXT_MAX; + u8 cpumode = PERF_RECORD_MISC_USER; struct symbol **syms = NULL; unsigned int i; @@ -483,30 +434,31 @@ static struct symbol **resolve_callchain(struct thread *thread, for (i = 0; i < chain->nr; i++) { u64 ip = chain->ips[i]; - struct symbol *sym = NULL; + struct addr_location al; if (ip >= PERF_CONTEXT_MAX) { - context = ip; + switch (ip) { + case PERF_CONTEXT_HV: + cpumode = PERF_RECORD_MISC_HYPERVISOR; break; + case PERF_CONTEXT_KERNEL: + cpumode = PERF_RECORD_MISC_KERNEL; break; + case PERF_CONTEXT_USER: + cpumode = PERF_RECORD_MISC_USER; break; + default: + break; + } continue; } - switch (context) { - case PERF_CONTEXT_HV: - break; - case PERF_CONTEXT_KERNEL: - sym = kernel_maps__find_function(ip, NULL, NULL); - break; - default: - sym = resolve_symbol(thread, NULL, &ip); - break; - } - - if (sym) { - if (sort__has_parent && !*parent && call__match(sym)) - *parent = sym; + thread__find_addr_location(thread, cpumode, MAP__FUNCTION, + ip, &al, NULL); + if (al.sym != NULL) { + if (sort__has_parent && !*parent && + call__match(al.sym)) + *parent = al.sym; if (!callchain) break; - syms[i] = sym; + syms[i] = al.sym; } } @@ -517,20 +469,17 @@ static struct symbol **resolve_callchain(struct thread *thread, * collect histogram counts */ -static int -hist_entry__add(struct thread *thread, struct map *map, - struct symbol *sym, u64 ip, struct ip_callchain *chain, - char level, u64 count) +static int hist_entry__add(struct addr_location *al, + struct ip_callchain *chain, u64 count) { struct symbol **syms = NULL, *parent = NULL; bool hit; struct hist_entry *he; if ((sort__has_parent || callchain) && chain) - syms = resolve_callchain(thread, chain, &parent); + syms = resolve_callchain(al->thread, chain, &parent); - he = __hist_entry__add(thread, map, sym, parent, - ip, count, level, &hit); + he = __hist_entry__add(al, parent, count, &hit); if (he == NULL) return -ENOMEM; @@ -656,14 +605,12 @@ static int validate_chain(struct ip_callchain *chain, event_t *event) static int process_sample_event(event_t *event) { - char level; - struct symbol *sym = NULL; u64 ip = event->ip.ip; u64 period = 1; - struct map *map = NULL; void *more_data = event->ip.__more_data; struct ip_callchain *chain = NULL; int cpumode; + struct addr_location al; struct thread *thread = threads__findnew(event->ip.pid); if (sample_type & PERF_SAMPLE_PERIOD) { @@ -709,32 +656,26 @@ static int process_sample_event(event_t *event) cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; - if (cpumode == PERF_RECORD_MISC_KERNEL) { - level = 'k'; - sym = kernel_maps__find_function(ip, &map, NULL); - dump_printf(" ...... dso: %s\n", - map ? map->dso->long_name : ""); - } else if (cpumode == PERF_RECORD_MISC_USER) { - level = '.'; - sym = resolve_symbol(thread, &map, &ip); - - } else { - level = 'H'; - dump_printf(" ...... dso: [hypervisor]\n"); - } + thread__find_addr_location(thread, cpumode, + MAP__FUNCTION, ip, &al, NULL); + /* + * We have to do this here as we may have a dso with no symbol hit that + * has a name longer than the ones with symbols sampled. 
+ */ + if (al.map && !sort_dso.elide && !al.map->dso->slen_calculated) + dso__calc_col_width(al.map->dso); if (dso_list && - (!map || !map->dso || - !(strlist__has_entry(dso_list, map->dso->short_name) || - (map->dso->short_name != map->dso->long_name && - strlist__has_entry(dso_list, map->dso->long_name))))) + (!al.map || !al.map->dso || + !(strlist__has_entry(dso_list, al.map->dso->short_name) || + (al.map->dso->short_name != al.map->dso->long_name && + strlist__has_entry(dso_list, al.map->dso->long_name))))) return 0; - if (sym_list && sym && !strlist__has_entry(sym_list, sym->name)) + if (sym_list && al.sym && !strlist__has_entry(sym_list, al.sym->name)) return 0; - if (hist_entry__add(thread, map, sym, ip, - chain, level, period)) { + if (hist_entry__add(&al, chain, period)) { pr_debug("problem incrementing symbol count, skipping event\n"); return -1; } diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 7a3c0c7aad3..e0a374d0e43 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -929,55 +929,28 @@ static int symbol_filter(struct map *map, struct symbol *sym) static void event__process_sample(const event_t *self, int counter) { u64 ip = self->ip.ip; - struct map *map; struct sym_entry *syme; - struct symbol *sym; + struct addr_location al; u8 origin = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; switch (origin) { - case PERF_RECORD_MISC_USER: { - struct thread *thread; - + case PERF_RECORD_MISC_USER: if (hide_user_symbols) return; - - thread = threads__findnew(self->ip.pid); - if (thread == NULL) - return; - - map = thread__find_map(thread, MAP__FUNCTION, ip); - if (map != NULL) { - ip = map->map_ip(map, ip); - sym = map__find_symbol(map, ip, symbol_filter); - if (sym == NULL) - return; - userspace_samples++; - break; - } - } - /* - * If this is outside of all known maps, - * and is a negative address, try to look it - * up in the kernel dso, as it might be a - * vsyscall or vdso (which executes in user-mode). 
- */ - if ((long long)ip >= 0) - return; - /* Fall thru */ + break; case PERF_RECORD_MISC_KERNEL: if (hide_kernel_symbols) return; - - sym = kernel_maps__find_function(ip, &map, symbol_filter); - if (sym == NULL) - return; break; default: return; } - syme = symbol__priv(sym); + if (event__preprocess_sample(self, &al, symbol_filter) < 0 || + al.sym == NULL) + return; + syme = symbol__priv(al.sym); if (!syme->skip) { syme->count[counter]++; syme->origin = origin; @@ -986,8 +959,9 @@ static void event__process_sample(const event_t *self, int counter) if (list_empty(&syme->node) || !syme->node.next) __list_insert_active_sym(syme); pthread_mutex_unlock(&active_symbols_lock); + if (origin == PERF_RECORD_MISC_USER) + ++userspace_samples; ++samples; - return; } } diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 70b4aa03b47..233d7ad9bd7 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -249,3 +249,65 @@ int event__process_task(event_t *self) return 0; } + +void thread__find_addr_location(struct thread *self, u8 cpumode, + enum map_type type, u64 addr, + struct addr_location *al, + symbol_filter_t filter) +{ + struct thread *thread = al->thread = self; + + al->addr = addr; + + if (cpumode & PERF_RECORD_MISC_KERNEL) { + al->level = 'k'; + thread = kthread; + } else if (cpumode & PERF_RECORD_MISC_USER) + al->level = '.'; + else { + al->level = 'H'; + al->map = NULL; + al->sym = NULL; + return; + } +try_again: + al->map = thread__find_map(thread, type, al->addr); + if (al->map == NULL) { + /* + * If this is outside of all known maps, and is a negative + * address, try to look it up in the kernel dso, as it might be + * a vsyscall or vdso (which executes in user-mode). + * + * XXX This is nasty, we should have a symbol list in the + * "[vdso]" dso, but for now lets use the old trick of looking + * in the whole kernel symbol list. + */ + if ((long long)al->addr < 0 && thread != kthread) { + thread = kthread; + goto try_again; + } + al->sym = NULL; + } else { + al->addr = al->map->map_ip(al->map, al->addr); + al->sym = map__find_symbol(al->map, al->addr, filter); + } +} + +int event__preprocess_sample(const event_t *self, struct addr_location *al, + symbol_filter_t filter) +{ + u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; + struct thread *thread = threads__findnew(self->ip.pid); + + if (thread == NULL) + return -1; + + dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid); + + thread__find_addr_location(thread, cpumode, MAP__FUNCTION, + self->ip.ip, al, filter); + dump_printf(" ...... dso: %s\n", + al->map ? al->map->dso->long_name : + al->level == 'H' ? 
"[hypervisor]" : ""); + return 0; +} diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 13c12c75f97..a4cc8105cf6 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -152,4 +152,8 @@ int event__process_lost(event_t *self); int event__process_mmap(event_t *self); int event__process_task(event_t *self); +struct addr_location; +int event__preprocess_sample(const event_t *self, struct addr_location *al, + symbol_filter_t filter); + #endif /* __PERF_RECORD_H */ diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index f26cd9ba00f..0ebf6ee16ca 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -14,20 +14,19 @@ struct callchain_param callchain_param = { * histogram, sorted on item, collects counts */ -struct hist_entry *__hist_entry__add(struct thread *thread, struct map *map, - struct symbol *sym, +struct hist_entry *__hist_entry__add(struct addr_location *al, struct symbol *sym_parent, - u64 ip, u64 count, char level, bool *hit) + u64 count, bool *hit) { struct rb_node **p = &hist.rb_node; struct rb_node *parent = NULL; struct hist_entry *he; struct hist_entry entry = { - .thread = thread, - .map = map, - .sym = sym, - .ip = ip, - .level = level, + .thread = al->thread, + .map = al->map, + .sym = al->sym, + .ip = al->addr, + .level = al->level, .count = count, .parent = sym_parent, }; diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index ac2149c559b..3020db0c929 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -36,9 +36,9 @@ extern unsigned long total_fork; extern unsigned long total_unknown; extern unsigned long total_lost; -struct hist_entry *__hist_entry__add(struct thread *thread, struct map *map, - struct symbol *sym, struct symbol *parent, - u64 ip, u64 count, char level, bool *hit); +struct hist_entry *__hist_entry__add(struct addr_location *al, + struct symbol *parent, + u64 count, bool *hit); extern int64_t hist_entry__cmp(struct hist_entry *, struct hist_entry *); extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *); extern void hist_entry__free(struct hist_entry *); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index b788c2f5d67..fffcb937cdc 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -43,7 +43,8 @@ static struct symbol_conf symbol_conf__defaults = { .try_vmlinux_path = true, }; -static struct thread kthread_mem, *kthread = &kthread_mem; +static struct thread kthread_mem; +struct thread *kthread = &kthread_mem; bool dso__loaded(const struct dso *self, enum map_type type) { @@ -1178,29 +1179,6 @@ out: return ret; } -static struct symbol *thread__find_symbol(struct thread *self, u64 ip, - enum map_type type, struct map **mapp, - symbol_filter_t filter) -{ - struct map *map = thread__find_map(self, type, ip); - - if (mapp) - *mapp = map; - - if (map) { - ip = map->map_ip(map, ip); - return map__find_symbol(map, ip, filter); - } - - return NULL; -} - -struct symbol *kernel_maps__find_function(u64 ip, struct map **mapp, - symbol_filter_t filter) -{ - return thread__find_symbol(kthread, ip, MAP__FUNCTION, mapp, filter); -} - static struct map *thread__find_map_by_name(struct thread *self, char *name) { struct rb_node *nd; diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 3f9e4a4d83d..17003efa0b3 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -63,6 +63,14 @@ static inline void *symbol__priv(struct symbol *self) return ((void *)self) - symbol__priv_size; } +struct addr_location { + 
struct thread *thread; + struct map *map; + struct symbol *sym; + u64 addr; + char level; +}; + struct dso { struct list_head node; struct rb_root symbols[MAP__NR_TYPES]; @@ -105,6 +113,8 @@ size_t kernel_maps__fprintf(FILE *fp); int symbol__init(struct symbol_conf *conf); +struct thread; +struct thread *kthread; extern struct list_head dsos__user, dsos__kernel; extern struct dso *vdso; #endif /* __PERF_SYMBOL */ diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 2229f82cd63..603f5610861 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -285,3 +285,15 @@ size_t threads__fprintf(FILE *fp) return ret; } + +struct symbol *thread__find_symbol(struct thread *self, + enum map_type type, u64 addr, + symbol_filter_t filter) +{ + struct map *map = thread__find_map(self, type, addr); + + if (map != NULL) + return map__find_symbol(map, map->map_ip(map, addr), filter); + + return NULL; +} diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 59b0d9b577d..686d6e914d9 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -27,19 +27,30 @@ size_t thread__fprintf_maps(struct thread *self, FILE *fp); size_t threads__fprintf(FILE *fp); void maps__insert(struct rb_root *maps, struct map *map); -struct map *maps__find(struct rb_root *maps, u64 ip); - -struct symbol *kernel_maps__find_function(const u64 ip, struct map **mapp, - symbol_filter_t filter); +struct map *maps__find(struct rb_root *maps, u64 addr); static inline struct map *thread__find_map(struct thread *self, - enum map_type type, u64 ip) + enum map_type type, u64 addr) { - return self ? maps__find(&self->maps[type], ip) : NULL; + return self ? maps__find(&self->maps[type], addr) : NULL; } static inline void __thread__insert_map(struct thread *self, struct map *map) { maps__insert(&self->maps[map->type], map); } + +void thread__find_addr_location(struct thread *self, u8 cpumode, + enum map_type type, u64 addr, + struct addr_location *al, + symbol_filter_t filter); +struct symbol *thread__find_symbol(struct thread *self, + enum map_type type, u64 addr, + symbol_filter_t filter); + +static inline struct symbol * +thread__find_function(struct thread *self, u64 addr, symbol_filter_t filter) +{ + return thread__find_symbol(self, MAP__FUNCTION, addr, filter); +} #endif /* __PERF_THREAD_H */ From 956ffd027bedc4106b901eb6a50f0a6c6de4113d Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 25 Nov 2009 01:15:46 -0600 Subject: [PATCH 432/470] perf trace: Add scripting ops Adds an interface, scripting_ops, that when implemented for a particular scripting language enables built-in support for trace stream processing using that language. The interface is designed to enable full-fledged language interpreters to be embedded inside the perf executable and thereby make the full capabilities of the supported languages available for trace processing. See below for details on the interface. This patch also adds a couple command-line options to 'perf trace': The -s option option is used to specify the script to be run. Script names that can be used with -s take the form: [language spec:]scriptname[.ext] Scripting languages register a set of 'language specs' that can be used to specify scripts for the registered languages. The specs can be used either as prefixes or extensions. If [language spec:] is used, the script is taken as a script of the matching language regardless of any extension it might have. If [language spec:] is not used, [.ext] is used to look up the language it corresponds to. 
Language specs are case insensitive. e.g. Perl scripts can be specified in the following ways: Perl:scriptname pl:scriptname.py # extension ignored PL:scriptname scriptname.pl scriptname.perl The -g [language spec] option gives users an easy starting point for writing scripts in the specified language. Scripting support for a particular language can implement a generate_script() scripting op that outputs an empty (or near-empty) set of handlers for all the events contained in a given perf.data trace file - this option gives users a direct way to access that. Adding support for a scripting language --------------------------------------- The main thing that needs to be done do add support for a new language is to implement the scripting_ops interface: It consists of the following four functions: start_script() stop_script() process_event() generate_script() start_script() is called before any events are processed, and is meant to give the scripting language support an opportunity to set things up to receive events e.g. create and initialize an instance of a language interpreter. stop_script() is called after all events are processed, and is meant to give the scripting language support an opportunity to clean up e.g. destroy the interpreter instance, etc. process_event() is called once for each event and takes as its main parameter a pointer to the binary trace event record to be processed. The implementation is responsible for picking out the binary fields from the event record and sending them to the script handler function associated with that event e.g. a function derived from the event name it's meant to handle e.g. 'sched::sched_switch()'. The 'format' information for trace events can be used to parse the binary data and map it into a form usable by a given scripting language; see the Perl implemention in subsequent patches for one possible way to leverage the existing trace format parsing code in perf and map that info into specific scripting language types. generate_script() should generate a ready-to-run script for the current set of events in the trace, preferably with bodies that print out every field for each event. Again, look at the Perl implementation for clues as to how that can be done. This is an optional, but very useful op. Support for a given language should also add a language-specific setup function and call it from setup_scripting(). The language-specific setup function associates the the scripting ops for that language with one or more 'language specifiers' (see below) using script_spec_register(). When a script name is specified on the command line, the scripting ops associated with the specified language are used to instantiate and use the appropriate interpreter to process the trace stream. In general, it should be relatively easy to add support for a new language, especially if the language implementation supports an interface allowing an interpreter to be 'embedded' inside another program (in this case the containing program will be 'perf trace'). If so, it should be relatively straightforward to translate trace events into invocations of user-defined script functions where e.g. the function name corresponds to the event type and the function parameters correspond to the event fields. The event and field type information exported by the event tracing infrastructure (via the event 'format' files) should be enough to parse and send any piece of trace data to the user script. 
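As a rough illustration (not part of this patch), support for an imaginary "foo" language could wire itself up along these lines, using the scripting_ops and script_spec_register() interface described above; the names foo_* and setup_foo_scripting() are hypothetical:

    #include <stdio.h>
    #include "util/trace-event.h"  /* struct scripting_ops, script_spec_register() */

    static int foo_start_script(const char *script)
    {
            (void)script;   /* e.g. create an interpreter instance, load 'script' */
            return 0;
    }

    static int foo_stop_script(void)
    {
            /* e.g. destroy the interpreter instance */
            return 0;
    }

    static void foo_process_event(int cpu, void *data, int size,
                                  unsigned long long nsecs, char *comm)
    {
            /* parse the binary record in 'data' using the event format info
             * and call the matching handler function in the user script */
            (void)cpu; (void)data; (void)size; (void)nsecs; (void)comm;
    }

    static int foo_generate_script(const char *outfile)
    {
            (void)outfile;  /* emit skeleton handlers for every event in the trace */
            return 0;
    }

    static struct scripting_ops foo_scripting_ops = {
            .name            = "Foo",
            .start_script    = foo_start_script,
            .stop_script     = foo_stop_script,
            .process_event   = foo_process_event,
            .generate_script = foo_generate_script,
    };

    /* Called from setup_scripting(); registers a prefix/extension spec. */
    void setup_foo_scripting(void)
    {
            if (script_spec_register("foo", &foo_scripting_ops))
                    fprintf(stderr, "Can't register Foo script extension\n");
    }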
The easiest way to see how this can be done would be to look at the Perl implementation contained in perf/util/trace-event-perl.c/.h. There are a couple of other things that aren't covered by the scripting_ops or setup interface and are technically optional, but should be implemented if possible. One of these is support for 'flag' and 'symbolic' fields e.g. being able to use more human-readable values such as 'GFP_KERNEL' or HI/BLOCK_IOPOLL/TASKLET in place of raw flag values. See the Perl implementation to see how this can be done. The other thing is support for 'calling back' into the perf executable to access e.g. uncommon fields not passed by default into handler functions, or any metadata the implementation might want to make available to users via the language interface. Again, see the Perl implementation for examples. Signed-off-by: Tom Zanussi Cc: fweisbec@gmail.com Cc: rostedt@goodmis.org Cc: anton@samba.org Cc: hch@infradead.org LKML-Reference: <1259133352-23685-2-git-send-email-tzanussi@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-trace.c | 255 +++++++++++++++++++++++++++++++++- tools/perf/util/trace-event.h | 11 ++ 2 files changed, 261 insertions(+), 5 deletions(-) diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index a7750256c40..e96bb534b94 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -6,6 +6,46 @@ #include "util/thread.h" #include "util/header.h" +static char const *script_name; +static char const *generate_script_lang; + +static int default_start_script(const char *script __attribute((unused))) +{ + return 0; +} + +static int default_stop_script(void) +{ + return 0; +} + +static int default_generate_script(const char *outfile __attribute ((unused))) +{ + return 0; +} + +static struct scripting_ops default_scripting_ops = { + .start_script = default_start_script, + .stop_script = default_stop_script, + .process_event = print_event, + .generate_script = default_generate_script, +}; + +static struct scripting_ops *scripting_ops; + +static void setup_scripting(void) +{ + /* make sure PERF_EXEC_PATH is set for scripts */ + perf_set_argv_exec_path(perf_exec_path()); + + scripting_ops = &default_scripting_ops; +} + +static int cleanup_scripting(void) +{ + return scripting_ops->stop_script(); +} + #include "util/parse-options.h" #include "perf.h" @@ -13,11 +53,12 @@ #include "util/trace-event.h" #include "util/data_map.h" +#include "util/exec_cmd.h" -static char const *input_name = "perf.data"; +static char const *input_name = "perf.data"; -static struct perf_header *header; -static u64 sample_type; +static struct perf_header *header; +static u64 sample_type; static int process_sample_event(event_t *event) { @@ -69,7 +110,8 @@ static int process_sample_event(event_t *event) * field, although it should be the same than this perf * event pid */ - print_event(cpu, raw->data, raw->size, timestamp, thread->comm); + scripting_ops->process_event(cpu, raw->data, raw->size, + timestamp, thread->comm); } event__stats.total += period; @@ -105,6 +147,154 @@ static int __cmd_trace(void) 0, 0, &event__cwdlen, &event__cwd); } +struct script_spec { + struct list_head node; + struct scripting_ops *ops; + char spec[0]; +}; + +LIST_HEAD(script_specs); + +static struct script_spec *script_spec__new(const char *spec, + struct scripting_ops *ops) +{ + struct script_spec *s = malloc(sizeof(*s) + strlen(spec) + 1); + + if (s != NULL) { + strcpy(s->spec, spec); + s->ops = ops; + } + + return s; +} + +static void script_spec__delete(struct 
script_spec *s) +{ + free(s->spec); + free(s); +} + +static void script_spec__add(struct script_spec *s) +{ + list_add_tail(&s->node, &script_specs); +} + +static struct script_spec *script_spec__find(const char *spec) +{ + struct script_spec *s; + + list_for_each_entry(s, &script_specs, node) + if (strcasecmp(s->spec, spec) == 0) + return s; + return NULL; +} + +static struct script_spec *script_spec__findnew(const char *spec, + struct scripting_ops *ops) +{ + struct script_spec *s = script_spec__find(spec); + + if (s) + return s; + + s = script_spec__new(spec, ops); + if (!s) + goto out_delete_spec; + + script_spec__add(s); + + return s; + +out_delete_spec: + script_spec__delete(s); + + return NULL; +} + +int script_spec_register(const char *spec, struct scripting_ops *ops) +{ + struct script_spec *s; + + s = script_spec__find(spec); + if (s) + return -1; + + s = script_spec__findnew(spec, ops); + if (!s) + return -1; + + return 0; +} + +static struct scripting_ops *script_spec__lookup(const char *spec) +{ + struct script_spec *s = script_spec__find(spec); + if (!s) + return NULL; + + return s->ops; +} + +static void list_available_languages(void) +{ + struct script_spec *s; + + fprintf(stderr, "\n"); + fprintf(stderr, "Scripting language extensions (used in " + "perf trace -s [spec:]script.[spec]):\n\n"); + + list_for_each_entry(s, &script_specs, node) + fprintf(stderr, " %-42s [%s]\n", s->spec, s->ops->name); + + fprintf(stderr, "\n"); +} + +static int parse_scriptname(const struct option *opt __used, + const char *str, int unset __used) +{ + char spec[PATH_MAX]; + const char *script, *ext; + int len; + + if (strcmp(str, "list") == 0) { + list_available_languages(); + return 0; + } + + script = strchr(str, ':'); + if (script) { + len = script - str; + if (len >= PATH_MAX) { + fprintf(stderr, "invalid language specifier"); + return -1; + } + strncpy(spec, str, len); + spec[len] = '\0'; + scripting_ops = script_spec__lookup(spec); + if (!scripting_ops) { + fprintf(stderr, "invalid language specifier"); + return -1; + } + script++; + } else { + script = str; + ext = strchr(script, '.'); + if (!ext) { + fprintf(stderr, "invalid script extension"); + return -1; + } + scripting_ops = script_spec__lookup(++ext); + if (!scripting_ops) { + fprintf(stderr, "invalid script extension"); + return -1; + } + } + + script_name = strdup(script); + + return 0; +} + static const char * const annotate_usage[] = { "perf trace [] ", NULL @@ -117,13 +307,23 @@ static const struct option options[] = { "be more verbose (show symbol address, etc)"), OPT_BOOLEAN('l', "latency", &latency_format, "show latency attributes (irqs/preemption disabled, etc)"), + OPT_CALLBACK('s', "script", NULL, "name", + "script file name (lang:script name, script name, or *)", + parse_scriptname), + OPT_STRING('g', "gen-script", &generate_script_lang, "lang", + "generate perf-trace.xx script in specified language"), + OPT_END() }; int cmd_trace(int argc, const char **argv, const char *prefix __used) { + int err; + symbol__init(0); + setup_scripting(); + argc = parse_options(argc, argv, options, annotate_usage, 0); if (argc) { /* @@ -136,5 +336,50 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used) setup_pager(); - return __cmd_trace(); + if (generate_script_lang) { + struct stat perf_stat; + + int input = open(input_name, O_RDONLY); + if (input < 0) { + perror("failed to open file"); + exit(-1); + } + + err = fstat(input, &perf_stat); + if (err < 0) { + perror("failed to stat file"); + exit(-1); + } + + if 
(!perf_stat.st_size) { + fprintf(stderr, "zero-sized file, nothing to do!\n"); + exit(0); + } + + scripting_ops = script_spec__lookup(generate_script_lang); + if (!scripting_ops) { + fprintf(stderr, "invalid language specifier"); + return -1; + } + + header = perf_header__new(); + if (header == NULL) + return -1; + + perf_header__read(header, input); + err = scripting_ops->generate_script("perf-trace"); + goto out; + } + + if (script_name) { + err = scripting_ops->start_script(script_name); + if (err) + goto out; + } + + err = __cmd_trace(); + + cleanup_scripting(); +out: + return err; } diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index dd51c6872a1..e7aaf002e66 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -259,4 +259,15 @@ enum trace_flag_type { TRACE_FLAG_SOFTIRQ = 0x10, }; +struct scripting_ops { + const char *name; + int (*start_script) (const char *); + int (*stop_script) (void); + void (*process_event) (int cpu, void *data, int size, + unsigned long long nsecs, char *comm); + int (*generate_script) (const char *outfile); +}; + +int script_spec_register(const char *spec, struct scripting_ops *ops); + #endif /* __PERF_TRACE_EVENTS_H */ From eb9a42caa7a926beb935a22bc59d981b35f0b652 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 25 Nov 2009 01:15:47 -0600 Subject: [PATCH 433/470] perf trace: Add flag/symbolic format_flags It's useful to know whether a field is a flag or symbolic field for e.g. when generating scripts - it allows us to translate those fields specially rather than literally as plain numeric values. Signed-off-by: Tom Zanussi Cc: fweisbec@gmail.com Cc: rostedt@goodmis.org Cc: anton@samba.org Cc: hch@infradead.org LKML-Reference: <1259133352-23685-3-git-send-email-tzanussi@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/util/trace-event-parse.c | 17 +++++++++++++++++ tools/perf/util/trace-event.h | 2 ++ 2 files changed, 19 insertions(+) diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 7021dc1b0ca..85d7163a9fd 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -48,6 +48,11 @@ static unsigned long long input_buf_siz; static int cpus; static int long_size; +static int is_flag_field; +static int is_symbolic_field; + +static struct format_field * +find_any_field(struct event *event, const char *name); static void init_input_buf(char *buf, unsigned long long size) { @@ -1301,6 +1306,16 @@ process_entry(struct event *event __unused, struct print_arg *arg, arg->type = PRINT_FIELD; arg->field.name = field; + if (is_flag_field) { + arg->field.field = find_any_field(event, arg->field.name); + arg->field.field->flags |= FIELD_IS_FLAG; + is_flag_field = 0; + } else if (is_symbolic_field) { + arg->field.field = find_any_field(event, arg->field.name); + arg->field.field->flags |= FIELD_IS_SYMBOLIC; + is_symbolic_field = 0; + } + type = read_token(&token); *tok = token; @@ -1668,9 +1683,11 @@ process_arg_token(struct event *event, struct print_arg *arg, type = process_entry(event, arg, &token); } else if (strcmp(token, "__print_flags") == 0) { free_token(token); + is_flag_field = 1; type = process_flags(event, arg, &token); } else if (strcmp(token, "__print_symbolic") == 0) { free_token(token); + is_symbolic_field = 1; type = process_symbols(event, arg, &token); } else if (strcmp(token, "__get_str") == 0) { free_token(token); diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index 
e7aaf002e66..aeb915778ae 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -29,6 +29,8 @@ enum format_flags { FIELD_IS_SIGNED = 4, FIELD_IS_STRING = 8, FIELD_IS_DYNAMIC = 16, + FIELD_IS_FLAG = 32, + FIELD_IS_SYMBOLIC = 64, }; struct format_field { From 16c632de64a74644a46e7636db26b2cfb530ca13 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 25 Nov 2009 01:15:48 -0600 Subject: [PATCH 434/470] perf trace: Add Perl scripting support Implement trace_scripting_ops to make Perl a supported perf trace scripting language. Additionally adds code that allows Perl trace scripts to access the 'flag' and 'symbolic' (__print_flags(), __print_symbolic()) field information parsed from the trace format files. Also adds the Perl implementation of the generate_script() trace_scripting_op, which creates a ready-to-run perf trace Perl script based on existing trace data. Scripts generated by this implementation print out all the fields for each event mentioned in perf.data (and will detect and generate the proper scripting code for 'flag' and 'symbolic' fields), and will additionally generate handlers for the special 'trace_unhandled', 'trace_begin' and 'trace_end' handlers. Script authors can simply remove the printing code to implement their own custom event handling. Signed-off-by: Tom Zanussi Cc: fweisbec@gmail.com Cc: rostedt@goodmis.org Cc: anton@samba.org Cc: hch@infradead.org LKML-Reference: <1259133352-23685-4-git-send-email-tzanussi@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 13 + tools/perf/builtin-trace.c | 2 + tools/perf/util/trace-event-parse.c | 18 +- tools/perf/util/trace-event-perl.c | 552 ++++++++++++++++++++++++++++ tools/perf/util/trace-event-perl.h | 42 +++ tools/perf/util/trace-event.h | 7 + 6 files changed, 629 insertions(+), 5 deletions(-) create mode 100644 tools/perf/util/trace-event-perl.c create mode 100644 tools/perf/util/trace-event-perl.h diff --git a/tools/perf/Makefile b/tools/perf/Makefile index f1537a94a05..19e37cd14ae 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -407,6 +407,7 @@ LIB_OBJS += util/thread.o LIB_OBJS += util/trace-event-parse.o LIB_OBJS += util/trace-event-read.o LIB_OBJS += util/trace-event-info.o +LIB_OBJS += util/trace-event-perl.o LIB_OBJS += util/svghelper.o LIB_OBJS += util/sort.o LIB_OBJS += util/hist.o @@ -489,6 +490,15 @@ else LIB_OBJS += util/probe-finder.o endif +PERL_EMBED_LDOPTS = `perl -MExtUtils::Embed -e ldopts` +PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts` + +ifneq ($(shell sh -c "(echo '\#include '; echo '\#include '; echo 'int main(void) { perl_alloc(); return 0; }') | $(CC) -x c - $(PERL_EMBED_CCOPTS) -o /dev/null $(PERL_EMBED_LDOPTS) > /dev/null 2>&1 && echo y"), y) + BASIC_CFLAGS += -DNO_LIBPERL +else + ALL_LDFLAGS += $(PERL_EMBED_LDOPTS) +endif + ifdef NO_DEMANGLE BASIC_CFLAGS += -DNO_DEMANGLE else @@ -860,6 +870,9 @@ util/hweight.o: ../../lib/hweight.c PERF-CFLAGS util/find_next_bit.o: ../../lib/find_next_bit.c PERF-CFLAGS $(QUIET_CC)$(CC) -o util/find_next_bit.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< +util/trace-event-perl.o: util/trace-event-perl.c PERF-CFLAGS + $(QUIET_CC)$(CC) -o util/trace-event-perl.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter $< + perf-%$X: %.o $(PERFLIBS) $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS) diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index e96bb534b94..ca8ebf1ec64 100644 --- 
a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -38,6 +38,8 @@ static void setup_scripting(void) /* make sure PERF_EXEC_PATH is set for scripts */ perf_set_argv_exec_path(perf_exec_path()); + setup_perl_scripting(); + scripting_ops = &default_scripting_ops; } diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 85d7163a9fd..1f16495e559 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -1888,7 +1888,7 @@ find_any_field(struct event *event, const char *name) return find_field(event, name); } -static unsigned long long read_size(void *ptr, int size) +unsigned long long read_size(void *ptr, int size) { switch (size) { case 1: @@ -1973,7 +1973,7 @@ int trace_parse_common_type(void *data) "common_type"); } -static int parse_common_pid(void *data) +int trace_parse_common_pid(void *data) { static int pid_offset; static int pid_size; @@ -2025,6 +2025,14 @@ struct event *trace_find_event(int id) return event; } +struct event *trace_find_next_event(struct event *event) +{ + if (!event) + return event_list; + + return event->next; +} + static unsigned long long eval_num_arg(void *data, int size, struct event *event, struct print_arg *arg) { @@ -2164,7 +2172,7 @@ static const struct flag flags[] = { { "HRTIMER_RESTART", 1 }, }; -static unsigned long long eval_flag(const char *flag) +unsigned long long eval_flag(const char *flag) { int i; @@ -2694,7 +2702,7 @@ get_return_for_leaf(int cpu, int cur_pid, unsigned long long cur_func, if (!(event->flags & EVENT_FL_ISFUNCRET)) return NULL; - pid = parse_common_pid(next->data); + pid = trace_parse_common_pid(next->data); field = find_field(event, "func"); if (!field) die("function return does not have field func"); @@ -2980,7 +2988,7 @@ void print_event(int cpu, void *data, int size, unsigned long long nsecs, return; } - pid = parse_common_pid(data); + pid = trace_parse_common_pid(data); if (event->flags & (EVENT_FL_ISFUNCENT | EVENT_FL_ISFUNCRET)) return pretty_print_func_graph(data, size, event, cpu, diff --git a/tools/perf/util/trace-event-perl.c b/tools/perf/util/trace-event-perl.c new file mode 100644 index 00000000000..c56b08d704d --- /dev/null +++ b/tools/perf/util/trace-event-perl.c @@ -0,0 +1,552 @@ +/* + * trace-event-perl. Feed perf trace events to an embedded Perl interpreter. + * + * Copyright (C) 2009 Tom Zanussi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include +#include +#include +#include + +#include "../perf.h" +#include "util.h" +#include "trace-event.h" +#include "trace-event-perl.h" + +INTERP my_perl; + +#define FTRACE_MAX_EVENT \ + ((1 << (sizeof(unsigned short) * 8)) - 1) + +struct event *events[FTRACE_MAX_EVENT]; + +static struct scripting_context *scripting_context; + +static char *cur_field_name; +static int zero_flag_atom; + +static void define_symbolic_value(const char *ev_name, + const char *field_name, + const char *field_value, + const char *field_str) +{ + unsigned long long value; + dSP; + + value = eval_flag(field_value); + + ENTER; + SAVETMPS; + PUSHMARK(SP); + + XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); + XPUSHs(sv_2mortal(newSVpv(field_name, 0))); + XPUSHs(sv_2mortal(newSVuv(value))); + XPUSHs(sv_2mortal(newSVpv(field_str, 0))); + + PUTBACK; + if (get_cv("main::define_symbolic_value", 0)) + call_pv("main::define_symbolic_value", G_SCALAR); + SPAGAIN; + PUTBACK; + FREETMPS; + LEAVE; +} + +static void define_symbolic_values(struct print_flag_sym *field, + const char *ev_name, + const char *field_name) +{ + define_symbolic_value(ev_name, field_name, field->value, field->str); + if (field->next) + define_symbolic_values(field->next, ev_name, field_name); +} + +static void define_symbolic_field(const char *ev_name, + const char *field_name) +{ + dSP; + + ENTER; + SAVETMPS; + PUSHMARK(SP); + + XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); + XPUSHs(sv_2mortal(newSVpv(field_name, 0))); + + PUTBACK; + if (get_cv("main::define_symbolic_field", 0)) + call_pv("main::define_symbolic_field", G_SCALAR); + SPAGAIN; + PUTBACK; + FREETMPS; + LEAVE; +} + +static void define_flag_value(const char *ev_name, + const char *field_name, + const char *field_value, + const char *field_str) +{ + unsigned long long value; + dSP; + + value = eval_flag(field_value); + + ENTER; + SAVETMPS; + PUSHMARK(SP); + + XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); + XPUSHs(sv_2mortal(newSVpv(field_name, 0))); + XPUSHs(sv_2mortal(newSVuv(value))); + XPUSHs(sv_2mortal(newSVpv(field_str, 0))); + + PUTBACK; + if (get_cv("main::define_flag_value", 0)) + call_pv("main::define_flag_value", G_SCALAR); + SPAGAIN; + PUTBACK; + FREETMPS; + LEAVE; +} + +static void define_flag_values(struct print_flag_sym *field, + const char *ev_name, + const char *field_name) +{ + define_flag_value(ev_name, field_name, field->value, field->str); + if (field->next) + define_flag_values(field->next, ev_name, field_name); +} + +static void define_flag_field(const char *ev_name, + const char *field_name, + const char *delim) +{ + dSP; + + ENTER; + SAVETMPS; + PUSHMARK(SP); + + XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); + XPUSHs(sv_2mortal(newSVpv(field_name, 0))); + XPUSHs(sv_2mortal(newSVpv(delim, 0))); + + PUTBACK; + if (get_cv("main::define_flag_field", 0)) + call_pv("main::define_flag_field", G_SCALAR); + SPAGAIN; + PUTBACK; + FREETMPS; + LEAVE; +} + +static void define_event_symbols(struct event *event, + const char *ev_name, + struct print_arg *args) +{ + switch (args->type) { + case PRINT_NULL: + break; + case PRINT_ATOM: + define_flag_value(ev_name, cur_field_name, "0", + args->atom.atom); + zero_flag_atom = 0; + break; + case PRINT_FIELD: + if (cur_field_name) + free(cur_field_name); + cur_field_name = strdup(args->field.name); + break; + case PRINT_FLAGS: + 
define_event_symbols(event, ev_name, args->flags.field); + define_flag_field(ev_name, cur_field_name, args->flags.delim); + define_flag_values(args->flags.flags, ev_name, cur_field_name); + break; + case PRINT_SYMBOL: + define_event_symbols(event, ev_name, args->symbol.field); + define_symbolic_field(ev_name, cur_field_name); + define_symbolic_values(args->symbol.symbols, ev_name, + cur_field_name); + break; + case PRINT_STRING: + break; + case PRINT_TYPE: + define_event_symbols(event, ev_name, args->typecast.item); + break; + case PRINT_OP: + if (strcmp(args->op.op, ":") == 0) + zero_flag_atom = 1; + define_event_symbols(event, ev_name, args->op.left); + define_event_symbols(event, ev_name, args->op.right); + break; + default: + /* we should warn... */ + return; + } + + if (args->next) + define_event_symbols(event, ev_name, args->next); +} + +static inline struct event *find_cache_event(int type) +{ + static char ev_name[256]; + struct event *event; + + if (events[type]) + return events[type]; + + events[type] = event = trace_find_event(type); + if (!event) + return NULL; + + sprintf(ev_name, "%s::%s", event->system, event->name); + + define_event_symbols(event, ev_name, event->print_fmt.args); + + return event; +} + +static void perl_process_event(int cpu, void *data, + int size __attribute((unused)), + unsigned long long nsecs, char *comm) +{ + struct format_field *field; + static char handler[256]; + unsigned long long val; + unsigned long s, ns; + struct event *event; + int type; + int pid; + + dSP; + + type = trace_parse_common_type(data); + + event = find_cache_event(type); + if (!event) + die("ug! no event found for type %d", type); + + pid = trace_parse_common_pid(data); + + sprintf(handler, "%s::%s", event->system, event->name); + + s = nsecs / NSECS_PER_SEC; + ns = nsecs - s * NSECS_PER_SEC; + + scripting_context->event_data = data; + + ENTER; + SAVETMPS; + PUSHMARK(SP); + + XPUSHs(sv_2mortal(newSVpv(handler, 0))); + XPUSHs(sv_2mortal(newSViv(PTR2IV(scripting_context)))); + XPUSHs(sv_2mortal(newSVuv(cpu))); + XPUSHs(sv_2mortal(newSVuv(s))); + XPUSHs(sv_2mortal(newSVuv(ns))); + XPUSHs(sv_2mortal(newSViv(pid))); + XPUSHs(sv_2mortal(newSVpv(comm, 0))); + + /* common fields other than pid can be accessed via xsub fns */ + + for (field = event->format.fields; field; field = field->next) { + if (field->flags & FIELD_IS_STRING) { + int offset; + if (field->flags & FIELD_IS_DYNAMIC) { + offset = *(int *)(data + field->offset); + offset &= 0xffff; + } else + offset = field->offset; + XPUSHs(sv_2mortal(newSVpv((char *)data + offset, 0))); + } else { /* FIELD_IS_NUMERIC */ + val = read_size(data + field->offset, field->size); + if (field->flags & FIELD_IS_SIGNED) { + XPUSHs(sv_2mortal(newSViv(val))); + } else { + XPUSHs(sv_2mortal(newSVuv(val))); + } + } + } + + PUTBACK; + if (get_cv(handler, 0)) + call_pv(handler, G_SCALAR); + else if (get_cv("main::trace_unhandled", 0)) { + XPUSHs(sv_2mortal(newSVpv(handler, 0))); + XPUSHs(sv_2mortal(newSViv(PTR2IV(scripting_context)))); + XPUSHs(sv_2mortal(newSVuv(cpu))); + XPUSHs(sv_2mortal(newSVuv(nsecs))); + XPUSHs(sv_2mortal(newSViv(pid))); + XPUSHs(sv_2mortal(newSVpv(comm, 0))); + call_pv("main::trace_unhandled", G_SCALAR); + } + SPAGAIN; + PUTBACK; + FREETMPS; + LEAVE; +} + +static void run_start_sub(void) +{ + dSP; /* access to Perl stack */ + PUSHMARK(SP); + + if (get_cv("main::trace_begin", 0)) + call_pv("main::trace_begin", G_DISCARD | G_NOARGS); +} + +/* + * Start trace script + */ +static int perl_start_script(const char *script) +{ + const 
char *command_line[2] = { "", NULL }; + + command_line[1] = script; + + my_perl = perl_alloc(); + perl_construct(my_perl); + + if (perl_parse(my_perl, NULL, 2, (char **)command_line, (char **)NULL)) + return -1; + + perl_run(my_perl); + if (SvTRUE(ERRSV)) + return -1; + + run_start_sub(); + + fprintf(stderr, "perf trace started with Perl script %s\n\n", script); + + return 0; +} + +/* + * Stop trace script + */ +static int perl_stop_script(void) +{ + dSP; /* access to Perl stack */ + PUSHMARK(SP); + + if (get_cv("main::trace_end", 0)) + call_pv("main::trace_end", G_DISCARD | G_NOARGS); + + perl_destruct(my_perl); + perl_free(my_perl); + + fprintf(stderr, "\nperf trace Perl script stopped\n"); + + return 0; +} + +static int perl_generate_script(const char *outfile) +{ + struct event *event = NULL; + struct format_field *f; + char fname[PATH_MAX]; + int not_first, count; + FILE *ofp; + + sprintf(fname, "%s.pl", outfile); + ofp = fopen(fname, "w"); + if (ofp == NULL) { + fprintf(stderr, "couldn't open %s\n", fname); + return -1; + } + + fprintf(ofp, "# perf trace event handlers, " + "generated by perf trace -g perl\n"); + + fprintf(ofp, "# Licensed under the terms of the GNU GPL" + " License version 2\n\n"); + + fprintf(ofp, "# The common_* event handler fields are the most useful " + "fields common to\n"); + + fprintf(ofp, "# all events. They don't necessarily correspond to " + "the 'common_*' fields\n"); + + fprintf(ofp, "# in the format files. Those fields not available as " + "handler params can\n"); + + fprintf(ofp, "# be retrieved using Perl functions of the form " + "common_*($context).\n"); + + fprintf(ofp, "# See Context.pm for the list of available " + "functions.\n\n"); + + fprintf(ofp, "use lib \"$ENV{'PERF_EXEC_PATH'}/scripts/perl/" + "Perf-Trace-Util/lib\";\n"); + + fprintf(ofp, "use lib \"./Perf-Trace-Util/lib\";\n"); + fprintf(ofp, "use Perf::Trace::Core;\n"); + fprintf(ofp, "use Perf::Trace::Context;\n"); + fprintf(ofp, "use Perf::Trace::Util;\n\n"); + + fprintf(ofp, "sub trace_begin\n{\n\t# optional\n}\n\n"); + fprintf(ofp, "sub trace_end\n{\n\t# optional\n}\n\n"); + + while ((event = trace_find_next_event(event))) { + fprintf(ofp, "sub %s::%s\n{\n", event->system, event->name); + fprintf(ofp, "\tmy ("); + + fprintf(ofp, "$event_name, "); + fprintf(ofp, "$context, "); + fprintf(ofp, "$common_cpu, "); + fprintf(ofp, "$common_secs, "); + fprintf(ofp, "$common_nsecs,\n"); + fprintf(ofp, "\t $common_pid, "); + fprintf(ofp, "$common_comm,\n\t "); + + not_first = 0; + count = 0; + + for (f = event->format.fields; f; f = f->next) { + if (not_first++) + fprintf(ofp, ", "); + if (++count % 5 == 0) + fprintf(ofp, "\n\t "); + + fprintf(ofp, "$%s", f->name); + } + fprintf(ofp, ") = @_;\n\n"); + + fprintf(ofp, "\tprint_header($event_name, $common_cpu, " + "$common_secs, $common_nsecs,\n\t " + "$common_pid, $common_comm);\n\n"); + + fprintf(ofp, "\tprintf(\""); + + not_first = 0; + count = 0; + + for (f = event->format.fields; f; f = f->next) { + if (not_first++) + fprintf(ofp, ", "); + if (count && count % 4 == 0) { + fprintf(ofp, "\".\n\t \""); + } + count++; + + fprintf(ofp, "%s=", f->name); + if (f->flags & FIELD_IS_STRING || + f->flags & FIELD_IS_FLAG || + f->flags & FIELD_IS_SYMBOLIC) + fprintf(ofp, "%%s"); + else if (f->flags & FIELD_IS_SIGNED) + fprintf(ofp, "%%d"); + else + fprintf(ofp, "%%u"); + } + + fprintf(ofp, "\\n\",\n\t "); + + not_first = 0; + count = 0; + + for (f = event->format.fields; f; f = f->next) { + if (not_first++) + fprintf(ofp, ", "); + + if (++count % 5 == 0) 
+ fprintf(ofp, "\n\t "); + + if (f->flags & FIELD_IS_FLAG) { + if ((count - 1) % 5 != 0) { + fprintf(ofp, "\n\t "); + count = 4; + } + fprintf(ofp, "flag_str(\""); + fprintf(ofp, "%s::%s\", ", event->system, + event->name); + fprintf(ofp, "\"%s\", $%s)", f->name, + f->name); + } else if (f->flags & FIELD_IS_SYMBOLIC) { + if ((count - 1) % 5 != 0) { + fprintf(ofp, "\n\t "); + count = 4; + } + fprintf(ofp, "symbol_str(\""); + fprintf(ofp, "%s::%s\", ", event->system, + event->name); + fprintf(ofp, "\"%s\", $%s)", f->name, + f->name); + } else + fprintf(ofp, "$%s", f->name); + } + + fprintf(ofp, ");\n"); + fprintf(ofp, "}\n\n"); + } + + fprintf(ofp, "sub trace_unhandled\n{\n\tmy ($event_name, $context, " + "$common_cpu, $common_secs, $common_nsecs,\n\t " + "$common_pid, $common_comm) = @_;\n\n"); + + fprintf(ofp, "\tprint_header($event_name, $common_cpu, " + "$common_secs, $common_nsecs,\n\t $common_pid, " + "$common_comm);\n}\n\n"); + + fprintf(ofp, "sub print_header\n{\n" + "\tmy ($event_name, $cpu, $secs, $nsecs, $pid, $comm) = @_;\n\n" + "\tprintf(\"%%-20s %%5u %%05u.%%09u %%8u %%-20s \",\n\t " + "$event_name, $cpu, $secs, $nsecs, $pid, $comm);\n}"); + + fclose(ofp); + + fprintf(stderr, "generated Perl script: %s\n", fname); + + return 0; +} + +struct scripting_ops perl_scripting_ops = { + .name = "Perl", + .start_script = perl_start_script, + .stop_script = perl_stop_script, + .process_event = perl_process_event, + .generate_script = perl_generate_script, +}; + +#ifdef NO_LIBPERL +void setup_perl_scripting(void) +{ + fprintf(stderr, "Perl scripting not supported." + " Install libperl-dev[el] and rebuild perf to get it.\n"); +} +#else +void setup_perl_scripting(void) +{ + int err; + err = script_spec_register("Perl", &perl_scripting_ops); + if (err) + die("error registering Perl script extension"); + + err = script_spec_register("pl", &perl_scripting_ops); + if (err) + die("error registering pl script extension"); + + scripting_context = malloc(sizeof(struct scripting_context)); +} +#endif diff --git a/tools/perf/util/trace-event-perl.h b/tools/perf/util/trace-event-perl.h new file mode 100644 index 00000000000..6c94fa93013 --- /dev/null +++ b/tools/perf/util/trace-event-perl.h @@ -0,0 +1,42 @@ +#ifndef __PERF_TRACE_EVENT_PERL_H +#define __PERF_TRACE_EVENT_PERL_H +#ifdef NO_LIBPERL +typedef int INTERP; +#define dSP +#define ENTER +#define SAVETMPS +#define PUTBACK +#define SPAGAIN +#define FREETMPS +#define LEAVE +#define SP +#define ERRSV +#define G_SCALAR (0) +#define G_DISCARD (0) +#define G_NOARGS (0) +#define PUSHMARK(a) +#define SvTRUE(a) (0) +#define XPUSHs(s) +#define sv_2mortal(a) +#define newSVpv(a,b) +#define newSVuv(a) +#define newSViv(a) +#define get_cv(a,b) (0) +#define call_pv(a,b) (0) +#define perl_alloc() (0) +#define perl_construct(a) (0) +#define perl_parse(a,b,c,d,e) (0) +#define perl_run(a) (0) +#define perl_destruct(a) (0) +#define perl_free(a) (0) +#else +#include +#include +typedef PerlInterpreter * INTERP; +#endif + +struct scripting_context { + void *event_data; +}; + +#endif /* __PERF_TRACE_EVENT_PERL_H */ diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index aeb915778ae..b1e58d3d947 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -245,10 +245,14 @@ extern int latency_format; int parse_header_page(char *buf, unsigned long size); int trace_parse_common_type(void *data); +int trace_parse_common_pid(void *data); struct event *trace_find_event(int id); +struct event *trace_find_next_event(struct event 
*event); +unsigned long long read_size(void *ptr, int size); unsigned long long raw_field_value(struct event *event, const char *name, void *data); void *raw_field_ptr(struct event *event, const char *name, void *data); +unsigned long long eval_flag(const char *flag); int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events); @@ -272,4 +276,7 @@ struct scripting_ops { int script_spec_register(const char *spec, struct scripting_ops *ops); +extern struct scripting_ops perl_scripting_ops; +void setup_perl_scripting(void); + #endif /* __PERF_TRACE_EVENTS_H */ From bcefe12eff5dca6fdfa94ed85e5bee66380d5cd9 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 25 Nov 2009 01:15:49 -0600 Subject: [PATCH 435/470] perf trace: Add perf trace scripting support modules for Perl Add Perf-Trace-Util Perl module and some scripts that use it. Core.pm contains Perl code to define and access flag and symbolic fields. Util.pm contains general-purpose utility functions. Also adds some makefile bits to install them in libexec/perf-core/scripts/perl (or wherever perfexec_instdir points). Signed-off-by: Tom Zanussi Cc: fweisbec@gmail.com Cc: rostedt@goodmis.org Cc: anton@samba.org Cc: hch@infradead.org LKML-Reference: <1259133352-23685-5-git-send-email-tzanussi@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 7 + .../scripts/perl/Perf-Trace-Util/Makefile.PL | 12 ++ .../perf/scripts/perl/Perf-Trace-Util/README | 35 ++++ .../Perf-Trace-Util/lib/Perf/Trace/Core.pm | 157 ++++++++++++++++ .../Perf-Trace-Util/lib/Perf/Trace/Util.pm | 88 +++++++++ tools/perf/scripts/perl/rw-by-file.pl | 105 +++++++++++ tools/perf/scripts/perl/rw-by-pid.pl | 170 ++++++++++++++++++ tools/perf/scripts/perl/wakeup-latency.pl | 103 +++++++++++ tools/perf/scripts/perl/workqueue-stats.pl | 129 +++++++++++++ 9 files changed, 806 insertions(+) create mode 100644 tools/perf/scripts/perl/Perf-Trace-Util/Makefile.PL create mode 100644 tools/perf/scripts/perl/Perf-Trace-Util/README create mode 100644 tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Core.pm create mode 100644 tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm create mode 100644 tools/perf/scripts/perl/rw-by-file.pl create mode 100644 tools/perf/scripts/perl/rw-by-pid.pl create mode 100644 tools/perf/scripts/perl/wakeup-latency.pl create mode 100644 tools/perf/scripts/perl/workqueue-stats.pl diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 19e37cd14ae..efbc0e86421 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -980,6 +980,13 @@ export perfexec_instdir install: all $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)' $(INSTALL) perf$X '$(DESTDIR_SQ)$(bindir_SQ)' + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace' + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin' + $(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace' + $(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl' + $(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin' + $(INSTALL) scripts/perl/Perf-Trace-Util/Makefile.PL -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util' + $(INSTALL) scripts/perl/Perf-Trace-Util/README -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util' ifdef BUILT_INS $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' $(INSTALL) $(BUILT_INS) 
'$(DESTDIR_SQ)$(perfexec_instdir_SQ)' diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Makefile.PL b/tools/perf/scripts/perl/Perf-Trace-Util/Makefile.PL new file mode 100644 index 00000000000..b0de02e6950 --- /dev/null +++ b/tools/perf/scripts/perl/Perf-Trace-Util/Makefile.PL @@ -0,0 +1,12 @@ +use 5.010000; +use ExtUtils::MakeMaker; +# See lib/ExtUtils/MakeMaker.pm for details of how to influence +# the contents of the Makefile that is written. +WriteMakefile( + NAME => 'Perf::Trace::Util', + VERSION_FROM => 'lib/Perf/Trace/Util.pm', # finds $VERSION + PREREQ_PM => {}, # e.g., Module::Name => 1.1 + ($] >= 5.005 ? ## Add these new keywords supported since 5.005 + (ABSTRACT_FROM => 'lib/Perf/Trace/Util.pm', # retrieve abstract from module + AUTHOR => 'Tom Zanussi ') : ()), +); diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/README b/tools/perf/scripts/perl/Perf-Trace-Util/README new file mode 100644 index 00000000000..0a58378f083 --- /dev/null +++ b/tools/perf/scripts/perl/Perf-Trace-Util/README @@ -0,0 +1,35 @@ +Perf-Trace-Util version 0.01 +============================ + +This module contains utility functions for use with perf trace. + +INSTALLATION + +Building perf with perf trace Perl scripting should install this +module in the right place. + +You should make sure libperl is installed first e.g. apt-get install +libperl-dev. + +DEPENDENCIES + +This module requires these other modules and libraries: + + blah blah blah + +COPYRIGHT AND LICENCE + +Put the correct copyright and licence information here. + +Copyright (C) 2009 by Tom Zanussi + +This library is free software; you can redistribute it and/or modify +it under the same terms as Perl itself, either Perl version 5.10.0 or, +at your option, any later version of Perl 5 you may have available. + +Alternatively, this software may be distributed under the terms of the +GNU General Public License ("GPL") version 2 as published by the Free +Software Foundation. 
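
A quick sanity check, assumed here rather than spelled out in this README: if the ExtUtils::Embed probe in the Makefile fails, perf is silently built with -DNO_LIBPERL and setup_perl_scripting() only prints a warning at run time. Whether the Perl backend was actually registered can be confirmed through the "list" spec handled by parse_scriptname() earlier in the series:

  perf trace -s list

which should print the registered extensions ("Perl" and "pl") together with the name of the Perl scripting_ops.
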
+ + + diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Core.pm b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Core.pm new file mode 100644 index 00000000000..fd250fb7be1 --- /dev/null +++ b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Core.pm @@ -0,0 +1,157 @@ +package Perf::Trace::Core; + +use 5.010000; +use strict; +use warnings; + +require Exporter; + +our @ISA = qw(Exporter); + +our %EXPORT_TAGS = ( 'all' => [ qw( +) ] ); + +our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } ); + +our @EXPORT = qw( +define_flag_field define_flag_value flag_str dump_flag_fields +define_symbolic_field define_symbolic_value symbol_str dump_symbolic_fields +); + +our $VERSION = '0.01'; + +my %flag_fields; +my %symbolic_fields; + +sub flag_str +{ + my ($event_name, $field_name, $value) = @_; + + my $string; + + if ($flag_fields{$event_name}{$field_name}) { + my $print_delim = 0; + foreach my $idx (sort {$a <=> $b} keys %{$flag_fields{$event_name}{$field_name}{"values"}}) { + if (!$value && !$idx) { + $string .= "$flag_fields{$event_name}{$field_name}{'values'}{$idx}"; + last; + } + if ($idx && ($value & $idx) == $idx) { + if ($print_delim && $flag_fields{$event_name}{$field_name}{'delim'}) { + $string .= " $flag_fields{$event_name}{$field_name}{'delim'} "; + } + $string .= "$flag_fields{$event_name}{$field_name}{'values'}{$idx}"; + $print_delim = 1; + $value &= ~$idx; + } + } + } + + return $string; +} + +sub define_flag_field +{ + my ($event_name, $field_name, $delim) = @_; + + $flag_fields{$event_name}{$field_name}{"delim"} = $delim; +} + +sub define_flag_value +{ + my ($event_name, $field_name, $value, $field_str) = @_; + + $flag_fields{$event_name}{$field_name}{"values"}{$value} = $field_str; +} + +sub dump_flag_fields +{ + for my $event (keys %flag_fields) { + print "event $event:\n"; + for my $field (keys %{$flag_fields{$event}}) { + print " field: $field:\n"; + print " delim: $flag_fields{$event}{$field}{'delim'}\n"; + foreach my $idx (sort {$a <=> $b} keys %{$flag_fields{$event}{$field}{"values"}}) { + print " value $idx: $flag_fields{$event}{$field}{'values'}{$idx}\n"; + } + } + } +} + +sub symbol_str +{ + my ($event_name, $field_name, $value) = @_; + + if ($symbolic_fields{$event_name}{$field_name}) { + foreach my $idx (sort {$a <=> $b} keys %{$symbolic_fields{$event_name}{$field_name}{"values"}}) { + if (!$value && !$idx) { + return "$symbolic_fields{$event_name}{$field_name}{'values'}{$idx}"; + last; + } + if ($value == $idx) { + return "$symbolic_fields{$event_name}{$field_name}{'values'}{$idx}"; + } + } + } + + return undef; +} + +sub define_symbolic_field +{ + my ($event_name, $field_name) = @_; + + # nothing to do, really +} + +sub define_symbolic_value +{ + my ($event_name, $field_name, $value, $field_str) = @_; + + $symbolic_fields{$event_name}{$field_name}{"values"}{$value} = $field_str; +} + +sub dump_symbolic_fields +{ + for my $event (keys %symbolic_fields) { + print "event $event:\n"; + for my $field (keys %{$symbolic_fields{$event}}) { + print " field: $field:\n"; + foreach my $idx (sort {$a <=> $b} keys %{$symbolic_fields{$event}{$field}{"values"}}) { + print " value $idx: $symbolic_fields{$event}{$field}{'values'}{$idx}\n"; + } + } + } +} + +1; +__END__ +=head1 NAME + +Perf::Trace::Core - Perl extension for perf trace + +=head1 SYNOPSIS + + use Perf::Trace::Core + +=head1 SEE ALSO + +Perf (trace) documentation + +=head1 AUTHOR + +Tom Zanussi, Etzanussi@gmail.com + +=head1 COPYRIGHT AND LICENSE + +Copyright (C) 2009 by Tom Zanussi + +This library is 
free software; you can redistribute it and/or modify +it under the same terms as Perl itself, either Perl version 5.10.0 or, +at your option, any later version of Perl 5 you may have available. + +Alternatively, this software may be distributed under the terms of the +GNU General Public License ("GPL") version 2 as published by the Free +Software Foundation. + +=cut diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm new file mode 100644 index 00000000000..052f132ced2 --- /dev/null +++ b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm @@ -0,0 +1,88 @@ +package Perf::Trace::Util; + +use 5.010000; +use strict; +use warnings; + +require Exporter; + +our @ISA = qw(Exporter); + +our %EXPORT_TAGS = ( 'all' => [ qw( +) ] ); + +our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } ); + +our @EXPORT = qw( +avg nsecs nsecs_secs nsecs_nsecs nsecs_usecs print_nsecs +); + +our $VERSION = '0.01'; + +sub avg +{ + my ($total, $n) = @_; + + return $total / $n; +} + +my $NSECS_PER_SEC = 1000000000; + +sub nsecs +{ + my ($secs, $nsecs) = @_; + + return $secs * $NSECS_PER_SEC + $nsecs; +} + +sub nsecs_secs { + my ($nsecs) = @_; + + return $nsecs / $NSECS_PER_SEC; +} + +sub nsecs_nsecs { + my ($nsecs) = @_; + + return $nsecs - nsecs_secs($nsecs); +} + +sub nsecs_str { + my ($nsecs) = @_; + + my $str = sprintf("%5u.%09u", nsecs_secs($nsecs), nsecs_nsecs($nsecs)); + + return $str; +} + +1; +__END__ +=head1 NAME + +Perf::Trace::Util - Perl extension for perf trace + +=head1 SYNOPSIS + + use Perf::Trace::Util; + +=head1 SEE ALSO + +Perf (trace) documentation + +=head1 AUTHOR + +Tom Zanussi, Etzanussi@gmail.com + +=head1 COPYRIGHT AND LICENSE + +Copyright (C) 2009 by Tom Zanussi + +This library is free software; you can redistribute it and/or modify +it under the same terms as Perl itself, either Perl version 5.10.0 or, +at your option, any later version of Perl 5 you may have available. + +Alternatively, this software may be distributed under the terms of the +GNU General Public License ("GPL") version 2 as published by the Free +Software Foundation. + +=cut diff --git a/tools/perf/scripts/perl/rw-by-file.pl b/tools/perf/scripts/perl/rw-by-file.pl new file mode 100644 index 00000000000..61f91561d84 --- /dev/null +++ b/tools/perf/scripts/perl/rw-by-file.pl @@ -0,0 +1,105 @@ +#!/usr/bin/perl -w +# (c) 2009, Tom Zanussi +# Licensed under the terms of the GNU GPL License version 2 + +# Display r/w activity for files read/written to for a given program + +# The common_* event handler fields are the most useful fields common to +# all events. They don't necessarily correspond to the 'common_*' fields +# in the status files. Those fields not available as handler params can +# be retrieved via script functions of the form get_common_*(). 
+ +use 5.010000; +use strict; +use warnings; + +use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib"; +use lib "./Perf-Trace-Util/lib"; +use Perf::Trace::Core; +use Perf::Trace::Util; + +# change this to the comm of the program you're interested in +my $for_comm = "perf"; + +my %reads; +my %writes; + +sub syscalls::sys_enter_read +{ + my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, + $common_pid, $common_comm, $nr, $fd, $buf, $count) = @_; + + if ($common_comm eq $for_comm) { + $reads{$fd}{bytes_requested} += $count; + $reads{$fd}{total_reads}++; + } +} + +sub syscalls::sys_enter_write +{ + my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, + $common_pid, $common_comm, $nr, $fd, $buf, $count) = @_; + + if ($common_comm eq $for_comm) { + $writes{$fd}{bytes_written} += $count; + $writes{$fd}{total_writes}++; + } +} + +sub trace_end +{ + printf("file read counts for $for_comm:\n\n"); + + printf("%6s %10s %10s\n", "fd", "# reads", "bytes_requested"); + printf("%6s %10s %10s\n", "------", "----------", "-----------"); + + foreach my $fd (sort {$reads{$b}{bytes_requested} <=> + $reads{$a}{bytes_requested}} keys %reads) { + my $total_reads = $reads{$fd}{total_reads}; + my $bytes_requested = $reads{$fd}{bytes_requested}; + printf("%6u %10u %10u\n", $fd, $total_reads, $bytes_requested); + } + + printf("\nfile write counts for $for_comm:\n\n"); + + printf("%6s %10s %10s\n", "fd", "# writes", "bytes_written"); + printf("%6s %10s %10s\n", "------", "----------", "-----------"); + + foreach my $fd (sort {$writes{$b}{bytes_written} <=> + $writes{$a}{bytes_written}} keys %writes) { + my $total_writes = $writes{$fd}{total_writes}; + my $bytes_written = $writes{$fd}{bytes_written}; + printf("%6u %10u %10u\n", $fd, $total_writes, $bytes_written); + } + + print_unhandled(); +} + +my %unhandled; + +sub print_unhandled +{ + if ((scalar keys %unhandled) == 0) { + return; + } + + print "\nunhandled events:\n\n"; + + printf("%-40s %10s\n", "event", "count"); + printf("%-40s %10s\n", "----------------------------------------", + "-----------"); + + foreach my $event_name (keys %unhandled) { + printf("%-40s %10d\n", $event_name, $unhandled{$event_name}); + } +} + +sub trace_unhandled +{ + my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, + $common_pid, $common_comm) = @_; + + $unhandled{$event_name}++; +} + + diff --git a/tools/perf/scripts/perl/rw-by-pid.pl b/tools/perf/scripts/perl/rw-by-pid.pl new file mode 100644 index 00000000000..da601fae1a0 --- /dev/null +++ b/tools/perf/scripts/perl/rw-by-pid.pl @@ -0,0 +1,170 @@ +#!/usr/bin/perl -w +# (c) 2009, Tom Zanussi +# Licensed under the terms of the GNU GPL License version 2 + +# Display r/w activity for all processes + +# The common_* event handler fields are the most useful fields common to +# all events. They don't necessarily correspond to the 'common_*' fields +# in the status files. Those fields not available as handler params can +# be retrieved via script functions of the form get_common_*(). 
+ +use 5.010000; +use strict; +use warnings; + +use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib"; +use lib "./Perf-Trace-Util/lib"; +use Perf::Trace::Core; +use Perf::Trace::Util; + +my %reads; +my %writes; + +sub syscalls::sys_exit_read +{ + my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, + $common_pid, $common_comm, + $nr, $ret) = @_; + + if ($ret > 0) { + $reads{$common_pid}{bytes_read} += $ret; + } else { + if (!defined ($reads{$common_pid}{bytes_read})) { + $reads{$common_pid}{bytes_read} = 0; + } + $reads{$common_pid}{errors}{$ret}++; + } +} + +sub syscalls::sys_enter_read +{ + my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, + $common_pid, $common_comm, + $nr, $fd, $buf, $count) = @_; + + $reads{$common_pid}{bytes_requested} += $count; + $reads{$common_pid}{total_reads}++; + $reads{$common_pid}{comm} = $common_comm; +} + +sub syscalls::sys_exit_write +{ + my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, + $common_pid, $common_comm, + $nr, $ret) = @_; + + if ($ret <= 0) { + $writes{$common_pid}{errors}{$ret}++; + } +} + +sub syscalls::sys_enter_write +{ + my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, + $common_pid, $common_comm, + $nr, $fd, $buf, $count) = @_; + + $writes{$common_pid}{bytes_written} += $count; + $writes{$common_pid}{total_writes}++; + $writes{$common_pid}{comm} = $common_comm; +} + +sub trace_end +{ + printf("read counts by pid:\n\n"); + + printf("%6s %20s %10s %10s %10s\n", "pid", "comm", + "# reads", "bytes_requested", "bytes_read"); + printf("%6s %-20s %10s %10s %10s\n", "------", "--------------------", + "-----------", "----------", "----------"); + + foreach my $pid (sort {$reads{$b}{bytes_read} <=> + $reads{$a}{bytes_read}} keys %reads) { + my $comm = $reads{$pid}{comm}; + my $total_reads = $reads{$pid}{total_reads}; + my $bytes_requested = $reads{$pid}{bytes_requested}; + my $bytes_read = $reads{$pid}{bytes_read}; + + printf("%6s %-20s %10s %10s %10s\n", $pid, $comm, + $total_reads, $bytes_requested, $bytes_read); + } + + printf("\nfailed reads by pid:\n\n"); + + printf("%6s %20s %6s %10s\n", "pid", "comm", "error #", "# errors"); + printf("%6s %20s %6s %10s\n", "------", "--------------------", + "------", "----------"); + + foreach my $pid (keys %reads) { + my $comm = $reads{$pid}{comm}; + foreach my $err (sort {$reads{$b}{comm} cmp $reads{$a}{comm}} + keys %{$reads{$pid}{errors}}) { + my $errors = $reads{$pid}{errors}{$err}; + + printf("%6d %-20s %6d %10s\n", $pid, $comm, $err, $errors); + } + } + + printf("\nwrite counts by pid:\n\n"); + + printf("%6s %20s %10s %10s\n", "pid", "comm", + "# writes", "bytes_written"); + printf("%6s %-20s %10s %10s\n", "------", "--------------------", + "-----------", "----------"); + + foreach my $pid (sort {$writes{$b}{bytes_written} <=> + $writes{$a}{bytes_written}} keys %writes) { + my $comm = $writes{$pid}{comm}; + my $total_writes = $writes{$pid}{total_writes}; + my $bytes_written = $writes{$pid}{bytes_written}; + + printf("%6s %-20s %10s %10s\n", $pid, $comm, + $total_writes, $bytes_written); + } + + printf("\nfailed writes by pid:\n\n"); + + printf("%6s %20s %6s %10s\n", "pid", "comm", "error #", "# errors"); + printf("%6s %20s %6s %10s\n", "------", "--------------------", + "------", "----------"); + + foreach my $pid (keys %writes) { + my $comm = $writes{$pid}{comm}; + foreach my $err (sort {$writes{$b}{comm} cmp $writes{$a}{comm}} + keys %{$writes{$pid}{errors}}) { + my $errors = $writes{$pid}{errors}{$err}; + + 
printf("%6d %-20s %6d %10s\n", $pid, $comm, $err, $errors); + } + } + + print_unhandled(); +} + +my %unhandled; + +sub print_unhandled +{ + if ((scalar keys %unhandled) == 0) { + return; + } + + print "\nunhandled events:\n\n"; + + printf("%-40s %10s\n", "event", "count"); + printf("%-40s %10s\n", "----------------------------------------", + "-----------"); + + foreach my $event_name (keys %unhandled) { + printf("%-40s %10d\n", $event_name, $unhandled{$event_name}); + } +} + +sub trace_unhandled +{ + my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, + $common_pid, $common_comm) = @_; + + $unhandled{$event_name}++; +} diff --git a/tools/perf/scripts/perl/wakeup-latency.pl b/tools/perf/scripts/perl/wakeup-latency.pl new file mode 100644 index 00000000000..ed58ef284e2 --- /dev/null +++ b/tools/perf/scripts/perl/wakeup-latency.pl @@ -0,0 +1,103 @@ +#!/usr/bin/perl -w +# (c) 2009, Tom Zanussi +# Licensed under the terms of the GNU GPL License version 2 + +# Display avg/min/max wakeup latency + +# The common_* event handler fields are the most useful fields common to +# all events. They don't necessarily correspond to the 'common_*' fields +# in the status files. Those fields not available as handler params can +# be retrieved via script functions of the form get_common_*(). + +use 5.010000; +use strict; +use warnings; + +use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib"; +use lib "./Perf-Trace-Util/lib"; +use Perf::Trace::Core; +use Perf::Trace::Util; + +my %last_wakeup; + +my $max_wakeup_latency; +my $min_wakeup_latency; +my $total_wakeup_latency; +my $total_wakeups; + +sub sched::sched_switch +{ + my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, + $common_pid, $common_comm, + $prev_comm, $prev_pid, $prev_prio, $prev_state, $next_comm, $next_pid, + $next_prio) = @_; + + my $wakeup_ts = $last_wakeup{$common_cpu}{ts}; + if ($wakeup_ts) { + my $switch_ts = nsecs($common_secs, $common_nsecs); + my $wakeup_latency = $switch_ts - $wakeup_ts; + if ($wakeup_latency > $max_wakeup_latency) { + $max_wakeup_latency = $wakeup_latency; + } + if ($wakeup_latency < $min_wakeup_latency) { + $min_wakeup_latency = $wakeup_latency; + } + $total_wakeup_latency += $wakeup_latency; + $total_wakeups++; + } + $last_wakeup{$common_cpu}{ts} = 0; +} + +sub sched::sched_wakeup +{ + my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, + $common_pid, $common_comm, + $comm, $pid, $prio, $success, $target_cpu) = @_; + + $last_wakeup{$target_cpu}{ts} = nsecs($common_secs, $common_nsecs); +} + +sub trace_begin +{ + $min_wakeup_latency = 1000000000; + $max_wakeup_latency = 0; +} + +sub trace_end +{ + printf("wakeup_latency stats:\n\n"); + print "total_wakeups: $total_wakeups\n"; + printf("avg_wakeup_latency (ns): %u\n", + avg($total_wakeup_latency, $total_wakeups)); + printf("min_wakeup_latency (ns): %u\n", $min_wakeup_latency); + printf("max_wakeup_latency (ns): %u\n", $max_wakeup_latency); + + print_unhandled(); +} + +my %unhandled; + +sub print_unhandled +{ + if ((scalar keys %unhandled) == 0) { + return; + } + + print "\nunhandled events:\n\n"; + + printf("%-40s %10s\n", "event", "count"); + printf("%-40s %10s\n", "----------------------------------------", + "-----------"); + + foreach my $event_name (keys %unhandled) { + printf("%-40s %10d\n", $event_name, $unhandled{$event_name}); + } +} + +sub trace_unhandled +{ + my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, + $common_pid, $common_comm) = @_; + + $unhandled{$event_name}++; +} 
diff --git a/tools/perf/scripts/perl/workqueue-stats.pl b/tools/perf/scripts/perl/workqueue-stats.pl new file mode 100644 index 00000000000..511302c8a49 --- /dev/null +++ b/tools/perf/scripts/perl/workqueue-stats.pl @@ -0,0 +1,129 @@ +#!/usr/bin/perl -w +# (c) 2009, Tom Zanussi +# Licensed under the terms of the GNU GPL License version 2 + +# Displays workqueue stats +# +# Usage: +# +# perf record -c 1 -f -a -R -e workqueue:workqueue_creation -e +# workqueue:workqueue_destruction -e workqueue:workqueue_execution +# -e workqueue:workqueue_insertion +# +# perf trace -p -s tools/perf/scripts/perl/workqueue-stats.pl + +use 5.010000; +use strict; +use warnings; + +use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib"; +use lib "./Perf-Trace-Util/lib"; +use Perf::Trace::Core; +use Perf::Trace::Util; + +my @cpus; + +sub workqueue::workqueue_destruction +{ + my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, + $common_pid, $common_comm, + $thread_comm, $thread_pid) = @_; + + $cpus[$common_cpu]{$thread_pid}{destroyed}++; + $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm; +} + +sub workqueue::workqueue_creation +{ + my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, + $common_pid, $common_comm, + $thread_comm, $thread_pid, $cpu) = @_; + + $cpus[$common_cpu]{$thread_pid}{created}++; + $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm; +} + +sub workqueue::workqueue_execution +{ + my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, + $common_pid, $common_comm, + $thread_comm, $thread_pid, $func) = @_; + + $cpus[$common_cpu]{$thread_pid}{executed}++; + $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm; +} + +sub workqueue::workqueue_insertion +{ + my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, + $common_pid, $common_comm, + $thread_comm, $thread_pid, $func) = @_; + + $cpus[$common_cpu]{$thread_pid}{inserted}++; + $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm; +} + +sub trace_end +{ + print "workqueue work stats:\n\n"; + my $cpu = 0; + printf("%3s %6s %6s\t%-20s\n", "cpu", "ins", "exec", "name"); + printf("%3s %6s %6s\t%-20s\n", "---", "---", "----", "----"); + foreach my $pidhash (@cpus) { + while ((my $pid, my $wqhash) = each %$pidhash) { + my $ins = $$wqhash{'inserted'}; + my $exe = $$wqhash{'executed'}; + my $comm = $$wqhash{'comm'}; + if ($ins || $exe) { + printf("%3u %6u %6u\t%-20s\n", $cpu, $ins, $exe, $comm); + } + } + $cpu++; + } + + $cpu = 0; + print "\nworkqueue lifecycle stats:\n\n"; + printf("%3s %6s %6s\t%-20s\n", "cpu", "created", "destroyed", "name"); + printf("%3s %6s %6s\t%-20s\n", "---", "-------", "---------", "----"); + foreach my $pidhash (@cpus) { + while ((my $pid, my $wqhash) = each %$pidhash) { + my $created = $$wqhash{'created'}; + my $destroyed = $$wqhash{'destroyed'}; + my $comm = $$wqhash{'comm'}; + if ($created || $destroyed) { + printf("%3u %6u %6u\t%-20s\n", $cpu, $created, $destroyed, + $comm); + } + } + $cpu++; + } + + print_unhandled(); +} + +my %unhandled; + +sub print_unhandled +{ + if ((scalar keys %unhandled) == 0) { + return; + } + + print "\nunhandled events:\n\n"; + + printf("%-40s %10s\n", "event", "count"); + printf("%-40s %10s\n", "----------------------------------------", + "-----------"); + + foreach my $event_name (keys %unhandled) { + printf("%-40s %10d\n", $event_name, $unhandled{$event_name}); + } +} + +sub trace_unhandled +{ + my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, + $common_pid, $common_comm) = @_; + + 
$unhandled{$event_name}++; +} From d1b93772be78486397693fc39d3ddea3fda90105 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 25 Nov 2009 01:15:50 -0600 Subject: [PATCH 436/470] perf trace: Add interface to access perf data from Perl handlers The Perl scripting support for perf trace allows most of a trace event's data to be accessed directly as handler arguments, but not all of it e.g. the less common fields aren't passed in. To give scripts access to the other fields and/or any other data or metadata in the main perf executable that might be useful, a way to access the C data in perf from Perl is needed; this patch uses the Perl XS facility to do it for the common_xxx event fields not passed to handler functions. Context.pm exports three functions to Perl scripts that access fields for the current event by calling back into perf: common_pc(), common_flags() and common_lock_depth(). Support for common_flags() field values was added to Core.pm and a script used to sanity check these and other basic scripting features, check-perf-trace.pl, was also added. Signed-off-by: Tom Zanussi Cc: fweisbec@gmail.com Cc: rostedt@goodmis.org Cc: anton@samba.org Cc: hch@infradead.org LKML-Reference: <1259133352-23685-6-git-send-email-tzanussi@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 6 +- .../scripts/perl/Perf-Trace-Util/Context.c | 134 ++++++++++++++++++ .../scripts/perl/Perf-Trace-Util/Context.xs | 41 ++++++ .../scripts/perl/Perf-Trace-Util/Makefile.PL | 11 +- .../perf/scripts/perl/Perf-Trace-Util/README | 34 ++++- .../Perf-Trace-Util/lib/Perf/Trace/Context.pm | 55 +++++++ .../Perf-Trace-Util/lib/Perf/Trace/Core.pm | 35 +++++ .../perf/scripts/perl/Perf-Trace-Util/typemap | 1 + tools/perf/scripts/perl/check-perf-trace.pl | 106 ++++++++++++++ tools/perf/util/trace-event-parse.c | 6 +- tools/perf/util/trace-event-perl.c | 46 +++++- tools/perf/util/trace-event-perl.h | 9 ++ tools/perf/util/trace-event.h | 3 + 13 files changed, 474 insertions(+), 13 deletions(-) create mode 100644 tools/perf/scripts/perl/Perf-Trace-Util/Context.c create mode 100644 tools/perf/scripts/perl/Perf-Trace-Util/Context.xs create mode 100644 tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Context.pm create mode 100644 tools/perf/scripts/perl/Perf-Trace-Util/typemap create mode 100644 tools/perf/scripts/perl/check-perf-trace.pl diff --git a/tools/perf/Makefile b/tools/perf/Makefile index efbc0e86421..8ad57b51d64 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -497,6 +497,7 @@ ifneq ($(shell sh -c "(echo '\#include '; echo '\#include '; e BASIC_CFLAGS += -DNO_LIBPERL else ALL_LDFLAGS += $(PERL_EMBED_LDOPTS) + LIB_OBJS += scripts/perl/Perf-Trace-Util/Context.o endif ifdef NO_DEMANGLE @@ -873,6 +874,9 @@ util/find_next_bit.o: ../../lib/find_next_bit.c PERF-CFLAGS util/trace-event-perl.o: util/trace-event-perl.c PERF-CFLAGS $(QUIET_CC)$(CC) -o util/trace-event-perl.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter $< +scripts/perl/Perf-Trace-Util/Context.o: scripts/perl/Perf-Trace-Util/Context.c PERF-CFLAGS + $(QUIET_CC)$(CC) -o scripts/perl/Perf-Trace-Util/Context.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $< + perf-%$X: %.o $(PERFLIBS) $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS) @@ -1072,7 +1076,7 @@ distclean: clean # $(RM) configure clean: - $(RM) *.o */*.o $(LIB_FILE) + $(RM) *.o */*.o */*/*.o */*/*/*.o $(LIB_FILE) 
$(RM) $(ALL_PROGRAMS) $(BUILT_INS) perf$X $(RM) $(TEST_PROGRAMS) $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo common-cmds.h TAGS tags cscope* diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Context.c b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c new file mode 100644 index 00000000000..3ba3ffc5416 --- /dev/null +++ b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c @@ -0,0 +1,134 @@ +/* + * This file was generated automatically by ExtUtils::ParseXS version 2.18_02 from the + * contents of Context.xs. Do not edit this file, edit Context.xs instead. + * + * ANY CHANGES MADE HERE WILL BE LOST! + * + */ + +#line 1 "Context.xs" +/* + * Context.xs. XS interfaces for perf trace. + * + * Copyright (C) 2009 Tom Zanussi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include "EXTERN.h" +#include "perl.h" +#include "XSUB.h" +#include "../../../util/trace-event-perl.h" + +#ifndef PERL_UNUSED_VAR +# define PERL_UNUSED_VAR(var) if (0) var = var +#endif + +#line 41 "Context.c" + +XS(XS_Perf__Trace__Context_get_common_pc); /* prototype to pass -Wmissing-prototypes */ +XS(XS_Perf__Trace__Context_get_common_pc) +{ +#ifdef dVAR + dVAR; dXSARGS; +#else + dXSARGS; +#endif + if (items != 1) + Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::get_common_pc", "context"); + PERL_UNUSED_VAR(cv); /* -W */ + { + struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0))); + int RETVAL; + dXSTARG; + + RETVAL = get_common_pc(context); + XSprePUSH; PUSHi((IV)RETVAL); + } + XSRETURN(1); +} + + +XS(XS_Perf__Trace__Context_get_common_flags); /* prototype to pass -Wmissing-prototypes */ +XS(XS_Perf__Trace__Context_get_common_flags) +{ +#ifdef dVAR + dVAR; dXSARGS; +#else + dXSARGS; +#endif + if (items != 1) + Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::get_common_flags", "context"); + PERL_UNUSED_VAR(cv); /* -W */ + { + struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0))); + int RETVAL; + dXSTARG; + + RETVAL = get_common_flags(context); + XSprePUSH; PUSHi((IV)RETVAL); + } + XSRETURN(1); +} + + +XS(XS_Perf__Trace__Context_get_common_lock_depth); /* prototype to pass -Wmissing-prototypes */ +XS(XS_Perf__Trace__Context_get_common_lock_depth) +{ +#ifdef dVAR + dVAR; dXSARGS; +#else + dXSARGS; +#endif + if (items != 1) + Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::get_common_lock_depth", "context"); + PERL_UNUSED_VAR(cv); /* -W */ + { + struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0))); + int RETVAL; + dXSTARG; + + RETVAL = get_common_lock_depth(context); + XSprePUSH; PUSHi((IV)RETVAL); + } + XSRETURN(1); +} + +#ifdef __cplusplus +extern "C" +#endif +XS(boot_Perf__Trace__Context); /* prototype to pass -Wmissing-prototypes */ +XS(boot_Perf__Trace__Context) +{ +#ifdef dVAR + dVAR; dXSARGS; +#else + dXSARGS; +#endif + const 
char* file = __FILE__; + + PERL_UNUSED_VAR(cv); /* -W */ + PERL_UNUSED_VAR(items); /* -W */ + XS_VERSION_BOOTCHECK ; + + newXSproto("Perf::Trace::Context::get_common_pc", XS_Perf__Trace__Context_get_common_pc, file, "$"); + newXSproto("Perf::Trace::Context::get_common_flags", XS_Perf__Trace__Context_get_common_flags, file, "$"); + newXSproto("Perf::Trace::Context::get_common_lock_depth", XS_Perf__Trace__Context_get_common_lock_depth, file, "$"); + if (PL_unitcheckav) + call_list(PL_scopestack_ix, PL_unitcheckav); + XSRETURN_YES; +} + diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs b/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs new file mode 100644 index 00000000000..24facb3696d --- /dev/null +++ b/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs @@ -0,0 +1,41 @@ +/* + * Context.xs. XS interfaces for perf trace. + * + * Copyright (C) 2009 Tom Zanussi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include "EXTERN.h" +#include "perl.h" +#include "XSUB.h" +#include "../../../util/trace-event-perl.h" + +MODULE = Perf::Trace::Context PACKAGE = Perf::Trace::Context +PROTOTYPES: ENABLE + +int +get_common_pc(context) + struct scripting_context * context + +int +get_common_flags(context) + struct scripting_context * context + +int +get_common_lock_depth(context) + struct scripting_context * context + diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Makefile.PL b/tools/perf/scripts/perl/Perf-Trace-Util/Makefile.PL index b0de02e6950..decdeb0f678 100644 --- a/tools/perf/scripts/perl/Perf-Trace-Util/Makefile.PL +++ b/tools/perf/scripts/perl/Perf-Trace-Util/Makefile.PL @@ -3,10 +3,15 @@ use ExtUtils::MakeMaker; # See lib/ExtUtils/MakeMaker.pm for details of how to influence # the contents of the Makefile that is written. WriteMakefile( - NAME => 'Perf::Trace::Util', - VERSION_FROM => 'lib/Perf/Trace/Util.pm', # finds $VERSION + NAME => 'Perf::Trace::Context', + VERSION_FROM => 'lib/Perf/Trace/Context.pm', # finds $VERSION PREREQ_PM => {}, # e.g., Module::Name => 1.1 ($] >= 5.005 ? ## Add these new keywords supported since 5.005 - (ABSTRACT_FROM => 'lib/Perf/Trace/Util.pm', # retrieve abstract from module + (ABSTRACT_FROM => 'lib/Perf/Trace/Context.pm', # retrieve abstract from module AUTHOR => 'Tom Zanussi ') : ()), + LIBS => [''], # e.g., '-lm' + DEFINE => '-I ../..', # e.g., '-DHAVE_SOMETHING' + INC => '-I.', # e.g., '-I. 
-I/usr/include/other' + # Un-comment this if you add C files to link with later: + OBJECT => 'Context.o', # link all the C files too ); diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/README b/tools/perf/scripts/perl/Perf-Trace-Util/README index 0a58378f083..adb99aa3a7b 100644 --- a/tools/perf/scripts/perl/Perf-Trace-Util/README +++ b/tools/perf/scripts/perl/Perf-Trace-Util/README @@ -3,6 +3,34 @@ Perf-Trace-Util version 0.01 This module contains utility functions for use with perf trace. +Core.pm and Util.pm are pure Perl modules; Core.pm contains routines +that the core perf support for Perl calls on and should always be +'used', while Util.pm contains useful but optional utility functions +that scripts may want to use. Context.pm contains the Perl->C +interface that allows scripts to access data in the embedding perf +executable; scripts wishing to do that should 'use Context.pm'. + +The Perl->C perf interface is completely driven by Context.xs. If you +want to add new Perl functions that end up accessing C data in the +perf executable, you add desciptions of the new functions here. +scripting_context is a pointer to the perf data in the perf executable +that you want to access - it's passed as the second parameter, +$context, to all handler functions. + +After you do that: + + perl Makefile.PL # to create a Makefile for the next step + make # to create Context.c + + edit Context.c to add const to the char* file = __FILE__ line in + XS(boot_Perf__Trace__Context) to silence a warning/error. + + You can delete the Makefile, object files and anything else that was + generated e.g. blib and shared library, etc, except for of course + Context.c + + You should then be able to run the normal perf make as usual. + INSTALLATION Building perf with perf trace Perl scripting should install this @@ -15,12 +43,10 @@ DEPENDENCIES This module requires these other modules and libraries: - blah blah blah + None COPYRIGHT AND LICENCE -Put the correct copyright and licence information here. - Copyright (C) 2009 by Tom Zanussi This library is free software; you can redistribute it and/or modify @@ -31,5 +57,3 @@ Alternatively, this software may be distributed under the terms of the GNU General Public License ("GPL") version 2 as published by the Free Software Foundation. - - diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Context.pm b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Context.pm new file mode 100644 index 00000000000..6c7f3659cb1 --- /dev/null +++ b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Context.pm @@ -0,0 +1,55 @@ +package Perf::Trace::Context; + +use 5.010000; +use strict; +use warnings; + +require Exporter; + +our @ISA = qw(Exporter); + +our %EXPORT_TAGS = ( 'all' => [ qw( +) ] ); + +our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } ); + +our @EXPORT = qw( + common_pc common_flags common_lock_depth +); + +our $VERSION = '0.01'; + +require XSLoader; +XSLoader::load('Perf::Trace::Context', $VERSION); + +1; +__END__ +=head1 NAME + +Perf::Trace::Context - Perl extension for accessing functions in perf. + +=head1 SYNOPSIS + + use Perf::Trace::Context; + +=head1 SEE ALSO + +Perf (trace) documentation + +=head1 AUTHOR + +Tom Zanussi, Etzanussi@gmail.com + +=head1 COPYRIGHT AND LICENSE + +Copyright (C) 2009 by Tom Zanussi + +This library is free software; you can redistribute it and/or modify +it under the same terms as Perl itself, either Perl version 5.10.0 or, +at your option, any later version of Perl 5 you may have available. 
+ +Alternatively, this software may be distributed under the terms of the +GNU General Public License ("GPL") version 2 as published by the Free +Software Foundation. + +=cut diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Core.pm b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Core.pm index fd250fb7be1..9df376a9f62 100644 --- a/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Core.pm +++ b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Core.pm @@ -16,10 +16,45 @@ our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } ); our @EXPORT = qw( define_flag_field define_flag_value flag_str dump_flag_fields define_symbolic_field define_symbolic_value symbol_str dump_symbolic_fields +trace_flag_str ); our $VERSION = '0.01'; +my %trace_flags = (0x00 => "NONE", + 0x01 => "IRQS_OFF", + 0x02 => "IRQS_NOSUPPORT", + 0x04 => "NEED_RESCHED", + 0x08 => "HARDIRQ", + 0x10 => "SOFTIRQ"); + +sub trace_flag_str +{ + my ($value) = @_; + + my $string; + + my $print_delim = 0; + + foreach my $idx (sort {$a <=> $b} keys %trace_flags) { + if (!$value && !$idx) { + $string .= "NONE"; + last; + } + + if ($idx && ($value & $idx) == $idx) { + if ($print_delim) { + $string .= " | "; + } + $string .= "$trace_flags{$idx}"; + $print_delim = 1; + $value &= ~$idx; + } + } + + return $string; +} + my %flag_fields; my %symbolic_fields; diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/typemap b/tools/perf/scripts/perl/Perf-Trace-Util/typemap new file mode 100644 index 00000000000..840836804aa --- /dev/null +++ b/tools/perf/scripts/perl/Perf-Trace-Util/typemap @@ -0,0 +1 @@ +struct scripting_context * T_PTR diff --git a/tools/perf/scripts/perl/check-perf-trace.pl b/tools/perf/scripts/perl/check-perf-trace.pl new file mode 100644 index 00000000000..4e7dc0a407a --- /dev/null +++ b/tools/perf/scripts/perl/check-perf-trace.pl @@ -0,0 +1,106 @@ +# perf trace event handlers, generated by perf trace -g perl +# (c) 2009, Tom Zanussi +# Licensed under the terms of the GNU GPL License version 2 + +# This script tests basic functionality such as flag and symbol +# strings, common_xxx() calls back into perf, begin, end, unhandled +# events, etc. Basically, if this script runs successfully and +# displays expected results, perl scripting support should be ok. + +use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib"; +use lib "./Perf-Trace-Util/lib"; +use Perf::Trace::Core; +use Perf::Trace::Context; +use Perf::Trace::Util; + +sub trace_begin +{ + print "trace_begin\n"; +} + +sub trace_end +{ + print "trace_end\n"; + + print_unhandled(); +} + +sub irq::softirq_entry +{ + my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, + $common_pid, $common_comm, + $vec) = @_; + + print_header($event_name, $common_cpu, $common_secs, $common_nsecs, + $common_pid, $common_comm); + + print_uncommon($context); + + printf("vec=%s\n", + symbol_str("irq::softirq_entry", "vec", $vec)); +} + +sub kmem::kmalloc +{ + my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, + $common_pid, $common_comm, + $call_site, $ptr, $bytes_req, $bytes_alloc, + $gfp_flags) = @_; + + print_header($event_name, $common_cpu, $common_secs, $common_nsecs, + $common_pid, $common_comm); + + print_uncommon($context); + + printf("call_site=%p, ptr=%p, bytes_req=%u, bytes_alloc=%u, ". 
+ "gfp_flags=%s\n", + $call_site, $ptr, $bytes_req, $bytes_alloc, + + flag_str("kmem::kmalloc", "gfp_flags", $gfp_flags)); +} + +# print trace fields not included in handler args +sub print_uncommon +{ + my ($context) = @_; + + printf("common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, ", + common_pc($context), trace_flag_str(common_flags($context)), + common_lock_depth($context)); + +} + +my %unhandled; + +sub print_unhandled +{ + if ((scalar keys %unhandled) == 0) { + return; + } + + print "\nunhandled events:\n\n"; + + printf("%-40s %10s\n", "event", "count"); + printf("%-40s %10s\n", "----------------------------------------", + "-----------"); + + foreach my $event_name (keys %unhandled) { + printf("%-40s %10d\n", $event_name, $unhandled{$event_name}); + } +} + +sub trace_unhandled +{ + my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, + $common_pid, $common_comm) = @_; + + $unhandled{$event_name}++; +} + +sub print_header +{ + my ($event_name, $cpu, $secs, $nsecs, $pid, $comm) = @_; + + printf("%-20s %5u %05u.%09u %8u %-20s ", + $event_name, $cpu, $secs, $nsecs, $pid, $comm); +} diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 1f16495e559..0302405aa2c 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -1982,7 +1982,7 @@ int trace_parse_common_pid(void *data) "common_pid"); } -static int parse_common_pc(void *data) +int parse_common_pc(void *data) { static int pc_offset; static int pc_size; @@ -1991,7 +1991,7 @@ static int parse_common_pc(void *data) "common_preempt_count"); } -static int parse_common_flags(void *data) +int parse_common_flags(void *data) { static int flags_offset; static int flags_size; @@ -2000,7 +2000,7 @@ static int parse_common_flags(void *data) "common_flags"); } -static int parse_common_lock_depth(void *data) +int parse_common_lock_depth(void *data) { static int ld_offset; static int ld_size; diff --git a/tools/perf/util/trace-event-perl.c b/tools/perf/util/trace-event-perl.c index c56b08d704d..d179adebc54 100644 --- a/tools/perf/util/trace-event-perl.c +++ b/tools/perf/util/trace-event-perl.c @@ -30,6 +30,21 @@ #include "trace-event.h" #include "trace-event-perl.h" +void xs_init(pTHX); + +void boot_Perf__Trace__Context(pTHX_ CV *cv); +void boot_DynaLoader(pTHX_ CV *cv); + +void xs_init(pTHX) +{ + const char *file = __FILE__; + dXSUB_SYS; + + newXS("Perf::Trace::Context::bootstrap", boot_Perf__Trace__Context, + file); + newXS("DynaLoader::boot_DynaLoader", boot_DynaLoader, file); +} + INTERP my_perl; #define FTRACE_MAX_EVENT \ @@ -227,6 +242,33 @@ static inline struct event *find_cache_event(int type) return event; } +int get_common_pc(struct scripting_context *context) +{ + int pc; + + pc = parse_common_pc(context->event_data); + + return pc; +} + +int get_common_flags(struct scripting_context *context) +{ + int flags; + + flags = parse_common_flags(context->event_data); + + return flags; +} + +int get_common_lock_depth(struct scripting_context *context) +{ + int lock_depth; + + lock_depth = parse_common_lock_depth(context->event_data); + + return lock_depth; +} + static void perl_process_event(int cpu, void *data, int size __attribute((unused)), unsigned long long nsecs, char *comm) @@ -290,6 +332,7 @@ static void perl_process_event(int cpu, void *data, } PUTBACK; + if (get_cv(handler, 0)) call_pv(handler, G_SCALAR); else if (get_cv("main::trace_unhandled", 0)) { @@ -328,7 +371,8 @@ static int perl_start_script(const char *script) my_perl = 
perl_alloc(); perl_construct(my_perl); - if (perl_parse(my_perl, NULL, 2, (char **)command_line, (char **)NULL)) + if (perl_parse(my_perl, xs_init, 2, (char **)command_line, + (char **)NULL)) return -1; perl_run(my_perl); diff --git a/tools/perf/util/trace-event-perl.h b/tools/perf/util/trace-event-perl.h index 6c94fa93013..666a864f5dd 100644 --- a/tools/perf/util/trace-event-perl.h +++ b/tools/perf/util/trace-event-perl.h @@ -29,6 +29,11 @@ typedef int INTERP; #define perl_run(a) (0) #define perl_destruct(a) (0) #define perl_free(a) (0) +#define pTHX void +#define CV void +#define dXSUB_SYS +#define pTHX_ +static inline void newXS(const char *a, void *b, const char *c) {} #else #include #include @@ -39,4 +44,8 @@ struct scripting_context { void *event_data; }; +int get_common_pc(struct scripting_context *context); +int get_common_flags(struct scripting_context *context); +int get_common_lock_depth(struct scripting_context *context); + #endif /* __PERF_TRACE_EVENT_PERL_H */ diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index b1e58d3d947..81698d5e650 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -246,6 +246,9 @@ extern int latency_format; int parse_header_page(char *buf, unsigned long size); int trace_parse_common_type(void *data); int trace_parse_common_pid(void *data); +int parse_common_pc(void *data); +int parse_common_flags(void *data); +int parse_common_lock_depth(void *data); struct event *trace_find_event(int id); struct event *trace_find_next_event(struct event *event); unsigned long long read_size(void *ptr, int size); From 89fbf0b8a021cbf60abeacfb6b538e97c83afada Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 25 Nov 2009 01:15:51 -0600 Subject: [PATCH 437/470] perf trace: Add Documentation for perf trace Perl support Adds perf-trace-perl Documentation and a link to it from the perf-trace page. Signed-off-by: Tom Zanussi Cc: fweisbec@gmail.com Cc: rostedt@goodmis.org Cc: anton@samba.org Cc: hch@infradead.org LKML-Reference: <1259133352-23685-7-git-send-email-tzanussi@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/Documentation/perf-trace-perl.txt | 219 +++++++++++++++++++ tools/perf/Documentation/perf-trace.txt | 11 +- 2 files changed, 229 insertions(+), 1 deletion(-) create mode 100644 tools/perf/Documentation/perf-trace-perl.txt diff --git a/tools/perf/Documentation/perf-trace-perl.txt b/tools/perf/Documentation/perf-trace-perl.txt new file mode 100644 index 00000000000..c5f55f43909 --- /dev/null +++ b/tools/perf/Documentation/perf-trace-perl.txt @@ -0,0 +1,219 @@ +perf-trace-perl(1) +================== + +NAME +---- +perf-trace-perl - Process trace data with a Perl script + +SYNOPSIS +-------- +[verse] +'perf trace' [-s [lang]:script[.ext] ] + +DESCRIPTION +----------- + +This perf trace option is used to process perf trace data using perf's +built-in Perl interpreter. It reads and processes the input file and +displays the results of the trace analysis implemented in the given +Perl script, if any. + +STARTER SCRIPTS +--------------- + +You can avoid reading the rest of this document by running 'perf trace +-g perl' in the same directory as an existing perf.data trace file. +That will generate a starter script containing a handler for each of +the event types in the trace file; it simply prints every available +field for each event in the trace file. 
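
For orientation, here is a minimal sketch of the kind of handler such a
starter script contains. It assumes a sched::sched_switch event in the
trace and a print_header() helper like the one used by
check-perf-trace.pl, so treat it as illustrative rather than as the
exact generated output:

----
sub sched::sched_switch
{
    # Common arguments passed to every handler, followed by this
    # event's own fields from its format file.
    my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
        $common_pid, $common_comm,
        $prev_comm, $prev_pid, $prev_prio, $prev_state,
        $next_comm, $next_pid, $next_prio) = @_;

    print_header($event_name, $common_cpu, $common_secs, $common_nsecs,
                 $common_pid, $common_comm);

    # A generated starter simply echoes the field values.
    printf("prev_comm=%s, prev_pid=%d, prev_prio=%d, " .
           "next_comm=%s, next_pid=%d, next_prio=%d\n",
           $prev_comm, $prev_pid, $prev_prio,
           $next_comm, $next_pid, $next_prio);
}
----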
+ +You can also look at the existing scripts in +~/libexec/perf-core/scripts/perl for typical examples showing how to +do basic things like aggregate event data, print results, etc. Also, +the check-perf-trace.pl script, while not interesting for its results, +attempts to exercise all of the main scripting features. + +EVENT HANDLERS +-------------- + +When perf trace is invoked using a trace script, a user-defined +'handler function' is called for each event in the trace. If there's +no handler function defined for a given event type, the event is +ignored (or passed to a 'trace_handled' function, see below) and the +next event is processed. + +Most of the event's field values are passed as arguments to the +handler function; some of the less common ones aren't - those are +available as calls back into the perf executable (see below). + +As an example, the following perf record command can be used to record +all sched_wakeup events in the system: + + # perf record -c 1 -f -a -M -R -e sched:sched_wakeup + +Traces meant to be processed using a script should be recorded with +the above options: -c 1 says to sample every event, -a to enable +system-wide collection, -M to multiplex the output, and -R to collect +raw samples. + +The format file for the sched_wakep event defines the following fields +(see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format): + +---- + format: + field:unsigned short common_type; + field:unsigned char common_flags; + field:unsigned char common_preempt_count; + field:int common_pid; + field:int common_lock_depth; + + field:char comm[TASK_COMM_LEN]; + field:pid_t pid; + field:int prio; + field:int success; + field:int target_cpu; +---- + +The handler function for this event would be defined as: + +---- +sub sched::sched_wakeup +{ + my ($event_name, $context, $common_cpu, $common_secs, + $common_nsecs, $common_pid, $common_comm, + $comm, $pid, $prio, $success, $target_cpu) = @_; +} +---- + +The handler function takes the form subsystem::event_name. + +The $common_* arguments in the handler's argument list are the set of +arguments passed to all event handlers; some of the fields correspond +to the common_* fields in the format file, but some are synthesized, +and some of the common_* fields aren't common enough to to be passed +to every event as arguments but are available as library functions. + +Here's a brief description of each of the invariant event args: + + $event_name the name of the event as text + $context an opaque 'cookie' used in calls back into perf + $common_cpu the cpu the event occurred on + $common_secs the secs portion of the event timestamp + $common_nsecs the nsecs portion of the event timestamp + $common_pid the pid of the current task + $common_comm the name of the current process + +All of the remaining fields in the event's format file have +counterparts as handler function arguments of the same name, as can be +seen in the example above. + +The above provides the basics needed to directly access every field of +every event in a trace, which covers 90% of what you need to know to +write a useful trace script. The sections below cover the rest. 
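
Before moving on, a purely illustrative variation of the
sched::sched_wakeup handler above shows the aggregation style most real
scripts use; the %wakeups hash and the trace_end() summary are
assumptions of this sketch, not code shipped with perf:

----
my %wakeups;

sub sched::sched_wakeup
{
    my ($event_name, $context, $common_cpu, $common_secs,
        $common_nsecs, $common_pid, $common_comm,
        $comm, $pid, $prio, $success, $target_cpu) = @_;

    # Tally wakeups by the name of the task being woken.
    $wakeups{$comm}++;
}

sub trace_end
{
    # Called once after all events have been processed (see SCRIPT
    # LAYOUT below); print the per-task totals.
    printf("%-20s %10s\n", "comm", "wakeups");
    foreach my $comm (sort { $wakeups{$b} <=> $wakeups{$a} } keys %wakeups) {
        printf("%-20s %10d\n", $comm, $wakeups{$comm});
    }
}
----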
+ +SCRIPT LAYOUT +------------- + +Every perf trace Perl script should start by setting up a Perl module +search path and 'use'ing a few support modules (see module +descriptions below): + +---- + use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib"; + use lib "./Perf-Trace-Util/lib"; + use Perf::Trace::Core; + use Perf::Trace::Context; + use Perf::Trace::Util; +---- + +The rest of the script can contain handler functions and support +functions in any order. + +Aside from the event handler functions discussed above, every script +can implement a set of optional functions: + +*trace_begin*, if defined, is called before any event is processed and +gives scripts a chance to do setup tasks: + +---- + sub trace_begin + { + } +---- + +*trace_end*, if defined, is called after all events have been + processed and gives scripts a chance to do end-of-script tasks, such + as display results: + +---- +sub trace_end +{ +} +---- + +*trace_unhandled*, if defined, is called after for any event that + doesn't have a handler explicitly defined for it. The standard set + of common arguments are passed into it: + +---- +sub trace_unhandled +{ + my ($event_name, $context, $common_cpu, $common_secs, + $common_nsecs, $common_pid, $common_comm) = @_; +} +---- + +The remaining sections provide descriptions of each of the available +built-in perf trace Perl modules and their associated functions. + +AVAILABLE MODULES AND FUNCTIONS +------------------------------- + +The following sections describe the functions and variables available +via the various Perf::Trace::* Perl modules. To use the functions and +variables from the given module, add the corresponding 'use +Perf::Trace::XXX' line to your perf trace script. + +Perf::Trace::Core Module +~~~~~~~~~~~~~~~~~~~~~~~~ + +These functions provide some essential functions to user scripts. + +The *flag_str* and *symbol_str* functions provide human-readable +strings for flag and symbolic fields. These correspond to the strings +and values parsed from the 'print fmt' fields of the event format +files: + + flag_str($event_name, $field_name, $field_value) - returns the string represention corresponding to $field_value for the flag field $field_name of event $event_name + symbol_str($event_name, $field_name, $field_value) - returns the string represention corresponding to $field_value for the symbolic field $field_name of event $event_name + +Perf::Trace::Context Module +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Some of the 'common' fields in the event format file aren't all that +common, but need to be made accessible to user scripts nonetheless. + +Perf::Trace::Context defines a set of functions that can be used to +access this data in the context of the current event. Each of these +functions expects a $context variable, which is the same as the +$context variable passed into every event handler as the second +argument. 
+ + common_pc($context) - returns common_preempt count for the current event + common_flags($context) - returns common_flags for the current event + common_lock_depth($context) - returns common_lock_depth for the current event + +Perf::Trace::Util Module +~~~~~~~~~~~~~~~~~~~~~~~~ + +Various utility functions for use with perf trace: + + nsecs($secs, $nsecs) - returns total nsecs given secs/nsecs pair + nsecs_secs($nsecs) - returns whole secs portion given nsecs + nsecs_nsecs($nsecs) - returns nsecs remainder given nsecs + nsecs_str($nsecs) - returns printable string in the form secs.nsecs + avg($total, $n) - returns average given a sum and a total number of values + +SEE ALSO +-------- +linkperf:perf-trace[1] diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt index 41ed75398ca..07065efa60e 100644 --- a/tools/perf/Documentation/perf-trace.txt +++ b/tools/perf/Documentation/perf-trace.txt @@ -20,6 +20,15 @@ OPTIONS --dump-raw-trace=:: Display verbose dump of the trace data. +-s:: +--script=:: + Process trace data with the given script ([lang]:script[.ext]). + +-g:: +--gen-script=:: + Generate perf-trace.[ext] starter script for given language, + using current perf.data. + SEE ALSO -------- -linkperf:perf-record[1] +linkperf:perf-record[1], linkperf:perf-trace-perl[1] From 1ae4a971250c55e473ca53c78011fcf73809885d Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 25 Nov 2009 01:15:52 -0600 Subject: [PATCH 438/470] perf trace: Add a scripts/perl/bin for perf trace shell scripts To capture the relevant events for a given Perl script and to avoid having to continually remember and type in long command-lines, add a scripts/perl/bin directory containing two simple shell scripts for each Perl script, one for recording and one for processing/display. For example, to record perf data for the rw-by-pid.pl script, run scripts/perl/bin/rw-by-pid-record and to actually run the script and display the output run scripts/perl/bin/rw-by-pid-report. 
Signed-off-by: Tom Zanussi Cc: fweisbec@gmail.com Cc: rostedt@goodmis.org Cc: anton@samba.org Cc: hch@infradead.org LKML-Reference: <1259133352-23685-8-git-send-email-tzanussi@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/scripts/perl/bin/check-perf-trace-record | 7 +++++++ tools/perf/scripts/perl/bin/check-perf-trace-report | 5 +++++ tools/perf/scripts/perl/bin/rw-by-file-record | 2 ++ tools/perf/scripts/perl/bin/rw-by-file-report | 5 +++++ tools/perf/scripts/perl/bin/rw-by-pid-record | 2 ++ tools/perf/scripts/perl/bin/rw-by-pid-report | 5 +++++ tools/perf/scripts/perl/bin/wakeup-latency-record | 6 ++++++ tools/perf/scripts/perl/bin/wakeup-latency-report | 5 +++++ tools/perf/scripts/perl/bin/workqueue-stats-record | 2 ++ tools/perf/scripts/perl/bin/workqueue-stats-report | 6 ++++++ 10 files changed, 45 insertions(+) create mode 100644 tools/perf/scripts/perl/bin/check-perf-trace-record create mode 100644 tools/perf/scripts/perl/bin/check-perf-trace-report create mode 100644 tools/perf/scripts/perl/bin/rw-by-file-record create mode 100644 tools/perf/scripts/perl/bin/rw-by-file-report create mode 100644 tools/perf/scripts/perl/bin/rw-by-pid-record create mode 100644 tools/perf/scripts/perl/bin/rw-by-pid-report create mode 100644 tools/perf/scripts/perl/bin/wakeup-latency-record create mode 100644 tools/perf/scripts/perl/bin/wakeup-latency-report create mode 100644 tools/perf/scripts/perl/bin/workqueue-stats-record create mode 100644 tools/perf/scripts/perl/bin/workqueue-stats-report diff --git a/tools/perf/scripts/perl/bin/check-perf-trace-record b/tools/perf/scripts/perl/bin/check-perf-trace-record new file mode 100644 index 00000000000..c7ec5de2f53 --- /dev/null +++ b/tools/perf/scripts/perl/bin/check-perf-trace-record @@ -0,0 +1,7 @@ +#!/bin/bash +perf record -c 1 -f -a -M -R -e kmem:kmalloc -e irq:softirq_entry + + + + + diff --git a/tools/perf/scripts/perl/bin/check-perf-trace-report b/tools/perf/scripts/perl/bin/check-perf-trace-report new file mode 100644 index 00000000000..89948b01502 --- /dev/null +++ b/tools/perf/scripts/perl/bin/check-perf-trace-report @@ -0,0 +1,5 @@ +#!/bin/bash +perf trace -s ~/libexec/perf-core/scripts/perl/check-perf-trace.pl + + + diff --git a/tools/perf/scripts/perl/bin/rw-by-file-record b/tools/perf/scripts/perl/bin/rw-by-file-record new file mode 100644 index 00000000000..b25056ebf96 --- /dev/null +++ b/tools/perf/scripts/perl/bin/rw-by-file-record @@ -0,0 +1,2 @@ +#!/bin/bash +perf record -c 1 -f -a -M -R -e syscalls:sys_enter_read -e syscalls:sys_enter_write diff --git a/tools/perf/scripts/perl/bin/rw-by-file-report b/tools/perf/scripts/perl/bin/rw-by-file-report new file mode 100644 index 00000000000..f5dcf9cb5bd --- /dev/null +++ b/tools/perf/scripts/perl/bin/rw-by-file-report @@ -0,0 +1,5 @@ +#!/bin/bash +perf trace -s ~/libexec/perf-core/scripts/perl/rw-by-file.pl + + + diff --git a/tools/perf/scripts/perl/bin/rw-by-pid-record b/tools/perf/scripts/perl/bin/rw-by-pid-record new file mode 100644 index 00000000000..8903979c5b6 --- /dev/null +++ b/tools/perf/scripts/perl/bin/rw-by-pid-record @@ -0,0 +1,2 @@ +#!/bin/bash +perf record -c 1 -f -a -M -R -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write diff --git a/tools/perf/scripts/perl/bin/rw-by-pid-report b/tools/perf/scripts/perl/bin/rw-by-pid-report new file mode 100644 index 00000000000..cea16f78a3a --- /dev/null +++ b/tools/perf/scripts/perl/bin/rw-by-pid-report @@ -0,0 +1,5 @@ +#!/bin/bash +perf trace -s 
~/libexec/perf-core/scripts/perl/rw-by-pid.pl + + + diff --git a/tools/perf/scripts/perl/bin/wakeup-latency-record b/tools/perf/scripts/perl/bin/wakeup-latency-record new file mode 100644 index 00000000000..6abedda911a --- /dev/null +++ b/tools/perf/scripts/perl/bin/wakeup-latency-record @@ -0,0 +1,6 @@ +#!/bin/bash +perf record -c 1 -f -a -M -R -e sched:sched_switch -e sched:sched_wakeup + + + + diff --git a/tools/perf/scripts/perl/bin/wakeup-latency-report b/tools/perf/scripts/perl/bin/wakeup-latency-report new file mode 100644 index 00000000000..85769dc456e --- /dev/null +++ b/tools/perf/scripts/perl/bin/wakeup-latency-report @@ -0,0 +1,5 @@ +#!/bin/bash +perf trace -s ~/libexec/perf-core/scripts/perl/wakeup-latency.pl + + + diff --git a/tools/perf/scripts/perl/bin/workqueue-stats-record b/tools/perf/scripts/perl/bin/workqueue-stats-record new file mode 100644 index 00000000000..fce6637b19b --- /dev/null +++ b/tools/perf/scripts/perl/bin/workqueue-stats-record @@ -0,0 +1,2 @@ +#!/bin/bash +perf record -c 1 -f -a -M -R -e workqueue:workqueue_creation -e workqueue:workqueue_destruction -e workqueue:workqueue_execution -e workqueue:workqueue_insertion diff --git a/tools/perf/scripts/perl/bin/workqueue-stats-report b/tools/perf/scripts/perl/bin/workqueue-stats-report new file mode 100644 index 00000000000..aa68435be92 --- /dev/null +++ b/tools/perf/scripts/perl/bin/workqueue-stats-report @@ -0,0 +1,6 @@ +#!/bin/bash +perf trace -s ~/libexec/perf-core/scripts/perl/workqueue-stats.pl + + + + From cf72344d1ad7b33805ef8d65e758b267e6f4cb8d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 28 Nov 2009 10:11:00 +0100 Subject: [PATCH 439/470] perf scripting: Fix build Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: Signed-off-by: Ingo Molnar --- tools/perf/builtin-trace.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index ca8ebf1ec64..abb914aa7be 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -5,6 +5,8 @@ #include "util/symbol.h" #include "util/thread.h" #include "util/header.h" +#include "util/exec_cmd.h" +#include "util/trace-event.h" static char const *script_name; static char const *generate_script_lang; From f8be4231f82ab56a87ce74906671afbe1aa9ec75 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Mon, 30 Nov 2009 01:18:46 -0600 Subject: [PATCH 440/470] perf trace/scripting: Silence PERL_EMBED_* backtick errors The backtick shell substitutions for PERL_EMBED_LDOPT/CCOPT make a lot of noise on stderr if Embed.pm isn't installed - this silences them. 
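
A quick way to see whether this applies on a given machine is to ask
Perl for the embedding options directly. This is only a convenience
check, assuming ExtUtils::Embed and its ldopts()/ccopts() helpers; it is
not part of the build:

----
#!/usr/bin/perl
# Prints the embedding link/compile options if ExtUtils::Embed is
# installed; otherwise it fails with a "Can't locate ExtUtils/Embed.pm"
# message, the kind of stderr noise the Makefile change below hides.
use ExtUtils::Embed;

print "ldopts: ", ldopts(), "\n";
print "ccopts: ", ccopts(), "\n";
----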
Signed-off-by: Tom Zanussi Cc: fweisbec@gmail.com Cc: rostedt@goodmis.org Cc: anton@samba.org Cc: hch@infradead.org LKML-Reference: <1259565529-6407-2-git-send-email-tzanussi@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 8ad57b51d64..d62a2d7ff4e 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -490,8 +490,8 @@ else LIB_OBJS += util/probe-finder.o endif -PERL_EMBED_LDOPTS = `perl -MExtUtils::Embed -e ldopts` -PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts` +PERL_EMBED_LDOPTS = `perl -MExtUtils::Embed -e ldopts 2>/dev/null` +PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null` ifneq ($(shell sh -c "(echo '\#include '; echo '\#include '; echo 'int main(void) { perl_alloc(); return 0; }') | $(CC) -x c - $(PERL_EMBED_CCOPTS) -o /dev/null $(PERL_EMBED_LDOPTS) > /dev/null 2>&1 && echo y"), y) BASIC_CFLAGS += -DNO_LIBPERL From e136323c5a8a7d91d17c5b7b340758bb9dd33739 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Mon, 30 Nov 2009 01:18:47 -0600 Subject: [PATCH 441/470] perf trace/scripting: Ignore shadowed variable warning for perf-trace-perl.c The debugging versions of the ENTER and LEAVE internal perl macros, used when embedding perl, define a local block with a my_perl perl variable that shadows a global variable of the same name, which is also the name expected by the embedding API for the embedded interpreter. Since we don't have control over the code generated in this case (it's an externality) and can't get rid of the warning, ignore it. Signed-off-by: Tom Zanussi Cc: fweisbec@gmail.com Cc: rostedt@goodmis.org Cc: anton@samba.org Cc: hch@infradead.org LKML-Reference: <1259565529-6407-3-git-send-email-tzanussi@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index d62a2d7ff4e..20cd66362d2 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -872,7 +872,7 @@ util/find_next_bit.o: ../../lib/find_next_bit.c PERF-CFLAGS $(QUIET_CC)$(CC) -o util/find_next_bit.o -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< util/trace-event-perl.o: util/trace-event-perl.c PERF-CFLAGS - $(QUIET_CC)$(CC) -o util/trace-event-perl.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter $< + $(QUIET_CC)$(CC) -o util/trace-event-perl.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $< scripts/perl/Perf-Trace-Util/Context.o: scripts/perl/Perf-Trace-Util/Context.c PERF-CFLAGS $(QUIET_CC)$(CC) -o scripts/perl/Perf-Trace-Util/Context.o -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $< From 61381de0504181368672a83d2e14c38dbaf3c136 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Mon, 30 Nov 2009 01:18:48 -0600 Subject: [PATCH 442/470] perf trace/scripting: Fix Perl common_* access functions The common_* functions (e.g. common_pc(), etc) are exported as common_* but named get_common_*, resulting in unresolved subroutine errors when executing scripts. Make the internal and external names match. 
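
To make the failure concrete: Perf::Trace::Context exports common_pc(),
common_flags() and common_lock_depth(), so scripts call them under those
names, roughly as in the sketch below (modelled on check-perf-trace.pl,
with plain integer formatting assumed for the flags). Before this patch
the XS routines were only registered as get_common_*, so the first such
call raised an undefined-subroutine error:

----
use Perf::Trace::Context;

sub print_uncommon
{
    my ($context) = @_;

    # $context is the opaque cookie handed to every event handler as
    # its second argument; these calls land in the renamed XS functions.
    printf("common_preempt_count=%d, common_flags=%d, common_lock_depth=%d\n",
           common_pc($context), common_flags($context),
           common_lock_depth($context));
}
----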
Signed-off-by: Tom Zanussi Cc: fweisbec@gmail.com Cc: rostedt@goodmis.org Cc: anton@samba.org Cc: hch@infradead.org LKML-Reference: <1259565529-6407-4-git-send-email-tzanussi@gmail.com> Signed-off-by: Ingo Molnar --- .../scripts/perl/Perf-Trace-Util/Context.c | 30 +++++++++---------- .../scripts/perl/Perf-Trace-Util/Context.xs | 6 ++-- tools/perf/util/trace-event-perl.c | 6 ++-- tools/perf/util/trace-event-perl.h | 6 ++-- 4 files changed, 24 insertions(+), 24 deletions(-) diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Context.c b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c index 3ba3ffc5416..af78d9a52a7 100644 --- a/tools/perf/scripts/perl/Perf-Trace-Util/Context.c +++ b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c @@ -39,8 +39,8 @@ #line 41 "Context.c" -XS(XS_Perf__Trace__Context_get_common_pc); /* prototype to pass -Wmissing-prototypes */ -XS(XS_Perf__Trace__Context_get_common_pc) +XS(XS_Perf__Trace__Context_common_pc); /* prototype to pass -Wmissing-prototypes */ +XS(XS_Perf__Trace__Context_common_pc) { #ifdef dVAR dVAR; dXSARGS; @@ -48,22 +48,22 @@ XS(XS_Perf__Trace__Context_get_common_pc) dXSARGS; #endif if (items != 1) - Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::get_common_pc", "context"); + Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_pc", "context"); PERL_UNUSED_VAR(cv); /* -W */ { struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0))); int RETVAL; dXSTARG; - RETVAL = get_common_pc(context); + RETVAL = common_pc(context); XSprePUSH; PUSHi((IV)RETVAL); } XSRETURN(1); } -XS(XS_Perf__Trace__Context_get_common_flags); /* prototype to pass -Wmissing-prototypes */ -XS(XS_Perf__Trace__Context_get_common_flags) +XS(XS_Perf__Trace__Context_common_flags); /* prototype to pass -Wmissing-prototypes */ +XS(XS_Perf__Trace__Context_common_flags) { #ifdef dVAR dVAR; dXSARGS; @@ -71,22 +71,22 @@ XS(XS_Perf__Trace__Context_get_common_flags) dXSARGS; #endif if (items != 1) - Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::get_common_flags", "context"); + Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_flags", "context"); PERL_UNUSED_VAR(cv); /* -W */ { struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0))); int RETVAL; dXSTARG; - RETVAL = get_common_flags(context); + RETVAL = common_flags(context); XSprePUSH; PUSHi((IV)RETVAL); } XSRETURN(1); } -XS(XS_Perf__Trace__Context_get_common_lock_depth); /* prototype to pass -Wmissing-prototypes */ -XS(XS_Perf__Trace__Context_get_common_lock_depth) +XS(XS_Perf__Trace__Context_common_lock_depth); /* prototype to pass -Wmissing-prototypes */ +XS(XS_Perf__Trace__Context_common_lock_depth) { #ifdef dVAR dVAR; dXSARGS; @@ -94,14 +94,14 @@ XS(XS_Perf__Trace__Context_get_common_lock_depth) dXSARGS; #endif if (items != 1) - Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::get_common_lock_depth", "context"); + Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_lock_depth", "context"); PERL_UNUSED_VAR(cv); /* -W */ { struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0))); int RETVAL; dXSTARG; - RETVAL = get_common_lock_depth(context); + RETVAL = common_lock_depth(context); XSprePUSH; PUSHi((IV)RETVAL); } XSRETURN(1); @@ -124,9 +124,9 @@ XS(boot_Perf__Trace__Context) PERL_UNUSED_VAR(items); /* -W */ XS_VERSION_BOOTCHECK ; - newXSproto("Perf::Trace::Context::get_common_pc", XS_Perf__Trace__Context_get_common_pc, file, "$"); - newXSproto("Perf::Trace::Context::get_common_flags", 
XS_Perf__Trace__Context_get_common_flags, file, "$"); - newXSproto("Perf::Trace::Context::get_common_lock_depth", XS_Perf__Trace__Context_get_common_lock_depth, file, "$"); + newXSproto("Perf::Trace::Context::common_pc", XS_Perf__Trace__Context_common_pc, file, "$"); + newXSproto("Perf::Trace::Context::common_flags", XS_Perf__Trace__Context_common_flags, file, "$"); + newXSproto("Perf::Trace::Context::common_lock_depth", XS_Perf__Trace__Context_common_lock_depth, file, "$"); if (PL_unitcheckav) call_list(PL_scopestack_ix, PL_unitcheckav); XSRETURN_YES; diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs b/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs index 24facb3696d..fb78006c165 100644 --- a/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs +++ b/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs @@ -28,14 +28,14 @@ MODULE = Perf::Trace::Context PACKAGE = Perf::Trace::Context PROTOTYPES: ENABLE int -get_common_pc(context) +common_pc(context) struct scripting_context * context int -get_common_flags(context) +common_flags(context) struct scripting_context * context int -get_common_lock_depth(context) +common_lock_depth(context) struct scripting_context * context diff --git a/tools/perf/util/trace-event-perl.c b/tools/perf/util/trace-event-perl.c index d179adebc54..2e1cc3c11c7 100644 --- a/tools/perf/util/trace-event-perl.c +++ b/tools/perf/util/trace-event-perl.c @@ -242,7 +242,7 @@ static inline struct event *find_cache_event(int type) return event; } -int get_common_pc(struct scripting_context *context) +int common_pc(struct scripting_context *context) { int pc; @@ -251,7 +251,7 @@ int get_common_pc(struct scripting_context *context) return pc; } -int get_common_flags(struct scripting_context *context) +int common_flags(struct scripting_context *context) { int flags; @@ -260,7 +260,7 @@ int get_common_flags(struct scripting_context *context) return flags; } -int get_common_lock_depth(struct scripting_context *context) +int common_lock_depth(struct scripting_context *context) { int lock_depth; diff --git a/tools/perf/util/trace-event-perl.h b/tools/perf/util/trace-event-perl.h index 666a864f5dd..8fe0d866fe1 100644 --- a/tools/perf/util/trace-event-perl.h +++ b/tools/perf/util/trace-event-perl.h @@ -44,8 +44,8 @@ struct scripting_context { void *event_data; }; -int get_common_pc(struct scripting_context *context); -int get_common_flags(struct scripting_context *context); -int get_common_lock_depth(struct scripting_context *context); +int common_pc(struct scripting_context *context); +int common_flags(struct scripting_context *context); +int common_lock_depth(struct scripting_context *context); #endif /* __PERF_TRACE_EVENT_PERL_H */ From 8ea339adc0a48236008e59dd21564d71c37b331c Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Mon, 30 Nov 2009 01:18:49 -0600 Subject: [PATCH 443/470] perf trace/scripting: Add Fedora libperl install note to doc Fedora needs perl-ExtUtils-Embed for Perl scripting, which also brings along libperl-devel; note this info for the convenience of Fedora users. 
Signed-off-by: Tom Zanussi Cc: fweisbec@gmail.com Cc: rostedt@goodmis.org Cc: anton@samba.org Cc: hch@infradead.org LKML-Reference: <1259565529-6407-5-git-send-email-tzanussi@gmail.com> Signed-off-by: Ingo Molnar --- tools/perf/scripts/perl/Perf-Trace-Util/README | 4 ++-- tools/perf/util/trace-event-perl.c | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/README b/tools/perf/scripts/perl/Perf-Trace-Util/README index adb99aa3a7b..9a970763079 100644 --- a/tools/perf/scripts/perl/Perf-Trace-Util/README +++ b/tools/perf/scripts/perl/Perf-Trace-Util/README @@ -36,8 +36,8 @@ INSTALLATION Building perf with perf trace Perl scripting should install this module in the right place. -You should make sure libperl is installed first e.g. apt-get install -libperl-dev. +You should make sure libperl and ExtUtils/Embed.pm are installed first +e.g. apt-get install libperl-dev or yum install perl-ExtUtils-Embed. DEPENDENCIES diff --git a/tools/perf/util/trace-event-perl.c b/tools/perf/util/trace-event-perl.c index 2e1cc3c11c7..51e833fd58c 100644 --- a/tools/perf/util/trace-event-perl.c +++ b/tools/perf/util/trace-event-perl.c @@ -577,7 +577,9 @@ struct scripting_ops perl_scripting_ops = { void setup_perl_scripting(void) { fprintf(stderr, "Perl scripting not supported." - " Install libperl-dev[el] and rebuild perf to get it.\n"); + " Install libperl and rebuild perf to enable it. e.g. " + "apt-get install libperl-dev (ubuntu), yum install " + "perl-ExtUtils-Embed (Fedora), etc.\n"); } #else void setup_perl_scripting(void) From bab81b624e970f1138535a465ad2b26b6bb0dd6c Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 1 Dec 2009 14:04:49 +0800 Subject: [PATCH 444/470] perf annotate: Fix perf data parsing perf-annotate doesn't parse perf.data correctly in that it doesn't read perf header. Fix this by using mmap_dispatch_perf_file(). 
Before: TOTAL events: 17565 MMAP events: 3221 LOST events: 10 COMM events: 235 EXIT events: 2 THROTTLE events: 1 UNTHROTTLE events: 2 FORK events: 10 READ events: 1 SAMPLE events: 14083 After: TOTAL events: 17290 MMAP events: 3203 LOST events: 0 COMM events: 234 EXIT events: 1 THROTTLE events: 0 UNTHROTTLE events: 0 FORK events: 0 READ events: 0 SAMPLE events: 13852 Signed-off-by: Li Zefan Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Arjan van de Ven LKML-Reference: <4B14B201.9030708@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 143 +++++----------------------------- tools/perf/util/event.c | 3 +- 2 files changed, 19 insertions(+), 127 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 7f85c6e159a..0bf2e8f9af5 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -25,19 +25,16 @@ #include "util/thread.h" #include "util/sort.h" #include "util/hist.h" +#include "util/data_map.h" static char const *input_name = "perf.data"; static int force; -static int input; static int full_paths; static int print_line; -static unsigned long page_size; -static unsigned long mmap_window = 32; - struct sym_hist { u64 sum; u64 ip[0]; @@ -156,35 +153,6 @@ static int process_sample_event(event_t *event) return 0; } -static int event__process(event_t *self) -{ - switch (self->header.type) { - case PERF_RECORD_SAMPLE: - return process_sample_event(self); - - case PERF_RECORD_MMAP: - return event__process_mmap(self); - - case PERF_RECORD_COMM: - return event__process_comm(self); - - case PERF_RECORD_FORK: - return event__process_task(self); - /* - * We dont process them right now but they are fine: - */ - - case PERF_RECORD_THROTTLE: - case PERF_RECORD_UNTHROTTLE: - return 0; - - default: - return -1; - } - - return 0; -} - static int parse_line(FILE *file, struct hist_entry *he, u64 len) { struct symbol *sym = he->sym; @@ -485,99 +453,26 @@ static void find_annotations(void) } } +static struct perf_file_handler file_handler = { + .process_sample_event = process_sample_event, + .process_mmap_event = event__process_mmap, + .process_comm_event = event__process_comm, + .process_fork_event = event__process_task, +}; + static int __cmd_annotate(void) { - int ret, rc = EXIT_FAILURE; - unsigned long offset = 0; - unsigned long head = 0; - struct stat input_stat; - event_t *event; - uint32_t size; - char *buf; + struct perf_header *header; + struct thread *idle; + int ret; - register_idle_thread(); - - input = open(input_name, O_RDONLY); - if (input < 0) { - perror("failed to open file"); - exit(-1); - } - - ret = fstat(input, &input_stat); - if (ret < 0) { - perror("failed to stat file"); - exit(-1); - } - - if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) { - fprintf(stderr, "file: %s not owned by current user or root\n", input_name); - exit(-1); - } - - if (!input_stat.st_size) { - fprintf(stderr, "zero-sized file, nothing to do!\n"); - exit(0); - } - -remap: - buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, - MAP_SHARED, input, offset); - if (buf == MAP_FAILED) { - perror("failed to mmap file"); - exit(-1); - } - -more: - event = (event_t *)(buf + head); - - size = event->header.size; - if (!size) - size = 8; - - if (head + event->header.size >= page_size * mmap_window) { - unsigned long shift = page_size * (head / page_size); - int munmap_ret; - - munmap_ret = munmap(buf, page_size * mmap_window); - assert(munmap_ret == 0); - - offset += shift; - head 
-= shift; - goto remap; - } - - size = event->header.size; - - dump_printf("%p [%p]: event: %d\n", - (void *)(offset + head), - (void *)(long)event->header.size, - event->header.type); - - if (!size || event__process(event) < 0) { - - dump_printf("%p [%p]: skipping unknown header type: %d\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->header.type); - /* - * assume we lost track of the stream, check alignment, and - * increment a single u64 in the hope to catch on again 'soon'. - */ - - if (unlikely(head & 7)) - head &= ~7ULL; - - size = 8; - } - - head += size; - - if (offset + head < (unsigned long)input_stat.st_size) - goto more; - - rc = EXIT_SUCCESS; - close(input); + idle = register_idle_thread(); + register_perf_file_handler(&file_handler); + ret = mmap_dispatch_perf_file(&header, input_name, 0, 0, + &event__cwdlen, &event__cwd); + if (ret) + return ret; if (dump_trace) { event__print_totals(); @@ -595,7 +490,7 @@ more: find_annotations(); - return rc; + return ret; } static const char * const annotate_usage[] = { @@ -644,8 +539,6 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used) if (symbol__init(&symbol_conf) < 0) return -1; - page_size = getpagesize(); - argc = parse_options(argc, argv, options, annotate_usage, 0); setup_sorting(); diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 233d7ad9bd7..414b89d1bde 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -186,8 +186,7 @@ int event__process_comm(event_t *self) { struct thread *thread = threads__findnew(self->comm.pid); - dump_printf("PERF_RECORD_COMM: %s:%d\n", - self->comm.comm, self->comm.pid); + dump_printf(": %s:%d\n", self->comm.comm, self->comm.pid); if (thread == NULL || thread__set_comm(thread, self->comm.comm)) { dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); From 5cbd08056142dcb2aea0dca7261afcb810a63c55 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 1 Dec 2009 14:05:16 +0800 Subject: [PATCH 445/470] perf timechart: Remove open-coded event parsing code Convert builtin-timechart.c to mmap_dispatch_perf_file() + perf_file_handler. 
Signed-off-by: Li Zefan Acked-by: Arjan van de Ven Cc: Frederic Weisbecker Cc: Arnaldo Carvalho de Melo Cc: Peter Zijlstra LKML-Reference: <4B14B21C.2040406@cn.fujitsu.com> [ v2: cleaned up the printout, fixed a whitespace detail ] Signed-off-by: Ingo Molnar --- tools/perf/builtin-timechart.c | 176 +++++++-------------------------- 1 file changed, 33 insertions(+), 143 deletions(-) diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index dd4d82ac7aa..cb58b6605fc 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c @@ -29,14 +29,14 @@ #include "util/header.h" #include "util/parse-options.h" #include "util/parse-events.h" +#include "util/event.h" +#include "util/data_map.h" #include "util/svghelper.h" static char const *input_name = "perf.data"; static char const *output_name = "output.svg"; -static unsigned long page_size; -static unsigned long mmap_window = 32; static u64 sample_type; static unsigned int numcpus; @@ -49,8 +49,6 @@ static u64 first_time, last_time; static int power_only; -static struct perf_header *header; - struct per_pid; struct per_pidcomm; @@ -156,9 +154,9 @@ struct sample_wrapper *all_samples; struct process_filter; struct process_filter { - char *name; - int pid; - struct process_filter *next; + char *name; + int pid; + struct process_filter *next; }; static struct process_filter *process_filter; @@ -1045,36 +1043,6 @@ static void write_svg_file(const char *filename) svg_close(); } -static int -process_event(event_t *event) -{ - - switch (event->header.type) { - - case PERF_RECORD_COMM: - return process_comm_event(event); - case PERF_RECORD_FORK: - return process_fork_event(event); - case PERF_RECORD_EXIT: - return process_exit_event(event); - case PERF_RECORD_SAMPLE: - return queue_sample_event(event); - - /* - * We dont process them right now but they are fine: - */ - case PERF_RECORD_MMAP: - case PERF_RECORD_THROTTLE: - case PERF_RECORD_UNTHROTTLE: - return 0; - - default: - return -1; - } - - return 0; -} - static void process_samples(void) { struct sample_wrapper *cursor; @@ -1090,114 +1058,38 @@ static void process_samples(void) } } +static int sample_type_check(u64 type) +{ + sample_type = type; + + if (!(sample_type & PERF_SAMPLE_RAW)) { + fprintf(stderr, "No trace samples found in the file.\n" + "Have you used 'perf timechart record' to record it?\n"); + return -1; + } + + return 0; +} + +static struct perf_file_handler file_handler = { + .process_comm_event = process_comm_event, + .process_fork_event = process_fork_event, + .process_exit_event = process_exit_event, + .process_sample_event = queue_sample_event, + .sample_type_check = sample_type_check, +}; static int __cmd_timechart(void) { - int err, rc = EXIT_FAILURE; - unsigned long offset = 0; - unsigned long head, shift; - struct stat statbuf; - event_t *event; - uint32_t size; - char *buf; - int input; + struct perf_header *header; + int ret; - input = open(input_name, O_RDONLY); - if (input < 0) { - fprintf(stderr, " failed to open file: %s", input_name); - if (!strcmp(input_name, "perf.data")) - fprintf(stderr, " (try 'perf record' first)"); - fprintf(stderr, "\n"); - exit(-1); - } - - err = fstat(input, &statbuf); - if (err < 0) { - perror("failed to stat file"); - exit(-1); - } - - if (!statbuf.st_size) { - fprintf(stderr, "zero-sized file, nothing to do!\n"); - exit(0); - } - - header = perf_header__new(); - if (header == NULL) - return -ENOMEM; - - err = perf_header__read(header, input); - if (err < 0) { - perf_header__delete(header); - return 
err; - } - - head = header->data_offset; - - sample_type = perf_header__sample_type(header); - - shift = page_size * (head / page_size); - offset += shift; - head -= shift; - -remap: - buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, - MAP_SHARED, input, offset); - if (buf == MAP_FAILED) { - perror("failed to mmap file"); - exit(-1); - } - -more: - event = (event_t *)(buf + head); - - size = event->header.size; - if (!size) - size = 8; - - if (head + event->header.size >= page_size * mmap_window) { - int ret2; - - shift = page_size * (head / page_size); - - ret2 = munmap(buf, page_size * mmap_window); - assert(ret2 == 0); - - offset += shift; - head -= shift; - goto remap; - } - - size = event->header.size; - - if (!size || process_event(event) < 0) { - pr_warning("%p [%p]: skipping unknown header type: %d\n", - (void *)(offset + head), - (void *)(long)(event->header.size), - event->header.type); - /* - * assume we lost track of the stream, check alignment, and - * increment a single u64 in the hope to catch on again 'soon'. - */ - - if (unlikely(head & 7)) - head &= ~7ULL; - - size = 8; - } - - head += size; - - if (offset + head >= header->data_offset + header->data_size) - goto done; - - if (offset + head < (unsigned long)statbuf.st_size) - goto more; - -done: - rc = EXIT_SUCCESS; - close(input); + register_perf_file_handler(&file_handler); + ret = mmap_dispatch_perf_file(&header, input_name, 0, 0, + &event__cwdlen, &event__cwd); + if (ret) + return EXIT_FAILURE; process_samples(); @@ -1210,7 +1102,7 @@ done: pr_info("Written %2.1f seconds of trace to %s.\n", (last_time - first_time) / 1000000000.0, output_name); - return rc; + return EXIT_SUCCESS; } static const char * const timechart_usage[] = { @@ -1277,8 +1169,6 @@ int cmd_timechart(int argc, const char **argv, const char *prefix __used) { symbol__init(0); - page_size = getpagesize(); - argc = parse_options(argc, argv, options, timechart_usage, PARSE_OPT_STOP_AT_NON_OPTION); From ba8665d7dd95eb6093ee06f8f624b6acb1e73206 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 30 Nov 2009 19:19:20 -0500 Subject: [PATCH 446/470] trace_kprobes: Fix a memory leak bug and check kstrdup() return value Fix a memory leak case in create_trace_probe(). When an argument is too long (> MAX_ARGSTR_LEN), it just jumps to error path. In that case tp->args[i].name is not released. This also fixes a bug to check kstrdup()'s return value. Signed-off-by: Masami Hiramatsu Cc: systemtap Cc: DLE Cc: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Christoph Hellwig Cc: Frank Ch. 
Eigler Cc: Frederic Weisbecker Cc: Jason Baron Cc: K.Prasad Cc: Peter Zijlstra Cc: Srikar Dronamraju Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <20091201001919.10235.56455.stgit@harusame> Signed-off-by: Ingo Molnar --- kernel/trace/trace_kprobe.c | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 72d0c65c867..aff5f80b59b 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -483,7 +483,8 @@ static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return) return ret; } -static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return) +/* Recursive argument parser */ +static int __parse_probe_arg(char *arg, struct fetch_func *ff, int is_return) { int ret = 0; unsigned long param; @@ -543,7 +544,7 @@ static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return) if (!id) return -ENOMEM; id->offset = offset; - ret = parse_probe_arg(arg, &id->orig, is_return); + ret = __parse_probe_arg(arg, &id->orig, is_return); if (ret) kfree(id); else { @@ -560,6 +561,16 @@ static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return) return ret; } +/* String length checking wrapper */ +static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return) +{ + if (strlen(arg) > MAX_ARGSTR_LEN) { + pr_info("Argument is too long.: %s\n", arg); + return -ENOSPC; + } + return __parse_probe_arg(arg, ff, is_return); +} + /* Return 1 if name is reserved or already used by another argument */ static int conflict_field_name(const char *name, struct probe_arg *args, int narg) @@ -698,13 +709,14 @@ static int create_trace_probe(int argc, char **argv) } tp->args[i].name = kstrdup(argv[i], GFP_KERNEL); - - /* Parse fetch argument */ - if (strlen(arg) > MAX_ARGSTR_LEN) { - pr_info("Argument%d(%s) is too long.\n", i, arg); - ret = -ENOSPC; + if (!tp->args[i].name) { + pr_info("Failed to allocate argument%d name '%s'.\n", + i, argv[i]); + ret = -ENOMEM; goto error; } + + /* Parse fetch argument */ ret = parse_probe_arg(arg, &tp->args[i].fetch, is_return); if (ret) { pr_info("Parse error at argument%d. (%d)\n", i, ret); From f41b1e43c41e99c39a2222578a7806032c045c79 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 30 Nov 2009 19:19:27 -0500 Subject: [PATCH 447/470] perf probe: Change a debugging message from pr_info to pr_debug Change annoying debug-info using notice from pr_info() to pr_debug(), since the message always printed when user adds a probe point which requires debug-info. Signed-off-by: Masami Hiramatsu Cc: systemtap Cc: DLE Cc: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Christoph Hellwig Cc: Frank Ch. 
Eigler Cc: Frederic Weisbecker Cc: Jason Baron Cc: K.Prasad Cc: Peter Zijlstra Cc: Srikar Dronamraju Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <20091201001927.10235.63645.stgit@harusame> Signed-off-by: Ingo Molnar --- tools/perf/builtin-probe.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index a2f6daf01ec..4e418afd670 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -351,7 +351,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) #ifdef NO_LIBDWARF semantic_error("Debuginfo-analysis is not supported"); #else /* !NO_LIBDWARF */ - pr_info("Some probes require debuginfo.\n"); + pr_debug("Some probes require debuginfo.\n"); if (session.vmlinux) fd = open(session.vmlinux, O_RDONLY); From 57d250df7deb3e1742fbf3cc3230119731109552 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 30 Nov 2009 19:19:34 -0500 Subject: [PATCH 448/470] perf probe: Add probe-finder.h without libdwarf Add probe-finder.h as LIB_H without libdwarf, because that header is included even if no libdwarf. Signed-off-by: Masami Hiramatsu Cc: systemtap Cc: DLE Cc: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Frederic Weisbecker Cc: Jason Baron Cc: K.Prasad Cc: Peter Zijlstra Cc: Srikar Dronamraju Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <20091201001934.10235.44656.stgit@harusame> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index f1537a94a05..76e4b04d408 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -369,6 +369,7 @@ LIB_H += util/sort.h LIB_H += util/hist.h LIB_H += util/thread.h LIB_H += util/data_map.h +LIB_H += util/probe-finder.h LIB_OBJS += util/abspath.o LIB_OBJS += util/alias.o @@ -485,7 +486,6 @@ ifneq ($(shell sh -c "(echo '\#include '; echo '\#include Date: Mon, 30 Nov 2009 19:19:43 -0500 Subject: [PATCH 449/470] perf probe: Fix argv array size in probe parser Since the syntax has been changed, probe definition needs parameters less than MAX_PROBE_ARGS + 1 (probe-point + arguments). Signed-off-by: Masami Hiramatsu Cc: systemtap Cc: DLE Cc: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Frederic Weisbecker Cc: Jason Baron Cc: K.Prasad Cc: Peter Zijlstra Cc: Srikar Dronamraju Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <20091201001943.10235.80367.stgit@harusame> Signed-off-by: Ingo Molnar --- tools/perf/builtin-probe.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index 4e418afd670..510fdd4e5d3 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -151,7 +151,7 @@ static void parse_probe_point(char *arg, struct probe_point *pp) /* Parse an event definition. Note that any error must die. 
*/ static void parse_probe_event(const char *str) { - char *argv[MAX_PROBE_ARGS + 2]; /* Event + probe + args */ + char *argv[MAX_PROBE_ARGS + 1]; /* probe + args */ int argc, i; struct probe_point *pp = &session.probes[session.nr_probe]; @@ -169,6 +169,9 @@ static void parse_probe_event(const char *str) /* Add an argument */ if (*str != '\0') { const char *s = str; + /* Check the limit number of arguments */ + if (argc == MAX_PROBE_ARGS + 1) + semantic_error("Too many arguments"); /* Skip the argument */ while (!isspace(*str) && *str != '\0') @@ -178,9 +181,9 @@ static void parse_probe_event(const char *str) argv[argc] = strndup(s, str - s); if (argv[argc] == NULL) die("strndup"); - if (++argc == MAX_PROBE_ARGS) - semantic_error("Too many arguments"); - pr_debug("argv[%d]=%s\n", argc, argv[argc - 1]); + pr_debug("argv[%d]=%s\n", argc, argv[argc]); + argc++; + } } while (*str != '\0'); if (!argc) From 934b1f5fd0c9a2ddde5a4487695c126243d9a42b Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 30 Nov 2009 19:19:51 -0500 Subject: [PATCH 450/470] perf probe: Fix probe array index for multiple probe points Fix the index of formatted probe array for multiple probe points, which should be probes[i] instead of probes[0]. Signed-off-by: Masami Hiramatsu Cc: systemtap Cc: DLE Cc: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Frederic Weisbecker Cc: Jason Baron Cc: K.Prasad Cc: Peter Zijlstra Cc: Srikar Dronamraju Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <20091201001950.10235.54781.stgit@harusame> Signed-off-by: Ingo Molnar --- tools/perf/builtin-probe.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index 510fdd4e5d3..5f47e624e57 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -428,7 +428,7 @@ end_dwarf: pp->retprobe ? 'r' : 'p', PERFPROBE_GROUP, pp->function, pp->offset, i, - pp->probes[0]); + pp->probes[i]); write_new_event(fd, buf); } } From 50656eec82684d03add0f4f4b4875a20bd8f9755 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 30 Nov 2009 19:19:58 -0500 Subject: [PATCH 451/470] perf probe: Move probe event utility functions to probe-event.c Split probe event (kprobe-events and perf probe events) utility functions from builtin-probe.c to probe-event.c. Signed-off-by: Masami Hiramatsu Cc: systemtap Cc: DLE Cc: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Christoph Hellwig Cc: Frank Ch. 
Eigler Cc: Frederic Weisbecker Cc: Jason Baron Cc: K.Prasad Cc: Peter Zijlstra Cc: Srikar Dronamraju Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <20091201001958.10235.90243.stgit@harusame> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 2 + tools/perf/builtin-probe.c | 227 ++-------------------------- tools/perf/util/probe-event.c | 273 ++++++++++++++++++++++++++++++++++ tools/perf/util/probe-event.h | 10 ++ 4 files changed, 294 insertions(+), 218 deletions(-) create mode 100644 tools/perf/util/probe-event.c create mode 100644 tools/perf/util/probe-event.h diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 76e4b04d408..f8537cf812c 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -370,6 +370,7 @@ LIB_H += util/hist.h LIB_H += util/thread.h LIB_H += util/data_map.h LIB_H += util/probe-finder.h +LIB_H += util/probe-event.h LIB_OBJS += util/abspath.o LIB_OBJS += util/alias.o @@ -412,6 +413,7 @@ LIB_OBJS += util/svghelper.o LIB_OBJS += util/sort.o LIB_OBJS += util/hist.o LIB_OBJS += util/data_map.o +LIB_OBJS += util/probe-event.o BUILTIN_OBJS += builtin-annotate.o diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index 5f47e624e57..bf20df2e816 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -40,6 +40,7 @@ #include "util/parse-options.h" #include "util/parse-events.h" /* For debugfs_path */ #include "util/probe-finder.h" +#include "util/probe-event.h" /* Default vmlinux search paths */ #define NR_SEARCH_PATH 3 @@ -51,8 +52,6 @@ const char *default_search_path[NR_SEARCH_PATH] = { #define MAX_PATH_LEN 256 #define MAX_PROBES 128 -#define MAX_PROBE_ARGS 128 -#define PERFPROBE_GROUP "probe" /* Session management structure */ static struct { @@ -63,155 +62,17 @@ static struct { struct probe_point probes[MAX_PROBES]; } session; -#define semantic_error(msg ...) die("Semantic error :" msg) - -/* Parse probe point. 
Return 1 if return probe */ -static void parse_probe_point(char *arg, struct probe_point *pp) -{ - char *ptr, *tmp; - char c, nc = 0; - /* - * - * perf probe SRC:LN - * perf probe FUNC[+OFFS|%return][@SRC] - */ - - ptr = strpbrk(arg, ":+@%"); - if (ptr) { - nc = *ptr; - *ptr++ = '\0'; - } - - /* Check arg is function or file and copy it */ - if (strchr(arg, '.')) /* File */ - pp->file = strdup(arg); - else /* Function */ - pp->function = strdup(arg); - DIE_IF(pp->file == NULL && pp->function == NULL); - - /* Parse other options */ - while (ptr) { - arg = ptr; - c = nc; - ptr = strpbrk(arg, ":+@%"); - if (ptr) { - nc = *ptr; - *ptr++ = '\0'; - } - switch (c) { - case ':': /* Line number */ - pp->line = strtoul(arg, &tmp, 0); - if (*tmp != '\0') - semantic_error("There is non-digit charactor" - " in line number."); - break; - case '+': /* Byte offset from a symbol */ - pp->offset = strtoul(arg, &tmp, 0); - if (*tmp != '\0') - semantic_error("There is non-digit charactor" - " in offset."); - break; - case '@': /* File name */ - if (pp->file) - semantic_error("SRC@SRC is not allowed."); - pp->file = strdup(arg); - DIE_IF(pp->file == NULL); - if (ptr) - semantic_error("@SRC must be the last " - "option."); - break; - case '%': /* Probe places */ - if (strcmp(arg, "return") == 0) { - pp->retprobe = 1; - } else /* Others not supported yet */ - semantic_error("%%%s is not supported.", arg); - break; - default: - DIE_IF("Program has a bug."); - break; - } - } - - /* Exclusion check */ - if (pp->line && pp->offset) - semantic_error("Offset can't be used with line number."); - if (!pp->line && pp->file && !pp->function) - semantic_error("File always requires line number."); - if (pp->offset && !pp->function) - semantic_error("Offset requires an entry function."); - if (pp->retprobe && !pp->function) - semantic_error("Return probe requires an entry function."); - if ((pp->offset || pp->line) && pp->retprobe) - semantic_error("Offset/Line can't be used with return probe."); - - pr_debug("symbol:%s file:%s line:%d offset:%d, return:%d\n", - pp->function, pp->file, pp->line, pp->offset, pp->retprobe); -} - /* Parse an event definition. Note that any error must die. 
*/ static void parse_probe_event(const char *str) { - char *argv[MAX_PROBE_ARGS + 1]; /* probe + args */ - int argc, i; struct probe_point *pp = &session.probes[session.nr_probe]; pr_debug("probe-definition(%d): %s\n", session.nr_probe, str); if (++session.nr_probe == MAX_PROBES) - semantic_error("Too many probes"); + die("Too many probes (> %d) are specified.", MAX_PROBES); - /* Separate arguments, similar to argv_split */ - argc = 0; - do { - /* Skip separators */ - while (isspace(*str)) - str++; - - /* Add an argument */ - if (*str != '\0') { - const char *s = str; - /* Check the limit number of arguments */ - if (argc == MAX_PROBE_ARGS + 1) - semantic_error("Too many arguments"); - - /* Skip the argument */ - while (!isspace(*str) && *str != '\0') - str++; - - /* Duplicate the argument */ - argv[argc] = strndup(s, str - s); - if (argv[argc] == NULL) - die("strndup"); - pr_debug("argv[%d]=%s\n", argc, argv[argc]); - argc++; - - } - } while (*str != '\0'); - if (!argc) - semantic_error("An empty argument."); - - /* Parse probe point */ - parse_probe_point(argv[0], pp); - free(argv[0]); - if (pp->file || pp->line) - session.need_dwarf = 1; - - /* Copy arguments */ - pp->nr_args = argc - 1; - if (pp->nr_args > 0) { - pp->args = (char **)malloc(sizeof(char *) * pp->nr_args); - if (!pp->args) - die("malloc"); - memcpy(pp->args, &argv[1], sizeof(char *) * pp->nr_args); - } - - /* Ensure return probe has no C argument */ - for (i = 0; i < pp->nr_args; i++) - if (is_c_varname(pp->args[i])) { - if (pp->retprobe) - semantic_error("You can't specify local" - " variable for kretprobe"); - session.need_dwarf = 1; - } + /* Parse perf-probe event into probe_point */ + session.need_dwarf = parse_perf_probe_event(str, pp); pr_debug("%d arguments\n", pp->nr_args); } @@ -288,59 +149,15 @@ static const struct option options[] = { "\t\tALN:\tAbsolute line number in file.\n" "\t\tARG:\tProbe argument (local variable name or\n" #endif - "\t\t\tkprobe-tracer argument format is supported.)\n", + "\t\t\tkprobe-tracer argument format.)\n", opt_add_probe_event), OPT_END() }; -static int write_new_event(int fd, const char *buf) -{ - int ret; - - ret = write(fd, buf, strlen(buf)); - if (ret <= 0) - die("Failed to create event."); - else - printf("Added new event: %s\n", buf); - - return ret; -} - -#define MAX_CMDLEN 256 - -static int synthesize_probe_event(struct probe_point *pp) -{ - char *buf; - int i, len, ret; - pp->probes[0] = buf = zalloc(MAX_CMDLEN); - if (!buf) - die("Failed to allocate memory by zalloc."); - ret = snprintf(buf, MAX_CMDLEN, "%s+%d", pp->function, pp->offset); - if (ret <= 0 || ret >= MAX_CMDLEN) - goto error; - len = ret; - - for (i = 0; i < pp->nr_args; i++) { - ret = snprintf(&buf[len], MAX_CMDLEN - len, " %s", - pp->args[i]); - if (ret <= 0 || ret >= MAX_CMDLEN - len) - goto error; - len += ret; - } - pp->found = 1; - return pp->found; -error: - free(pp->probes[0]); - if (ret > 0) - ret = -E2BIG; - return ret; -} - int cmd_probe(int argc, const char **argv, const char *prefix __used) { int i, j, fd, ret; struct probe_point *pp; - char buf[MAX_CMDLEN]; argc = parse_options(argc, argv, options, probe_usage, PARSE_OPT_STOP_AT_NON_OPTION); @@ -352,7 +169,7 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) if (session.need_dwarf) #ifdef NO_LIBDWARF - semantic_error("Debuginfo-analysis is not supported"); + die("Debuginfo-analysis is not supported"); #else /* !NO_LIBDWARF */ pr_debug("Some probes require debuginfo.\n"); @@ -398,41 +215,15 @@ end_dwarf: if (pp->found) /* 
This probe is already found. */ continue; - ret = synthesize_probe_event(pp); + ret = synthesize_trace_kprobe_event(pp); if (ret == -E2BIG) - semantic_error("probe point is too long."); + die("probe point definition becomes too long."); else if (ret < 0) die("Failed to synthesize a probe point."); } /* Settng up probe points */ - snprintf(buf, MAX_CMDLEN, "%s/../kprobe_events", debugfs_path); - fd = open(buf, O_WRONLY, O_APPEND); - if (fd < 0) { - if (errno == ENOENT) - die("kprobe_events file does not exist - please rebuild with CONFIG_KPROBE_TRACER."); - else - die("Could not open kprobe_events file: %s", - strerror(errno)); - } - for (j = 0; j < session.nr_probe; j++) { - pp = &session.probes[j]; - if (pp->found == 1) { - snprintf(buf, MAX_CMDLEN, "%c:%s/%s_%x %s\n", - pp->retprobe ? 'r' : 'p', PERFPROBE_GROUP, - pp->function, pp->offset, pp->probes[0]); - write_new_event(fd, buf); - } else - for (i = 0; i < pp->found; i++) { - snprintf(buf, MAX_CMDLEN, "%c:%s/%s_%x_%d %s\n", - pp->retprobe ? 'r' : 'p', - PERFPROBE_GROUP, - pp->function, pp->offset, i, - pp->probes[i]); - write_new_event(fd, buf); - } - } - close(fd); + add_trace_kprobe_events(session.probes, session.nr_probe); return 0; } diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c new file mode 100644 index 00000000000..7335a3b5e49 --- /dev/null +++ b/tools/perf/util/probe-event.c @@ -0,0 +1,273 @@ +/* + * probe-event.c : perf-probe definition to kprobe_events format converter + * + * Written by Masami Hiramatsu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef _GNU_SOURCE +#include "event.h" +#include "debug.h" +#include "parse-events.h" /* For debugfs_path */ +#include "probe-event.h" + +#define MAX_CMDLEN 256 +#define MAX_PROBE_ARGS 128 +#define PERFPROBE_GROUP "probe" + +#define semantic_error(msg ...) die("Semantic error :" msg) + +/* Parse probepoint definition. 
*/ +static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp) +{ + char *ptr, *tmp; + char c, nc = 0; + /* + * + * perf probe SRC:LN + * perf probe FUNC[+OFFS|%return][@SRC] + */ + + ptr = strpbrk(arg, ":+@%"); + if (ptr) { + nc = *ptr; + *ptr++ = '\0'; + } + + /* Check arg is function or file and copy it */ + if (strchr(arg, '.')) /* File */ + pp->file = strdup(arg); + else /* Function */ + pp->function = strdup(arg); + DIE_IF(pp->file == NULL && pp->function == NULL); + + /* Parse other options */ + while (ptr) { + arg = ptr; + c = nc; + ptr = strpbrk(arg, ":+@%"); + if (ptr) { + nc = *ptr; + *ptr++ = '\0'; + } + switch (c) { + case ':': /* Line number */ + pp->line = strtoul(arg, &tmp, 0); + if (*tmp != '\0') + semantic_error("There is non-digit charactor" + " in line number."); + break; + case '+': /* Byte offset from a symbol */ + pp->offset = strtoul(arg, &tmp, 0); + if (*tmp != '\0') + semantic_error("There is non-digit charactor" + " in offset."); + break; + case '@': /* File name */ + if (pp->file) + semantic_error("SRC@SRC is not allowed."); + pp->file = strdup(arg); + DIE_IF(pp->file == NULL); + if (ptr) + semantic_error("@SRC must be the last " + "option."); + break; + case '%': /* Probe places */ + if (strcmp(arg, "return") == 0) { + pp->retprobe = 1; + } else /* Others not supported yet */ + semantic_error("%%%s is not supported.", arg); + break; + default: + DIE_IF("Program has a bug."); + break; + } + } + + /* Exclusion check */ + if (pp->line && pp->offset) + semantic_error("Offset can't be used with line number."); + + if (!pp->line && pp->file && !pp->function) + semantic_error("File always requires line number."); + + if (pp->offset && !pp->function) + semantic_error("Offset requires an entry function."); + + if (pp->retprobe && !pp->function) + semantic_error("Return probe requires an entry function."); + + if ((pp->offset || pp->line) && pp->retprobe) + semantic_error("Offset/Line can't be used with return probe."); + + pr_debug("symbol:%s file:%s line:%d offset:%d, return:%d\n", + pp->function, pp->file, pp->line, pp->offset, pp->retprobe); +} + +/* Parse perf-probe event definition */ +int parse_perf_probe_event(const char *str, struct probe_point *pp) +{ + char *argv[MAX_PROBE_ARGS + 1]; /* probe + args */ + int argc, i, need_dwarf = 0; + + /* Separate arguments, similar to argv_split */ + argc = 0; + do { + /* Skip separators */ + while (isspace(*str)) + str++; + + /* Add an argument */ + if (*str != '\0') { + const char *s = str; + /* Check the limit number of arguments */ + if (argc == MAX_PROBE_ARGS + 1) + semantic_error("Too many arguments"); + + /* Skip the argument */ + while (!isspace(*str) && *str != '\0') + str++; + + /* Duplicate the argument */ + argv[argc] = strndup(s, str - s); + if (argv[argc] == NULL) + die("strndup"); + pr_debug("argv[%d]=%s\n", argc, argv[argc]); + argc++; + } + } while (*str != '\0'); + if (!argc) + semantic_error("An empty argument."); + + /* Parse probe point */ + parse_perf_probe_probepoint(argv[0], pp); + free(argv[0]); + if (pp->file || pp->line) + need_dwarf = 1; + + /* Copy arguments */ + pp->nr_args = argc - 1; + if (pp->nr_args > 0) { + pp->args = (char **)malloc(sizeof(char *) * pp->nr_args); + if (!pp->args) + die("malloc"); + memcpy(pp->args, &argv[1], sizeof(char *) * pp->nr_args); + } + + /* Ensure return probe has no C argument */ + for (i = 0; i < pp->nr_args; i++) + if (is_c_varname(pp->args[i])) { + if (pp->retprobe) + semantic_error("You can't specify local" + " variable for kretprobe"); + 
need_dwarf = 1; + } + + return need_dwarf; +} + +int synthesize_trace_kprobe_event(struct probe_point *pp) +{ + char *buf; + int i, len, ret; + + pp->probes[0] = buf = zalloc(MAX_CMDLEN); + if (!buf) + die("Failed to allocate memory by zalloc."); + ret = snprintf(buf, MAX_CMDLEN, "%s+%d", pp->function, pp->offset); + if (ret <= 0 || ret >= MAX_CMDLEN) + goto error; + len = ret; + + for (i = 0; i < pp->nr_args; i++) { + ret = snprintf(&buf[len], MAX_CMDLEN - len, " %s", + pp->args[i]); + if (ret <= 0 || ret >= MAX_CMDLEN - len) + goto error; + len += ret; + } + pp->found = 1; + + return pp->found; +error: + free(pp->probes[0]); + if (ret > 0) + ret = -E2BIG; + + return ret; +} + +static int write_trace_kprobe_event(int fd, const char *buf) +{ + int ret; + + ret = write(fd, buf, strlen(buf)); + if (ret <= 0) + die("Failed to create event."); + else + printf("Added new event: %s\n", buf); + + return ret; +} + +void add_trace_kprobe_events(struct probe_point *probes, int nr_probes) +{ + int i, j, fd; + struct probe_point *pp; + char buf[MAX_CMDLEN]; + + snprintf(buf, MAX_CMDLEN, "%s/../kprobe_events", debugfs_path); + fd = open(buf, O_WRONLY, O_APPEND); + if (fd < 0) { + if (errno == ENOENT) + die("kprobe_events file does not exist -" + " please rebuild with CONFIG_KPROBE_TRACER."); + else + die("Could not open kprobe_events file: %s", + strerror(errno)); + } + + for (j = 0; j < nr_probes; j++) { + pp = probes + j; + if (pp->found == 1) { + snprintf(buf, MAX_CMDLEN, "%c:%s/%s_%x %s\n", + pp->retprobe ? 'r' : 'p', PERFPROBE_GROUP, + pp->function, pp->offset, pp->probes[0]); + write_trace_kprobe_event(fd, buf); + } else + for (i = 0; i < pp->found; i++) { + snprintf(buf, MAX_CMDLEN, "%c:%s/%s_%x_%d %s\n", + pp->retprobe ? 'r' : 'p', + PERFPROBE_GROUP, + pp->function, pp->offset, i, + pp->probes[i]); + write_trace_kprobe_event(fd, buf); + } + } + close(fd); +} diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h new file mode 100644 index 00000000000..0089c455eca --- /dev/null +++ b/tools/perf/util/probe-event.h @@ -0,0 +1,10 @@ +#ifndef _PROBE_EVENT_H +#define _PROBE_EVENT_H + +#include "probe-finder.h" + +extern int parse_perf_probe_event(const char *str, struct probe_point *pp); +extern int synthesize_trace_kprobe_event(struct probe_point *pp); +extern void add_trace_kprobe_events(struct probe_point *probes, int nr_probes); + +#endif /*_PROBE_EVENT_H */ From e1c01d61a98703fcc80d15b8068ec36d5a215f7e Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 30 Nov 2009 19:20:05 -0500 Subject: [PATCH 452/470] perf probe: Add argv_split() from lib/argv_split.c Add argv_split() ported from lib/argv_split.c to string.c and use it in util/probe-event.c. Signed-off-by: Masami Hiramatsu Cc: systemtap Cc: DLE Cc: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Christoph Hellwig Cc: Frank Ch. 
Eigler Cc: Frederic Weisbecker Cc: Jason Baron Cc: K.Prasad Cc: Peter Zijlstra Cc: Srikar Dronamraju Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <20091201002005.10235.55602.stgit@harusame> Signed-off-by: Ingo Molnar --- tools/perf/util/probe-event.c | 55 +++++------------- tools/perf/util/string.c | 101 ++++++++++++++++++++++++++++++++++ tools/perf/util/string.h | 2 + 3 files changed, 118 insertions(+), 40 deletions(-) diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 7335a3b5e49..e3a683ab976 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -32,6 +32,7 @@ #undef _GNU_SOURCE #include "event.h" +#include "string.h" #include "debug.h" #include "parse-events.h" /* For debugfs_path */ #include "probe-event.h" @@ -132,62 +133,36 @@ static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp) /* Parse perf-probe event definition */ int parse_perf_probe_event(const char *str, struct probe_point *pp) { - char *argv[MAX_PROBE_ARGS + 1]; /* probe + args */ + char **argv; int argc, i, need_dwarf = 0; - /* Separate arguments, similar to argv_split */ - argc = 0; - do { - /* Skip separators */ - while (isspace(*str)) - str++; - - /* Add an argument */ - if (*str != '\0') { - const char *s = str; - /* Check the limit number of arguments */ - if (argc == MAX_PROBE_ARGS + 1) - semantic_error("Too many arguments"); - - /* Skip the argument */ - while (!isspace(*str) && *str != '\0') - str++; - - /* Duplicate the argument */ - argv[argc] = strndup(s, str - s); - if (argv[argc] == NULL) - die("strndup"); - pr_debug("argv[%d]=%s\n", argc, argv[argc]); - argc++; - } - } while (*str != '\0'); - if (!argc) - semantic_error("An empty argument."); + argv = argv_split(str, &argc); + if (!argv) + die("argv_split failed."); + if (argc > MAX_PROBE_ARGS + 1) + semantic_error("Too many arguments"); /* Parse probe point */ parse_perf_probe_probepoint(argv[0], pp); - free(argv[0]); if (pp->file || pp->line) need_dwarf = 1; - /* Copy arguments */ + /* Copy arguments and ensure return probe has no C argument */ pp->nr_args = argc - 1; - if (pp->nr_args > 0) { - pp->args = (char **)malloc(sizeof(char *) * pp->nr_args); - if (!pp->args) - die("malloc"); - memcpy(pp->args, &argv[1], sizeof(char *) * pp->nr_args); - } - - /* Ensure return probe has no C argument */ - for (i = 0; i < pp->nr_args; i++) + pp->args = zalloc(sizeof(char *) * pp->nr_args); + for (i = 0; i < pp->nr_args; i++) { + pp->args[i] = strdup(argv[i + 1]); + if (!pp->args[i]) + die("Failed to copy argument."); if (is_c_varname(pp->args[i])) { if (pp->retprobe) semantic_error("You can't specify local" " variable for kretprobe"); need_dwarf = 1; } + } + argv_free(argv); return need_dwarf; } diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c index 227043577e0..0977cf43178 100644 --- a/tools/perf/util/string.c +++ b/tools/perf/util/string.c @@ -127,3 +127,104 @@ out_err: out: return length; } + +/* + * Helper function for splitting a string into an argv-like array. 
+ * originaly copied from lib/argv_split.c + */ +static const char *skip_sep(const char *cp) +{ + while (*cp && isspace(*cp)) + cp++; + + return cp; +} + +static const char *skip_arg(const char *cp) +{ + while (*cp && !isspace(*cp)) + cp++; + + return cp; +} + +static int count_argc(const char *str) +{ + int count = 0; + + while (*str) { + str = skip_sep(str); + if (*str) { + count++; + str = skip_arg(str); + } + } + + return count; +} + +/** + * argv_free - free an argv + * @argv - the argument vector to be freed + * + * Frees an argv and the strings it points to. + */ +void argv_free(char **argv) +{ + char **p; + for (p = argv; *p; p++) + free(*p); + + free(argv); +} + +/** + * argv_split - split a string at whitespace, returning an argv + * @str: the string to be split + * @argcp: returned argument count + * + * Returns an array of pointers to strings which are split out from + * @str. This is performed by strictly splitting on white-space; no + * quote processing is performed. Multiple whitespace characters are + * considered to be a single argument separator. The returned array + * is always NULL-terminated. Returns NULL on memory allocation + * failure. + */ +char **argv_split(const char *str, int *argcp) +{ + int argc = count_argc(str); + char **argv = zalloc(sizeof(*argv) * (argc+1)); + char **argvp; + + if (argv == NULL) + goto out; + + if (argcp) + *argcp = argc; + + argvp = argv; + + while (*str) { + str = skip_sep(str); + + if (*str) { + const char *p = str; + char *t; + + str = skip_arg(str); + + t = strndup(p, str-p); + if (t == NULL) + goto fail; + *argvp++ = t; + } + } + *argvp = NULL; + +out: + return argv; + +fail: + argv_free(argv); + return NULL; +} diff --git a/tools/perf/util/string.h b/tools/perf/util/string.h index e50b07f8082..bfecec265a1 100644 --- a/tools/perf/util/string.h +++ b/tools/perf/util/string.h @@ -6,6 +6,8 @@ int hex2u64(const char *ptr, u64 *val); char *strxfrchar(char *s, char from, char to); s64 perf_atoll(const char *str); +char **argv_split(const char *str, int *argcp); +void argv_free(char **argv); #define _STR(x) #x #define STR(x) _STR(x) From 4de189fe6e5ad8241f6f8709d2e2ba4c3aeae33a Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 30 Nov 2009 19:20:17 -0500 Subject: [PATCH 453/470] perf probe: Add --list option for listing current probe events Add --list option for listing currently defined probe events in the kernel. This shows events in below format; [group:event] for example: [probe:schedule_0] schedule+30 cpu Note that source file/line information is not supported yet. So even if you added a probe by line, it will be shown in . Signed-off-by: Masami Hiramatsu Cc: systemtap Cc: DLE Cc: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Frederic Weisbecker Cc: Jason Baron Cc: K.Prasad Cc: Peter Zijlstra Cc: Srikar Dronamraju Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <20091201002017.10235.76575.stgit@harusame> Signed-off-by: Ingo Molnar --- tools/perf/builtin-probe.c | 12 +- tools/perf/util/probe-event.c | 233 +++++++++++++++++++++++++++++++--- tools/perf/util/probe-event.h | 5 + 3 files changed, 231 insertions(+), 19 deletions(-) diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index bf20df2e816..b5d15cf2547 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -62,6 +62,8 @@ static struct { struct probe_point probes[MAX_PROBES]; } session; +static bool listing; + /* Parse an event definition. 
Note that any error must die. */ static void parse_probe_event(const char *str) { @@ -119,6 +121,7 @@ static int open_default_vmlinux(void) static const char * const probe_usage[] = { "perf probe [] 'PROBEDEF' ['PROBEDEF' ...]", "perf probe [] --add 'PROBEDEF' [--add 'PROBEDEF' ...]", + "perf probe --list", NULL }; @@ -129,6 +132,7 @@ static const struct option options[] = { OPT_STRING('k', "vmlinux", &session.vmlinux, "file", "vmlinux/module pathname"), #endif + OPT_BOOLEAN('l', "list", &listing, "list up current probes"), OPT_CALLBACK('a', "add", NULL, #ifdef NO_LIBDWARF "FUNC[+OFFS|%return] [ARG ...]", @@ -164,9 +168,15 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used) for (i = 0; i < argc; i++) parse_probe_event(argv[i]); - if (session.nr_probe == 0) + if ((session.nr_probe == 0 && !listing) || + (session.nr_probe != 0 && listing)) usage_with_options(probe_usage, options); + if (listing) { + show_perf_probe_events(); + return 0; + } + if (session.need_dwarf) #ifdef NO_LIBDWARF die("Debuginfo-analysis is not supported"); diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index e3a683ab976..7f4f288c642 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -29,10 +29,13 @@ #include #include #include +#include +#include #undef _GNU_SOURCE #include "event.h" #include "string.h" +#include "strlist.h" #include "debug.h" #include "parse-events.h" /* For debugfs_path */ #include "probe-event.h" @@ -43,6 +46,19 @@ #define semantic_error(msg ...) die("Semantic error :" msg) +/* If there is no space to write, returns -E2BIG. */ +static int e_snprintf(char *str, size_t size, const char *format, ...) +{ + int ret; + va_list ap; + va_start(ap, format); + ret = vsnprintf(str, size, format, ap); + va_end(ap); + if (ret >= (int)size) + ret = -E2BIG; + return ret; +} + /* Parse probepoint definition. */ static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp) { @@ -166,23 +182,91 @@ int parse_perf_probe_event(const char *str, struct probe_point *pp) return need_dwarf; } -int synthesize_trace_kprobe_event(struct probe_point *pp) +/* Parse kprobe_events event into struct probe_point */ +void parse_trace_kprobe_event(const char *str, char **group, char **event, + struct probe_point *pp) +{ + char pr; + char *p; + int ret, i, argc; + char **argv; + + pr_debug("Parsing kprobe_events: %s\n", str); + argv = argv_split(str, &argc); + if (!argv) + die("argv_split failed."); + if (argc < 2) + semantic_error("Too less arguments."); + + /* Scan event and group name. */ + ret = sscanf(argv[0], "%c:%m[^/ \t]/%m[^ \t]", + &pr, group, event); + if (ret != 3) + semantic_error("Failed to parse event name: %s", argv[0]); + pr_debug("Group:%s Event:%s probe:%c\n", *group, *event, pr); + + if (!pp) + goto end; + + pp->retprobe = (pr == 'r'); + + /* Scan function name and offset */ + ret = sscanf(argv[1], "%m[^+]+%d", &pp->function, &pp->offset); + if (ret == 1) + pp->offset = 0; + + /* kprobe_events doesn't have this information */ + pp->line = 0; + pp->file = NULL; + + pp->nr_args = argc - 2; + pp->args = zalloc(sizeof(char *) * pp->nr_args); + for (i = 0; i < pp->nr_args; i++) { + p = strchr(argv[i + 2], '='); + if (p) /* We don't need which register is assigned. 
*/ + *p = '\0'; + pp->args[i] = strdup(argv[i + 2]); + if (!pp->args[i]) + die("Failed to copy argument."); + } + +end: + argv_free(argv); +} + +int synthesize_perf_probe_event(struct probe_point *pp) { char *buf; + char offs[64] = "", line[64] = ""; int i, len, ret; pp->probes[0] = buf = zalloc(MAX_CMDLEN); if (!buf) die("Failed to allocate memory by zalloc."); - ret = snprintf(buf, MAX_CMDLEN, "%s+%d", pp->function, pp->offset); - if (ret <= 0 || ret >= MAX_CMDLEN) + if (pp->offset) { + ret = e_snprintf(offs, 64, "+%d", pp->offset); + if (ret <= 0) + goto error; + } + if (pp->line) { + ret = e_snprintf(line, 64, ":%d", pp->line); + if (ret <= 0) + goto error; + } + + if (pp->function) + ret = e_snprintf(buf, MAX_CMDLEN, "%s%s%s%s", pp->function, + offs, pp->retprobe ? "%return" : "", line); + else + ret = e_snprintf(buf, MAX_CMDLEN, "%s%s%s%s", pp->file, line); + if (ret <= 0) goto error; len = ret; for (i = 0; i < pp->nr_args; i++) { - ret = snprintf(&buf[len], MAX_CMDLEN - len, " %s", - pp->args[i]); - if (ret <= 0 || ret >= MAX_CMDLEN - len) + ret = e_snprintf(&buf[len], MAX_CMDLEN - len, " %s", + pp->args[i]); + if (ret <= 0) goto error; len += ret; } @@ -191,12 +275,134 @@ int synthesize_trace_kprobe_event(struct probe_point *pp) return pp->found; error: free(pp->probes[0]); - if (ret > 0) - ret = -E2BIG; return ret; } +int synthesize_trace_kprobe_event(struct probe_point *pp) +{ + char *buf; + int i, len, ret; + + pp->probes[0] = buf = zalloc(MAX_CMDLEN); + if (!buf) + die("Failed to allocate memory by zalloc."); + ret = e_snprintf(buf, MAX_CMDLEN, "%s+%d", pp->function, pp->offset); + if (ret <= 0) + goto error; + len = ret; + + for (i = 0; i < pp->nr_args; i++) { + ret = e_snprintf(&buf[len], MAX_CMDLEN - len, " %s", + pp->args[i]); + if (ret <= 0) + goto error; + len += ret; + } + pp->found = 1; + + return pp->found; +error: + free(pp->probes[0]); + + return ret; +} + +static int open_kprobe_events(int flags, int mode) +{ + char buf[PATH_MAX]; + int ret; + + ret = e_snprintf(buf, PATH_MAX, "%s/../kprobe_events", debugfs_path); + if (ret < 0) + die("Failed to make kprobe_events path."); + + ret = open(buf, flags, mode); + if (ret < 0) { + if (errno == ENOENT) + die("kprobe_events file does not exist -" + " please rebuild with CONFIG_KPROBE_TRACER."); + else + die("Could not open kprobe_events file: %s", + strerror(errno)); + } + return ret; +} + +/* Get raw string list of current kprobe_events */ +static struct strlist *get_trace_kprobe_event_rawlist(int fd) +{ + int ret, idx; + FILE *fp; + char buf[MAX_CMDLEN]; + char *p; + struct strlist *sl; + + sl = strlist__new(true, NULL); + + fp = fdopen(dup(fd), "r"); + while (!feof(fp)) { + p = fgets(buf, MAX_CMDLEN, fp); + if (!p) + break; + + idx = strlen(p) - 1; + if (p[idx] == '\n') + p[idx] = '\0'; + ret = strlist__add(sl, buf); + if (ret < 0) + die("strlist__add failed: %s", strerror(-ret)); + } + fclose(fp); + + return sl; +} + +/* Free and zero clear probe_point */ +static void clear_probe_point(struct probe_point *pp) +{ + int i; + + if (pp->function) + free(pp->function); + if (pp->file) + free(pp->file); + for (i = 0; i < pp->nr_args; i++) + free(pp->args[i]); + if (pp->args) + free(pp->args); + for (i = 0; i < pp->found; i++) + free(pp->probes[i]); + memset(pp, 0, sizeof(pp)); +} + +/* List up current perf-probe events */ +void show_perf_probe_events(void) +{ + unsigned int i; + int fd; + char *group, *event; + struct probe_point pp; + struct strlist *rawlist; + struct str_node *ent; + + fd = open_kprobe_events(O_RDONLY, 0); 
+ rawlist = get_trace_kprobe_event_rawlist(fd); + close(fd); + + for (i = 0; i < strlist__nr_entries(rawlist); i++) { + ent = strlist__entry(rawlist, i); + parse_trace_kprobe_event(ent->s, &group, &event, &pp); + synthesize_perf_probe_event(&pp); + printf("[%s:%s]\t%s\n", group, event, pp.probes[0]); + free(group); + free(event); + clear_probe_point(&pp); + } + + strlist__delete(rawlist); +} + static int write_trace_kprobe_event(int fd, const char *buf) { int ret; @@ -216,16 +422,7 @@ void add_trace_kprobe_events(struct probe_point *probes, int nr_probes) struct probe_point *pp; char buf[MAX_CMDLEN]; - snprintf(buf, MAX_CMDLEN, "%s/../kprobe_events", debugfs_path); - fd = open(buf, O_WRONLY, O_APPEND); - if (fd < 0) { - if (errno == ENOENT) - die("kprobe_events file does not exist -" - " please rebuild with CONFIG_KPROBE_TRACER."); - else - die("Could not open kprobe_events file: %s", - strerror(errno)); - } + fd = open_kprobe_events(O_WRONLY, O_APPEND); for (j = 0; j < nr_probes; j++) { pp = probes + j; diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h index 0089c455eca..88db7d1a947 100644 --- a/tools/perf/util/probe-event.h +++ b/tools/perf/util/probe-event.h @@ -2,9 +2,14 @@ #define _PROBE_EVENT_H #include "probe-finder.h" +#include "strlist.h" extern int parse_perf_probe_event(const char *str, struct probe_point *pp); +extern int synthesize_perf_probe_event(struct probe_point *pp); +extern void parse_trace_kprobe_event(const char *str, char **group, + char **event, struct probe_point *pp); extern int synthesize_trace_kprobe_event(struct probe_point *pp); extern void add_trace_kprobe_events(struct probe_point *probes, int nr_probes); +extern void show_perf_probe_events(void); #endif /*_PROBE_EVENT_H */ From b498ce1f2753b9724b2fc05d2057f7d1490cfa93 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 30 Nov 2009 19:20:25 -0500 Subject: [PATCH 454/470] perf probe: Simplify event naming Simplify event naming as _. Each event name is globally unique (group name is not checked). So, if there is schedule_0, next probe event on schedule() will be schedule_1. Signed-off-by: Masami Hiramatsu Cc: systemtap Cc: DLE Cc: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Christoph Hellwig Cc: Frank Ch. 
Eigler Cc: Frederic Weisbecker Cc: Jason Baron Cc: K.Prasad Cc: Peter Zijlstra Cc: Srikar Dronamraju Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: <20091201002024.10235.2353.stgit@harusame> Signed-off-by: Ingo Molnar --- tools/perf/util/probe-event.c | 67 +++++++++++++++++++++++++++-------- tools/perf/util/probe-event.h | 3 ++ 2 files changed, 56 insertions(+), 14 deletions(-) diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 7f4f288c642..e42f3acc9a7 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -403,6 +403,29 @@ void show_perf_probe_events(void) strlist__delete(rawlist); } +/* Get current perf-probe event names */ +static struct strlist *get_perf_event_names(int fd) +{ + unsigned int i; + char *group, *event; + struct strlist *sl, *rawlist; + struct str_node *ent; + + rawlist = get_trace_kprobe_event_rawlist(fd); + + sl = strlist__new(false, NULL); + for (i = 0; i < strlist__nr_entries(rawlist); i++) { + ent = strlist__entry(rawlist, i); + parse_trace_kprobe_event(ent->s, &group, &event, NULL); + strlist__add(sl, event); + free(group); + } + + strlist__delete(rawlist); + + return sl; +} + static int write_trace_kprobe_event(int fd, const char *buf) { int ret; @@ -416,30 +439,46 @@ static int write_trace_kprobe_event(int fd, const char *buf) return ret; } +static void get_new_event_name(char *buf, size_t len, const char *base, + struct strlist *namelist) +{ + int i, ret; + for (i = 0; i < MAX_EVENT_INDEX; i++) { + ret = e_snprintf(buf, len, "%s_%d", base, i); + if (ret < 0) + die("snprintf() failed: %s", strerror(-ret)); + if (!strlist__has_entry(namelist, buf)) + break; + } + if (i == MAX_EVENT_INDEX) + die("Too many events are on the same function."); +} + void add_trace_kprobe_events(struct probe_point *probes, int nr_probes) { int i, j, fd; struct probe_point *pp; char buf[MAX_CMDLEN]; + char event[64]; + struct strlist *namelist; - fd = open_kprobe_events(O_WRONLY, O_APPEND); + fd = open_kprobe_events(O_RDWR, O_APPEND); + /* Get current event names */ + namelist = get_perf_event_names(fd); for (j = 0; j < nr_probes; j++) { pp = probes + j; - if (pp->found == 1) { - snprintf(buf, MAX_CMDLEN, "%c:%s/%s_%x %s\n", - pp->retprobe ? 'r' : 'p', PERFPROBE_GROUP, - pp->function, pp->offset, pp->probes[0]); + for (i = 0; i < pp->found; i++) { + /* Get an unused new event name */ + get_new_event_name(event, 64, pp->function, namelist); + snprintf(buf, MAX_CMDLEN, "%c:%s/%s %s\n", + pp->retprobe ? 'r' : 'p', + PERFPROBE_GROUP, event, + pp->probes[i]); write_trace_kprobe_event(fd, buf); - } else - for (i = 0; i < pp->found; i++) { - snprintf(buf, MAX_CMDLEN, "%c:%s/%s_%x_%d %s\n", - pp->retprobe ? 
'r' : 'p', - PERFPROBE_GROUP, - pp->function, pp->offset, i, - pp->probes[i]); - write_trace_kprobe_event(fd, buf); - } + /* Add added event name to namelist */ + strlist__add(namelist, event); + } } close(fd); } diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h index 88db7d1a947..0c6fe56fe38 100644 --- a/tools/perf/util/probe-event.h +++ b/tools/perf/util/probe-event.h @@ -12,4 +12,7 @@ extern int synthesize_trace_kprobe_event(struct probe_point *pp); extern void add_trace_kprobe_events(struct probe_point *probes, int nr_probes); extern void show_perf_probe_events(void); +/* Maximum index number of event-name postfix */ +#define MAX_EVENT_INDEX 1024 + #endif /*_PROBE_EVENT_H */ From 59d069eb5ae9b033ed1c124c92e1532c4a958991 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Tue, 1 Dec 2009 17:30:08 +0800 Subject: [PATCH 455/470] perf_event: Initialize data.period in perf_swevent_hrtimer() In current code in perf_swevent_hrtimer(), data.period is not initialized, The result is obvious wrong: # ./perf record -f -e cpu-clock make # ./perf report # Samples: 1740 # # Overhead Command ...... # ........ ........ .......................................... # 1025422183050275328.00% sh libc-2.9.90.so ... 1025422183050275328.00% perl libperl.so ... 1025422168240043264.00% perl [kernel] ... 1025422030011210752.00% perl [kernel] ... Signed-off-by: Xiao Guangrong Acked-by: Peter Zijlstra Cc: Frederic Weisbecker Cc: LKML-Reference: <4B14E220.2050107@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 0b9ca2d834d..040ee517c80 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4010,6 +4010,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) event->pmu->read(event); data.addr = 0; + data.period = event->hw.last_period; regs = get_irq_regs(); /* * In case we exclude kernel IPs or are somehow not in interrupt From bf56a4ea9f1683c5b223fd3a5dbea23f1fa91c34 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Tue, 1 Dec 2009 16:23:20 +0800 Subject: [PATCH 456/470] trace_syscalls: Remove unused event_syscall_enter and event_syscall_exit fix event_enter_##sname->event fix event_exit_##sname->event remove unused event_syscall_enter and event_syscall_exit Signed-off-by: Lai Jiangshan Acked-by: Jason Baron Cc: Steven Rostedt Cc: Frederic Weisbecker LKML-Reference: <4B14D278.4090209@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- include/linux/syscalls.h | 4 ++-- include/trace/syscall.h | 2 -- kernel/trace/trace_syscalls.c | 8 -------- 3 files changed, 2 insertions(+), 12 deletions(-) diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index b50974a93af..2f7c539ab96 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -178,7 +178,7 @@ static void prof_sysexit_disable_##sname(struct ftrace_event_call *unused) \ event_enter_##sname = { \ .name = "sys_enter"#sname, \ .system = "syscalls", \ - .event = &event_syscall_enter, \ + .event = &enter_syscall_print_##sname, \ .raw_init = init_enter_##sname, \ .show_format = syscall_enter_format, \ .define_fields = syscall_enter_define_fields, \ @@ -214,7 +214,7 @@ static void prof_sysexit_disable_##sname(struct ftrace_event_call *unused) \ event_exit_##sname = { \ .name = "sys_exit"#sname, \ .system = "syscalls", \ - .event = &event_syscall_exit, \ + .event = &exit_syscall_print_##sname, \ .raw_init = init_exit_##sname, \ .show_format = syscall_exit_format, \ .define_fields = 
syscall_exit_define_fields, \ diff --git a/include/trace/syscall.h b/include/trace/syscall.h index 51ee17d3632..5f8827c92db 100644 --- a/include/trace/syscall.h +++ b/include/trace/syscall.h @@ -37,8 +37,6 @@ extern unsigned long arch_syscall_addr(int nr); extern int syscall_name_to_nr(char *name); void set_syscall_enter_id(int num, int id); void set_syscall_exit_id(int num, int id); -extern struct trace_event event_syscall_enter; -extern struct trace_event event_syscall_exit; extern int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s); diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 63aa8070365..00d6e176f5b 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -444,14 +444,6 @@ void unreg_event_syscall_exit(struct ftrace_event_call *call) mutex_unlock(&syscall_trace_lock); } -struct trace_event event_syscall_enter = { - .trace = print_syscall_enter, -}; - -struct trace_event event_syscall_exit = { - .trace = print_syscall_exit, -}; - int __init init_ftrace_syscalls(void) { struct syscall_metadata *meta; From 31c16b13349970b2684248c7d8608d2a96ae135d Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Tue, 1 Dec 2009 16:23:30 +0800 Subject: [PATCH 457/470] trace_syscalls: Set event_enter_##sname->data to its metadata Set event_enter_##sname->data to its metadata, it makes codes simpler. Signed-off-by: Lai Jiangshan Acked-by: Jason Baron Cc: Steven Rostedt Cc: Frederic Weisbecker LKML-Reference: <4B14D282.7050709@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- include/linux/syscalls.h | 6 ++++-- include/trace/syscall.h | 2 +- kernel/trace/trace_syscalls.c | 36 +++++++++++------------------------ 3 files changed, 16 insertions(+), 28 deletions(-) diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 2f7c539ab96..d3c9fd01a11 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -153,6 +153,7 @@ static void prof_sysexit_disable_##sname(struct ftrace_event_call *unused) \ #define __SC_STR_TDECL6(t, a, ...) 
#t, __SC_STR_TDECL5(__VA_ARGS__) #define SYSCALL_TRACE_ENTER_EVENT(sname) \ + static const struct syscall_metadata __syscall_meta_##sname; \ static struct ftrace_event_call event_enter_##sname; \ struct trace_event enter_syscall_print_##sname = { \ .trace = print_syscall_enter, \ @@ -184,11 +185,12 @@ static void prof_sysexit_disable_##sname(struct ftrace_event_call *unused) \ .define_fields = syscall_enter_define_fields, \ .regfunc = reg_event_syscall_enter, \ .unregfunc = unreg_event_syscall_enter, \ - .data = "sys"#sname, \ + .data = (void *)&__syscall_meta_##sname,\ TRACE_SYS_ENTER_PROFILE_INIT(sname) \ } #define SYSCALL_TRACE_EXIT_EVENT(sname) \ + static const struct syscall_metadata __syscall_meta_##sname; \ static struct ftrace_event_call event_exit_##sname; \ struct trace_event exit_syscall_print_##sname = { \ .trace = print_syscall_exit, \ @@ -220,7 +222,7 @@ static void prof_sysexit_disable_##sname(struct ftrace_event_call *unused) \ .define_fields = syscall_exit_define_fields, \ .regfunc = reg_event_syscall_exit, \ .unregfunc = unreg_event_syscall_exit, \ - .data = "sys"#sname, \ + .data = (void *)&__syscall_meta_##sname,\ TRACE_SYS_EXIT_PROFILE_INIT(sname) \ } diff --git a/include/trace/syscall.h b/include/trace/syscall.h index 5f8827c92db..c5265c81c4e 100644 --- a/include/trace/syscall.h +++ b/include/trace/syscall.h @@ -34,7 +34,7 @@ struct syscall_metadata { #ifdef CONFIG_FTRACE_SYSCALLS extern unsigned long arch_syscall_addr(int nr); -extern int syscall_name_to_nr(char *name); +extern int syscall_name_to_nr(const char *name); void set_syscall_enter_id(int num, int id); void set_syscall_exit_id(int num, int id); diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 00d6e176f5b..39649b1675d 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -51,7 +51,7 @@ static struct syscall_metadata *syscall_nr_to_meta(int nr) return syscalls_metadata[nr]; } -int syscall_name_to_nr(char *name) +int syscall_name_to_nr(const char *name) { int i; @@ -172,18 +172,11 @@ extern char *__bad_type_size(void); int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s) { int i; - int nr; int ret; - struct syscall_metadata *entry; + struct syscall_metadata *entry = call->data; struct syscall_trace_enter trace; int offset = offsetof(struct syscall_trace_enter, args); - nr = syscall_name_to_nr(call->data); - entry = syscall_nr_to_meta(nr); - - if (!entry) - return 0; - ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;" "\tsigned:%u;\n", SYSCALL_FIELD(int, nr)); @@ -245,18 +238,11 @@ int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s) int syscall_enter_define_fields(struct ftrace_event_call *call) { struct syscall_trace_enter trace; - struct syscall_metadata *meta; + struct syscall_metadata *meta = call->data; int ret; - int nr; int i; int offset = offsetof(typeof(trace), args); - nr = syscall_name_to_nr(call->data); - meta = syscall_nr_to_meta(nr); - - if (!meta) - return 0; - ret = trace_define_common_fields(call); if (ret) return ret; @@ -366,9 +352,9 @@ int reg_event_syscall_enter(struct ftrace_event_call *call) { int ret = 0; int num; - char *name; + const char *name; - name = (char *)call->data; + name = ((struct syscall_metadata *)call->data)->name; num = syscall_name_to_nr(name); if (num < 0 || num >= NR_syscalls) return -ENOSYS; @@ -389,9 +375,9 @@ int reg_event_syscall_enter(struct ftrace_event_call *call) void unreg_event_syscall_enter(struct ftrace_event_call *call) { int num; 
- char *name; + const char *name; - name = (char *)call->data; + name = ((struct syscall_metadata *)call->data)->name; num = syscall_name_to_nr(name); if (num < 0 || num >= NR_syscalls) return; @@ -407,9 +393,9 @@ int reg_event_syscall_exit(struct ftrace_event_call *call) { int ret = 0; int num; - char *name; + const char *name; - name = call->data; + name = ((struct syscall_metadata *)call->data)->name; num = syscall_name_to_nr(name); if (num < 0 || num >= NR_syscalls) return -ENOSYS; @@ -430,9 +416,9 @@ int reg_event_syscall_exit(struct ftrace_event_call *call) void unreg_event_syscall_exit(struct ftrace_event_call *call) { int num; - char *name; + const char *name; - name = call->data; + name = ((struct syscall_metadata *)call->data)->name; num = syscall_name_to_nr(name); if (num < 0 || num >= NR_syscalls) return; From fcc19438dda38dacc8c144e2db3ebc6b9fd4f8b8 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Tue, 1 Dec 2009 16:23:36 +0800 Subject: [PATCH 458/470] trace_syscalls: Remove enter_id exit_id use ->enter_event->id instead of ->enter_id use ->exit_event->id instead of ->exit_id Signed-off-by: Lai Jiangshan Acked-by: Jason Baron Cc: Steven Rostedt Cc: Frederic Weisbecker LKML-Reference: <4B14D288.7030001@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- include/linux/syscalls.h | 2 -- include/trace/syscall.h | 6 ------ kernel/trace/trace_syscalls.c | 30 ++++++++++-------------------- 3 files changed, 10 insertions(+), 28 deletions(-) diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index d3c9fd01a11..b9af87560ad 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -168,7 +168,6 @@ static void prof_sysexit_disable_##sname(struct ftrace_event_call *unused) \ if (!id) \ return -ENODEV; \ event_enter_##sname.id = id; \ - set_syscall_enter_id(num, id); \ INIT_LIST_HEAD(&event_enter_##sname.fields); \ return 0; \ } \ @@ -205,7 +204,6 @@ static void prof_sysexit_disable_##sname(struct ftrace_event_call *unused) \ if (!id) \ return -ENODEV; \ event_exit_##sname.id = id; \ - set_syscall_exit_id(num, id); \ INIT_LIST_HEAD(&event_exit_##sname.fields); \ return 0; \ } \ diff --git a/include/trace/syscall.h b/include/trace/syscall.h index c5265c81c4e..ca09561cd57 100644 --- a/include/trace/syscall.h +++ b/include/trace/syscall.h @@ -15,8 +15,6 @@ * @nb_args: number of parameters it takes * @types: list of types as strings * @args: list of args as strings (args[i] matches types[i]) - * @enter_id: associated ftrace enter event id - * @exit_id: associated ftrace exit event id * @enter_event: associated syscall_enter trace event * @exit_event: associated syscall_exit trace event */ @@ -25,8 +23,6 @@ struct syscall_metadata { int nb_args; const char **types; const char **args; - int enter_id; - int exit_id; struct ftrace_event_call *enter_event; struct ftrace_event_call *exit_event; @@ -35,8 +31,6 @@ struct syscall_metadata { #ifdef CONFIG_FTRACE_SYSCALLS extern unsigned long arch_syscall_addr(int nr); extern int syscall_name_to_nr(const char *name); -void set_syscall_enter_id(int num, int id); -void set_syscall_exit_id(int num, int id); extern int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s); diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 39649b1675d..27eb18d6922 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -67,16 +67,6 @@ int syscall_name_to_nr(const char *name) return -1; } -void set_syscall_enter_id(int num, int id) -{ - syscalls_metadata[num]->enter_id = id; -} 
- -void set_syscall_exit_id(int num, int id) -{ - syscalls_metadata[num]->exit_id = id; -} - enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags) { @@ -93,7 +83,7 @@ print_syscall_enter(struct trace_iterator *iter, int flags) if (!entry) goto end; - if (entry->enter_id != ent->type) { + if (entry->enter_event->id != ent->type) { WARN_ON_ONCE(1); goto end; } @@ -148,7 +138,7 @@ print_syscall_exit(struct trace_iterator *iter, int flags) return TRACE_TYPE_HANDLED; } - if (entry->exit_id != ent->type) { + if (entry->exit_event->id != ent->type) { WARN_ON_ONCE(1); return TRACE_TYPE_UNHANDLED; } @@ -302,8 +292,8 @@ void ftrace_syscall_enter(struct pt_regs *regs, long id) size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args; - event = trace_current_buffer_lock_reserve(&buffer, sys_data->enter_id, - size, 0, 0); + event = trace_current_buffer_lock_reserve(&buffer, + sys_data->enter_event->id, size, 0, 0); if (!event) return; @@ -334,8 +324,8 @@ void ftrace_syscall_exit(struct pt_regs *regs, long ret) if (!sys_data) return; - event = trace_current_buffer_lock_reserve(&buffer, sys_data->exit_id, - sizeof(*entry), 0, 0); + event = trace_current_buffer_lock_reserve(&buffer, + sys_data->exit_event->id, sizeof(*entry), 0, 0); if (!event) return; @@ -510,11 +500,11 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) rec = (struct syscall_trace_enter *) raw_data; tracing_generic_entry_update(&rec->ent, 0, 0); - rec->ent.type = sys_data->enter_id; + rec->ent.type = sys_data->enter_event->id; rec->nr = syscall_nr; syscall_get_arguments(current, regs, 0, sys_data->nb_args, (unsigned long *)&rec->args); - perf_tp_event(sys_data->enter_id, 0, 1, rec, size); + perf_tp_event(sys_data->enter_event->id, 0, 1, rec, size); end: perf_swevent_put_recursion_context(rctx); @@ -615,11 +605,11 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) rec = (struct syscall_trace_exit *)raw_data; tracing_generic_entry_update(&rec->ent, 0, 0); - rec->ent.type = sys_data->exit_id; + rec->ent.type = sys_data->exit_event->id; rec->nr = syscall_nr; rec->ret = syscall_get_return_value(current, regs); - perf_tp_event(sys_data->exit_id, 0, 1, rec, size); + perf_tp_event(sys_data->exit_event->id, 0, 1, rec, size); end: perf_swevent_put_recursion_context(rctx); From c252f65793874b56d50395ab604db465ce688665 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Tue, 1 Dec 2009 16:23:47 +0800 Subject: [PATCH 459/470] trace_syscalls: Add syscall_nr field to struct syscall_metadata Add syscall_nr field to struct syscall_metadata, it helps us to get syscall number easier. 
Signed-off-by: Lai Jiangshan Acked-by: Jason Baron Cc: Steven Rostedt Cc: Frederic Weisbecker LKML-Reference: <4B14D293.6090800@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- include/linux/syscalls.h | 4 ++-- include/trace/syscall.h | 3 ++- kernel/trace/trace_syscalls.c | 22 +++++++++------------- 3 files changed, 13 insertions(+), 16 deletions(-) diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index b9af87560ad..3c280d7ecb7 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -161,7 +161,7 @@ static void prof_sysexit_disable_##sname(struct ftrace_event_call *unused) \ static int init_enter_##sname(struct ftrace_event_call *call) \ { \ int num, id; \ - num = syscall_name_to_nr("sys"#sname); \ + num = __syscall_meta_##sname.syscall_nr; \ if (num < 0) \ return -ENOSYS; \ id = register_ftrace_event(&enter_syscall_print_##sname);\ @@ -197,7 +197,7 @@ static void prof_sysexit_disable_##sname(struct ftrace_event_call *unused) \ static int init_exit_##sname(struct ftrace_event_call *call) \ { \ int num, id; \ - num = syscall_name_to_nr("sys"#sname); \ + num = __syscall_meta_##sname.syscall_nr; \ if (num < 0) \ return -ENOSYS; \ id = register_ftrace_event(&exit_syscall_print_##sname);\ diff --git a/include/trace/syscall.h b/include/trace/syscall.h index ca09561cd57..1531eef3071 100644 --- a/include/trace/syscall.h +++ b/include/trace/syscall.h @@ -12,6 +12,7 @@ * A syscall entry in the ftrace syscalls array. * * @name: name of the syscall + * @syscall_nr: number of the syscall * @nb_args: number of parameters it takes * @types: list of types as strings * @args: list of args as strings (args[i] matches types[i]) @@ -20,6 +21,7 @@ */ struct syscall_metadata { const char *name; + int syscall_nr; int nb_args; const char **types; const char **args; @@ -30,7 +32,6 @@ struct syscall_metadata { #ifdef CONFIG_FTRACE_SYSCALLS extern unsigned long arch_syscall_addr(int nr); -extern int syscall_name_to_nr(const char *name); extern int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s); diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 27eb18d6922..144cc14d855 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -51,7 +51,7 @@ static struct syscall_metadata *syscall_nr_to_meta(int nr) return syscalls_metadata[nr]; } -int syscall_name_to_nr(const char *name) +static int syscall_name_to_nr(const char *name) { int i; @@ -342,10 +342,8 @@ int reg_event_syscall_enter(struct ftrace_event_call *call) { int ret = 0; int num; - const char *name; - name = ((struct syscall_metadata *)call->data)->name; - num = syscall_name_to_nr(name); + num = ((struct syscall_metadata *)call->data)->syscall_nr; if (num < 0 || num >= NR_syscalls) return -ENOSYS; mutex_lock(&syscall_trace_lock); @@ -365,10 +363,8 @@ int reg_event_syscall_enter(struct ftrace_event_call *call) void unreg_event_syscall_enter(struct ftrace_event_call *call) { int num; - const char *name; - name = ((struct syscall_metadata *)call->data)->name; - num = syscall_name_to_nr(name); + num = ((struct syscall_metadata *)call->data)->syscall_nr; if (num < 0 || num >= NR_syscalls) return; mutex_lock(&syscall_trace_lock); @@ -383,10 +379,8 @@ int reg_event_syscall_exit(struct ftrace_event_call *call) { int ret = 0; int num; - const char *name; - name = ((struct syscall_metadata *)call->data)->name; - num = syscall_name_to_nr(name); + num = ((struct syscall_metadata *)call->data)->syscall_nr; if (num < 0 || num >= NR_syscalls) return -ENOSYS; 
mutex_lock(&syscall_trace_lock); @@ -406,10 +400,8 @@ int reg_event_syscall_exit(struct ftrace_event_call *call) void unreg_event_syscall_exit(struct ftrace_event_call *call) { int num; - const char *name; - name = ((struct syscall_metadata *)call->data)->name; - num = syscall_name_to_nr(name); + num = ((struct syscall_metadata *)call->data)->syscall_nr; if (num < 0 || num >= NR_syscalls) return; mutex_lock(&syscall_trace_lock); @@ -436,6 +428,10 @@ int __init init_ftrace_syscalls(void) for (i = 0; i < NR_syscalls; i++) { addr = arch_syscall_addr(i); meta = find_syscall_meta(addr); + if (!meta) + continue; + + meta->syscall_nr = i; syscalls_metadata[i] = meta; } From a1301da0997bf73c44dbe584e9070a13adc89672 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Tue, 1 Dec 2009 16:23:55 +0800 Subject: [PATCH 460/470] trace_syscalls: Remove duplicate init_enter_##sname() use only one init_syscall_trace instead of many init_enter_##sname()/init_exit_##sname() Signed-off-by: Lai Jiangshan Acked-by: Jason Baron Cc: Steven Rostedt Cc: Frederic Weisbecker LKML-Reference: <4B14D29B.6090708@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- include/linux/syscalls.h | 30 ++---------------------------- include/trace/syscall.h | 1 + kernel/trace/trace_syscalls.c | 12 ++++++++++++ 3 files changed, 15 insertions(+), 28 deletions(-) diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 3c280d7ecb7..cf0d923ea40 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -158,19 +158,6 @@ static void prof_sysexit_disable_##sname(struct ftrace_event_call *unused) \ struct trace_event enter_syscall_print_##sname = { \ .trace = print_syscall_enter, \ }; \ - static int init_enter_##sname(struct ftrace_event_call *call) \ - { \ - int num, id; \ - num = __syscall_meta_##sname.syscall_nr; \ - if (num < 0) \ - return -ENOSYS; \ - id = register_ftrace_event(&enter_syscall_print_##sname);\ - if (!id) \ - return -ENODEV; \ - event_enter_##sname.id = id; \ - INIT_LIST_HEAD(&event_enter_##sname.fields); \ - return 0; \ - } \ TRACE_SYS_ENTER_PROFILE(sname); \ static struct ftrace_event_call __used \ __attribute__((__aligned__(4))) \ @@ -179,7 +166,7 @@ static void prof_sysexit_disable_##sname(struct ftrace_event_call *unused) \ .name = "sys_enter"#sname, \ .system = "syscalls", \ .event = &enter_syscall_print_##sname, \ - .raw_init = init_enter_##sname, \ + .raw_init = init_syscall_trace, \ .show_format = syscall_enter_format, \ .define_fields = syscall_enter_define_fields, \ .regfunc = reg_event_syscall_enter, \ @@ -194,19 +181,6 @@ static void prof_sysexit_disable_##sname(struct ftrace_event_call *unused) \ struct trace_event exit_syscall_print_##sname = { \ .trace = print_syscall_exit, \ }; \ - static int init_exit_##sname(struct ftrace_event_call *call) \ - { \ - int num, id; \ - num = __syscall_meta_##sname.syscall_nr; \ - if (num < 0) \ - return -ENOSYS; \ - id = register_ftrace_event(&exit_syscall_print_##sname);\ - if (!id) \ - return -ENODEV; \ - event_exit_##sname.id = id; \ - INIT_LIST_HEAD(&event_exit_##sname.fields); \ - return 0; \ - } \ TRACE_SYS_EXIT_PROFILE(sname); \ static struct ftrace_event_call __used \ __attribute__((__aligned__(4))) \ @@ -215,7 +189,7 @@ static void prof_sysexit_disable_##sname(struct ftrace_event_call *unused) \ .name = "sys_exit"#sname, \ .system = "syscalls", \ .event = &exit_syscall_print_##sname, \ - .raw_init = init_exit_##sname, \ + .raw_init = init_syscall_trace, \ .show_format = syscall_exit_format, \ .define_fields = syscall_exit_define_fields, \ 
.regfunc = reg_event_syscall_exit, \ diff --git a/include/trace/syscall.h b/include/trace/syscall.h index 1531eef3071..dff9371e527 100644 --- a/include/trace/syscall.h +++ b/include/trace/syscall.h @@ -32,6 +32,7 @@ struct syscall_metadata { #ifdef CONFIG_FTRACE_SYSCALLS extern unsigned long arch_syscall_addr(int nr); +extern int init_syscall_trace(struct ftrace_event_call *call); extern int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s); diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 144cc14d855..c6514093c95 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -412,6 +412,18 @@ void unreg_event_syscall_exit(struct ftrace_event_call *call) mutex_unlock(&syscall_trace_lock); } +int init_syscall_trace(struct ftrace_event_call *call) +{ + int id; + + id = register_ftrace_event(call->event); + if (!id) + return -ENODEV; + call->id = id; + INIT_LIST_HEAD(&call->fields); + return 0; +} + int __init init_ftrace_syscalls(void) { struct syscall_metadata *meta; From 3bbe84e9d385205d638035ee9dcc4db1b486ea08 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Tue, 1 Dec 2009 16:24:01 +0800 Subject: [PATCH 461/470] trace_syscalls: Simplify syscall profile use only one prof_sysenter_enable() instead of prof_sysenter_enable_##sname() use only one prof_sysenter_disable() instead of prof_sysenter_disable_##sname() use only one prof_sysexit_enable() instead of prof_sysexit_enable_##sname() use only one prof_sysexit_disable() instead of prof_sysexit_disable_##sname() Signed-off-by: Lai Jiangshan Acked-by: Jason Baron Cc: Steven Rostedt Cc: Frederic Weisbecker LKML-Reference: <4B14D2A1.8060304@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- include/linux/syscalls.h | 31 ++++--------------------------- include/trace/syscall.h | 8 ++++---- kernel/trace/trace_syscalls.c | 24 ++++++++---------------- 3 files changed, 16 insertions(+), 47 deletions(-) diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index cf0d923ea40..c2df3a59323 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -99,37 +99,16 @@ struct perf_event_attr; #define __SC_TEST6(t6, a6, ...) 
__SC_TEST(t6); __SC_TEST5(__VA_ARGS__) #ifdef CONFIG_EVENT_PROFILE -#define TRACE_SYS_ENTER_PROFILE(sname) \ -static int prof_sysenter_enable_##sname(struct ftrace_event_call *unused) \ -{ \ - return reg_prof_syscall_enter("sys"#sname); \ -} \ - \ -static void prof_sysenter_disable_##sname(struct ftrace_event_call *unused) \ -{ \ - unreg_prof_syscall_enter("sys"#sname); \ -} - -#define TRACE_SYS_EXIT_PROFILE(sname) \ -static int prof_sysexit_enable_##sname(struct ftrace_event_call *unused) \ -{ \ - return reg_prof_syscall_exit("sys"#sname); \ -} \ - \ -static void prof_sysexit_disable_##sname(struct ftrace_event_call *unused) \ -{ \ - unreg_prof_syscall_exit("sys"#sname); \ -} #define TRACE_SYS_ENTER_PROFILE_INIT(sname) \ .profile_count = ATOMIC_INIT(-1), \ - .profile_enable = prof_sysenter_enable_##sname, \ - .profile_disable = prof_sysenter_disable_##sname, + .profile_enable = prof_sysenter_enable, \ + .profile_disable = prof_sysenter_disable, #define TRACE_SYS_EXIT_PROFILE_INIT(sname) \ .profile_count = ATOMIC_INIT(-1), \ - .profile_enable = prof_sysexit_enable_##sname, \ - .profile_disable = prof_sysexit_disable_##sname, + .profile_enable = prof_sysexit_enable, \ + .profile_disable = prof_sysexit_disable, #else #define TRACE_SYS_ENTER_PROFILE(sname) #define TRACE_SYS_ENTER_PROFILE_INIT(sname) @@ -158,7 +137,6 @@ static void prof_sysexit_disable_##sname(struct ftrace_event_call *unused) \ struct trace_event enter_syscall_print_##sname = { \ .trace = print_syscall_enter, \ }; \ - TRACE_SYS_ENTER_PROFILE(sname); \ static struct ftrace_event_call __used \ __attribute__((__aligned__(4))) \ __attribute__((section("_ftrace_events"))) \ @@ -181,7 +159,6 @@ static void prof_sysexit_disable_##sname(struct ftrace_event_call *unused) \ struct trace_event exit_syscall_print_##sname = { \ .trace = print_syscall_exit, \ }; \ - TRACE_SYS_EXIT_PROFILE(sname); \ static struct ftrace_event_call __used \ __attribute__((__aligned__(4))) \ __attribute__((section("_ftrace_events"))) \ diff --git a/include/trace/syscall.h b/include/trace/syscall.h index dff9371e527..961fda3556b 100644 --- a/include/trace/syscall.h +++ b/include/trace/syscall.h @@ -50,10 +50,10 @@ enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags); enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags); #endif #ifdef CONFIG_EVENT_PROFILE -int reg_prof_syscall_enter(char *name); -void unreg_prof_syscall_enter(char *name); -int reg_prof_syscall_exit(char *name); -void unreg_prof_syscall_exit(char *name); +int prof_sysenter_enable(struct ftrace_event_call *call); +void prof_sysenter_disable(struct ftrace_event_call *call); +int prof_sysexit_enable(struct ftrace_event_call *call); +void prof_sysexit_disable(struct ftrace_event_call *call); #endif diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index c6514093c95..1e85b6cc26a 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -520,14 +520,12 @@ end_recursion: local_irq_restore(flags); } -int reg_prof_syscall_enter(char *name) +int prof_sysenter_enable(struct ftrace_event_call *call) { int ret = 0; int num; - num = syscall_name_to_nr(name); - if (num < 0 || num >= NR_syscalls) - return -ENOSYS; + num = ((struct syscall_metadata *)call->data)->syscall_nr; mutex_lock(&syscall_trace_lock); if (!sys_prof_refcount_enter) @@ -543,13 +541,11 @@ int reg_prof_syscall_enter(char *name) return ret; } -void unreg_prof_syscall_enter(char *name) +void prof_sysenter_disable(struct ftrace_event_call *call) { 
int num; - num = syscall_name_to_nr(name); - if (num < 0 || num >= NR_syscalls) - return; + num = ((struct syscall_metadata *)call->data)->syscall_nr; mutex_lock(&syscall_trace_lock); sys_prof_refcount_enter--; @@ -625,14 +621,12 @@ end_recursion: local_irq_restore(flags); } -int reg_prof_syscall_exit(char *name) +int prof_sysexit_enable(struct ftrace_event_call *call) { int ret = 0; int num; - num = syscall_name_to_nr(name); - if (num < 0 || num >= NR_syscalls) - return -ENOSYS; + num = ((struct syscall_metadata *)call->data)->syscall_nr; mutex_lock(&syscall_trace_lock); if (!sys_prof_refcount_exit) @@ -648,13 +642,11 @@ int reg_prof_syscall_exit(char *name) return ret; } -void unreg_prof_syscall_exit(char *name) +void prof_sysexit_disable(struct ftrace_event_call *call) { int num; - num = syscall_name_to_nr(name); - if (num < 0 || num >= NR_syscalls) - return; + num = ((struct syscall_metadata *)call->data)->syscall_nr; mutex_lock(&syscall_trace_lock); sys_prof_refcount_exit--; From 7be077f56370cd52c48c08272b0867132f87bc48 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Tue, 1 Dec 2009 16:24:06 +0800 Subject: [PATCH 462/470] trace_syscalls: Remove unused syscall_name_to_nr() After duplications are removed, syscall_name_to_nr() is unused. Signed-off-by: Lai Jiangshan Acked-by: Jason Baron Cc: Steven Rostedt Cc: Frederic Weisbecker LKML-Reference: <4B14D2A6.6060803@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_syscalls.c | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 1e85b6cc26a..57501d90096 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -51,22 +51,6 @@ static struct syscall_metadata *syscall_nr_to_meta(int nr) return syscalls_metadata[nr]; } -static int syscall_name_to_nr(const char *name) -{ - int i; - - if (!syscalls_metadata) - return -1; - - for (i = 0; i < NR_syscalls; i++) { - if (syscalls_metadata[i]) { - if (!strcmp(syscalls_metadata[i]->name, name)) - return i; - } - } - return -1; -} - enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags) { From c19e33aa840e9202ef8d4c93056b59f3edc2208d Mon Sep 17 00:00:00 2001 From: Liming Wang Date: Wed, 2 Dec 2009 14:11:46 +0800 Subject: [PATCH 463/470] perf tools: Fix _GNU_SOURCE macro related strndup() build error strndup is a GNU extension. So dont include string.h without defining _GNU_SOURCE (it results in a compile error otherwise). Remove these includes as util.h does it already. 
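
For background, a minimal user-space sketch of the failure mode (this program is illustrative and not part of the patch): on the glibc versions perf targets here, <string.h> only declares strndup() when _GNU_SOURCE (or an equivalent feature-test macro) is defined before the first system header, so without the macro the call is an implicit declaration and -Werror turns the warning into the build error described above.

#define _GNU_SOURCE		/* must precede every system #include */
#include <string.h>
#include <stdlib.h>
#include <stdio.h>

int main(void)
{
	/* Without the _GNU_SOURCE definition above, strndup() has no
	 * prototype here and the build fails when warnings are errors. */
	char *prefix = strndup("perf probe", 4);

	printf("%s\n", prefix);	/* prints "perf" */
	free(prefix);
	return 0;
}
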
Signed-off-by: Liming Wang Acked-by: Frederic Weisbecker Acked-by: Xiao Guangrong Cc: peterz@infradead.org Cc: mhiramat@redhat.com LKML-Reference: <1259734306-26323-1-git-send-email-liming.wang@windriver.com> Signed-off-by: Ingo Molnar --- tools/perf/util/string.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c index 0977cf43178..f24a8cc933d 100644 --- a/tools/perf/util/string.c +++ b/tools/perf/util/string.c @@ -1,5 +1,3 @@ -#include -#include #include "string.h" #include "util.h" From bdad0db7dbdb37d0bb3c7d0f65cd3ff599ea6ecb Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 2 Dec 2009 16:08:41 +0800 Subject: [PATCH 464/470] perf_event: Fix compile error Fix: cc1: warnings being treated as errors builtin-probe.c: In function 'cmd_probe': builtin-probe.c:163: error: unused variable 'fd' Signed-off-by: Xiao Guangrong Cc: Masami Hiramatsu Cc: Peter Zijlstra LKML-Reference: <4B162089.8000907@cn.fujitsu.com> [ v2: use NO_LIBDWARF instead of __used ] Signed-off-by: Ingo Molnar --- tools/perf/builtin-probe.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index b5d15cf2547..a58e11b7ea8 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -160,7 +160,10 @@ static const struct option options[] = { int cmd_probe(int argc, const char **argv, const char *prefix __used) { - int i, j, fd, ret; + int i, j, ret; +#ifndef NO_LIBDWARF + int fd; +#endif struct probe_point *pp; argc = parse_options(argc, argv, options, probe_usage, From ec70ccd806111ba3caf596def91a8580138b12db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristian=20H=C3=B8gsberg?= Date: Tue, 1 Dec 2009 15:05:01 -0500 Subject: [PATCH 465/470] perf: Don't free perf_mmap_data until work has been done MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In the CONFIG_PERF_USE_VMALLOC case, perf_mmap_data_free() only schedules the cleanup of the perf_mmap_data struct. In that case we have to wait until the work has been done before we free data. Signed-off-by: Kristian Høgsberg Cc: David S. 
Miller Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Frederic Weisbecker Cc: LKML-Reference: <1259697901-1747-1-git-send-email-krh@bitplanet.net> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 040ee517c80..6b7ddba1dd6 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -2210,6 +2210,7 @@ static void perf_mmap_data_free(struct perf_mmap_data *data) perf_mmap_free_page((unsigned long)data->user_page); for (i = 0; i < data->nr_pages; i++) perf_mmap_free_page((unsigned long)data->data_pages[i]); + kfree(data); } #else @@ -2250,6 +2251,7 @@ static void perf_mmap_data_free_work(struct work_struct *work) perf_mmap_unmark_page(base + (i * PAGE_SIZE)); vfree(base); + kfree(data); } static void perf_mmap_data_free(struct perf_mmap_data *data) @@ -2355,7 +2357,6 @@ static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head) data = container_of(rcu_head, struct perf_mmap_data, rcu_head); perf_mmap_data_free(data); - kfree(data); } static void perf_mmap_data_release(struct perf_event *event) From 3a9089fd78367e2c6c815129030b790a0f5c2715 Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Tue, 1 Dec 2009 12:18:49 -0500 Subject: [PATCH 466/470] tracing: Add DEFINE_EVENT(), DEFINE_SINGLE_EVENT() support to docbook The introduction of the new 'DECLARE_EVENT_CLASS()' obviates the need for the 'TRACE_EVENT()' macro in some cases. Thus, docbook style comments that used to live with 'TRACE_EVENT()' are now moved to 'DEFINE_EVENT()'. Thus, we need to make the docbook system understand the new 'DEFINE_EVENT()' macro. In addition I've tried to futureproof the patch, by also adding support for 'DEFINE_SINGLE_EVENT()', since there has been discussion about renaming: TRACE_EVENT() -> DEFINE_SINGLE_EVENT(). Without this patch the tracepoint docbook fails to build. I've verified that this patch correctly builds the tracepoint docbook which currently covers signals, and irqs. 
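
For reference, the shape of input the updated script parses (the event class, event name and arguments below are hypothetical, chosen only to illustrate the regular expressions added by this patch): kernel-doc takes the tracepoint name from the second argument of DEFINE_EVENT() and the argument list from TP_PROTO().

/**
 * foo_request_done - report completion of a (hypothetical) foo request
 * @req: the request that completed
 * @error: completion status, 0 on success
 */
DEFINE_EVENT(foo_request_class, foo_request_done,
	TP_PROTO(struct foo_request *req, int error),
	TP_ARGS(req, error)
);
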
Changes in v2: - properly indent perl 'if' statements Signed-off-by: Jason Baron Acked-by: Steven Rostedt Acked-by: Randy Dunlap Cc: William Cohen Cc: Frederic Weisbecker Cc: Mathieu Desnoyers Cc: Masami Hiramatsu LKML-Reference: <200912011718.nB1HIn7t011371@int-mx04.intmail.prod.int.phx2.redhat.com> Signed-off-by: Ingo Molnar --- scripts/kernel-doc | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/scripts/kernel-doc b/scripts/kernel-doc index ea9f8a58678..241310e59cd 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc @@ -1852,10 +1852,17 @@ sub tracepoint_munge($) { my $tracepointname = 0; my $tracepointargs = 0; - if($prototype =~ m/TRACE_EVENT\((.*?),/) { + if ($prototype =~ m/TRACE_EVENT\((.*?),/) { $tracepointname = $1; } - if($prototype =~ m/TP_PROTO\((.*?)\)/) { + if ($prototype =~ m/DEFINE_SINGLE_EVENT\((.*?),/) { + $tracepointname = $1; + } + if ($prototype =~ m/DEFINE_EVENT\((.*?),(.*?),/) { + $tracepointname = $2; + } + $tracepointname =~ s/^\s+//; #strip leading whitespace + if ($prototype =~ m/TP_PROTO\((.*?)\)/) { $tracepointargs = $1; } if (($tracepointname eq 0) || ($tracepointargs eq 0)) { @@ -1920,7 +1927,9 @@ sub process_state3_function($$) { if ($prototype =~ /SYSCALL_DEFINE/) { syscall_munge(); } - if ($prototype =~ /TRACE_EVENT/) { + if ($prototype =~ /TRACE_EVENT/ || $prototype =~ /DEFINE_EVENT/ || + $prototype =~ /DEFINE_SINGLE_EVENT/) + { tracepoint_munge($file); } dump_function($prototype, $file); From 6b62fe019e39edfd1dbe3f224ecd0a87d9365223 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 2 Dec 2009 07:23:10 +0100 Subject: [PATCH 467/470] tracing/syscalls: Make syscall events print callbacks static enter_syscall_print_##sname and exit_syscall_print_##sname don't need to have a global scope. Make them static. Signed-off-by: Frederic Weisbecker Cc: Steven Rostedt Cc: Jason Baron Cc: Lai Jiangshan LKML-Reference: <1259734990-9034-1-git-send-regression-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- include/linux/syscalls.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index c2df3a59323..e79e2f3ccc5 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -134,7 +134,7 @@ struct perf_event_attr; #define SYSCALL_TRACE_ENTER_EVENT(sname) \ static const struct syscall_metadata __syscall_meta_##sname; \ static struct ftrace_event_call event_enter_##sname; \ - struct trace_event enter_syscall_print_##sname = { \ + static struct trace_event enter_syscall_print_##sname = { \ .trace = print_syscall_enter, \ }; \ static struct ftrace_event_call __used \ @@ -156,7 +156,7 @@ struct perf_event_attr; #define SYSCALL_TRACE_EXIT_EVENT(sname) \ static const struct syscall_metadata __syscall_meta_##sname; \ static struct ftrace_event_call event_exit_##sname; \ - struct trace_event exit_syscall_print_##sname = { \ + static struct trace_event exit_syscall_print_##sname = { \ .trace = print_syscall_exit, \ }; \ static struct ftrace_event_call __used \ From 1cedae72904b85462082dbcfd5190309ba37f8bd Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 2 Dec 2009 07:32:16 +0100 Subject: [PATCH 468/470] hw-breakpoints: Keep track of user disabled breakpoints When we disable a breakpoint through dr7, we unregister it right away, making us lose track of its corresponding address register value. 
It means that the following sequence would be unsupported: - set address in dr0 - enable it through dr7 - disable it through dr7 - enable it through dr7 because we lost the address register value when we disabled the breakpoint. Don't unregister the disabled breakpoints but rather disable them. Reported-by: "K.Prasad" Signed-off-by: Frederic Weisbecker LKML-Reference: <1259735536-9236-1-git-send-regression-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/ptrace.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 2941b32ea66..04d182a7cfd 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -595,7 +595,7 @@ static unsigned long ptrace_get_dr7(struct perf_event *bp[]) static struct perf_event * ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, - struct task_struct *tsk) + struct task_struct *tsk, int disabled) { int err; int gen_len, gen_type; @@ -616,7 +616,7 @@ ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, attr = bp->attr; attr.bp_len = gen_len; attr.bp_type = gen_type; - attr.disabled = 0; + attr.disabled = disabled; return modify_user_hw_breakpoint(bp, &attr, bp->callback, tsk); } @@ -655,13 +655,21 @@ restore: */ if (!second_pass) continue; + thread->ptrace_bps[i] = NULL; - unregister_hw_breakpoint(bp); + bp = ptrace_modify_breakpoint(bp, len, type, + tsk, 1); + if (IS_ERR(bp)) { + rc = PTR_ERR(bp); + thread->ptrace_bps[i] = NULL; + break; + } + thread->ptrace_bps[i] = bp; } continue; } - bp = ptrace_modify_breakpoint(bp, len, type, tsk); + bp = ptrace_modify_breakpoint(bp, len, type, tsk, 0); /* Incorrect bp, or we have a bug in bp API */ if (IS_ERR(bp)) { From 93aaa45a6ad3f983180223601fc663cc551ad499 Mon Sep 17 00:00:00 2001 From: Liming Wang Date: Wed, 2 Dec 2009 16:42:54 +0800 Subject: [PATCH 469/470] perf tools: Replace %m with %a in sscanf Not all glibc support %m and it results in a compile error if %m not supported. Replace it with %a and (float *) casts. Signed-off-by: Liming Wang Acked-by: Frederic Weisbecker Cc: peterz@infradead.org Cc: mhiramat@redhat.com LKML-Reference: <1259743374-9950-1-git-send-email-liming.wang@windriver.com> Signed-off-by: Ingo Molnar --- tools/perf/util/probe-event.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index e42f3acc9a7..cd7fbda5e2a 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -199,8 +199,8 @@ void parse_trace_kprobe_event(const char *str, char **group, char **event, semantic_error("Too less arguments."); /* Scan event and group name. 
*/ - ret = sscanf(argv[0], "%c:%m[^/ \t]/%m[^ \t]", - &pr, group, event); + ret = sscanf(argv[0], "%c:%a[^/ \t]/%a[^ \t]", + &pr, (float *)(void *)group, (float *)(void *)event); if (ret != 3) semantic_error("Failed to parse event name: %s", argv[0]); pr_debug("Group:%s Event:%s probe:%c\n", *group, *event, pr); @@ -211,7 +211,7 @@ void parse_trace_kprobe_event(const char *str, char **group, char **event, pp->retprobe = (pr == 'r'); /* Scan function name and offset */ - ret = sscanf(argv[1], "%m[^+]+%d", &pp->function, &pp->offset); + ret = sscanf(argv[1], "%a[^+]+%d", (float *)(void *)&pp->function, &pp->offset); if (ret == 1) pp->offset = 0; From e859cf8656043f158b4004ccc8cbbf1ba4f97177 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Mon, 30 Nov 2009 19:02:22 -0500 Subject: [PATCH 470/470] x86: Fix comments of register/stack access functions Fix typos and some redundant comments of register/stack access functions in asm/ptrace.h. Signed-off-by: Masami Hiramatsu Cc: systemtap Cc: DLE Cc: Frederic Weisbecker Cc: Roland McGrath Cc: Oleg Nesterov Cc: Wenji Huang Cc: Mahesh J Salgaonkar LKML-Reference: <20091201000222.7669.7477.stgit@harusame> Signed-off-by: Ingo Molnar Suggested-by: Wenji Huang --- arch/x86/include/asm/ptrace.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index a3d49dd7d26..3d11fd0f44c 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -227,8 +227,8 @@ extern const char *regs_query_register_name(unsigned int offset); * @regs: pt_regs from which register value is gotten. * @offset: offset number of the register. * - * regs_get_register returns the value of a register whose offset from @regs - * is @offset. The @offset is the offset of the register in struct pt_regs. + * regs_get_register returns the value of a register. The @offset is the + * offset of the register in struct pt_regs address which specified by @regs. * If @offset is bigger than MAX_REG_OFFSET, this returns 0. */ static inline unsigned long regs_get_register(struct pt_regs *regs, @@ -244,7 +244,7 @@ static inline unsigned long regs_get_register(struct pt_regs *regs, * @regs: pt_regs which contains kernel stack pointer. * @addr: address which is checked. * - * regs_within_kenel_stack() checks @addr is within the kernel stack page(s). + * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). * If @addr is within the kernel stack, it returns true. If not, returns false. */ static inline int regs_within_kernel_stack(struct pt_regs *regs, @@ -260,7 +260,7 @@ static inline int regs_within_kernel_stack(struct pt_regs *regs, * @n: stack entry number. * * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which - * is specifined by @regs. If the @n th entry is NOT in the kernel stack, + * is specified by @regs. If the @n th entry is NOT in the kernel stack, * this returns 0. */ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
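
As a closing illustration, a hedged sketch of how the accessors documented above are typically consumed from a kprobes pre-handler; the handler name, the chosen register and the stack index are assumptions made for the example, and only regs_get_register() and regs_get_kernel_stack_nth() come from the header touched by this patch.

#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <asm/ptrace.h>

/* Hypothetical pre-handler: log one register and one stack slot per hit. */
static int sample_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	/* @offset is the byte offset of the member inside struct pt_regs. */
	unsigned long ax  = regs_get_register(regs, offsetof(struct pt_regs, ax));

	/* Returns 0 when entry 2 lies outside the current kernel stack. */
	unsigned long st2 = regs_get_kernel_stack_nth(regs, 2);

	pr_info("ax=%#lx stack[2]=%#lx\n", ax, st2);
	return 0;	/* let the probed instruction run */
}
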