perf_counter: Tidy up style details
- whitespace fixlets
- make local variable definitions more consistent

[ Impact: cleanup ]

Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 880ca15adf
commit 22a4f650d6

2 changed files with 22 additions and 19 deletions
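The "more consistent" variable definitions show up below as declaration
blocks reordered roughly longest-line-first. A minimal sketch of that
convention follows; the type and function names are hypothetical, not
code from this commit:

/*
 * Illustrative only: locals ordered longest-to-shortest, the style
 * the cleanup applies in find_get_context(), perf_mmap() and
 * perf_fasync() below.
 */
struct demo_ctx {
	struct demo_ctx *parent;
	int refs;
};

static int demo_get_refs(struct demo_ctx *ctx)
{
	struct demo_ctx *parent_ctx;	/* longest declaration first */
	unsigned long flags;
	int err;

	parent_ctx = ctx->parent;
	flags = 0;
	err = parent_ctx ? parent_ctx->refs : -1;
	return err + (int)flags;
}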
include/linux/perf_counter.h

@@ -562,7 +562,7 @@ struct perf_cpu_context {
 	 *
 	 * task, softirq, irq, nmi context
 	 */
-	int 			recursion[4];
+	int			recursion[4];
 };
 
 #ifdef CONFIG_PERF_COUNTERS
kernel/perf_counter.c

@@ -16,8 +16,9 @@
 #include <linux/file.h>
 #include <linux/poll.h>
 #include <linux/sysfs.h>
-#include <linux/ptrace.h>
+#include <linux/dcache.h>
 #include <linux/percpu.h>
+#include <linux/ptrace.h>
 #include <linux/vmstat.h>
 #include <linux/hardirq.h>
 #include <linux/rculist.h>
@@ -26,7 +27,6 @@
 #include <linux/anon_inodes.h>
 #include <linux/kernel_stat.h>
 #include <linux/perf_counter.h>
-#include <linux/dcache.h>
 
 #include <asm/irq_regs.h>
 
@@ -65,7 +65,9 @@ void __weak hw_perf_disable(void) { barrier(); }
 void __weak hw_perf_enable(void) { barrier(); }
 
 void __weak hw_perf_counter_setup(int cpu) { barrier(); }
-int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
+
+int __weak
+hw_perf_group_sched_in(struct perf_counter *group_leader,
 	       struct perf_cpu_context *cpuctx,
 	       struct perf_counter_context *ctx, int cpu)
 {
@@ -127,8 +129,8 @@ static void put_ctx(struct perf_counter_context *ctx)
  * This has to cope with with the fact that until it is locked,
  * the context could get moved to another task.
  */
-static struct perf_counter_context *perf_lock_task_context(
-			struct task_struct *task, unsigned long *flags)
+static struct perf_counter_context *
+perf_lock_task_context(struct task_struct *task, unsigned long *flags)
 {
 	struct perf_counter_context *ctx;
 
@@ -1330,9 +1332,9 @@ __perf_counter_init_context(struct perf_counter_context *ctx,
 
 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
 {
-	struct perf_cpu_context *cpuctx;
-	struct perf_counter_context *ctx;
 	struct perf_counter_context *parent_ctx;
+	struct perf_counter_context *ctx;
+	struct perf_cpu_context *cpuctx;
 	struct task_struct *task;
 	unsigned long flags;
 	int err;
@@ -1664,8 +1666,8 @@ int perf_counter_task_disable(void)
  */
 void perf_counter_update_userpage(struct perf_counter *counter)
 {
-	struct perf_mmap_data *data;
 	struct perf_counter_mmap_page *userpg;
+	struct perf_mmap_data *data;
 
 	rcu_read_lock();
 	data = rcu_dereference(counter->data);
@@ -1769,10 +1771,11 @@ fail:
 
 static void __perf_mmap_data_free(struct rcu_head *rcu_head)
 {
-	struct perf_mmap_data *data = container_of(rcu_head,
-			struct perf_mmap_data, rcu_head);
+	struct perf_mmap_data *data;
 	int i;
 
+	data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
+
 	free_page((unsigned long)data->user_page);
 	for (i = 0; i < data->nr_pages; i++)
 		free_page((unsigned long)data->data_pages[i]);
@@ -1801,8 +1804,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 	struct perf_counter *counter = vma->vm_file->private_data;
 
 	WARN_ON_ONCE(counter->ctx->parent_ctx);
-	if (atomic_dec_and_mutex_lock(&counter->mmap_count,
-				      &counter->mmap_mutex)) {
+	if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) {
 		struct user_struct *user = current_user();
 
 		atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
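The call re-joined onto one line above is atomic_dec_and_mutex_lock(),
which drops a reference count and returns holding the mutex only when
the count reaches zero. A hedged userspace approximation of that
pattern (C11 atomics plus pthreads; a sketch, not the kernel
implementation):

#include <pthread.h>
#include <stdatomic.h>

static int dec_and_mutex_lock(atomic_int *cnt, pthread_mutex_t *lock)
{
	int old = atomic_load(cnt);

	/* Fast path: while the count stays above 1, decrement without
	 * touching the lock at all. */
	while (old > 1) {
		if (atomic_compare_exchange_weak(cnt, &old, old - 1))
			return 0;
	}

	/* The count is about to reach zero: take the lock, do the final
	 * decrement, and return 1 with the lock held only if we brought
	 * the count to zero ourselves. */
	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(cnt, 1) != 1) {
		pthread_mutex_unlock(lock);
		return 0;
	}
	return 1;
}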
@@ -1821,11 +1823,11 @@ static struct vm_operations_struct perf_mmap_vmops = {
 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct perf_counter *counter = file->private_data;
+	unsigned long user_locked, user_lock_limit;
 	struct user_struct *user = current_user();
+	unsigned long locked, lock_limit;
 	unsigned long vma_size;
 	unsigned long nr_pages;
-	unsigned long user_locked, user_lock_limit;
-	unsigned long locked, lock_limit;
 	long user_extra, extra;
 	int ret = 0;
 
@@ -1900,8 +1902,8 @@ unlock:
 
 static int perf_fasync(int fd, struct file *filp, int on)
 {
-	struct perf_counter *counter = filp->private_data;
 	struct inode *inode = filp->f_path.dentry->d_inode;
+	struct perf_counter *counter = filp->private_data;
 	int retval;
 
 	mutex_lock(&inode->i_mutex);
@@ -2412,8 +2414,8 @@ static void perf_counter_output(struct perf_counter *counter,
  */
 
 struct perf_comm_event {
-	struct task_struct 	*task;
-	char 			*comm;
+	struct task_struct	*task;
+	char			*comm;
 	int			comm_size;
 
 	struct {
@@ -2932,6 +2934,7 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
 			       int nmi, struct pt_regs *regs, u64 addr)
 {
 	int neg = atomic64_add_negative(nr, &counter->hw.count);
+
 	if (counter->hw.irq_period && !neg)
 		perf_swcounter_overflow(counter, nmi, regs, addr);
 }
@@ -3526,7 +3529,7 @@ inherit_counter(struct perf_counter *parent_counter,
 	/*
 	 * Make the child state follow the state of the parent counter,
 	 * not its hw_event.disabled bit. We hold the parent's mutex,
-	 * so we won't race with perf_counter_{en,dis}able_family.
+	 * so we won't race with perf_counter_{en, dis}able_family.
 	 */
 	if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
 		child_counter->state = PERF_COUNTER_STATE_INACTIVE;
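The __perf_mmap_data_free() hunk earlier replaces a two-line
initializer with a declaration followed by a container_of()
assignment. For readers new to the idiom, here is a self-contained
userspace rendition; the structure names are stand-ins, and the macro
matches the kernel's definition minus its type checking:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head {
	void (*func)(struct rcu_head *head);
};

struct mmap_data {			/* hypothetical stand-in    */
	int nr_pages;
	struct rcu_head rcu_head;	/* embedded callback handle */
};

/* The callback only receives the embedded rcu_head; container_of()
 * recovers a pointer to the enclosing structure from it. */
static void mmap_data_free(struct rcu_head *rcu_head)
{
	struct mmap_data *data;

	data = container_of(rcu_head, struct mmap_data, rcu_head);
	printf("freeing %d pages\n", data->nr_pages);
}

int main(void)
{
	struct mmap_data d = { .nr_pages = 4 };

	mmap_data_free(&d.rcu_head);
	return 0;
}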