/*
 * Performance counter core code
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/vmstat.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_counter.h>

#include <asm/irq_regs.h>

/*
 * Each CPU has a list of per CPU counters:
 */
DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_counters __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

static atomic_t nr_counters __read_mostly;
static atomic_t nr_mmap_counters __read_mostly;
static atomic_t nr_comm_counters __read_mostly;
static atomic_t nr_task_counters __read_mostly;

/*
 * perf counter paranoia level:
 *  0 - not paranoid
 *  1 - disallow CPU counters to unprivileged users
 *  2 - disallow kernel profiling to unprivileged users
 */
int sysctl_perf_counter_paranoid __read_mostly;

static inline bool perf_paranoid_cpu(void)
{
        return sysctl_perf_counter_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
        return sysctl_perf_counter_paranoid > 1;
}

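/*
 * Illustrative sketch (not code from this file): callers on the syscall
 * path are expected to combine these helpers with a capability check
 * when an unprivileged user asks for something the current paranoia
 * level forbids, along the lines of:
 *
 *	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
 *		return -EACCES;
 */
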
int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */

/*
 * max perf counter sample rate
 */
int sysctl_perf_counter_sample_rate __read_mostly = 100000;

static atomic64_t perf_counter_id;

/*
 * Lock for (sysadmin-configurable) counter reservations:
 */
static DEFINE_SPINLOCK(perf_resource_lock);

/*
 * Architecture provided APIs - weak aliases:
 */
extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
        return NULL;
}

void __weak hw_perf_disable(void)               { barrier(); }
void __weak hw_perf_enable(void)                { barrier(); }

void __weak hw_perf_counter_setup(int cpu)      { barrier(); }
void __weak hw_perf_counter_setup_online(int cpu)       { barrier(); }

int __weak
hw_perf_group_sched_in(struct perf_counter *group_leader,
               struct perf_cpu_context *cpuctx,
               struct perf_counter_context *ctx, int cpu)
{
        return 0;
}

void __weak perf_counter_print_debug(void)      { }

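/*
 * The weak definitions above are only fallbacks; an architecture's PMU
 * code provides strong overrides.  A minimal sketch of such an override
 * (hypothetical, for illustration only; helper name made up):
 *
 *	const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 *	{
 *		int err = arch_validate_and_setup(counter);
 *
 *		if (err)
 *			return ERR_PTR(err);
 *		return &arch_pmu;
 *	}
 */
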
static DEFINE_PER_CPU(int, disable_count);

void __perf_disable(void)
{
        __get_cpu_var(disable_count)++;
}

bool __perf_enable(void)
{
        return !--__get_cpu_var(disable_count);
}

void perf_disable(void)
{
        __perf_disable();
        hw_perf_disable();
}

void perf_enable(void)
{
        if (__perf_enable())
                hw_perf_enable();
}

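/*
 * Usage sketch (illustrative): callers bracket PMU reprogramming with
 * the disable/enable pair, which nests via the per-cpu disable_count:
 *
 *	perf_disable();
 *	...modify counter state, reprogram the hardware...
 *	perf_enable();
 *
 * hw_perf_enable() only runs when the outermost perf_enable() brings
 * the count back to zero.
 */
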
static void get_ctx(struct perf_counter_context *ctx)
{
        WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
{
        struct perf_counter_context *ctx;

        ctx = container_of(head, struct perf_counter_context, rcu_head);
        kfree(ctx);
}

static void put_ctx(struct perf_counter_context *ctx)
{
        if (atomic_dec_and_test(&ctx->refcount)) {
                if (ctx->parent_ctx)
                        put_ctx(ctx->parent_ctx);
                if (ctx->task)
                        put_task_struct(ctx->task);
                call_rcu(&ctx->rcu_head, free_ctx);
        }
}

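/*
 * Context lifetime, in brief: a perf_counter_context is referenced by
 * its task and by each counter attached to it, so it can outlive the
 * task.  Freeing goes through call_rcu(), which is why lockless readers
 * may dereference task->perf_counter_ctxp under rcu_read_lock() and
 * then take a reference with atomic_inc_not_zero() (see
 * perf_lock_task_context() below).  get_ctx() may only be used on a
 * context already known to hold a non-zero refcount.
 */
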
static void unclone_ctx(struct perf_counter_context *ctx)
{
        if (ctx->parent_ctx) {
                put_ctx(ctx->parent_ctx);
                ctx->parent_ctx = NULL;
        }
}

/*
 * If we inherit counters we want to return the parent counter id
 * to userspace.
 */
static u64 primary_counter_id(struct perf_counter *counter)
{
        u64 id = counter->id;

        if (counter->parent)
                id = counter->parent->id;

        return id;
}

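/*
 * Note: inherited (per-child) counters still carry unique ids of their
 * own; the primary id above is what PERF_SAMPLE_ID reports, so samples
 * from a task and its children can be matched up, while
 * PERF_SAMPLE_STREAM_ID exposes the per-counter id for the cases where
 * the exact sample stream matters.
 */
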
/*
 * Get the perf_counter_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_counter_context *
perf_lock_task_context(struct task_struct *task, unsigned long *flags)
{
        struct perf_counter_context *ctx;

        rcu_read_lock();
retry:
        ctx = rcu_dereference(task->perf_counter_ctxp);
        if (ctx) {
                /*
                 * If this context is a clone of another, it might
                 * get swapped for another underneath us by
                 * perf_counter_task_sched_out, though the
                 * rcu_read_lock() protects us from any context
                 * getting freed.  Lock the context and check if it
                 * got swapped before we could get the lock, and retry
                 * if so.  If we locked the right context, then it
                 * can't get swapped on us any more.
                 */
                spin_lock_irqsave(&ctx->lock, *flags);
                if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
                        spin_unlock_irqrestore(&ctx->lock, *flags);
                        goto retry;
                }

                if (!atomic_inc_not_zero(&ctx->refcount)) {
                        spin_unlock_irqrestore(&ctx->lock, *flags);
                        ctx = NULL;
                }
        }
        rcu_read_unlock();
        return ctx;
}

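/*
 * On success perf_lock_task_context() returns with ctx->lock held
 * (interrupt state saved in *flags) and a reference taken on the
 * context; the caller must drop the lock and eventually put_ctx().
 * It returns NULL if the task has no context or the context was
 * concurrently being freed.
 */
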
/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
{
        struct perf_counter_context *ctx;
        unsigned long flags;

        ctx = perf_lock_task_context(task, &flags);
        if (ctx) {
                ++ctx->pin_count;
                spin_unlock_irqrestore(&ctx->lock, flags);
        }
        return ctx;
}

static void perf_unpin_context(struct perf_counter_context *ctx)
{
        unsigned long flags;

        spin_lock_irqsave(&ctx->lock, flags);
        --ctx->pin_count;
        spin_unlock_irqrestore(&ctx->lock, flags);
        put_ctx(ctx);
}

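/*
 * Usage sketch (illustrative): code that needs a stable view of a
 * task's context pins it around the access:
 *
 *	ctx = perf_pin_task_context(task);
 *	if (ctx) {
 *		...inspect or modify ctx; it cannot be swapped away...
 *		perf_unpin_context(ctx);
 *	}
 */
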
/*
 * Add a counter to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
        struct perf_counter *group_leader = counter->group_leader;

        /*
         * Depending on whether it is a standalone or sibling counter,
         * add it straight to the context's counter list, or to the group
         * leader's sibling list:
         */
        if (group_leader == counter)
                list_add_tail(&counter->list_entry, &ctx->counter_list);
        else {
                list_add_tail(&counter->list_entry, &group_leader->sibling_list);
                group_leader->nr_siblings++;
        }

        list_add_rcu(&counter->event_entry, &ctx->event_list);
        ctx->nr_counters++;
        if (counter->attr.inherit_stat)
                ctx->nr_stat++;
}

/*
 * Remove a counter from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
        struct perf_counter *sibling, *tmp;

        if (list_empty(&counter->list_entry))
                return;
        ctx->nr_counters--;
        if (counter->attr.inherit_stat)
                ctx->nr_stat--;

        list_del_init(&counter->list_entry);
        list_del_rcu(&counter->event_entry);

        if (counter->group_leader != counter)
                counter->group_leader->nr_siblings--;

        /*
         * If this was a group counter with sibling counters then
         * upgrade the siblings to singleton counters by adding them
         * to the context list directly:
         */
        list_for_each_entry_safe(sibling, tmp,
                                 &counter->sibling_list, list_entry) {

                list_move_tail(&sibling->list_entry, &ctx->counter_list);
                sibling->group_leader = sibling;
        }
}

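/*
 * Example of the promotion above (descriptive): if a group leader L
 * with siblings A and B is removed, A and B are moved onto
 * ctx->counter_list as independent singleton counters, each becoming
 * its own group_leader.  This is what happens when a group leader's
 * fd is closed before its siblings' fds.
 */
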
perf_counter: Add support for pinned and exclusive counter groups
Impact: New perf_counter features
A pinned counter group is one that the user wants to have on the CPU
whenever possible, i.e. whenever the associated task is running, for
a per-task group, or always for a per-cpu group. If the system
cannot satisfy that, it puts the group into an error state where
it is not scheduled any more and reads from it return EOF (i.e. 0
bytes read). The group can be released from error state and made
readable again using prctl(PR_TASK_PERF_COUNTERS_ENABLE). When we
have finer-grained enable/disable controls on counters we'll be able
to reset the error state on individual groups.
An exclusive group is one that the user wants to be the only group
using the CPU performance monitor hardware whenever it is on. The
counter group scheduler will not schedule an exclusive group if there
are already other groups on the CPU and will not schedule other groups
onto the CPU if there is an exclusive group scheduled (that statement
does not apply to groups containing only software counters, which can
always go on and which do not prevent an exclusive group from going on).
With an exclusive group, we will be able to let users program PMU
registers at a low level without the concern that those settings will
perturb other measurements.
Along the way this reorganizes things a little:
- is_software_counter() is moved to perf_counter.h.
- cpuctx->active_oncpu now records the number of hardware counters on
the CPU, i.e. it now excludes software counters. Nothing was reading
cpuctx->active_oncpu before, so this change is harmless.
- A new cpuctx->exclusive field records whether we currently have an
exclusive group on the CPU.
- counter_sched_out moves higher up in perf_counter.c and gets called
from __perf_counter_remove_from_context and __perf_counter_exit_task,
where we used to have essentially the same code.
- __perf_counter_sched_in now goes through the counter list twice, doing
the pinned counters in the first loop and the non-pinned counters in
the second loop, in order to give the pinned counters the best chance
to be scheduled in.
Note that only a group leader can be exclusive or pinned, and that
attribute applies to the whole group. This avoids some awkwardness in
some corner cases (e.g. where a group leader is closed and the other
group members get added to the context list). If we want to relax that
restriction later, we can, and it is easier to relax a restriction than
to apply a new one.
This doesn't yet handle the case where a pinned counter is inherited
and goes into error state in the child - the error state is not
propagated up to the parent when the child exits, and arguably it
should.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-14 10:00:30 +00:00
|
|
|
|
static void
|
|
|
|
|
counter_sched_out(struct perf_counter *counter,
|
|
|
|
|
struct perf_cpu_context *cpuctx,
|
|
|
|
|
struct perf_counter_context *ctx)
|
|
|
|
|
{
|
|
|
|
|
if (counter->state != PERF_COUNTER_STATE_ACTIVE)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
counter->state = PERF_COUNTER_STATE_INACTIVE;
|
2009-08-13 09:47:54 +00:00
|
|
|
|
if (counter->pending_disable) {
|
|
|
|
|
counter->pending_disable = 0;
|
|
|
|
|
counter->state = PERF_COUNTER_STATE_OFF;
|
|
|
|
|
}
|
2009-04-06 09:45:10 +00:00
|
|
|
|
counter->tstamp_stopped = ctx->time;
|
2009-04-29 10:47:03 +00:00
|
|
|
|
counter->pmu->disable(counter);
|
perf_counter: Add support for pinned and exclusive counter groups
Impact: New perf_counter features
A pinned counter group is one that the user wants to have on the CPU
whenever possible, i.e. whenever the associated task is running, for
a per-task group, or always for a per-cpu group. If the system
cannot satisfy that, it puts the group into an error state where
it is not scheduled any more and reads from it return EOF (i.e. 0
bytes read). The group can be released from error state and made
readable again using prctl(PR_TASK_PERF_COUNTERS_ENABLE). When we
have finer-grained enable/disable controls on counters we'll be able
to reset the error state on individual groups.
An exclusive group is one that the user wants to be the only group
using the CPU performance monitor hardware whenever it is on. The
counter group scheduler will not schedule an exclusive group if there
are already other groups on the CPU and will not schedule other groups
onto the CPU if there is an exclusive group scheduled (that statement
does not apply to groups containing only software counters, which can
always go on and which do not prevent an exclusive group from going on).
With an exclusive group, we will be able to let users program PMU
registers at a low level without the concern that those settings will
perturb other measurements.
Along the way this reorganizes things a little:
- is_software_counter() is moved to perf_counter.h.
- cpuctx->active_oncpu now records the number of hardware counters on
the CPU, i.e. it now excludes software counters. Nothing was reading
cpuctx->active_oncpu before, so this change is harmless.
- A new cpuctx->exclusive field records whether we currently have an
exclusive group on the CPU.
- counter_sched_out moves higher up in perf_counter.c and gets called
from __perf_counter_remove_from_context and __perf_counter_exit_task,
where we used to have essentially the same code.
- __perf_counter_sched_in now goes through the counter list twice, doing
the pinned counters in the first loop and the non-pinned counters in
the second loop, in order to give the pinned counters the best chance
to be scheduled in.
Note that only a group leader can be exclusive or pinned, and that
attribute applies to the whole group. This avoids some awkwardness in
some corner cases (e.g. where a group leader is closed and the other
group members get added to the context list). If we want to relax that
restriction later, we can, and it is easier to relax a restriction than
to apply a new one.
This doesn't yet handle the case where a pinned counter is inherited
and goes into error state in the child - the error state is not
propagated up to the parent when the child exits, and arguably it
should.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-14 10:00:30 +00:00
|
|
|
|
counter->oncpu = -1;
|
|
|
|
|
|
|
|
|
|
if (!is_software_counter(counter))
|
|
|
|
|
cpuctx->active_oncpu--;
|
|
|
|
|
ctx->nr_active--;
|
2009-06-02 17:22:16 +00:00
|
|
|
|
if (counter->attr.exclusive || !cpuctx->active_oncpu)
|
                cpuctx->exclusive = 0;
}
perf_counter: Add counter enable/disable ioctls
Impact: New perf_counter features
This primarily adds a way for perf_counter users to enable and disable
counters and groups. Enabling or disabling a counter or group also
enables or disables all of the child counters that have been cloned
from it to monitor children of the task monitored by the top-level
counter. The userspace interface to enable/disable counters is via
ioctl on the counter file descriptor.
Along the way this extends the code that handles child counters to
handle child counter groups properly. A group with multiple counters
will be cloned to child tasks if and only if the group leader has the
hw_event.inherit bit set - if it is set the whole group is cloned as a
group in the child task.
In order to be able to enable or disable all child counters of a given
top-level counter, we need a way to find them all. Hence I have added
a child_list field to struct perf_counter, which is the head of the
list of children for a top-level counter, or the link in that list for
a child counter. That list is protected by the perf_counter.mutex
field.
This also adds a mutex to the perf_counter_context struct. Previously
the list of counters was protected just by the lock field in the
context, which meant that perf_counter_init_task had to take that lock
and then take whatever lock/mutex protects the top-level counter's
child_list. But the counter enable/disable functions need to take
that lock in order to traverse the list, then for each counter take
the lock in that counter's context in order to change the counter's
state safely, which would lead to a deadlock.
To solve this, we now have both a mutex and a spinlock in the context,
and taking either is sufficient to ensure the list of counters can't
change - you have to take both before changing the list. Now
perf_counter_init_task takes the mutex instead of the lock (which
incidentally means that inherit_counter can use GFP_KERNEL instead of
GFP_ATOMIC) and thus avoids the possible deadlock. Similarly the new
enable/disable functions can take the mutex while traversing the list
of child counters without incurring a possible deadlock when the
counter manipulation code locks the context for a child counter.
We also had a misfeature that the first counter added to a context
would possibly not go on until the next sched-in, because we were
using ctx->nr_active to detect if the context was running on a CPU.
But nr_active is the number of active counters, and if that was zero
(because the context didn't have any counters yet) it would look like
the context wasn't running on a cpu and so the retry code in
__perf_install_in_context wouldn't retry. So this adds an 'is_active'
field that is set when the context is on a CPU, even if it has no
counters. The is_active field is only used for task contexts, not for
per-cpu contexts.
If we enable a subsidiary counter in a group that is active on a CPU,
and the arch code can't enable the counter, then we have to pull the
whole group off the CPU. We do this with group_sched_out, which gets
moved up in the file so it comes before all its callers. This also
adds similar logic to __perf_install_in_context so that the "all on,
or none" invariant of groups is preserved when adding a new counter to
a group.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-17 07:10:22 +00:00
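From userspace, the new interface boils down to an ioctl() on the counter file descriptor. A minimal sketch follows, assuming a counter fd obtained from the perf counter syscall and the PERF_COUNTER_IOC_ENABLE / PERF_COUNTER_IOC_DISABLE requests from the kernel's perf_counter.h of this era being visible to the build; the helper names are made up for illustration. Disabling a group leader takes the whole group off the PMU, and the operation also propagates to cloned child counters as described above.

#include <sys/ioctl.h>
#include <linux/perf_counter.h>

/* Stop a counter from counting (on a group leader this idles the group). */
static int counter_pause(int counter_fd)
{
        return ioctl(counter_fd, PERF_COUNTER_IOC_DISABLE);
}

/* Let it count again; cloned child counters are toggled as well. */
static int counter_resume(int counter_fd)
{
        return ioctl(counter_fd, PERF_COUNTER_IOC_ENABLE);
}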
static void
group_sched_out(struct perf_counter *group_counter,
                struct perf_cpu_context *cpuctx,
                struct perf_counter_context *ctx)
{
        struct perf_counter *counter;

        if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
                return;

        counter_sched_out(group_counter, cpuctx, ctx);

        /*
         * Schedule out siblings (if any):
         */
        list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
                counter_sched_out(counter, cpuctx, ctx);

        if (group_counter->attr.exclusive)
                cpuctx->exclusive = 0;
}
/*
 * Cross CPU call to remove a performance counter
 *
 * We disable the counter on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_counter_remove_from_context(void *info)
{
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_counter *counter = info;
        struct perf_counter_context *ctx = counter->ctx;

        /*
         * If this is a task context, we need to check whether it is
         * the current task context of this cpu. If not it has been
         * scheduled out before the smp call arrived.
         */
        if (ctx->task && cpuctx->task_ctx != ctx)
                return;

        spin_lock(&ctx->lock);
        /*
         * Protect the list operation against NMI by disabling the
         * counters on a global level.
         */
        perf_disable();
        counter_sched_out(counter, cpuctx, ctx);

        list_del_counter(counter, ctx);

        if (!ctx->task) {
                /*
                 * Allow more per task counters with respect to the
                 * reservation:
                 */
                cpuctx->max_pertask =
                        min(perf_max_counters - ctx->nr_counters,
                            perf_max_counters - perf_reserved_percpu);
        }

        perf_enable();
        spin_unlock(&ctx->lock);
}
/*
 * Remove the counter from a task's (or a CPU's) list of counters.
 *
 * Must be called with ctx->mutex held.
 *
 * CPU counters are removed with an smp call. For task counters we only
 * call when the task is on a CPU.
perf_counter: Fix race in attaching counters to tasks and exiting
Commit 564c2b21 ("perf_counter: Optimize context switch between
identical inherited contexts") introduced a race where it is possible
that a counter being attached to a task could get attached to the
wrong task, if the task is one that has inherited its context from
another task via fork. This happens because the optimized context
switch could switch the context to another task after find_get_context
has read task->perf_counter_ctxp. In fact, it's possible that the
context could then get freed, if the other task then exits.
This fixes the problem by protecting both the context switch and the
critical code in find_get_context with spinlocks. The context switch
locks the ctx->lock of both the outgoing and incoming contexts before
swapping them. That means that once code such as find_get_context
has obtained the spinlock for the context associated with a task,
the context can't get swapped to another task. However, the context
may have been swapped in the interval between reading
task->perf_counter_ctxp and getting the lock, so it is necessary to
check and retry.
To make sure that none of the contexts being looked at in
find_get_context can get freed, this changes the context freeing code
to use RCU. Thus an rcu_read_lock() is sufficient to ensure that no
contexts can get freed. This part of the patch is lifted from a patch
posted by Peter Zijlstra.
This also adds a check to make sure that we can't add a counter to a
task that is exiting.
There is also a race between perf_counter_exit_task and
find_get_context; this solves the race by moving the get_ctx that
was in perf_counter_alloc into the locked region in find_get_context,
so that once find_get_context has got the context for a task, it
won't get freed even if the task calls perf_counter_exit_task. It
doesn't matter if new top-level (non-inherited) counters get attached
to the context after perf_counter_exit_task has detached the context
from the task. They will just stay there and never get scheduled in
until the counters' fds get closed, and then perf_release will remove
them from the context and eventually free the context.
With this, we are now doing the unclone in find_get_context rather
than when a counter was added to or removed from a context (actually,
we were missing the unclone_ctx() call when adding a counter to a
context). We don't need to unclone when removing a counter from a
context because we have no way to remove a counter from a cloned
context.
This also takes out the smp_wmb() in find_get_context, which Peter
Zijlstra pointed out was unnecessary because the cmpxchg implies a
full barrier anyway.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <18974.33033.667187.273886@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-28 12:18:17 +00:00
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid. This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_counter_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_counter_remove_from_context(struct perf_counter *counter)
{
        struct perf_counter_context *ctx = counter->ctx;
        struct task_struct *task = ctx->task;

        if (!task) {
                /*
                 * Per cpu counters are removed via an smp call and
                 * the removal is always successful.
                 */
                smp_call_function_single(counter->cpu,
                                         __perf_counter_remove_from_context,
                                         counter, 1);
                return;
        }

retry:
        task_oncpu_function_call(task, __perf_counter_remove_from_context,
                                 counter);

        spin_lock_irq(&ctx->lock);
        /*
         * If the context is active we need to retry the smp call.
         */
        if (ctx->nr_active && !list_empty(&counter->list_entry)) {
                spin_unlock_irq(&ctx->lock);
                goto retry;
        }

        /*
         * The lock prevents the context from being scheduled in, so we
         * can remove the counter safely if the call above did not
         * succeed.
         */
        if (!list_empty(&counter->list_entry)) {
                list_del_counter(counter, ctx);
        }
        spin_unlock_irq(&ctx->lock);
}
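The check-and-retry idiom from the race fix above (re-reading task->perf_counter_ctxp after taking ctx->lock so the context cannot be swapped away underneath us) looks roughly like the sketch below. The function name lock_task_context is invented for illustration; this is the pattern the commit message describes, not the literal find_get_context() body, and handling of an exiting task is omitted.

static struct perf_counter_context *
lock_task_context(struct task_struct *task, unsigned long *flags)
{
        struct perf_counter_context *ctx;

retry:
        rcu_read_lock();
        ctx = rcu_dereference(task->perf_counter_ctxp);
        if (ctx) {
                spin_lock_irqsave(&ctx->lock, *flags);
                /*
                 * The context may have been swapped to another task
                 * between the rcu_dereference() and taking the lock.
                 */
                if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
                        spin_unlock_irqrestore(&ctx->lock, *flags);
                        rcu_read_unlock();
                        goto retry;
                }
        }
        rcu_read_unlock();
        return ctx;     /* returned with ctx->lock held when non-NULL */
}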
static inline u64 perf_clock(void)
perf_counter: record time running and time enabled for each counter
Impact: new functionality
Currently, if there are more counters enabled than can fit on the CPU,
the kernel will multiplex the counters on to the hardware using
round-robin scheduling. That isn't too bad for sampling counters, but
for counting counters it means that the value read from a counter
represents some unknown fraction of the true count of events that
occurred while the counter was enabled.
This remedies the situation by keeping track of how long each counter
is enabled for, and how long it is actually on the cpu and counting
events. These times are recorded in nanoseconds using the task clock
for per-task counters and the cpu clock for per-cpu counters.
These values can be supplied to userspace on a read from the counter.
Userspace requests that they be supplied after the counter value by
setting the PERF_FORMAT_TOTAL_TIME_ENABLED and/or
PERF_FORMAT_TOTAL_TIME_RUNNING bits in the hw_event.read_format field
when creating the counter. (There is no way to change the read format
after the counter is created, though it would be possible to add some
way to do that.)
Using this information it is possible for userspace to scale the count
it reads from the counter to get an estimate of the true count:
true_count_estimate = count * total_time_enabled / total_time_running
This also lets userspace detect the situation where the counter never
got to go on the cpu: total_time_running == 0.
This functionality has been requested by the PAPI developers, and will
be generally needed for interpreting the count values from counting
counters correctly.
In the implementation, this keeps 5 time values (in nanoseconds) for
each counter: total_time_enabled and total_time_running are used when
the counter is in state OFF or ERROR and for reporting back to
userspace. When the counter is in state INACTIVE or ACTIVE, it is the
tstamp_enabled, tstamp_running and tstamp_stopped values that are
relevant, and total_time_enabled and total_time_running are determined
from them. (tstamp_stopped is only used in INACTIVE state.) The
reason for doing it like this is that it means that only counters
being enabled or disabled at sched-in and sched-out time need to be
updated. There are no new loops that iterate over all counters to
update total_time_enabled or total_time_running.
This also keeps separate child_total_time_running and
child_total_time_enabled fields that get added in when reporting the
totals to userspace. They are separate fields so that they can be
atomic. We don't want to use atomics for total_time_running,
total_time_enabled etc., because then we would have to use atomic
sequences to update them, which are slower than regular arithmetic and
memory accesses.
It is possible to measure total_time_running by adding a task_clock
counter to each group of counters, and total_time_enabled can be
measured approximately with a top-level task_clock counter (though
inaccuracies will creep in if you need to disable and enable groups
since it is not possible in general to disable/enable the top-level
task_clock counter simultaneously with another group). However, that
adds extra overhead - I measured around 15% increase in the context
switch latency reported by lat_ctx (from lmbench) when a task_clock
counter was added to each of 2 groups, and around 25% increase when a
task_clock counter was added to each of 4 groups. (In both cases a
top-level task-clock counter was also added.)
In contrast, the code added in this commit gives better information
with no overhead that I could measure (in fact in some cases I
measured lower times with this code, but the differences were all less
than one standard deviation).
[ v2: address review comments by Andrew Morton. ]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Andrew Morton <akpm@linux-foundation.org>
Orig-LKML-Reference: <18890.6578.728637.139402@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-03-25 11:46:58 +00:00
{
        return cpu_clock(smp_processor_id());
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_counter_context *ctx)
{
        u64 now = perf_clock();

        ctx->time += now - ctx->timestamp;
        ctx->timestamp = now;
}

/*
 * Update the total_time_enabled and total_time_running fields for a counter.
 */
static void update_counter_times(struct perf_counter *counter)
{
        struct perf_counter_context *ctx = counter->ctx;
        u64 run_end;

        if (counter->state < PERF_COUNTER_STATE_INACTIVE)
                return;

        counter->total_time_enabled = ctx->time - counter->tstamp_enabled;

        if (counter->state == PERF_COUNTER_STATE_INACTIVE)
                run_end = counter->tstamp_stopped;
        else
                run_end = ctx->time;

        counter->total_time_running = run_end - counter->tstamp_running;
}

/*
 * Update total_time_enabled and total_time_running for all counters in a group.
 */
static void update_group_times(struct perf_counter *leader)
{
        struct perf_counter *counter;

        update_counter_times(leader);
        list_for_each_entry(counter, &leader->sibling_list, list_entry)
                update_counter_times(counter);
}
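On the userspace side, the scaling rule from the time-accounting commit message above is straightforward to apply. A sketch follows, assuming the counter was opened with PERF_FORMAT_TOTAL_TIME_ENABLED and PERF_FORMAT_TOTAL_TIME_RUNNING in its read_format and that read() then returns the raw value followed by the two times; the struct layout and the helper names are illustrative assumptions, not the documented record format.

#include <stdint.h>
#include <unistd.h>

struct counter_reading {
        uint64_t value;         /* raw count */
        uint64_t time_enabled;  /* ns the counter was enabled */
        uint64_t time_running;  /* ns it was actually on the PMU */
};

/* Estimate the true count: value * time_enabled / time_running. */
static uint64_t read_scaled(int counter_fd, struct counter_reading *r)
{
        if (read(counter_fd, r, sizeof(*r)) != sizeof(*r))
                return 0;
        if (r->time_running == 0)       /* never got onto the CPU */
                return 0;
        return (uint64_t)((double)r->value *
                          r->time_enabled / r->time_running);
}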
/*
 * Cross CPU call to disable a performance counter
 */
static void __perf_counter_disable(void *info)
{
        struct perf_counter *counter = info;
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_counter_context *ctx = counter->ctx;

        /*
         * If this is a per-task counter, need to check whether this
         * counter's task is the current task on this cpu.
         */
        if (ctx->task && cpuctx->task_ctx != ctx)
                return;

        spin_lock(&ctx->lock);
        /*
         * If the counter is on, turn it off.
         * If it is in error state, leave it in error state.
         */
        if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
                update_context_time(ctx);
                update_counter_times(counter);
perf_counter: Add counter enable/disable ioctls
Impact: New perf_counter features
This primarily adds a way for perf_counter users to enable and disable
counters and groups. Enabling or disabling a counter or group also
enables or disables all of the child counters that have been cloned
from it to monitor children of the task monitored by the top-level
counter. The userspace interface to enable/disable counters is via
ioctl on the counter file descriptor.
Along the way this extends the code that handles child counters to
handle child counter groups properly. A group with multiple counters
will be cloned to child tasks if and only if the group leader has the
hw_event.inherit bit set - if it is set the whole group is cloned as a
group in the child task.
In order to be able to enable or disable all child counters of a given
top-level counter, we need a way to find them all. Hence I have added
a child_list field to struct perf_counter, which is the head of the
list of children for a top-level counter, or the link in that list for
a child counter. That list is protected by the perf_counter.mutex
field.
This also adds a mutex to the perf_counter_context struct. Previously
the list of counters was protected just by the lock field in the
context, which meant that perf_counter_init_task had to take that lock
and then take whatever lock/mutex protects the top-level counter's
child_list. But the counter enable/disable functions need to take
that lock in order to traverse the list, then for each counter take
the lock in that counter's context in order to change the counter's
state safely, which will lead to a deadlock.
To solve this, we now have both a mutex and a spinlock in the context,
and taking either is sufficient to ensure the list of counters can't
change - you have to take both before changing the list. Now
perf_counter_init_task takes the mutex instead of the lock (which
incidentally means that inherit_counter can use GFP_KERNEL instead of
GFP_ATOMIC) and thus avoids the possible deadlock. Similarly the new
enable/disable functions can take the mutex while traversing the list
of child counters without incurring a possible deadlock when the
counter manipulation code locks the context for a child counter.
We also had a misfeature that the first counter added to a context
would possibly not go on until the next sched-in, because we were
using ctx->nr_active to detect if the context was running on a CPU.
But nr_active is the number of active counters, and if that was zero
(because the context didn't have any counters yet) it would look like
the context wasn't running on a cpu and so the retry code in
__perf_install_in_context wouldn't retry. So this adds an 'is_active'
field that is set when the context is on a CPU, even if it has no
counters. The is_active field is only used for task contexts, not for
per-cpu contexts.
If we enable a subsidiary counter in a group that is active on a CPU,
and the arch code can't enable the counter, then we have to pull the
whole group off the CPU. We do this with group_sched_out, which gets
moved up in the file so it comes before all its callers. This also
adds similar logic to __perf_install_in_context so that the "all on,
or none" invariant of groups is preserved when adding a new counter to
a group.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-17 07:10:22 +00:00
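For illustration, the ioctl interface mentioned above would be driven from userspace roughly as in the sketch below. The PERF_COUNTER_IOC_ENABLE/PERF_COUNTER_IOC_DISABLE request names are assumed from the linux/perf_counter.h ABI of this period rather than spelled out in the message above.

/* Illustrative userspace sketch: toggle a counter (and, through the kernel,
 * all of its inherited child counters) via ioctl on the counter fd.
 * The PERF_COUNTER_IOC_* request names are assumed from <linux/perf_counter.h>.
 */
#include <sys/ioctl.h>
#include <linux/perf_counter.h>

static int pause_counter(int counter_fd)
{
        return ioctl(counter_fd, PERF_COUNTER_IOC_DISABLE, 0);
}

static int resume_counter(int counter_fd)
{
        return ioctl(counter_fd, PERF_COUNTER_IOC_ENABLE, 0);
}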
		if (counter == counter->group_leader)
			group_sched_out(counter, cpuctx, ctx);
		else
			counter_sched_out(counter, cpuctx, ctx);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock(&ctx->lock);
}

perf_counter: Fix race in attaching counters to tasks and exiting
Commit 564c2b21 ("perf_counter: Optimize context switch between
identical inherited contexts") introduced a race where it is possible
that a counter being attached to a task could get attached to the
wrong task, if the task is one that has inherited its context from
another task via fork. This happens because the optimized context
switch could switch the context to another task after find_get_context
has read task->perf_counter_ctxp. In fact, it's possible that the
context could then get freed, if the other task then exits.
This fixes the problem by protecting both the context switch and the
critical code in find_get_context with spinlocks. The context switch
locks the ctx->lock of both the outgoing and incoming contexts before
swapping them. That means that once code such as find_get_context
has obtained the spinlock for the context associated with a task,
the context can't get swapped to another task. However, the context
may have been swapped in the interval between reading
task->perf_counter_ctxp and getting the lock, so it is necessary to
check and retry.
To make sure that none of the contexts being looked at in
find_get_context can get freed, this changes the context freeing code
to use RCU. Thus an rcu_read_lock() is sufficient to ensure that no
contexts can get freed. This part of the patch is lifted from a patch
posted by Peter Zijlstra.
This also adds a check to make sure that we can't add a counter to a
task that is exiting.
There is also a race between perf_counter_exit_task and
find_get_context; this solves the race by moving the get_ctx that
was in perf_counter_alloc into the locked region in find_get_context,
so that once find_get_context has got the context for a task, it
won't get freed even if the task calls perf_counter_exit_task. It
doesn't matter if new top-level (non-inherited) counters get attached
to the context after perf_counter_exit_task has detached the context
from the task. They will just stay there and never get scheduled in
until the counters' fds get closed, and then perf_release will remove
them from the context and eventually free the context.
With this, we are now doing the unclone in find_get_context rather
than when a counter was added to or removed from a context (actually,
we were missing the unclone_ctx() call when adding a counter to a
context). We don't need to unclone when removing a counter from a
context because we have no way to remove a counter from a cloned
context.
This also takes out the smp_wmb() in find_get_context, which Peter
Zijlstra pointed out was unnecessary because the cmpxchg implies a
full barrier anyway.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <18974.33033.667187.273886@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-28 12:18:17 +00:00
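The locked lookup described above amounts to a read, lock, revalidate, retry loop. A minimal sketch of that pattern follows; the helper name is hypothetical and the real find_get_context differs in detail.

/*
 * Sketch only (hypothetical helper, not the actual find_get_context body):
 * pin down a task's counter context using the read/lock/revalidate pattern
 * described above.  RCU keeps the context from being freed while we look
 * at it; ctx->lock keeps it from being swapped to another task once taken.
 */
static struct perf_counter_context *
lock_task_ctx_sketch(struct task_struct *task, unsigned long *flags)
{
	struct perf_counter_context *ctx;

	rcu_read_lock();
 retry:
	ctx = rcu_dereference(task->perf_counter_ctxp);
	if (ctx) {
		spin_lock_irqsave(&ctx->lock, *flags);
		/*
		 * The optimized context switch may have swapped the context
		 * to another task between the rcu_dereference() and taking
		 * ctx->lock; if so, drop the lock and try again.
		 */
		if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
			spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}
	}
	rcu_read_unlock();
	return ctx;	/* if non-NULL, returned with ctx->lock held */
}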
/*
 * Disable a counter.
 *
 * If counter->ctx is a cloned context, callers must make sure that
 * every task struct that counter->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_counter_for_each_child or perf_counter_for_each because they
 * hold the top-level counter's child_mutex, so any descendant that
 * goes to exit will block in sync_child_counter.
 * When called from perf_pending_counter it's OK because counter->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_counter_task_sched_out for this context.
 */
static void perf_counter_disable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_disable,
					 counter, 1);
		return;
	}

 retry:
	task_oncpu_function_call(task, __perf_counter_disable, counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the counter is still active, we need to retry the cross-call.
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
		update_counter_times(counter);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock_irq(&ctx->lock);
}

perf_counter: Add support for pinned and exclusive counter groups
Impact: New perf_counter features
A pinned counter group is one that the user wants to have on the CPU
whenever possible, i.e. whenever the associated task is running, for
a per-task group, or always for a per-cpu group. If the system
cannot satisfy that, it puts the group into an error state where
it is not scheduled any more and reads from it return EOF (i.e. 0
bytes read). The group can be released from error state and made
readable again using prctl(PR_TASK_PERF_COUNTERS_ENABLE). When we
have finer-grained enable/disable controls on counters we'll be able
to reset the error state on individual groups.
An exclusive group is one that the user wants to be the only group
using the CPU performance monitor hardware whenever it is on. The
counter group scheduler will not schedule an exclusive group if there
are already other groups on the CPU and will not schedule other groups
onto the CPU if there is an exclusive group scheduled (that statement
does not apply to groups containing only software counters, which can
always go on and which do not prevent an exclusive group from going on).
With an exclusive group, we will be able to let users program PMU
registers at a low level without the concern that those settings will
perturb other measurements.
Along the way this reorganizes things a little:
- is_software_counter() is moved to perf_counter.h.
- cpuctx->active_oncpu now records the number of hardware counters on
the CPU, i.e. it now excludes software counters. Nothing was reading
cpuctx->active_oncpu before, so this change is harmless.
- A new cpuctx->exclusive field records whether we currently have an
exclusive group on the CPU.
- counter_sched_out moves higher up in perf_counter.c and gets called
from __perf_counter_remove_from_context and __perf_counter_exit_task,
where we used to have essentially the same code.
- __perf_counter_sched_in now goes through the counter list twice, doing
the pinned counters in the first loop and the non-pinned counters in
the second loop, in order to give the pinned counters the best chance
to be scheduled in.
Note that only a group leader can be exclusive or pinned, and that
attribute applies to the whole group. This avoids some awkwardness in
some corner cases (e.g. where a group leader is closed and the other
group members get added to the context list). If we want to relax that
restriction later, we can, and it is easier to relax a restriction than
to apply a new one.
This doesn't yet handle the case where a pinned counter is inherited
and goes into error state in the child - the error state is not
propagated up to the parent when the child exits, and arguably it
should.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-14 10:00:30 +00:00
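As an illustration of the user-visible side, a pinned or exclusive group is requested by setting the corresponding bits on the group leader before opening it; only the leader's bits matter, per the note above. The sketch below is not part of this file and assumes the perf_counter_attr field names used elsewhere in this code plus a perf_counter_open() syscall wrapper.

/* Illustrative userspace sketch: open a pinned, exclusive group leader and
 * attach one sibling counter to the same group.  The attr fields, event
 * names and perf_counter_open() wrapper are assumed from the perf_counter
 * ABI of this era.
 */
#include <sys/types.h>
#include <linux/perf_counter.h>

/* assumed thin wrapper around the perf_counter_open syscall */
extern int perf_counter_open(struct perf_counter_attr *attr, pid_t pid,
                             int cpu, int group_fd, unsigned long flags);

static int open_pinned_exclusive_group(pid_t pid, int *leader_fd, int *sibling_fd)
{
        struct perf_counter_attr leader = {
                .type      = PERF_TYPE_HARDWARE,
                .config    = PERF_COUNT_HW_CPU_CYCLES,
                .pinned    = 1, /* keep the group on the CPU whenever possible */
                .exclusive = 1, /* no other groups on the PMU while it is on */
        };
        struct perf_counter_attr sibling = {
                .type      = PERF_TYPE_HARDWARE,
                .config    = PERF_COUNT_HW_INSTRUCTIONS,
        };

        *leader_fd = perf_counter_open(&leader, pid, -1, -1, 0);
        if (*leader_fd < 0)
                return -1;
        /* passing the leader's fd as group_fd makes the sibling join the group */
        *sibling_fd = perf_counter_open(&sibling, pid, -1, *leader_fd, 0);
        return *sibling_fd < 0 ? -1 : 0;
}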
static int
counter_sched_in(struct perf_counter *counter,
		 struct perf_cpu_context *cpuctx,
		 struct perf_counter_context *ctx,
		 int cpu)
{
	if (counter->state <= PERF_COUNTER_STATE_OFF)
		return 0;

	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (counter->pmu->enable(counter)) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->oncpu = -1;
		return -EAGAIN;
	}

	counter->tstamp_running += ctx->time - counter->tstamp_stopped;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (counter->attr.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}

static int
group_sched_in(struct perf_counter *group_counter,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx,
	       int cpu)
{
	struct perf_counter *counter, *partial_group;
	int ret;

	if (group_counter->state == PERF_COUNTER_STATE_OFF)
		return 0;

	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
	if (ret)
		return ret < 0 ? ret : 0;

	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
		return -EAGAIN;

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
			partial_group = counter;
			goto group_error;
		}
	}

	return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter == partial_group)
			break;
		counter_sched_out(counter, cpuctx, ctx);
	}
	counter_sched_out(group_counter, cpuctx, ctx);

	return -EAGAIN;
}

/*
 * Return 1 for a group consisting entirely of software counters,
 * 0 if the group contains any hardware counters.
 */
static int is_software_only_group(struct perf_counter *leader)
{
	struct perf_counter *counter;

	if (!is_software_counter(leader))
		return 0;

	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		if (!is_software_counter(counter))
			return 0;

	return 1;
}

/*
 * Work out whether we can put this counter group on the CPU now.
 */
static int group_can_go_on(struct perf_counter *counter,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software counters can always go on.
	 */
	if (is_software_only_group(counter))
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * counters can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * counters on the CPU, it can't go on.
	 */
	if (counter->attr.exclusive && cpuctx->active_oncpu)
return 0;
|
|
|
|
|
/*
|
|
|
|
|
* Otherwise, try to add it if all previous groups were able
|
|
|
|
|
* to go on.
|
|
|
|
|
*/
|
|
|
|
|
return can_add_hw;
|
|
|
|
|
}
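is_software_only_group() used above is not part of this hunk. A hedged
sketch of the check it performs, assuming is_software_counter() from
perf_counter.h and the sibling_list/list_entry fields this file uses
for group membership:
/*
 * Hedged sketch (not this hunk's code): a group is software-only when
 * its leader and every sibling is a software counter.
 */
static int is_software_only_group(struct perf_counter *leader)
{
	struct perf_counter *counter;

	if (!is_software_counter(leader))
		return 0;

	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		if (!is_software_counter(counter))
			return 0;

	return 1;
}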
|
|
|
|
|
|
perf_counter: record time running and time enabled for each counter
Impact: new functionality
Currently, if there are more counters enabled than can fit on the CPU,
the kernel will multiplex the counters on to the hardware using
round-robin scheduling. That isn't too bad for sampling counters, but
for counting counters it means that the value read from a counter
represents some unknown fraction of the true count of events that
occurred while the counter was enabled.
This remedies the situation by keeping track of how long each counter
is enabled for, and how long it is actually on the cpu and counting
events. These times are recorded in nanoseconds using the task clock
for per-task counters and the cpu clock for per-cpu counters.
These values can be supplied to userspace on a read from the counter.
Userspace requests that they be supplied after the counter value by
setting the PERF_FORMAT_TOTAL_TIME_ENABLED and/or
PERF_FORMAT_TOTAL_TIME_RUNNING bits in the hw_event.read_format field
when creating the counter. (There is no way to change the read format
after the counter is created, though it would be possible to add some
way to do that.)
Using this information it is possible for userspace to scale the count
it reads from the counter to get an estimate of the true count:
true_count_estimate = count * total_time_enabled / total_time_running
This also lets userspace detect the situation where the counter never
got to go on the cpu: total_time_running == 0.
This functionality has been requested by the PAPI developers, and will
be generally needed for interpreting the count values from counting
counters correctly.
In the implementation, this keeps 5 time values (in nanoseconds) for
each counter: total_time_enabled and total_time_running are used when
the counter is in state OFF or ERROR and for reporting back to
userspace. When the counter is in state INACTIVE or ACTIVE, it is the
tstamp_enabled, tstamp_running and tstamp_stopped values that are
relevant, and total_time_enabled and total_time_running are determined
from them. (tstamp_stopped is only used in INACTIVE state.) The
reason for doing it like this is that it means that only counters
being enabled or disabled at sched-in and sched-out time need to be
updated. There are no new loops that iterate over all counters to
update total_time_enabled or total_time_running.
This also keeps separate child_total_time_running and
child_total_time_enabled fields that get added in when reporting the
totals to userspace. They are separate fields so that they can be
atomic. We don't want to use atomics for total_time_running,
total_time_enabled etc., because then we would have to use atomic
sequences to update them, which are slower than regular arithmetic and
memory accesses.
It is possible to measure total_time_running by adding a task_clock
counter to each group of counters, and total_time_enabled can be
measured approximately with a top-level task_clock counter (though
inaccuracies will creep in if you need to disable and enable groups
since it is not possible in general to disable/enable the top-level
task_clock counter simultaneously with another group). However, that
adds extra overhead - I measured around 15% increase in the context
switch latency reported by lat_ctx (from lmbench) when a task_clock
counter was added to each of 2 groups, and around 25% increase when a
task_clock counter was added to each of 4 groups. (In both cases a
top-level task-clock counter was also added.)
In contrast, the code added in this commit gives better information
with no overhead that I could measure (in fact in some cases I
measured lower times with this code, but the differences were all less
than one standard deviation).
[ v2: address review comments by Andrew Morton. ]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Andrew Morton <akpm@linux-foundation.org>
Orig-LKML-Reference: <18890.6578.728637.139402@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-03-25 11:46:58 +00:00
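The scaling rule above can be applied directly in userspace. A hedged
sketch, assuming the counter was opened with a read_format requesting
both PERF_FORMAT_TOTAL_TIME_ENABLED and PERF_FORMAT_TOTAL_TIME_RUNNING,
so that read() returns the count followed by the two times:
/* Hedged userspace sketch: estimate the true count from a scaled read. */
#include <stdint.h>
#include <unistd.h>

static uint64_t read_scaled_count(int counter_fd)
{
	uint64_t buf[3];	/* count, total_time_enabled, total_time_running */

	if (read(counter_fd, buf, sizeof(buf)) != sizeof(buf))
		return 0;	/* short read, e.g. counter in error state */
	if (buf[2] == 0)	/* never got onto the CPU */
		return 0;

	/* true_count_estimate = count * total_time_enabled / total_time_running */
	return buf[0] * buf[1] / buf[2];
}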
|
|
|
|
static void add_counter_to_ctx(struct perf_counter *counter,
|
|
|
|
|
struct perf_counter_context *ctx)
|
|
|
|
|
{
|
|
|
|
|
list_add_counter(counter, ctx);
|
2009-04-06 09:45:10 +00:00
|
|
|
|
counter->tstamp_enabled = ctx->time;
|
|
|
|
|
counter->tstamp_running = ctx->time;
|
|
|
|
|
counter->tstamp_stopped = ctx->time;
|
2009-03-25 11:46:58 +00:00
|
|
|
|
}
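add_counter_to_ctx() stamps the new counter with ctx->time, which
update_context_time() (called from __perf_install_in_context() below)
keeps current. That helper is outside this hunk; a hedged sketch,
assuming a ctx->timestamp field holding the time of the last update and
a perf_clock() time source:
/*
 * Hedged sketch: fold the time elapsed since the last update into
 * ctx->time so newly added counters are stamped with a current value.
 */
static void update_context_time_sketch(struct perf_counter_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}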
|
|
|
|
|
|
2008-12-04 19:12:29 +00:00
|
|
|
|
/*
|
2008-12-21 13:43:25 +00:00
|
|
|
|
* Cross CPU call to install and enable a performance counter
|
2009-05-23 16:28:57 +00:00
|
|
|
|
*
|
|
|
|
|
* Must be called with ctx->mutex held
|
2008-12-04 19:12:29 +00:00
|
|
|
|
*/
|
|
|
|
|
static void __perf_install_in_context(void *info)
|
|
|
|
|
{
|
|
|
|
|
struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
|
|
|
|
|
struct perf_counter *counter = info;
|
|
|
|
|
struct perf_counter_context *ctx = counter->ctx;
|
perf_counter: Add counter enable/disable ioctls
Impact: New perf_counter features
This primarily adds a way for perf_counter users to enable and disable
counters and groups. Enabling or disabling a counter or group also
enables or disables all of the child counters that have been cloned
from it to monitor children of the task monitored by the top-level
counter. The userspace interface to enable/disable counters is via
ioctl on the counter file descriptor.
Along the way this extends the code that handles child counters to
handle child counter groups properly. A group with multiple counters
will be cloned to child tasks if and only if the group leader has the
hw_event.inherit bit set - if it is set the whole group is cloned as a
group in the child task.
In order to be able to enable or disable all child counters of a given
top-level counter, we need a way to find them all. Hence I have added
a child_list field to struct perf_counter, which is the head of the
list of children for a top-level counter, or the link in that list for
a child counter. That list is protected by the perf_counter.mutex
field.
This also adds a mutex to the perf_counter_context struct. Previously
the list of counters was protected just by the lock field in the
context, which meant that perf_counter_init_task had to take that lock
and then take whatever lock/mutex protects the top-level counter's
child_list. But the counter enable/disable functions need to take
that lock in order to traverse the list, then for each counter take
the lock in that counter's context in order to change the counter's
state safely, which will lead to a deadlock.
To solve this, we now have both a mutex and a spinlock in the context,
and taking either is sufficient to ensure the list of counters can't
change - you have to take both before changing the list. Now
perf_counter_init_task takes the mutex instead of the lock (which
incidentally means that inherit_counter can use GFP_KERNEL instead of
GFP_ATOMIC) and thus avoids the possible deadlock. Similarly the new
enable/disable functions can take the mutex while traversing the list
of child counters without incurring a possible deadlock when the
counter manipulation code locks the context for a child counter.
We also had a misfeature that the first counter added to a context
would possibly not go on until the next sched-in, because we were
using ctx->nr_active to detect if the context was running on a CPU.
But nr_active is the number of active counters, and if that was zero
(because the context didn't have any counters yet) it would look like
the context wasn't running on a cpu and so the retry code in
__perf_install_in_context wouldn't retry. So this adds an 'is_active'
field that is set when the context is on a CPU, even if it has no
counters. The is_active field is only used for task contexts, not for
per-cpu contexts.
If we enable a subsidiary counter in a group that is active on a CPU,
and the arch code can't enable the counter, then we have to pull the
whole group off the CPU. We do this with group_sched_out, which gets
moved up in the file so it comes before all its callers. This also
adds similar logic to __perf_install_in_context so that the "all on,
or none" invariant of groups is preserved when adding a new counter to
a group.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-17 07:10:22 +00:00
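A hedged userspace sketch of the ioctl interface described above; the
PERF_COUNTER_IOC_ENABLE/PERF_COUNTER_IOC_DISABLE names are assumed from
linux/perf_counter.h rather than shown in this hunk:
/* Hedged sketch: bracket a region of interest with enable/disable. */
#include <sys/ioctl.h>
#include <linux/perf_counter.h>

static void count_region(int counter_fd)
{
	ioctl(counter_fd, PERF_COUNTER_IOC_DISABLE, 0);
	/* ... setup we do not want counted ... */
	ioctl(counter_fd, PERF_COUNTER_IOC_ENABLE, 0);
	/* ... code being measured; cloned child counters toggle with this one ... */
	ioctl(counter_fd, PERF_COUNTER_IOC_DISABLE, 0);
}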
|
|
|
|
struct perf_counter *leader = counter->group_leader;
|
2008-12-04 19:12:29 +00:00
|
|
|
|
int cpu = smp_processor_id();
|
2009-01-14 10:00:30 +00:00
|
|
|
|
int err;
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* If this is a task context, we need to check whether it is
|
|
|
|
|
* the current task context of this cpu. If not it has been
|
|
|
|
|
* scheduled out before the smp call arrived.
|
2009-05-22 04:17:31 +00:00
|
|
|
|
* Or possibly this is the right context but it isn't
|
|
|
|
|
* on this cpu because it had no counters.
|
2008-12-04 19:12:29 +00:00
|
|
|
|
*/
|
perf_counter: Dynamically allocate tasks' perf_counter_context struct
This replaces the struct perf_counter_context in the task_struct with
a pointer to a dynamically allocated perf_counter_context struct. The
main reason for doing this is to allow us to transfer a
perf_counter_context from one task to another when we do lazy PMU
switching in a later patch.
This has a few side-benefits: the task_struct becomes a little smaller,
we save some memory because only tasks that have perf_counters attached
get a perf_counter_context allocated for them, and we can remove the
inclusion of <linux/perf_counter.h> in sched.h, meaning that we don't
end up recompiling nearly everything whenever perf_counter.h changes.
The perf_counter_context structures are reference-counted and freed
when the last reference is dropped. A context can have references
from its task and the counters on its task. Counters can outlive the
task so it is possible that a context will be freed well after its
task has exited.
Contexts are allocated on fork if the parent had a context, or
otherwise the first time that a per-task counter is created on a task.
In the latter case, we set the context pointer in the task struct
locklessly using an atomic compare-and-exchange operation in case we
raced with some other task in creating a context for the subject task.
This also removes the task pointer from the perf_counter struct. The
task pointer was not used anywhere and would make it harder to move a
context from one task to another. Anything that needed to know which
task a counter was attached to was already using counter->ctx->task.
The __perf_counter_init_context function moves up in perf_counter.c
so that it can be called from find_get_context, and now initializes
the refcount, but is otherwise unchanged.
We were potentially calling list_del_counter twice: once from
__perf_counter_exit_task when the task exits and once from
__perf_counter_remove_from_context when the counter's fd gets closed.
This adds a check in list_del_counter so it doesn't do anything if
the counter has already been removed from the lists.
Since perf_counter_task_sched_in doesn't do anything if the task doesn't
have a context, and leaves cpuctx->task_ctx = NULL, this adds code to
__perf_install_in_context to set cpuctx->task_ctx if necessary, i.e. in
the case where the current task adds the first counter to itself and
thus creates a context for itself.
This also adds similar code to __perf_counter_enable to handle a
similar situation which can arise when the counters have been disabled
using prctl; that also leaves cpuctx->task_ctx = NULL.
[ Impact: refactor counter context management to prepare for new feature ]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <18966.10075.781053.231153@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-22 04:17:31 +00:00
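The lockless context installation described in the note above can be
sketched as follows (hedged: the perf_counter_ctxp field name and the
helper name are assumptions; only the cmpxchg() scheme comes from that
description):
/*
 * Hedged sketch: allocate a context for a task and install it with
 * cmpxchg(); if another thread raced us and installed one first, free
 * ours and use theirs.
 */
static struct perf_counter_context *get_task_ctx_sketch(struct task_struct *task)
{
	struct perf_counter_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;
	__perf_counter_init_context(ctx, task);

	if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx) != NULL) {
		kfree(ctx);			/* lost the race */
		ctx = task->perf_counter_ctxp;
	}
	return ctx;
}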
|
|
|
|
if (ctx->task && cpuctx->task_ctx != ctx) {
|
2009-05-29 12:51:57 +00:00
|
|
|
|
if (cpuctx->task_ctx || ctx->task != current)
|
2009-05-22 04:17:31 +00:00
|
|
|
|
return;
|
|
|
|
|
cpuctx->task_ctx = ctx;
|
|
|
|
|
}
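For reference, the cross-CPU invocation that reaches this function in
the per-cpu case can be sketched as below (hedged; task contexts need a
retry loop instead, and the wrapper name here is illustrative):
/* Hedged sketch: run __perf_install_in_context() on the target CPU. */
static void install_counter_on_cpu(struct perf_counter *counter, int cpu)
{
	smp_call_function_single(cpu, __perf_install_in_context,
				 counter, 1);
}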
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
2009-05-29 09:25:09 +00:00
|
|
|
|
spin_lock(&ctx->lock);
|
2009-05-22 04:17:31 +00:00
|
|
|
|
ctx->is_active = 1;
|
2009-04-06 09:45:10 +00:00
|
|
|
|
update_context_time(ctx);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Protect the list operation against NMI by disabling the
|
|
|
|
|
* counters on a global level. NOP for non-NMI based counters.
|
|
|
|
|
*/
|
2009-05-13 14:21:38 +00:00
|
|
|
|
perf_disable();
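perf_disable() and its perf_enable() counterpart are defined elsewhere
in this file; the pattern is a nested, per-cpu disable of the PMU so
that list manipulation cannot race with NMI-delivered counter
interrupts. An illustrative sketch (the counter variable and exact
structure are assumptions, not this file's code):
/* Illustrative sketch of nested PMU disabling. */
static DEFINE_PER_CPU(int, perf_disable_depth);

static void perf_disable_sketch(void)
{
	__get_cpu_var(perf_disable_depth)++;
	hw_perf_disable();
}

static void perf_enable_sketch(void)
{
	if (!--__get_cpu_var(perf_disable_depth))
		hw_perf_enable();	/* outermost level re-arms the PMU */
}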
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
2009-03-25 11:46:58 +00:00
|
|
|
|
add_counter_to_ctx(counter, ctx);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
2009-01-17 07:10:22 +00:00
|
|
|
|
/*
|
|
|
|
|
* Don't put the counter on if it is disabled or if
|
|
|
|
|
* it is in a group and the group isn't on.
|
|
|
|
|
*/
|
|
|
|
|
if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
|
|
|
|
|
(leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
|
|
|
|
|
goto unlock;
|
|
|
|
|
|
perf_counter: Add support for pinned and exclusive counter groups
Impact: New perf_counter features
A pinned counter group is one that the user wants to have on the CPU
whenever possible, i.e. whenever the associated task is running, for
a per-task group, or always for a per-cpu group. If the system
cannot satisfy that, it puts the group into an error state where
it is not scheduled any more and reads from it return EOF (i.e. 0
bytes read). The group can be released from error state and made
readable again using prctl(PR_TASK_PERF_COUNTERS_ENABLE). When we
have finer-grained enable/disable controls on counters we'll be able
to reset the error state on individual groups.
An exclusive group is one that the user wants to be the only group
using the CPU performance monitor hardware whenever it is on. The
counter group scheduler will not schedule an exclusive group if there
are already other groups on the CPU and will not schedule other groups
onto the CPU if there is an exclusive group scheduled (that statement
does not apply to groups containing only software counters, which can
always go on and which do not prevent an exclusive group from going on).
With an exclusive group, we will be able to let users program PMU
registers at a low level without the concern that those settings will
perturb other measurements.
Along the way this reorganizes things a little:
- is_software_counter() is moved to perf_counter.h.
- cpuctx->active_oncpu now records the number of hardware counters on
the CPU, i.e. it now excludes software counters. Nothing was reading
cpuctx->active_oncpu before, so this change is harmless.
- A new cpuctx->exclusive field records whether we currently have an
exclusive group on the CPU.
- counter_sched_out moves higher up in perf_counter.c and gets called
from __perf_counter_remove_from_context and __perf_counter_exit_task,
where we used to have essentially the same code.
- __perf_counter_sched_in now goes through the counter list twice, doing
the pinned counters in the first loop and the non-pinned counters in
the second loop, in order to give the pinned counters the best chance
to be scheduled in.
Note that only a group leader can be exclusive or pinned, and that
attribute applies to the whole group. This avoids some awkwardness in
some corner cases (e.g. where a group leader is closed and the other
group members get added to the context list). If we want to relax that
restriction later, we can, and it is easier to relax a restriction than
to apply a new one.
This doesn't yet handle the case where a pinned counter is inherited
and goes into error state in the child - the error state is not
propagated up to the parent when the child exits, and arguably it
should.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-14 10:00:30 +00:00
|
|
|
|
/*
|
|
|
|
|
* An exclusive counter can't go on if there are already active
|
|
|
|
|
* hardware counters, and no hardware counter can go on if there
|
|
|
|
|
* is already an exclusive counter on.
|
|
|
|
|
*/
|
2009-01-17 07:10:22 +00:00
|
|
|
|
if (!group_can_go_on(counter, cpuctx, 1))
|
2009-01-14 10:00:30 +00:00
|
|
|
|
err = -EEXIST;
|
|
|
|
|
else
|
|
|
|
|
err = counter_sched_in(counter, cpuctx, ctx, cpu);
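counter_sched_in() itself is outside this hunk. A simplified, hedged
sketch of what the error handling below relies on, assuming the
pmu->enable() callback and the active_oncpu/exclusive bookkeeping
described in the notes above:
/* Hedged sketch, not the file's exact code. */
static int counter_sched_in_sketch(struct perf_counter *counter,
				   struct perf_cpu_context *cpuctx,
				   struct perf_counter_context *ctx,
				   int cpu)
{
	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;

	if (counter->pmu->enable(counter)) {
		/* The hardware refused it: roll the state back. */
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->oncpu = -1;
		return -EAGAIN;
	}

	counter->tstamp_running += ctx->time - counter->tstamp_stopped;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu++;		/* hardware counters only */
	if (counter->attr.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}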
|
|
|
|
|
|
2009-01-17 07:10:22 +00:00
|
|
|
|
if (err) {
|
|
|
|
|
/*
|
|
|
|
|
* This counter couldn't go on. If it is in a group
|
|
|
|
|
* then we have to pull the whole group off.
|
|
|
|
|
* If the counter group is pinned then put it in error state.
|
|
|
|
|
*/
|
|
|
|
|
if (leader != counter)
|
|
|
|
|
group_sched_out(leader, cpuctx, ctx);
|
2009-06-02 17:22:16 +00:00
|
|
|
|
if (leader->attr.pinned) {
|
2009-03-25 11:46:58 +00:00
|
|
|
|
update_group_times(leader);
|
2009-01-17 07:10:22 +00:00
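As a concrete illustration of the ioctl interface described above, here is a minimal userspace sketch (not part of this file): it assumes a counter file descriptor returned by the perf_counter_open syscall and the PERF_COUNTER_IOC_ENABLE / PERF_COUNTER_IOC_DISABLE ioctls declared in <linux/perf_counter.h> in this era of the ABI.

#include <sys/ioctl.h>
#include <linux/perf_counter.h>

/* Enable or disable a counter (and its inherited children) via its fd. */
static int toggle_counter(int counter_fd, int enable)
{
	return ioctl(counter_fd,
		     enable ? PERF_COUNTER_IOC_ENABLE : PERF_COUNTER_IOC_DISABLE);
}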
|
|
|
|
leader->state = PERF_COUNTER_STATE_ERROR;
|
perf_counter: record time running and time enabled for each counter
Impact: new functionality
Currently, if there are more counters enabled than can fit on the CPU,
the kernel will multiplex the counters on to the hardware using
round-robin scheduling. That isn't too bad for sampling counters, but
for counting counters it means that the value read from a counter
represents some unknown fraction of the true count of events that
occurred while the counter was enabled.
This remedies the situation by keeping track of how long each counter
is enabled for, and how long it is actually on the cpu and counting
events. These times are recorded in nanoseconds using the task clock
for per-task counters and the cpu clock for per-cpu counters.
These values can be supplied to userspace on a read from the counter.
Userspace requests that they be supplied after the counter value by
setting the PERF_FORMAT_TOTAL_TIME_ENABLED and/or
PERF_FORMAT_TOTAL_TIME_RUNNING bits in the hw_event.read_format field
when creating the counter. (There is no way to change the read format
after the counter is created, though it would be possible to add some
way to do that.)
Using this information it is possible for userspace to scale the count
it reads from the counter to get an estimate of the true count:
true_count_estimate = count * total_time_enabled / total_time_running
This also lets userspace detect the situation where the counter never
got to go on the cpu: total_time_running == 0.
This functionality has been requested by the PAPI developers, and will
be generally needed for interpreting the count values from counting
counters correctly.
In the implementation, this keeps 5 time values (in nanoseconds) for
each counter: total_time_enabled and total_time_running are used when
the counter is in state OFF or ERROR and for reporting back to
userspace. When the counter is in state INACTIVE or ACTIVE, it is the
tstamp_enabled, tstamp_running and tstamp_stopped values that are
relevant, and total_time_enabled and total_time_running are determined
from them. (tstamp_stopped is only used in INACTIVE state.) The
reason for doing it like this is that it means that only counters
being enabled or disabled at sched-in and sched-out time need to be
updated. There are no new loops that iterate over all counters to
update total_time_enabled or total_time_running.
This also keeps separate child_total_time_running and
child_total_time_enabled fields that get added in when reporting the
totals to userspace. They are separate fields so that they can be
atomic. We don't want to use atomics for total_time_running,
total_time_enabled etc., because then we would have to use atomic
sequences to update them, which are slower than regular arithmetic and
memory accesses.
It is possible to measure total_time_running by adding a task_clock
counter to each group of counters, and total_time_enabled can be
measured approximately with a top-level task_clock counter (though
inaccuracies will creep in if you need to disable and enable groups
since it is not possible in general to disable/enable the top-level
task_clock counter simultaneously with another group). However, that
adds extra overhead - I measured around 15% increase in the context
switch latency reported by lat_ctx (from lmbench) when a task_clock
counter was added to each of 2 groups, and around 25% increase when a
task_clock counter was added to each of 4 groups. (In both cases a
top-level task-clock counter was also added.)
In contrast, the code added in this commit gives better information
with no overhead that I could measure (in fact in some cases I
measured lower times with this code, but the differences were all less
than one standard deviation).
[ v2: address review comments by Andrew Morton. ]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Andrew Morton <akpm@linux-foundation.org>
Orig-LKML-Reference: <18890.6578.728637.139402@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-03-25 11:46:58 +00:00
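The scaling recipe above translates directly into userspace code; the sketch below is illustrative only and not part of this file. It assumes the counter was opened with hw_event.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING, so that read() returns the count followed by the two times.

#include <stdint.h>
#include <unistd.h>

struct counter_reading {
	uint64_t value;		/* raw event count */
	uint64_t time_enabled;	/* ns the counter was enabled */
	uint64_t time_running;	/* ns it was actually counting on the PMU */
};

/* true_count_estimate = count * total_time_enabled / total_time_running */
static uint64_t read_scaled_count(int counter_fd)
{
	struct counter_reading r;

	if (read(counter_fd, &r, sizeof(r)) != sizeof(r))
		return 0;
	if (r.time_running == 0)	/* counter never got onto the CPU */
		return 0;
	return r.value * r.time_enabled / r.time_running;
}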
|
|
|
|
}
|
perf_counter: Add counter enable/disable ioctls
Impact: New perf_counter features
This primarily adds a way for perf_counter users to enable and disable
counters and groups. Enabling or disabling a counter or group also
enables or disables all of the child counters that have been cloned
from it to monitor children of the task monitored by the top-level
counter. The userspace interface to enable/disable counters is via
ioctl on the counter file descriptor.
Along the way this extends the code that handles child counters to
handle child counter groups properly. A group with multiple counters
will be cloned to child tasks if and only if the group leader has the
hw_event.inherit bit set - if it is set the whole group is cloned as a
group in the child task.
In order to be able to enable or disable all child counters of a given
top-level counter, we need a way to find them all. Hence I have added
a child_list field to struct perf_counter, which is the head of the
list of children for a top-level counter, or the link in that list for
a child counter. That list is protected by the perf_counter.mutex
field.
This also adds a mutex to the perf_counter_context struct. Previously
the list of counters was protected just by the lock field in the
context, which meant that perf_counter_init_task had to take that lock
and then take whatever lock/mutex protects the top-level counter's
child_list. But the counter enable/disable functions need to take
that lock in order to traverse the list, then for each counter take
the lock in that counter's context in order to change the counter's
state safely, which would lead to a deadlock.
To solve this, we now have both a mutex and a spinlock in the context,
and taking either is sufficient to ensure the list of counters can't
change - you have to take both before changing the list. Now
perf_counter_init_task takes the mutex instead of the lock (which
incidentally means that inherit_counter can use GFP_KERNEL instead of
GFP_ATOMIC) and thus avoids the possible deadlock. Similarly the new
enable/disable functions can take the mutex while traversing the list
of child counters without incurring a possible deadlock when the
counter manipulation code locks the context for a child counter.
We also had a misfeature that the first counter added to a context
would possibly not go on until the next sched-in, because we were
using ctx->nr_active to detect if the context was running on a CPU.
But nr_active is the number of active counters, and if that was zero
(because the context didn't have any counters yet) it would look like
the context wasn't running on a cpu and so the retry code in
__perf_install_in_context wouldn't retry. So this adds an 'is_active'
field that is set when the context is on a CPU, even if it has no
counters. The is_active field is only used for task contexts, not for
per-cpu contexts.
If we enable a subsidiary counter in a group that is active on a CPU,
and the arch code can't enable the counter, then we have to pull the
whole group off the CPU. We do this with group_sched_out, which gets
moved up in the file so it comes before all its callers. This also
adds similar logic to __perf_install_in_context so that the "all on,
or none" invariant of groups is preserved when adding a new counter to
a group.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-17 07:10:22 +00:00
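To make the locking rule above concrete: either ctx->mutex or ctx->lock is enough to keep the counter list stable for a reader, and a writer must hold both. The sketch below is illustrative only; the helper name and the counter_list field name are assumptions based on this file's conventions, not a literal excerpt.

static void ctx_add_counter(struct perf_counter_context *ctx,
			    struct perf_counter *counter)
{
	/* Readers may hold either lock; modifying the list needs both. */
	mutex_lock(&ctx->mutex);
	spin_lock_irq(&ctx->lock);
	list_add_tail(&counter->list_entry, &ctx->counter_list);
	spin_unlock_irq(&ctx->lock);
	mutex_unlock(&ctx->mutex);
}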
|
|
|
|
}
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
perf_counter: Add support for pinned and exclusive counter groups
Impact: New perf_counter features
A pinned counter group is one that the user wants to have on the CPU
whenever possible, i.e. whenever the associated task is running, for
a per-task group, or always for a per-cpu group. If the system
cannot satisfy that, it puts the group into an error state where
it is not scheduled any more and reads from it return EOF (i.e. 0
bytes read). The group can be released from error state and made
readable again using prctl(PR_TASK_PERF_COUNTERS_ENABLE). When we
have finer-grained enable/disable controls on counters we'll be able
to reset the error state on individual groups.
An exclusive group is one that the user wants to be the only group
using the CPU performance monitor hardware whenever it is on. The
counter group scheduler will not schedule an exclusive group if there
are already other groups on the CPU and will not schedule other groups
onto the CPU if there is an exclusive group scheduled (that statement
does not apply to groups containing only software counters, which can
always go on and which do not prevent an exclusive group from going on).
With an exclusive group, we will be able to let users program PMU
registers at a low level without the concern that those settings will
perturb other measurements.
Along the way this reorganizes things a little:
- is_software_counter() is moved to perf_counter.h.
- cpuctx->active_oncpu now records the number of hardware counters on
the CPU, i.e. it now excludes software counters. Nothing was reading
cpuctx->active_oncpu before, so this change is harmless.
- A new cpuctx->exclusive field records whether we currently have an
exclusive group on the CPU.
- counter_sched_out moves higher up in perf_counter.c and gets called
from __perf_counter_remove_from_context and __perf_counter_exit_task,
where we used to have essentially the same code.
- __perf_counter_sched_in now goes through the counter list twice, doing
the pinned counters in the first loop and the non-pinned counters in
the second loop, in order to give the pinned counters the best chance
to be scheduled in.
Note that only a group leader can be exclusive or pinned, and that
attribute applies to the whole group. This avoids some awkwardness in
some corner cases (e.g. where a group leader is closed and the other
group members get added to the context list). If we want to relax that
restriction later, we can, and it is easier to relax a restriction than
to apply a new one.
This doesn't yet handle the case where a pinned counter is inherited
and goes into error state in the child - the error state is not
propagated up to the parent when the child exits, and arguably it
should.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-14 10:00:30 +00:00
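A hedged sketch of how a user would ask for such a group: the pinned and exclusive bit-fields follow the perf_counter ABI of this period, event-selection fields are omitted, and sys_perf_counter_open() stands in for a wrapper around the raw syscall.

struct perf_counter_hw_event hw_event = {
	/* event selection fields omitted */
	.pinned		= 1,	/* keep this group on the CPU whenever possible */
	.exclusive	= 1,	/* no other groups on the PMU while it is on */
};

/* Group leader for the current task (pid 0), any CPU, no existing group. */
int group_fd = sys_perf_counter_open(&hw_event, 0, -1, -1, 0);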
|
|
|
|
if (!err && !ctx->task && cpuctx->max_pertask)
|
2008-12-04 19:12:29 +00:00
|
|
|
|
cpuctx->max_pertask--;
|
|
|
|
|
|
perf_counter: Add counter enable/disable ioctls
Impact: New perf_counter features
This primarily adds a way for perf_counter users to enable and disable
counters and groups. Enabling or disabling a counter or group also
enables or disables all of the child counters that have been cloned
from it to monitor children of the task monitored by the top-level
counter. The userspace interface to enable/disable counters is via
ioctl on the counter file descriptor.
Along the way this extends the code that handles child counters to
handle child counter groups properly. A group with multiple counters
will be cloned to child tasks if and only if the group leader has the
hw_event.inherit bit set - if it is set the whole group is cloned as a
group in the child task.
In order to be able to enable or disable all child counters of a given
top-level counter, we need a way to find them all. Hence I have added
a child_list field to struct perf_counter, which is the head of the
list of children for a top-level counter, or the link in that list for
a child counter. That list is protected by the perf_counter.mutex
field.
This also adds a mutex to the perf_counter_context struct. Previously
the list of counters was protected just by the lock field in the
context, which meant that perf_counter_init_task had to take that lock
and then take whatever lock/mutex protects the top-level counter's
child_list. But the counter enable/disable functions need to take
that lock in order to traverse the list, then for each counter take
the lock in that counter's context in order to change the counter's
state safely, which would lead to a deadlock.
To solve this, we now have both a mutex and a spinlock in the context,
and taking either is sufficient to ensure the list of counters can't
change - you have to take both before changing the list. Now
perf_counter_init_task takes the mutex instead of the lock (which
incidentally means that inherit_counter can use GFP_KERNEL instead of
GFP_ATOMIC) and thus avoids the possible deadlock. Similarly the new
enable/disable functions can take the mutex while traversing the list
of child counters without incurring a possible deadlock when the
counter manipulation code locks the context for a child counter.
We also had a misfeature that the first counter added to a context
would possibly not go on until the next sched-in, because we were
using ctx->nr_active to detect if the context was running on a CPU.
But nr_active is the number of active counters, and if that was zero
(because the context didn't have any counters yet) it would look like
the context wasn't running on a cpu and so the retry code in
__perf_install_in_context wouldn't retry. So this adds an 'is_active'
field that is set when the context is on a CPU, even if it has no
counters. The is_active field is only used for task contexts, not for
per-cpu contexts.
If we enable a subsidiary counter in a group that is active on a CPU,
and the arch code can't enable the counter, then we have to pull the
whole group off the CPU. We do this with group_sched_out, which gets
moved up in the file so it comes before all its callers. This also
adds similar logic to __perf_install_in_context so that the "all on,
or none" invariant of groups is preserved when adding a new counter to
a group.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-17 07:10:22 +00:00
|
|
|
|
unlock:
|
2009-05-13 14:21:38 +00:00
|
|
|
|
perf_enable();
|
2008-12-21 13:43:25 +00:00
|
|
|
|
|
2009-05-29 12:51:57 +00:00
|
|
|
|
spin_unlock(&ctx->lock);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Attach a performance counter to a context
|
|
|
|
|
*
|
|
|
|
|
* First we add the counter to the list with the hardware enable bit
|
|
|
|
|
* in counter->hw_config cleared.
|
|
|
|
|
*
|
|
|
|
|
* If the counter is attached to a task which is on a CPU we use a smp
|
|
|
|
|
* call to enable it in the task context. The task might have been
|
|
|
|
|
* scheduled away, but we check this in the smp call again.
|
perf_counter: Add counter enable/disable ioctls
Impact: New perf_counter features
This primarily adds a way for perf_counter users to enable and disable
counters and groups. Enabling or disabling a counter or group also
enables or disables all of the child counters that have been cloned
from it to monitor children of the task monitored by the top-level
counter. The userspace interface to enable/disable counters is via
ioctl on the counter file descriptor.
Along the way this extends the code that handles child counters to
handle child counter groups properly. A group with multiple counters
will be cloned to child tasks if and only if the group leader has the
hw_event.inherit bit set - if it is set the whole group is cloned as a
group in the child task.
In order to be able to enable or disable all child counters of a given
top-level counter, we need a way to find them all. Hence I have added
a child_list field to struct perf_counter, which is the head of the
list of children for a top-level counter, or the link in that list for
a child counter. That list is protected by the perf_counter.mutex
field.
This also adds a mutex to the perf_counter_context struct. Previously
the list of counters was protected just by the lock field in the
context, which meant that perf_counter_init_task had to take that lock
and then take whatever lock/mutex protects the top-level counter's
child_list. But the counter enable/disable functions need to take
that lock in order to traverse the list, then for each counter take
the lock in that counter's context in order to change the counter's
state safely, which would lead to a deadlock.
To solve this, we now have both a mutex and a spinlock in the context,
and taking either is sufficient to ensure the list of counters can't
change - you have to take both before changing the list. Now
perf_counter_init_task takes the mutex instead of the lock (which
incidentally means that inherit_counter can use GFP_KERNEL instead of
GFP_ATOMIC) and thus avoids the possible deadlock. Similarly the new
enable/disable functions can take the mutex while traversing the list
of child counters without incurring a possible deadlock when the
counter manipulation code locks the context for a child counter.
We also had a misfeature that the first counter added to a context
would possibly not go on until the next sched-in, because we were
using ctx->nr_active to detect if the context was running on a CPU.
But nr_active is the number of active counters, and if that was zero
(because the context didn't have any counters yet) it would look like
the context wasn't running on a cpu and so the retry code in
__perf_install_in_context wouldn't retry. So this adds an 'is_active'
field that is set when the context is on a CPU, even if it has no
counters. The is_active field is only used for task contexts, not for
per-cpu contexts.
If we enable a subsidiary counter in a group that is active on a CPU,
and the arch code can't enable the counter, then we have to pull the
whole group off the CPU. We do this with group_sched_out, which gets
moved up in the file so it comes before all its callers. This also
adds similar logic to __perf_install_in_context so that the "all on,
or none" invariant of groups is preserved when adding a new counter to
a group.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-17 07:10:22 +00:00
|
|
|
|
*
|
|
|
|
|
* Must be called with ctx->mutex held.
|
2008-12-04 19:12:29 +00:00
|
|
|
|
*/
|
|
|
|
|
static void
|
|
|
|
|
perf_install_in_context(struct perf_counter_context *ctx,
|
|
|
|
|
struct perf_counter *counter,
|
|
|
|
|
int cpu)
|
|
|
|
|
{
|
|
|
|
|
struct task_struct *task = ctx->task;
|
|
|
|
|
|
|
|
|
|
if (!task) {
|
|
|
|
|
/*
|
|
|
|
|
* Per cpu counters are installed via an smp call and
|
|
|
|
|
* the install is always successful.
|
|
|
|
|
*/
|
|
|
|
|
smp_call_function_single(cpu, __perf_install_in_context,
|
|
|
|
|
counter, 1);
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
retry:
|
|
|
|
|
task_oncpu_function_call(task, __perf_install_in_context,
|
|
|
|
|
counter);
|
|
|
|
|
|
|
|
|
|
spin_lock_irq(&ctx->lock);
|
|
|
|
|
/*
|
|
|
|
|
* If the context is active and the counter was not added, we need to retry the smp call.
|
|
|
|
|
*/
|
perf_counter: Add counter enable/disable ioctls
Impact: New perf_counter features
This primarily adds a way for perf_counter users to enable and disable
counters and groups. Enabling or disabling a counter or group also
enables or disables all of the child counters that have been cloned
from it to monitor children of the task monitored by the top-level
counter. The userspace interface to enable/disable counters is via
ioctl on the counter file descriptor.
Along the way this extends the code that handles child counters to
handle child counter groups properly. A group with multiple counters
will be cloned to child tasks if and only if the group leader has the
hw_event.inherit bit set - if it is set the whole group is cloned as a
group in the child task.
In order to be able to enable or disable all child counters of a given
top-level counter, we need a way to find them all. Hence I have added
a child_list field to struct perf_counter, which is the head of the
list of children for a top-level counter, or the link in that list for
a child counter. That list is protected by the perf_counter.mutex
field.
This also adds a mutex to the perf_counter_context struct. Previously
the list of counters was protected just by the lock field in the
context, which meant that perf_counter_init_task had to take that lock
and then take whatever lock/mutex protects the top-level counter's
child_list. But the counter enable/disable functions need to take
that lock in order to traverse the list, then for each counter take
the lock in that counter's context in order to change the counter's
state safely, which would lead to a deadlock.
To solve this, we now have both a mutex and a spinlock in the context,
and taking either is sufficient to ensure the list of counters can't
change - you have to take both before changing the list. Now
perf_counter_init_task takes the mutex instead of the lock (which
incidentally means that inherit_counter can use GFP_KERNEL instead of
GFP_ATOMIC) and thus avoids the possible deadlock. Similarly the new
enable/disable functions can take the mutex while traversing the list
of child counters without incurring a possible deadlock when the
counter manipulation code locks the context for a child counter.
We also had a misfeature that the first counter added to a context
would possibly not go on until the next sched-in, because we were
using ctx->nr_active to detect if the context was running on a CPU.
But nr_active is the number of active counters, and if that was zero
(because the context didn't have any counters yet) it would look like
the context wasn't running on a cpu and so the retry code in
__perf_install_in_context wouldn't retry. So this adds an 'is_active'
field that is set when the context is on a CPU, even if it has no
counters. The is_active field is only used for task contexts, not for
per-cpu contexts.
If we enable a subsidiary counter in a group that is active on a CPU,
and the arch code can't enable the counter, then we have to pull the
whole group off the CPU. We do this with group_sched_out, which gets
moved up in the file so it comes before all its callers. This also
adds similar logic to __perf_install_in_context so that the "all on,
or none" invariant of groups is preserved when adding a new counter to
a group.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-17 07:10:22 +00:00
|
|
|
|
if (ctx->is_active && list_empty(&counter->list_entry)) {
|
2008-12-04 19:12:29 +00:00
|
|
|
|
spin_unlock_irq(&ctx->lock);
|
|
|
|
|
goto retry;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* The lock prevents this context from being scheduled in, so we
|
|
|
|
|
* can add the counter safely if the call above did not
|
|
|
|
|
* succeed.
|
|
|
|
|
*/
|
perf_counter: record time running and time enabled for each counter
Impact: new functionality
Currently, if there are more counters enabled than can fit on the CPU,
the kernel will multiplex the counters on to the hardware using
round-robin scheduling. That isn't too bad for sampling counters, but
for counting counters it means that the value read from a counter
represents some unknown fraction of the true count of events that
occurred while the counter was enabled.
This remedies the situation by keeping track of how long each counter
is enabled for, and how long it is actually on the cpu and counting
events. These times are recorded in nanoseconds using the task clock
for per-task counters and the cpu clock for per-cpu counters.
These values can be supplied to userspace on a read from the counter.
Userspace requests that they be supplied after the counter value by
setting the PERF_FORMAT_TOTAL_TIME_ENABLED and/or
PERF_FORMAT_TOTAL_TIME_RUNNING bits in the hw_event.read_format field
when creating the counter. (There is no way to change the read format
after the counter is created, though it would be possible to add some
way to do that.)
Using this information it is possible for userspace to scale the count
it reads from the counter to get an estimate of the true count:
true_count_estimate = count * total_time_enabled / total_time_running
This also lets userspace detect the situation where the counter never
got to go on the cpu: total_time_running == 0.
This functionality has been requested by the PAPI developers, and will
be generally needed for interpreting the count values from counting
counters correctly.
In the implementation, this keeps 5 time values (in nanoseconds) for
each counter: total_time_enabled and total_time_running are used when
the counter is in state OFF or ERROR and for reporting back to
userspace. When the counter is in state INACTIVE or ACTIVE, it is the
tstamp_enabled, tstamp_running and tstamp_stopped values that are
relevant, and total_time_enabled and total_time_running are determined
from them. (tstamp_stopped is only used in INACTIVE state.) The
reason for doing it like this is that it means that only counters
being enabled or disabled at sched-in and sched-out time need to be
updated. There are no new loops that iterate over all counters to
update total_time_enabled or total_time_running.
This also keeps separate child_total_time_running and
child_total_time_enabled fields that get added in when reporting the
totals to userspace. They are separate fields so that they can be
atomic. We don't want to use atomics for total_time_running,
total_time_enabled etc., because then we would have to use atomic
sequences to update them, which are slower than regular arithmetic and
memory accesses.
It is possible to measure total_time_running by adding a task_clock
counter to each group of counters, and total_time_enabled can be
measured approximately with a top-level task_clock counter (though
inaccuracies will creep in if you need to disable and enable groups
since it is not possible in general to disable/enable the top-level
task_clock counter simultaneously with another group). However, that
adds extra overhead - I measured around 15% increase in the context
switch latency reported by lat_ctx (from lmbench) when a task_clock
counter was added to each of 2 groups, and around 25% increase when a
task_clock counter was added to each of 4 groups. (In both cases a
top-level task-clock counter was also added.)
In contrast, the code added in this commit gives better information
with no overhead that I could measure (in fact in some cases I
measured lower times with this code, but the differences were all less
than one standard deviation).
[ v2: address review comments by Andrew Morton. ]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Andrew Morton <akpm@linux-foundation.org>
Orig-LKML-Reference: <18890.6578.728637.139402@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-03-25 11:46:58 +00:00
|
|
|
|
if (list_empty(&counter->list_entry))
|
|
|
|
|
add_counter_to_ctx(counter, ctx);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
spin_unlock_irq(&ctx->lock);
|
|
|
|
|
}
|
|
|
|
|
|
perf_counter: Add counter enable/disable ioctls
Impact: New perf_counter features
This primarily adds a way for perf_counter users to enable and disable
counters and groups. Enabling or disabling a counter or group also
enables or disables all of the child counters that have been cloned
from it to monitor children of the task monitored by the top-level
counter. The userspace interface to enable/disable counters is via
ioctl on the counter file descriptor.
Along the way this extends the code that handles child counters to
handle child counter groups properly. A group with multiple counters
will be cloned to child tasks if and only if the group leader has the
hw_event.inherit bit set - if it is set the whole group is cloned as a
group in the child task.
In order to be able to enable or disable all child counters of a given
top-level counter, we need a way to find them all. Hence I have added
a child_list field to struct perf_counter, which is the head of the
list of children for a top-level counter, or the link in that list for
a child counter. That list is protected by the perf_counter.mutex
field.
This also adds a mutex to the perf_counter_context struct. Previously
the list of counters was protected just by the lock field in the
context, which meant that perf_counter_init_task had to take that lock
and then take whatever lock/mutex protects the top-level counter's
child_list. But the counter enable/disable functions need to take
that lock in order to traverse the list, then for each counter take
the lock in that counter's context in order to change the counter's
state safely, which would lead to a deadlock.
To solve this, we now have both a mutex and a spinlock in the context,
and taking either is sufficient to ensure the list of counters can't
change - you have to take both before changing the list. Now
perf_counter_init_task takes the mutex instead of the lock (which
incidentally means that inherit_counter can use GFP_KERNEL instead of
GFP_ATOMIC) and thus avoids the possible deadlock. Similarly the new
enable/disable functions can take the mutex while traversing the list
of child counters without incurring a possible deadlock when the
counter manipulation code locks the context for a child counter.
We also had a misfeature that the first counter added to a context
would possibly not go on until the next sched-in, because we were
using ctx->nr_active to detect if the context was running on a CPU.
But nr_active is the number of active counters, and if that was zero
(because the context didn't have any counters yet) it would look like
the context wasn't running on a cpu and so the retry code in
__perf_install_in_context wouldn't retry. So this adds an 'is_active'
field that is set when the context is on a CPU, even if it has no
counters. The is_active field is only used for task contexts, not for
per-cpu contexts.
If we enable a subsidiary counter in a group that is active on a CPU,
and the arch code can't enable the counter, then we have to pull the
whole group off the CPU. We do this with group_sched_out, which gets
moved up in the file so it comes before all its callers. This also
adds similar logic to __perf_install_in_context so that the "all on,
or none" invariant of groups is preserved when adding a new counter to
a group.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-17 07:10:22 +00:00
|
|
|
|
/*
|
|
|
|
|
* Cross CPU call to enable a performance counter
|
|
|
|
|
*/
|
|
|
|
|
static void __perf_counter_enable(void *info)
|
2008-12-11 07:38:42 +00:00
|
|
|
|
{
|
perf_counter: Add counter enable/disable ioctls
Impact: New perf_counter features
This primarily adds a way for perf_counter users to enable and disable
counters and groups. Enabling or disabling a counter or group also
enables or disables all of the child counters that have been cloned
from it to monitor children of the task monitored by the top-level
counter. The userspace interface to enable/disable counters is via
ioctl on the counter file descriptor.
Along the way this extends the code that handles child counters to
handle child counter groups properly. A group with multiple counters
will be cloned to child tasks if and only if the group leader has the
hw_event.inherit bit set - if it is set the whole group is cloned as a
group in the child task.
In order to be able to enable or disable all child counters of a given
top-level counter, we need a way to find them all. Hence I have added
a child_list field to struct perf_counter, which is the head of the
list of children for a top-level counter, or the link in that list for
a child counter. That list is protected by the perf_counter.mutex
field.
This also adds a mutex to the perf_counter_context struct. Previously
the list of counters was protected just by the lock field in the
context, which meant that perf_counter_init_task had to take that lock
and then take whatever lock/mutex protects the top-level counter's
child_list. But the counter enable/disable functions need to take
that lock in order to traverse the list, then for each counter take
the lock in that counter's context in order to change the counter's
state safely, which would lead to a deadlock.
To solve this, we now have both a mutex and a spinlock in the context,
and taking either is sufficient to ensure the list of counters can't
change - you have to take both before changing the list. Now
perf_counter_init_task takes the mutex instead of the lock (which
incidentally means that inherit_counter can use GFP_KERNEL instead of
GFP_ATOMIC) and thus avoids the possible deadlock. Similarly the new
enable/disable functions can take the mutex while traversing the list
of child counters without incurring a possible deadlock when the
counter manipulation code locks the context for a child counter.
We also had a misfeature that the first counter added to a context
would possibly not go on until the next sched-in, because we were
using ctx->nr_active to detect if the context was running on a CPU.
But nr_active is the number of active counters, and if that was zero
(because the context didn't have any counters yet) it would look like
the context wasn't running on a cpu and so the retry code in
__perf_install_in_context wouldn't retry. So this adds an 'is_active'
field that is set when the context is on a CPU, even if it has no
counters. The is_active field is only used for task contexts, not for
per-cpu contexts.
If we enable a subsidiary counter in a group that is active on a CPU,
and the arch code can't enable the counter, then we have to pull the
whole group off the CPU. We do this with group_sched_out, which gets
moved up in the file so it comes before all its callers. This also
adds similar logic to __perf_install_in_context so that the "all on,
or none" invariant of groups is preserved when adding a new counter to
a group.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-17 07:10:22 +00:00
|
|
|
|
struct perf_counter *counter = info;
|
|
|
|
|
struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
|
|
|
|
|
struct perf_counter_context *ctx = counter->ctx;
|
|
|
|
|
struct perf_counter *leader = counter->group_leader;
|
|
|
|
|
int err;
|
2008-12-11 07:38:42 +00:00
|
|
|
|
|
perf_counter: Add counter enable/disable ioctls
Impact: New perf_counter features
This primarily adds a way for perf_counter users to enable and disable
counters and groups. Enabling or disabling a counter or group also
enables or disables all of the child counters that have been cloned
from it to monitor children of the task monitored by the top-level
counter. The userspace interface to enable/disable counters is via
ioctl on the counter file descriptor.
Along the way this extends the code that handles child counters to
handle child counter groups properly. A group with multiple counters
will be cloned to child tasks if and only if the group leader has the
hw_event.inherit bit set - if it is set the whole group is cloned as a
group in the child task.
In order to be able to enable or disable all child counters of a given
top-level counter, we need a way to find them all. Hence I have added
a child_list field to struct perf_counter, which is the head of the
list of children for a top-level counter, or the link in that list for
a child counter. That list is protected by the perf_counter.mutex
field.
This also adds a mutex to the perf_counter_context struct. Previously
the list of counters was protected just by the lock field in the
context, which meant that perf_counter_init_task had to take that lock
and then take whatever lock/mutex protects the top-level counter's
child_list. But the counter enable/disable functions need to take
that lock in order to traverse the list, then for each counter take
the lock in that counter's context in order to change the counter's
state safely, which would lead to a deadlock.
To solve this, we now have both a mutex and a spinlock in the context,
and taking either is sufficient to ensure the list of counters can't
change - you have to take both before changing the list. Now
perf_counter_init_task takes the mutex instead of the lock (which
incidentally means that inherit_counter can use GFP_KERNEL instead of
GFP_ATOMIC) and thus avoids the possible deadlock. Similarly the new
enable/disable functions can take the mutex while traversing the list
of child counters without incurring a possible deadlock when the
counter manipulation code locks the context for a child counter.
We also had a misfeature that the first counter added to a context
would possibly not go on until the next sched-in, because we were
using ctx->nr_active to detect if the context was running on a CPU.
But nr_active is the number of active counters, and if that was zero
(because the context didn't have any counters yet) it would look like
the context wasn't running on a cpu and so the retry code in
__perf_install_in_context wouldn't retry. So this adds an 'is_active'
field that is set when the context is on a CPU, even if it has no
counters. The is_active field is only used for task contexts, not for
per-cpu contexts.
If we enable a subsidiary counter in a group that is active on a CPU,
and the arch code can't enable the counter, then we have to pull the
whole group off the CPU. We do this with group_sched_out, which gets
moved up in the file so it comes before all its callers. This also
adds similar logic to __perf_install_in_context so that the "all on,
or none" invariant of groups is preserved when adding a new counter to
a group.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-17 07:10:22 +00:00
|
|
|
|
/*
|
|
|
|
|
* If this is a per-task counter, we need to check whether this
|
|
|
|
|
* counter's task is the current task on this cpu.
|
|
|
|
|
*/
|
perf_counter: Dynamically allocate tasks' perf_counter_context struct
This replaces the struct perf_counter_context in the task_struct with
a pointer to a dynamically allocated perf_counter_context struct. The
main reason for doing this is to allow us to transfer a
perf_counter_context from one task to another when we do lazy PMU
switching in a later patch.
This has a few side-benefits: the task_struct becomes a little smaller,
we save some memory because only tasks that have perf_counters attached
get a perf_counter_context allocated for them, and we can remove the
inclusion of <linux/perf_counter.h> in sched.h, meaning that we don't
end up recompiling nearly everything whenever perf_counter.h changes.
The perf_counter_context structures are reference-counted and freed
when the last reference is dropped. A context can have references
from its task and the counters on its task. Counters can outlive the
task so it is possible that a context will be freed well after its
task has exited.
Contexts are allocated on fork if the parent had a context, or
otherwise the first time that a per-task counter is created on a task.
In the latter case, we set the context pointer in the task struct
locklessly using an atomic compare-and-exchange operation in case we
raced with some other task in creating a context for the subject task.
This also removes the task pointer from the perf_counter struct. The
task pointer was not used anywhere and would make it harder to move a
context from one task to another. Anything that needed to know which
task a counter was attached to was already using counter->ctx->task.
The __perf_counter_init_context function moves up in perf_counter.c
so that it can be called from find_get_context, and now initializes
the refcount, but is otherwise unchanged.
We were potentially calling list_del_counter twice: once from
__perf_counter_exit_task when the task exits and once from
__perf_counter_remove_from_context when the counter's fd gets closed.
This adds a check in list_del_counter so it doesn't do anything if
the counter has already been removed from the lists.
Since perf_counter_task_sched_in doesn't do anything if the task doesn't
have a context, and leaves cpuctx->task_ctx = NULL, this adds code to
__perf_install_in_context to set cpuctx->task_ctx if necessary, i.e. in
the case where the current task adds the first counter to itself and
thus creates a context for itself.
This also adds similar code to __perf_counter_enable to handle a
similar situation which can arise when the counters have been disabled
using prctl; that also leaves cpuctx->task_ctx = NULL.
[ Impact: refactor counter context management to prepare for new feature ]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <18966.10075.781053.231153@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-22 04:17:31 +00:00
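The lockless installation mentioned above is the usual allocate-then-cmpxchg pattern. A simplified sketch follows: get_task_ctx is a hypothetical helper, the perf_counter_ctxp field name is an assumption, and the retry and locking details of the real find_get_context are elided.

static struct perf_counter_context *get_task_ctx(struct task_struct *task)
{
	struct perf_counter_context *ctx = task->perf_counter_ctxp;

	if (ctx)
		return ctx;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;
	__perf_counter_init_context(ctx, task);

	/*
	 * Publish the new context only if nobody raced us to it; if the
	 * cmpxchg fails, another task already installed a context, so
	 * free ours and use the published one.
	 */
	if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx) != NULL) {
		kfree(ctx);
		ctx = task->perf_counter_ctxp;
	}
	return ctx;
}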
|
|
|
|
if (ctx->task && cpuctx->task_ctx != ctx) {
|
2009-05-29 12:51:57 +00:00
|
|
|
|
if (cpuctx->task_ctx || ctx->task != current)
|
perf_counter: Dynamically allocate tasks' perf_counter_context struct
This replaces the struct perf_counter_context in the task_struct with
a pointer to a dynamically allocated perf_counter_context struct. The
main reason for doing this is to allow us to transfer a
perf_counter_context from one task to another when we do lazy PMU
switching in a later patch.
This has a few side-benefits: the task_struct becomes a little smaller,
we save some memory because only tasks that have perf_counters attached
get a perf_counter_context allocated for them, and we can remove the
inclusion of <linux/perf_counter.h> in sched.h, meaning that we don't
end up recompiling nearly everything whenever perf_counter.h changes.
The perf_counter_context structures are reference-counted and freed
when the last reference is dropped. A context can have references
from its task and the counters on its task. Counters can outlive the
task so it is possible that a context will be freed well after its
task has exited.
Contexts are allocated on fork if the parent had a context, or
otherwise the first time that a per-task counter is created on a task.
In the latter case, we set the context pointer in the task struct
locklessly using an atomic compare-and-exchange operation in case we
raced with some other task in creating a context for the subject task.
This also removes the task pointer from the perf_counter struct. The
task pointer was not used anywhere and would make it harder to move a
context from one task to another. Anything that needed to know which
task a counter was attached to was already using counter->ctx->task.
The __perf_counter_init_context function moves up in perf_counter.c
so that it can be called from find_get_context, and now initializes
the refcount, but is otherwise unchanged.
We were potentially calling list_del_counter twice: once from
__perf_counter_exit_task when the task exits and once from
__perf_counter_remove_from_context when the counter's fd gets closed.
This adds a check in list_del_counter so it doesn't do anything if
the counter has already been removed from the lists.
Since perf_counter_task_sched_in doesn't do anything if the task doesn't
have a context, and leaves cpuctx->task_ctx = NULL, this adds code to
__perf_install_in_context to set cpuctx->task_ctx if necessary, i.e. in
the case where the current task adds the first counter to itself and
thus creates a context for itself.
This also adds similar code to __perf_counter_enable to handle a
similar situation which can arise when the counters have been disabled
using prctl; that also leaves cpuctx->task_ctx = NULL.
[ Impact: refactor counter context management to prepare for new feature ]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <18966.10075.781053.231153@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-22 04:17:31 +00:00
|
|
|
|
return;
|
|
|
|
|
cpuctx->task_ctx = ctx;
|
|
|
|
|
}
|
2009-01-09 05:43:42 +00:00
|
|
|
|
|
2009-05-29 09:25:09 +00:00
|
|
|
|
spin_lock(&ctx->lock);
|
perf_counter: Dynamically allocate tasks' perf_counter_context struct
This replaces the struct perf_counter_context in the task_struct with
a pointer to a dynamically allocated perf_counter_context struct. The
main reason for doing this is to allow us to transfer a
perf_counter_context from one task to another when we do lazy PMU
switching in a later patch.
This has a few side-benefits: the task_struct becomes a little smaller,
we save some memory because only tasks that have perf_counters attached
get a perf_counter_context allocated for them, and we can remove the
inclusion of <linux/perf_counter.h> in sched.h, meaning that we don't
end up recompiling nearly everything whenever perf_counter.h changes.
The perf_counter_context structures are reference-counted and freed
when the last reference is dropped. A context can have references
from its task and the counters on its task. Counters can outlive the
task so it is possible that a context will be freed well after its
task has exited.
Contexts are allocated on fork if the parent had a context, or
otherwise the first time that a per-task counter is created on a task.
In the latter case, we set the context pointer in the task struct
locklessly using an atomic compare-and-exchange operation in case we
raced with some other task in creating a context for the subject task.
This also removes the task pointer from the perf_counter struct. The
task pointer was not used anywhere and would make it harder to move a
context from one task to another. Anything that needed to know which
task a counter was attached to was already using counter->ctx->task.
The __perf_counter_init_context function moves up in perf_counter.c
so that it can be called from find_get_context, and now initializes
the refcount, but is otherwise unchanged.
We were potentially calling list_del_counter twice: once from
__perf_counter_exit_task when the task exits and once from
__perf_counter_remove_from_context when the counter's fd gets closed.
This adds a check in list_del_counter so it doesn't do anything if
the counter has already been removed from the lists.
Since perf_counter_task_sched_in doesn't do anything if the task doesn't
have a context, and leaves cpuctx->task_ctx = NULL, this adds code to
__perf_install_in_context to set cpuctx->task_ctx if necessary, i.e. in
the case where the current task adds the first counter to itself and
thus creates a context for itself.
This also adds similar code to __perf_counter_enable to handle a
similar situation which can arise when the counters have been disabled
using prctl; that also leaves cpuctx->task_ctx = NULL.
[ Impact: refactor counter context management to prepare for new feature ]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <18966.10075.781053.231153@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-22 04:17:31 +00:00
|
|
|
|
ctx->is_active = 1;
|
2009-04-06 09:45:10 +00:00
|
|
|
|
update_context_time(ctx);
|
perf_counter: Add counter enable/disable ioctls
Impact: New perf_counter features
This primarily adds a way for perf_counter users to enable and disable
counters and groups. Enabling or disabling a counter or group also
enables or disables all of the child counters that have been cloned
from it to monitor children of the task monitored by the top-level
counter. The userspace interface to enable/disable counters is via
ioctl on the counter file descriptor.
Along the way this extends the code that handles child counters to
handle child counter groups properly. A group with multiple counters
will be cloned to child tasks if and only if the group leader has the
hw_event.inherit bit set - if it is set the whole group is cloned as a
group in the child task.
In order to be able to enable or disable all child counters of a given
top-level counter, we need a way to find them all. Hence I have added
a child_list field to struct perf_counter, which is the head of the
list of children for a top-level counter, or the link in that list for
a child counter. That list is protected by the perf_counter.mutex
field.
This also adds a mutex to the perf_counter_context struct. Previously
the list of counters was protected just by the lock field in the
context, which meant that perf_counter_init_task had to take that lock
and then take whatever lock/mutex protects the top-level counter's
child_list. But the counter enable/disable functions need to take
that lock in order to traverse the list, then for each counter take
the lock in that counter's context in order to change the counter's
state safely, which would lead to a deadlock.
To solve this, we now have both a mutex and a spinlock in the context,
and taking either is sufficient to ensure the list of counters can't
change - you have to take both before changing the list. Now
perf_counter_init_task takes the mutex instead of the lock (which
incidentally means that inherit_counter can use GFP_KERNEL instead of
GFP_ATOMIC) and thus avoids the possible deadlock. Similarly the new
enable/disable functions can take the mutex while traversing the list
of child counters without incurring a possible deadlock when the
counter manipulation code locks the context for a child counter.
We also had a misfeature that the first counter added to a context
would possibly not go on until the next sched-in, because we were
using ctx->nr_active to detect if the context was running on a CPU.
But nr_active is the number of active counters, and if that was zero
(because the context didn't have any counters yet) it would look like
the context wasn't running on a cpu and so the retry code in
__perf_install_in_context wouldn't retry. So this adds an 'is_active'
field that is set when the context is on a CPU, even if it has no
counters. The is_active field is only used for task contexts, not for
per-cpu contexts.
If we enable a subsidiary counter in a group that is active on a CPU,
and the arch code can't enable the counter, then we have to pull the
whole group off the CPU. We do this with group_sched_out, which gets
moved up in the file so it comes before all its callers. This also
adds similar logic to __perf_install_in_context so that the "all on,
or none" invariant of groups is preserved when adding a new counter to
a group.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-17 07:10:22 +00:00
|
|
|
|
|
|
|
|
|
if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
|
|
|
|
|
goto unlock;
|
|
|
|
|
counter->state = PERF_COUNTER_STATE_INACTIVE;
|
2009-04-06 09:45:10 +00:00
|
|
|
|
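/*
 * Back-date tstamp_enabled so that "ctx->time - tstamp_enabled" equals
 * the time already accumulated in total_time_enabled; the enabled time
 * then stays continuous across this re-enable.
 */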
counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
|
2008-12-11 07:38:42 +00:00
|
|
|
|
|
|
|
|
|
/*
|
perf_counter: Add counter enable/disable ioctls
Impact: New perf_counter features
This primarily adds a way for perf_counter users to enable and disable
counters and groups. Enabling or disabling a counter or group also
enables or disables all of the child counters that have been cloned
from it to monitor children of the task monitored by the top-level
counter. The userspace interface to enable/disable counters is via
ioctl on the counter file descriptor.
Along the way this extends the code that handles child counters to
handle child counter groups properly. A group with multiple counters
will be cloned to child tasks if and only if the group leader has the
hw_event.inherit bit set - if it is set the whole group is cloned as a
group in the child task.
In order to be able to enable or disable all child counters of a given
top-level counter, we need a way to find them all. Hence I have added
a child_list field to struct perf_counter, which is the head of the
list of children for a top-level counter, or the link in that list for
a child counter. That list is protected by the perf_counter.mutex
field.
This also adds a mutex to the perf_counter_context struct. Previously
the list of counters was protected just by the lock field in the
context, which meant that perf_counter_init_task had to take that lock
and then take whatever lock/mutex protects the top-level counter's
child_list. But the counter enable/disable functions need to take
that lock in order to traverse the list, then for each counter take
the lock in that counter's context in order to change the counter's
state safely, which would lead to a deadlock.
To solve this, we now have both a mutex and a spinlock in the context,
and taking either is sufficient to ensure the list of counters can't
change - you have to take both before changing the list. Now
perf_counter_init_task takes the mutex instead of the lock (which
incidentally means that inherit_counter can use GFP_KERNEL instead of
GFP_ATOMIC) and thus avoids the possible deadlock. Similarly the new
enable/disable functions can take the mutex while traversing the list
of child counters without incurring a possible deadlock when the
counter manipulation code locks the context for a child counter.
We also had a misfeature that the first counter added to a context
would possibly not go on until the next sched-in, because we were
using ctx->nr_active to detect if the context was running on a CPU.
But nr_active is the number of active counters, and if that was zero
(because the context didn't have any counters yet) it would look like
the context wasn't running on a cpu and so the retry code in
__perf_install_in_context wouldn't retry. So this adds an 'is_active'
field that is set when the context is on a CPU, even if it has no
counters. The is_active field is only used for task contexts, not for
per-cpu contexts.
If we enable a subsidiary counter in a group that is active on a CPU,
and the arch code can't enable the counter, then we have to pull the
whole group off the CPU. We do this with group_sched_out, which gets
moved up in the file so it comes before all its callers. This also
adds similar logic to __perf_install_in_context so that the "all on,
or none" invariant of groups is preserved when adding a new counter to
a group.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-17 07:10:22 +00:00
|
|
|
|
* If the counter is in a group and isn't the group leader,
|
|
|
|
|
* then don't put it on unless the group is on.
|
2008-12-11 07:38:42 +00:00
|
|
|
|
*/
|
perf_counter: Add counter enable/disable ioctls
Impact: New perf_counter features
This primarily adds a way for perf_counter users to enable and disable
counters and groups. Enabling or disabling a counter or group also
enables or disables all of the child counters that have been cloned
from it to monitor children of the task monitored by the top-level
counter. The userspace interface to enable/disable counters is via
ioctl on the counter file descriptor.
Along the way this extends the code that handles child counters to
handle child counter groups properly. A group with multiple counters
will be cloned to child tasks if and only if the group leader has the
hw_event.inherit bit set - if it is set the whole group is cloned as a
group in the child task.
In order to be able to enable or disable all child counters of a given
top-level counter, we need a way to find them all. Hence I have added
a child_list field to struct perf_counter, which is the head of the
list of children for a top-level counter, or the link in that list for
a child counter. That list is protected by the perf_counter.mutex
field.
This also adds a mutex to the perf_counter_context struct. Previously
the list of counters was protected just by the lock field in the
context, which meant that perf_counter_init_task had to take that lock
and then take whatever lock/mutex protects the top-level counter's
child_list. But the counter enable/disable functions need to take
that lock in order to traverse the list, then for each counter take
the lock in that counter's context in order to change the counter's
state safely, which would lead to a deadlock.
To solve this, we now have both a mutex and a spinlock in the context,
and taking either is sufficient to ensure the list of counters can't
change - you have to take both before changing the list. Now
perf_counter_init_task takes the mutex instead of the lock (which
incidentally means that inherit_counter can use GFP_KERNEL instead of
GFP_ATOMIC) and thus avoids the possible deadlock. Similarly the new
enable/disable functions can take the mutex while traversing the list
of child counters without incurring a possible deadlock when the
counter manipulation code locks the context for a child counter.
We also had a misfeature that the first counter added to a context
would possibly not go on until the next sched-in, because we were
using ctx->nr_active to detect if the context was running on a CPU.
But nr_active is the number of active counters, and if that was zero
(because the context didn't have any counters yet) it would look like
the context wasn't running on a cpu and so the retry code in
__perf_install_in_context wouldn't retry. So this adds an 'is_active'
field that is set when the context is on a CPU, even if it has no
counters. The is_active field is only used for task contexts, not for
per-cpu contexts.
If we enable a subsidiary counter in a group that is active on a CPU,
and the arch code can't enable the counter, then we have to pull the
whole group off the CPU. We do this with group_sched_out, which gets
moved up in the file so it comes before all its callers. This also
adds similar logic to __perf_install_in_context so that the "all on,
or none" invariant of groups is preserved when adding a new counter to
a group.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-17 07:10:22 +00:00
|
|
|
|
if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
|
|
|
|
|
goto unlock;
|
perf_counter: Add support for pinned and exclusive counter groups
Impact: New perf_counter features
A pinned counter group is one that the user wants to have on the CPU
whenever possible, i.e. whenever the associated task is running, for
a per-task group, or always for a per-cpu group. If the system
cannot satisfy that, it puts the group into an error state where
it is not scheduled any more and reads from it return EOF (i.e. 0
bytes read). The group can be released from error state and made
readable again using prctl(PR_TASK_PERF_COUNTERS_ENABLE). When we
have finer-grained enable/disable controls on counters we'll be able
to reset the error state on individual groups.
An exclusive group is one that the user wants to be the only group
using the CPU performance monitor hardware whenever it is on. The
counter group scheduler will not schedule an exclusive group if there
are already other groups on the CPU and will not schedule other groups
onto the CPU if there is an exclusive group scheduled (that statement
does not apply to groups containing only software counters, which can
always go on and which do not prevent an exclusive group from going on).
With an exclusive group, we will be able to let users program PMU
registers at a low level without the concern that those settings will
perturb other measurements.
Along the way this reorganizes things a little:
- is_software_counter() is moved to perf_counter.h.
- cpuctx->active_oncpu now records the number of hardware counters on
the CPU, i.e. it now excludes software counters. Nothing was reading
cpuctx->active_oncpu before, so this change is harmless.
- A new cpuctx->exclusive field records whether we currently have an
exclusive group on the CPU.
- counter_sched_out moves higher up in perf_counter.c and gets called
from __perf_counter_remove_from_context and __perf_counter_exit_task,
where we used to have essentially the same code.
- __perf_counter_sched_in now goes through the counter list twice, doing
the pinned counters in the first loop and the non-pinned counters in
the second loop, in order to give the pinned counters the best chance
to be scheduled in.
Note that only a group leader can be exclusive or pinned, and that
attribute applies to the whole group. This avoids some awkwardness in
some corner cases (e.g. where a group leader is closed and the other
group members get added to the context list). If we want to relax that
restriction later, we can, and it is easier to relax a restriction than
to apply a new one.
This doesn't yet handle the case where a pinned counter is inherited
and goes into error state in the child - the error state is not
propagated up to the parent when the child exits, and arguably it
should.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-14 10:00:30 +00:00
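The scheduling rule spelled out above (software-only groups always go on, an exclusive group keeps other hardware groups off the CPU and vice versa) is the policy that group_can_go_on(), called just below, has to encode. A minimal sketch of that policy, assuming a helper named is_software_only_group() and using the cpuctx->exclusive and cpuctx->active_oncpu fields this change introduces; field naming follows the counter->attr usage visible elsewhere in this file:

static int group_can_go_on(struct perf_counter *counter,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software counters can always go on.
	 */
	if (is_software_only_group(counter))
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * counter groups can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already hardware
	 * counters on the CPU, it can't go on.
	 */
	if (counter->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}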
|
|
|
|
|
2009-05-12 11:59:01 +00:00
|
|
|
|
if (!group_can_go_on(counter, cpuctx, 1)) {
|
2009-01-17 07:10:22 +00:00
|
|
|
|
err = -EEXIST;
|
2009-05-12 11:59:01 +00:00
|
|
|
|
} else {
|
2009-05-13 14:21:38 +00:00
|
|
|
|
perf_disable();
|
2009-05-12 11:59:01 +00:00
|
|
|
|
if (counter == leader)
|
|
|
|
|
err = group_sched_in(counter, cpuctx, ctx,
|
|
|
|
|
smp_processor_id());
|
|
|
|
|
else
|
|
|
|
|
err = counter_sched_in(counter, cpuctx, ctx,
|
|
|
|
|
smp_processor_id());
|
2009-05-13 14:21:38 +00:00
|
|
|
|
perf_enable();
|
2009-05-12 11:59:01 +00:00
|
|
|
|
}
|
2009-01-17 07:10:22 +00:00
|
|
|
|
|
|
|
|
|
if (err) {
|
|
|
|
|
/*
|
|
|
|
|
* If this counter can't go on and it's part of a
|
|
|
|
|
* group, then the whole group has to come off.
|
|
|
|
|
*/
|
|
|
|
|
if (leader != counter)
|
|
|
|
|
group_sched_out(leader, cpuctx, ctx);
|
2009-06-02 17:22:16 +00:00
|
|
|
|
if (leader->attr.pinned) {
|
perf_counter: record time running and time enabled for each counter
Impact: new functionality
Currently, if there are more counters enabled than can fit on the CPU,
the kernel will multiplex the counters on to the hardware using
round-robin scheduling. That isn't too bad for sampling counters, but
for counting counters it means that the value read from a counter
represents some unknown fraction of the true count of events that
occurred while the counter was enabled.
This remedies the situation by keeping track of how long each counter
is enabled for, and how long it is actually on the cpu and counting
events. These times are recorded in nanoseconds using the task clock
for per-task counters and the cpu clock for per-cpu counters.
These values can be supplied to userspace on a read from the counter.
Userspace requests that they be supplied after the counter value by
setting the PERF_FORMAT_TOTAL_TIME_ENABLED and/or
PERF_FORMAT_TOTAL_TIME_RUNNING bits in the hw_event.read_format field
when creating the counter. (There is no way to change the read format
after the counter is created, though it would be possible to add some
way to do that.)
Using this information it is possible for userspace to scale the count
it reads from the counter to get an estimate of the true count:
true_count_estimate = count * total_time_enabled / total_time_running
This also lets userspace detect the situation where the counter never
got to go on the cpu: total_time_running == 0.
This functionality has been requested by the PAPI developers, and will
be generally needed for interpreting the count values from counting
counters correctly.
In the implementation, this keeps 5 time values (in nanoseconds) for
each counter: total_time_enabled and total_time_running are used when
the counter is in state OFF or ERROR and for reporting back to
userspace. When the counter is in state INACTIVE or ACTIVE, it is the
tstamp_enabled, tstamp_running and tstamp_stopped values that are
relevant, and total_time_enabled and total_time_running are determined
from them. (tstamp_stopped is only used in INACTIVE state.) The
reason for doing it like this is that it means that only counters
being enabled or disabled at sched-in and sched-out time need to be
updated. There are no new loops that iterate over all counters to
update total_time_enabled or total_time_running.
This also keeps separate child_total_time_running and
child_total_time_enabled fields that get added in when reporting the
totals to userspace. They are separate fields so that they can be
atomic. We don't want to use atomics for total_time_running,
total_time_enabled etc., because then we would have to use atomic
sequences to update them, which are slower than regular arithmetic and
memory accesses.
It is possible to measure total_time_running by adding a task_clock
counter to each group of counters, and total_time_enabled can be
measured approximately with a top-level task_clock counter (though
inaccuracies will creep in if you need to disable and enable groups
since it is not possible in general to disable/enable the top-level
task_clock counter simultaneously with another group). However, that
adds extra overhead - I measured around 15% increase in the context
switch latency reported by lat_ctx (from lmbench) when a task_clock
counter was added to each of 2 groups, and around 25% increase when a
task_clock counter was added to each of 4 groups. (In both cases a
top-level task-clock counter was also added.)
In contrast, the code added in this commit gives better information
with no overhead that I could measure (in fact in some cases I
measured lower times with this code, but the differences were all less
than one standard deviation).
[ v2: address review comments by Andrew Morton. ]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Andrew Morton <akpm@linux-foundation.org>
Orig-LKML-Reference: <18890.6578.728637.139402@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-03-25 11:46:58 +00:00
|
|
|
|
update_group_times(leader);
|
2009-01-17 07:10:22 +00:00
|
|
|
|
leader->state = PERF_COUNTER_STATE_ERROR;
|
2009-03-25 11:46:58 +00:00
|
|
|
|
}
|
2009-01-17 07:10:22 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
unlock:
|
2009-05-29 12:51:57 +00:00
|
|
|
|
spin_unlock(&ctx->lock);
|
2009-01-17 07:10:22 +00:00
|
|
|
|
}
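The time accounting described above is what makes counting counters usable when they are multiplexed: userspace asks for total_time_enabled and total_time_running alongside the count and scales the result. A minimal userspace sketch, assuming the counter was opened with hw_event.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING so that read() returns the three u64 values in that order:

#include <stdint.h>
#include <unistd.h>

static int read_scaled_count(int counter_fd, uint64_t *estimate)
{
	uint64_t buf[3];	/* count, total_time_enabled, total_time_running */

	if (read(counter_fd, buf, sizeof(buf)) != sizeof(buf))
		return -1;

	if (buf[2] == 0) {
		/* total_time_running == 0: the counter never got onto the CPU */
		*estimate = 0;
		return 0;
	}

	/* true_count_estimate = count * total_time_enabled / total_time_running */
	*estimate = (uint64_t)((double)buf[0] * buf[1] / buf[2]);
	return 0;
}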
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Enable a counter.
|
perf_counter: Fix race in attaching counters to tasks and exiting
Commit 564c2b21 ("perf_counter: Optimize context switch between
identical inherited contexts") introduced a race where it is possible
that a counter being attached to a task could get attached to the
wrong task, if the task is one that has inherited its context from
another task via fork. This happens because the optimized context
switch could switch the context to another task after find_get_context
has read task->perf_counter_ctxp. In fact, it's possible that the
context could then get freed, if the other task then exits.
This fixes the problem by protecting both the context switch and the
critical code in find_get_context with spinlocks. The context switch
locks the ctx->lock of both the outgoing and incoming contexts before
swapping them. That means that once code such as find_get_context
has obtained the spinlock for the context associated with a task,
the context can't get swapped to another task. However, the context
may have been swapped in the interval between reading
task->perf_counter_ctxp and getting the lock, so it is necessary to
check and retry.
To make sure that none of the contexts being looked at in
find_get_context can get freed, this changes the context freeing code
to use RCU. Thus an rcu_read_lock() is sufficient to ensure that no
contexts can get freed. This part of the patch is lifted from a patch
posted by Peter Zijlstra.
This also adds a check to make sure that we can't add a counter to a
task that is exiting.
There is also a race between perf_counter_exit_task and
find_get_context; this solves the race by moving the get_ctx that
was in perf_counter_alloc into the locked region in find_get_context,
so that once find_get_context has got the context for a task, it
won't get freed even if the task calls perf_counter_exit_task. It
doesn't matter if new top-level (non-inherited) counters get attached
to the context after perf_counter_exit_task has detached the context
from the task. They will just stay there and never get scheduled in
until the counters' fds get closed, and then perf_release will remove
them from the context and eventually free the context.
With this, we are now doing the unclone in find_get_context rather
than when a counter was added to or removed from a context (actually,
we were missing the unclone_ctx() call when adding a counter to a
context). We don't need to unclone when removing a counter from a
context because we have no way to remove a counter from a cloned
context.
This also takes out the smp_wmb() in find_get_context, which Peter
Zijlstra pointed out was unnecessary because the cmpxchg implies a
full barrier anyway.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <18974.33033.667187.273886@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-28 12:18:17 +00:00
|
|
|
|
*
|
|
|
|
|
* If counter->ctx is a cloned context, callers must make sure that
|
|
|
|
|
* every task struct that counter->ctx->task could possibly point to
|
|
|
|
|
* remains valid. This condition is satisfied when called through
|
|
|
|
|
* perf_counter_for_each_child or perf_counter_for_each as described
|
|
|
|
|
* for perf_counter_disable.
|
2009-01-17 07:10:22 +00:00
|
|
|
|
*/
|
|
|
|
|
static void perf_counter_enable(struct perf_counter *counter)
|
|
|
|
|
{
|
|
|
|
|
struct perf_counter_context *ctx = counter->ctx;
|
|
|
|
|
struct task_struct *task = ctx->task;
|
|
|
|
|
|
|
|
|
|
if (!task) {
|
|
|
|
|
/*
|
|
|
|
|
* Enable the counter on the cpu that it's on
|
|
|
|
|
*/
|
|
|
|
|
smp_call_function_single(counter->cpu, __perf_counter_enable,
|
|
|
|
|
counter, 1);
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
spin_lock_irq(&ctx->lock);
|
|
|
|
|
if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
|
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* If the counter is in error state, clear that first.
|
|
|
|
|
* That way, if we see the counter in error state below, we
|
|
|
|
|
* know that it has gone back into error state, as distinct
|
|
|
|
|
* from the task having been scheduled away before the
|
|
|
|
|
* cross-call arrived.
|
|
|
|
|
*/
|
|
|
|
|
if (counter->state == PERF_COUNTER_STATE_ERROR)
|
|
|
|
|
counter->state = PERF_COUNTER_STATE_OFF;
|
|
|
|
|
|
|
|
|
|
retry:
|
|
|
|
|
spin_unlock_irq(&ctx->lock);
|
|
|
|
|
task_oncpu_function_call(task, __perf_counter_enable, counter);
|
|
|
|
|
|
|
|
|
|
spin_lock_irq(&ctx->lock);
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* If the context is active and the counter is still off,
|
|
|
|
|
* we need to retry the cross-call.
|
|
|
|
|
*/
|
|
|
|
|
if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
|
|
|
|
|
goto retry;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Since we have the lock this context can't be scheduled
|
|
|
|
|
* in, so we can change the state safely.
|
|
|
|
|
*/
|
2009-03-25 11:46:58 +00:00
|
|
|
|
if (counter->state == PERF_COUNTER_STATE_OFF) {
|
2009-01-17 07:10:22 +00:00
|
|
|
|
counter->state = PERF_COUNTER_STATE_INACTIVE;
|
2009-04-06 09:45:10 +00:00
|
|
|
|
counter->tstamp_enabled =
|
|
|
|
|
ctx->time - counter->total_time_enabled;
|
2009-03-25 11:46:58 +00:00
|
|
|
|
}
|
2009-01-17 07:10:22 +00:00
|
|
|
|
out:
|
|
|
|
|
spin_unlock_irq(&ctx->lock);
|
|
|
|
|
}
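The "cloned context" rules in the comment above exist because of the race fix described earlier: a task's context can be swapped by the optimized context switch between reading task->perf_counter_ctxp and taking its lock, so the pointer has to be re-checked under ctx->lock and the lookup retried. A minimal sketch of that lock-and-recheck pattern; the helper name and exact structure are assumptions, not a copy of find_get_context():

static struct perf_counter_context *
lock_task_context(struct task_struct *task, unsigned long *flags)
{
	struct perf_counter_context *ctx;

retry:
	rcu_read_lock();
	ctx = rcu_dereference(task->perf_counter_ctxp);
	if (ctx) {
		spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
			/* swapped under us by a context switch: start over */
			spin_unlock_irqrestore(&ctx->lock, *flags);
			rcu_read_unlock();
			goto retry;
		}
	}
	rcu_read_unlock();
	return ctx;	/* NULL, or locked and therefore pinned to this task */
}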
|
|
|
|
|
|
2009-05-05 15:50:26 +00:00
|
|
|
|
static int perf_counter_refresh(struct perf_counter *counter, int refresh)
|
2009-04-06 09:45:07 +00:00
|
|
|
|
{
|
2009-05-05 15:50:26 +00:00
|
|
|
|
/*
|
|
|
|
|
* not supported on inherited counters
|
|
|
|
|
*/
|
2009-06-02 17:22:16 +00:00
|
|
|
|
if (counter->attr.inherit)
|
2009-05-05 15:50:26 +00:00
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
2009-04-06 09:45:07 +00:00
|
|
|
|
atomic_add(refresh, &counter->event_limit);
|
|
|
|
|
perf_counter_enable(counter);
|
2009-05-05 15:50:26 +00:00
|
|
|
|
|
|
|
|
|
return 0;
|
2009-04-06 09:45:07 +00:00
|
|
|
|
}
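perf_counter_enable() and perf_counter_refresh() above are driven from userspace through the counter file descriptor, which the enable/disable change describes as an ioctl interface. A hedged userspace sketch; the PERF_COUNTER_IOC_* request names and the refresh argument are assumed from the linux/perf_counter.h of this period:

#include <sys/ioctl.h>
#include <linux/perf_counter.h>	/* PERF_COUNTER_IOC_* names assumed */

/* start or stop counting on an existing counter fd */
static int counter_set_enabled(int counter_fd, int enable)
{
	return ioctl(counter_fd,
		     enable ? PERF_COUNTER_IOC_ENABLE : PERF_COUNTER_IOC_DISABLE, 0);
}

/* re-arm a counter for 'refresh' more overflow events (event_limit) */
static int counter_refresh(int counter_fd, int refresh)
{
	return ioctl(counter_fd, PERF_COUNTER_IOC_REFRESH, refresh);
}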
|
|
|
|
|
|
2008-12-21 13:43:25 +00:00
|
|
|
|
void __perf_counter_sched_out(struct perf_counter_context *ctx,
|
|
|
|
|
struct perf_cpu_context *cpuctx)
|
|
|
|
|
{
|
|
|
|
|
struct perf_counter *counter;
|
|
|
|
|
|
2009-01-17 07:10:22 +00:00
|
|
|
|
spin_lock(&ctx->lock);
|
|
|
|
|
ctx->is_active = 0;
|
2008-12-21 13:43:25 +00:00
|
|
|
|
if (likely(!ctx->nr_counters))
|
2009-01-17 07:10:22 +00:00
|
|
|
|
goto out;
|
2009-04-06 09:45:10 +00:00
|
|
|
|
update_context_time(ctx);
|
2008-12-21 13:43:25 +00:00
|
|
|
|
|
2009-05-13 14:21:38 +00:00
|
|
|
|
perf_disable();
|
2008-12-21 13:43:25 +00:00
|
|
|
|
if (ctx->nr_active) {
|
2009-05-20 10:21:22 +00:00
|
|
|
|
list_for_each_entry(counter, &ctx->counter_list, list_entry) {
|
|
|
|
|
if (counter != counter->group_leader)
|
|
|
|
|
counter_sched_out(counter, cpuctx, ctx);
|
|
|
|
|
else
|
|
|
|
|
group_sched_out(counter, cpuctx, ctx);
|
|
|
|
|
}
|
2008-12-21 13:43:25 +00:00
|
|
|
|
}
|
2009-05-13 14:21:38 +00:00
|
|
|
|
perf_enable();
|
2009-01-17 07:10:22 +00:00
|
|
|
|
out:
|
2008-12-21 13:43:25 +00:00
|
|
|
|
spin_unlock(&ctx->lock);
|
|
|
|
|
}
|
|
|
|
|
|
perf_counter: Optimize context switch between identical inherited contexts
When monitoring a process and its descendants with a set of inherited
counters, we can often get the situation in a context switch where
both the old (outgoing) and new (incoming) process have the same set
of counters, and their values are ultimately going to be added together.
In that situation it doesn't matter which set of counters is used to
count the activity for the new process, so there is really no need to
go through the process of reading the hardware counters and updating
the old task's counters and then setting up the PMU for the new task.
This optimizes the context switch in this situation. Instead of
scheduling out the perf_counter_context for the old task and
scheduling in the new context, we simply transfer the old context
to the new task and keep using it without interruption. The new
context gets transferred to the old task. This means that both
tasks still have a valid perf_counter_context, so no special case
is introduced when the old task gets scheduled in again, either on
this CPU or another CPU.
The equivalence of contexts is detected by keeping a pointer in
each cloned context pointing to the context it was cloned from.
To cope with the situation where a context is changed by adding
or removing counters after it has been cloned, we also keep a
generation number on each context which is incremented every time
a context is changed. When a context is cloned we take a copy
of the parent's generation number, and two cloned contexts are
equivalent only if they have the same parent and the same
generation number. In order that the parent context pointer
remains valid (and is not reused), we increment the parent
context's reference count for each context cloned from it.
Since we don't have individual fds for the counters in a cloned
context, the only thing that can make two clones of a given parent
different after they have been cloned is enabling or disabling all
counters with prctl. To account for this, we keep a count of the
number of enabled counters in each context. Two contexts must have
the same number of enabled counters to be considered equivalent.
Here are some measurements of the context switch time as measured with
the lat_ctx benchmark from lmbench, comparing the times obtained with
and without this patch series:
              -----Unmodified-----      With this patch series
Counters:     none    2 HW    4H+4S     none    2 HW    4H+4S
2 processes:
Average       3.44    6.45    11.24     3.12    3.39     3.60
St dev        0.04    0.04     0.13     0.05    0.17     0.19
8 processes:
Average       6.45    8.79    14.00     5.57    6.23     7.57
St dev        1.27    1.04     0.88     1.42    1.46     1.42
32 processes:
Average       5.56    8.43    13.78     5.28    5.55     7.15
St dev        0.41    0.47     0.53     0.54    0.57     0.81
The numbers are the mean and standard deviation of 20 runs of
lat_ctx. The "none" columns are lat_ctx run directly without any
counters. The "2 HW" columns are with lat_ctx run under perfstat,
counting cycles and instructions. The "4H+4S" columns are lat_ctx run
under perfstat with 4 hardware counters and 4 software counters
(cycles, instructions, cache references, cache misses, task
clock, context switch, cpu migrations, and page faults).
[ Impact: performance optimization of counter context-switches ]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <18966.10666.517218.332164@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-22 04:27:22 +00:00
|
|
|
|
/*
|
|
|
|
|
* Test whether two contexts are equivalent, i.e. whether they
|
|
|
|
|
* have both been cloned from the same version of the same context
|
|
|
|
|
* and they both have the same number of enabled counters.
|
|
|
|
|
* If the number of enabled counters is the same, then the set
|
|
|
|
|
* of enabled counters should be the same, because these are both
|
|
|
|
|
* inherited contexts, therefore we can't access individual counters
|
|
|
|
|
* in them directly with an fd; we can only enable/disable all
|
|
|
|
|
* counters via prctl, or enable/disable all counters in a family
|
|
|
|
|
* via ioctl, which will have the same effect on both contexts.
|
|
|
|
|
*/
|
|
|
|
|
static int context_equiv(struct perf_counter_context *ctx1,
|
|
|
|
|
struct perf_counter_context *ctx2)
|
|
|
|
|
{
|
|
|
|
|
return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
|
2009-05-29 06:06:20 +00:00
|
|
|
|
&& ctx1->parent_gen == ctx2->parent_gen
|
2009-06-01 07:48:12 +00:00
|
|
|
|
&& !ctx1->pin_count && !ctx2->pin_count;
|
2009-05-22 04:27:22 +00:00
|
|
|
|
}
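The bookkeeping that makes this test meaningful lives elsewhere in the file (the counter-list add/remove paths and the inheritance code). The following is an illustrative sketch only, written from the commit description above; the helper names are invented and the field names follow that description:
/*
 * Illustrative sketch (not the literal helpers in this file): every change
 * to a context's counter list bumps ctx->generation, and a clone snapshots
 * the parent's generation and pins the parent so the pointer comparison in
 * context_equiv() stays valid.
 */
static inline void sketch_ctx_changed(struct perf_counter_context *ctx)
{
	ctx->generation++;		/* existing clones are no longer equivalent */
}

static inline void sketch_ctx_cloned(struct perf_counter_context *child,
				     struct perf_counter_context *parent)
{
	get_ctx(parent);		/* keep parent_ctx from being freed or reused */
	child->parent_ctx = parent;
	child->parent_gen = parent->generation;
}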
|
|
|
|
|
|
2009-06-24 19:11:59 +00:00
|
|
|
|
static void __perf_counter_read(void *counter);
|
|
|
|
|
|
|
|
|
|
static void __perf_counter_sync_stat(struct perf_counter *counter,
|
|
|
|
|
struct perf_counter *next_counter)
|
|
|
|
|
{
|
|
|
|
|
u64 value;
|
|
|
|
|
|
|
|
|
|
if (!counter->attr.inherit_stat)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Update the counter value, we cannot use perf_counter_read()
|
|
|
|
|
* because we're in the middle of a context switch and have IRQs
|
|
|
|
|
* disabled, which upsets smp_call_function_single(), however
|
|
|
|
|
* we know the counter must be on the current CPU, therefore we
|
|
|
|
|
* don't need to use it.
|
|
|
|
|
*/
|
|
|
|
|
switch (counter->state) {
|
|
|
|
|
case PERF_COUNTER_STATE_ACTIVE:
|
|
|
|
|
__perf_counter_read(counter);
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case PERF_COUNTER_STATE_INACTIVE:
|
|
|
|
|
update_counter_times(counter);
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
default:
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* In order to keep per-task stats reliable we need to flip the counter
|
|
|
|
|
* values when we flip the contexts.
|
|
|
|
|
*/
|
|
|
|
|
value = atomic64_read(&next_counter->count);
|
|
|
|
|
value = atomic64_xchg(&counter->count, value);
|
|
|
|
|
atomic64_set(&next_counter->count, value);
|
|
|
|
|
|
2009-06-26 11:10:23 +00:00
|
|
|
|
swap(counter->total_time_enabled, next_counter->total_time_enabled);
|
|
|
|
|
swap(counter->total_time_running, next_counter->total_time_running);
|
|
|
|
|
|
2009-06-24 19:11:59 +00:00
|
|
|
|
/*
|
2009-06-26 11:10:23 +00:00
|
|
|
|
* Since we swizzled the values, update the user visible data too.
|
2009-06-24 19:11:59 +00:00
|
|
|
|
*/
|
2009-06-26 11:10:23 +00:00
|
|
|
|
perf_counter_update_userpage(counter);
|
|
|
|
|
perf_counter_update_userpage(next_counter);
|
2009-06-24 19:11:59 +00:00
|
|
|
|
}
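A short worked example of why the values are swapped rather than copied (illustrative only):
/*
 * Example: task A's inherit_stat counter has accumulated 100 events and
 * task B's clone has accumulated 40 when their equivalent contexts are
 * exchanged at context switch.  The hardware keeps adding new events to
 * the counter that stays resident on this CPU, which now belongs to the
 * incoming task B, so those new events are charged correctly.  Swapping
 * the accumulated values (100 <-> 40) and the time-enabled/time-running
 * totals at the same moment means A's counter still reads 100 and B's
 * still reads 40, i.e. each task's counter keeps reporting only that
 * task's own activity.
 */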
|
|
|
|
|
|
|
|
|
|
#define list_next_entry(pos, member) \
|
|
|
|
|
list_entry(pos->member.next, typeof(*pos), member)
|
|
|
|
|
|
|
|
|
|
static void perf_counter_sync_stat(struct perf_counter_context *ctx,
|
|
|
|
|
struct perf_counter_context *next_ctx)
|
|
|
|
|
{
|
|
|
|
|
struct perf_counter *counter, *next_counter;
|
|
|
|
|
|
|
|
|
|
if (!ctx->nr_stat)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
counter = list_first_entry(&ctx->event_list,
|
|
|
|
|
struct perf_counter, event_entry);
|
|
|
|
|
|
|
|
|
|
next_counter = list_first_entry(&next_ctx->event_list,
|
|
|
|
|
struct perf_counter, event_entry);
|
|
|
|
|
|
|
|
|
|
while (&counter->event_entry != &ctx->event_list &&
|
|
|
|
|
&next_counter->event_entry != &next_ctx->event_list) {
|
|
|
|
|
|
|
|
|
|
__perf_counter_sync_stat(counter, next_counter);
|
|
|
|
|
|
|
|
|
|
counter = list_next_entry(counter, event_entry);
|
2009-08-06 16:06:26 +00:00
|
|
|
|
next_counter = list_next_entry(next_counter, event_entry);
|
2009-06-24 19:11:59 +00:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2008-12-04 19:12:29 +00:00
|
|
|
|
/*
|
|
|
|
|
* Called from scheduler to remove the counters of the current task,
|
|
|
|
|
* with interrupts disabled.
|
|
|
|
|
*
|
|
|
|
|
* We stop each counter and update the counter value in counter->count.
|
|
|
|
|
*
|
2008-12-17 13:20:28 +00:00
|
|
|
|
* This does not protect us against NMI, but disable()
|
2008-12-04 19:12:29 +00:00
|
|
|
|
* sets the disabled bit in the control field of counter _before_
|
|
|
|
|
* accessing the counter control register. If an NMI hits, then it will
|
|
|
|
|
* not restart the counter.
|
|
|
|
|
*/
|
2009-05-22 04:27:22 +00:00
|
|
|
|
void perf_counter_task_sched_out(struct task_struct *task,
|
|
|
|
|
struct task_struct *next, int cpu)
|
2008-12-04 19:12:29 +00:00
|
|
|
|
{
|
|
|
|
|
struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
|
perf_counter: Dynamically allocate tasks' perf_counter_context struct
This replaces the struct perf_counter_context in the task_struct with
a pointer to a dynamically allocated perf_counter_context struct. The
main reason for doing this is to allow us to transfer a
perf_counter_context from one task to another when we do lazy PMU
switching in a later patch.
This has a few side-benefits: the task_struct becomes a little smaller,
we save some memory because only tasks that have perf_counters attached
get a perf_counter_context allocated for them, and we can remove the
inclusion of <linux/perf_counter.h> in sched.h, meaning that we don't
end up recompiling nearly everything whenever perf_counter.h changes.
The perf_counter_context structures are reference-counted and freed
when the last reference is dropped. A context can have references
from its task and the counters on its task. Counters can outlive the
task so it is possible that a context will be freed well after its
task has exited.
Contexts are allocated on fork if the parent had a context, or
otherwise the first time that a per-task counter is created on a task.
In the latter case, we set the context pointer in the task struct
locklessly using an atomic compare-and-exchange operation in case we
raced with some other task in creating a context for the subject task.
This also removes the task pointer from the perf_counter struct. The
task pointer was not used anywhere and would make it harder to move a
context from one task to another. Anything that needed to know which
task a counter was attached to was already using counter->ctx->task.
The __perf_counter_init_context function moves up in perf_counter.c
so that it can be called from find_get_context, and now initializes
the refcount, but is otherwise unchanged.
We were potentially calling list_del_counter twice: once from
__perf_counter_exit_task when the task exits and once from
__perf_counter_remove_from_context when the counter's fd gets closed.
This adds a check in list_del_counter so it doesn't do anything if
the counter has already been removed from the lists.
Since perf_counter_task_sched_in doesn't do anything if the task doesn't
have a context, and leaves cpuctx->task_ctx = NULL, this adds code to
__perf_install_in_context to set cpuctx->task_ctx if necessary, i.e. in
the case where the current task adds the first counter to itself and
thus creates a context for itself.
This also adds similar code to __perf_counter_enable to handle a
similar situation which can arise when the counters have been disabled
using prctl; that also leaves cpuctx->task_ctx = NULL.
[ Impact: refactor counter context management to prepare for new feature ]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <18966.10075.781053.231153@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-22 04:17:31 +00:00
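A minimal sketch of the lockless installation described above, written from the commit description; the helper name is invented, and the real allocation path in this file handles a lost race by retrying under the locking rules described in the next commit rather than by simply adopting the winner's context:
/*
 * Sketch: publish a freshly allocated context into the task struct without
 * holding a lock.  If another thread installed one first, drop ours.
 */
static struct perf_counter_context *
sketch_install_task_ctx(struct task_struct *task)
{
	struct perf_counter_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;
	__perf_counter_init_context(ctx, task);

	/* install only if task->perf_counter_ctxp is still NULL */
	if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx) != NULL) {
		kfree(ctx);			/* lost the race */
		ctx = task->perf_counter_ctxp;	/* simplified: use the winner's */
	}
	return ctx;
}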
|
|
|
|
struct perf_counter_context *ctx = task->perf_counter_ctxp;
|
2009-05-22 04:27:22 +00:00
|
|
|
|
struct perf_counter_context *next_ctx;
|
perf_counter: Fix race in attaching counters to tasks and exiting
Commit 564c2b21 ("perf_counter: Optimize context switch between
identical inherited contexts") introduced a race where it is possible
that a counter being attached to a task could get attached to the
wrong task, if the task is one that has inherited its context from
another task via fork. This happens because the optimized context
switch could switch the context to another task after find_get_context
has read task->perf_counter_ctxp. In fact, it's possible that the
context could then get freed, if the other task then exits.
This fixes the problem by protecting both the context switch and the
critical code in find_get_context with spinlocks. The context switch
locks the ctx->lock of both the outgoing and incoming contexts before
swapping them. That means that once code such as find_get_context
has obtained the spinlock for the context associated with a task,
the context can't get swapped to another task. However, the context
may have been swapped in the interval between reading
task->perf_counter_ctxp and getting the lock, so it is necessary to
check and retry.
To make sure that none of the contexts being looked at in
find_get_context can get freed, this changes the context freeing code
to use RCU. Thus an rcu_read_lock() is sufficient to ensure that no
contexts can get freed. This part of the patch is lifted from a patch
posted by Peter Zijlstra.
This also adds a check to make sure that we can't add a counter to a
task that is exiting.
There is also a race between perf_counter_exit_task and
find_get_context; this solves the race by moving the get_ctx that
was in perf_counter_alloc into the locked region in find_get_context,
so that once find_get_context has got the context for a task, it
won't get freed even if the task calls perf_counter_exit_task. It
doesn't matter if new top-level (non-inherited) counters get attached
to the context after perf_counter_exit_task has detached the context
from the task. They will just stay there and never get scheduled in
until the counters' fds get closed, and then perf_release will remove
them from the context and eventually free the context.
With this, we are now doing the unclone in find_get_context rather
than when a counter was added to or removed from a context (actually,
we were missing the unclone_ctx() call when adding a counter to a
context). We don't need to unclone when removing a counter from a
context because we have no way to remove a counter from a cloned
context.
This also takes out the smp_wmb() in find_get_context, which Peter
Zijlstra pointed out was unnecessary because the cmpxchg implies a
full barrier anyway.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <18974.33033.667187.273886@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-28 12:18:17 +00:00
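A sketch of the check-and-retry pattern this commit describes for find_get_context(), simplified and with an invented helper name; on success it returns with ctx->lock held and interrupts disabled, which is what keeps the context from being swapped to another task:
/*
 * Sketch: the context pointer may be swapped to another task between the
 * rcu_dereference() and taking ctx->lock, so re-check it under the lock
 * and start over if it changed.  RCU keeps the context from being freed
 * while we look at it.
 */
static struct perf_counter_context *
sketch_lock_task_context(struct task_struct *task, unsigned long *flags)
{
	struct perf_counter_context *ctx;

	rcu_read_lock();
retry:
	ctx = rcu_dereference(task->perf_counter_ctxp);
	if (ctx) {
		spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
			/* swapped from under us; drop the lock and retry */
			spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}
	}
	rcu_read_unlock();
	return ctx;	/* if non-NULL, returned with ctx->lock held */
}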
|
|
|
|
struct perf_counter_context *parent;
|
2009-03-19 19:26:12 +00:00
|
|
|
|
struct pt_regs *regs;
|
2009-05-28 12:18:17 +00:00
|
|
|
|
int do_switch = 1;
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
2009-05-25 12:45:28 +00:00
|
|
|
|
regs = task_pt_regs(task);
|
2009-06-11 12:06:28 +00:00
|
|
|
|
perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
|
2009-05-25 12:45:28 +00:00
|
|
|
|
|
2009-05-22 04:17:31 +00:00
|
|
|
|
if (likely(!ctx || !cpuctx->task_ctx))
|
2008-12-04 19:12:29 +00:00
|
|
|
|
return;
|
|
|
|
|
|
2009-04-06 09:45:13 +00:00
|
|
|
|
update_context_time(ctx);
|
2009-05-28 12:18:17 +00:00
|
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
|
parent = rcu_dereference(ctx->parent_ctx);
|
2009-05-22 04:27:22 +00:00
|
|
|
|
next_ctx = next->perf_counter_ctxp;
|
2009-05-28 12:18:17 +00:00
|
|
|
|
if (parent && next_ctx &&
|
|
|
|
|
rcu_dereference(next_ctx->parent_ctx) == parent) {
|
|
|
|
|
/*
|
|
|
|
|
* Looks like the two contexts are clones, so we might be
|
|
|
|
|
* able to optimize the context switch. We lock both
|
|
|
|
|
* contexts and check that they are clones under the
|
|
|
|
|
* lock (including re-checking that neither has been
|
|
|
|
|
* uncloned in the meantime). It doesn't matter which
|
|
|
|
|
* order we take the locks because no other cpu could
|
|
|
|
|
* be trying to lock both of these tasks.
|
|
|
|
|
*/
|
|
|
|
|
spin_lock(&ctx->lock);
|
|
|
|
|
spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
|
|
|
|
|
if (context_equiv(ctx, next_ctx)) {
|
2009-05-29 12:51:57 +00:00
|
|
|
|
/*
|
|
|
|
|
* XXX do we need a memory barrier of sorts
|
|
|
|
|
* wrt the rcu_dereference() of perf_counter_ctxp
|
|
|
|
|
*/
|
2009-05-28 12:18:17 +00:00
|
|
|
|
task->perf_counter_ctxp = next_ctx;
|
|
|
|
|
next->perf_counter_ctxp = ctx;
|
|
|
|
|
ctx->task = next;
|
|
|
|
|
next_ctx->task = task;
|
|
|
|
|
do_switch = 0;
|
2009-06-24 19:11:59 +00:00
|
|
|
|
|
|
|
|
|
perf_counter_sync_stat(ctx, next_ctx);
|
2009-05-28 12:18:17 +00:00
|
|
|
|
}
|
|
|
|
|
spin_unlock(&next_ctx->lock);
|
|
|
|
|
spin_unlock(&ctx->lock);
|
perf_counter: Optimize context switch between identical inherited contexts
When monitoring a process and its descendants with a set of inherited
counters, we can often get the situation in a context switch where
both the old (outgoing) and new (incoming) process have the same set
of counters, and their values are ultimately going to be added together.
In that situation it doesn't matter which set of counters is used to
count the activity for the new process, so there is really no need to
go through the process of reading the hardware counters and updating
the old task's counters and then setting up the PMU for the new task.
This optimizes the context switch in this situation. Instead of
scheduling out the perf_counter_context for the old task and
scheduling in the new context, we simply transfer the old context
to the new task and keep using it without interruption. The new
context gets transferred to the old task. This means that both
tasks still have a valid perf_counter_context, so no special case
is introduced when the old task gets scheduled in again, either on
this CPU or another CPU.
The equivalence of contexts is detected by keeping a pointer in
each cloned context pointing to the context it was cloned from.
To cope with the situation where a context is changed by adding
or removing counters after it has been cloned, we also keep a
generation number on each context which is incremented every time
a context is changed. When a context is cloned we take a copy
of the parent's generation number, and two cloned contexts are
equivalent only if they have the same parent and the same
generation number. In order that the parent context pointer
remains valid (and is not reused), we increment the parent
context's reference count for each context cloned from it.
Since we don't have individual fds for the counters in a cloned
context, the only thing that can make two clones of a given parent
different after they have been cloned is enabling or disabling all
counters with prctl. To account for this, we keep a count of the
number of enabled counters in each context. Two contexts must have
the same number of enabled counters to be considered equivalent.
Here are some measurements of the context switch time as measured with
the lat_ctx benchmark from lmbench, comparing the times obtained with
and without this patch series:
               -----Unmodified-----       With this patch series
  Counters:    none    2 HW    4H+4S      none    2 HW    4H+4S

  2 processes:
  Average      3.44    6.45    11.24      3.12    3.39    3.60
  St dev       0.04    0.04     0.13      0.05    0.17    0.19

  8 processes:
  Average      6.45    8.79    14.00      5.57    6.23    7.57
  St dev       1.27    1.04     0.88      1.42    1.46    1.42

  32 processes:
  Average      5.56    8.43    13.78      5.28    5.55    7.15
  St dev       0.41    0.47     0.53      0.54    0.57    0.81
The numbers are the mean and standard deviation of 20 runs of
lat_ctx. The "none" columns are lat_ctx run directly without any
counters. The "2 HW" columns are with lat_ctx run under perfstat,
counting cycles and instructions. The "4H+4S" columns are lat_ctx run
under perfstat with 4 hardware counters and 4 software counters
(cycles, instructions, cache references, cache misses, task
clock, context switch, cpu migrations, and page faults).
[ Impact: performance optimization of counter context-switches ]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <18966.10666.517218.332164@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-22 04:27:22 +00:00
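The equivalence rule above (same parent, same generation at clone time, same number of enabled counters) reduces to a simple predicate. The sketch below uses illustrative field names rather than the kernel's exact perf_counter_context layout.

struct ctx_sketch {
        struct ctx_sketch       *parent_ctx;   /* context this one was cloned from */
        unsigned long long      parent_gen;    /* parent's generation when cloned */
        int                     nr_enabled;    /* counters currently enabled */
};

static int contexts_equivalent(const struct ctx_sketch *a,
                               const struct ctx_sketch *b)
{
        return a->parent_ctx != NULL &&
               a->parent_ctx == b->parent_ctx &&
               a->parent_gen == b->parent_gen &&
               a->nr_enabled == b->nr_enabled;
}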
|
|
|
|
}
|
2009-05-28 12:18:17 +00:00
|
|
|
|
rcu_read_unlock();
|
2009-05-22 04:27:22 +00:00
|
|
|
|
|
2009-05-28 12:18:17 +00:00
|
|
|
|
if (do_switch) {
|
|
|
|
|
__perf_counter_sched_out(ctx, cpuctx);
|
|
|
|
|
cpuctx->task_ctx = NULL;
|
|
|
|
|
}
|
2008-12-04 19:12:29 +00:00
|
|
|
|
}
|
|
|
|
|
|
2009-05-29 12:51:57 +00:00
|
|
|
|
/*
|
|
|
|
|
* Called with IRQs disabled
|
|
|
|
|
*/
|
2009-05-11 05:46:10 +00:00
|
|
|
|
static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
|
|
|
|
|
{
|
|
|
|
|
struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
|
|
|
|
|
|
perf_counter: Dynamically allocate tasks' perf_counter_context struct
This replaces the struct perf_counter_context in the task_struct with
a pointer to a dynamically allocated perf_counter_context struct. The
main reason for doing this is to allow us to transfer a
perf_counter_context from one task to another when we do lazy PMU
switching in a later patch.
This has a few side-benefits: the task_struct becomes a little smaller,
we save some memory because only tasks that have perf_counters attached
get a perf_counter_context allocated for them, and we can remove the
inclusion of <linux/perf_counter.h> in sched.h, meaning that we don't
end up recompiling nearly everything whenever perf_counter.h changes.
The perf_counter_context structures are reference-counted and freed
when the last reference is dropped. A context can have references
from its task and the counters on its task. Counters can outlive the
task so it is possible that a context will be freed well after its
task has exited.
Contexts are allocated on fork if the parent had a context, or
otherwise the first time that a per-task counter is created on a task.
In the latter case, we set the context pointer in the task struct
locklessly using an atomic compare-and-exchange operation in case we
raced with some other task in creating a context for the subject task.
This also removes the task pointer from the perf_counter struct. The
task pointer was not used anywhere and would make it harder to move a
context from one task to another. Anything that needed to know which
task a counter was attached to was already using counter->ctx->task.
The __perf_counter_init_context function moves up in perf_counter.c
so that it can be called from find_get_context, and now initializes
the refcount, but is otherwise unchanged.
We were potentially calling list_del_counter twice: once from
__perf_counter_exit_task when the task exits and once from
__perf_counter_remove_from_context when the counter's fd gets closed.
This adds a check in list_del_counter so it doesn't do anything if
the counter has already been removed from the lists.
Since perf_counter_task_sched_in doesn't do anything if the task doesn't
have a context, and leaves cpuctx->task_ctx = NULL, this adds code to
__perf_install_in_context to set cpuctx->task_ctx if necessary, i.e. in
the case where the current task adds the first counter to itself and
thus creates a context for itself.
This also adds similar code to __perf_counter_enable to handle a
similar situation which can arise when the counters have been disabled
using prctl; that also leaves cpuctx->task_ctx = NULL.
[ Impact: refactor counter context management to prepare for new feature ]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <18966.10075.781053.231153@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-22 04:17:31 +00:00
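The lockless install of the context pointer described above (compare-and-exchange into the task, back off if another task won the race) can be sketched in userspace with C11 atomics. The slot and structure names here are illustrative; the kernel uses cmpxchg() on task->perf_counter_ctxp, whose implied full barrier is what the later commit message refers to.

#include <stdatomic.h>
#include <stdlib.h>

struct sketch_ctx {
        int refcount;
};

static struct sketch_ctx *get_or_install_ctx(_Atomic(struct sketch_ctx *) *slot)
{
        struct sketch_ctx *expected = atomic_load(slot);

        if (expected)
                return expected;        /* the task already has a context */

        struct sketch_ctx *fresh = calloc(1, sizeof(*fresh));
        if (!fresh)
                return NULL;
        fresh->refcount = 1;

        /* Install only if the slot is still empty; otherwise back off. */
        if (!atomic_compare_exchange_strong(slot, &expected, fresh)) {
                free(fresh);            /* lost the race */
                return expected;        /* use the winner's context */
        }
        return fresh;
}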
|
|
|
|
if (!cpuctx->task_ctx)
|
|
|
|
|
return;
|
2009-05-17 09:08:41 +00:00
|
|
|
|
|
|
|
|
|
if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
|
|
|
|
|
return;
|
|
|
|
|
|
2009-05-11 05:46:10 +00:00
|
|
|
|
__perf_counter_sched_out(ctx, cpuctx);
|
|
|
|
|
cpuctx->task_ctx = NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2009-05-29 12:51:57 +00:00
|
|
|
|
/*
|
|
|
|
|
* Called with IRQs disabled
|
|
|
|
|
*/
|
2008-12-21 13:43:25 +00:00
|
|
|
|
static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
|
2008-12-11 07:38:42 +00:00
|
|
|
|
{
|
2008-12-21 13:43:25 +00:00
|
|
|
|
__perf_counter_sched_out(&cpuctx->ctx, cpuctx);
|
2008-12-11 07:38:42 +00:00
|
|
|
|
}
|
|
|
|
|
|
2008-12-21 13:43:25 +00:00
|
|
|
|
static void
|
|
|
|
|
__perf_counter_sched_in(struct perf_counter_context *ctx,
|
|
|
|
|
struct perf_cpu_context *cpuctx, int cpu)
|
2008-12-04 19:12:29 +00:00
|
|
|
|
{
|
|
|
|
|
struct perf_counter *counter;
|
2009-01-12 04:11:00 +00:00
|
|
|
|
int can_add_hw = 1;
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
perf_counter: Add counter enable/disable ioctls
Impact: New perf_counter features
This primarily adds a way for perf_counter users to enable and disable
counters and groups. Enabling or disabling a counter or group also
enables or disables all of the child counters that have been cloned
from it to monitor children of the task monitored by the top-level
counter. The userspace interface to enable/disable counters is via
ioctl on the counter file descriptor.
Along the way this extends the code that handles child counters to
handle child counter groups properly. A group with multiple counters
will be cloned to child tasks if and only if the group leader has the
hw_event.inherit bit set - if it is set the whole group is cloned as a
group in the child task.
In order to be able to enable or disable all child counters of a given
top-level counter, we need a way to find them all. Hence I have added
a child_list field to struct perf_counter, which is the head of the
list of children for a top-level counter, or the link in that list for
a child counter. That list is protected by the perf_counter.mutex
field.
This also adds a mutex to the perf_counter_context struct. Previously
the list of counters was protected just by the lock field in the
context, which meant that perf_counter_init_task had to take that lock
and then take whatever lock/mutex protects the top-level counter's
child_list. But the counter enable/disable functions need to take
that lock in order to traverse the list, then for each counter take
the lock in that counter's context in order to change the counter's
state safely, which would lead to a deadlock.
To solve this, we now have both a mutex and a spinlock in the context,
and taking either is sufficient to ensure the list of counters can't
change - you have to take both before changing the list. Now
perf_counter_init_task takes the mutex instead of the lock (which
incidentally means that inherit_counter can use GFP_KERNEL instead of
GFP_ATOMIC) and thus avoids the possible deadlock. Similarly the new
enable/disable functions can take the mutex while traversing the list
of child counters without incurring a possible deadlock when the
counter manipulation code locks the context for a child counter.
We also had a misfeature that the first counter added to a context
would possibly not go on until the next sched-in, because we were
using ctx->nr_active to detect if the context was running on a CPU.
But nr_active is the number of active counters, and if that was zero
(because the context didn't have any counters yet) it would look like
the context wasn't running on a cpu and so the retry code in
__perf_install_in_context wouldn't retry. So this adds an 'is_active'
field that is set when the context is on a CPU, even if it has no
counters. The is_active field is only used for task contexts, not for
per-cpu contexts.
If we enable a subsidiary counter in a group that is active on a CPU,
and the arch code can't enable the counter, then we have to pull the
whole group off the CPU. We do this with group_sched_out, which gets
moved up in the file so it comes before all its callers. This also
adds similar logic to __perf_install_in_context so that the "all on,
or none" invariant of groups is preserved when adding a new counter to
a group.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-17 07:10:22 +00:00
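From userspace, the enable/disable interface described above is driven through the counter's file descriptor. A hedged sketch follows; it assumes the PERF_COUNTER_IOC_ENABLE/PERF_COUNTER_IOC_DISABLE ioctl constants exported by the perf_counter.h headers of this era, and relies on the behaviour described above that enabling a counter also enables its cloned children.

#include <sys/ioctl.h>
#include <linux/perf_counter.h>

/* Start counting on an already-open counter fd (and its cloned children). */
static int counter_enable(int counter_fd)
{
        return ioctl(counter_fd, PERF_COUNTER_IOC_ENABLE);
}

/* Stop counting without closing the counter. */
static int counter_disable(int counter_fd)
{
        return ioctl(counter_fd, PERF_COUNTER_IOC_DISABLE);
}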
|
|
|
|
spin_lock(&ctx->lock);
|
|
|
|
|
ctx->is_active = 1;
|
2008-12-04 19:12:29 +00:00
|
|
|
|
if (likely(!ctx->nr_counters))
|
2009-01-17 07:10:22 +00:00
|
|
|
|
goto out;
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
2009-04-06 09:45:10 +00:00
|
|
|
|
ctx->timestamp = perf_clock();
|
perf_counter: record time running and time enabled for each counter
Impact: new functionality
Currently, if there are more counters enabled than can fit on the CPU,
the kernel will multiplex the counters onto the hardware using
round-robin scheduling. That isn't too bad for sampling counters, but
for counting counters it means that the value read from a counter
represents some unknown fraction of the true count of events that
occurred while the counter was enabled.
This remedies the situation by keeping track of how long each counter
is enabled for, and how long it is actually on the cpu and counting
events. These times are recorded in nanoseconds using the task clock
for per-task counters and the cpu clock for per-cpu counters.
These values can be supplied to userspace on a read from the counter.
Userspace requests that they be supplied after the counter value by
setting the PERF_FORMAT_TOTAL_TIME_ENABLED and/or
PERF_FORMAT_TOTAL_TIME_RUNNING bits in the hw_event.read_format field
when creating the counter. (There is no way to change the read format
after the counter is created, though it would be possible to add some
way to do that.)
Using this information it is possible for userspace to scale the count
it reads from the counter to get an estimate of the true count:
true_count_estimate = count * total_time_enabled / total_time_running
This also lets userspace detect the situation where the counter never
got to go on the cpu: total_time_running == 0.
This functionality has been requested by the PAPI developers, and will
be generally needed for interpreting the count values from counting
counters correctly.
In the implementation, this keeps 5 time values (in nanoseconds) for
each counter: total_time_enabled and total_time_running are used when
the counter is in state OFF or ERROR and for reporting back to
userspace. When the counter is in state INACTIVE or ACTIVE, it is the
tstamp_enabled, tstamp_running and tstamp_stopped values that are
relevant, and total_time_enabled and total_time_running are determined
from them. (tstamp_stopped is only used in INACTIVE state.) The
reason for doing it like this is that it means that only counters
being enabled or disabled at sched-in and sched-out time need to be
updated. There are no new loops that iterate over all counters to
update total_time_enabled or total_time_running.
This also keeps separate child_total_time_running and
child_total_time_enabled fields that get added in when reporting the
totals to userspace. They are separate fields so that they can be
atomic. We don't want to use atomics for total_time_running,
total_time_enabled etc., because then we would have to use atomic
sequences to update them, which are slower than regular arithmetic and
memory accesses.
It is possible to measure total_time_running by adding a task_clock
counter to each group of counters, and total_time_enabled can be
measured approximately with a top-level task_clock counter (though
inaccuracies will creep in if you need to disable and enable groups
since it is not possible in general to disable/enable the top-level
task_clock counter simultaneously with another group). However, that
adds extra overhead - I measured around 15% increase in the context
switch latency reported by lat_ctx (from lmbench) when a task_clock
counter was added to each of 2 groups, and around 25% increase when a
task_clock counter was added to each of 4 groups. (In both cases a
top-level task-clock counter was also added.)
In contrast, the code added in this commit gives better information
with no overhead that I could measure (in fact in some cases I
measured lower times with this code, but the differences were all less
than one standard deviation).
[ v2: address review comments by Andrew Morton. ]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Andrew Morton <akpm@linux-foundation.org>
Orig-LKML-Reference: <18890.6578.728637.139402@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-03-25 11:46:58 +00:00
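The scaling recipe above translates directly into userspace code. The sketch below assumes the counter was created with PERF_FORMAT_TOTAL_TIME_ENABLED and PERF_FORMAT_TOTAL_TIME_RUNNING set in read_format, so that read() returns the value followed by the two times as 64-bit words; that layout is an assumption for illustration, not taken from a header.

#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>

struct scaled_read {
        uint64_t value;
        uint64_t time_enabled;  /* ns the counter was enabled */
        uint64_t time_running;  /* ns it actually spent on the PMU */
};

static uint64_t read_scaled_count(int counter_fd)
{
        struct scaled_read r;

        if (read(counter_fd, &r, sizeof(r)) != (ssize_t)sizeof(r))
                return 0;
        if (r.time_running == 0)
                return 0;       /* counter never got onto the CPU */
        /* true_count_estimate = count * total_time_enabled / total_time_running */
        return (uint64_t)((double)r.value *
                          (double)r.time_enabled / (double)r.time_running);
}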
|
|
|
|
|
2009-05-13 14:21:38 +00:00
|
|
|
|
perf_disable();
|
perf_counter: Add support for pinned and exclusive counter groups
Impact: New perf_counter features
A pinned counter group is one that the user wants to have on the CPU
whenever possible, i.e. whenever the associated task is running, for
a per-task group, or always for a per-cpu group. If the system
cannot satisfy that, it puts the group into an error state where
it is not scheduled any more and reads from it return EOF (i.e. 0
bytes read). The group can be released from error state and made
readable again using prctl(PR_TASK_PERF_COUNTERS_ENABLE). When we
have finer-grained enable/disable controls on counters we'll be able
to reset the error state on individual groups.
An exclusive group is one that the user wants to be the only group
using the CPU performance monitor hardware whenever it is on. The
counter group scheduler will not schedule an exclusive group if there
are already other groups on the CPU and will not schedule other groups
onto the CPU if there is an exclusive group scheduled (that statement
does not apply to groups containing only software counters, which can
always go on and which do not prevent an exclusive group from going on).
With an exclusive group, we will be able to let users program PMU
registers at a low level without the concern that those settings will
perturb other measurements.
Along the way this reorganizes things a little:
- is_software_counter() is moved to perf_counter.h.
- cpuctx->active_oncpu now records the number of hardware counters on
the CPU, i.e. it now excludes software counters. Nothing was reading
cpuctx->active_oncpu before, so this change is harmless.
- A new cpuctx->exclusive field records whether we currently have an
exclusive group on the CPU.
- counter_sched_out moves higher up in perf_counter.c and gets called
from __perf_counter_remove_from_context and __perf_counter_exit_task,
where we used to have essentially the same code.
- __perf_counter_sched_in now goes through the counter list twice, doing
the pinned counters in the first loop and the non-pinned counters in
the second loop, in order to give the pinned counters the best chance
to be scheduled in.
Note that only a group leader can be exclusive or pinned, and that
attribute applies to the whole group. This avoids some awkwardness in
some corner cases (e.g. where a group leader is closed and the other
group members get added to the context list). If we want to relax that
restriction later, we can, and it is easier to relax a restriction than
to apply a new one.
This doesn't yet handle the case where a pinned counter is inherited
and goes into error state in the child - the error state is not
propagated up to the parent when the child exits, and arguably it
should.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-14 10:00:30 +00:00
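The pinned/exclusive scheduling policy above boils down to a small predicate: software-only groups always go on, nothing else goes on while an exclusive group is on, an exclusive group needs the PMU to itself, and otherwise a hardware group goes on only while there is still room. The sketch below uses illustrative types and is not the kernel's group_can_go_on() verbatim.

struct cpu_state_sketch {
        int active_hw_groups;   /* hardware groups currently on the PMU */
        int exclusive;          /* is an exclusive group currently on? */
};

struct group_sketch {
        int software_only;      /* contains only software counters */
        int exclusive;          /* user asked for exclusive PMU use */
};

static int group_can_go_on_sketch(const struct group_sketch *g,
                                  const struct cpu_state_sketch *cpu,
                                  int can_add_hw)
{
        if (g->software_only)
                return 1;               /* software counters always fit */
        if (cpu->exclusive)
                return 0;               /* PMU is reserved by an exclusive group */
        if (g->exclusive && cpu->active_hw_groups)
                return 0;               /* exclusive group wants the PMU alone */
        return can_add_hw;              /* otherwise, only if there is room left */
}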
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* First go through the list and put on any pinned groups
|
|
|
|
|
* in order to give them the best chance of going on.
|
|
|
|
|
*/
|
|
|
|
|
list_for_each_entry(counter, &ctx->counter_list, list_entry) {
|
|
|
|
|
if (counter->state <= PERF_COUNTER_STATE_OFF ||
|
2009-06-02 17:22:16 +00:00
|
|
|
|
!counter->attr.pinned)
|
2009-01-14 10:00:30 +00:00
|
|
|
|
continue;
|
|
|
|
|
if (counter->cpu != -1 && counter->cpu != cpu)
|
|
|
|
|
continue;
|
|
|
|
|
|
2009-05-20 10:21:22 +00:00
|
|
|
|
if (counter != counter->group_leader)
|
|
|
|
|
counter_sched_in(counter, cpuctx, ctx, cpu);
|
|
|
|
|
else {
|
|
|
|
|
if (group_can_go_on(counter, cpuctx, 1))
|
|
|
|
|
group_sched_in(counter, cpuctx, ctx, cpu);
|
|
|
|
|
}
|
2009-01-14 10:00:30 +00:00
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* If this pinned group hasn't been scheduled,
|
|
|
|
|
* put it in error state.
|
|
|
|
|
*/
|
2009-03-25 11:46:58 +00:00
|
|
|
|
if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
|
|
|
|
|
update_group_times(counter);
|
2009-01-14 10:00:30 +00:00
|
|
|
|
counter->state = PERF_COUNTER_STATE_ERROR;
|
2009-03-25 11:46:58 +00:00
|
|
|
|
}
|
2009-01-14 10:00:30 +00:00
|
|
|
|
}
|
|
|
|
|
|
2008-12-11 07:38:42 +00:00
|
|
|
|
list_for_each_entry(counter, &ctx->counter_list, list_entry) {
|
2009-01-14 10:00:30 +00:00
|
|
|
|
/*
|
|
|
|
|
* Ignore counters in OFF or ERROR state, and
|
|
|
|
|
* ignore pinned counters since we did them already.
|
|
|
|
|
*/
|
|
|
|
|
if (counter->state <= PERF_COUNTER_STATE_OFF ||
|
2009-06-02 17:22:16 +00:00
|
|
|
|
counter->attr.pinned)
|
2009-01-14 10:00:30 +00:00
|
|
|
|
continue;
|
|
|
|
|
|
2008-12-11 07:38:42 +00:00
|
|
|
|
/*
|
|
|
|
|
* Listen to the 'cpu' scheduling filter constraint
|
|
|
|
|
* of counters:
|
|
|
|
|
*/
|
2008-12-04 19:12:29 +00:00
|
|
|
|
if (counter->cpu != -1 && counter->cpu != cpu)
|
|
|
|
|
continue;
|
|
|
|
|
|
2009-05-20 10:21:22 +00:00
|
|
|
|
if (counter != counter->group_leader) {
|
|
|
|
|
if (counter_sched_in(counter, cpuctx, ctx, cpu))
|
2009-01-12 04:11:00 +00:00
|
|
|
|
can_add_hw = 0;
|
2009-05-20 10:21:22 +00:00
|
|
|
|
} else {
|
|
|
|
|
if (group_can_go_on(counter, cpuctx, can_add_hw)) {
|
|
|
|
|
if (group_sched_in(counter, cpuctx, ctx, cpu))
|
|
|
|
|
can_add_hw = 0;
|
|
|
|
|
}
|
2009-01-14 10:00:30 +00:00
|
|
|
|
}
|
2008-12-04 19:12:29 +00:00
|
|
|
|
}
|
2009-05-13 14:21:38 +00:00
|
|
|
|
perf_enable();
|
perf_counter: Add counter enable/disable ioctls
Impact: New perf_counter features
This primarily adds a way for perf_counter users to enable and disable
counters and groups. Enabling or disabling a counter or group also
enables or disables all of the child counters that have been cloned
from it to monitor children of the task monitored by the top-level
counter. The userspace interface to enable/disable counters is via
ioctl on the counter file descriptor.
Along the way this extends the code that handles child counters to
handle child counter groups properly. A group with multiple counters
will be cloned to child tasks if and only if the group leader has the
hw_event.inherit bit set - if it is set the whole group is cloned as a
group in the child task.
In order to be able to enable or disable all child counters of a given
top-level counter, we need a way to find them all. Hence I have added
a child_list field to struct perf_counter, which is the head of the
list of children for a top-level counter, or the link in that list for
a child counter. That list is protected by the perf_counter.mutex
field.
This also adds a mutex to the perf_counter_context struct. Previously
the list of counters was protected just by the lock field in the
context, which meant that perf_counter_init_task had to take that lock
and then take whatever lock/mutex protects the top-level counter's
child_list. But the counter enable/disable functions need to take
that lock in order to traverse the list, then for each counter take
the lock in that counter's context in order to change the counter's
state safely, which would lead to a deadlock.
To solve this, we now have both a mutex and a spinlock in the context,
and taking either is sufficient to ensure the list of counters can't
change - you have to take both before changing the list. Now
perf_counter_init_task takes the mutex instead of the lock (which
incidentally means that inherit_counter can use GFP_KERNEL instead of
GFP_ATOMIC) and thus avoids the possible deadlock. Similarly the new
enable/disable functions can take the mutex while traversing the list
of child counters without incurring a possible deadlock when the
counter manipulation code locks the context for a child counter.
We also had a misfeature that the first counter added to a context
would possibly not go on until the next sched-in, because we were
using ctx->nr_active to detect if the context was running on a CPU.
But nr_active is the number of active counters, and if that was zero
(because the context didn't have any counters yet) it would look like
the context wasn't running on a cpu and so the retry code in
__perf_install_in_context wouldn't retry. So this adds an 'is_active'
field that is set when the context is on a CPU, even if it has no
counters. The is_active field is only used for task contexts, not for
per-cpu contexts.
If we enable a subsidiary counter in a group that is active on a CPU,
and the arch code can't enable the counter, then we have to pull the
whole group off the CPU. We do this with group_sched_out, which gets
moved up in the file so it comes before all its callers. This also
adds similar logic to __perf_install_in_context so that the "all on,
or none" invariant of groups is preserved when adding a new counter to
a group.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-17 07:10:22 +00:00
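From userspace this boils down to an ioctl on the counter's file descriptor;
enabling or disabling through a counter's fd also acts on the child counters cloned
from it, as described above. A minimal sketch, assuming a counter fd already
obtained from the perf_counter_open syscall and the PERF_COUNTER_IOC_ENABLE /
PERF_COUNTER_IOC_DISABLE ioctls declared in <linux/perf_counter.h> of this era:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/perf_counter.h>

/* Enable or disable a counter (and its inherited children) via its fd. */
static int set_counter_enabled(int counter_fd, int enable)
{
        unsigned long req = enable ? PERF_COUNTER_IOC_ENABLE
                                   : PERF_COUNTER_IOC_DISABLE;

        if (ioctl(counter_fd, req, 0) < 0) {
                perror("ioctl(PERF_COUNTER_IOC_*)");
                return -1;
        }
        return 0;
}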
|
|
|
|
out:
|
2008-12-04 19:12:29 +00:00
|
|
|
|
spin_unlock(&ctx->lock);
|
2008-12-21 13:43:25 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Called from scheduler to add the counters of the current task
|
|
|
|
|
* with interrupts disabled.
|
|
|
|
|
*
|
|
|
|
|
* We restore the counter value and then enable it.
|
|
|
|
|
*
|
|
|
|
|
* This does not protect us against NMI, but enable()
|
|
|
|
|
* sets the enabled bit in the control field of counter _before_
|
|
|
|
|
 * accessing the counter control register. If an NMI hits, then it will
|
|
|
|
|
* keep the counter running.
|
|
|
|
|
*/
|
|
|
|
|
void perf_counter_task_sched_in(struct task_struct *task, int cpu)
|
|
|
|
|
{
|
|
|
|
|
struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
|
perf_counter: Dynamically allocate tasks' perf_counter_context struct
This replaces the struct perf_counter_context in the task_struct with
a pointer to a dynamically allocated perf_counter_context struct. The
main reason for doing this is to allow us to transfer a
perf_counter_context from one task to another when we do lazy PMU
switching in a later patch.
This has a few side-benefits: the task_struct becomes a little smaller,
we save some memory because only tasks that have perf_counters attached
get a perf_counter_context allocated for them, and we can remove the
inclusion of <linux/perf_counter.h> in sched.h, meaning that we don't
end up recompiling nearly everything whenever perf_counter.h changes.
The perf_counter_context structures are reference-counted and freed
when the last reference is dropped. A context can have references
from its task and the counters on its task. Counters can outlive the
task so it is possible that a context will be freed well after its
task has exited.
Contexts are allocated on fork if the parent had a context, or
otherwise the first time that a per-task counter is created on a task.
In the latter case, we set the context pointer in the task struct
locklessly using an atomic compare-and-exchange operation in case we
raced with some other task in creating a context for the subject task.
This also removes the task pointer from the perf_counter struct. The
task pointer was not used anywhere and would make it harder to move a
context from one task to another. Anything that needed to know which
task a counter was attached to was already using counter->ctx->task.
The __perf_counter_init_context function moves up in perf_counter.c
so that it can be called from find_get_context, and now initializes
the refcount, but is otherwise unchanged.
We were potentially calling list_del_counter twice: once from
__perf_counter_exit_task when the task exits and once from
__perf_counter_remove_from_context when the counter's fd gets closed.
This adds a check in list_del_counter so it doesn't do anything if
the counter has already been removed from the lists.
Since perf_counter_task_sched_in doesn't do anything if the task doesn't
have a context, and leaves cpuctx->task_ctx = NULL, this adds code to
__perf_install_in_context to set cpuctx->task_ctx if necessary, i.e. in
the case where the current task adds the first counter to itself and
thus creates a context for itself.
This also adds similar code to __perf_counter_enable to handle a
similar situation which can arise when the counters have been disabled
using prctl; that also leaves cpuctx->task_ctx = NULL.
[ Impact: refactor counter context management to prepare for new feature ]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <18966.10075.781053.231153@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-22 04:17:31 +00:00
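The lockless installation described above (the kernel side uses a compare-and-exchange
on the task_struct pointer) can be sketched in plain C11 with hypothetical names:
allocate a context, try to install it, and fall back to the winner's context if
another thread raced us.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct sketch_ctx { int refcount; };

/* One pointer per "task" in this sketch; NULL until a counter is attached. */
static _Atomic(struct sketch_ctx *) task_ctx;

static struct sketch_ctx *get_task_ctx(void)
{
        struct sketch_ctx *ctx = atomic_load(&task_ctx);
        struct sketch_ctx *fresh;

        if (ctx)
                return ctx;

        fresh = calloc(1, sizeof(*fresh));
        if (!fresh)
                return NULL;
        fresh->refcount = 1;

        ctx = NULL;     /* expected value: no context installed yet */
        if (atomic_compare_exchange_strong(&task_ctx, &ctx, fresh))
                return fresh;   /* we won the race and installed ours */

        free(fresh);            /* somebody else installed one first */
        return ctx;             /* ctx now holds the winner's pointer */
}

int main(void)
{
        printf("ctx installed at %p\n", (void *)get_task_ctx());
        return 0;
}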
|
|
|
|
struct perf_counter_context *ctx = task->perf_counter_ctxp;
|
2008-12-11 07:38:42 +00:00
|
|
|
|
|
2009-05-22 04:17:31 +00:00
|
|
|
|
if (likely(!ctx))
|
|
|
|
|
return;
|
perf_counter: Optimize context switch between identical inherited contexts
When monitoring a process and its descendants with a set of inherited
counters, we can often get the situation in a context switch where
both the old (outgoing) and new (incoming) process have the same set
of counters, and their values are ultimately going to be added together.
In that situation it doesn't matter which set of counters are used to
count the activity for the new process, so there is really no need to
go through the process of reading the hardware counters and updating
the old task's counters and then setting up the PMU for the new task.
This optimizes the context switch in this situation. Instead of
scheduling out the perf_counter_context for the old task and
scheduling in the new context, we simply transfer the old context
to the new task and keep using it without interruption. The new
context gets transferred to the old task. This means that both
tasks still have a valid perf_counter_context, so no special case
is introduced when the old task gets scheduled in again, either on
this CPU or another CPU.
The equivalence of contexts is detected by keeping a pointer in
each cloned context pointing to the context it was cloned from.
To cope with the situation where a context is changed by adding
or removing counters after it has been cloned, we also keep a
generation number on each context which is incremented every time
a context is changed. When a context is cloned we take a copy
of the parent's generation number, and two cloned contexts are
equivalent only if they have the same parent and the same
generation number. In order that the parent context pointer
remains valid (and is not reused), we increment the parent
context's reference count for each context cloned from it.
Since we don't have individual fds for the counters in a cloned
context, the only thing that can make two clones of a given parent
different after they have been cloned is enabling or disabling all
counters with prctl. To account for this, we keep a count of the
number of enabled counters in each context. Two contexts must have
the same number of enabled counters to be considered equivalent.
Here are some measurements of the context switch time as measured with
the lat_ctx benchmark from lmbench, comparing the times obtained with
and without this patch series:
                  -----Unmodified-----        With this patch series
Counters:        none    2 HW    4H+4S       none    2 HW    4H+4S
2 processes:
  Average        3.44    6.45    11.24       3.12    3.39     3.60
  St dev         0.04    0.04     0.13       0.05    0.17     0.19
8 processes:
  Average        6.45    8.79    14.00       5.57    6.23     7.57
  St dev         1.27    1.04     0.88       1.42    1.46     1.42
32 processes:
  Average        5.56    8.43    13.78       5.28    5.55     7.15
  St dev         0.41    0.47     0.53       0.54    0.57     0.81
The numbers are the mean and standard deviation of 20 runs of
lat_ctx. The "none" columns are lat_ctx run directly without any
counters. The "2 HW" columns are with lat_ctx run under perfstat,
counting cycles and instructions. The "4H+4S" columns are lat_ctx run
under perfstat with 4 hardware counters and 4 software counters
(cycles, instructions, cache references, cache misses, task
clock, context switch, cpu migrations, and page faults).
[ Impact: performance optimization of counter context-switches ]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <18966.10666.517218.332164@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-22 04:27:22 +00:00
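A sketch of the equivalence test described above (hypothetical, simplified fields):
two cloned contexts can be swapped at context-switch time only if they were cloned
from the same parent at the same generation and currently have the same number of
enabled counters.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sketch_ctx {
        struct sketch_ctx *parent;      /* context this one was cloned from */
        uint64_t parent_gen;            /* parent's generation at clone time */
        uint64_t generation;            /* bumped whenever counters are added/removed */
        int nr_enabled;                 /* enabled counters (prctl can change this) */
};

static bool ctx_equivalent(const struct sketch_ctx *a,
                           const struct sketch_ctx *b)
{
        return a->parent && b->parent &&
               a->parent == b->parent &&
               a->parent_gen == b->parent_gen &&
               a->nr_enabled == b->nr_enabled;
}

int main(void)
{
        struct sketch_ctx parent = { 0 };
        struct sketch_ctx a = { .parent = &parent, .parent_gen = 7, .nr_enabled = 2 };
        struct sketch_ctx b = { .parent = &parent, .parent_gen = 7, .nr_enabled = 2 };

        printf("equivalent: %d\n", ctx_equivalent(&a, &b));     /* prints 1 */
        return 0;
}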
|
|
|
|
if (cpuctx->task_ctx == ctx)
|
|
|
|
|
return;
|
2008-12-21 13:43:25 +00:00
|
|
|
|
__perf_counter_sched_in(ctx, cpuctx, cpu);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
cpuctx->task_ctx = ctx;
|
|
|
|
|
}
|
|
|
|
|
|
2008-12-21 13:43:25 +00:00
|
|
|
|
static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
|
|
|
|
|
{
|
|
|
|
|
struct perf_counter_context *ctx = &cpuctx->ctx;
|
|
|
|
|
|
|
|
|
|
__perf_counter_sched_in(ctx, cpuctx, cpu);
|
|
|
|
|
}
|
|
|
|
|
|
2009-05-25 15:39:05 +00:00
|
|
|
|
#define MAX_INTERRUPTS (~0ULL)
|
|
|
|
|
|
|
|
|
|
static void perf_log_throttle(struct perf_counter *counter, int enable);
|
2009-05-20 10:21:20 +00:00
|
|
|
|
|
2009-06-10 11:40:57 +00:00
|
|
|
|
static void perf_adjust_period(struct perf_counter *counter, u64 events)
|
|
|
|
|
{
|
|
|
|
|
struct hw_perf_counter *hwc = &counter->hw;
|
|
|
|
|
u64 period, sample_period;
|
|
|
|
|
s64 delta;
|
|
|
|
|
|
|
|
|
|
events *= hwc->sample_period;
|
|
|
|
|
period = div64_u64(events, counter->attr.sample_freq);
|
|
|
|
|
|
|
|
|
|
delta = (s64)(period - hwc->sample_period);
|
|
|
|
|
delta = (delta + 7) / 8; /* low pass filter */
|
|
|
|
|
|
|
|
|
|
sample_period = hwc->sample_period + delta;
|
|
|
|
|
|
|
|
|
|
if (!sample_period)
|
|
|
|
|
sample_period = 1;
|
|
|
|
|
|
|
|
|
|
hwc->sample_period = sample_period;
|
|
|
|
|
}
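The arithmetic above is easy to check in isolation. A standalone sketch of the same
adjustment (plain C, no kernel types): estimate the period that would have produced
the requested frequency and move one eighth of the way towards it.

#include <stdint.h>
#include <stdio.h>

/* target_freq must be non-zero; the kernel skips counters without a sample_freq. */
static uint64_t adjust_period_sketch(uint64_t sample_period, uint64_t events,
                                     uint64_t target_freq)
{
        uint64_t period = (events * sample_period) / target_freq;
        int64_t delta = (int64_t)(period - sample_period);

        delta = (delta + 7) / 8;        /* low pass filter */
        sample_period += delta;
        if (!sample_period)
                sample_period = 1;
        return sample_period;
}

int main(void)
{
        /* 2000 interrupts/sec observed against a 1000 Hz target: the period
         * grows (here from 10000 to 11250), which lowers the interrupt rate. */
        printf("new period: %llu\n",
               (unsigned long long)adjust_period_sketch(10000, 2000, 1000));
        return 0;
}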
|
|
|
|
|
|
|
|
|
|
static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
|
2009-05-15 13:19:28 +00:00
|
|
|
|
{
|
|
|
|
|
struct perf_counter *counter;
|
2009-06-05 16:01:29 +00:00
|
|
|
|
struct hw_perf_counter *hwc;
|
2009-06-10 11:40:57 +00:00
|
|
|
|
u64 interrupts, freq;
|
2009-05-15 13:19:28 +00:00
|
|
|
|
|
|
|
|
|
spin_lock(&ctx->lock);
|
|
|
|
|
list_for_each_entry(counter, &ctx->counter_list, list_entry) {
|
|
|
|
|
if (counter->state != PERF_COUNTER_STATE_ACTIVE)
|
|
|
|
|
continue;
|
|
|
|
|
|
2009-06-05 16:01:29 +00:00
|
|
|
|
hwc = &counter->hw;
|
|
|
|
|
|
|
|
|
|
interrupts = hwc->interrupts;
|
|
|
|
|
hwc->interrupts = 0;
|
2009-05-25 15:39:05 +00:00
|
|
|
|
|
2009-06-10 11:40:57 +00:00
|
|
|
|
/*
|
|
|
|
|
* unthrottle counters on the tick
|
|
|
|
|
*/
|
2009-05-25 15:39:05 +00:00
|
|
|
|
if (interrupts == MAX_INTERRUPTS) {
|
|
|
|
|
perf_log_throttle(counter, 1);
|
|
|
|
|
counter->pmu->unthrottle(counter);
|
2009-06-11 09:25:05 +00:00
|
|
|
|
interrupts = 2*sysctl_perf_counter_sample_rate/HZ;
|
2009-05-25 15:39:05 +00:00
|
|
|
|
}
|
|
|
|
|
|
2009-06-02 17:22:16 +00:00
|
|
|
|
if (!counter->attr.freq || !counter->attr.sample_freq)
|
2009-05-15 13:19:28 +00:00
|
|
|
|
continue;
|
|
|
|
|
|
2009-06-10 11:40:57 +00:00
|
|
|
|
/*
|
|
|
|
|
* if the specified freq < HZ then we need to skip ticks
|
|
|
|
|
*/
|
2009-06-05 16:01:29 +00:00
|
|
|
|
if (counter->attr.sample_freq < HZ) {
|
|
|
|
|
freq = counter->attr.sample_freq;
|
|
|
|
|
|
|
|
|
|
hwc->freq_count += freq;
|
|
|
|
|
hwc->freq_interrupts += interrupts;
|
|
|
|
|
|
|
|
|
|
if (hwc->freq_count < HZ)
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
interrupts = hwc->freq_interrupts;
|
|
|
|
|
hwc->freq_interrupts = 0;
|
|
|
|
|
hwc->freq_count -= HZ;
|
|
|
|
|
} else
|
|
|
|
|
freq = HZ;
|
|
|
|
|
|
2009-06-10 11:40:57 +00:00
|
|
|
|
perf_adjust_period(counter, freq * interrupts);
|
2009-05-15 13:19:28 +00:00
|
|
|
|
|
2009-06-10 11:40:57 +00:00
|
|
|
|
/*
|
|
|
|
|
* In order to avoid being stalled by an (accidental) huge
|
|
|
|
|
* sample period, force reset the sample period if we didn't
|
|
|
|
|
* get any events in this freq period.
|
|
|
|
|
*/
|
|
|
|
|
if (!interrupts) {
|
|
|
|
|
perf_disable();
|
|
|
|
|
counter->pmu->disable(counter);
|
2009-06-13 07:06:50 +00:00
|
|
|
|
atomic64_set(&hwc->period_left, 0);
|
2009-06-10 11:40:57 +00:00
|
|
|
|
counter->pmu->enable(counter);
|
|
|
|
|
perf_enable();
|
|
|
|
|
}
|
2009-05-15 13:19:28 +00:00
|
|
|
|
}
|
|
|
|
|
spin_unlock(&ctx->lock);
|
|
|
|
|
}
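For completeness, the tick-skipping bookkeeping in the loop above can be isolated as
follows (a sketch with hypothetical names, HZ fixed at 1000): when the requested
sampling frequency is below the tick rate, interrupts are accumulated across ticks
and an adjustment is only made once a full second's worth of ticks has been seen.

#include <stdint.h>

#define SKETCH_HZ 1000

struct freq_state {
        uint64_t freq_count;            /* accumulated frequency "ticks" */
        uint64_t freq_interrupts;       /* interrupts seen in this window */
};

/* Returns the interrupt count for this adjustment window (the caller scales it
 * by the frequency, as the loop above does), or 0 to keep accumulating. */
static uint64_t account_tick(struct freq_state *s, uint64_t sample_freq,
                             uint64_t interrupts)
{
        uint64_t out;

        if (sample_freq >= SKETCH_HZ)
                return interrupts;      /* adjust on every tick */

        s->freq_count += sample_freq;
        s->freq_interrupts += interrupts;
        if (s->freq_count < SKETCH_HZ)
                return 0;               /* window not full yet */

        out = s->freq_interrupts;
        s->freq_interrupts = 0;
        s->freq_count -= SKETCH_HZ;
        return out;
}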
|
|
|
|
|
|
2008-12-21 13:43:25 +00:00
|
|
|
|
/*
|
|
|
|
|
* Round-robin a context's counters:
|
|
|
|
|
*/
|
|
|
|
|
static void rotate_ctx(struct perf_counter_context *ctx)
|
2008-12-04 19:12:29 +00:00
|
|
|
|
{
|
|
|
|
|
struct perf_counter *counter;
|
|
|
|
|
|
2008-12-21 13:43:25 +00:00
|
|
|
|
if (!ctx->nr_counters)
|
2008-12-04 19:12:29 +00:00
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
spin_lock(&ctx->lock);
|
|
|
|
|
/*
|
2008-12-11 07:38:42 +00:00
|
|
|
|
* Rotate the first entry last (works just fine for group counters too):
|
2008-12-04 19:12:29 +00:00
|
|
|
|
*/
|
2009-05-13 14:21:38 +00:00
|
|
|
|
perf_disable();
|
2008-12-11 07:38:42 +00:00
|
|
|
|
list_for_each_entry(counter, &ctx->counter_list, list_entry) {
|
2009-03-13 11:21:29 +00:00
|
|
|
|
list_move_tail(&counter->list_entry, &ctx->counter_list);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
break;
|
|
|
|
|
}
|
2009-05-13 14:21:38 +00:00
|
|
|
|
perf_enable();
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
|
|
|
|
spin_unlock(&ctx->lock);
|
2008-12-21 13:43:25 +00:00
|
|
|
|
}
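The rotation above simply moves the first counter (or group leader) to the tail of
the context's list, so a different group gets first pick of the PMU on the next
scheduling pass. A trivial array-based sketch of the same round-robin step:

#include <stdio.h>

static void rotate_first_to_last(int *ids, int n)
{
        int first = ids[0];

        for (int i = 0; i < n - 1; i++)
                ids[i] = ids[i + 1];
        ids[n - 1] = first;
}

int main(void)
{
        int groups[] = { 1, 2, 3, 4 };

        rotate_first_to_last(groups, 4);
        for (int i = 0; i < 4; i++)
                printf("%d ", groups[i]);       /* prints: 2 3 4 1 */
        printf("\n");
        return 0;
}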
|
|
|
|
|
|
|
|
|
|
void perf_counter_task_tick(struct task_struct *curr, int cpu)
|
|
|
|
|
{
|
2009-05-08 16:52:21 +00:00
|
|
|
|
struct perf_cpu_context *cpuctx;
|
|
|
|
|
struct perf_counter_context *ctx;
|
|
|
|
|
|
|
|
|
|
if (!atomic_read(&nr_counters))
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
cpuctx = &per_cpu(perf_cpu_context, cpu);
|
2009-05-22 04:17:31 +00:00
|
|
|
|
ctx = curr->perf_counter_ctxp;
|
2008-12-21 13:43:25 +00:00
|
|
|
|
|
2009-06-10 11:40:57 +00:00
|
|
|
|
perf_ctx_adjust_freq(&cpuctx->ctx);
|
2009-05-22 04:17:31 +00:00
|
|
|
|
if (ctx)
|
2009-06-10 11:40:57 +00:00
|
|
|
|
perf_ctx_adjust_freq(ctx);
|
2009-05-15 13:19:28 +00:00
|
|
|
|
|
2009-05-04 16:54:32 +00:00
|
|
|
|
perf_counter_cpu_sched_out(cpuctx);
|
2009-05-22 04:17:31 +00:00
|
|
|
|
if (ctx)
|
|
|
|
|
__perf_counter_task_sched_out(ctx);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
2009-05-04 16:54:32 +00:00
|
|
|
|
rotate_ctx(&cpuctx->ctx);
|
2009-05-22 04:17:31 +00:00
|
|
|
|
if (ctx)
|
|
|
|
|
rotate_ctx(ctx);
|
2008-12-21 13:43:25 +00:00
|
|
|
|
|
2009-05-04 16:54:32 +00:00
|
|
|
|
perf_counter_cpu_sched_in(cpuctx, cpu);
|
2009-05-22 04:17:31 +00:00
|
|
|
|
if (ctx)
|
|
|
|
|
perf_counter_task_sched_in(curr, cpu);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
}
|
|
|
|
|
|
2009-06-30 06:07:19 +00:00
|
|
|
|
/*
|
|
|
|
|
* Enable all of a task's counters that have been marked enable-on-exec.
|
|
|
|
|
* This expects task == current.
|
|
|
|
|
*/
|
|
|
|
|
static void perf_counter_enable_on_exec(struct task_struct *task)
|
|
|
|
|
{
|
|
|
|
|
struct perf_counter_context *ctx;
|
|
|
|
|
struct perf_counter *counter;
|
|
|
|
|
unsigned long flags;
|
|
|
|
|
int enabled = 0;
|
|
|
|
|
|
|
|
|
|
local_irq_save(flags);
|
|
|
|
|
ctx = task->perf_counter_ctxp;
|
|
|
|
|
if (!ctx || !ctx->nr_counters)
|
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
|
|
__perf_counter_task_sched_out(ctx);
|
|
|
|
|
|
|
|
|
|
spin_lock(&ctx->lock);
|
|
|
|
|
|
|
|
|
|
list_for_each_entry(counter, &ctx->counter_list, list_entry) {
|
|
|
|
|
if (!counter->attr.enable_on_exec)
|
|
|
|
|
continue;
|
|
|
|
|
counter->attr.enable_on_exec = 0;
|
|
|
|
|
if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
|
|
|
|
|
continue;
|
|
|
|
|
counter->state = PERF_COUNTER_STATE_INACTIVE;
|
|
|
|
|
counter->tstamp_enabled =
|
|
|
|
|
ctx->time - counter->total_time_enabled;
|
|
|
|
|
enabled = 1;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Unclone this context if we enabled any counter.
|
|
|
|
|
*/
|
2009-07-10 07:06:56 +00:00
|
|
|
|
if (enabled)
|
|
|
|
|
unclone_ctx(ctx);
|
2009-06-30 06:07:19 +00:00
|
|
|
|
|
|
|
|
|
spin_unlock(&ctx->lock);
|
|
|
|
|
|
|
|
|
|
perf_counter_task_sched_in(task, smp_processor_id());
|
|
|
|
|
out:
|
|
|
|
|
local_irq_restore(flags);
|
|
|
|
|
}
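The intended userspace pattern for the function above is to open a disabled counter
for the current task with enable_on_exec set and then exec the workload, so that the
cost of setting up the counters is not measured. A sketch, assuming the
perf_counter_attr layout and event names of this era; perf_counter_open() is a
hypothetical wrapper for the raw syscall, which had no libc binding:

#include <string.h>
#include <unistd.h>
#include <linux/perf_counter.h>

/* Assumed wrapper around the perf_counter_open syscall (arch-specific number). */
extern int perf_counter_open(struct perf_counter_attr *attr, pid_t pid,
                             int cpu, int group_fd, unsigned long flags);

static int open_counter_for_exec(void)
{
        struct perf_counter_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        attr.disabled = 1;              /* don't count our own setup */
        attr.enable_on_exec = 1;        /* the kernel enables it at exec() */

        /* pid 0 == current task, cpu -1 == any CPU, no group, no flags */
        return perf_counter_open(&attr, 0, -1, -1, 0);
}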
|
|
|
|
|
|
2008-12-04 19:12:29 +00:00
|
|
|
|
/*
|
|
|
|
|
* Cross CPU call to read the hardware counter
|
|
|
|
|
*/
|
2009-06-24 19:11:59 +00:00
|
|
|
|
static void __perf_counter_read(void *info)
|
2008-12-04 19:12:29 +00:00
|
|
|
|
{
|
2009-08-14 05:39:10 +00:00
|
|
|
|
struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
|
2008-12-11 11:46:46 +00:00
|
|
|
|
struct perf_counter *counter = info;
|
perf_counter: record time running and time enabled for each counter
Impact: new functionality
Currently, if there are more counters enabled than can fit on the CPU,
the kernel will multiplex the counters on to the hardware using
round-robin scheduling. That isn't too bad for sampling counters, but
for counting counters it means that the value read from a counter
represents some unknown fraction of the true count of events that
occurred while the counter was enabled.
This remedies the situation by keeping track of how long each counter
is enabled for, and how long it is actually on the cpu and counting
events. These times are recorded in nanoseconds using the task clock
for per-task counters and the cpu clock for per-cpu counters.
These values can be supplied to userspace on a read from the counter.
Userspace requests that they be supplied after the counter value by
setting the PERF_FORMAT_TOTAL_TIME_ENABLED and/or
PERF_FORMAT_TOTAL_TIME_RUNNING bits in the hw_event.read_format field
when creating the counter. (There is no way to change the read format
after the counter is created, though it would be possible to add some
way to do that.)
Using this information it is possible for userspace to scale the count
it reads from the counter to get an estimate of the true count:
true_count_estimate = count * total_time_enabled / total_time_running
This also lets userspace detect the situation where the counter never
got to go on the cpu: total_time_running == 0.
This functionality has been requested by the PAPI developers, and will
be generally needed for interpreting the count values from counting
counters correctly.
In the implementation, this keeps 5 time values (in nanoseconds) for
each counter: total_time_enabled and total_time_running are used when
the counter is in state OFF or ERROR and for reporting back to
userspace. When the counter is in state INACTIVE or ACTIVE, it is the
tstamp_enabled, tstamp_running and tstamp_stopped values that are
relevant, and total_time_enabled and total_time_running are determined
from them. (tstamp_stopped is only used in INACTIVE state.) The
reason for doing it like this is that it means that only counters
being enabled or disabled at sched-in and sched-out time need to be
updated. There are no new loops that iterate over all counters to
update total_time_enabled or total_time_running.
This also keeps separate child_total_time_running and
child_total_time_enabled fields that get added in when reporting the
totals to userspace. They are separate fields so that they can be
atomic. We don't want to use atomics for total_time_running,
total_time_enabled etc., because then we would have to use atomic
sequences to update them, which are slower than regular arithmetic and
memory accesses.
It is possible to measure total_time_running by adding a task_clock
counter to each group of counters, and total_time_enabled can be
measured approximately with a top-level task_clock counter (though
inaccuracies will creep in if you need to disable and enable groups
since it is not possible in general to disable/enable the top-level
task_clock counter simultaneously with another group). However, that
adds extra overhead - I measured around 15% increase in the context
switch latency reported by lat_ctx (from lmbench) when a task_clock
counter was added to each of 2 groups, and around 25% increase when a
task_clock counter was added to each of 4 groups. (In both cases a
top-level task-clock counter was also added.)
In contrast, the code added in this commit gives better information
with no overhead that I could measure (in fact in some cases I
measured lower times with this code, but the differences were all less
than one standard deviation).
[ v2: address review comments by Andrew Morton. ]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Andrew Morton <akpm@linux-foundation.org>
Orig-LKML-Reference: <18890.6578.728637.139402@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-03-25 11:46:58 +00:00
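The scaling recipe in the commit message maps directly onto a userspace read. A
sketch, assuming the counter was opened with read_format set to
PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING, so that a read
returns the count followed by the two times:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Read {count, total_time_enabled, total_time_running} and scale the count. */
static int read_scaled_count(int counter_fd, uint64_t *scaled)
{
        uint64_t buf[3];

        if (read(counter_fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
                return -1;

        if (buf[2] == 0) {
                /* total_time_running == 0: the counter never got onto the PMU */
                *scaled = 0;
                return 0;
        }

        /* true_count_estimate = count * total_time_enabled / total_time_running */
        *scaled = (uint64_t)((double)buf[0] * buf[1] / buf[2]);
        return 0;
}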
|
|
|
|
struct perf_counter_context *ctx = counter->ctx;
|
2008-12-17 13:10:57 +00:00
|
|
|
|
unsigned long flags;
|
2008-12-11 11:46:46 +00:00
|
|
|
|
|
2009-08-14 05:39:10 +00:00
|
|
|
|
/*
|
|
|
|
|
* If this is a task context, we need to check whether it is
|
|
|
|
|
 * the current task context of this cpu. If not, it has been
|
|
|
|
|
* scheduled out before the smp call arrived. In that case
|
|
|
|
|
* counter->count would have been updated to a recent sample
|
|
|
|
|
* when the counter was scheduled out.
|
|
|
|
|
*/
|
|
|
|
|
if (ctx->task && cpuctx->task_ctx != ctx)
|
|
|
|
|
return;
|
|
|
|
|
|
2009-04-06 09:45:12 +00:00
|
|
|
|
local_irq_save(flags);
|
2009-03-25 11:46:58 +00:00
|
|
|
|
if (ctx->is_active)
|
2009-04-06 09:45:10 +00:00
|
|
|
|
update_context_time(ctx);
|
2009-04-29 10:47:03 +00:00
|
|
|
|
counter->pmu->read(counter);
|
2009-03-25 11:46:58 +00:00
|
|
|
|
update_counter_times(counter);
|
2009-04-06 09:45:12 +00:00
|
|
|
|
local_irq_restore(flags);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
}
|
|
|
|
|
|
2008-12-11 07:38:42 +00:00
|
|
|
|
static u64 perf_counter_read(struct perf_counter *counter)
|
2008-12-04 19:12:29 +00:00
|
|
|
|
{
|
|
|
|
|
/*
|
|
|
|
|
* If counter is enabled and currently active on a CPU, update the
|
|
|
|
|
* value in the counter structure:
|
|
|
|
|
*/
|
2008-12-11 14:17:03 +00:00
|
|
|
|
if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
|
2008-12-04 19:12:29 +00:00
|
|
|
|
smp_call_function_single(counter->oncpu,
|
2009-06-24 19:11:59 +00:00
|
|
|
|
__perf_counter_read, counter, 1);
|
perf_counter: record time running and time enabled for each counter
2009-03-25 11:46:58 +00:00
|
|
|
|
} else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
|
|
|
|
|
update_counter_times(counter);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
}
|
|
|
|
|
|
2008-12-13 08:00:03 +00:00
|
|
|
|
return atomic64_read(&counter->count);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
}
|
|
|
|
|
|
perf_counter: Dynamically allocate tasks' perf_counter_context struct
This replaces the struct perf_counter_context in the task_struct with
a pointer to a dynamically allocated perf_counter_context struct. The
main reason for doing this is to allow us to transfer a
perf_counter_context from one task to another when we do lazy PMU
switching in a later patch.
This has a few side-benefits: the task_struct becomes a little smaller,
we save some memory because only tasks that have perf_counters attached
get a perf_counter_context allocated for them, and we can remove the
inclusion of <linux/perf_counter.h> in sched.h, meaning that we don't
end up recompiling nearly everything whenever perf_counter.h changes.
The perf_counter_context structures are reference-counted and freed
when the last reference is dropped. A context can have references
from its task and the counters on its task. Counters can outlive the
task so it is possible that a context will be freed well after its
task has exited.
Contexts are allocated on fork if the parent had a context, or
otherwise the first time that a per-task counter is created on a task.
In the latter case, we set the context pointer in the task struct
locklessly using an atomic compare-and-exchange operation in case we
raced with some other task in creating a context for the subject task.
This also removes the task pointer from the perf_counter struct. The
task pointer was not used anywhere and would make it harder to move a
context from one task to another. Anything that needed to know which
task a counter was attached to was already using counter->ctx->task.
The __perf_counter_init_context function moves up in perf_counter.c
so that it can be called from find_get_context, and now initializes
the refcount, but is otherwise unchanged.
We were potentially calling list_del_counter twice: once from
__perf_counter_exit_task when the task exits and once from
__perf_counter_remove_from_context when the counter's fd gets closed.
This adds a check in list_del_counter so it doesn't do anything if
the counter has already been removed from the lists.
Since perf_counter_task_sched_in doesn't do anything if the task doesn't
have a context, and leaves cpuctx->task_ctx = NULL, this adds code to
__perf_install_in_context to set cpuctx->task_ctx if necessary, i.e. in
the case where the current task adds the first counter to itself and
thus creates a context for itself.
This also adds similar code to __perf_counter_enable to handle a
similar situation which can arise when the counters have been disabled
using prctl; that also leaves cpuctx->task_ctx = NULL.
[ Impact: refactor counter context management to prepare for new feature ]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <18966.10075.781053.231153@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
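The reference counting described above amounts to a pair of small helpers,
sketched here in simplified form (task and parent-context references are
ignored; the in-tree get_ctx()/put_ctx() helpers are the authoritative
versions, and a later change defers the free with RCU):

/* Simplified sketch of the context lifetime rule, not the in-tree code. */
static void sketch_get_ctx(struct perf_counter_context *ctx)
{
	atomic_inc(&ctx->refcount);
}

static void sketch_put_ctx(struct perf_counter_context *ctx)
{
	/* Free the context once the last task/counter reference is gone. */
	if (atomic_dec_and_test(&ctx->refcount))
		kfree(ctx);
}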
2009-05-22 04:17:31 +00:00
|
|
|
|
/*
|
|
|
|
|
* Initialize the perf_counter context in a task_struct:
|
|
|
|
|
*/
|
|
|
|
|
static void
|
|
|
|
|
__perf_counter_init_context(struct perf_counter_context *ctx,
|
|
|
|
|
struct task_struct *task)
|
|
|
|
|
{
|
|
|
|
|
memset(ctx, 0, sizeof(*ctx));
|
|
|
|
|
spin_lock_init(&ctx->lock);
|
|
|
|
|
mutex_init(&ctx->mutex);
|
|
|
|
|
INIT_LIST_HEAD(&ctx->counter_list);
|
|
|
|
|
INIT_LIST_HEAD(&ctx->event_list);
|
|
|
|
|
atomic_set(&ctx->refcount, 1);
|
|
|
|
|
ctx->task = task;
|
|
|
|
|
}
|
|
|
|
|
|
2008-12-04 19:12:29 +00:00
|
|
|
|
static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
|
|
|
|
|
{
|
2009-06-01 08:13:37 +00:00
|
|
|
|
struct perf_counter_context *ctx;
|
|
|
|
|
struct perf_cpu_context *cpuctx;
|
2008-12-04 19:12:29 +00:00
|
|
|
|
struct task_struct *task;
|
2009-06-01 07:48:12 +00:00
|
|
|
|
unsigned long flags;
|
perf_counter: Fix race in attaching counters to tasks and exiting
Commit 564c2b21 ("perf_counter: Optimize context switch between
identical inherited contexts") introduced a race where it is possible
that a counter being attached to a task could get attached to the
wrong task, if the task is one that has inherited its context from
another task via fork. This happens because the optimized context
switch could switch the context to another task after find_get_context
has read task->perf_counter_ctxp. In fact, it's possible that the
context could then get freed, if the other task then exits.
This fixes the problem by protecting both the context switch and the
critical code in find_get_context with spinlocks. The context switch
locks the ctx->lock of both the outgoing and incoming contexts before
swapping them. That means that once code such as find_get_context
has obtained the spinlock for the context associated with a task,
the context can't get swapped to another task. However, the context
may have been swapped in the interval between reading
task->perf_counter_ctxp and getting the lock, so it is necessary to
check and retry.
To make sure that none of the contexts being looked at in
find_get_context can get freed, this changes the context freeing code
to use RCU. Thus an rcu_read_lock() is sufficient to ensure that no
contexts can get freed. This part of the patch is lifted from a patch
posted by Peter Zijlstra.
This also adds a check to make sure that we can't add a counter to a
task that is exiting.
There is also a race between perf_counter_exit_task and
find_get_context; this solves the race by moving the get_ctx that
was in perf_counter_alloc into the locked region in find_get_context,
so that once find_get_context has got the context for a task, it
won't get freed even if the task calls perf_counter_exit_task. It
doesn't matter if new top-level (non-inherited) counters get attached
to the context after perf_counter_exit_task has detached the context
from the task. They will just stay there and never get scheduled in
until the counters' fds get closed, and then perf_release will remove
them from the context and eventually free the context.
With this, we are now doing the unclone in find_get_context rather
than when a counter was added to or removed from a context (actually,
we were missing the unclone_ctx() call when adding a counter to a
context). We don't need to unclone when removing a counter from a
context because we have no way to remove a counter from a cloned
context.
This also takes out the smp_wmb() in find_get_context, which Peter
Zijlstra pointed out was unnecessary because the cmpxchg implies a
full barrier anyway.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <18974.33033.667187.273886@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
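The lock-and-recheck pattern described above (an rcu_read_lock() to keep the
context from being freed, then ctx->lock to keep it from being swapped to
another task, then a re-check in case it was swapped before the lock was
taken) looks roughly like the sketch below; perf_lock_task_context(), which
find_get_context() calls further down, is the real implementation:

/* Sketch of the pattern only; perf_lock_task_context() is authoritative. */
static struct perf_counter_context *
sketch_lock_task_context(struct task_struct *task, unsigned long *flags)
{
	struct perf_counter_context *ctx;

	rcu_read_lock();
retry:
	ctx = rcu_dereference(task->perf_counter_ctxp);
	if (ctx) {
		spin_lock_irqsave(&ctx->lock, *flags);
		/* The context may have been swapped away between the
		 * rcu_dereference() and taking the lock: check and retry. */
		if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
			spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}
	}
	rcu_read_unlock();

	return ctx;
}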
2009-05-28 12:18:17 +00:00
|
|
|
|
int err;
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* If cpu is not a wildcard then this is a percpu counter:
|
|
|
|
|
*/
|
|
|
|
|
if (cpu != -1) {
|
|
|
|
|
/* Must be root to operate on a CPU counter: */
|
2009-06-11 09:18:36 +00:00
|
|
|
|
if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
|
2008-12-04 19:12:29 +00:00
|
|
|
|
return ERR_PTR(-EACCES);
|
|
|
|
|
|
|
|
|
|
if (cpu < 0 || cpu > num_possible_cpus())
|
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* We could be clever and allow to attach a counter to an
|
|
|
|
|
* offline CPU and activate it when the CPU comes up, but
|
|
|
|
|
* that's for later.
|
|
|
|
|
*/
|
|
|
|
|
if (!cpu_isset(cpu, cpu_online_map))
|
|
|
|
|
return ERR_PTR(-ENODEV);
|
|
|
|
|
|
|
|
|
|
cpuctx = &per_cpu(perf_cpu_context, cpu);
|
|
|
|
|
ctx = &cpuctx->ctx;
|
perf_counter: Fix race in attaching counters to tasks and exiting
2009-05-28 12:18:17 +00:00
|
|
|
|
get_ctx(ctx);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
|
|
|
|
return ctx;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
|
if (!pid)
|
|
|
|
|
task = current;
|
|
|
|
|
else
|
|
|
|
|
task = find_task_by_vpid(pid);
|
|
|
|
|
if (task)
|
|
|
|
|
get_task_struct(task);
|
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
|
|
|
|
|
if (!task)
|
|
|
|
|
return ERR_PTR(-ESRCH);
|
|
|
|
|
|
perf_counter: Fix race in attaching counters to tasks and exiting
2009-05-28 12:18:17 +00:00
|
|
|
|
/*
|
|
|
|
|
* Can't attach counters to a dying task.
|
|
|
|
|
*/
|
|
|
|
|
err = -ESRCH;
|
|
|
|
|
if (task->flags & PF_EXITING)
|
|
|
|
|
goto errout;
|
|
|
|
|
|
2008-12-04 19:12:29 +00:00
|
|
|
|
/* Reuse ptrace permission checks for now. */
|
perf_counter: Fix race in attaching counters to tasks and exiting
2009-05-28 12:18:17 +00:00
|
|
|
|
err = -EACCES;
|
|
|
|
|
if (!ptrace_may_access(task, PTRACE_MODE_READ))
|
|
|
|
|
goto errout;
|
|
|
|
|
|
|
|
|
|
retry:
|
2009-06-01 07:48:12 +00:00
|
|
|
|
ctx = perf_lock_task_context(task, &flags);
|
perf_counter: Fix race in attaching counters to tasks and exiting
2009-05-28 12:18:17 +00:00
|
|
|
|
if (ctx) {
|
2009-07-10 07:06:56 +00:00
|
|
|
|
unclone_ctx(ctx);
|
2009-06-01 07:48:12 +00:00
|
|
|
|
spin_unlock_irqrestore(&ctx->lock, flags);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
}
|
|
|
|
|
|
perf_counter: Dynamically allocate tasks' perf_counter_context struct
2009-05-22 04:17:31 +00:00
|
|
|
|
if (!ctx) {
|
|
|
|
|
ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
|
perf_counter: Fix race in attaching counters to tasks and exiting
2009-05-28 12:18:17 +00:00
|
|
|
|
err = -ENOMEM;
|
|
|
|
|
if (!ctx)
|
|
|
|
|
goto errout;
|
perf_counter: Dynamically allocate tasks' perf_counter_context struct
2009-05-22 04:17:31 +00:00
|
|
|
|
__perf_counter_init_context(ctx, task);
|
perf_counter: Fix race in attaching counters to tasks and exiting
2009-05-28 12:18:17 +00:00
|
|
|
|
get_ctx(ctx);
|
|
|
|
|
if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) {
|
perf_counter: Dynamically allocate tasks' perf_counter_context struct
2009-05-22 04:17:31 +00:00
|
|
|
|
/*
|
|
|
|
|
* We raced with some other task; use
|
|
|
|
|
* the context they set.
|
|
|
|
|
*/
|
|
|
|
|
kfree(ctx);
|
2009-06-01 07:48:12 +00:00
|
|
|
|
goto retry;
|
perf_counter: Dynamically allocate tasks' perf_counter_context struct
2009-05-22 04:17:31 +00:00
|
|
|
|
}
|
perf_counter: Fix race in attaching counters to tasks and exiting
2009-05-28 12:18:17 +00:00
|
|
|
|
get_task_struct(task);
|
perf_counter: Dynamically allocate tasks' perf_counter_context struct
2009-05-22 04:17:31 +00:00
|
|
|
|
}
|
|
|
|
|
|
perf_counter: Fix race in attaching counters to tasks and exiting
2009-05-28 12:18:17 +00:00
|
|
|
|
put_task_struct(task);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
return ctx;
|
perf_counter: Fix race in attaching counters to tasks and exiting
2009-05-28 12:18:17 +00:00
|
|
|
|
|
|
|
|
|
errout:
|
|
|
|
|
put_task_struct(task);
|
|
|
|
|
return ERR_PTR(err);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
}
|
|
|
|
|
|
2009-03-13 11:21:36 +00:00
|
|
|
|
static void free_counter_rcu(struct rcu_head *head)
|
|
|
|
|
{
|
|
|
|
|
struct perf_counter *counter;
|
|
|
|
|
|
|
|
|
|
counter = container_of(head, struct perf_counter, rcu_head);
|
2009-06-02 12:13:15 +00:00
|
|
|
|
if (counter->ns)
|
|
|
|
|
put_pid_ns(counter->ns);
|
2009-03-13 11:21:36 +00:00
|
|
|
|
kfree(counter);
|
|
|
|
|
}
|
|
|
|
|
|
2009-03-30 17:07:02 +00:00
|
|
|
|
static void perf_pending_sync(struct perf_counter *counter);
|
|
|
|
|
|
2009-03-19 19:26:16 +00:00
|
|
|
|
static void free_counter(struct perf_counter *counter)
|
|
|
|
|
{
|
2009-03-30 17:07:02 +00:00
|
|
|
|
perf_pending_sync(counter);
|
|
|
|
|
|
2009-06-22 11:58:35 +00:00
|
|
|
|
if (!counter->parent) {
|
|
|
|
|
atomic_dec(&nr_counters);
|
|
|
|
|
if (counter->attr.mmap)
|
|
|
|
|
atomic_dec(&nr_mmap_counters);
|
|
|
|
|
if (counter->attr.comm)
|
|
|
|
|
atomic_dec(&nr_comm_counters);
|
2009-07-23 12:46:33 +00:00
|
|
|
|
if (counter->attr.task)
|
|
|
|
|
atomic_dec(&nr_task_counters);
|
2009-06-22 11:58:35 +00:00
|
|
|
|
}
|
2009-04-09 08:53:44 +00:00
|
|
|
|
|
2009-03-19 19:26:17 +00:00
|
|
|
|
if (counter->destroy)
|
|
|
|
|
counter->destroy(counter);
|
|
|
|
|
|
2009-05-28 12:18:17 +00:00
|
|
|
|
put_ctx(counter->ctx);
|
2009-03-19 19:26:16 +00:00
|
|
|
|
call_rcu(&counter->rcu_head, free_counter_rcu);
|
|
|
|
|
}
|
|
|
|
|
|
2008-12-04 19:12:29 +00:00
|
|
|
|
/*
|
|
|
|
|
* Called when the last reference to the file is gone.
|
|
|
|
|
*/
|
|
|
|
|
static int perf_release(struct inode *inode, struct file *file)
|
|
|
|
|
{
|
|
|
|
|
struct perf_counter *counter = file->private_data;
|
|
|
|
|
struct perf_counter_context *ctx = counter->ctx;
|
|
|
|
|
|
|
|
|
|
file->private_data = NULL;
|
|
|
|
|
|
2009-05-29 06:06:20 +00:00
|
|
|
|
WARN_ON_ONCE(ctx->parent_ctx);
|
perf_counter: Add counter enable/disable ioctls
Impact: New perf_counter features
This primarily adds a way for perf_counter users to enable and disable
counters and groups. Enabling or disabling a counter or group also
enables or disables all of the child counters that have been cloned
from it to monitor children of the task monitored by the top-level
counter. The userspace interface to enable/disable counters is via
ioctl on the counter file descriptor.
Along the way this extends the code that handles child counters to
handle child counter groups properly. A group with multiple counters
will be cloned to child tasks if and only if the group leader has the
hw_event.inherit bit set - if it is set the whole group is cloned as a
group in the child task.
In order to be able to enable or disable all child counters of a given
top-level counter, we need a way to find them all. Hence I have added
a child_list field to struct perf_counter, which is the head of the
list of children for a top-level counter, or the link in that list for
a child counter. That list is protected by the perf_counter.mutex
field.
This also adds a mutex to the perf_counter_context struct. Previously
the list of counters was protected just by the lock field in the
context, which meant that perf_counter_init_task had to take that lock
and then take whatever lock/mutex protects the top-level counter's
child_list. But the counter enable/disable functions need to take
that lock in order to traverse the list, then for each counter take
the lock in that counter's context in order to change the counter's
state safely, which would lead to a deadlock.
To solve this, we now have both a mutex and a spinlock in the context,
and taking either is sufficient to ensure the list of counters can't
change - you have to take both before changing the list. Now
perf_counter_init_task takes the mutex instead of the lock (which
incidentally means that inherit_counter can use GFP_KERNEL instead of
GFP_ATOMIC) and thus avoids the possible deadlock. Similarly the new
enable/disable functions can take the mutex while traversing the list
of child counters without incurring a possible deadlock when the
counter manipulation code locks the context for a child counter.
We also had a misfeature that the first counter added to a context
would possibly not go on until the next sched-in, because we were
using ctx->nr_active to detect if the context was running on a CPU.
But nr_active is the number of active counters, and if that was zero
(because the context didn't have any counters yet) it would look like
the context wasn't running on a cpu and so the retry code in
__perf_install_in_context wouldn't retry. So this adds an 'is_active'
field that is set when the context is on a CPU, even if it has no
counters. The is_active field is only used for task contexts, not for
per-cpu contexts.
If we enable a subsidiary counter in a group that is active on a CPU,
and the arch code can't enable the counter, then we have to pull the
whole group off the CPU. We do this with group_sched_out, which gets
moved up in the file so it comes before all its callers. This also
adds similar logic to __perf_install_in_context so that the "all on,
or none" invariant of groups is preserved when adding a new counter to
a group.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-17 07:10:22 +00:00
|
|
|
|
mutex_lock(&ctx->mutex);
|
2008-12-11 07:38:42 +00:00
|
|
|
|
perf_counter_remove_from_context(counter);
|
2009-01-17 07:10:22 +00:00
|
|
|
|
mutex_unlock(&ctx->mutex);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
2009-05-23 16:29:00 +00:00
|
|
|
|
mutex_lock(&counter->owner->perf_counter_mutex);
|
|
|
|
|
list_del_init(&counter->owner_entry);
|
|
|
|
|
mutex_unlock(&counter->owner->perf_counter_mutex);
|
|
|
|
|
put_task_struct(counter->owner);
|
|
|
|
|
|
2009-03-19 19:26:16 +00:00
|
|
|
|
free_counter(counter);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
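/*
 * Layout of the data returned by read() on a counter fd, as computed by
 * perf_counter_read_size() and filled in by perf_counter_read_one() and
 * perf_counter_read_group() below (all fields are u64):
 *
 * without PERF_FORMAT_GROUP:
 *	{ value }
 *	{ time_enabled }	if PERF_FORMAT_TOTAL_TIME_ENABLED
 *	{ time_running }	if PERF_FORMAT_TOTAL_TIME_RUNNING
 *	{ id }			if PERF_FORMAT_ID
 *
 * with PERF_FORMAT_GROUP:
 *	{ nr }
 *	{ time_enabled }	if PERF_FORMAT_TOTAL_TIME_ENABLED
 *	{ time_running }	if PERF_FORMAT_TOTAL_TIME_RUNNING
 *	{ value, id? }		for the group leader and then each sibling
 */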
2009-08-13 09:47:53 +00:00
|
|
|
|
static int perf_counter_read_size(struct perf_counter *counter)
|
|
|
|
|
{
|
|
|
|
|
int entry = sizeof(u64); /* value */
|
|
|
|
|
int size = 0;
|
|
|
|
|
int nr = 1;
|
|
|
|
|
|
|
|
|
|
if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
|
|
|
|
|
size += sizeof(u64);
|
|
|
|
|
|
|
|
|
|
if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
|
|
|
|
|
size += sizeof(u64);
|
|
|
|
|
|
|
|
|
|
if (counter->attr.read_format & PERF_FORMAT_ID)
|
|
|
|
|
entry += sizeof(u64);
|
|
|
|
|
|
|
|
|
|
if (counter->attr.read_format & PERF_FORMAT_GROUP) {
|
|
|
|
|
nr += counter->group_leader->nr_siblings;
|
|
|
|
|
size += sizeof(u64);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
size += entry * nr;
|
|
|
|
|
|
|
|
|
|
return size;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
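/*
 * Sum a counter's value with that of all counters inherited from it by
 * child tasks.  Callers are expected to hold counter->child_mutex (as
 * perf_read_hw() does) so that child_list cannot change under us.
 */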
static u64 perf_counter_read_value(struct perf_counter *counter)
|
2009-07-24 12:42:10 +00:00
|
|
|
|
{
|
|
|
|
|
struct perf_counter *child;
|
|
|
|
|
u64 total = 0;
|
|
|
|
|
|
|
|
|
|
total += perf_counter_read(counter);
|
|
|
|
|
list_for_each_entry(child, &counter->child_list, child_list)
|
|
|
|
|
total += perf_counter_read(child);
|
|
|
|
|
|
|
|
|
|
return total;
|
|
|
|
|
}
|
|
|
|
|
|
2009-08-13 09:47:53 +00:00
|
|
|
|
static int perf_counter_read_entry(struct perf_counter *counter,
|
|
|
|
|
u64 read_format, char __user *buf)
|
|
|
|
|
{
|
|
|
|
|
int n = 0, count = 0;
|
|
|
|
|
u64 values[2];
|
|
|
|
|
|
|
|
|
|
values[n++] = perf_counter_read_value(counter);
|
|
|
|
|
if (read_format & PERF_FORMAT_ID)
|
|
|
|
|
values[n++] = primary_counter_id(counter);
|
|
|
|
|
|
|
|
|
|
count = n * sizeof(u64);
|
|
|
|
|
|
|
|
|
|
if (copy_to_user(buf, values, count))
|
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
|
|
return count;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int perf_counter_read_group(struct perf_counter *counter,
|
|
|
|
|
u64 read_format, char __user *buf)
|
|
|
|
|
{
|
|
|
|
|
struct perf_counter *leader = counter->group_leader, *sub;
|
|
|
|
|
int n = 0, size = 0, err = -EFAULT;
|
|
|
|
|
u64 values[3];
|
|
|
|
|
|
|
|
|
|
values[n++] = 1 + leader->nr_siblings;
|
|
|
|
|
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
|
|
|
|
|
values[n++] = leader->total_time_enabled +
|
|
|
|
|
atomic64_read(&leader->child_total_time_enabled);
|
|
|
|
|
}
|
|
|
|
|
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
|
|
|
|
|
values[n++] = leader->total_time_running +
|
|
|
|
|
atomic64_read(&leader->child_total_time_running);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
size = n * sizeof(u64);
|
|
|
|
|
|
|
|
|
|
if (copy_to_user(buf, values, size))
|
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
|
|
err = perf_counter_read_entry(leader, read_format, buf + size);
|
|
|
|
|
if (err < 0)
|
|
|
|
|
return err;
|
|
|
|
|
|
|
|
|
|
size += err;
|
|
|
|
|
|
|
|
|
|
list_for_each_entry(sub, &leader->sibling_list, list_entry) {
|
|
|
|
|
err = perf_counter_read_entry(sub, read_format,
|
|
|
|
|
buf + size);
|
|
|
|
|
if (err < 0)
|
|
|
|
|
return err;
|
|
|
|
|
|
|
|
|
|
size += err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return size;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int perf_counter_read_one(struct perf_counter *counter,
|
|
|
|
|
u64 read_format, char __user *buf)
|
|
|
|
|
{
|
|
|
|
|
u64 values[4];
|
|
|
|
|
int n = 0;
|
|
|
|
|
|
|
|
|
|
values[n++] = perf_counter_read_value(counter);
|
|
|
|
|
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
|
|
|
|
|
values[n++] = counter->total_time_enabled +
|
|
|
|
|
atomic64_read(&counter->child_total_time_enabled);
|
|
|
|
|
}
|
|
|
|
|
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
|
|
|
|
|
values[n++] = counter->total_time_running +
|
|
|
|
|
atomic64_read(&counter->child_total_time_running);
|
|
|
|
|
}
|
|
|
|
|
if (read_format & PERF_FORMAT_ID)
|
|
|
|
|
values[n++] = primary_counter_id(counter);
|
|
|
|
|
|
|
|
|
|
if (copy_to_user(buf, values, n * sizeof(u64)))
|
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
|
|
return n * sizeof(u64);
|
|
|
|
|
}
|
|
|
|
|
|
2008-12-04 19:12:29 +00:00
|
|
|
|
/*
|
|
|
|
|
* Read the performance counter - simple non blocking version for now
|
|
|
|
|
*/
|
|
|
|
|
static ssize_t
|
|
|
|
|
perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
|
|
|
|
|
{
|
2009-08-13 09:47:53 +00:00
|
|
|
|
u64 read_format = counter->attr.read_format;
|
|
|
|
|
int ret;
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
perf_counter: Add support for pinned and exclusive counter groups
Impact: New perf_counter features
A pinned counter group is one that the user wants to have on the CPU
whenever possible, i.e. whenever the associated task is running, for
a per-task group, or always for a per-cpu group. If the system
cannot satisfy that, it puts the group into an error state where
it is not scheduled any more and reads from it return EOF (i.e. 0
bytes read). The group can be released from error state and made
readable again using prctl(PR_TASK_PERF_COUNTERS_ENABLE). When we
have finer-grained enable/disable controls on counters we'll be able
to reset the error state on individual groups.
An exclusive group is one that the user wants to be the only group
using the CPU performance monitor hardware whenever it is on. The
counter group scheduler will not schedule an exclusive group if there
are already other groups on the CPU and will not schedule other groups
onto the CPU if there is an exclusive group scheduled (that statement
does not apply to groups containing only software counters, which can
always go on and which do not prevent an exclusive group from going on).
With an exclusive group, we will be able to let users program PMU
registers at a low level without the concern that those settings will
perturb other measurements.
Along the way this reorganizes things a little:
- is_software_counter() is moved to perf_counter.h.
- cpuctx->active_oncpu now records the number of hardware counters on
the CPU, i.e. it now excludes software counters. Nothing was reading
cpuctx->active_oncpu before, so this change is harmless.
- A new cpuctx->exclusive field records whether we currently have an
exclusive group on the CPU.
- counter_sched_out moves higher up in perf_counter.c and gets called
from __perf_counter_remove_from_context and __perf_counter_exit_task,
where we used to have essentially the same code.
- __perf_counter_sched_in now goes through the counter list twice, doing
the pinned counters in the first loop and the non-pinned counters in
the second loop, in order to give the pinned counters the best chance
to be scheduled in.
Note that only a group leader can be exclusive or pinned, and that
attribute applies to the whole group. This avoids some awkwardness in
some corner cases (e.g. where a group leader is closed and the other
group members get added to the context list). If we want to relax that
restriction later, we can, and it is easier to relax a restriction than
to apply a new one.
This doesn't yet handle the case where a pinned counter is inherited
and goes into error state in the child - the error state is not
propagated up to the parent when the child exits, and arguably it
should.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-14 10:00:30 +00:00
|
|
|
|
/*
|
|
|
|
|
* Return end-of-file for a read on a counter that is in
|
|
|
|
|
* error state (i.e. because it was pinned but it couldn't be
|
|
|
|
|
* scheduled on to the CPU at some point).
|
|
|
|
|
*/
|
|
|
|
|
if (counter->state == PERF_COUNTER_STATE_ERROR)
|
|
|
|
|
return 0;
|
|
|
|
|
|
2009-08-13 09:47:53 +00:00
|
|
|
|
if (count < perf_counter_read_size(counter))
|
|
|
|
|
return -ENOSPC;
|
|
|
|
|
|
2009-05-29 06:06:20 +00:00
|
|
|
|
WARN_ON_ONCE(counter->ctx->parent_ctx);
|
2009-05-23 16:28:56 +00:00
|
|
|
|
mutex_lock(&counter->child_mutex);
|
2009-08-13 09:47:53 +00:00
|
|
|
|
if (read_format & PERF_FORMAT_GROUP)
|
|
|
|
|
ret = perf_counter_read_group(counter, read_format, buf);
|
|
|
|
|
else
|
|
|
|
|
ret = perf_counter_read_one(counter, read_format, buf);
|
2009-05-23 16:28:56 +00:00
|
|
|
|
mutex_unlock(&counter->child_mutex);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
2009-08-13 09:47:53 +00:00
|
|
|
|
return ret;
|
2008-12-04 19:12:29 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
|
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
|
|
|
|
|
{
|
|
|
|
|
struct perf_counter *counter = file->private_data;
|
|
|
|
|
|
2009-03-23 17:22:10 +00:00
|
|
|
|
return perf_read_hw(counter, buf, count);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static unsigned int perf_poll(struct file *file, poll_table *wait)
|
|
|
|
|
{
|
|
|
|
|
struct perf_counter *counter = file->private_data;
|
2009-03-24 12:18:16 +00:00
|
|
|
|
struct perf_mmap_data *data;
|
2009-05-01 10:23:16 +00:00
|
|
|
|
unsigned int events = POLLHUP;
|
2009-03-24 12:18:16 +00:00
|
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
|
data = rcu_dereference(counter->data);
|
|
|
|
|
if (data)
|
2009-05-01 10:23:16 +00:00
|
|
|
|
events = atomic_xchg(&data->poll, 0);
|
2009-03-24 12:18:16 +00:00
|
|
|
|
rcu_read_unlock();
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
|
|
|
|
poll_wait(file, &counter->waitq, wait);
|
|
|
|
|
|
|
|
|
|
return events;
|
|
|
|
|
}
|
|
|
|
|
|
2009-05-05 15:50:23 +00:00
|
|
|
|
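/*
 * PERF_COUNTER_IOC_RESET: read the counter first so the hardware state
 * (hw.prev_count) is brought up to date and no stale delta gets folded
 * in later, then zero ->count and publish the new value through the
 * mmap()ed control page.
 */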
static void perf_counter_reset(struct perf_counter *counter)
|
|
|
|
|
{
|
2009-05-08 16:52:22 +00:00
|
|
|
|
(void)perf_counter_read(counter);
|
2009-05-11 05:50:21 +00:00
|
|
|
|
atomic64_set(&counter->count, 0);
|
2009-05-08 16:52:22 +00:00
|
|
|
|
perf_counter_update_userpage(counter);
|
|
|
|
|
}
|
|
|
|
|
|
2009-05-28 12:18:17 +00:00
|
|
|
|
/*
|
|
|
|
|
* Holding the top-level counter's child_mutex means that any
|
|
|
|
|
* descendant process that has inherited this counter will block
|
|
|
|
|
* in sync_child_counter if it goes to exit, thus satisfying the
|
|
|
|
|
* task existence requirements of perf_counter_enable/disable.
|
|
|
|
|
*/
|
2009-05-08 16:52:22 +00:00
|
|
|
|
static void perf_counter_for_each_child(struct perf_counter *counter,
|
|
|
|
|
void (*func)(struct perf_counter *))
|
|
|
|
|
{
|
|
|
|
|
struct perf_counter *child;
|
|
|
|
|
|
2009-05-29 06:06:20 +00:00
|
|
|
|
WARN_ON_ONCE(counter->ctx->parent_ctx);
|
2009-05-23 16:28:56 +00:00
|
|
|
|
mutex_lock(&counter->child_mutex);
|
2009-05-08 16:52:22 +00:00
|
|
|
|
func(counter);
|
|
|
|
|
list_for_each_entry(child, &counter->child_list, child_list)
|
|
|
|
|
func(child);
|
2009-05-23 16:28:56 +00:00
|
|
|
|
mutex_unlock(&counter->child_mutex);
|
2009-05-08 16:52:22 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void perf_counter_for_each(struct perf_counter *counter,
|
|
|
|
|
void (*func)(struct perf_counter *))
|
|
|
|
|
{
|
2009-06-15 13:05:12 +00:00
|
|
|
|
struct perf_counter_context *ctx = counter->ctx;
|
|
|
|
|
struct perf_counter *sibling;
|
2009-05-08 16:52:22 +00:00
|
|
|
|
|
2009-06-15 13:05:12 +00:00
|
|
|
|
WARN_ON_ONCE(ctx->parent_ctx);
|
|
|
|
|
mutex_lock(&ctx->mutex);
|
|
|
|
|
counter = counter->group_leader;
|
|
|
|
|
|
|
|
|
|
perf_counter_for_each_child(counter, func);
|
|
|
|
|
func(counter);
|
|
|
|
|
list_for_each_entry(sibling, &counter->sibling_list, list_entry)
|
|
|
|
|
perf_counter_for_each_child(sibling, func);
|
|
|
|
|
mutex_unlock(&ctx->mutex);
|
2009-05-05 15:50:23 +00:00
|
|
|
|
}
|
|
|
|
|
|
2009-06-02 14:46:57 +00:00
|
|
|
|
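/*
 * PERF_COUNTER_IOC_PERIOD: install a new sample period from the u64 the
 * user passed in, or, for a freq-driven counter, a new sample frequency
 * capped at sysctl_perf_counter_sample_rate.  Counters created without
 * a sample period/frequency, and a value of zero, are rejected with
 * -EINVAL.
 */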
static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
|
|
|
|
|
{
|
|
|
|
|
struct perf_counter_context *ctx = counter->ctx;
|
|
|
|
|
unsigned long size;
|
|
|
|
|
int ret = 0;
|
|
|
|
|
u64 value;
|
|
|
|
|
|
2009-06-02 17:22:16 +00:00
|
|
|
|
if (!counter->attr.sample_period)
|
2009-06-02 14:46:57 +00:00
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
|
|
size = copy_from_user(&value, arg, sizeof(value));
|
|
|
|
|
if (size != 0)
|
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
|
|
if (!value)
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
|
|
spin_lock_irq(&ctx->lock);
|
2009-06-02 17:22:16 +00:00
|
|
|
|
if (counter->attr.freq) {
|
2009-06-11 09:25:05 +00:00
|
|
|
|
if (value > sysctl_perf_counter_sample_rate) {
|
2009-06-02 14:46:57 +00:00
|
|
|
|
ret = -EINVAL;
|
|
|
|
|
goto unlock;
|
|
|
|
|
}
|
|
|
|
|
|
2009-06-02 17:22:16 +00:00
|
|
|
|
counter->attr.sample_freq = value;
|
2009-06-02 14:46:57 +00:00
|
|
|
|
} else {
|
2009-06-02 17:22:16 +00:00
|
|
|
|
counter->attr.sample_period = value;
|
2009-06-02 14:46:57 +00:00
|
|
|
|
counter->hw.sample_period = value;
|
|
|
|
|
}
|
|
|
|
|
unlock:
|
|
|
|
|
spin_unlock_irq(&ctx->lock);
|
|
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
|
}
|
|
|
|
|
|
2009-01-17 07:10:22 +00:00
|
|
|
|
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
|
|
|
|
{
|
|
|
|
|
struct perf_counter *counter = file->private_data;
|
2009-05-08 16:52:22 +00:00
|
|
|
|
void (*func)(struct perf_counter *);
|
|
|
|
|
u32 flags = arg;
|
2009-01-17 07:10:22 +00:00
|
|
|
|
|
|
|
|
|
switch (cmd) {
|
|
|
|
|
case PERF_COUNTER_IOC_ENABLE:
|
2009-05-08 16:52:22 +00:00
|
|
|
|
func = perf_counter_enable;
|
2009-01-17 07:10:22 +00:00
|
|
|
|
break;
|
|
|
|
|
case PERF_COUNTER_IOC_DISABLE:
|
2009-05-08 16:52:22 +00:00
|
|
|
|
func = perf_counter_disable;
|
2009-04-06 09:45:07 +00:00
|
|
|
|
break;
|
2009-05-05 15:50:23 +00:00
|
|
|
|
case PERF_COUNTER_IOC_RESET:
|
2009-05-08 16:52:22 +00:00
|
|
|
|
func = perf_counter_reset;
|
2009-05-05 15:50:23 +00:00
|
|
|
|
break;
|
2009-05-08 16:52:22 +00:00
|
|
|
|
|
|
|
|
|
case PERF_COUNTER_IOC_REFRESH:
|
|
|
|
|
return perf_counter_refresh(counter, arg);
|
2009-06-02 14:46:57 +00:00
|
|
|
|
|
|
|
|
|
case PERF_COUNTER_IOC_PERIOD:
|
|
|
|
|
return perf_counter_period(counter, (u64 __user *)arg);
|
|
|
|
|
|
2009-01-17 07:10:22 +00:00
|
|
|
|
default:
|
2009-05-08 16:52:22 +00:00
|
|
|
|
return -ENOTTY;
|
2009-01-17 07:10:22 +00:00
|
|
|
|
}
|
2009-05-08 16:52:22 +00:00
|
|
|
|
|
|
|
|
|
if (flags & PERF_IOC_FLAG_GROUP)
|
|
|
|
|
perf_counter_for_each(counter, func);
|
|
|
|
|
else
|
|
|
|
|
perf_counter_for_each_child(counter, func);
|
|
|
|
|
|
|
|
|
|
return 0;
|
2009-01-17 07:10:22 +00:00
|
|
|
|
}
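/*
 * Illustration only (user-space view, not part of this file): the
 * ioctls handled above are issued on the fd returned by
 * sys_perf_counter_open(), e.g.
 *
 *	ioctl(fd, PERF_COUNTER_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
 *	... run the code of interest ...
 *	ioctl(fd, PERF_COUNTER_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 *	read(fd, buf, bufsize);
 *
 * PERF_IOC_FLAG_GROUP applies the operation to the counter's whole
 * group; without it only the counter and its inherited children are
 * affected.
 */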
|
|
|
|
|
|
2009-05-25 12:45:26 +00:00
|
|
|
|
int perf_counter_task_enable(void)
|
|
|
|
|
{
|
|
|
|
|
struct perf_counter *counter;
|
|
|
|
|
|
|
|
|
|
mutex_lock(&current->perf_counter_mutex);
|
|
|
|
|
list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
|
|
|
|
|
perf_counter_for_each_child(counter, perf_counter_enable);
|
|
|
|
|
mutex_unlock(&current->perf_counter_mutex);
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
int perf_counter_task_disable(void)
|
|
|
|
|
{
|
|
|
|
|
struct perf_counter *counter;
|
|
|
|
|
|
|
|
|
|
mutex_lock(&current->perf_counter_mutex);
|
|
|
|
|
list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
|
|
|
|
|
perf_counter_for_each_child(counter, perf_counter_disable);
|
|
|
|
|
mutex_unlock(&current->perf_counter_mutex);
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2009-08-18 09:32:24 +00:00
|
|
|
|
#ifndef PERF_COUNTER_INDEX_OFFSET
|
|
|
|
|
# define PERF_COUNTER_INDEX_OFFSET 0
|
|
|
|
|
#endif
|
|
|
|
|
|
2009-06-22 14:35:24 +00:00
|
|
|
|
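/*
 * Hardware counter index reported in the mmap()ed control page: biased
 * by one so that zero means "not currently on the PMU".  An arch whose
 * hw.idx is already 1-based can define PERF_COUNTER_INDEX_OFFSET to
 * cancel the bias.
 */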
static int perf_counter_index(struct perf_counter *counter)
|
|
|
|
|
{
|
|
|
|
|
if (counter->state != PERF_COUNTER_STATE_ACTIVE)
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET;
|
|
|
|
|
}
|
|
|
|
|
|
2009-03-30 17:07:03 +00:00
|
|
|
|
/*
|
|
|
|
|
* Callers need to ensure there can be no nesting of this function, otherwise
|
|
|
|
|
* the seqlock logic goes bad. We can not serialize this because the arch
|
|
|
|
|
* code calls this from NMI context.
|
|
|
|
|
*/
|
|
|
|
|
void perf_counter_update_userpage(struct perf_counter *counter)
|
2009-03-23 17:22:08 +00:00
|
|
|
|
{
|
2009-03-30 17:07:03 +00:00
|
|
|
|
struct perf_counter_mmap_page *userpg;
|
2009-06-01 08:13:37 +00:00
|
|
|
|
struct perf_mmap_data *data;
|
2009-03-30 17:07:03 +00:00
|
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
|
data = rcu_dereference(counter->data);
|
|
|
|
|
if (!data)
|
|
|
|
|
goto unlock;
|
|
|
|
|
|
|
|
|
|
userpg = data->user_page;
|
2009-03-23 17:22:08 +00:00
|
|
|
|
|
2009-03-23 17:22:10 +00:00
|
|
|
|
/*
|
|
|
|
|
* Disable preemption so as to not let the corresponding user-space
|
|
|
|
|
* spin too long if we get preempted.
|
|
|
|
|
*/
|
|
|
|
|
preempt_disable();
|
2009-03-23 17:22:08 +00:00
|
|
|
|
++userpg->lock;
|
2009-04-02 09:12:04 +00:00
|
|
|
|
barrier();
|
2009-06-22 14:35:24 +00:00
|
|
|
|
userpg->index = perf_counter_index(counter);
|
2009-03-23 17:22:08 +00:00
|
|
|
|
userpg->offset = atomic64_read(&counter->count);
|
|
|
|
|
if (counter->state == PERF_COUNTER_STATE_ACTIVE)
|
|
|
|
|
userpg->offset -= atomic64_read(&counter->hw.prev_count);
|
2009-03-23 17:22:10 +00:00
|
|
|
|
|
2009-06-22 12:34:35 +00:00
|
|
|
|
userpg->time_enabled = counter->total_time_enabled +
|
|
|
|
|
atomic64_read(&counter->child_total_time_enabled);
|
|
|
|
|
|
|
|
|
|
userpg->time_running = counter->total_time_running +
|
|
|
|
|
atomic64_read(&counter->child_total_time_running);
|
|
|
|
|
|
2009-04-02 09:12:04 +00:00
|
|
|
|
barrier();
|
2009-03-23 17:22:08 +00:00
|
|
|
|
++userpg->lock;
|
2009-03-23 17:22:10 +00:00
|
|
|
|
preempt_enable();
|
2009-03-30 17:07:03 +00:00
|
|
|
|
unlock:
|
2009-03-23 17:22:10 +00:00
|
|
|
|
rcu_read_unlock();
|
2009-03-23 17:22:08 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
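/*
 * Fault handler for the counter mmap(): page offset 0 is the
 * perf_counter_mmap_page control page (the only page user space may
 * write to, hence the FAULT_FLAG_MKWRITE check), offsets 1..nr_pages
 * map the corresponding entries of data->data_pages[].
 */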
static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
|
|
|
|
{
|
|
|
|
|
struct perf_counter *counter = vma->vm_file->private_data;
|
2009-03-23 17:22:10 +00:00
|
|
|
|
struct perf_mmap_data *data;
|
|
|
|
|
int ret = VM_FAULT_SIGBUS;
|
|
|
|
|
|
2009-03-25 18:39:37 +00:00
|
|
|
|
if (vmf->flags & FAULT_FLAG_MKWRITE) {
|
|
|
|
|
if (vmf->pgoff == 0)
|
|
|
|
|
ret = 0;
|
|
|
|
|
return ret;
|
|
|
|
|
}
|
|
|
|
|
|
2009-03-23 17:22:10 +00:00
|
|
|
|
rcu_read_lock();
|
|
|
|
|
data = rcu_dereference(counter->data);
|
|
|
|
|
if (!data)
|
|
|
|
|
goto unlock;
|
|
|
|
|
|
|
|
|
|
if (vmf->pgoff == 0) {
|
|
|
|
|
vmf->page = virt_to_page(data->user_page);
|
|
|
|
|
} else {
|
|
|
|
|
int nr = vmf->pgoff - 1;
|
2009-03-23 17:22:08 +00:00
|
|
|
|
|
2009-03-23 17:22:10 +00:00
|
|
|
|
if ((unsigned)nr > data->nr_pages)
|
|
|
|
|
goto unlock;
|
2009-03-23 17:22:08 +00:00
|
|
|
|
|
2009-03-25 18:39:37 +00:00
|
|
|
|
if (vmf->flags & FAULT_FLAG_WRITE)
|
|
|
|
|
goto unlock;
|
|
|
|
|
|
2009-03-23 17:22:10 +00:00
|
|
|
|
vmf->page = virt_to_page(data->data_pages[nr]);
|
|
|
|
|
}
|
2009-03-25 18:39:37 +00:00
|
|
|
|
|
2009-03-23 17:22:08 +00:00
|
|
|
|
get_page(vmf->page);
|
2009-03-25 18:39:37 +00:00
|
|
|
|
vmf->page->mapping = vma->vm_file->f_mapping;
|
|
|
|
|
vmf->page->index = vmf->pgoff;
|
|
|
|
|
|
2009-03-23 17:22:10 +00:00
|
|
|
|
ret = 0;
|
|
|
|
|
unlock:
|
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
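/*
 * Allocate the mmap() buffer: one zeroed control page plus nr_pages
 * zeroed data pages, described by a struct perf_mmap_data that is
 * published with rcu_assign_pointer() so the fault and poll paths can
 * look it up under rcu_read_lock().
 */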
static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
|
|
|
|
|
{
|
|
|
|
|
struct perf_mmap_data *data;
|
|
|
|
|
unsigned long size;
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
WARN_ON(atomic_read(&counter->mmap_count));
|
|
|
|
|
|
|
|
|
|
size = sizeof(struct perf_mmap_data);
|
|
|
|
|
size += nr_pages * sizeof(void *);
|
|
|
|
|
|
|
|
|
|
data = kzalloc(size, GFP_KERNEL);
|
|
|
|
|
if (!data)
|
|
|
|
|
goto fail;
|
|
|
|
|
|
|
|
|
|
data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
|
|
|
|
|
if (!data->user_page)
|
|
|
|
|
goto fail_user_page;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < nr_pages; i++) {
|
|
|
|
|
data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
|
|
|
|
|
if (!data->data_pages[i])
|
|
|
|
|
goto fail_data_pages;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
data->nr_pages = nr_pages;
|
2009-05-05 15:50:25 +00:00
|
|
|
|
atomic_set(&data->lock, -1);
|
2009-03-23 17:22:10 +00:00
|
|
|
|
|
|
|
|
|
rcu_assign_pointer(counter->data, data);
|
|
|
|
|
|
2009-03-23 17:22:08 +00:00
|
|
|
|
return 0;
|
2009-03-23 17:22:10 +00:00
|
|
|
|
|
|
|
|
|
fail_data_pages:
|
|
|
|
|
for (i--; i >= 0; i--)
|
|
|
|
|
free_page((unsigned long)data->data_pages[i]);
|
|
|
|
|
|
|
|
|
|
free_page((unsigned long)data->user_page);
|
|
|
|
|
|
|
|
|
|
fail_user_page:
|
|
|
|
|
kfree(data);
|
|
|
|
|
|
|
|
|
|
fail:
|
|
|
|
|
return -ENOMEM;
|
|
|
|
|
}
|
|
|
|
|
|
2009-03-25 18:39:37 +00:00
|
|
|
|
static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

static void __perf_mmap_data_free(struct rcu_head *rcu_head)
{
	struct perf_mmap_data *data;
	int i;

	data = container_of(rcu_head, struct perf_mmap_data, rcu_head);

	perf_mmap_free_page((unsigned long)data->user_page);
	for (i = 0; i < data->nr_pages; i++)
		perf_mmap_free_page((unsigned long)data->data_pages[i]);

	kfree(data);
}

static void perf_mmap_data_free(struct perf_counter *counter)
{
	struct perf_mmap_data *data = counter->data;

	WARN_ON(atomic_read(&counter->mmap_count));

	rcu_assign_pointer(counter->data, NULL);
	call_rcu(&data->rcu_head, __perf_mmap_data_free);
}

static void perf_mmap_open(struct vm_area_struct *vma)
{
	struct perf_counter *counter = vma->vm_file->private_data;

	atomic_inc(&counter->mmap_count);
}

static void perf_mmap_close(struct vm_area_struct *vma)
{
	struct perf_counter *counter = vma->vm_file->private_data;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) {
		struct user_struct *user = current_user();

		atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
		vma->vm_mm->locked_vm -= counter->data->nr_locked;
		perf_mmap_data_free(counter);
		mutex_unlock(&counter->mmap_mutex);
	}
}

static struct vm_operations_struct perf_mmap_vmops = {
	.open		= perf_mmap_open,
	.close		= perf_mmap_close,
	.fault		= perf_mmap_fault,
	.page_mkwrite	= perf_mmap_fault,
};

static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct perf_counter *counter = file->private_data;
	unsigned long user_locked, user_lock_limit;
	struct user_struct *user = current_user();
	unsigned long locked, lock_limit;
	unsigned long vma_size;
	unsigned long nr_pages;
	long user_extra, extra;
	int ret = 0;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma_size = vma->vm_end - vma->vm_start;
	nr_pages = (vma_size / PAGE_SIZE) - 1;

	/*
	 * If we have data pages ensure they're a power-of-two number, so we
	 * can do bitmasks instead of modulo.
	 */
	if (nr_pages != 0 && !is_power_of_2(nr_pages))
		return -EINVAL;

	if (vma_size != PAGE_SIZE * (1 + nr_pages))
		return -EINVAL;

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	WARN_ON_ONCE(counter->ctx->parent_ctx);
	mutex_lock(&counter->mmap_mutex);
	if (atomic_inc_not_zero(&counter->mmap_count)) {
		if (nr_pages != counter->data->nr_pages)
			ret = -EINVAL;
		goto unlock;
	}

	user_extra = nr_pages + 1;
	user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);

	/*
	 * Increase the limit linearly with more CPUs:
	 */
	user_lock_limit *= num_online_cpus();

	user_locked = atomic_long_read(&user->locked_vm) + user_extra;

	extra = 0;
	if (user_locked > user_lock_limit)
		extra = user_locked - user_lock_limit;

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;
	locked = vma->vm_mm->locked_vm + extra;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -EPERM;
		goto unlock;
	}

	WARN_ON(counter->data);
	ret = perf_mmap_data_alloc(counter, nr_pages);
	if (ret)
		goto unlock;

	atomic_set(&counter->mmap_count, 1);
	atomic_long_add(user_extra, &user->locked_vm);
	vma->vm_mm->locked_vm += extra;
	counter->data->nr_locked = extra;
	if (vma->vm_flags & VM_WRITE)
		counter->data->writable = 1;

unlock:
	mutex_unlock(&counter->mmap_mutex);

	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &perf_mmap_vmops;

	return ret;
}

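/*
 * A minimal user-space sketch of a mapping that satisfies the checks in
 * perf_mmap() above (illustrative only; "fd" is assumed to be a perf
 * counter file descriptor and nr_pages must be a power of two):
 *
 *	size_t page_size = sysconf(_SC_PAGESIZE);
 *	size_t nr_pages  = 8;
 *	void *base = mmap(NULL, (nr_pages + 1) * page_size,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * base points at the control page; base + page_size is the start of the
 * data pages. Mapping with PROT_WRITE is what ends up setting
 * data->writable above.
 */
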
static int perf_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct perf_counter *counter = filp->private_data;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &counter->fasync);
	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}

static const struct file_operations perf_fops = {
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
	.unlocked_ioctl		= perf_ioctl,
	.compat_ioctl		= perf_ioctl,
	.mmap			= perf_mmap,
	.fasync			= perf_fasync,
};

/*
 * Perf counter wakeup
 *
 * If there's data, ensure we set the poll() state and publish everything
 * to user-space before waking everybody up.
 */

void perf_counter_wakeup(struct perf_counter *counter)
{
	wake_up_all(&counter->waitq);

	if (counter->pending_kill) {
		kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
		counter->pending_kill = 0;
	}
}

/*
 * Pending wakeups
 *
 * Handle the case where we need to wake up from NMI (or rq->lock) context.
 *
 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
 * single linked list and use cmpxchg() to add entries lockless.
 */

static void perf_pending_counter(struct perf_pending_entry *entry)
{
	struct perf_counter *counter = container_of(entry,
			struct perf_counter, pending);

	if (counter->pending_disable) {
		counter->pending_disable = 0;
		__perf_counter_disable(counter);
	}

	if (counter->pending_wakeup) {
		counter->pending_wakeup = 0;
		perf_counter_wakeup(counter);
	}
}

#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)

static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
	PENDING_TAIL,
};

static void perf_pending_queue(struct perf_pending_entry *entry,
			       void (*func)(struct perf_pending_entry *))
{
	struct perf_pending_entry **head;

	if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
		return;

	entry->func = func;

	head = &get_cpu_var(perf_pending_head);

	do {
		entry->next = *head;
	} while (cmpxchg(head, entry->next, entry) != entry->next);

	set_perf_counter_pending();

	put_cpu_var(perf_pending_head);
}

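/*
 * Sketch of the push pattern used above (descriptive only, generic names):
 * PENDING_TAIL terminates the list while a NULL ->next means "not queued",
 * so the first cmpxchg() both claims the entry and terminates it, and the
 * loop is the usual lock-free stack push:
 *
 *	do {
 *		new->next = *head;
 *	} while (cmpxchg(head, new->next, new) != new->next);
 */
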
static int __perf_pending_run(void)
{
	struct perf_pending_entry *list;
	int nr = 0;

	list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
	while (list != PENDING_TAIL) {
		void (*func)(struct perf_pending_entry *);
		struct perf_pending_entry *entry = list;

		list = list->next;

		func = entry->func;
		entry->next = NULL;
		/*
		 * Ensure we observe the unqueue before we issue the wakeup,
		 * so that we won't be waiting forever.
		 * -- see perf_not_pending().
		 */
		smp_wmb();

		func(entry);
		nr++;
	}

	return nr;
}

static inline int perf_not_pending(struct perf_counter *counter)
{
	/*
	 * If we flush on whatever cpu we run, there is a chance we don't
	 * need to wait.
	 */
	get_cpu();
	__perf_pending_run();
	put_cpu();

	/*
	 * Ensure we see the proper queue state before going to sleep
	 * so that we do not miss the wakeup. -- see perf_pending_handle()
	 */
	smp_rmb();
	return counter->pending.next == NULL;
}

static void perf_pending_sync(struct perf_counter *counter)
{
	wait_event(counter->waitq, perf_not_pending(counter));
}

void perf_counter_do_pending(void)
{
	__perf_pending_run();
}

/*
 * Callchain support -- arch specific
 */

__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	return NULL;
}

/*
 * Output
 */

struct perf_output_handle {
	struct perf_counter	*counter;
	struct perf_mmap_data	*data;
	unsigned long		head;
	unsigned long		offset;
	int			nmi;
	int			sample;
	int			locked;
	unsigned long		flags;
};

static bool perf_output_space(struct perf_mmap_data *data,
			      unsigned int offset, unsigned int head)
{
	unsigned long tail;
	unsigned long mask;

	if (!data->writable)
		return true;

	mask = (data->nr_pages << PAGE_SHIFT) - 1;

	/*
	 * Userspace could choose to issue a mb() before updating the tail
	 * pointer, so that all reads are completed before the write is
	 * issued.
	 */
	tail = ACCESS_ONCE(data->user_page->data_tail);
	smp_rmb();

	offset = (offset - tail) & mask;
	head   = (head   - tail) & mask;

	if ((int)(head - offset) < 0)
		return false;

	return true;
}

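/*
 * A minimal sketch of the matching user-space consumer assumed by the check
 * above (illustrative only; "up" is the mmap()ed control page and "data"
 * the first data page):
 *
 *	u64 head = up->data_head;
 *	rmb();				// pairs with the publish in perf_output_unlock()
 *	consume(data, up->data_tail, head);
 *	mb();				// finish reads before freeing the space
 *	up->data_tail = head;		// read back by perf_output_space()
 */
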
static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->data->poll, POLL_IN);

	if (handle->nmi) {
		handle->counter->pending_wakeup = 1;
		perf_pending_queue(&handle->counter->pending,
				   perf_pending_counter);
	} else
		perf_counter_wakeup(handle->counter);
}

/*
 * Curious locking construct.
 *
 * We need to ensure a later event doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * What we do is serialize between CPUs so we only have to deal with NMI
 * nesting on a single CPU.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_lock(struct perf_output_handle *handle)
{
	struct perf_mmap_data *data = handle->data;
	int cpu;

	handle->locked = 0;

	local_irq_save(handle->flags);
	cpu = smp_processor_id();

	if (in_nmi() && atomic_read(&data->lock) == cpu)
		return;

	while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
		cpu_relax();

	handle->locked = 1;
}

static void perf_output_unlock(struct perf_output_handle *handle)
{
	struct perf_mmap_data *data = handle->data;
	unsigned long head;
	int cpu;

	data->done_head = data->head;

	if (!handle->locked)
		goto out;

again:
	/*
	 * The xchg implies a full barrier that ensures all writes are done
	 * before we publish the new head, matched by a rmb() in userspace when
	 * reading this position.
	 */
	while ((head = atomic_long_xchg(&data->done_head, 0)))
		data->user_page->data_head = head;

	/*
	 * NMI can happen here, which means we can miss a done_head update.
	 */

	cpu = atomic_xchg(&data->lock, -1);
	WARN_ON_ONCE(cpu != smp_processor_id());

	/*
	 * Therefore we have to validate we did not indeed do so.
	 */
	if (unlikely(atomic_long_read(&data->done_head))) {
		/*
		 * Since we had it locked, we can lock it again.
		 */
		while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
			cpu_relax();

		goto again;
	}

	if (atomic_xchg(&data->wakeup, 0))
		perf_output_wakeup(handle);
out:
	local_irq_restore(handle->flags);
}

static void perf_output_copy(struct perf_output_handle *handle,
			     const void *buf, unsigned int len)
{
	unsigned int pages_mask;
	unsigned int offset;
	unsigned int size;
	void **pages;

	offset		= handle->offset;
	pages_mask	= handle->data->nr_pages - 1;
	pages		= handle->data->data_pages;

	do {
		unsigned int page_offset;
		int nr;

		nr	    = (offset >> PAGE_SHIFT) & pages_mask;
		page_offset = offset & (PAGE_SIZE - 1);
		size	    = min_t(unsigned int, PAGE_SIZE - page_offset, len);

		memcpy(pages[nr] + page_offset, buf, size);

		len	-= size;
		buf	+= size;
		offset	+= size;
	} while (len);

	handle->offset = offset;

	/*
	 * Check we didn't copy past our reservation window, taking the
	 * possible unsigned int wrap into account.
	 */
	WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
}

#define perf_output_put(handle, x) \
	perf_output_copy((handle), &(x), sizeof(x))

static int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_counter *counter, unsigned int size,
			     int nmi, int sample)
{
	struct perf_mmap_data *data;
	unsigned int offset, head;
	int have_lost;
	struct {
		struct perf_event_header header;
		u64			 id;
		u64			 lost;
	} lost_event;

	/*
	 * For inherited counters we send all the output towards the parent.
	 */
	if (counter->parent)
		counter = counter->parent;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (!data)
		goto out;

	handle->data	= data;
	handle->counter	= counter;
	handle->nmi	= nmi;
	handle->sample	= sample;

	if (!data->nr_pages)
		goto fail;

	have_lost = atomic_read(&data->lost);
	if (have_lost)
		size += sizeof(lost_event);

	perf_output_lock(handle);

	do {
		offset = head = atomic_long_read(&data->head);
		head += size;
		if (unlikely(!perf_output_space(data, offset, head)))
			goto fail;
	} while (atomic_long_cmpxchg(&data->head, offset, head) != offset);

	handle->offset	= offset;
	handle->head	= head;

	if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
		atomic_set(&data->wakeup, 1);

	if (have_lost) {
		lost_event.header.type = PERF_EVENT_LOST;
		lost_event.header.misc = 0;
		lost_event.header.size = sizeof(lost_event);
		lost_event.id          = counter->id;
		lost_event.lost        = atomic_xchg(&data->lost, 0);

		perf_output_put(handle, lost_event);
	}

	return 0;

fail:
	atomic_inc(&data->lost);
	perf_output_unlock(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}

static void perf_output_end(struct perf_output_handle *handle)
{
	struct perf_counter *counter = handle->counter;
	struct perf_mmap_data *data = handle->data;

	int wakeup_events = counter->attr.wakeup_events;

	if (handle->sample && wakeup_events) {
		int events = atomic_inc_return(&data->events);
		if (events >= wakeup_events) {
			atomic_sub(wakeup_events, &data->events);
			atomic_set(&data->wakeup, 1);
		}
	}

	perf_output_unlock(handle);
	rcu_read_unlock();
}

static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p)
{
	/*
	 * only top level counters have the pid namespace they were created in
	 */
	if (counter->parent)
		counter = counter->parent;

	return task_tgid_nr_ns(p, counter->ns);
}

static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
{
	/*
	 * only top level counters have the pid namespace they were created in
	 */
	if (counter->parent)
		counter = counter->parent;

	return task_pid_nr_ns(p, counter->ns);
}

static void perf_output_read_one(struct perf_output_handle *handle,
				 struct perf_counter *counter)
{
	u64 read_format = counter->attr.read_format;
	u64 values[4];
	int n = 0;

	values[n++] = atomic64_read(&counter->count);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
		values[n++] = counter->total_time_enabled +
			atomic64_read(&counter->child_total_time_enabled);
	}
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
		values[n++] = counter->total_time_running +
			atomic64_read(&counter->child_total_time_running);
	}
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_counter_id(counter);

	perf_output_copy(handle, values, n * sizeof(u64));
}

/*
 * XXX PERF_FORMAT_GROUP vs inherited counters seems difficult.
 */
static void perf_output_read_group(struct perf_output_handle *handle,
				   struct perf_counter *counter)
{
	struct perf_counter *leader = counter->group_leader, *sub;
	u64 read_format = counter->attr.read_format;
	u64 values[5];
	int n = 0;

	values[n++] = 1 + leader->nr_siblings;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = leader->total_time_enabled;

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = leader->total_time_running;

	if (leader != counter)
		leader->pmu->read(leader);

	values[n++] = atomic64_read(&leader->count);
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_counter_id(leader);

	perf_output_copy(handle, values, n * sizeof(u64));

	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
		n = 0;

		if (sub != counter)
			sub->pmu->read(sub);

		values[n++] = atomic64_read(&sub->count);
		if (read_format & PERF_FORMAT_ID)
			values[n++] = primary_counter_id(sub);

		perf_output_copy(handle, values, n * sizeof(u64));
	}
}

static void perf_output_read(struct perf_output_handle *handle,
			     struct perf_counter *counter)
{
	if (counter->attr.read_format & PERF_FORMAT_GROUP)
		perf_output_read_group(handle, counter);
	else
		perf_output_read_one(handle, counter);
}

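/*
 * Resulting layout, e.g. for read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_ID on a non-group counter (descriptive only):
 *
 *	u64 value;
 *	u64 time_enabled;
 *	u64 id;
 *
 * With PERF_FORMAT_GROUP the record instead starts with nr (1 + number of
 * siblings) and the optional times, followed by one { value [, id] } pair
 * per group member, leader first.
 */
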
void perf_counter_output(struct perf_counter *counter, int nmi,
				struct perf_sample_data *data)
{
	int ret;
	u64 sample_type = counter->attr.sample_type;
	struct perf_output_handle handle;
	struct perf_event_header header;
	u64 ip;
	struct {
		u32 pid, tid;
	} tid_entry;
	struct perf_callchain_entry *callchain = NULL;
	int callchain_size = 0;
	u64 time;
	struct {
		u32 cpu, reserved;
	} cpu_entry;

	header.type = PERF_EVENT_SAMPLE;
	header.size = sizeof(header);

	header.misc = 0;
	header.misc |= perf_misc_flags(data->regs);

	if (sample_type & PERF_SAMPLE_IP) {
		ip = perf_instruction_pointer(data->regs);
		header.size += sizeof(ip);
	}

	if (sample_type & PERF_SAMPLE_TID) {
		/* namespace issues */
		tid_entry.pid = perf_counter_pid(counter, current);
		tid_entry.tid = perf_counter_tid(counter, current);

		header.size += sizeof(tid_entry);
	}

	if (sample_type & PERF_SAMPLE_TIME) {
		/*
		 * Maybe do better on x86 and provide cpu_clock_nmi()
		 */
		time = sched_clock();

		header.size += sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_ADDR)
		header.size += sizeof(u64);

	if (sample_type & PERF_SAMPLE_ID)
		header.size += sizeof(u64);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		header.size += sizeof(u64);

	if (sample_type & PERF_SAMPLE_CPU) {
		header.size += sizeof(cpu_entry);

		cpu_entry.cpu = raw_smp_processor_id();
		cpu_entry.reserved = 0;
	}

	if (sample_type & PERF_SAMPLE_PERIOD)
		header.size += sizeof(u64);

	if (sample_type & PERF_SAMPLE_READ)
		header.size += perf_counter_read_size(counter);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		callchain = perf_callchain(data->regs);

		if (callchain) {
			callchain_size = (1 + callchain->nr) * sizeof(u64);
			header.size += callchain_size;
		} else
			header.size += sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		int size = sizeof(u32);

		if (data->raw)
			size += data->raw->size;
		else
			size += sizeof(u32);

		WARN_ON_ONCE(size & (sizeof(u64)-1));
		header.size += size;
	}

	ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
	if (ret)
		return;

	perf_output_put(&handle, header);

	if (sample_type & PERF_SAMPLE_IP)
		perf_output_put(&handle, ip);

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(&handle, tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(&handle, time);

	if (sample_type & PERF_SAMPLE_ADDR)
		perf_output_put(&handle, data->addr);

	if (sample_type & PERF_SAMPLE_ID) {
		u64 id = primary_counter_id(counter);

		perf_output_put(&handle, id);
	}

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		perf_output_put(&handle, counter->id);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(&handle, cpu_entry);

	if (sample_type & PERF_SAMPLE_PERIOD)
		perf_output_put(&handle, data->period);

	if (sample_type & PERF_SAMPLE_READ)
		perf_output_read(&handle, counter);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		if (callchain)
			perf_output_copy(&handle, callchain, callchain_size);
		else {
			u64 nr = 0;
			perf_output_put(&handle, nr);
		}
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		if (data->raw) {
			perf_output_put(&handle, data->raw->size);
			perf_output_copy(&handle, data->raw->data, data->raw->size);
		} else {
			struct {
				u32	size;
				u32	data;
			} raw = {
				.size = sizeof(u32),
				.data = 0,
			};
			perf_output_put(&handle, raw);
		}
	}

	perf_output_end(&handle);
}

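/*
 * The fields of a PERF_EVENT_SAMPLE record appear in the buffer in the same
 * order as the size computation and output calls above; e.g. for
 * sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD the
 * record body is (descriptive only):
 *
 *	struct perf_event_header header;
 *	u64 ip;
 *	u32 pid, tid;
 *	u64 period;
 */
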
/*
 * read event
 */

struct perf_read_event {
	struct perf_event_header	header;

	u32				pid;
	u32				tid;
};

static void
perf_counter_read_event(struct perf_counter *counter,
			struct task_struct *task)
{
	struct perf_output_handle handle;
	struct perf_read_event event = {
		.header = {
			.type = PERF_EVENT_READ,
			.misc = 0,
			.size = sizeof(event) + perf_counter_read_size(counter),
		},
		.pid = perf_counter_pid(counter, task),
		.tid = perf_counter_tid(counter, task),
	};
	int ret;

	ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
	if (ret)
		return;

	perf_output_put(&handle, event);
	perf_output_read(&handle, counter);

	perf_output_end(&handle);
}

/*
 * task tracking -- fork/exit
 *
 * enabled by: attr.comm | attr.mmap | attr.task
 */

struct perf_task_event {
	struct task_struct		*task;
	struct perf_counter_context	*task_ctx;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				ppid;
		u32				tid;
		u32				ptid;
	} event;
};

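/*
 * A minimal user-space sketch of requesting these side-band records
 * (illustrative only; error handling omitted and the raw syscall is shown
 * via syscall(2)):
 *
 *	struct perf_counter_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *		.comm	= 1,	// PERF_EVENT_COMM
 *		.mmap	= 1,	// PERF_EVENT_MMAP
 *		.task	= 1,	// PERF_EVENT_FORK and PERF_EVENT_EXIT
 *	};
 *	int fd = syscall(__NR_perf_counter_open, &attr, pid, -1, -1, 0UL);
 */
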
static void perf_counter_task_output(struct perf_counter *counter,
				     struct perf_task_event *task_event)
{
	struct perf_output_handle handle;
	int size = task_event->event.header.size;
	struct task_struct *task = task_event->task;
	int ret = perf_output_begin(&handle, counter, size, 0, 0);

	if (ret)
		return;

	task_event->event.pid = perf_counter_pid(counter, task);
	task_event->event.ppid = perf_counter_pid(counter, current);

	task_event->event.tid = perf_counter_tid(counter, task);
	task_event->event.ptid = perf_counter_tid(counter, current);

	perf_output_put(&handle, task_event->event);
	perf_output_end(&handle);
}

static int perf_counter_task_match(struct perf_counter *counter)
{
	if (counter->attr.comm || counter->attr.mmap || counter->attr.task)
		return 1;

	return 0;
}

static void perf_counter_task_ctx(struct perf_counter_context *ctx,
				  struct perf_task_event *task_event)
{
	struct perf_counter *counter;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
		if (perf_counter_task_match(counter))
			perf_counter_task_output(counter, task_event);
	}
	rcu_read_unlock();
}

static void perf_counter_task_event(struct perf_task_event *task_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx = task_event->task_ctx;

	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_counter_task_ctx(&cpuctx->ctx, task_event);
	put_cpu_var(perf_cpu_context);

	rcu_read_lock();
	if (!ctx)
		ctx = rcu_dereference(task_event->task->perf_counter_ctxp);
	if (ctx)
		perf_counter_task_ctx(ctx, task_event);
	rcu_read_unlock();
}

static void perf_counter_task(struct task_struct *task,
			      struct perf_counter_context *task_ctx,
			      int new)
{
	struct perf_task_event task_event;

	if (!atomic_read(&nr_comm_counters) &&
	    !atomic_read(&nr_mmap_counters) &&
	    !atomic_read(&nr_task_counters))
		return;

	task_event = (struct perf_task_event){
		.task	  = task,
		.task_ctx = task_ctx,
		.event    = {
			.header = {
				.type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT,
				.misc = 0,
				.size = sizeof(task_event.event),
			},
			/* .pid  */
			/* .ppid */
			/* .tid  */
			/* .ptid */
		},
	};

	perf_counter_task_event(&task_event);
}

void perf_counter_fork(struct task_struct *task)
{
	perf_counter_task(task, NULL, 1);
}

/*
 * comm tracking
 */

struct perf_comm_event {
	struct task_struct	*task;
	char			*comm;
	int			comm_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
	} event;
};

static void perf_counter_comm_output(struct perf_counter *counter,
				     struct perf_comm_event *comm_event)
{
	struct perf_output_handle handle;
	int size = comm_event->event.header.size;
	int ret = perf_output_begin(&handle, counter, size, 0, 0);

	if (ret)
		return;

	comm_event->event.pid = perf_counter_pid(counter, comm_event->task);
	comm_event->event.tid = perf_counter_tid(counter, comm_event->task);

	perf_output_put(&handle, comm_event->event);
	perf_output_copy(&handle, comm_event->comm,
				   comm_event->comm_size);
	perf_output_end(&handle);
}

static int perf_counter_comm_match(struct perf_counter *counter)
{
	if (counter->attr.comm)
		return 1;

	return 0;
}

static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
				  struct perf_comm_event *comm_event)
{
	struct perf_counter *counter;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
		if (perf_counter_comm_match(counter))
			perf_counter_comm_output(counter, comm_event);
	}
	rcu_read_unlock();
}

static void perf_counter_comm_event(struct perf_comm_event *comm_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;
	unsigned int size;
	char comm[TASK_COMM_LEN];

	memset(comm, 0, sizeof(comm));
	strncpy(comm, comm_event->task->comm, sizeof(comm));
	size = ALIGN(strlen(comm)+1, sizeof(u64));

	comm_event->comm = comm;
	comm_event->comm_size = size;

	comm_event->event.header.size = sizeof(comm_event->event) + size;

	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
	put_cpu_var(perf_cpu_context);

	rcu_read_lock();
	/*
	 * doesn't really matter which of the child contexts the
	 * event ends up in.
	 */
	ctx = rcu_dereference(current->perf_counter_ctxp);
	if (ctx)
		perf_counter_comm_ctx(ctx, comm_event);
	rcu_read_unlock();
}

void perf_counter_comm(struct task_struct *task)
{
	struct perf_comm_event comm_event;

	if (task->perf_counter_ctxp)
		perf_counter_enable_on_exec(task);

	if (!atomic_read(&nr_comm_counters))
		return;

	comm_event = (struct perf_comm_event){
		.task	= task,
		/* .comm      */
		/* .comm_size */
		.event  = {
			.header = {
				.type = PERF_EVENT_COMM,
				.misc = 0,
				/* .size */
			},
			/* .pid */
			/* .tid */
		},
	};

	perf_counter_comm_event(&comm_event);
}

/*
|
|
|
|
|
* mmap tracking
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
struct perf_mmap_event {
|
2009-06-05 12:04:55 +00:00
|
|
|
|
struct vm_area_struct *vma;
|
|
|
|
|
|
|
|
|
|
const char *file_name;
|
|
|
|
|
int file_size;
|
2009-03-30 17:07:05 +00:00
|
|
|
|
|
|
|
|
|
struct {
|
|
|
|
|
struct perf_event_header header;
|
|
|
|
|
|
|
|
|
|
u32 pid;
|
|
|
|
|
u32 tid;
|
|
|
|
|
u64 start;
|
|
|
|
|
u64 len;
|
|
|
|
|
u64 pgoff;
|
|
|
|
|
} event;
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
static void perf_counter_mmap_output(struct perf_counter *counter,
|
|
|
|
|
struct perf_mmap_event *mmap_event)
|
|
|
|
|
{
|
|
|
|
|
struct perf_output_handle handle;
|
|
|
|
|
int size = mmap_event->event.header.size;
|
2009-04-06 09:45:09 +00:00
|
|
|
|
int ret = perf_output_begin(&handle, counter, size, 0, 0);
|
2009-03-30 17:07:05 +00:00
|
|
|
|
|
|
|
|
|
if (ret)
|
|
|
|
|
return;
|
|
|
|
|
|
2009-06-02 12:13:15 +00:00
|
|
|
|
mmap_event->event.pid = perf_counter_pid(counter, current);
|
|
|
|
|
mmap_event->event.tid = perf_counter_tid(counter, current);
|
|
|
|
|
|
2009-03-30 17:07:05 +00:00
|
|
|
|
perf_output_put(&handle, mmap_event->event);
|
|
|
|
|
perf_output_copy(&handle, mmap_event->file_name,
|
|
|
|
|
mmap_event->file_size);
|
2009-03-30 17:07:11 +00:00
|
|
|
|
perf_output_end(&handle);
|
2009-03-30 17:07:05 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int perf_counter_mmap_match(struct perf_counter *counter,
|
|
|
|
|
struct perf_mmap_event *mmap_event)
|
|
|
|
|
{
|
2009-06-04 15:08:58 +00:00
|
|
|
|
if (counter->attr.mmap)
|
2009-03-30 17:07:05 +00:00
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
|
|
|
|
|
struct perf_mmap_event *mmap_event)
|
|
|
|
|
{
|
|
|
|
|
struct perf_counter *counter;
|
|
|
|
|
|
|
|
|
|
if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
|
list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
|
|
|
|
|
if (perf_counter_mmap_match(counter, mmap_event))
|
|
|
|
|
perf_counter_mmap_output(counter, mmap_event);
|
|
|
|
|
}
|
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
|
|
|
|
|
{
|
|
|
|
|
struct perf_cpu_context *cpuctx;
|
2009-05-29 12:51:57 +00:00
|
|
|
|
struct perf_counter_context *ctx;
|
2009-06-05 12:04:55 +00:00
|
|
|
|
struct vm_area_struct *vma = mmap_event->vma;
|
|
|
|
|
struct file *file = vma->vm_file;
|
2009-03-30 17:07:05 +00:00
|
|
|
|
unsigned int size;
|
|
|
|
|
char tmp[16];
|
|
|
|
|
char *buf = NULL;
|
2009-06-05 12:04:55 +00:00
|
|
|
|
const char *name;
|
2009-03-30 17:07:05 +00:00
|
|
|
|
|
2009-07-16 13:15:52 +00:00
|
|
|
|
memset(tmp, 0, sizeof(tmp));
|
|
|
|
|
|
2009-03-30 17:07:05 +00:00
|
|
|
|
if (file) {
|
2009-07-16 13:15:52 +00:00
|
|
|
|
/*
|
|
|
|
|
* d_path works from the end of the buffer backwards, so we
|
|
|
|
|
* need to add enough zero bytes after the string to handle
|
|
|
|
|
* the 64bit alignment we do later.
|
|
|
|
|
*/
|
|
|
|
|
buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
|
2009-03-30 17:07:05 +00:00
|
|
|
|
if (!buf) {
|
|
|
|
|
name = strncpy(tmp, "//enomem", sizeof(tmp));
|
|
|
|
|
goto got_name;
|
|
|
|
|
}
|
2009-04-09 08:53:46 +00:00
|
|
|
|
name = d_path(&file->f_path, buf, PATH_MAX);
|
2009-03-30 17:07:05 +00:00
|
|
|
|
if (IS_ERR(name)) {
|
|
|
|
|
name = strncpy(tmp, "//toolong", sizeof(tmp));
|
|
|
|
|
goto got_name;
|
|
|
|
|
}
|
|
|
|
|
} else {
|
2009-07-16 13:15:52 +00:00
|
|
|
|
if (arch_vma_name(mmap_event->vma)) {
|
|
|
|
|
name = strncpy(tmp, arch_vma_name(mmap_event->vma),
|
|
|
|
|
sizeof(tmp));
|
2009-06-05 12:04:55 +00:00
|
|
|
|
goto got_name;
|
2009-07-16 13:15:52 +00:00
|
|
|
|
}
|
2009-06-05 12:04:55 +00:00
|
|
|
|
|
|
|
|
|
if (!vma->vm_mm) {
|
|
|
|
|
name = strncpy(tmp, "[vdso]", sizeof(tmp));
|
|
|
|
|
goto got_name;
|
|
|
|
|
}
|
|
|
|
|
|
2009-03-30 17:07:05 +00:00
|
|
|
|
name = strncpy(tmp, "//anon", sizeof(tmp));
|
|
|
|
|
goto got_name;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
got_name:
|
2009-04-09 07:48:22 +00:00
|
|
|
|
size = ALIGN(strlen(name)+1, sizeof(u64));
|
2009-03-30 17:07:05 +00:00
|
|
|
|
|
|
|
|
|
mmap_event->file_name = name;
|
|
|
|
|
mmap_event->file_size = size;
|
|
|
|
|
|
|
|
|
|
mmap_event->event.header.size = sizeof(mmap_event->event) + size;
|
|
|
|
|
|
|
|
|
|
cpuctx = &get_cpu_var(perf_cpu_context);
|
|
|
|
|
perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
|
|
|
|
|
put_cpu_var(perf_cpu_context);
|
|
|
|
|
|
2009-05-29 12:51:57 +00:00
|
|
|
|
rcu_read_lock();
|
|
|
|
|
/*
|
|
|
|
|
* doesn't really matter which of the child contexts the
|
|
|
|
|
* events end up in.
|
|
|
|
|
*/
|
|
|
|
|
ctx = rcu_dereference(current->perf_counter_ctxp);
|
|
|
|
|
if (ctx)
|
|
|
|
|
perf_counter_mmap_ctx(ctx, mmap_event);
|
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
|
2009-03-30 17:07:05 +00:00
|
|
|
|
kfree(buf);
|
|
|
|
|
}
|
|
|
|
|
|
2009-06-05 12:04:55 +00:00
|
|
|
|
void __perf_counter_mmap(struct vm_area_struct *vma)
|
2009-03-30 17:07:05 +00:00
|
|
|
|
{
|
2009-04-09 08:53:44 +00:00
|
|
|
|
struct perf_mmap_event mmap_event;
|
|
|
|
|
|
2009-06-04 14:53:44 +00:00
|
|
|
|
if (!atomic_read(&nr_mmap_counters))
|
2009-04-09 08:53:44 +00:00
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
mmap_event = (struct perf_mmap_event){
|
2009-06-05 12:04:55 +00:00
|
|
|
|
.vma = vma,
|
2009-07-22 09:13:50 +00:00
|
|
|
|
/* .file_name */
|
|
|
|
|
/* .file_size */
|
2009-03-30 17:07:05 +00:00
|
|
|
|
.event = {
|
2009-07-22 09:13:50 +00:00
|
|
|
|
.header = {
|
|
|
|
|
.type = PERF_EVENT_MMAP,
|
|
|
|
|
.misc = 0,
|
|
|
|
|
/* .size */
|
|
|
|
|
},
|
|
|
|
|
/* .pid */
|
|
|
|
|
/* .tid */
|
2009-06-05 12:04:55 +00:00
|
|
|
|
.start = vma->vm_start,
|
|
|
|
|
.len = vma->vm_end - vma->vm_start,
|
|
|
|
|
.pgoff = vma->vm_pgoff,
|
2009-03-30 17:07:05 +00:00
|
|
|
|
},
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
perf_counter_mmap_event(&mmap_event);
|
|
|
|
|
}
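The PERF_EVENT_MMAP record assembled above carries the header, pid/tid, the mapping's start address, length and page offset, followed by the zero-padded file name (its size rounded up to a u64 boundary by the ALIGN() above). As a rough illustration only — the struct and function below are hypothetical, written against the 2009-era perf_counter.h layout — a user-space reader of the ring buffer could decode such a record along these lines:

#include <stdio.h>
#include <linux/perf_counter.h>

/* mirrors the layout written by perf_counter_mmap_output() above */
struct mmap_record {
        struct perf_event_header header;
        __u32 pid, tid;
        __u64 start, len, pgoff;
        char  filename[];               /* NUL-padded up to a u64 boundary */
};

static void handle_mmap_record(const struct perf_event_header *hdr)
{
        const struct mmap_record *rec = (const void *)hdr;

        if (hdr->type != PERF_EVENT_MMAP)
                return;

        printf("pid %u mapped %s at %#llx (%llu bytes, pgoff %llu)\n",
               rec->pid, rec->filename,
               (unsigned long long)rec->start,
               (unsigned long long)rec->len,
               (unsigned long long)rec->pgoff);
}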
|
|
|
|
|
|
2009-05-25 15:39:05 +00:00
|
|
|
|
/*
|
|
|
|
|
* IRQ throttle logging
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static void perf_log_throttle(struct perf_counter *counter, int enable)
|
|
|
|
|
{
|
|
|
|
|
struct perf_output_handle handle;
|
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
|
|
struct {
|
|
|
|
|
struct perf_event_header header;
|
|
|
|
|
u64 time;
|
2009-06-11 12:57:55 +00:00
|
|
|
|
u64 id;
|
perf_counter: PERF_SAMPLE_ID and inherited counters
Anton noted that for inherited counters the counter-id as provided by
PERF_SAMPLE_ID isn't mappable to the id found through PERF_RECORD_ID
because each inherited counter gets its own id.
His suggestion was to always return the parent counter id, since that
is the primary counter id as exposed. However, these inherited
counters have a unique identifier so that events like
PERF_EVENT_PERIOD and PERF_EVENT_THROTTLE can be specific about which
counter gets modified, which is important when trying to normalize the
sample streams.
This patch removes PERF_EVENT_PERIOD in favour of PERF_SAMPLE_PERIOD,
which is more useful anyway, since changing periods became a lot more
common than initially thought -- rendering PERF_EVENT_PERIOD the less
useful solution (also, PERF_SAMPLE_PERIOD reports the more accurate
value, since it reports the value used to trigger the overflow,
whereas PERF_EVENT_PERIOD simply reports the requested period changed,
which might only take effect on the next cycle).
This still leaves us PERF_EVENT_THROTTLE to consider, but since that
_should_ be a rare occurrence, and linking it to a primary id is the
most useful bit to diagnose the problem, we introduce a
PERF_SAMPLE_STREAM_ID, for those few cases where the full
reconstruction is important.
[Does change the ABI a little, but I see no other way out]
Suggested-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1248095846.15751.8781.camel@twins>
2009-07-21 11:19:40 +00:00
|
|
|
|
u64 stream_id;
|
2009-05-25 15:39:05 +00:00
|
|
|
|
} throttle_event = {
|
|
|
|
|
.header = {
|
2009-07-22 13:05:46 +00:00
|
|
|
|
.type = PERF_EVENT_THROTTLE,
|
2009-05-25 15:39:05 +00:00
|
|
|
|
.misc = 0,
|
|
|
|
|
.size = sizeof(throttle_event),
|
|
|
|
|
},
|
2009-07-21 11:19:40 +00:00
|
|
|
|
.time = sched_clock(),
|
|
|
|
|
.id = primary_counter_id(counter),
|
|
|
|
|
.stream_id = counter->id,
|
2009-05-25 15:39:05 +00:00
|
|
|
|
};
|
|
|
|
|
|
2009-07-22 13:05:46 +00:00
|
|
|
|
if (enable)
|
|
|
|
|
throttle_event.header.type = PERF_EVENT_UNTHROTTLE;
|
|
|
|
|
|
2009-05-25 20:03:26 +00:00
|
|
|
|
ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
|
2009-05-25 15:39:05 +00:00
|
|
|
|
if (ret)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
perf_output_put(&handle, throttle_event);
|
|
|
|
|
perf_output_end(&handle);
|
|
|
|
|
}
|
|
|
|
|
|
2009-04-06 09:45:04 +00:00
|
|
|
|
/*
|
2009-03-25 18:39:37 +00:00
|
|
|
|
* Generic counter overflow handling, sampling.
|
2009-04-06 09:45:04 +00:00
|
|
|
|
*/
|
|
|
|
|
|
2009-06-10 19:02:22 +00:00
|
|
|
|
int perf_counter_overflow(struct perf_counter *counter, int nmi,
|
|
|
|
|
struct perf_sample_data *data)
|
2009-04-06 09:45:04 +00:00
|
|
|
|
{
|
2009-04-06 09:45:07 +00:00
|
|
|
|
int events = atomic_read(&counter->event_limit);
|
2009-05-25 15:39:05 +00:00
|
|
|
|
int throttle = counter->pmu->unthrottle != NULL;
|
2009-06-10 11:40:57 +00:00
|
|
|
|
struct hw_perf_counter *hwc = &counter->hw;
|
2009-04-06 09:45:07 +00:00
|
|
|
|
int ret = 0;
|
|
|
|
|
|
2009-05-25 15:39:05 +00:00
|
|
|
|
if (!throttle) {
|
2009-06-10 11:40:57 +00:00
|
|
|
|
hwc->interrupts++;
|
2009-06-03 20:19:36 +00:00
|
|
|
|
} else {
|
2009-06-10 11:40:57 +00:00
|
|
|
|
if (hwc->interrupts != MAX_INTERRUPTS) {
|
|
|
|
|
hwc->interrupts++;
|
2009-06-11 09:25:05 +00:00
|
|
|
|
if (HZ * hwc->interrupts >
|
|
|
|
|
(u64)sysctl_perf_counter_sample_rate) {
|
2009-06-10 11:40:57 +00:00
|
|
|
|
hwc->interrupts = MAX_INTERRUPTS;
|
2009-06-03 20:19:36 +00:00
|
|
|
|
perf_log_throttle(counter, 0);
|
|
|
|
|
ret = 1;
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
/*
|
|
|
|
|
* Keep re-disabling the counter even though on the previous
|
|
|
|
|
* pass we disabled it - just in case we raced with a
|
|
|
|
|
* sched-in and the counter got enabled again:
|
|
|
|
|
*/
|
2009-05-25 15:39:05 +00:00
|
|
|
|
ret = 1;
|
|
|
|
|
}
|
|
|
|
|
}
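/*
 * Worked example of the throttle check above, assuming HZ == 1000 and the
 * default sysctl_perf_counter_sample_rate of 100000: once a counter has taken
 * more than 100000 / 1000 == 100 interrupts within the current tick,
 * HZ * hwc->interrupts exceeds the sample rate, so the counter is marked
 * MAX_INTERRUPTS, a PERF_EVENT_THROTTLE record is logged and
 * perf_counter_overflow() returns 1, suppressing further samples until the
 * counter is unthrottled.
 */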
|
2009-05-15 13:19:28 +00:00
|
|
|
|
|
2009-06-10 11:40:57 +00:00
|
|
|
|
if (counter->attr.freq) {
|
|
|
|
|
u64 now = sched_clock();
|
|
|
|
|
s64 delta = now - hwc->freq_stamp;
|
|
|
|
|
|
|
|
|
|
hwc->freq_stamp = now;
|
|
|
|
|
|
|
|
|
|
if (delta > 0 && delta < TICK_NSEC)
|
|
|
|
|
perf_adjust_period(counter, NSEC_PER_SEC / (int)delta);
|
|
|
|
|
}
|
|
|
|
|
|
2009-05-05 15:50:26 +00:00
|
|
|
|
/*
|
|
|
|
|
* XXX event_limit might not quite work as expected on inherited
|
|
|
|
|
* counters
|
|
|
|
|
*/
|
|
|
|
|
|
2009-04-06 09:45:09 +00:00
|
|
|
|
counter->pending_kill = POLL_IN;
|
2009-04-06 09:45:07 +00:00
|
|
|
|
if (events && atomic_dec_and_test(&counter->event_limit)) {
|
|
|
|
|
ret = 1;
|
2009-04-06 09:45:09 +00:00
|
|
|
|
counter->pending_kill = POLL_HUP;
|
2009-04-06 09:45:07 +00:00
|
|
|
|
if (nmi) {
|
|
|
|
|
counter->pending_disable = 1;
|
|
|
|
|
perf_pending_queue(&counter->pending,
|
|
|
|
|
perf_pending_counter);
|
|
|
|
|
} else
|
|
|
|
|
perf_counter_disable(counter);
|
|
|
|
|
}
|
|
|
|
|
|
2009-06-10 19:02:22 +00:00
|
|
|
|
perf_counter_output(counter, nmi, data);
|
2009-04-06 09:45:07 +00:00
|
|
|
|
return ret;
|
2009-04-06 09:45:04 +00:00
|
|
|
|
}
|
|
|
|
|
|
2009-03-13 11:21:32 +00:00
|
|
|
|
/*
|
|
|
|
|
* Generic software counter infrastructure
|
|
|
|
|
*/
|
|
|
|
|
|
2009-07-22 07:29:32 +00:00
|
|
|
|
/*
|
|
|
|
|
* We directly increment counter->count and keep a second value in
|
|
|
|
|
* counter->hw.period_left to count intervals. This period counter
|
|
|
|
|
* is kept in the range [-sample_period, 0] so that we can use the
|
|
|
|
|
* sign as a trigger.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static u64 perf_swcounter_set_period(struct perf_counter *counter)
|
2009-03-13 11:21:32 +00:00
|
|
|
|
{
|
|
|
|
|
struct hw_perf_counter *hwc = &counter->hw;
|
2009-07-22 07:29:32 +00:00
|
|
|
|
u64 period = hwc->last_period;
|
|
|
|
|
u64 nr, offset;
|
|
|
|
|
s64 old, val;
|
|
|
|
|
|
|
|
|
|
hwc->last_period = hwc->sample_period;
|
2009-03-13 11:21:32 +00:00
|
|
|
|
|
|
|
|
|
again:
|
2009-07-22 07:29:32 +00:00
|
|
|
|
old = val = atomic64_read(&hwc->period_left);
|
|
|
|
|
if (val < 0)
|
|
|
|
|
return 0;
|
2009-03-13 11:21:32 +00:00
|
|
|
|
|
2009-07-22 07:29:32 +00:00
|
|
|
|
nr = div64_u64(period + val, period);
|
|
|
|
|
offset = nr * period;
|
|
|
|
|
val -= offset;
|
|
|
|
|
if (atomic64_cmpxchg(&hwc->period_left, old, val) != old)
|
|
|
|
|
goto again;
|
2009-03-13 11:21:32 +00:00
|
|
|
|
|
2009-07-22 07:29:32 +00:00
|
|
|
|
return nr;
|
2009-03-13 11:21:32 +00:00
|
|
|
|
}
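/*
 * Worked example of the period arithmetic above (numbers are illustrative):
 * with hwc->sample_period == hwc->last_period == 100 and period_left at -30,
 * an event adding nr == 250 in perf_swcounter_add() below pushes period_left
 * up to 220, which is no longer negative and so takes the overflow path.
 * Here val == 220, nr = (100 + 220) / 100 == 3 elapsed periods, offset == 300,
 * and period_left is set back to 220 - 300 == -80; the returned 3 makes
 * perf_swcounter_overflow() emit three overflow records.
 */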
|
|
|
|
|
|
2009-07-22 07:29:32 +00:00
|
|
|
|
static void perf_swcounter_overflow(struct perf_counter *counter,
|
|
|
|
|
int nmi, struct perf_sample_data *data)
|
2009-03-13 11:21:32 +00:00
|
|
|
|
{
|
|
|
|
|
struct hw_perf_counter *hwc = &counter->hw;
|
2009-07-22 07:29:32 +00:00
|
|
|
|
u64 overflow;
|
2009-03-13 11:21:32 +00:00
|
|
|
|
|
2009-07-22 07:29:32 +00:00
|
|
|
|
data->period = counter->hw.last_period;
|
|
|
|
|
overflow = perf_swcounter_set_period(counter);
|
2009-03-13 11:21:32 +00:00
|
|
|
|
|
2009-07-22 07:29:32 +00:00
|
|
|
|
if (hwc->interrupts == MAX_INTERRUPTS)
|
|
|
|
|
return;
|
2009-03-13 11:21:32 +00:00
|
|
|
|
|
2009-07-22 07:29:32 +00:00
|
|
|
|
for (; overflow; overflow--) {
|
|
|
|
|
if (perf_counter_overflow(counter, nmi, data)) {
|
|
|
|
|
/*
|
|
|
|
|
* We inhibit the overflow from happening when
|
|
|
|
|
* hwc->interrupts == MAX_INTERRUPTS.
|
|
|
|
|
*/
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
2009-03-13 11:21:32 +00:00
|
|
|
|
}
|
|
|
|
|
|
2009-07-22 07:29:32 +00:00
|
|
|
|
static void perf_swcounter_unthrottle(struct perf_counter *counter)
|
2009-03-13 11:21:35 +00:00
|
|
|
|
{
|
|
|
|
|
/*
|
2009-07-22 07:29:32 +00:00
|
|
|
|
* Nothing to do, we already reset hwc->interrupts.
|
2009-03-13 11:21:35 +00:00
|
|
|
|
*/
|
2009-07-22 07:29:32 +00:00
|
|
|
|
}
|
2009-03-13 11:21:35 +00:00
|
|
|
|
|
2009-07-22 07:29:32 +00:00
|
|
|
|
static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
|
|
|
|
|
int nmi, struct perf_sample_data *data)
|
|
|
|
|
{
|
|
|
|
|
struct hw_perf_counter *hwc = &counter->hw;
|
2009-03-13 11:21:35 +00:00
|
|
|
|
|
2009-07-22 07:29:32 +00:00
|
|
|
|
atomic64_add(nr, &counter->count);
|
2009-03-13 11:21:35 +00:00
|
|
|
|
|
2009-07-22 07:29:32 +00:00
|
|
|
|
if (!hwc->sample_period)
|
|
|
|
|
return;
|
2009-03-13 11:21:35 +00:00
|
|
|
|
|
2009-07-22 07:29:32 +00:00
|
|
|
|
if (!data->regs)
|
|
|
|
|
return;
|
2009-06-10 19:02:22 +00:00
|
|
|
|
|
2009-07-22 07:29:32 +00:00
|
|
|
|
if (!atomic64_add_negative(nr, &hwc->period_left))
|
|
|
|
|
perf_swcounter_overflow(counter, nmi, data);
|
2009-03-13 11:21:35 +00:00
|
|
|
|
}
|
|
|
|
|
|
perf_counter: Allow software counters to count while task is not running
This changes perf_swcounter_match() so that per-task software
counters can count events that occur while their associated
task is not running. This will allow us to use the generic
software counter code for counting task migrations, which can
occur while the task is not scheduled in.
To do this, we have to distinguish between the situations where
the counter is inactive because its task has been scheduled
out, and those where the counter is inactive because it is part
of a group that was not able to go on the PMU. In the former
case we want the counter to count, but not in the latter case.
If the context is active, we have the latter case. If the
context is inactive then we need to know whether the counter
was counting when the context was last active, which we can
determine by comparing its ->tstamp_stopped timestamp with the
context's timestamp.
This also folds three checks in perf_swcounter_match, checking
perf_event_raw(), perf_event_type() and perf_event_id()
individually, into a single 64-bit comparison on
counter->hw_event.config, as an optimization.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <18979.34810.259718.955621@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-06-01 07:49:14 +00:00
|
|
|
|
static int perf_swcounter_is_counting(struct perf_counter *counter)
|
|
|
|
|
{
|
2009-08-13 07:51:55 +00:00
|
|
|
|
/*
|
|
|
|
|
* The counter is active, we're good!
|
|
|
|
|
*/
|
2009-06-01 07:49:14 +00:00
|
|
|
|
if (counter->state == PERF_COUNTER_STATE_ACTIVE)
|
|
|
|
|
return 1;
|
|
|
|
|
|
2009-08-13 07:51:55 +00:00
|
|
|
|
/*
|
|
|
|
|
* The counter is off/error, not counting.
|
|
|
|
|
*/
|
2009-06-01 07:49:14 +00:00
|
|
|
|
if (counter->state != PERF_COUNTER_STATE_INACTIVE)
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
/*
|
2009-08-13 07:51:55 +00:00
|
|
|
|
* The counter is inactive; if the context is active
|
|
|
|
|
* we're part of a group that didn't make it on the 'pmu',
|
|
|
|
|
* not counting.
|
2009-06-01 07:49:14 +00:00
|
|
|
|
*/
|
2009-08-13 07:51:55 +00:00
|
|
|
|
if (counter->ctx->is_active)
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* We're inactive and the context is too; this means the
|
|
|
|
|
* task is scheduled out, so we're counting events that happen
|
|
|
|
|
* to us, like migration events.
|
|
|
|
|
*/
|
|
|
|
|
return 1;
|
2009-06-01 07:49:14 +00:00
|
|
|
|
}
|
|
|
|
|
|
2009-03-13 11:21:32 +00:00
|
|
|
|
static int perf_swcounter_match(struct perf_counter *counter,
|
2009-06-11 11:19:29 +00:00
|
|
|
|
enum perf_type_id type,
|
2009-03-19 19:26:18 +00:00
|
|
|
|
u32 event, struct pt_regs *regs)
|
2009-03-13 11:21:32 +00:00
|
|
|
|
{
|
2009-06-01 07:49:14 +00:00
|
|
|
|
if (!perf_swcounter_is_counting(counter))
|
2009-03-13 11:21:32 +00:00
|
|
|
|
return 0;
|
|
|
|
|
|
2009-06-06 07:58:57 +00:00
|
|
|
|
if (counter->attr.type != type)
|
|
|
|
|
return 0;
|
|
|
|
|
if (counter->attr.config != event)
|
2009-03-13 11:21:32 +00:00
|
|
|
|
return 0;
|
|
|
|
|
|
2009-06-01 07:52:30 +00:00
|
|
|
|
if (regs) {
|
2009-06-02 17:22:16 +00:00
|
|
|
|
if (counter->attr.exclude_user && user_mode(regs))
|
2009-06-01 07:52:30 +00:00
|
|
|
|
return 0;
|
2009-03-13 11:21:32 +00:00
|
|
|
|
|
2009-06-02 17:22:16 +00:00
|
|
|
|
if (counter->attr.exclude_kernel && !user_mode(regs))
|
2009-06-01 07:52:30 +00:00
|
|
|
|
return 0;
|
|
|
|
|
}
|
2009-03-13 11:21:32 +00:00
|
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
|
2009-06-19 16:11:53 +00:00
|
|
|
|
enum perf_type_id type,
|
|
|
|
|
u32 event, u64 nr, int nmi,
|
|
|
|
|
struct perf_sample_data *data)
|
2009-03-13 11:21:32 +00:00
|
|
|
|
{
|
|
|
|
|
struct perf_counter *counter;
|
|
|
|
|
|
2009-03-19 19:26:11 +00:00
|
|
|
|
if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
|
2009-03-13 11:21:32 +00:00
|
|
|
|
return;
|
|
|
|
|
|
2009-03-13 11:21:36 +00:00
|
|
|
|
rcu_read_lock();
|
|
|
|
|
list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
|
2009-06-19 16:11:53 +00:00
|
|
|
|
if (perf_swcounter_match(counter, type, event, data->regs))
|
|
|
|
|
perf_swcounter_add(counter, nr, nmi, data);
|
2009-03-13 11:21:32 +00:00
|
|
|
|
}
|
2009-03-13 11:21:36 +00:00
|
|
|
|
rcu_read_unlock();
|
2009-03-13 11:21:32 +00:00
|
|
|
|
}
|
|
|
|
|
|
2009-03-23 17:22:07 +00:00
|
|
|
|
static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
|
|
|
|
|
{
|
|
|
|
|
if (in_nmi())
|
|
|
|
|
return &cpuctx->recursion[3];
|
|
|
|
|
|
|
|
|
|
if (in_irq())
|
|
|
|
|
return &cpuctx->recursion[2];
|
|
|
|
|
|
|
|
|
|
if (in_softirq())
|
|
|
|
|
return &cpuctx->recursion[1];
|
|
|
|
|
|
|
|
|
|
return &cpuctx->recursion[0];
|
|
|
|
|
}
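/*
 * The four recursion slots returned above correspond to the execution
 * contexts: [0] process, [1] softirq, [2] hardirq, [3] NMI.
 * do_perf_swcounter_event() below uses the slot matching the current context
 * as a per-cpu busy flag, so a software event raised while one is already
 * being processed at the same level is simply dropped rather than recursing.
 */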
|
|
|
|
|
|
2009-06-19 16:11:53 +00:00
|
|
|
|
static void do_perf_swcounter_event(enum perf_type_id type, u32 event,
|
|
|
|
|
u64 nr, int nmi,
|
|
|
|
|
struct perf_sample_data *data)
|
2009-03-13 11:21:32 +00:00
|
|
|
|
{
|
|
|
|
|
struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
|
2009-03-23 17:22:07 +00:00
|
|
|
|
int *recursion = perf_swcounter_recursion_context(cpuctx);
|
2009-05-29 12:51:57 +00:00
|
|
|
|
struct perf_counter_context *ctx;
|
2009-03-23 17:22:07 +00:00
|
|
|
|
|
|
|
|
|
if (*recursion)
|
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
|
|
(*recursion)++;
|
|
|
|
|
barrier();
|
2009-03-13 11:21:32 +00:00
|
|
|
|
|
2009-04-08 13:01:33 +00:00
|
|
|
|
perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
|
2009-06-19 16:11:53 +00:00
|
|
|
|
nr, nmi, data);
|
2009-05-29 12:51:57 +00:00
|
|
|
|
rcu_read_lock();
|
|
|
|
|
/*
|
|
|
|
|
* doesn't really matter which of the child contexts the
|
|
|
|
|
* events end up in.
|
|
|
|
|
*/
|
|
|
|
|
ctx = rcu_dereference(current->perf_counter_ctxp);
|
|
|
|
|
if (ctx)
|
2009-06-19 16:11:53 +00:00
|
|
|
|
perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data);
|
2009-05-29 12:51:57 +00:00
|
|
|
|
rcu_read_unlock();
|
2009-03-13 11:21:32 +00:00
|
|
|
|
|
2009-03-23 17:22:07 +00:00
|
|
|
|
barrier();
|
|
|
|
|
(*recursion)--;
|
|
|
|
|
|
|
|
|
|
out:
|
2009-03-13 11:21:32 +00:00
|
|
|
|
put_cpu_var(perf_cpu_context);
|
|
|
|
|
}
|
|
|
|
|
|
2009-06-19 16:27:26 +00:00
|
|
|
|
void __perf_swcounter_event(u32 event, u64 nr, int nmi,
|
|
|
|
|
struct pt_regs *regs, u64 addr)
|
2009-03-19 19:26:18 +00:00
|
|
|
|
{
|
2009-06-19 16:11:53 +00:00
|
|
|
|
struct perf_sample_data data = {
|
|
|
|
|
.regs = regs,
|
|
|
|
|
.addr = addr,
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, &data);
|
2009-03-19 19:26:18 +00:00
|
|
|
|
}
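This is the hook the rest of the kernel calls to feed software events in; the header (not part of this file) wraps it, roughly, in a perf_swcounter_event() helper that first checks the perf_swcounter_enabled[] array defined further down, so the call is skipped when no such counters exist. A hedged sketch of a call site — report_fault() is a made-up stand-in for an architecture page-fault handler:

#include <linux/perf_counter.h>
#include <linux/ptrace.h>

static void report_fault(struct pt_regs *regs, unsigned long address)
{
        /* one PERF_COUNT_SW_PAGE_FAULTS event, nmi == 0, addr == faulting address */
        __perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
}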
|
|
|
|
|
|
2009-03-13 11:21:32 +00:00
|
|
|
|
static void perf_swcounter_read(struct perf_counter *counter)
|
|
|
|
|
{
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int perf_swcounter_enable(struct perf_counter *counter)
|
|
|
|
|
{
|
2009-07-22 07:29:32 +00:00
|
|
|
|
struct hw_perf_counter *hwc = &counter->hw;
|
|
|
|
|
|
|
|
|
|
if (hwc->sample_period) {
|
|
|
|
|
hwc->last_period = hwc->sample_period;
|
|
|
|
|
perf_swcounter_set_period(counter);
|
|
|
|
|
}
|
2009-03-13 11:21:32 +00:00
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void perf_swcounter_disable(struct perf_counter *counter)
|
|
|
|
|
{
|
|
|
|
|
}
|
|
|
|
|
|
2009-04-29 10:47:03 +00:00
|
|
|
|
static const struct pmu perf_ops_generic = {
|
2009-03-13 11:21:34 +00:00
|
|
|
|
.enable = perf_swcounter_enable,
|
|
|
|
|
.disable = perf_swcounter_disable,
|
|
|
|
|
.read = perf_swcounter_read,
|
2009-07-22 07:29:32 +00:00
|
|
|
|
.unthrottle = perf_swcounter_unthrottle,
|
2009-03-13 11:21:34 +00:00
|
|
|
|
};
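/*
 * Note: this generic software pmu is shared.  sw_perf_counter_init() below
 * hands it out for the page-fault, context-switch and migration events, and
 * tp_perf_counter_init() further down returns it for tracepoint counters too.
 */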
|
|
|
|
|
|
2009-07-22 07:29:32 +00:00
|
|
|
|
/*
|
|
|
|
|
* hrtimer based swcounter callback
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
|
|
|
|
|
{
|
|
|
|
|
enum hrtimer_restart ret = HRTIMER_RESTART;
|
|
|
|
|
struct perf_sample_data data;
|
|
|
|
|
struct perf_counter *counter;
|
|
|
|
|
u64 period;
|
|
|
|
|
|
|
|
|
|
counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
|
|
|
|
|
counter->pmu->read(counter);
|
|
|
|
|
|
|
|
|
|
data.addr = 0;
|
|
|
|
|
data.regs = get_irq_regs();
|
|
|
|
|
/*
|
|
|
|
|
* In case we exclude kernel IPs or are somehow not in interrupt
|
|
|
|
|
* context, provide the next best thing, the user IP.
|
|
|
|
|
*/
|
|
|
|
|
if ((counter->attr.exclude_kernel || !data.regs) &&
|
|
|
|
|
!counter->attr.exclude_user)
|
|
|
|
|
data.regs = task_pt_regs(current);
|
|
|
|
|
|
|
|
|
|
if (data.regs) {
|
|
|
|
|
if (perf_counter_overflow(counter, 0, &data))
|
|
|
|
|
ret = HRTIMER_NORESTART;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
period = max_t(u64, 10000, counter->hw.sample_period);
|
|
|
|
|
hrtimer_forward_now(hrtimer, ns_to_ktime(period));
|
|
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
|
}
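/*
 * Note on the self-rearming above: max_t(u64, 10000, ...) keeps the hrtimer
 * from being forwarded by less than 10000ns, so a clock counter never samples
 * more often than every 10us regardless of the requested sample_period; the
 * same floor is applied when the timer is first started in the enable paths
 * below.
 */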
|
|
|
|
|
|
2009-03-13 11:21:32 +00:00
|
|
|
|
/*
|
|
|
|
|
* Software counter: cpu wall time clock
|
|
|
|
|
*/
|
|
|
|
|
|
2009-01-09 05:26:43 +00:00
|
|
|
|
static void cpu_clock_perf_counter_update(struct perf_counter *counter)
|
|
|
|
|
{
|
|
|
|
|
int cpu = raw_smp_processor_id();
|
|
|
|
|
s64 prev;
|
|
|
|
|
u64 now;
|
|
|
|
|
|
|
|
|
|
now = cpu_clock(cpu);
|
|
|
|
|
prev = atomic64_read(&counter->hw.prev_count);
|
|
|
|
|
atomic64_set(&counter->hw.prev_count, now);
|
|
|
|
|
atomic64_add(now - prev, &counter->count);
|
|
|
|
|
}
|
|
|
|
|
|
2009-03-13 11:21:35 +00:00
|
|
|
|
static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
|
|
|
|
|
{
|
|
|
|
|
struct hw_perf_counter *hwc = &counter->hw;
|
|
|
|
|
int cpu = raw_smp_processor_id();
|
|
|
|
|
|
|
|
|
|
atomic64_set(&hwc->prev_count, cpu_clock(cpu));
|
2009-03-13 15:43:47 +00:00
|
|
|
|
hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
|
|
|
|
|
hwc->hrtimer.function = perf_swcounter_hrtimer;
|
2009-06-02 13:13:03 +00:00
|
|
|
|
if (hwc->sample_period) {
|
|
|
|
|
u64 period = max_t(u64, 10000, hwc->sample_period);
|
2009-03-13 11:21:35 +00:00
|
|
|
|
__hrtimer_start_range_ns(&hwc->hrtimer,
|
2009-05-15 13:19:28 +00:00
|
|
|
|
ns_to_ktime(period), 0,
|
2009-03-13 11:21:35 +00:00
|
|
|
|
HRTIMER_MODE_REL, 0);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2008-12-11 12:21:10 +00:00
|
|
|
|
static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
|
|
|
|
|
{
|
2009-06-02 13:13:03 +00:00
|
|
|
|
if (counter->hw.sample_period)
|
2009-05-20 10:21:21 +00:00
|
|
|
|
hrtimer_cancel(&counter->hw.hrtimer);
|
2009-01-09 05:26:43 +00:00
|
|
|
|
cpu_clock_perf_counter_update(counter);
|
2008-12-11 12:21:10 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void cpu_clock_perf_counter_read(struct perf_counter *counter)
|
|
|
|
|
{
|
2009-01-09 05:26:43 +00:00
|
|
|
|
cpu_clock_perf_counter_update(counter);
|
2008-12-11 12:21:10 +00:00
|
|
|
|
}
|
|
|
|
|
|
2009-04-29 10:47:03 +00:00
|
|
|
|
static const struct pmu perf_ops_cpu_clock = {
|
2008-12-17 13:20:28 +00:00
|
|
|
|
.enable = cpu_clock_perf_counter_enable,
|
|
|
|
|
.disable = cpu_clock_perf_counter_disable,
|
|
|
|
|
.read = cpu_clock_perf_counter_read,
|
2008-12-11 12:21:10 +00:00
|
|
|
|
};
|
|
|
|
|
|
2009-03-13 11:21:32 +00:00
|
|
|
|
/*
|
|
|
|
|
* Software counter: task time clock
|
|
|
|
|
*/
|
|
|
|
|
|
2009-04-08 13:01:25 +00:00
|
|
|
|
static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
|
2008-12-17 13:10:57 +00:00
|
|
|
|
{
|
2009-04-08 13:01:25 +00:00
|
|
|
|
u64 prev;
|
2008-12-14 11:22:31 +00:00
|
|
|
|
s64 delta;
|
|
|
|
|
|
2009-04-06 09:45:11 +00:00
|
|
|
|
prev = atomic64_xchg(&counter->hw.prev_count, now);
|
2008-12-14 11:22:31 +00:00
|
|
|
|
delta = now - prev;
|
|
|
|
|
atomic64_add(delta, &counter->count);
|
2008-12-11 13:03:20 +00:00
|
|
|
|
}
|
|
|
|
|
|
2008-12-21 12:50:42 +00:00
|
|
|
|
static int task_clock_perf_counter_enable(struct perf_counter *counter)
|
2008-12-14 11:22:31 +00:00
|
|
|
|
{
|
2009-03-13 11:21:35 +00:00
|
|
|
|
struct hw_perf_counter *hwc = &counter->hw;
|
2009-04-06 09:45:11 +00:00
|
|
|
|
u64 now;
|
|
|
|
|
|
|
|
|
|
now = counter->ctx->time;
|
2009-03-13 11:21:35 +00:00
|
|
|
|
|
2009-04-06 09:45:11 +00:00
|
|
|
|
atomic64_set(&hwc->prev_count, now);
|
2009-03-13 15:43:47 +00:00
|
|
|
|
hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
|
|
|
|
|
hwc->hrtimer.function = perf_swcounter_hrtimer;
|
2009-06-02 13:13:03 +00:00
|
|
|
|
if (hwc->sample_period) {
|
|
|
|
|
u64 period = max_t(u64, 10000, hwc->sample_period);
|
2009-03-13 11:21:35 +00:00
|
|
|
|
__hrtimer_start_range_ns(&hwc->hrtimer,
|
2009-05-15 13:19:28 +00:00
|
|
|
|
ns_to_ktime(period), 0,
|
2009-03-13 11:21:35 +00:00
|
|
|
|
HRTIMER_MODE_REL, 0);
|
|
|
|
|
}
|
2008-12-21 12:50:42 +00:00
|
|
|
|
|
|
|
|
|
return 0;
|
2008-12-14 11:22:31 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void task_clock_perf_counter_disable(struct perf_counter *counter)
|
2008-12-11 13:03:20 +00:00
|
|
|
|
{
|
2009-06-02 13:13:03 +00:00
|
|
|
|
if (counter->hw.sample_period)
|
2009-05-20 10:21:21 +00:00
|
|
|
|
hrtimer_cancel(&counter->hw.hrtimer);
|
2009-04-08 13:01:25 +00:00
|
|
|
|
task_clock_perf_counter_update(counter, counter->ctx->time);
|
|
|
|
|
|
2009-03-13 11:21:35 +00:00
|
|
|
|
}
|
2008-12-17 13:10:57 +00:00
|
|
|
|
|
2009-03-13 11:21:35 +00:00
|
|
|
|
static void task_clock_perf_counter_read(struct perf_counter *counter)
|
|
|
|
|
{
|
2009-04-08 13:01:25 +00:00
|
|
|
|
u64 time;
|
|
|
|
|
|
|
|
|
|
if (!in_nmi()) {
|
|
|
|
|
update_context_time(counter->ctx);
|
|
|
|
|
time = counter->ctx->time;
|
|
|
|
|
} else {
|
|
|
|
|
u64 now = perf_clock();
|
|
|
|
|
u64 delta = now - counter->ctx->timestamp;
|
|
|
|
|
time = counter->ctx->time + delta;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
task_clock_perf_counter_update(counter, time);
|
2008-12-11 13:03:20 +00:00
|
|
|
|
}
|
|
|
|
|
|
2009-04-29 10:47:03 +00:00
|
|
|
|
static const struct pmu perf_ops_task_clock = {
|
2008-12-17 13:20:28 +00:00
|
|
|
|
.enable = task_clock_perf_counter_enable,
|
|
|
|
|
.disable = task_clock_perf_counter_disable,
|
|
|
|
|
.read = task_clock_perf_counter_read,
|
2008-12-11 13:03:20 +00:00
|
|
|
|
};
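The two clock pmus above back PERF_COUNT_SW_CPU_CLOCK and PERF_COUNT_SW_TASK_CLOCK. As a hedged illustration of how they are reached from user space — the sys_perf_counter_open() wrapper below is hypothetical, standing in for the raw system call of this era — such a counter would be opened and read roughly like this:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <linux/perf_counter.h>

/* hypothetical wrapper around the perf_counter_open system call */
extern int sys_perf_counter_open(struct perf_counter_attr *attr,
                                 pid_t pid, int cpu, int group_fd,
                                 unsigned long flags);

static int count_task_clock(void)
{
        struct perf_counter_attr attr;
        unsigned long long count;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size   = sizeof(attr);
        attr.type   = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_TASK_CLOCK; /* redirected to cpu-clock if opened per-cpu */

        fd = sys_perf_counter_open(&attr, 0 /* current task */, -1 /* any cpu */, -1, 0);
        if (fd < 0)
                return -1;

        /* ... run the code being measured ... */

        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("task clock: %llu ns\n", count);
        close(fd);
        return 0;
}

With the default read_format, reading the fd returns the single 64-bit count, which for the clock counters is the accumulated cpu or task time in nanoseconds.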
|
|
|
|
|
|
2009-03-19 19:26:17 +00:00
|
|
|
|
#ifdef CONFIG_EVENT_PROFILE
|
2009-08-06 23:25:54 +00:00
|
|
|
|
void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record,
|
|
|
|
|
int entry_size)
|
2009-03-19 19:26:17 +00:00
|
|
|
|
{
|
2009-08-08 02:26:37 +00:00
|
|
|
|
struct perf_raw_record raw = {
|
2009-08-06 23:25:54 +00:00
|
|
|
|
.size = entry_size,
|
2009-08-08 02:26:37 +00:00
|
|
|
|
.data = record,
|
2009-08-06 23:25:54 +00:00
|
|
|
|
};
|
|
|
|
|
|
2009-06-19 16:11:53 +00:00
|
|
|
|
struct perf_sample_data data = {
|
2009-07-06 08:31:33 +00:00
|
|
|
|
.regs = get_irq_regs(),
|
2009-07-21 15:34:57 +00:00
|
|
|
|
.addr = addr,
|
2009-08-08 02:26:37 +00:00
|
|
|
|
.raw = &raw,
|
2009-06-19 16:11:53 +00:00
|
|
|
|
};
|
2009-03-19 19:26:18 +00:00
|
|
|
|
|
2009-06-19 16:11:53 +00:00
|
|
|
|
if (!data.regs)
|
|
|
|
|
data.regs = task_pt_regs(current);
|
2009-03-19 19:26:18 +00:00
|
|
|
|
|
2009-07-21 15:34:57 +00:00
|
|
|
|
do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, &data);
|
2009-03-19 19:26:17 +00:00
|
|
|
|
}
|
2009-04-15 15:55:05 +00:00
|
|
|
|
EXPORT_SYMBOL_GPL(perf_tpcounter_event);
|
2009-03-19 19:26:17 +00:00
|
|
|
|
|
|
|
|
|
extern int ftrace_profile_enable(int);
|
|
|
|
|
extern void ftrace_profile_disable(int);
|
|
|
|
|
|
|
|
|
|
static void tp_perf_counter_destroy(struct perf_counter *counter)
|
|
|
|
|
{
|
2009-07-06 08:31:33 +00:00
|
|
|
|
ftrace_profile_disable(counter->attr.config);
|
2009-03-19 19:26:17 +00:00
|
|
|
|
}
|
|
|
|
|
|
2009-04-29 10:47:03 +00:00
|
|
|
|
static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
|
2009-03-19 19:26:17 +00:00
|
|
|
|
{
|
2009-08-10 09:20:12 +00:00
|
|
|
|
/*
|
|
|
|
|
* Raw tracepoint data is a severe data leak; only allow root to
|
|
|
|
|
* have these.
|
|
|
|
|
*/
|
|
|
|
|
if ((counter->attr.sample_type & PERF_SAMPLE_RAW) &&
|
|
|
|
|
!capable(CAP_SYS_ADMIN))
|
|
|
|
|
return ERR_PTR(-EPERM);
|
|
|
|
|
|
2009-07-06 08:31:33 +00:00
|
|
|
|
if (ftrace_profile_enable(counter->attr.config))
|
2009-03-19 19:26:17 +00:00
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
|
|
counter->destroy = tp_perf_counter_destroy;
|
|
|
|
|
|
|
|
|
|
return &perf_ops_generic;
|
|
|
|
|
}
|
|
|
|
|
#else
|
2009-04-29 10:47:03 +00:00
|
|
|
|
static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
|
2009-03-19 19:26:17 +00:00
|
|
|
|
{
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
#endif
|
|
|
|
|
|
2009-06-19 16:27:26 +00:00
|
|
|
|
atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];
|
|
|
|
|
|
|
|
|
|
static void sw_perf_counter_destroy(struct perf_counter *counter)
|
|
|
|
|
{
|
|
|
|
|
u64 event = counter->attr.config;
|
|
|
|
|
|
2009-06-22 11:58:35 +00:00
|
|
|
|
WARN_ON(counter->parent);
|
|
|
|
|
|
2009-06-19 16:27:26 +00:00
|
|
|
|
atomic_dec(&perf_swcounter_enabled[event]);
|
|
|
|
|
}
|
|
|
|
|
|
2009-04-29 10:47:03 +00:00
|
|
|
|
static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
|
2008-12-11 12:21:10 +00:00
|
|
|
|
{
|
2009-04-29 10:47:03 +00:00
|
|
|
|
const struct pmu *pmu = NULL;
|
2009-06-19 16:27:26 +00:00
|
|
|
|
u64 event = counter->attr.config;
|
2008-12-11 12:21:10 +00:00
|
|
|
|
|
perf_counters: allow users to count user, kernel and/or hypervisor events
Impact: new perf_counter feature
This extends the perf_counter_hw_event struct with bits that specify
that events in user, kernel and/or hypervisor mode should not be
counted (i.e. should be excluded), and adds code to program the PMU
mode selection bits accordingly on x86 and powerpc.
For software counters, we don't currently have the infrastructure to
distinguish which mode an event occurs in, so we currently fail the
counter initialization if the setting of the hw_event.exclude_* bits
would require us to distinguish. Context switches and CPU migrations
are currently considered to occur in kernel mode.
On x86, this changes the previous policy that only root can count
kernel events. Now non-root users can count kernel events or exclude
them. Non-root users still can't use NMI events, though. On x86 we
don't appear to have any way to control whether hypervisor events are
counted or not, so hw_event.exclude_hv is ignored.
On powerpc, the selection of whether to count events in user, kernel
and/or hypervisor mode is PMU-wide, not per-counter, so this adds a
check that the hw_event.exclude_* settings are the same as other events
on the PMU. Counters being added to a group have to have the same
settings as the other hardware counters in the group. Counters and
groups can only be enabled in hw_perf_group_sched_in or power_perf_enable
if they have the same settings as any other counters already on the
PMU. If we are not running on a hypervisor, the exclude_hv setting
is ignored (by forcing it to 0) since we can't ever get any
hypervisor events.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-02-11 03:35:35 +00:00
|
|
|
|
/*
|
|
|
|
|
* Software counters (currently) can't in general distinguish
|
|
|
|
|
* between user, kernel and hypervisor events.
|
|
|
|
|
* However, context switches and cpu migrations are considered
|
|
|
|
|
* to be kernel events, and page faults are never hypervisor
|
|
|
|
|
* events.
|
|
|
|
|
*/
|
2009-06-19 16:27:26 +00:00
|
|
|
|
switch (event) {
|
2009-06-11 12:06:28 +00:00
|
|
|
|
case PERF_COUNT_SW_CPU_CLOCK:
|
2009-04-29 10:47:03 +00:00
|
|
|
|
pmu = &perf_ops_cpu_clock;
|
2009-03-13 11:21:35 +00:00
|
|
|
|
|
2008-12-11 12:21:10 +00:00
|
|
|
|
break;
|
2009-06-11 12:06:28 +00:00
|
|
|
|
case PERF_COUNT_SW_TASK_CLOCK:
|
perf_counters: make software counters work as per-cpu counters
Impact: kernel crash fix
Yanmin Zhang reported that using a PERF_COUNT_TASK_CLOCK software
counter as a per-cpu counter would reliably crash the system, because
it calls __task_delta_exec with a null pointer. The page fault,
context switch and cpu migration counters also won't function
correctly as per-cpu counters since they reference the current task.
This fixes the problem by redirecting the task_clock counter to the
cpu_clock counter when used as a per-cpu counter, and by implementing
per-cpu page fault, context switch and cpu migration counters.
Along the way, this:
- Initializes counter->ctx earlier, in perf_counter_alloc, so that
sw_perf_counter_init can use it
- Adds code to kernel/sched.c to count task migrations into each
cpu, in rq->nr_migrations_in
- Exports the per-cpu context switch and task migration counts
via new functions added to kernel/sched.c
- Makes sure that if sw_perf_counter_init fails, we don't try to
initialize the counter as a hardware counter. Since the user has
passed a negative, non-raw event type, they clearly don't intend
for it to be interpreted as a hardware event.
Reported-by: "Zhang Yanmin" <yanmin_zhang@linux.intel.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-02-09 11:42:47 +00:00
|
|
|
|
/*
|
|
|
|
|
* If the user instantiates this as a per-cpu counter,
|
|
|
|
|
* use the cpu_clock counter instead.
|
|
|
|
|
*/
|
|
|
|
|
if (counter->ctx->task)
|
2009-04-29 10:47:03 +00:00
|
|
|
|
pmu = &perf_ops_task_clock;
|
2009-02-09 11:42:47 +00:00
|
|
|
|
else
|
2009-04-29 10:47:03 +00:00
|
|
|
|
pmu = &perf_ops_cpu_clock;
|
2009-03-13 11:21:35 +00:00
|
|
|
|
|
2008-12-11 13:03:20 +00:00
|
|
|
|
break;
|
2009-06-11 12:06:28 +00:00
|
|
|
|
case PERF_COUNT_SW_PAGE_FAULTS:
|
|
|
|
|
case PERF_COUNT_SW_PAGE_FAULTS_MIN:
|
|
|
|
|
case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
|
|
|
|
|
case PERF_COUNT_SW_CONTEXT_SWITCHES:
|
|
|
|
|
case PERF_COUNT_SW_CPU_MIGRATIONS:
|
2009-06-22 11:58:35 +00:00
|
|
|
|
if (!counter->parent) {
|
|
|
|
|
atomic_inc(&perf_swcounter_enabled[event]);
|
|
|
|
|
counter->destroy = sw_perf_counter_destroy;
|
|
|
|
|
}
|
2009-06-01 07:52:30 +00:00
|
|
|
|
pmu = &perf_ops_generic;
|
2008-12-14 11:34:15 +00:00
|
|
|
|
break;
|
2008-12-11 12:21:10 +00:00
|
|
|
|
}
|
2009-03-13 11:21:32 +00:00
|
|
|
|
|
2009-04-29 10:47:03 +00:00
|
|
|
|
return pmu;
|
2008-12-11 12:21:10 +00:00
|
|
|
|
}
|
|
|
|
|
|
2008-12-04 19:12:29 +00:00
|
|
|
|
/*
|
|
|
|
|
* Allocate and initialize a counter structure
|
|
|
|
|
*/
|
|
|
|
|
static struct perf_counter *
|
2009-06-02 17:22:16 +00:00
|
|
|
|
perf_counter_alloc(struct perf_counter_attr *attr,
|
2008-12-11 07:38:42 +00:00
|
|
|
|
int cpu,
|
2009-02-09 11:42:47 +00:00
|
|
|
|
struct perf_counter_context *ctx,
|
2008-12-12 12:49:45 +00:00
|
|
|
|
struct perf_counter *group_leader,
|
2009-06-22 11:57:40 +00:00
|
|
|
|
struct perf_counter *parent_counter,
|
2008-12-12 12:49:45 +00:00
|
|
|
|
gfp_t gfpflags)
|
2008-12-04 19:12:29 +00:00
|
|
|
|
{
|
2009-04-29 10:47:03 +00:00
|
|
|
|
const struct pmu *pmu;
|
2008-12-11 11:46:46 +00:00
|
|
|
|
struct perf_counter *counter;
|
2009-05-15 13:19:28 +00:00
|
|
|
|
struct hw_perf_counter *hwc;
|
perf_counter: make it possible for hw_perf_counter_init to return error codes
Impact: better error reporting
At present, if hw_perf_counter_init encounters an error, all it can do
is return NULL, which causes sys_perf_counter_open to return an EINVAL
error to userspace. This isn't very informative for userspace; it means
that userspace can't tell the difference between "sorry, oprofile is
already using the PMU" and "we don't support this CPU" and "this CPU
doesn't support the requested generic hardware event".
This commit uses the PTR_ERR/ERR_PTR/IS_ERR set of macros to let
hw_perf_counter_init return an error code on error rather than just NULL
if it wishes. If it does so, that error code will be returned from
sys_perf_counter_open to userspace. If it returns NULL, an EINVAL
error will be returned to userspace, as before.
This also adapts the powerpc hw_perf_counter_init to make use of this
to return ENXIO, EINVAL, EBUSY, or EOPNOTSUPP as appropriate. It would
be good to add extra error numbers in future to allow userspace to
distinguish the various errors that are currently reported as EINVAL,
i.e. irq_period < 0, too many events in a group, conflict between
exclude_* settings in a group, and PMU resource conflict in a group.
[ v2: fix a bug pointed out by Corey Ashford where error returns from
hw_perf_counter_init were not handled correctly in the case of
raw hardware events.]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Orig-LKML-Reference: <20090330171023.682428180@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-03-30 17:07:08 +00:00
|
|
|
|
long err;
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
2008-12-12 12:49:45 +00:00
|
|
|
|
counter = kzalloc(sizeof(*counter), gfpflags);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
if (!counter)
|
2009-03-30 17:07:08 +00:00
|
|
|
|
return ERR_PTR(-ENOMEM);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
2008-12-11 07:38:42 +00:00
|
|
|
|
/*
|
|
|
|
|
* Single counters are their own group leaders, with an
|
|
|
|
|
* empty sibling list:
|
|
|
|
|
*/
|
|
|
|
|
if (!group_leader)
|
|
|
|
|
group_leader = counter;
|
|
|
|
|
|
2009-05-23 16:28:56 +00:00
|
|
|
|
mutex_init(&counter->child_mutex);
|
|
|
|
|
INIT_LIST_HEAD(&counter->child_list);
|
|
|
|
|
|
2008-12-11 07:38:42 +00:00
|
|
|
|
INIT_LIST_HEAD(&counter->list_entry);
|
2009-03-13 11:21:36 +00:00
|
|
|
|
INIT_LIST_HEAD(&counter->event_entry);
|
2008-12-11 07:38:42 +00:00
|
|
|
|
INIT_LIST_HEAD(&counter->sibling_list);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
init_waitqueue_head(&counter->waitq);
|
|
|
|
|
|
2009-03-23 17:22:10 +00:00
|
|
|
|
mutex_init(&counter->mmap_mutex);
|
|
|
|
|
|
2009-06-03 12:01:36 +00:00
|
|
|
|
counter->cpu = cpu;
|
2009-06-02 17:22:16 +00:00
|
|
|
|
counter->attr = *attr;
|
2009-06-03 12:01:36 +00:00
|
|
|
|
counter->group_leader = group_leader;
|
|
|
|
|
counter->pmu = NULL;
|
|
|
|
|
counter->ctx = ctx;
|
|
|
|
|
counter->oncpu = -1;
|
|
|
|
|
|
2009-06-22 11:57:40 +00:00
|
|
|
|
counter->parent = parent_counter;
|
|
|
|
|
|
2009-06-03 12:01:36 +00:00
|
|
|
|
counter->ns = get_pid_ns(current->nsproxy->pid_ns);
|
|
|
|
|
counter->id = atomic64_inc_return(&perf_counter_id);
|
|
|
|
|
|
|
|
|
|
counter->state = PERF_COUNTER_STATE_INACTIVE;
|
2009-05-26 06:10:00 +00:00
|
|
|
|
|
2009-06-02 17:22:16 +00:00
|
|
|
|
if (attr->disabled)
|
2008-12-16 23:43:10 +00:00
|
|
|
|
counter->state = PERF_COUNTER_STATE_OFF;
|
|
|
|
|
|
2009-04-29 10:47:03 +00:00
|
|
|
|
pmu = NULL;
|
2009-03-19 19:26:18 +00:00
|
|
|
|
|
2009-05-15 13:19:28 +00:00
|
|
|
|
hwc = &counter->hw;
|
2009-06-10 11:40:57 +00:00
|
|
|
|
hwc->sample_period = attr->sample_period;
|
2009-06-02 17:22:16 +00:00
|
|
|
|
if (attr->freq && attr->sample_freq)
|
2009-06-10 11:40:57 +00:00
|
|
|
|
hwc->sample_period = 1;
|
|
|
|
|
|
|
|
|
|
atomic64_set(&hwc->period_left, hwc->sample_period);
|
2009-05-15 13:19:28 +00:00
|
|
|
|
|
2009-05-05 15:50:26 +00:00
|
|
|
|
/*
|
2009-08-13 09:47:53 +00:00
|
|
|
|
* we currently do not support PERF_FORMAT_GROUP on inherited counters
|
2009-05-05 15:50:26 +00:00
|
|
|
|
*/
|
2009-08-13 09:47:53 +00:00
|
|
|
|
if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
|
2009-05-05 15:50:26 +00:00
|
|
|
|
goto done;
|
|
|
|
|
|
2009-06-06 07:58:57 +00:00
|
|
|
|
switch (attr->type) {
|
2009-06-11 15:57:21 +00:00
|
|
|
|
case PERF_TYPE_RAW:
|
2009-03-19 19:26:18 +00:00
|
|
|
|
case PERF_TYPE_HARDWARE:
|
perf_counter: Implement generalized cache event types
Extend generic event enumeration with the PERF_TYPE_HW_CACHE
method.
This is a 3-dimensional space:
{ L1-D, L1-I, L2, ITLB, DTLB, BPU } x
{ load, store, prefetch } x
{ accesses, misses }
User-space passes in the 3 coordinates and the kernel provides
a counter. (if the hardware supports that type and if the
combination makes sense.)
Combinations that make no sense produce a -EINVAL.
Combinations that are not supported by the hardware produce -ENOTSUP.
Extend the tools to deal with this, and rewrite the event symbol
parsing code with various popular aliases for the units and
access methods above. So 'l1-cache-miss' and 'l1d-read-ops' are
both valid aliases.
( x86 is supported for now, with the Nehalem event table filled in,
and with Core2 and Atom having placeholder tables. )
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-06-05 18:22:46 +00:00
|
|
|
|
case PERF_TYPE_HW_CACHE:
|
2009-04-29 10:47:03 +00:00
|
|
|
|
pmu = hw_perf_counter_init(counter);
|
2009-03-19 19:26:18 +00:00
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case PERF_TYPE_SOFTWARE:
|
2009-04-29 10:47:03 +00:00
|
|
|
|
pmu = sw_perf_counter_init(counter);
|
2009-03-19 19:26:18 +00:00
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case PERF_TYPE_TRACEPOINT:
|
2009-04-29 10:47:03 +00:00
|
|
|
|
pmu = tp_perf_counter_init(counter);
|
2009-03-19 19:26:18 +00:00
|
|
|
|
break;
|
2009-06-12 10:46:55 +00:00
|
|
|
|
|
|
|
|
|
default:
|
|
|
|
|
break;
|
2009-03-19 19:26:18 +00:00
|
|
|
|
}
|
2009-03-30 17:07:08 +00:00
|
|
|
|
done:
|
|
|
|
|
err = 0;
|
2009-04-29 10:47:03 +00:00
|
|
|
|
if (!pmu)
|
2009-03-30 17:07:08 +00:00
|
|
|
|
err = -EINVAL;
|
2009-04-29 10:47:03 +00:00
|
|
|
|
else if (IS_ERR(pmu))
|
|
|
|
|
err = PTR_ERR(pmu);
|
2008-12-11 12:21:10 +00:00
|
|
|
|
|
2009-03-30 17:07:08 +00:00
|
|
|
|
if (err) {
|
2009-06-03 12:01:36 +00:00
|
|
|
|
if (counter->ns)
|
|
|
|
|
put_pid_ns(counter->ns);
|
2008-12-11 11:46:46 +00:00
|
|
|
|
kfree(counter);
|
2009-03-30 17:07:08 +00:00
|
|
|
|
return ERR_PTR(err);
|
2008-12-11 11:46:46 +00:00
|
|
|
|
}
|
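The error-code commit quoted above describes the ERR_PTR()/PTR_ERR()/IS_ERR() convention that the error path ending here relies on. As a minimal, self-contained sketch of that convention (a userspace analogue with the helpers re-implemented for illustration; in the kernel they come from <linux/err.h>):

/*
 * Minimal userspace analogue of the ERR_PTR()/IS_ERR()/PTR_ERR() pattern
 * used by hw_perf_counter_init() and its caller above.  The helpers are
 * re-implemented here so the sketch compiles on its own.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for a hw_perf_counter_init()-style constructor. */
static void *counter_init(int pmu_busy)
{
	if (pmu_busy)
		return ERR_PTR(-EBUSY);	/* a precise reason, not just NULL */
	return malloc(16);		/* success: a real object */
}

int main(void)
{
	void *counter = counter_init(1);

	if (IS_ERR(counter))		/* the caller can forward the errno */
		printf("init failed: %ld\n", PTR_ERR(counter));
	else
		free(counter);
	return 0;
}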
2009-03-30 17:07:08 +00:00
|
|
|
|
|
2009-04-29 10:47:03 +00:00
|
|
|
|
counter->pmu = pmu;
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
2009-06-22 11:58:35 +00:00
|
|
|
|
if (!counter->parent) {
|
|
|
|
|
atomic_inc(&nr_counters);
|
|
|
|
|
if (counter->attr.mmap)
|
|
|
|
|
atomic_inc(&nr_mmap_counters);
|
|
|
|
|
if (counter->attr.comm)
|
|
|
|
|
atomic_inc(&nr_comm_counters);
|
2009-07-23 12:46:33 +00:00
|
|
|
|
if (counter->attr.task)
|
|
|
|
|
atomic_inc(&nr_task_counters);
|
2009-06-22 11:58:35 +00:00
|
|
|
|
}
|
2009-04-09 08:53:44 +00:00
|
|
|
|
|
2008-12-04 19:12:29 +00:00
|
|
|
|
return counter;
|
|
|
|
|
}
|
|
|
|
|
|
2009-06-12 10:46:55 +00:00
|
|
|
|
static int perf_copy_attr(struct perf_counter_attr __user *uattr,
|
|
|
|
|
struct perf_counter_attr *attr)
|
|
|
|
|
{
|
|
|
|
|
int ret;
|
|
|
|
|
u32 size;
|
|
|
|
|
|
|
|
|
|
if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
|
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* zero the full structure, so that a short copy will be nice.
|
|
|
|
|
*/
|
|
|
|
|
memset(attr, 0, sizeof(*attr));
|
|
|
|
|
|
|
|
|
|
ret = get_user(size, &uattr->size);
|
|
|
|
|
if (ret)
|
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
|
|
if (size > PAGE_SIZE) /* silly large */
|
|
|
|
|
goto err_size;
|
|
|
|
|
|
|
|
|
|
if (!size) /* abi compat */
|
|
|
|
|
size = PERF_ATTR_SIZE_VER0;
|
|
|
|
|
|
|
|
|
|
if (size < PERF_ATTR_SIZE_VER0)
|
|
|
|
|
goto err_size;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* If we're handed a bigger struct than we know of,
|
|
|
|
|
* ensure all the unknown bits are 0.
|
|
|
|
|
*/
|
|
|
|
|
if (size > sizeof(*attr)) {
|
|
|
|
|
unsigned long val;
|
|
|
|
|
unsigned long __user *addr;
|
|
|
|
|
unsigned long __user *end;
|
|
|
|
|
|
|
|
|
|
addr = PTR_ALIGN((void __user *)uattr + sizeof(*attr),
|
|
|
|
|
sizeof(unsigned long));
|
|
|
|
|
end = PTR_ALIGN((void __user *)uattr + size,
|
|
|
|
|
sizeof(unsigned long));
|
|
|
|
|
|
|
|
|
|
for (; addr < end; addr += sizeof(unsigned long)) {
|
|
|
|
|
ret = get_user(val, addr);
|
|
|
|
|
if (ret)
|
|
|
|
|
return ret;
|
|
|
|
|
if (val)
|
|
|
|
|
goto err_size;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ret = copy_from_user(attr, uattr, size);
|
|
|
|
|
if (ret)
|
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* If the type exists, the corresponding creation will verify
|
|
|
|
|
* the attr->config.
|
|
|
|
|
*/
|
|
|
|
|
if (attr->type >= PERF_TYPE_MAX)
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
|
|
if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
|
|
if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
|
|
if (attr->read_format & ~(PERF_FORMAT_MAX-1))
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
|
|
out:
|
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
|
|
err_size:
|
|
|
|
|
put_user(sizeof(*attr), &uattr->size);
|
|
|
|
|
ret = -E2BIG;
|
|
|
|
|
goto out;
|
|
|
|
|
}
|
|
|
|
|
|
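perf_copy_attr() above implements a size-versioned copy: a zero or short size is treated as the original ABI, and a larger-than-known structure is accepted only if the unknown tail is all zeroes. The following is a hedged userspace sketch of the same technique, with a hypothetical attr_v1 structure and plain memcpy standing in for copy_from_user()/get_user():

/*
 * Userspace sketch of the size-versioned copy done by perf_copy_attr().
 * "struct attr_v1" is hypothetical; the real ABI is struct perf_counter_attr.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct attr_v1 {		/* what "the kernel" knows about */
	uint32_t size;		/* caller fills in sizeof(its struct) */
	uint32_t type;
	uint64_t config;
	uint64_t new_field;	/* added after the v0 layout */
};

#define ATTR_SIZE_VER0	16	/* size of the original v0 layout */

static int copy_attr(const void *uattr, uint32_t usize, struct attr_v1 *attr)
{
	memset(attr, 0, sizeof(*attr));		/* a short copy leaves zeroes */

	if (!usize)				/* ABI compat */
		usize = ATTR_SIZE_VER0;
	if (usize < ATTR_SIZE_VER0)
		return -E2BIG;

	if (usize > sizeof(*attr)) {
		/* caller is newer than us: unknown tail must be all zeroes */
		const unsigned char *p = (const unsigned char *)uattr;
		uint32_t i;

		for (i = sizeof(*attr); i < usize; i++)
			if (p[i])
				return -E2BIG;
		usize = sizeof(*attr);
	}

	memcpy(attr, uattr, usize);
	return 0;
}

int main(void)
{
	struct attr_v1 a = { .size = sizeof(a), .type = 1, .config = 42 };
	struct attr_v1 out;
	int ret = copy_attr(&a, a.size, &out);

	printf("copy_attr: %d, config=%llu\n", ret,
	       (unsigned long long)out.config);
	return 0;
}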
2008-12-04 19:12:29 +00:00
|
|
|
|
/**
|
2009-03-04 09:36:51 +00:00
|
|
|
|
* sys_perf_counter_open - open a performance counter, associate it to a task/cpu
|
2008-12-10 11:33:23 +00:00
|
|
|
|
*
|
2009-06-02 17:22:16 +00:00
|
|
|
|
* @attr_uptr: event type attributes for monitoring/sampling
|
2008-12-04 19:12:29 +00:00
|
|
|
|
* @pid: target pid
|
2008-12-10 11:33:23 +00:00
|
|
|
|
* @cpu: target cpu
|
|
|
|
|
* @group_fd: group leader counter fd
|
2008-12-04 19:12:29 +00:00
|
|
|
|
*/
|
2009-03-04 09:36:51 +00:00
|
|
|
|
SYSCALL_DEFINE5(perf_counter_open,
|
2009-06-12 10:46:55 +00:00
|
|
|
|
struct perf_counter_attr __user *, attr_uptr,
|
2009-03-04 09:36:51 +00:00
|
|
|
|
pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
|
2008-12-04 19:12:29 +00:00
|
|
|
|
{
|
2008-12-11 07:38:42 +00:00
|
|
|
|
struct perf_counter *counter, *group_leader;
|
2009-06-02 17:22:16 +00:00
|
|
|
|
struct perf_counter_attr attr;
|
2008-12-11 07:38:42 +00:00
|
|
|
|
struct perf_counter_context *ctx;
|
2008-12-12 12:49:45 +00:00
|
|
|
|
struct file *counter_file = NULL;
|
2008-12-11 07:38:42 +00:00
|
|
|
|
struct file *group_file = NULL;
|
|
|
|
|
int fput_needed = 0;
|
2008-12-12 12:49:45 +00:00
|
|
|
|
int fput_needed2 = 0;
|
2008-12-04 19:12:29 +00:00
|
|
|
|
int ret;
|
|
|
|
|
|
2009-03-04 09:36:51 +00:00
|
|
|
|
/* for future expandability... */
|
|
|
|
|
if (flags)
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
2009-06-12 10:46:55 +00:00
|
|
|
|
ret = perf_copy_attr(attr_uptr, &attr);
|
|
|
|
|
if (ret)
|
|
|
|
|
return ret;
|
2008-12-08 18:26:59 +00:00
|
|
|
|
|
2009-06-11 09:18:36 +00:00
|
|
|
|
if (!attr.exclude_kernel) {
|
|
|
|
|
if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
|
|
|
|
|
return -EACCES;
|
|
|
|
|
}
|
|
|
|
|
|
2009-06-11 09:25:05 +00:00
|
|
|
|
if (attr.freq) {
|
|
|
|
|
if (attr.sample_freq > sysctl_perf_counter_sample_rate)
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
}
|
|
|
|
|
|
2008-12-11 07:38:42 +00:00
|
|
|
|
/*
|
2008-12-11 10:26:29 +00:00
|
|
|
|
* Get the target context (task or percpu):
|
|
|
|
|
*/
|
|
|
|
|
ctx = find_get_context(pid, cpu);
|
|
|
|
|
if (IS_ERR(ctx))
|
|
|
|
|
return PTR_ERR(ctx);
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Look up the group leader (we will attach this counter to it):
|
2008-12-11 07:38:42 +00:00
|
|
|
|
*/
|
|
|
|
|
group_leader = NULL;
|
|
|
|
|
if (group_fd != -1) {
|
|
|
|
|
ret = -EINVAL;
|
|
|
|
|
group_file = fget_light(group_fd, &fput_needed);
|
|
|
|
|
if (!group_file)
|
2008-12-11 10:26:29 +00:00
|
|
|
|
goto err_put_context;
|
2008-12-11 07:38:42 +00:00
|
|
|
|
if (group_file->f_op != &perf_fops)
|
2008-12-11 10:26:29 +00:00
|
|
|
|
goto err_put_context;
|
2008-12-11 07:38:42 +00:00
|
|
|
|
|
|
|
|
|
group_leader = group_file->private_data;
|
|
|
|
|
/*
|
2008-12-11 10:26:29 +00:00
|
|
|
|
* Do not allow a recursive hierarchy (this new sibling
|
|
|
|
|
* becoming part of another group-sibling):
|
|
|
|
|
*/
|
|
|
|
|
if (group_leader->group_leader != group_leader)
|
|
|
|
|
goto err_put_context;
|
|
|
|
|
/*
|
|
|
|
|
* Do not allow to attach to a group in a different
|
|
|
|
|
* task or CPU context:
|
2008-12-11 07:38:42 +00:00
|
|
|
|
*/
|
2008-12-11 10:26:29 +00:00
|
|
|
|
if (group_leader->ctx != ctx)
|
|
|
|
|
goto err_put_context;
|
perf_counter: Add support for pinned and exclusive counter groups
Impact: New perf_counter features
A pinned counter group is one that the user wants to have on the CPU
whenever possible, i.e. whenever the associated task is running, for
a per-task group, or always for a per-cpu group. If the system
cannot satisfy that, it puts the group into an error state where
it is not scheduled any more and reads from it return EOF (i.e. 0
bytes read). The group can be released from error state and made
readable again using prctl(PR_TASK_PERF_COUNTERS_ENABLE). When we
have finer-grained enable/disable controls on counters we'll be able
to reset the error state on individual groups.
An exclusive group is one that the user wants to be the only group
using the CPU performance monitor hardware whenever it is on. The
counter group scheduler will not schedule an exclusive group if there
are already other groups on the CPU and will not schedule other groups
onto the CPU if there is an exclusive group scheduled (that statement
does not apply to groups containing only software counters, which can
always go on and which do not prevent an exclusive group from going on).
With an exclusive group, we will be able to let users program PMU
registers at a low level without the concern that those settings will
perturb other measurements.
Along the way this reorganizes things a little:
- is_software_counter() is moved to perf_counter.h.
- cpuctx->active_oncpu now records the number of hardware counters on
the CPU, i.e. it now excludes software counters. Nothing was reading
cpuctx->active_oncpu before, so this change is harmless.
- A new cpuctx->exclusive field records whether we currently have an
exclusive group on the CPU.
- counter_sched_out moves higher up in perf_counter.c and gets called
from __perf_counter_remove_from_context and __perf_counter_exit_task,
where we used to have essentially the same code.
- __perf_counter_sched_in now goes through the counter list twice, doing
the pinned counters in the first loop and the non-pinned counters in
the second loop, in order to give the pinned counters the best chance
to be scheduled in.
Note that only a group leader can be exclusive or pinned, and that
attribute applies to the whole group. This avoids some awkwardness in
some corner cases (e.g. where a group leader is closed and the other
group members get added to the context list). If we want to relax that
restriction later, we can, and it is easier to relax a restriction than
to apply a new one.
This doesn't yet handle the case where a pinned counter is inherited
and goes into error state in the child - the error state is not
propagated up to the parent when the child exits, and arguably it
should.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-14 10:00:30 +00:00
|
|
|
|
/*
|
|
|
|
|
* Only a group leader can be exclusive or pinned
|
|
|
|
|
*/
|
2009-06-02 17:22:16 +00:00
|
|
|
|
if (attr.exclusive || attr.pinned)
|
2009-01-14 10:00:30 +00:00
|
|
|
|
goto err_put_context;
|
2008-12-11 07:38:42 +00:00
|
|
|
|
}
|
|
|
|
|
|
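The pinned/exclusive commit above also describes the group scheduler's admission rule: software-only groups always go on, nothing shares the PMU with an exclusive group, and an exclusive group only goes onto an otherwise idle PMU. A simplified sketch of that rule follows; the structure and field names are illustrative, not the kernel's.

struct cpu_ctx {
	int active_oncpu;	/* hardware groups currently on the PMU */
	int exclusive;		/* an exclusive group currently owns the PMU */
};

struct group {
	int software_only;	/* group contains only software counters */
	int exclusive;		/* attr.exclusive was set on the group leader */
};

int group_can_go_on(const struct group *grp, const struct cpu_ctx *cpuctx)
{
	/* Software-only groups never compete for the PMU. */
	if (grp->software_only)
		return 1;
	/* Nothing else goes on while an exclusive group holds the PMU. */
	if (cpuctx->exclusive)
		return 0;
	/* An exclusive group only goes onto an otherwise idle PMU. */
	if (grp->exclusive && cpuctx->active_oncpu)
		return 0;
	return 1;
}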
2009-06-02 17:22:16 +00:00
|
|
|
|
counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
|
2009-06-22 11:57:40 +00:00
|
|
|
|
NULL, GFP_KERNEL);
|
2009-03-30 17:07:08 +00:00
|
|
|
|
ret = PTR_ERR(counter);
|
|
|
|
|
if (IS_ERR(counter))
|
2008-12-04 19:12:29 +00:00
|
|
|
|
goto err_put_context;
|
|
|
|
|
|
|
|
|
|
ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
|
|
|
|
|
if (ret < 0)
|
2008-12-12 12:49:45 +00:00
|
|
|
|
goto err_free_put_context;
|
|
|
|
|
|
|
|
|
|
counter_file = fget_light(ret, &fput_needed2);
|
|
|
|
|
if (!counter_file)
|
|
|
|
|
goto err_free_put_context;
|
|
|
|
|
|
|
|
|
|
counter->filp = counter_file;
|
2009-05-29 06:06:20 +00:00
|
|
|
|
WARN_ON_ONCE(ctx->parent_ctx);
|
perf_counter: Add counter enable/disable ioctls
Impact: New perf_counter features
This primarily adds a way for perf_counter users to enable and disable
counters and groups. Enabling or disabling a counter or group also
enables or disables all of the child counters that have been cloned
from it to monitor children of the task monitored by the top-level
counter. The userspace interface to enable/disable counters is via
ioctl on the counter file descriptor.
Along the way this extends the code that handles child counters to
handle child counter groups properly. A group with multiple counters
will be cloned to child tasks if and only if the group leader has the
hw_event.inherit bit set - if it is set the whole group is cloned as a
group in the child task.
In order to be able to enable or disable all child counters of a given
top-level counter, we need a way to find them all. Hence I have added
a child_list field to struct perf_counter, which is the head of the
list of children for a top-level counter, or the link in that list for
a child counter. That list is protected by the perf_counter.mutex
field.
This also adds a mutex to the perf_counter_context struct. Previously
the list of counters was protected just by the lock field in the
context, which meant that perf_counter_init_task had to take that lock
and then take whatever lock/mutex protects the top-level counter's
child_list. But the counter enable/disable functions need to take
that lock in order to traverse the list, then for each counter take
the lock in that counter's context in order to change the counter's
state safely, which would lead to a deadlock.
To solve this, we now have both a mutex and a spinlock in the context,
and taking either is sufficient to ensure the list of counters can't
change - you have to take both before changing the list. Now
perf_counter_init_task takes the mutex instead of the lock (which
incidentally means that inherit_counter can use GFP_KERNEL instead of
GFP_ATOMIC) and thus avoids the possible deadlock. Similarly the new
enable/disable functions can take the mutex while traversing the list
of child counters without incurring a possible deadlock when the
counter manipulation code locks the context for a child counter.
We also had a misfeature that the first counter added to a context
would possibly not go on until the next sched-in, because we were
using ctx->nr_active to detect if the context was running on a CPU.
But nr_active is the number of active counters, and if that was zero
(because the context didn't have any counters yet) it would look like
the context wasn't running on a cpu and so the retry code in
__perf_install_in_context wouldn't retry. So this adds an 'is_active'
field that is set when the context is on a CPU, even if it has no
counters. The is_active field is only used for task contexts, not for
per-cpu contexts.
If we enable a subsidiary counter in a group that is active on a CPU,
and the arch code can't enable the counter, then we have to pull the
whole group off the CPU. We do this with group_sched_out, which gets
moved up in the file so it comes before all its callers. This also
adds similar logic to __perf_install_in_context so that the "all on,
or none" invariant of groups is preserved when adding a new counter to
a group.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-17 07:10:22 +00:00
|
|
|
|
mutex_lock(&ctx->mutex);
|
2008-12-12 12:49:45 +00:00
|
|
|
|
perf_install_in_context(ctx, counter, cpu);
|
2009-05-29 06:06:20 +00:00
|
|
|
|
++ctx->generation;
|
2009-01-17 07:10:22 +00:00
|
|
|
|
mutex_unlock(&ctx->mutex);
|
2008-12-12 12:49:45 +00:00
|
|
|
|
|
2009-05-23 16:29:00 +00:00
|
|
|
|
counter->owner = current;
|
|
|
|
|
get_task_struct(current);
|
|
|
|
|
mutex_lock(¤t->perf_counter_mutex);
|
|
|
|
|
list_add_tail(&counter->owner_entry, ¤t->perf_counter_list);
|
|
|
|
|
mutex_unlock(¤t->perf_counter_mutex);
|
|
|
|
|
|
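The enable/disable commit quoted earlier exposes those operations as ioctls on the counter file descriptor created here. A hedged userspace sketch, assuming the PERF_COUNTER_IOC_ENABLE/PERF_COUNTER_IOC_DISABLE request codes from this era's <linux/perf_counter.h> and an fd returned by the syscall (see the usage sketch after the syscall below):

/*
 * Userspace sketch: pause and resume counting via ioctl on the counter fd.
 * The ioctl request names are assumed to come from <linux/perf_counter.h>.
 */
#include <linux/perf_counter.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>

void measure(int fd)
{
	uint64_t count;

	ioctl(fd, PERF_COUNTER_IOC_ENABLE);	/* also enables cloned child counters */
	/* ... workload under measurement ... */
	ioctl(fd, PERF_COUNTER_IOC_DISABLE);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("count: %llu\n", (unsigned long long)count);
}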
2008-12-12 12:49:45 +00:00
|
|
|
|
fput_light(counter_file, fput_needed2);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
2008-12-11 07:38:42 +00:00
|
|
|
|
out_fput:
|
|
|
|
|
fput_light(group_file, fput_needed);
|
|
|
|
|
|
2008-12-04 19:12:29 +00:00
|
|
|
|
return ret;
|
|
|
|
|
|
2008-12-12 12:49:45 +00:00
|
|
|
|
err_free_put_context:
|
2008-12-04 19:12:29 +00:00
|
|
|
|
kfree(counter);
|
|
|
|
|
|
|
|
|
|
err_put_context:
|
perf_counter: Fix race in attaching counters to tasks and exiting
Commit 564c2b21 ("perf_counter: Optimize context switch between
identical inherited contexts") introduced a race where it is possible
that a counter being attached to a task could get attached to the
wrong task, if the task is one that has inherited its context from
another task via fork. This happens because the optimized context
switch could switch the context to another task after find_get_context
has read task->perf_counter_ctxp. In fact, it's possible that the
context could then get freed, if the other task then exits.
This fixes the problem by protecting both the context switch and the
critical code in find_get_context with spinlocks. The context switch
locks the ctx->lock of both the outgoing and incoming contexts before
swapping them. That means that once code such as find_get_context
has obtained the spinlock for the context associated with a task,
the context can't get swapped to another task. However, the context
may have been swapped in the interval between reading
task->perf_counter_ctxp and getting the lock, so it is necessary to
check and retry.
To make sure that none of the contexts being looked at in
find_get_context can get freed, this changes the context freeing code
to use RCU. Thus an rcu_read_lock() is sufficient to ensure that no
contexts can get freed. This part of the patch is lifted from a patch
posted by Peter Zijlstra.
This also adds a check to make sure that we can't add a counter to a
task that is exiting.
There is also a race between perf_counter_exit_task and
find_get_context; this solves the race by moving the get_ctx that
was in perf_counter_alloc into the locked region in find_get_context,
so that once find_get_context has got the context for a task, it
won't get freed even if the task calls perf_counter_exit_task. It
doesn't matter if new top-level (non-inherited) counters get attached
to the context after perf_counter_exit_task has detached the context
from the task. They will just stay there and never get scheduled in
until the counters' fds get closed, and then perf_release will remove
them from the context and eventually free the context.
With this, we are now doing the unclone in find_get_context rather
than when a counter was added to or removed from a context (actually,
we were missing the unclone_ctx() call when adding a counter to a
context). We don't need to unclone when removing a counter from a
context because we have no way to remove a counter from a cloned
context.
This also takes out the smp_wmb() in find_get_context, which Peter
Zijlstra pointed out was unnecessary because the cmpxchg implies a
full barrier anyway.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <18974.33033.667187.273886@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-28 12:18:17 +00:00
|
|
|
|
put_ctx(ctx);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
2008-12-11 07:38:42 +00:00
|
|
|
|
goto out_fput;
|
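err_put_context drops the context reference that find_get_context() took; the race-fix commit above explains why that lookup must re-check the task's context pointer after taking the lock. A simplified userspace analogue of the lock-and-recheck retry (in the kernel, rcu_read_lock() additionally keeps the object from being freed during the window; that part is only noted in a comment here):

#include <pthread.h>
#include <stddef.h>

struct ctx {
	pthread_mutex_t lock;
	int refcount;
};

struct task {
	struct ctx *ctx;	/* may be swapped concurrently, like the optimized context switch */
};

struct ctx *get_locked_ctx(struct task *t)
{
	struct ctx *c;

retry:
	/*
	 * In the kernel this load happens under rcu_read_lock(), which also
	 * guarantees c cannot be freed before we lock it; plain userspace
	 * code would need an equivalent lifetime guarantee.
	 */
	c = __atomic_load_n(&t->ctx, __ATOMIC_ACQUIRE);
	if (!c)
		return NULL;
	pthread_mutex_lock(&c->lock);
	/* The pointer may have been swapped between the load and the lock. */
	if (c != __atomic_load_n(&t->ctx, __ATOMIC_ACQUIRE)) {
		pthread_mutex_unlock(&c->lock);
		goto retry;
	}
	c->refcount++;		/* pin it, like get_ctx() */
	pthread_mutex_unlock(&c->lock);
	return c;
}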
2008-12-04 19:12:29 +00:00
|
|
|
|
}
|
|
|
|
|
|
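For reference, a hedged userspace sketch of calling the syscall defined above. The syscall number and the config value are assumptions and are marked as such; the attr fields used (size, type, config) match those referenced by the kernel code in this file.

/*
 * Userspace sketch of sys_perf_counter_open() usage.
 */
#include <linux/perf_counter.h>	/* perf_counter_attr, PERF_TYPE_* (this era's header) */
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

#ifndef __NR_perf_counter_open
#define __NR_perf_counter_open 298	/* x86_64 number of this era; an assumption */
#endif

int main(void)
{
	struct perf_counter_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);	/* versioned ABI, see perf_copy_attr() above */
	attr.type   = PERF_TYPE_SOFTWARE;
	attr.config = 0;		/* first software event; illustrative value */

	/* pid = 0: calling task, cpu = -1: any cpu, group_fd = -1, flags = 0 */
	fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_counter_open");
		return 1;
	}

	/* ... workload to be measured ... */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("count: %llu\n", (unsigned long long)count);
	return 0;
}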
2008-12-12 12:49:45 +00:00
|
|
|
|
/*
|
|
|
|
|
* inherit a counter from parent task to child task:
|
|
|
|
|
*/
|
2009-01-17 07:10:22 +00:00
|
|
|
|
static struct perf_counter *
|
2008-12-12 12:49:45 +00:00
|
|
|
|
inherit_counter(struct perf_counter *parent_counter,
|
|
|
|
|
struct task_struct *parent,
|
|
|
|
|
struct perf_counter_context *parent_ctx,
|
|
|
|
|
struct task_struct *child,
|
2009-01-17 07:10:22 +00:00
|
|
|
|
struct perf_counter *group_leader,
|
2008-12-12 12:49:45 +00:00
|
|
|
|
struct perf_counter_context *child_ctx)
|
|
|
|
|
{
|
|
|
|
|
struct perf_counter *child_counter;
|
|
|
|
|
|
2009-01-17 07:10:22 +00:00
|
|
|
|
/*
|
|
|
|
|
* Instead of creating recursive hierarchies of counters,
|
|
|
|
|
* we link inherited counters back to the original parent,
|
|
|
|
|
* which has a filp for sure, which we use as the reference
|
|
|
|
|
* count:
|
|
|
|
|
*/
|
|
|
|
|
if (parent_counter->parent)
|
|
|
|
|
parent_counter = parent_counter->parent;
|
|
|
|
|
|
2009-06-02 17:22:16 +00:00
|
|
|
|
child_counter = perf_counter_alloc(&parent_counter->attr,
|
perf_counters: make software counters work as per-cpu counters
Impact: kernel crash fix
Yanmin Zhang reported that using a PERF_COUNT_TASK_CLOCK software
counter as a per-cpu counter would reliably crash the system, because
it calls __task_delta_exec with a null pointer. The page fault,
context switch and cpu migration counters also won't function
correctly as per-cpu counters since they reference the current task.
This fixes the problem by redirecting the task_clock counter to the
cpu_clock counter when used as a per-cpu counter, and by implementing
per-cpu page fault, context switch and cpu migration counters.
Along the way, this:
- Initializes counter->ctx earlier, in perf_counter_alloc, so that
sw_perf_counter_init can use it
- Adds code to kernel/sched.c to count task migrations into each
cpu, in rq->nr_migrations_in
- Exports the per-cpu context switch and task migration counts
via new functions added to kernel/sched.c
- Makes sure that if sw_perf_counter_init fails, we don't try to
initialize the counter as a hardware counter. Since the user has
passed a negative, non-raw event type, they clearly don't intend
for it to be interpreted as a hardware event.
Reported-by: "Zhang Yanmin" <yanmin_zhang@linux.intel.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-02-09 11:42:47 +00:00
|
|
|
|
parent_counter->cpu, child_ctx,
|
2009-06-22 11:57:40 +00:00
|
|
|
|
group_leader, parent_counter,
|
|
|
|
|
GFP_KERNEL);
|
2009-03-30 17:07:08 +00:00
|
|
|
|
if (IS_ERR(child_counter))
|
|
|
|
|
return child_counter;
|
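The per-cpu software counter commit above redirects a task-clock request to the cpu-clock implementation when the counter has no task context, and insists that a failed software init must not fall back to a hardware counter. A small illustrative sketch of that dispatch, with made-up names:

#include <stddef.h>

enum sw_event { SW_CPU_CLOCK, SW_TASK_CLOCK };

struct sw_ops { const char *name; };

static const struct sw_ops cpu_clock_ops  = { "cpu_clock"  };
static const struct sw_ops task_clock_ops = { "task_clock" };

const struct sw_ops *sw_counter_init(enum sw_event ev, int has_task_ctx)
{
	switch (ev) {
	case SW_TASK_CLOCK:
		/*
		 * A per-cpu counter has no task to account against, so the
		 * cpu-clock implementation is used instead.
		 */
		return has_task_ctx ? &task_clock_ops : &cpu_clock_ops;
	case SW_CPU_CLOCK:
		return &cpu_clock_ops;
	}
	return NULL;	/* unknown: the caller must not fall back to hardware */
}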
2009-05-28 12:18:17 +00:00
|
|
|
|
get_ctx(child_ctx);
|
2008-12-12 12:49:45 +00:00
|
|
|
|
|
perf_counter: Optimize context switch between identical inherited contexts
When monitoring a process and its descendants with a set of inherited
counters, we can often get the situation in a context switch where
both the old (outgoing) and new (incoming) process have the same set
of counters, and their values are ultimately going to be added together.
In that situation it doesn't matter which set of counters are used to
count the activity for the new process, so there is really no need to
go through the process of reading the hardware counters and updating
the old task's counters and then setting up the PMU for the new task.
This optimizes the context switch in this situation. Instead of
scheduling out the perf_counter_context for the old task and
scheduling in the new context, we simply transfer the old context
to the new task and keep using it without interruption. The new
context gets transferred to the old task. This means that both
tasks still have a valid perf_counter_context, so no special case
is introduced when the old task gets scheduled in again, either on
this CPU or another CPU.
The equivalence of contexts is detected by keeping a pointer in
each cloned context pointing to the context it was cloned from.
To cope with the situation where a context is changed by adding
or removing counters after it has been cloned, we also keep a
generation number on each context which is incremented every time
a context is changed. When a context is cloned we take a copy
of the parent's generation number, and two cloned contexts are
equivalent only if they have the same parent and the same
generation number. In order that the parent context pointer
remains valid (and is not reused), we increment the parent
context's reference count for each context cloned from it.
Since we don't have individual fds for the counters in a cloned
context, the only thing that can make two clones of a given parent
different after they have been cloned is enabling or disabling all
counters with prctl. To account for this, we keep a count of the
number of enabled counters in each context. Two contexts must have
the same number of enabled counters to be considered equivalent.
Here are some measurements of the context switch time as measured with
the lat_ctx benchmark from lmbench, comparing the times obtained with
and without this patch series:
                 -----Unmodified-----     With this patch series
Counters:        none    2 HW   4H+4S     none    2 HW   4H+4S
2 processes:
  Average        3.44    6.45   11.24     3.12    3.39    3.60
  St dev         0.04    0.04    0.13     0.05    0.17    0.19
8 processes:
  Average        6.45    8.79   14.00     5.57    6.23    7.57
  St dev         1.27    1.04    0.88     1.42    1.46    1.42
32 processes:
  Average        5.56    8.43   13.78     5.28    5.55    7.15
  St dev         0.41    0.47    0.53     0.54    0.57    0.81
The numbers are the mean and standard deviation of 20 runs of
lat_ctx. The "none" columns are lat_ctx run directly without any
counters. The "2 HW" columns are with lat_ctx run under perfstat,
counting cycles and instructions. The "4H+4S" columns are lat_ctx run
under perfstat with 4 hardware counters and 4 software counters
(cycles, instructions, cache references, cache misses, task
clock, context switch, cpu migrations, and page faults).
[ Impact: performance optimization of counter context-switches ]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <18966.10666.517218.332164@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-22 04:27:22 +00:00
|
|
|
|
/*
|
|
|
|
|
* Make the child state follow the state of the parent counter,
|
2009-06-02 17:22:16 +00:00
|
|
|
|
* not its attr.disabled bit. We hold the parent's mutex,
|
2009-06-01 08:13:37 +00:00
|
|
|
|
* so we won't race with perf_counter_{en, dis}able_family.
|
2009-05-22 04:27:22 +00:00
|
|
|
|
*/
|
|
|
|
|
if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
|
|
|
|
|
child_counter->state = PERF_COUNTER_STATE_INACTIVE;
|
|
|
|
|
else
|
|
|
|
|
child_counter->state = PERF_COUNTER_STATE_OFF;
|
|
|
|
|
|
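The context-switch optimization commit above detects equivalent inherited contexts by comparing the parent pointer, the generation number copied at clone time, and the number of enabled counters. A sketch of that equivalence test, with illustrative field names:

/*
 * Sketch of the equivalence test described in the "Optimize context
 * switch" commit message above.  Field names are illustrative.
 */
struct counter_ctx {
	struct counter_ctx *parent_ctx;	/* context this one was cloned from */
	unsigned long long parent_gen;	/* parent's generation at clone time */
	unsigned long long generation;	/* bumped whenever the context changes */
	int nr_enabled;			/* counters currently enabled */
};

int context_equiv(const struct counter_ctx *a, const struct counter_ctx *b)
{
	return a->parent_ctx && a->parent_ctx == b->parent_ctx
		&& a->parent_gen == b->parent_gen
		&& a->nr_enabled == b->nr_enabled;
}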
2009-06-10 11:40:57 +00:00
|
|
|
|
if (parent_counter->attr.freq)
|
|
|
|
|
child_counter->hw.sample_period = parent_counter->hw.sample_period;
|
|
|
|
|
|
2008-12-12 12:49:45 +00:00
|
|
|
|
/*
|
|
|
|
|
* Link it up in the child's context:
|
|
|
|
|
*/
|
perf_counter: record time running and time enabled for each counter
Impact: new functionality
Currently, if there are more counters enabled than can fit on the CPU,
the kernel will multiplex the counters on to the hardware using
round-robin scheduling. That isn't too bad for sampling counters, but
for counting counters it means that the value read from a counter
represents some unknown fraction of the true count of events that
occurred while the counter was enabled.
This remedies the situation by keeping track of how long each counter
is enabled for, and how long it is actually on the cpu and counting
events. These times are recorded in nanoseconds using the task clock
for per-task counters and the cpu clock for per-cpu counters.
These values can be supplied to userspace on a read from the counter.
Userspace requests that they be supplied after the counter value by
setting the PERF_FORMAT_TOTAL_TIME_ENABLED and/or
PERF_FORMAT_TOTAL_TIME_RUNNING bits in the hw_event.read_format field
when creating the counter. (There is no way to change the read format
after the counter is created, though it would be possible to add some
way to do that.)
Using this information it is possible for userspace to scale the count
it reads from the counter to get an estimate of the true count:
true_count_estimate = count * total_time_enabled / total_time_running
This also lets userspace detect the situation where the counter never
got to go on the cpu: total_time_running == 0.
This functionality has been requested by the PAPI developers, and will
be generally needed for interpreting the count values from counting
counters correctly.
In the implementation, this keeps 5 time values (in nanoseconds) for
each counter: total_time_enabled and total_time_running are used when
the counter is in state OFF or ERROR and for reporting back to
userspace. When the counter is in state INACTIVE or ACTIVE, it is the
tstamp_enabled, tstamp_running and tstamp_stopped values that are
relevant, and total_time_enabled and total_time_running are determined
from them. (tstamp_stopped is only used in INACTIVE state.) The
reason for doing it like this is that it means that only counters
being enabled or disabled at sched-in and sched-out time need to be
updated. There are no new loops that iterate over all counters to
update total_time_enabled or total_time_running.
This also keeps separate child_total_time_running and
child_total_time_enabled fields that get added in when reporting the
totals to userspace. They are separate fields so that they can be
atomic. We don't want to use atomics for total_time_running,
total_time_enabled etc., because then we would have to use atomic
sequences to update them, which are slower than regular arithmetic and
memory accesses.
It is possible to measure total_time_running by adding a task_clock
counter to each group of counters, and total_time_enabled can be
measured approximately with a top-level task_clock counter (though
inaccuracies will creep in if you need to disable and enable groups
since it is not possible in general to disable/enable the top-level
task_clock counter simultaneously with another group). However, that
adds extra overhead - I measured around 15% increase in the context
switch latency reported by lat_ctx (from lmbench) when a task_clock
counter was added to each of 2 groups, and around 25% increase when a
task_clock counter was added to each of 4 groups. (In both cases a
top-level task-clock counter was also added.)
In contrast, the code added in this commit gives better information
with no overhead that I could measure (in fact in some cases I
measured lower times with this code, but the differences were all less
than one standard deviation).
[ v2: address review comments by Andrew Morton. ]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Andrew Morton <akpm@linux-foundation.org>
Orig-LKML-Reference: <18890.6578.728637.139402@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-03-25 11:46:58 +00:00
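[ Illustration, not part of the original file: a userspace sketch of the
  scaling described above. It assumes the counter fd was opened with both
  PERF_FORMAT_TOTAL_TIME_ENABLED and PERF_FORMAT_TOTAL_TIME_RUNNING set in
  the read format, so read() returns the count followed by the two times;
  the helper name is made up. ]

#include <stdint.h>
#include <unistd.h>

/* Returns 0 and stores the scaled estimate, 1 if the counter never got
 * onto the CPU (total_time_running == 0), or -1 on a short read. */
static int read_scaled_count(int fd, uint64_t *estimate)
{
	uint64_t v[3];	/* count, total_time_enabled, total_time_running */

	if (read(fd, v, sizeof(v)) != (ssize_t)sizeof(v))
		return -1;
	if (v[2] == 0) {
		*estimate = 0;
		return 1;
	}
	/* true_count_estimate = count * total_time_enabled / total_time_running */
	*estimate = (uint64_t)((double)v[0] * v[1] / v[2]);
	return 0;
}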
|
|
|
|
add_counter_to_ctx(child_counter, child_ctx);
|
2008-12-12 12:49:45 +00:00
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Get a reference to the parent filp - we will fput it
|
|
|
|
|
* when the child counter exits. This is safe to do because
|
|
|
|
|
* we are in the parent and we know that the filp still
|
|
|
|
|
* exists and has a nonzero count:
|
|
|
|
|
*/
|
|
|
|
|
atomic_long_inc(&parent_counter->filp->f_count);
|
|
|
|
|
|
perf_counter: Add counter enable/disable ioctls
Impact: New perf_counter features
This primarily adds a way for perf_counter users to enable and disable
counters and groups. Enabling or disabling a counter or group also
enables or disables all of the child counters that have been cloned
from it to monitor children of the task monitored by the top-level
counter. The userspace interface to enable/disable counters is via
ioctl on the counter file descriptor.
Along the way this extends the code that handles child counters to
handle child counter groups properly. A group with multiple counters
will be cloned to child tasks if and only if the group leader has the
hw_event.inherit bit set - if it is set the whole group is cloned as a
group in the child task.
In order to be able to enable or disable all child counters of a given
top-level counter, we need a way to find them all. Hence I have added
a child_list field to struct perf_counter, which is the head of the
list of children for a top-level counter, or the link in that list for
a child counter. That list is protected by the perf_counter.mutex
field.
This also adds a mutex to the perf_counter_context struct. Previously
the list of counters was protected just by the lock field in the
context, which meant that perf_counter_init_task had to take that lock
and then take whatever lock/mutex protects the top-level counter's
child_list. But the counter enable/disable functions need to take
that lock in order to traverse the list, then for each counter take
the lock in that counter's context in order to change the counter's
state safely, which would lead to a deadlock.
To solve this, we now have both a mutex and a spinlock in the context,
and taking either is sufficient to ensure the list of counters can't
change - you have to take both before changing the list. Now
perf_counter_init_task takes the mutex instead of the lock (which
incidentally means that inherit_counter can use GFP_KERNEL instead of
GFP_ATOMIC) and thus avoids the possible deadlock. Similarly the new
enable/disable functions can take the mutex while traversing the list
of child counters without incurring a possible deadlock when the
counter manipulation code locks the context for a child counter.
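[ Illustration, not part of the original file: writer side of the locking
  rule just described - both the mutex and the spinlock are held before
  the counter list is changed, so a reader holding either one sees a
  stable list. The field names (ctx->mutex, ctx->lock, ctx->counter_list,
  counter->list_entry) are assumptions based on the description above. ]

static void example_ctx_add_counter(struct perf_counter_context *ctx,
				    struct perf_counter *counter)
{
	mutex_lock(&ctx->mutex);		/* excludes mutex-only readers */
	spin_lock_irq(&ctx->lock);		/* excludes lock-only readers */
	list_add_tail(&counter->list_entry, &ctx->counter_list);
	spin_unlock_irq(&ctx->lock);
	mutex_unlock(&ctx->mutex);
}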
We also had a misfeature that the first counter added to a context
would possibly not go on until the next sched-in, because we were
using ctx->nr_active to detect if the context was running on a CPU.
But nr_active is the number of active counters, and if that was zero
(because the context didn't have any counters yet) it would look like
the context wasn't running on a cpu and so the retry code in
__perf_install_in_context wouldn't retry. So this adds an 'is_active'
field that is set when the context is on a CPU, even if it has no
counters. The is_active field is only used for task contexts, not for
per-cpu contexts.
If we enable a subsidiary counter in a group that is active on a CPU,
and the arch code can't enable the counter, then we have to pull the
whole group off the CPU. We do this with group_sched_out, which gets
moved up in the file so it comes before all its callers. This also
adds similar logic to __perf_install_in_context so that the "all on,
or none" invariant of groups is preserved when adding a new counter to
a group.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-17 07:10:22 +00:00
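[ Illustration, not part of the original file: the userspace side of the
  enable/disable interface described above. The ioctl names are taken from
  the <linux/perf_counter.h> of this era and should be treated as
  assumptions if your tree differs. ]

#include <sys/ioctl.h>
#include <linux/perf_counter.h>

/* Enable or disable a counter (and, per the text above, all child
 * counters cloned from it) through its file descriptor. */
static int set_counter_enabled(int fd, int enable)
{
	return ioctl(fd, enable ? PERF_COUNTER_IOC_ENABLE
				: PERF_COUNTER_IOC_DISABLE, 0);
}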
|
|
|
|
/*
|
|
|
|
|
* Link this into the parent counter's child list
|
|
|
|
|
*/
|
2009-05-29 06:06:20 +00:00
|
|
|
|
WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
|
2009-05-23 16:28:56 +00:00
|
|
|
|
mutex_lock(&parent_counter->child_mutex);
|
2009-01-17 07:10:22 +00:00
|
|
|
|
list_add_tail(&child_counter->child_list, &parent_counter->child_list);
|
2009-05-23 16:28:56 +00:00
|
|
|
|
mutex_unlock(&parent_counter->child_mutex);
|
2009-01-17 07:10:22 +00:00
|
|
|
|
|
|
|
|
|
return child_counter;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int inherit_group(struct perf_counter *parent_counter,
|
|
|
|
|
struct task_struct *parent,
|
|
|
|
|
struct perf_counter_context *parent_ctx,
|
|
|
|
|
struct task_struct *child,
|
|
|
|
|
struct perf_counter_context *child_ctx)
|
|
|
|
|
{
|
|
|
|
|
struct perf_counter *leader;
|
|
|
|
|
struct perf_counter *sub;
|
perf_counter: make it possible for hw_perf_counter_init to return error codes
Impact: better error reporting
At present, if hw_perf_counter_init encounters an error, all it can do
is return NULL, which causes sys_perf_counter_open to return an EINVAL
error to userspace. This isn't very informative for userspace; it means
that userspace can't tell the difference between "sorry, oprofile is
already using the PMU" and "we don't support this CPU" and "this CPU
doesn't support the requested generic hardware event".
This commit uses the PTR_ERR/ERR_PTR/IS_ERR set of macros to let
hw_perf_counter_init return an error code on error rather than just NULL
if it wishes. If it does so, that error code will be returned from
sys_perf_counter_open to userspace. If it returns NULL, an EINVAL
error will be returned to userspace, as before.
This also adapts the powerpc hw_perf_counter_init to make use of this
to return ENXIO, EINVAL, EBUSY, or EOPNOTSUPP as appropriate. It would
be good to add extra error numbers in future to allow userspace to
distinguish the various errors that are currently reported as EINVAL,
i.e. irq_period < 0, too many events in a group, conflict between
exclude_* settings in a group, and PMU resource conflict in a group.
[ v2: fix a bug pointed out by Corey Ashford where error returns from
hw_perf_counter_init were not handled correctly in the case of
raw hardware events.]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Orig-LKML-Reference: <20090330171023.682428180@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-03-30 17:07:08 +00:00
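[ Illustration, not part of the original file: a caller-side sketch of the
  error-return convention described above. The helper name is invented and
  the ops type follows the naming used in the description; later versions
  of this file renamed it, so treat the identifiers as assumptions. ]

static int example_attach_hw_ops(struct perf_counter *counter)
{
	const struct hw_perf_counter_ops *hw_ops;

	hw_ops = hw_perf_counter_init(counter);
	if (IS_ERR(hw_ops))
		return PTR_ERR(hw_ops);	/* specific error, e.g. -EBUSY or -ENXIO */
	if (!hw_ops)
		return -EINVAL;		/* NULL keeps the old EINVAL behaviour */

	counter->hw_ops = hw_ops;
	return 0;
}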
|
|
|
|
struct perf_counter *child_ctr;
|
2009-01-17 07:10:22 +00:00
|
|
|
|
|
|
|
|
|
leader = inherit_counter(parent_counter, parent, parent_ctx,
|
|
|
|
|
child, NULL, child_ctx);
|
2009-03-30 17:07:08 +00:00
|
|
|
|
if (IS_ERR(leader))
|
|
|
|
|
return PTR_ERR(leader);
|
2009-01-17 07:10:22 +00:00
|
|
|
|
list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
|
2009-03-30 17:07:08 +00:00
|
|
|
|
child_ctr = inherit_counter(sub, parent, parent_ctx,
|
|
|
|
|
child, leader, child_ctx);
|
|
|
|
|
if (IS_ERR(child_ctr))
|
|
|
|
|
return PTR_ERR(child_ctr);
|
2009-01-17 07:10:22 +00:00
|
|
|
|
}
|
2008-12-12 12:49:45 +00:00
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2009-01-17 07:10:22 +00:00
|
|
|
|
static void sync_child_counter(struct perf_counter *child_counter,
|
2009-06-23 18:13:11 +00:00
|
|
|
|
struct task_struct *child)
|
2009-01-17 07:10:22 +00:00
|
|
|
|
{
|
2009-06-23 18:13:11 +00:00
|
|
|
|
struct perf_counter *parent_counter = child_counter->parent;
|
2009-05-15 18:45:59 +00:00
|
|
|
|
u64 child_val;
|
2009-01-17 07:10:22 +00:00
|
|
|
|
|
2009-06-24 19:11:59 +00:00
|
|
|
|
if (child_counter->attr.inherit_stat)
|
|
|
|
|
perf_counter_read_event(child_counter, child);
|
2009-06-23 18:13:11 +00:00
|
|
|
|
|
2009-01-17 07:10:22 +00:00
|
|
|
|
child_val = atomic64_read(&child_counter->count);
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Add back the child's count to the parent's count:
|
|
|
|
|
*/
|
|
|
|
|
atomic64_add(child_val, &parent_counter->count);
|
2009-03-25 11:46:58 +00:00
|
|
|
|
atomic64_add(child_counter->total_time_enabled,
|
|
|
|
|
&parent_counter->child_total_time_enabled);
|
|
|
|
|
atomic64_add(child_counter->total_time_running,
|
|
|
|
|
&parent_counter->child_total_time_running);
|
2009-01-17 07:10:22 +00:00
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Remove this counter from the parent's list
|
|
|
|
|
*/
|
2009-05-29 06:06:20 +00:00
|
|
|
|
WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
|
2009-05-23 16:28:56 +00:00
|
|
|
|
mutex_lock(&parent_counter->child_mutex);
|
2009-01-17 07:10:22 +00:00
|
|
|
|
list_del_init(&child_counter->child_list);
|
2009-05-23 16:28:56 +00:00
|
|
|
|
mutex_unlock(&parent_counter->child_mutex);
|
2009-01-17 07:10:22 +00:00
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Release the parent counter, if this was the last
|
|
|
|
|
* reference to it.
|
|
|
|
|
*/
|
|
|
|
|
fput(parent_counter->filp);
|
|
|
|
|
}
|
|
|
|
|
|
2008-12-12 12:49:45 +00:00
|
|
|
|
static void
|
2009-05-29 12:25:58 +00:00
|
|
|
|
__perf_counter_exit_task(struct perf_counter *child_counter,
|
2009-06-23 18:13:11 +00:00
|
|
|
|
struct perf_counter_context *child_ctx,
|
|
|
|
|
struct task_struct *child)
|
2008-12-12 12:49:45 +00:00
|
|
|
|
{
|
|
|
|
|
struct perf_counter *parent_counter;
|
|
|
|
|
|
perf_counter: Dynamically allocate tasks' perf_counter_context struct
This replaces the struct perf_counter_context in the task_struct with
a pointer to a dynamically allocated perf_counter_context struct. The
main reason for doing this is to allow us to transfer a
perf_counter_context from one task to another when we do lazy PMU
switching in a later patch.
This has a few side-benefits: the task_struct becomes a little smaller,
we save some memory because only tasks that have perf_counters attached
get a perf_counter_context allocated for them, and we can remove the
inclusion of <linux/perf_counter.h> in sched.h, meaning that we don't
end up recompiling nearly everything whenever perf_counter.h changes.
The perf_counter_context structures are reference-counted and freed
when the last reference is dropped. A context can have references
from its task and the counters on its task. Counters can outlive the
task so it is possible that a context will be freed well after its
task has exited.
Contexts are allocated on fork if the parent had a context, or
otherwise the first time that a per-task counter is created on a task.
In the latter case, we set the context pointer in the task struct
locklessly using an atomic compare-and-exchange operation in case we
raced with some other task in creating a context for the subject task.
This also removes the task pointer from the perf_counter struct. The
task pointer was not used anywhere and would make it harder to move a
context from one task to another. Anything that needed to know which
task a counter was attached to was already using counter->ctx->task.
The __perf_counter_init_context function moves up in perf_counter.c
so that it can be called from find_get_context, and now initializes
the refcount, but is otherwise unchanged.
We were potentially calling list_del_counter twice: once from
__perf_counter_exit_task when the task exits and once from
__perf_counter_remove_from_context when the counter's fd gets closed.
This adds a check in list_del_counter so it doesn't do anything if
the counter has already been removed from the lists.
Since perf_counter_task_sched_in doesn't do anything if the task doesn't
have a context, and leaves cpuctx->task_ctx = NULL, this adds code to
__perf_install_in_context to set cpuctx->task_ctx if necessary, i.e. in
the case where the current task adds the first counter to itself and
thus creates a context for itself.
This also adds similar code to __perf_counter_enable to handle a
similar situation which can arise when the counters have been disabled
using prctl; that also leaves cpuctx->task_ctx = NULL.
[ Impact: refactor counter context management to prepare for new feature ]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <18966.10075.781053.231153@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
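The lockless installation described above can be sketched roughly as follows
(illustrative only: sketch_install_ctx() is a made-up helper name, and the
refcount juggling of the real find_get_context path is omitted). A context is
allocated, initialized, and then published with cmpxchg so that a racing
installer wins cleanly and the loser frees its copy:

static struct perf_counter_context *
sketch_install_ctx(struct task_struct *task)
{
	struct perf_counter_context *ctx, *old;

	ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
	if (!ctx)
		return NULL;

	/* initializes the context, including its refcount */
	__perf_counter_init_context(ctx, task);

	/* Publish atomically; a concurrent installer may beat us to it. */
	old = cmpxchg(&task->perf_counter_ctxp, NULL, ctx);
	if (old) {
		kfree(ctx);	/* lost the race, use the winner's context */
		ctx = old;
	}
	return ctx;
}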
2009-05-22 04:17:31 +00:00
|
|
|
|
update_counter_times(child_counter);
|
2009-05-23 16:28:59 +00:00
|
|
|
|
perf_counter_remove_from_context(child_counter);
|
2008-12-14 22:20:36 +00:00
|
|
|
|
|
2008-12-12 12:49:45 +00:00
|
|
|
|
parent_counter = child_counter->parent;
|
|
|
|
|
/*
|
|
|
|
|
* It can happen that parent exits first, and has counters
|
|
|
|
|
* that are still around due to the child reference. These
|
|
|
|
|
* counters need to be zapped - but otherwise linger.
|
|
|
|
|
*/
|
2009-01-17 07:10:22 +00:00
|
|
|
|
if (parent_counter) {
|
2009-06-23 18:13:11 +00:00
|
|
|
|
sync_child_counter(child_counter, child);
|
2009-03-19 19:26:16 +00:00
|
|
|
|
free_counter(child_counter);
|
2009-02-11 12:53:19 +00:00
|
|
|
|
}
|
2008-12-12 12:49:45 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
2009-01-17 07:10:22 +00:00
|
|
|
|
* When a child task exits, feed back counter values to parent counters.
|
2008-12-12 12:49:45 +00:00
|
|
|
|
*/
|
|
|
|
|
void perf_counter_exit_task(struct task_struct *child)
|
|
|
|
|
{
|
|
|
|
|
struct perf_counter *child_counter, *tmp;
|
|
|
|
|
struct perf_counter_context *child_ctx;
|
2009-05-22 04:17:31 +00:00
|
|
|
|
unsigned long flags;
|
2008-12-12 12:49:45 +00:00
|
|
|
|
|
2009-07-23 12:46:33 +00:00
|
|
|
|
if (likely(!child->perf_counter_ctxp)) {
|
2009-08-07 17:49:01 +00:00
|
|
|
|
perf_counter_task(child, NULL, 0);
|
2008-12-12 12:49:45 +00:00
|
|
|
|
return;
|
2009-07-23 12:46:33 +00:00
|
|
|
|
}
|
2008-12-12 12:49:45 +00:00
|
|
|
|
|
2009-05-22 04:17:31 +00:00
|
|
|
|
local_irq_save(flags);
|
2009-05-29 06:06:20 +00:00
|
|
|
|
/*
|
|
|
|
|
* We can't reschedule here because interrupts are disabled,
|
|
|
|
|
* and either child is current or it is a task that can't be
|
|
|
|
|
* scheduled, so we are now safe from rescheduling changing
|
|
|
|
|
* our context.
|
|
|
|
|
*/
|
|
|
|
|
child_ctx = child->perf_counter_ctxp;
|
2009-05-22 04:17:31 +00:00
|
|
|
|
__perf_counter_task_sched_out(child_ctx);
|
perf_counter: Fix race in attaching counters to tasks and exiting
Commit 564c2b21 ("perf_counter: Optimize context switch between
identical inherited contexts") introduced a race where it is possible
that a counter being attached to a task could get attached to the
wrong task, if the task is one that has inherited its context from
another task via fork. This happens because the optimized context
switch could switch the context to another task after find_get_context
has read task->perf_counter_ctxp. In fact, it's possible that the
context could then get freed, if the other task then exits.
This fixes the problem by protecting both the context switch and the
critical code in find_get_context with spinlocks. The context switch
locks the ctx->lock of both the outgoing and incoming contexts before
swapping them. That means that once code such as find_get_context
has obtained the spinlock for the context associated with a task,
the context can't get swapped to another task. However, the context
may have been swapped in the interval between reading
task->perf_counter_ctxp and getting the lock, so it is necessary to
check and retry.
To make sure that none of the contexts being looked at in
find_get_context can get freed, this changes the context freeing code
to use RCU. Thus an rcu_read_lock() is sufficient to ensure that no
contexts can get freed. This part of the patch is lifted from a patch
posted by Peter Zijlstra.
This also adds a check to make sure that we can't add a counter to a
task that is exiting.
There is also a race between perf_counter_exit_task and
find_get_context; this solves the race by moving the get_ctx that
was in perf_counter_alloc into the locked region in find_get_context,
so that once find_get_context has got the context for a task, it
won't get freed even if the task calls perf_counter_exit_task. It
doesn't matter if new top-level (non-inherited) counters get attached
to the context after perf_counter_exit_task has detached the context
from the task. They will just stay there and never get scheduled in
until the counters' fds get closed, and then perf_release will remove
them from the context and eventually free the context.
With this, we are now doing the unclone in find_get_context rather
than when a counter was added to or removed from a context (actually,
we were missing the unclone_ctx() call when adding a counter to a
context). We don't need to unclone when removing a counter from a
context because we have no way to remove a counter from a cloned
context.
This also takes out the smp_wmb() in find_get_context, which Peter
Zijlstra pointed out was unnecessary because the cmpxchg implies a
full barrier anyway.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <18974.33033.667187.273886@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
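The lookup-with-retry pattern this describes can be sketched as below
(simplified; the helper name is invented and the real logic lives in
find_get_context). RCU keeps the context from being freed while it is
examined, and re-checking task->perf_counter_ctxp under ctx->lock guards
against the context having been swapped to another task in the meantime:

static struct perf_counter_context *
sketch_lock_task_ctx(struct task_struct *task, unsigned long *flags)
{
	struct perf_counter_context *ctx;

retry:
	rcu_read_lock();
	ctx = rcu_dereference(task->perf_counter_ctxp);
	if (ctx) {
		spin_lock_irqsave(&ctx->lock, *flags);
		/*
		 * The context may have been swapped away between the
		 * read above and getting the lock, so re-check and
		 * retry if it moved.
		 */
		if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
			spin_unlock_irqrestore(&ctx->lock, *flags);
			rcu_read_unlock();
			goto retry;
		}
	}
	rcu_read_unlock();
	return ctx;	/* returned with ctx->lock held, or NULL */
}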
2009-05-28 12:18:17 +00:00
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Take the context lock here so that if find_get_context is
|
|
|
|
|
* reading child->perf_counter_ctxp, we wait until it has
|
|
|
|
|
* incremented the context's refcount before we do put_ctx below.
|
|
|
|
|
*/
|
|
|
|
|
spin_lock(&child_ctx->lock);
|
2009-08-07 17:49:01 +00:00
|
|
|
|
child->perf_counter_ctxp = NULL;
|
2009-07-10 07:06:56 +00:00
|
|
|
|
/*
|
|
|
|
|
* If this context is a clone; unclone it so it can't get
|
|
|
|
|
* swapped to another process while we're removing all
|
|
|
|
|
* the counters from it.
|
|
|
|
|
*/
|
|
|
|
|
unclone_ctx(child_ctx);
|
2009-07-23 12:46:33 +00:00
|
|
|
|
spin_unlock_irqrestore(&child_ctx->lock, flags);
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Report the task dead after unscheduling the counters so that we
|
|
|
|
|
* won't get any samples after PERF_EVENT_EXIT. We can however still
|
|
|
|
|
* get a few PERF_EVENT_READ events.
|
|
|
|
|
*/
|
2009-08-07 17:49:01 +00:00
|
|
|
|
perf_counter_task(child, child_ctx, 0);
|
2009-05-22 04:17:31 +00:00
|
|
|
|
|
2009-06-10 20:53:37 +00:00
|
|
|
|
/*
|
|
|
|
|
* We can recurse on the same lock type through:
|
|
|
|
|
*
|
|
|
|
|
* __perf_counter_exit_task()
|
|
|
|
|
* sync_child_counter()
|
|
|
|
|
* fput(parent_counter->filp)
|
|
|
|
|
* perf_release()
|
|
|
|
|
* mutex_lock(&ctx->mutex)
|
|
|
|
|
*
|
|
|
|
|
* But since it's the parent context it won't be the same instance.
|
|
|
|
|
*/
|
|
|
|
|
mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
|
2009-05-22 04:17:31 +00:00
|
|
|
|
|
2009-05-15 18:45:59 +00:00
|
|
|
|
again:
|
2008-12-12 12:49:45 +00:00
|
|
|
|
list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
|
|
|
|
|
list_entry)
|
2009-06-23 18:13:11 +00:00
|
|
|
|
__perf_counter_exit_task(child_counter, child_ctx, child);
|
2009-05-15 18:45:59 +00:00
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* If the last counter was a group counter, it will have appended all
|
|
|
|
|
* its siblings to the list, but we obtained 'tmp' before that which
|
|
|
|
|
* will still point to the list head terminating the iteration.
|
|
|
|
|
*/
|
|
|
|
|
if (!list_empty(&child_ctx->counter_list))
|
|
|
|
|
goto again;
|
2009-05-22 04:17:31 +00:00
|
|
|
|
|
|
|
|
|
mutex_unlock(&child_ctx->mutex);
|
|
|
|
|
|
|
|
|
|
put_ctx(child_ctx);
|
2008-12-12 12:49:45 +00:00
|
|
|
|
}
|
|
|
|
|
|
2009-05-29 12:25:58 +00:00
|
|
|
|
/*
|
|
|
|
|
* free an unexposed, unused context as created by inheritance by
|
|
|
|
|
* init_task below, used by fork() in case of fail.
|
|
|
|
|
*/
|
|
|
|
|
void perf_counter_free_task(struct task_struct *task)
|
|
|
|
|
{
|
|
|
|
|
struct perf_counter_context *ctx = task->perf_counter_ctxp;
|
|
|
|
|
struct perf_counter *counter, *tmp;
|
|
|
|
|
|
|
|
|
|
if (!ctx)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
mutex_lock(&ctx->mutex);
|
|
|
|
|
again:
|
|
|
|
|
list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) {
|
|
|
|
|
struct perf_counter *parent = counter->parent;
|
|
|
|
|
|
|
|
|
|
if (WARN_ON_ONCE(!parent))
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
mutex_lock(&parent->child_mutex);
|
|
|
|
|
list_del_init(&counter->child_list);
|
|
|
|
|
mutex_unlock(&parent->child_mutex);
|
|
|
|
|
|
|
|
|
|
fput(parent->filp);
|
|
|
|
|
|
|
|
|
|
list_del_counter(counter, ctx);
|
|
|
|
|
free_counter(counter);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!list_empty(&ctx->counter_list))
|
|
|
|
|
goto again;
|
|
|
|
|
|
|
|
|
|
mutex_unlock(&ctx->mutex);
|
|
|
|
|
|
|
|
|
|
put_ctx(ctx);
|
|
|
|
|
}
|
|
|
|
|
|
2008-12-12 12:49:45 +00:00
|
|
|
|
/*
|
|
|
|
|
* Initialize the perf_counter context in task_struct
|
|
|
|
|
*/
|
2009-05-25 12:45:27 +00:00
|
|
|
|
int perf_counter_init_task(struct task_struct *child)
|
2008-12-12 12:49:45 +00:00
|
|
|
|
{
|
|
|
|
|
struct perf_counter_context *child_ctx, *parent_ctx;
|
2009-05-29 06:06:20 +00:00
|
|
|
|
struct perf_counter_context *cloned_ctx;
|
2009-01-17 07:10:22 +00:00
|
|
|
|
struct perf_counter *counter;
|
2008-12-12 12:49:45 +00:00
|
|
|
|
struct task_struct *parent = current;
|
perf_counter: Optimize context switch between identical inherited contexts
When monitoring a process and its descendants with a set of inherited
counters, we can often get the situation in a context switch where
both the old (outgoing) and new (incoming) process have the same set
of counters, and their values are ultimately going to be added together.
In that situation it doesn't matter which set of counters are used to
count the activity for the new process, so there is really no need to
go through the process of reading the hardware counters and updating
the old task's counters and then setting up the PMU for the new task.
This optimizes the context switch in this situation. Instead of
scheduling out the perf_counter_context for the old task and
scheduling in the new context, we simply transfer the old context
to the new task and keep using it without interruption. The new
context gets transferred to the old task. This means that both
tasks still have a valid perf_counter_context, so no special case
is introduced when the old task gets scheduled in again, either on
this CPU or another CPU.
The equivalence of contexts is detected by keeping a pointer in
each cloned context pointing to the context it was cloned from.
To cope with the situation where a context is changed by adding
or removing counters after it has been cloned, we also keep a
generation number on each context which is incremented every time
a context is changed. When a context is cloned we take a copy
of the parent's generation number, and two cloned contexts are
equivalent only if they have the same parent and the same
generation number. In order that the parent context pointer
remains valid (and is not reused), we increment the parent
context's reference count for each context cloned from it.
Since we don't have individual fds for the counters in a cloned
context, the only thing that can make two clones of a given parent
different after they have been cloned is enabling or disabling all
counters with prctl. To account for this, we keep a count of the
number of enabled counters in each context. Two contexts must have
the same number of enabled counters to be considered equivalent.
Here are some measurements of the context switch time as measured with
the lat_ctx benchmark from lmbench, comparing the times obtained with
and without this patch series:
                 -----Unmodified-----     With this patch series
Counters:        none    2 HW    4H+4S    none    2 HW    4H+4S
2 processes:
Average          3.44    6.45    11.24    3.12    3.39    3.60
St dev           0.04    0.04     0.13    0.05    0.17    0.19
8 processes:
Average          6.45    8.79    14.00    5.57    6.23    7.57
St dev           1.27    1.04     0.88    1.42    1.46    1.42
32 processes:
Average          5.56    8.43    13.78    5.28    5.55    7.15
St dev           0.41    0.47     0.53    0.54    0.57    0.81
The numbers are the mean and standard deviation of 20 runs of
lat_ctx. The "none" columns are lat_ctx run directly without any
counters. The "2 HW" columns are with lat_ctx run under perfstat,
counting cycles and instructions. The "4H+4S" columns are lat_ctx run
under perfstat with 4 hardware counters and 4 software counters
(cycles, instructions, cache references, cache misses, task
clock, context switch, cpu migrations, and page faults).
[ Impact: performance optimization of counter context-switches ]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <18966.10666.517218.332164@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
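The equivalence test this describes might look roughly like the sketch below
(illustrative only: the field names parent_ctx and parent_gen follow the
description above, the helper name is invented, and the additional comparison
of the number of enabled counters mentioned above is omitted for brevity):

static int sketch_context_equiv(struct perf_counter_context *ctx1,
				struct perf_counter_context *ctx2)
{
	/* Equivalent only if cloned from the same parent context and
	 * neither has been modified since cloning (same generation). */
	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
		&& ctx1->parent_gen == ctx2->parent_gen;
}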
2009-05-22 04:27:22 +00:00
|
|
|
|
int inherited_all = 1;
|
2009-05-25 12:45:27 +00:00
|
|
|
|
int ret = 0;
|
2008-12-12 12:49:45 +00:00
|
|
|
|
|
2009-05-22 04:17:31 +00:00
|
|
|
|
child->perf_counter_ctxp = NULL;
|
2008-12-12 12:49:45 +00:00
|
|
|
|
|
2009-05-23 16:29:00 +00:00
|
|
|
|
mutex_init(&child->perf_counter_mutex);
|
|
|
|
|
INIT_LIST_HEAD(&child->perf_counter_list);
|
|
|
|
|
|
2009-05-29 06:06:20 +00:00
|
|
|
|
if (likely(!parent->perf_counter_ctxp))
|
2009-05-25 12:45:27 +00:00
|
|
|
|
return 0;
|
|
|
|
|
|
2008-12-12 12:49:45 +00:00
|
|
|
|
/*
|
|
|
|
|
* This is executed from the parent task context, so inherit
|
2009-05-22 04:17:31 +00:00
|
|
|
|
* counters that have been marked for cloning.
|
|
|
|
|
* First allocate and initialize a context for the child.
|
2008-12-12 12:49:45 +00:00
|
|
|
|
*/
|
|
|
|
|
|
2009-05-22 04:17:31 +00:00
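As a rough illustration of the lockless installation described above, the sketch below allocates a context and publishes it into task->perf_counter_ctxp with cmpxchg(); the helper name is made up for illustration, and the real logic lives in find_get_context() and the init paths shown below.

/*
 * Illustrative sketch only (hypothetical helper name): allocate a
 * context and publish it locklessly, falling back to the winner's
 * context if another task installed one first.
 */
static struct perf_counter_context *
perf_install_task_ctx(struct task_struct *task)
{
	struct perf_counter_context *ctx, *old;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;
	__perf_counter_init_context(ctx, task);	/* also initializes the refcount */

	old = cmpxchg(&task->perf_counter_ctxp, NULL, ctx);
	if (old) {
		/* Lost the race: drop our copy and use the installed one. */
		kfree(ctx);
		ctx = old;
	}
	return ctx;
}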
|
|
|
|
child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
|
|
|
|
|
if (!child_ctx)
|
2009-05-25 12:45:27 +00:00
|
|
|
|
return -ENOMEM;
|
2008-12-12 12:49:45 +00:00
|
|
|
|
|
2009-05-22 04:17:31 +00:00
|
|
|
|
__perf_counter_init_context(child_ctx, child);
|
|
|
|
|
child->perf_counter_ctxp = child_ctx;
|
perf_counter: Fix race in attaching counters to tasks and exiting
Commit 564c2b21 ("perf_counter: Optimize context switch between
identical inherited contexts") introduced a race where it is possible
that a counter being attached to a task could get attached to the
wrong task, if the task is one that has inherited its context from
another task via fork. This happens because the optimized context
switch could switch the context to another task after find_get_context
has read task->perf_counter_ctxp. In fact, it's possible that the
context could then get freed, if the other task then exits.
This fixes the problem by protecting both the context switch and the
critical code in find_get_context with spinlocks. The context switch
locks the ctx->lock of both the outgoing and incoming contexts before
swapping them. That means that once code such as find_get_context
has obtained the spinlock for the context associated with a task,
the context can't get swapped to another task. However, the context
may have been swapped in the interval between reading
task->perf_counter_ctxp and getting the lock, so it is necessary to
check and retry.
To make sure that none of the contexts being looked at in
find_get_context can get freed, this changes the context freeing code
to use RCU. Thus an rcu_read_lock() is sufficient to ensure that no
contexts can get freed. This part of the patch is lifted from a patch
posted by Peter Zijlstra.
This also adds a check to make sure that we can't add a counter to a
task that is exiting.
There is also a race between perf_counter_exit_task and
find_get_context; this solves the race by moving the get_ctx that
was in perf_counter_alloc into the locked region in find_get_context,
so that once find_get_context has got the context for a task, it
won't get freed even if the task calls perf_counter_exit_task. It
doesn't matter if new top-level (non-inherited) counters get attached
to the context after perf_counter_exit_task has detached the context
from the task. They will just stay there and never get scheduled in
until the counters' fds get closed, and then perf_release will remove
them from the context and eventually free the context.
With this, we are now doing the unclone in find_get_context rather
than when a counter was added to or removed from a context (actually,
we were missing the unclone_ctx() call when adding a counter to a
context). We don't need to unclone when removing a counter from a
context because we have no way to remove a counter from a cloned
context.
This also takes out the smp_wmb() in find_get_context, which Peter
Zijlstra pointed out was unnecessary because the cmpxchg implies a
full barrier anyway.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <18974.33033.667187.273886@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-28 12:18:17 +00:00
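The check-and-retry scheme described above can be sketched as follows; this is an abridged, hypothetical helper rather than the literal find_get_context() body, but it uses the ctx->lock, get_ctx() and perf_counter_ctxp names that appear in the surrounding code.

/*
 * Sketch of the lookup described above: read the context pointer
 * under rcu_read_lock(), take ctx->lock, and re-check that the
 * context was not swapped to another task in the meantime.
 */
static struct perf_counter_context *
perf_lock_task_ctx(struct task_struct *task)
{
	struct perf_counter_context *ctx;
	unsigned long flags;

	rcu_read_lock();
retry:
	ctx = rcu_dereference(task->perf_counter_ctxp);
	if (ctx) {
		spin_lock_irqsave(&ctx->lock, flags);
		if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
			spin_unlock_irqrestore(&ctx->lock, flags);
			goto retry;
		}
		get_ctx(ctx);	/* pin it while ctx->lock is still held */
		spin_unlock_irqrestore(&ctx->lock, flags);
	}
	rcu_read_unlock();

	return ctx;
}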
|
|
|
|
get_task_struct(child);
|
2009-05-22 04:17:31 +00:00
|
|
|
|
|
2009-05-29 06:06:20 +00:00
|
|
|
|
/*
|
2009-06-01 07:48:12 +00:00
|
|
|
|
* If the parent's context is a clone, pin it so it won't get
|
|
|
|
|
* swapped under us.
|
2009-05-29 06:06:20 +00:00
|
|
|
|
*/
|
2009-06-01 07:48:12 +00:00
|
|
|
|
parent_ctx = perf_pin_task_context(parent);
|
|
|
|
|
|
2009-05-29 06:06:20 +00:00
|
|
|
|
/*
|
|
|
|
|
* No need to check if parent_ctx != NULL here; since we saw
|
|
|
|
|
* it non-NULL earlier, the only reason for it to become NULL
|
|
|
|
|
* is if we exit, and since we're currently in the middle of
|
|
|
|
|
* a fork we can't be exiting at the same time.
|
|
|
|
|
*/
|
|
|
|
|
|
2008-12-12 12:49:45 +00:00
|
|
|
|
/*
|
|
|
|
|
* Lock the parent list. No need to lock the child - not PID
|
|
|
|
|
* hashed yet and not running, so nobody can access it.
|
|
|
|
|
*/
|
perf_counter: Add counter enable/disable ioctls
Impact: New perf_counter features
This primarily adds a way for perf_counter users to enable and disable
counters and groups. Enabling or disabling a counter or group also
enables or disables all of the child counters that have been cloned
from it to monitor children of the task monitored by the top-level
counter. The userspace interface to enable/disable counters is via
ioctl on the counter file descriptor.
Along the way this extends the code that handles child counters to
handle child counter groups properly. A group with multiple counters
will be cloned to child tasks if and only if the group leader has the
hw_event.inherit bit set - if it is set the whole group is cloned as a
group in the child task.
In order to be able to enable or disable all child counters of a given
top-level counter, we need a way to find them all. Hence I have added
a child_list field to struct perf_counter, which is the head of the
list of children for a top-level counter, or the link in that list for
a child counter. That list is protected by the perf_counter.mutex
field.
This also adds a mutex to the perf_counter_context struct. Previously
the list of counters was protected just by the lock field in the
context, which meant that perf_counter_init_task had to take that lock
and then take whatever lock/mutex protects the top-level counter's
child_list. But the counter enable/disable functions need to take
that lock in order to traverse the list, then for each counter take
the lock in that counter's context in order to change the counter's
state safely, which would lead to a deadlock.
To solve this, we now have both a mutex and a spinlock in the context,
and taking either is sufficient to ensure the list of counters can't
change - you have to take both before changing the list. Now
perf_counter_init_task takes the mutex instead of the lock (which
incidentally means that inherit_counter can use GFP_KERNEL instead of
GFP_ATOMIC) and thus avoids the possible deadlock. Similarly the new
enable/disable functions can take the mutex while traversing the list
of child counters without incurring a possible deadlock when the
counter manipulation code locks the context for a child counter.
We also had a misfeature that the first counter added to a context
would possibly not go on until the next sched-in, because we were
using ctx->nr_active to detect if the context was running on a CPU.
But nr_active is the number of active counters, and if that was zero
(because the context didn't have any counters yet) it would look like
the context wasn't running on a cpu and so the retry code in
__perf_install_in_context wouldn't retry. So this adds an 'is_active'
field that is set when the context is on a CPU, even if it has no
counters. The is_active field is only used for task contexts, not for
per-cpu contexts.
If we enable a subsidiary counter in a group that is active on a CPU,
and the arch code can't enable the counter, then we have to pull the
whole group off the CPU. We do this with group_sched_out, which gets
moved up in the file so it comes before all its callers. This also
adds similar logic to __perf_install_in_context so that the "all on,
or none" invariant of groups is preserved when adding a new counter to
a group.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-01-17 07:10:22 +00:00
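From userspace, the enable/disable interface described above is just an ioctl on the counter's file descriptor. The sketch below assumes the PERF_COUNTER_IOC_ENABLE/PERF_COUNTER_IOC_DISABLE requests from <linux/perf_counter.h> of this era and a counter fd already obtained from the perf counter open syscall.

/*
 * Userspace sketch (assumed ioctl request names): temporarily stop
 * and then resume a counter around a region we do not want measured.
 */
#include <sys/ioctl.h>
#include <linux/perf_counter.h>

static void skip_region(int counter_fd)
{
	ioctl(counter_fd, PERF_COUNTER_IOC_DISABLE);	/* cloned child counters stop too */
	/* ... uninteresting work ... */
	ioctl(counter_fd, PERF_COUNTER_IOC_ENABLE);	/* counting resumes here */
}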
|
|
|
|
mutex_lock(&parent_ctx->mutex);
|
2008-12-12 12:49:45 +00:00
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* We don't have to disable NMIs - we are only looking at
|
|
|
|
|
* the list, not manipulating it:
|
|
|
|
|
*/
|
2009-05-20 10:21:19 +00:00
|
|
|
|
list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
|
|
|
|
|
if (counter != counter->group_leader)
|
|
|
|
|
continue;
|
|
|
|
|
|
2009-06-02 17:22:16 +00:00
|
|
|
|
if (!counter->attr.inherit) {
|
perf_counter: Optimize context switch between identical inherited contexts
When monitoring a process and its descendants with a set of inherited
counters, we can often get the situation in a context switch where
both the old (outgoing) and new (incoming) process have the same set
of counters, and their values are ultimately going to be added together.
In that situation it doesn't matter which set of counters are used to
count the activity for the new process, so there is really no need to
go through the process of reading the hardware counters and updating
the old task's counters and then setting up the PMU for the new task.
This optimizes the context switch in this situation. Instead of
scheduling out the perf_counter_context for the old task and
scheduling in the new context, we simply transfer the old context
to the new task and keep using it without interruption. The new
context gets transferred to the old task. This means that both
tasks still have a valid perf_counter_context, so no special case
is introduced when the old task gets scheduled in again, either on
this CPU or another CPU.
The equivalence of contexts is detected by keeping a pointer in
each cloned context pointing to the context it was cloned from.
To cope with the situation where a context is changed by adding
or removing counters after it has been cloned, we also keep a
generation number on each context which is incremented every time
a context is changed. When a context is cloned we take a copy
of the parent's generation number, and two cloned contexts are
equivalent only if they have the same parent and the same
generation number. In order that the parent context pointer
remains valid (and is not reused), we increment the parent
context's reference count for each context cloned from it.
Since we don't have individual fds for the counters in a cloned
context, the only thing that can make two clones of a given parent
different after they have been cloned is enabling or disabling all
counters with prctl. To account for this, we keep a count of the
number of enabled counters in each context. Two contexts must have
the same number of enabled counters to be considered equivalent.
Here are some measurements of the context switch time as measured with
the lat_ctx benchmark from lmbench, comparing the times obtained with
and without this patch series:
                -----Unmodified-----    With this patch series
Counters:       none    2 HW   4H+4S    none    2 HW   4H+4S
2 processes:
   Average      3.44    6.45   11.24    3.12    3.39    3.60
   St dev       0.04    0.04    0.13    0.05    0.17    0.19
8 processes:
   Average      6.45    8.79   14.00    5.57    6.23    7.57
   St dev       1.27    1.04    0.88    1.42    1.46    1.42
32 processes:
   Average      5.56    8.43   13.78    5.28    5.55    7.15
   St dev       0.41    0.47    0.53    0.54    0.57    0.81
The numbers are the mean and standard deviation of 20 runs of
lat_ctx. The "none" columns are lat_ctx run directly without any
counters. The "2 HW" columns are with lat_ctx run under perfstat,
counting cycles and instructions. The "4H+4S" columns are lat_ctx run
under perfstat with 4 hardware counters and 4 software counters
(cycles, instructions, cache references, cache misses, task
clock, context switch, cpu migrations, and page faults).
[ Impact: performance optimization of counter context-switches ]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <18966.10666.517218.332164@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-05-22 04:27:22 +00:00
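The equivalence test described above reduces to comparing the clone parentage and generation of the two contexts (the real check also compares the count of enabled counters, as noted above); a minimal sketch, with an illustrative helper name:

/*
 * Sketch of the equivalence check: two contexts may be swapped at
 * context-switch time only if they were cloned from the same parent
 * and neither has been modified since the clone.
 */
static int perf_ctx_equiv(struct perf_counter_context *ctx1,
			  struct perf_counter_context *ctx2)
{
	return ctx1->parent_ctx && ctx2->parent_ctx
		&& ctx1->parent_ctx == ctx2->parent_ctx
		&& ctx1->parent_gen == ctx2->parent_gen;
}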
|
|
|
|
inherited_all = 0;
|
2008-12-12 12:49:45 +00:00
|
|
|
|
continue;
|
2009-05-22 04:27:22 +00:00
|
|
|
|
}
|
2008-12-12 12:49:45 +00:00
|
|
|
|
|
2009-05-25 12:45:27 +00:00
|
|
|
|
ret = inherit_group(counter, parent, parent_ctx,
|
|
|
|
|
child, child_ctx);
|
|
|
|
|
if (ret) {
|
2009-05-22 04:27:22 +00:00
|
|
|
|
inherited_all = 0;
|
2008-12-12 12:49:45 +00:00
|
|
|
|
break;
|
2009-05-22 04:27:22 +00:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (inherited_all) {
|
|
|
|
|
/*
|
|
|
|
|
* Mark the child context as a clone of the parent
|
|
|
|
|
* context, or of whatever the parent is a clone of.
|
2009-05-29 06:06:20 +00:00
|
|
|
|
* Note that if the parent is a clone, it could get
|
|
|
|
|
* uncloned at any point, but that doesn't matter
|
|
|
|
|
* because the list of counters and the generation
|
|
|
|
|
* count can't have changed since we took the mutex.
|
2009-05-22 04:27:22 +00:00
|
|
|
|
*/
|
2009-05-29 06:06:20 +00:00
|
|
|
|
cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
|
|
|
|
|
if (cloned_ctx) {
|
|
|
|
|
child_ctx->parent_ctx = cloned_ctx;
|
2009-06-01 07:48:12 +00:00
|
|
|
|
child_ctx->parent_gen = parent_ctx->parent_gen;
|
2009-05-22 04:27:22 +00:00
|
|
|
|
} else {
|
|
|
|
|
child_ctx->parent_ctx = parent_ctx;
|
|
|
|
|
child_ctx->parent_gen = parent_ctx->generation;
|
|
|
|
|
}
|
|
|
|
|
get_ctx(child_ctx->parent_ctx);
|
2008-12-12 12:49:45 +00:00
|
|
|
|
}
|
|
|
|
|
|
2009-01-17 07:10:22 +00:00
|
|
|
|
mutex_unlock(&parent_ctx->mutex);
|
2009-05-25 12:45:27 +00:00
|
|
|
|
|
2009-06-01 07:48:12 +00:00
|
|
|
|
perf_unpin_context(parent_ctx);
|
2009-05-29 06:06:20 +00:00
|
|
|
|
|
2009-05-25 12:45:27 +00:00
|
|
|
|
return ret;
|
2008-12-12 12:49:45 +00:00
|
|
|
|
}
|
|
|
|
|
|
2008-12-11 07:38:42 +00:00
|
|
|
|
static void __cpuinit perf_counter_init_cpu(int cpu)
|
2008-12-04 19:12:29 +00:00
|
|
|
|
{
|
2008-12-11 07:38:42 +00:00
|
|
|
|
struct perf_cpu_context *cpuctx;
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
2008-12-11 07:38:42 +00:00
|
|
|
|
cpuctx = &per_cpu(perf_cpu_context, cpu);
|
|
|
|
|
__perf_counter_init_context(&cpuctx->ctx, NULL);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
2009-05-04 17:23:18 +00:00
|
|
|
|
spin_lock(&perf_resource_lock);
|
2008-12-11 07:38:42 +00:00
|
|
|
|
cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
|
2009-05-04 17:23:18 +00:00
|
|
|
|
spin_unlock(&perf_resource_lock);
|
2008-12-11 07:38:42 +00:00
|
|
|
|
|
2009-01-14 02:44:19 +00:00
|
|
|
|
hw_perf_counter_setup(cpu);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#ifdef CONFIG_HOTPLUG_CPU
|
2008-12-11 07:38:42 +00:00
|
|
|
|
static void __perf_counter_exit_cpu(void *info)
|
2008-12-04 19:12:29 +00:00
|
|
|
|
{
|
|
|
|
|
struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
|
|
|
|
|
struct perf_counter_context *ctx = &cpuctx->ctx;
|
|
|
|
|
struct perf_counter *counter, *tmp;
|
|
|
|
|
|
2008-12-11 07:38:42 +00:00
|
|
|
|
list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
|
|
|
|
|
__perf_counter_remove_from_context(counter);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
}
|
2008-12-11 07:38:42 +00:00
|
|
|
|
static void perf_counter_exit_cpu(int cpu)
|
2008-12-04 19:12:29 +00:00
|
|
|
|
{
|
2009-01-17 07:10:22 +00:00
|
|
|
|
struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
|
|
|
|
|
struct perf_counter_context *ctx = &cpuctx->ctx;
|
|
|
|
|
|
|
|
|
|
mutex_lock(&ctx->mutex);
|
2008-12-11 07:38:42 +00:00
|
|
|
|
smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
|
2009-01-17 07:10:22 +00:00
|
|
|
|
mutex_unlock(&ctx->mutex);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
}
|
|
|
|
|
#else
|
2008-12-11 07:38:42 +00:00
|
|
|
|
static inline void perf_counter_exit_cpu(int cpu) { }
|
2008-12-04 19:12:29 +00:00
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
static int __cpuinit
|
|
|
|
|
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
|
|
|
|
|
{
|
|
|
|
|
unsigned int cpu = (long)hcpu;
|
|
|
|
|
|
|
|
|
|
switch (action) {
|
|
|
|
|
|
|
|
|
|
case CPU_UP_PREPARE:
|
|
|
|
|
case CPU_UP_PREPARE_FROZEN:
|
2008-12-11 07:38:42 +00:00
|
|
|
|
perf_counter_init_cpu(cpu);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
break;
|
|
|
|
|
|
2009-08-13 08:13:22 +00:00
|
|
|
|
case CPU_ONLINE:
|
|
|
|
|
case CPU_ONLINE_FROZEN:
|
|
|
|
|
hw_perf_counter_setup_online(cpu);
|
|
|
|
|
break;
|
|
|
|
|
|
2008-12-04 19:12:29 +00:00
|
|
|
|
case CPU_DOWN_PREPARE:
|
|
|
|
|
case CPU_DOWN_PREPARE_FROZEN:
|
2008-12-11 07:38:42 +00:00
|
|
|
|
perf_counter_exit_cpu(cpu);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
default:
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return NOTIFY_OK;
|
|
|
|
|
}
|
|
|
|
|
|
2009-06-02 11:05:16 +00:00
|
|
|
|
/*
|
|
|
|
|
* This has to have a higher priority than migration_notifier in sched.c.
|
|
|
|
|
*/
|
2008-12-04 19:12:29 +00:00
|
|
|
|
static struct notifier_block __cpuinitdata perf_cpu_nb = {
|
|
|
|
|
.notifier_call = perf_cpu_notify,
|
2009-06-02 11:05:16 +00:00
|
|
|
|
.priority = 20,
|
2008-12-04 19:12:29 +00:00
|
|
|
|
};
|
|
|
|
|
|
2009-05-04 17:13:30 +00:00
|
|
|
|
void __init perf_counter_init(void)
|
2008-12-04 19:12:29 +00:00
|
|
|
|
{
|
|
|
|
|
perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
|
|
|
|
|
(void *)(long)smp_processor_id());
|
2009-08-13 08:13:22 +00:00
|
|
|
|
perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
|
|
|
|
|
(void *)(long)smp_processor_id());
|
2008-12-04 19:12:29 +00:00
|
|
|
|
register_cpu_notifier(&perf_cpu_nb);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
|
|
|
|
|
{
|
|
|
|
|
return sprintf(buf, "%d\n", perf_reserved_percpu);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
|
perf_set_reserve_percpu(struct sysdev_class *class,
|
|
|
|
|
const char *buf,
|
|
|
|
|
size_t count)
|
|
|
|
|
{
|
|
|
|
|
struct perf_cpu_context *cpuctx;
|
|
|
|
|
unsigned long val;
|
|
|
|
|
int err, cpu, mpt;
|
|
|
|
|
|
|
|
|
|
err = strict_strtoul(buf, 10, &val);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
if (val > perf_max_counters)
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
2009-05-04 17:23:18 +00:00
|
|
|
|
spin_lock(&perf_resource_lock);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
perf_reserved_percpu = val;
|
|
|
|
|
for_each_online_cpu(cpu) {
|
|
|
|
|
cpuctx = &per_cpu(perf_cpu_context, cpu);
|
|
|
|
|
spin_lock_irq(&cpuctx->ctx.lock);
|
|
|
|
|
mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
|
|
|
|
|
perf_max_counters - perf_reserved_percpu);
|
|
|
|
|
cpuctx->max_pertask = mpt;
|
|
|
|
|
spin_unlock_irq(&cpuctx->ctx.lock);
|
|
|
|
|
}
|
2009-05-04 17:23:18 +00:00
|
|
|
|
spin_unlock(&perf_resource_lock);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
|
|
|
|
return count;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
|
|
|
|
|
{
|
|
|
|
|
return sprintf(buf, "%d\n", perf_overcommit);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static ssize_t
|
|
|
|
|
perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
|
|
|
|
|
{
|
|
|
|
|
unsigned long val;
|
|
|
|
|
int err;
|
|
|
|
|
|
|
|
|
|
err = strict_strtoul(buf, 10, &val);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
if (val > 1)
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
2009-05-04 17:23:18 +00:00
|
|
|
|
spin_lock(&perf_resource_lock);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
perf_overcommit = val;
|
2009-05-04 17:23:18 +00:00
|
|
|
|
spin_unlock(&perf_resource_lock);
|
2008-12-04 19:12:29 +00:00
|
|
|
|
|
|
|
|
|
return count;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static SYSDEV_CLASS_ATTR(
|
|
|
|
|
reserve_percpu,
|
|
|
|
|
0644,
|
|
|
|
|
perf_show_reserve_percpu,
|
|
|
|
|
perf_set_reserve_percpu
|
|
|
|
|
);
|
|
|
|
|
|
|
|
|
|
static SYSDEV_CLASS_ATTR(
|
|
|
|
|
overcommit,
|
|
|
|
|
0644,
|
|
|
|
|
perf_show_overcommit,
|
|
|
|
|
perf_set_overcommit
|
|
|
|
|
);
|
|
|
|
|
|
|
|
|
|
static struct attribute *perfclass_attrs[] = {
|
|
|
|
|
&attr_reserve_percpu.attr,
|
|
|
|
|
&attr_overcommit.attr,
|
|
|
|
|
NULL
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
static struct attribute_group perfclass_attr_group = {
|
|
|
|
|
.attrs = perfclass_attrs,
|
|
|
|
|
.name = "perf_counters",
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
static int __init perf_counter_sysfs_init(void)
|
|
|
|
|
{
|
|
|
|
|
return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
|
|
|
|
|
&perfclass_attr_group);
|
|
|
|
|
}
|
|
|
|
|
device_initcall(perf_counter_sysfs_init);
|
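The attribute group above registers under the cpu sysdev class with the name "perf_counters", so the knobs should appear as /sys/devices/system/cpu/perf_counters/{reserve_percpu,overcommit}; that path is an inference from the registration, not something stated in the code. A small userspace sketch:

/*
 * Userspace sketch: query and set the per-CPU reservation knob
 * exported by the sysfs code above (path inferred from the
 * "perf_counters" attribute group on the cpu sysdev class).
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/devices/system/cpu/perf_counters/reserve_percpu";
	FILE *f;
	int val;

	f = fopen(path, "r");
	if (f && fscanf(f, "%d", &val) == 1)
		printf("reserve_percpu = %d\n", val);
	if (f)
		fclose(f);

	f = fopen(path, "w");		/* writing requires root */
	if (f) {
		fprintf(f, "1\n");	/* reserve one counter on every CPU */
		fclose(f);
	}
	return 0;
}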