mirror of https://github.com/adulau/aha.git (synced 2024-12-28 03:36:19 +00:00)
perf_counter: Fix tracepoint sampling to be part of generic sampling
Based on Peter's comments, make tracepoint sampling generic just like all the other sampling bits are. This is a rename with no code changes:

- PERF_SAMPLE_TP_RECORD to PERF_SAMPLE_RAW
- struct perf_tracepoint_record to struct perf_raw_record

We want the system in place that transports tracepoint raw sample events into the perf ring buffer to be generalized and usable by any type of counter.

Reported-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1249698400-5441-4-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 10b8e30660
commit 3a43ce68ae
2 changed files with 15 additions and 15 deletions
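Before the diff, a minimal sketch (not part of the patch) of what the rename buys: any counter type, not only tracepoints, can hand an opaque payload to the generic sampling path by filling the renamed structures. The type and field names below come from the patch itself; my_buf and my_len are hypothetical placeholders.

/*
 * Sketch only, assuming the renamed interface introduced by this patch.
 * A counter that wants to emit a raw blob with its sample fills a
 * struct perf_raw_record and points perf_sample_data::raw at it.
 * my_buf and my_len are hypothetical.
 */
struct perf_raw_record raw = {
	.size	= my_len,		/* length of the opaque payload in bytes */
	.data	= my_buf,		/* pointer to the opaque payload */
};

struct perf_sample_data data = {
	.regs	= get_irq_regs(),	/* interrupted registers, may be NULL */
	.addr	= 0,
	.raw	= &raw,
};

/*
 * If the counter was opened with PERF_SAMPLE_RAW set in its sample_type,
 * perf_counter_output() appends raw.size bytes from raw.data to the
 * sample record in the ring buffer (see the diff below).
 */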
include/linux/perf_counter.h

@@ -121,7 +121,7 @@ enum perf_counter_sample_format {
 	PERF_SAMPLE_CPU				= 1U << 7,
 	PERF_SAMPLE_PERIOD			= 1U << 8,
 	PERF_SAMPLE_STREAM_ID			= 1U << 9,
-	PERF_SAMPLE_TP_RECORD			= 1U << 10,
+	PERF_SAMPLE_RAW				= 1U << 10,
 
 	PERF_SAMPLE_MAX = 1U << 11,		/* non-ABI */
 };

@@ -414,9 +414,9 @@ struct perf_callchain_entry {
 	__u64				ip[PERF_MAX_STACK_DEPTH];
 };
 
-struct perf_tracepoint_record {
-	int				size;
-	char				*record;
+struct perf_raw_record {
+	u32				size;
+	void				*data;
 };
 
 struct task_struct;

@@ -687,7 +687,7 @@ struct perf_sample_data {
 	struct pt_regs			*regs;
 	u64				addr;
 	u64				period;
-	void				*private;
+	struct perf_raw_record		*raw;
 };
 
 extern int perf_counter_overflow(struct perf_counter *counter, int nmi,

kernel/perf_counter.c

@@ -2646,7 +2646,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		u64 counter;
 	} group_entry;
 	struct perf_callchain_entry *callchain = NULL;
-	struct perf_tracepoint_record *tp = NULL;
+	struct perf_raw_record *raw = NULL;
 	int callchain_size = 0;
 	u64 time;
 	struct {

@@ -2715,10 +2715,10 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		header.size += sizeof(u64);
 	}
 
-	if (sample_type & PERF_SAMPLE_TP_RECORD) {
-		tp = data->private;
-		if (tp)
-			header.size += tp->size;
+	if (sample_type & PERF_SAMPLE_RAW) {
+		raw = data->raw;
+		if (raw)
+			header.size += raw->size;
 	}
 
 	ret = perf_output_begin(&handle, counter, header.size, nmi, 1);

@@ -2784,8 +2784,8 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		}
 	}
 
-	if ((sample_type & PERF_SAMPLE_TP_RECORD) && tp)
-		perf_output_copy(&handle, tp->record, tp->size);
+	if ((sample_type & PERF_SAMPLE_RAW) && raw)
+		perf_output_copy(&handle, raw->data, raw->size);
 
 	perf_output_end(&handle);
 }

@@ -3740,15 +3740,15 @@ static const struct pmu perf_ops_task_clock = {
 void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record,
 			  int entry_size)
 {
-	struct perf_tracepoint_record tp = {
+	struct perf_raw_record raw = {
 		.size = entry_size,
-		.record = record,
+		.data = record,
 	};
 
 	struct perf_sample_data data = {
 		.regs = get_irq_regs(),
 		.addr = addr,
-		.private = &tp,
+		.raw = &raw,
 	};
 
 	if (!data.regs)
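As a usage note, the tracepoint path itself stays a thin wrapper over that generic code: a hypothetical caller (not shown in this diff) would serialize its event record and pass it straight to perf_tpcounter_event(), whose signature appears in the last hunk. The my_* names and the addr/count values below are illustrative assumptions, not code from this patch.

/*
 * Hypothetical caller, for illustration only: a tracepoint profiling hook
 * hands its serialized record to the generic raw-sample path.
 */
perf_tpcounter_event(my_event_id, /* addr */ 0, /* count */ 1,
		     my_entry, my_entry_size);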