mirror of https://github.com/adulau/aha.git
perfcounters: hw ops rename

Impact: rename field names

Shorten them.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 862a1a5f34
commit 7671581f16
3 changed files with 31 additions and 31 deletions
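
In short: the commit drops the hw_perf_counter_ prefix from the three callbacks in struct hw_perf_counter_ops, so every call site shrinks from counter->hw_ops->hw_perf_counter_enable(counter) to counter->hw_ops->enable(counter). The snippet below is a minimal userspace sketch of the renamed ops table; struct perf_counter is stubbed down to its hw_ops pointer and the demo_* callbacks are placeholders, neither being the kernel's real definition.

#include <stdio.h>

struct perf_counter;

/* The renamed ops table; the old field names were hw_perf_counter_enable,
 * hw_perf_counter_disable and hw_perf_counter_read. */
struct hw_perf_counter_ops {
        void (*enable) (struct perf_counter *counter);
        void (*disable) (struct perf_counter *counter);
        void (*read) (struct perf_counter *counter);
};

/* Stub: the real struct perf_counter has many more fields. */
struct perf_counter {
        const struct hw_perf_counter_ops *hw_ops;
};

static void demo_enable(struct perf_counter *counter)  { (void)counter; puts("enable"); }
static void demo_disable(struct perf_counter *counter) { (void)counter; puts("disable"); }
static void demo_read(struct perf_counter *counter)    { (void)counter; puts("read"); }

static const struct hw_perf_counter_ops demo_ops = {
        .enable  = demo_enable,
        .disable = demo_disable,
        .read    = demo_read,
};

int main(void)
{
        struct perf_counter counter = { .hw_ops = &demo_ops };

        /* Call sites after the rename: counter->hw_ops-><op>(counter). */
        counter.hw_ops->enable(&counter);
        counter.hw_ops->read(&counter);
        counter.hw_ops->disable(&counter);
        return 0;
}

The shorter names stay unambiguous because the callbacks are only ever reached through the hw_ops member, which already provides the "hw perf counter" context.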
arch/x86/kernel/cpu/perf_counter.c
@@ -577,9 +577,9 @@ static void pmc_generic_read(struct perf_counter *counter)
 }
 
 static const struct hw_perf_counter_ops x86_perf_counter_ops = {
-        .hw_perf_counter_enable = pmc_generic_enable,
-        .hw_perf_counter_disable = pmc_generic_disable,
-        .hw_perf_counter_read = pmc_generic_read,
+        .enable = pmc_generic_enable,
+        .disable = pmc_generic_disable,
+        .read = pmc_generic_read,
 };
 
 const struct hw_perf_counter_ops *
include/linux/perf_counter.h
@@ -128,9 +128,9 @@ struct perf_counter;
  * struct hw_perf_counter_ops - performance counter hw ops
  */
 struct hw_perf_counter_ops {
-        void (*hw_perf_counter_enable) (struct perf_counter *counter);
-        void (*hw_perf_counter_disable) (struct perf_counter *counter);
-        void (*hw_perf_counter_read) (struct perf_counter *counter);
+        void (*enable) (struct perf_counter *counter);
+        void (*disable) (struct perf_counter *counter);
+        void (*read) (struct perf_counter *counter);
 };
 
 /**
kernel/perf_counter.c
@@ -109,7 +109,7 @@ static void __perf_counter_remove_from_context(void *info)
         spin_lock_irqsave(&ctx->lock, flags);
 
         if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
-                counter->hw_ops->hw_perf_counter_disable(counter);
+                counter->hw_ops->disable(counter);
                 counter->state = PERF_COUNTER_STATE_INACTIVE;
                 ctx->nr_active--;
                 cpuctx->active_oncpu--;
@@ -226,7 +226,7 @@ static void __perf_install_in_context(void *info)
                 counter->oncpu = cpu;
                 ctx->nr_active++;
                 cpuctx->active_oncpu++;
-                counter->hw_ops->hw_perf_counter_enable(counter);
+                counter->hw_ops->enable(counter);
         }
 
         if (!ctx->task && cpuctx->max_pertask)
@@ -297,7 +297,7 @@ counter_sched_out(struct perf_counter *counter,
         if (counter->state != PERF_COUNTER_STATE_ACTIVE)
                 return;
 
-        counter->hw_ops->hw_perf_counter_disable(counter);
+        counter->hw_ops->disable(counter);
         counter->state = PERF_COUNTER_STATE_INACTIVE;
         counter->oncpu = -1;
 
@@ -327,7 +327,7 @@ group_sched_out(struct perf_counter *group_counter,
  *
  * We stop each counter and update the counter value in counter->count.
  *
- * This does not protect us against NMI, but hw_perf_counter_disable()
+ * This does not protect us against NMI, but disable()
  * sets the disabled bit in the control field of counter _before_
  * accessing the counter control register. If a NMI hits, then it will
  * not restart the counter.
@@ -359,7 +359,7 @@ counter_sched_in(struct perf_counter *counter,
         if (counter->state == PERF_COUNTER_STATE_OFF)
                 return;
 
-        counter->hw_ops->hw_perf_counter_enable(counter);
+        counter->hw_ops->enable(counter);
         counter->state = PERF_COUNTER_STATE_ACTIVE;
         counter->oncpu = cpu;   /* TODO: put 'cpu' into cpuctx->cpu */
 
@@ -395,7 +395,7 @@ group_sched_in(struct perf_counter *group_counter,
  *
  * We restore the counter value and then enable it.
  *
- * This does not protect us against NMI, but hw_perf_counter_enable()
+ * This does not protect us against NMI, but enable()
  * sets the enabled bit in the control field of counter _before_
  * accessing the counter control register. If a NMI hits, then it will
  * keep the counter running.
@@ -537,11 +537,11 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
 /*
  * Cross CPU call to read the hardware counter
  */
-static void __hw_perf_counter_read(void *info)
+static void __read(void *info)
 {
         struct perf_counter *counter = info;
 
-        counter->hw_ops->hw_perf_counter_read(counter);
+        counter->hw_ops->read(counter);
 }
 
 static u64 perf_counter_read(struct perf_counter *counter)
@@ -552,7 +552,7 @@ static u64 perf_counter_read(struct perf_counter *counter)
         */
         if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
                 smp_call_function_single(counter->oncpu,
-                                         __hw_perf_counter_read, counter, 1);
+                                         __read, counter, 1);
         }
 
         return atomic64_read(&counter->count);
@@ -855,9 +855,9 @@ static void cpu_clock_perf_counter_read(struct perf_counter *counter)
 }
 
 static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
-        .hw_perf_counter_enable = cpu_clock_perf_counter_enable,
-        .hw_perf_counter_disable = cpu_clock_perf_counter_disable,
-        .hw_perf_counter_read = cpu_clock_perf_counter_read,
+        .enable = cpu_clock_perf_counter_enable,
+        .disable = cpu_clock_perf_counter_disable,
+        .read = cpu_clock_perf_counter_read,
 };
 
 static void task_clock_perf_counter_update(struct perf_counter *counter)
@@ -891,9 +891,9 @@ static void task_clock_perf_counter_disable(struct perf_counter *counter)
 }
 
 static const struct hw_perf_counter_ops perf_ops_task_clock = {
-        .hw_perf_counter_enable = task_clock_perf_counter_enable,
-        .hw_perf_counter_disable = task_clock_perf_counter_disable,
-        .hw_perf_counter_read = task_clock_perf_counter_read,
+        .enable = task_clock_perf_counter_enable,
+        .disable = task_clock_perf_counter_disable,
+        .read = task_clock_perf_counter_read,
 };
 
 static u64 get_page_faults(void)
@@ -937,9 +937,9 @@ static void page_faults_perf_counter_disable(struct perf_counter *counter)
 }
 
 static const struct hw_perf_counter_ops perf_ops_page_faults = {
-        .hw_perf_counter_enable = page_faults_perf_counter_enable,
-        .hw_perf_counter_disable = page_faults_perf_counter_disable,
-        .hw_perf_counter_read = page_faults_perf_counter_read,
+        .enable = page_faults_perf_counter_enable,
+        .disable = page_faults_perf_counter_disable,
+        .read = page_faults_perf_counter_read,
 };
 
 static u64 get_context_switches(void)
@@ -983,9 +983,9 @@ static void context_switches_perf_counter_disable(struct perf_counter *counter)
 }
 
 static const struct hw_perf_counter_ops perf_ops_context_switches = {
-        .hw_perf_counter_enable = context_switches_perf_counter_enable,
-        .hw_perf_counter_disable = context_switches_perf_counter_disable,
-        .hw_perf_counter_read = context_switches_perf_counter_read,
+        .enable = context_switches_perf_counter_enable,
+        .disable = context_switches_perf_counter_disable,
+        .read = context_switches_perf_counter_read,
 };
 
 static inline u64 get_cpu_migrations(void)
@@ -1027,9 +1027,9 @@ static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
 }
 
 static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
-        .hw_perf_counter_enable = cpu_migrations_perf_counter_enable,
-        .hw_perf_counter_disable = cpu_migrations_perf_counter_disable,
-        .hw_perf_counter_read = cpu_migrations_perf_counter_read,
+        .enable = cpu_migrations_perf_counter_enable,
+        .disable = cpu_migrations_perf_counter_disable,
+        .read = cpu_migrations_perf_counter_read,
 };
 
 static const struct hw_perf_counter_ops *
@@ -1283,7 +1283,7 @@ __perf_counter_exit_task(struct task_struct *child,
 
         cpuctx = &__get_cpu_var(perf_cpu_context);
 
-        child_counter->hw_ops->hw_perf_counter_disable(child_counter);
+        child_counter->hw_ops->disable(child_counter);
         child_counter->state = PERF_COUNTER_STATE_INACTIVE;
         child_counter->oncpu = -1;
 