/*
 * Performance counter support - powerpc architecture code
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
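
/*
 * Per-CPU state for the counters currently scheduled on the hardware
 * PMU: the perf_counter pointers, their raw event codes, and the
 * MMCR0/MMCR1/MMCRA values (mmcr[0..2]) computed for that set.
 */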
struct cpu_hw_counters {
        int n_counters;
        int n_percpu;
        int disabled;
        int n_added;
        struct perf_counter *counter[MAX_HWCOUNTERS];
        unsigned int events[MAX_HWCOUNTERS];
        u64 mmcr[3];
        u8 pmcs_enabled;
};
DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);

struct power_pmu *ppmu;

/*
 * Normally, to ignore kernel events we set the FCS (freeze counters
 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
 * hypervisor bit set in the MSR, or if we are running on a processor
 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
 * then we need to use the FCHV bit to ignore kernel events.
 */
static unsigned int freeze_counters_kernel = MMCR0_FCS;

static void perf_counter_interrupt(struct pt_regs *regs);

void perf_counter_print_debug(void)
{
}

/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
        unsigned long val;

        switch (idx) {
        case 1:
                val = mfspr(SPRN_PMC1);
                break;
        case 2:
                val = mfspr(SPRN_PMC2);
                break;
        case 3:
                val = mfspr(SPRN_PMC3);
                break;
        case 4:
                val = mfspr(SPRN_PMC4);
                break;
        case 5:
                val = mfspr(SPRN_PMC5);
                break;
        case 6:
                val = mfspr(SPRN_PMC6);
                break;
        case 7:
                val = mfspr(SPRN_PMC7);
                break;
        case 8:
                val = mfspr(SPRN_PMC8);
                break;
        default:
                printk(KERN_ERR "oops trying to read PMC%d\n", idx);
                val = 0;
        }
        return val;
}

/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
        switch (idx) {
        case 1:
                mtspr(SPRN_PMC1, val);
                break;
        case 2:
                mtspr(SPRN_PMC2, val);
                break;
        case 3:
                mtspr(SPRN_PMC3, val);
                break;
        case 4:
                mtspr(SPRN_PMC4, val);
                break;
        case 5:
                mtspr(SPRN_PMC5, val);
                break;
        case 6:
                mtspr(SPRN_PMC6, val);
                break;
        case 7:
                mtspr(SPRN_PMC7, val);
                break;
        case 8:
                mtspr(SPRN_PMC8, val);
                break;
        default:
                printk(KERN_ERR "oops trying to write PMC%d\n", idx);
        }
}
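
/*
 * Note on the constraint check below: each event code maps, via
 * ppmu->get_constraint(), to a (mask, value) pair.  Roughly speaking, a
 * set of events is taken to be feasible when their values can be
 * accumulated (ppmu->add_fields supplies the field layout for the
 * addition and ppmu->test_adder exposes a field overflowing its limit)
 * without disturbing any bit positions already covered by the
 * accumulated mask.
 */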
/*
 * Check if a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
 * The feasible set is returned in event[].
 */
static int power_check_constraints(unsigned int event[], int n_ev)
{
        u64 mask, value, nv;
        unsigned int alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
        u64 amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
        u64 avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
        u64 smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
        int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
        int i, j;
        u64 addf = ppmu->add_fields;
        u64 tadd = ppmu->test_adder;

        if (n_ev > ppmu->n_counter)
                return -1;

        /* First see if the events will go on as-is */
        for (i = 0; i < n_ev; ++i) {
                alternatives[i][0] = event[i];
                if (ppmu->get_constraint(event[i], &amasks[i][0],
                                         &avalues[i][0]))
                        return -1;
                choice[i] = 0;
        }
        value = mask = 0;
        for (i = 0; i < n_ev; ++i) {
                nv = (value | avalues[i][0]) + (value & avalues[i][0] & addf);
                if ((((nv + tadd) ^ value) & mask) != 0 ||
                    (((nv + tadd) ^ avalues[i][0]) & amasks[i][0]) != 0)
                        break;
                value = nv;
                mask |= amasks[i][0];
        }
        if (i == n_ev)
                return 0;       /* all OK */

        /* doesn't work, gather alternatives... */
        if (!ppmu->get_alternatives)
                return -1;
        for (i = 0; i < n_ev; ++i) {
                n_alt[i] = ppmu->get_alternatives(event[i], alternatives[i]);
                for (j = 1; j < n_alt[i]; ++j)
                        ppmu->get_constraint(alternatives[i][j],
                                             &amasks[i][j], &avalues[i][j]);
        }

        /* enumerate all possibilities and see if any will work */
        i = 0;
        j = -1;
        value = mask = nv = 0;
        while (i < n_ev) {
                if (j >= 0) {
                        /* we're backtracking, restore context */
                        value = svalues[i];
                        mask = smasks[i];
                        j = choice[i];
                }
                /*
                 * See if any alternative k for event i,
                 * where k > j, will satisfy the constraints.
                 */
                while (++j < n_alt[i]) {
                        nv = (value | avalues[i][j]) +
                                (value & avalues[i][j] & addf);
                        if ((((nv + tadd) ^ value) & mask) == 0 &&
                            (((nv + tadd) ^ avalues[i][j])
                             & amasks[i][j]) == 0)
                                break;
                }
                if (j >= n_alt[i]) {
                        /*
                         * No feasible alternative, backtrack
                         * to event i-1 and continue enumerating its
                         * alternatives from where we got up to.
                         */
                        if (--i < 0)
                                return -1;
                } else {
                        /*
                         * Found a feasible alternative for event i,
                         * remember where we got up to with this event,
                         * go on to the next event, and start with
                         * the first alternative for it.
                         */
                        choice[i] = j;
                        svalues[i] = value;
                        smasks[i] = mask;
                        value = nv;
                        mask |= amasks[i][j];
                        ++i;
                        j = -1;
                }
        }

        /* OK, we have a feasible combination, tell the caller the solution */
        for (i = 0; i < n_ev; ++i)
                event[i] = alternatives[i][choice[i]];
        return 0;
}

/*
 * Check if newly-added counters have consistent settings for
 * exclude_{user,kernel,hv} with each other and any previously
 * added counters.
 */
static int check_excludes(struct perf_counter **ctrs, int n_prev, int n_new)
{
        int eu, ek, eh;
        int i, n;
        struct perf_counter *counter;

        n = n_prev + n_new;
        if (n <= 1)
                return 0;

        eu = ctrs[0]->hw_event.exclude_user;
        ek = ctrs[0]->hw_event.exclude_kernel;
        eh = ctrs[0]->hw_event.exclude_hv;
        if (n_prev == 0)
                n_prev = 1;
        for (i = n_prev; i < n; ++i) {
                counter = ctrs[i];
                if (counter->hw_event.exclude_user != eu ||
                    counter->hw_event.exclude_kernel != ek ||
                    counter->hw_event.exclude_hv != eh)
                        return -EAGAIN;
        }
        return 0;
}
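
/*
 * Read the hardware PMC for an active counter and fold the change since
 * the previous read into the 64-bit software count and period_left.
 */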
static void power_pmu_read(struct perf_counter *counter)
{
        long val, delta, prev;

        if (!counter->hw.idx)
                return;
        /*
         * Performance monitor interrupts come even when interrupts
         * are soft-disabled, as long as interrupts are hard-enabled.
         * Therefore we treat them like NMIs.
         */
        do {
                prev = atomic64_read(&counter->hw.prev_count);
                barrier();
                val = read_pmc(counter->hw.idx);
        } while (atomic64_cmpxchg(&counter->hw.prev_count, prev, val) != prev);

        /* The counters are only 32 bits wide */
        delta = (val - prev) & 0xfffffffful;
        atomic64_add(delta, &counter->count);
        atomic64_sub(delta, &counter->hw.period_left);
}

/*
 * Disable all counters to prevent PMU interrupts and to allow
 * counters to be added or removed.
 */
u64 hw_perf_save_disable(void)
{
        struct cpu_hw_counters *cpuhw;
        unsigned long ret;
        unsigned long flags;

        local_irq_save(flags);
        cpuhw = &__get_cpu_var(cpu_hw_counters);

        ret = cpuhw->disabled;
        if (!ret) {
                cpuhw->disabled = 1;
                cpuhw->n_added = 0;

                /*
                 * Check if we ever enabled the PMU on this cpu.
                 */
                if (!cpuhw->pmcs_enabled) {
                        if (ppc_md.enable_pmcs)
                                ppc_md.enable_pmcs();
                        cpuhw->pmcs_enabled = 1;
                }

                /*
                 * Disable instruction sampling if it was enabled
                 */
                if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
                        mtspr(SPRN_MMCRA,
                              cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
                        mb();
                }

                /*
                 * Set the 'freeze counters' bit.
                 * The barrier is to make sure the mtspr has been
                 * executed and the PMU has frozen the counters
                 * before we return.
                 */
                mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) | MMCR0_FC);
                mb();
        }
        local_irq_restore(flags);
        return ret;
}

/*
 * Re-enable all counters if disable == 0.
 * If we were previously disabled and counters were added, then
 * put the new config on the PMU.
 */
void hw_perf_restore(u64 disable)
{
        struct perf_counter *counter;
        struct cpu_hw_counters *cpuhw;
        unsigned long flags;
        long i;
        unsigned long val;
        s64 left;
        unsigned int hwc_index[MAX_HWCOUNTERS];

        if (disable)
                return;
        local_irq_save(flags);
        cpuhw = &__get_cpu_var(cpu_hw_counters);
        cpuhw->disabled = 0;

        /*
         * If we didn't change anything, or only removed counters,
         * no need to recalculate MMCR* settings and reset the PMCs.
         * Just reenable the PMU with the current MMCR* settings
         * (possibly updated for removal of counters).
         */
        if (!cpuhw->n_added) {
                mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
                mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
                if (cpuhw->n_counters == 0)
                        get_lppaca()->pmcregs_in_use = 0;
                goto out_enable;
        }

        /*
         * Compute MMCR* values for the new set of counters
         */
        if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_counters, hwc_index,
                               cpuhw->mmcr)) {
                /* shouldn't ever get here */
                printk(KERN_ERR "oops compute_mmcr failed\n");
                goto out;
        }

        /*
         * Add in MMCR0 freeze bits corresponding to the
         * hw_event.exclude_* bits for the first counter.
         * We have already checked that all counters have the
         * same values for these bits as the first counter.
         */
        counter = cpuhw->counter[0];
        if (counter->hw_event.exclude_user)
                cpuhw->mmcr[0] |= MMCR0_FCP;
        if (counter->hw_event.exclude_kernel)
                cpuhw->mmcr[0] |= freeze_counters_kernel;
        if (counter->hw_event.exclude_hv)
                cpuhw->mmcr[0] |= MMCR0_FCHV;

        /*
         * Write the new configuration to MMCR* with the freeze
         * bit set and set the hardware counters to their initial values.
         * Then unfreeze the counters.
         */
        get_lppaca()->pmcregs_in_use = 1;
        mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
        mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
        mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
                                | MMCR0_FC);

        /*
         * Read off any pre-existing counters that need to move
         * to another PMC.
         */
        for (i = 0; i < cpuhw->n_counters; ++i) {
                counter = cpuhw->counter[i];
                if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) {
                        power_pmu_read(counter);
                        write_pmc(counter->hw.idx, 0);
                        counter->hw.idx = 0;
                }
        }

        /*
         * Initialize the PMCs for all the new and moved counters.
         */
        for (i = 0; i < cpuhw->n_counters; ++i) {
                counter = cpuhw->counter[i];
                if (counter->hw.idx)
                        continue;
                val = 0;
                if (counter->hw_event.irq_period) {
                        left = atomic64_read(&counter->hw.period_left);
                        if (left < 0x80000000L)
                                val = 0x80000000L - left;
                }
                atomic64_set(&counter->hw.prev_count, val);
                counter->hw.idx = hwc_index[i] + 1;
                write_pmc(counter->hw.idx, val);
                perf_counter_update_userpage(counter);
        }
        cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;

 out_enable:
        mb();
        mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);

        /*
         * Enable instruction sampling if necessary
         */
        if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
                mb();
                mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
        }

 out:
        local_irq_restore(flags);
}
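
/*
 * Collect the group leader (if it is a hardware counter) and those of
 * its siblings that are hardware counters and not OFF into
 * ctrs[]/events[], up to max_count entries.  Returns the number
 * collected, or -1 if they do not all fit.
 */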
static int collect_events(struct perf_counter *group, int max_count,
                          struct perf_counter *ctrs[], unsigned int *events)
{
        int n = 0;
        struct perf_counter *counter;

        if (!is_software_counter(group)) {
                if (n >= max_count)
                        return -1;
                ctrs[n] = group;
                events[n++] = group->hw.config;
        }
        list_for_each_entry(counter, &group->sibling_list, list_entry) {
                if (!is_software_counter(counter) &&
                    counter->state != PERF_COUNTER_STATE_OFF) {
                        if (n >= max_count)
                                return -1;
                        ctrs[n] = counter;
                        events[n++] = counter->hw.config;
                }
        }
        return n;
}
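
/*
 * Mark a counter as running on this CPU, update its running timestamp,
 * and call the pmu enable hook for software counters (hardware counters
 * are programmed onto the PMCs by hw_perf_restore).
 */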
static void counter_sched_in(struct perf_counter *counter, int cpu)
{
        counter->state = PERF_COUNTER_STATE_ACTIVE;
        counter->oncpu = cpu;
        counter->tstamp_running += counter->ctx->time - counter->tstamp_stopped;
        if (is_software_counter(counter))
                counter->pmu->enable(counter);
}

/*
 * Called to enable a whole group of counters.
 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
 * Assumes the caller has disabled interrupts and has
 * frozen the PMU with hw_perf_save_disable.
 */
int hw_perf_group_sched_in(struct perf_counter *group_leader,
               struct perf_cpu_context *cpuctx,
               struct perf_counter_context *ctx, int cpu)
{
        struct cpu_hw_counters *cpuhw;
        long i, n, n0;
        struct perf_counter *sub;

        cpuhw = &__get_cpu_var(cpu_hw_counters);
        n0 = cpuhw->n_counters;
        n = collect_events(group_leader, ppmu->n_counter - n0,
                           &cpuhw->counter[n0], &cpuhw->events[n0]);
        if (n < 0)
                return -EAGAIN;
        if (check_excludes(cpuhw->counter, n0, n))
                return -EAGAIN;
        if (power_check_constraints(cpuhw->events, n + n0))
                return -EAGAIN;
        cpuhw->n_counters = n0 + n;
        cpuhw->n_added += n;

        /*
         * OK, this group can go on; update counter states etc.,
         * and enable any software counters
         */
        for (i = n0; i < n0 + n; ++i)
                cpuhw->counter[i]->hw.config = cpuhw->events[i];
        cpuctx->active_oncpu += n;
        n = 1;
        counter_sched_in(group_leader, cpu);
        list_for_each_entry(sub, &group_leader->sibling_list, list_entry) {
                if (sub->state != PERF_COUNTER_STATE_OFF) {
                        counter_sched_in(sub, cpu);
                        ++n;
                }
        }
        ctx->nr_active += n;

        return 1;
}

/*
 * Add a counter to the PMU.
 * If all counters are not already frozen, then we disable and
 * re-enable the PMU in order to get hw_perf_restore to do the
 * actual work of reconfiguring the PMU.
 */
static int power_pmu_enable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuhw;
        unsigned long flags;
        u64 pmudis;
        int n0;
        int ret = -EAGAIN;

        local_irq_save(flags);
        pmudis = hw_perf_save_disable();

        /*
         * Add the counter to the list (if there is room)
         * and check whether the total set is still feasible.
         */
        cpuhw = &__get_cpu_var(cpu_hw_counters);
        n0 = cpuhw->n_counters;
        if (n0 >= ppmu->n_counter)
                goto out;
        cpuhw->counter[n0] = counter;
        cpuhw->events[n0] = counter->hw.config;
        if (check_excludes(cpuhw->counter, n0, 1))
                goto out;
        if (power_check_constraints(cpuhw->events, n0 + 1))
                goto out;

        counter->hw.config = cpuhw->events[n0];
        ++cpuhw->n_counters;
        ++cpuhw->n_added;

        ret = 0;
 out:
        hw_perf_restore(pmudis);
        local_irq_restore(flags);
        return ret;
}

/*
 * Remove a counter from the PMU.
 */
static void power_pmu_disable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuhw;
        long i;
        u64 pmudis;
        unsigned long flags;

        local_irq_save(flags);
        pmudis = hw_perf_save_disable();

        power_pmu_read(counter);

        cpuhw = &__get_cpu_var(cpu_hw_counters);
        for (i = 0; i < cpuhw->n_counters; ++i) {
                if (counter == cpuhw->counter[i]) {
                        while (++i < cpuhw->n_counters)
                                cpuhw->counter[i-1] = cpuhw->counter[i];
                        --cpuhw->n_counters;
                        ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr);
                        write_pmc(counter->hw.idx, 0);
                        counter->hw.idx = 0;
                        perf_counter_update_userpage(counter);
                        break;
                }
        }
        if (cpuhw->n_counters == 0) {
                /* disable exceptions if no counters are running */
                cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
        }

        hw_perf_restore(pmudis);
        local_irq_restore(flags);
}
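
/*
 * PMU operations for powerpc hardware counters: add a counter to the
 * PMU, remove one, and read its current value.
 */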
struct pmu power_pmu = {
        .enable         = power_pmu_enable,
        .disable        = power_pmu_disable,
        .read           = power_pmu_read,
};

/* Number of perf_counters counting hardware events */
static atomic_t num_counters;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * Release the PMU if this is the last perf_counter.
 */
static void hw_perf_counter_destroy(struct perf_counter *counter)
{
        if (!atomic_add_unless(&num_counters, -1, 1)) {
                mutex_lock(&pmc_reserve_mutex);
                if (atomic_dec_return(&num_counters) == 0)
                        release_pmc_hardware();
                mutex_unlock(&pmc_reserve_mutex);
        }
}
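
/*
 * Set up the hardware state for a new counter: translate a generic
 * event ID to a raw PMU event code, ignore exclude_hv when not running
 * under a hypervisor, and check exclude_* consistency and PMU
 * constraints against the other hardware counters in the group.
 */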
|
|
|
|
|
2009-04-29 06:47:03 -04:00
|
|
|
const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
|
2009-01-09 04:21:55 -05:00
|
|
|
{
|
|
|
|
unsigned long ev;
|
|
|
|
struct perf_counter *ctrs[MAX_HWCOUNTERS];
|
|
|
|
unsigned int events[MAX_HWCOUNTERS];
|
|
|
|
int n;
|
perf_counter: powerpc: only reserve PMU hardware when we need it
Impact: cooperate with oprofile
At present, on PowerPC, if you have perf_counters compiled in, oprofile
doesn't work. There is code to allow the PMU to be shared between
competing subsystems, such as perf_counters and oprofile, but currently
the perf_counter subsystem reserves the PMU for itself at boot time,
and never releases it.
This makes perf_counter play nicely with oprofile. Now we keep a count
of how many perf_counter instances are counting hardware events, and
reserve the PMU when that count becomes non-zero, and release the PMU
when that count becomes zero. This means that it is possible to have
perf_counters compiled in and still use oprofile, as long as there are
no hardware perf_counters active. This also means that if oprofile is
active, sys_perf_counter_open will fail if the hw_event specifies a
hardware event.
To avoid races with other tasks creating and destroying perf_counters,
we use a mutex. We use atomic_inc_not_zero and atomic_add_unless to
avoid having to take the mutex unless there is a possibility of the
count going between 0 and 1.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Orig-LKML-Reference: <20090330171023.627912475@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-03-30 13:07:07 -04:00
|
|
|
int err;
|
2009-01-09 04:21:55 -05:00
|
|
|
|
|
|
|
if (!ppmu)
|
perf_counter: make it possible for hw_perf_counter_init to return error codes
Impact: better error reporting
At present, if hw_perf_counter_init encounters an error, all it can do
is return NULL, which causes sys_perf_counter_open to return an EINVAL
error to userspace. This isn't very informative for userspace; it means
that userspace can't tell the difference between "sorry, oprofile is
already using the PMU" and "we don't support this CPU" and "this CPU
doesn't support the requested generic hardware event".
This commit uses the PTR_ERR/ERR_PTR/IS_ERR set of macros to let
hw_perf_counter_init return an error code on error rather than just NULL
if it wishes. If it does so, that error code will be returned from
sys_perf_counter_open to userspace. If it returns NULL, an EINVAL
error will be returned to userspace, as before.
This also adapts the powerpc hw_perf_counter_init to make use of this
to return ENXIO, EINVAL, EBUSY, or EOPNOTSUPP as appropriate. It would
be good to add extra error numbers in future to allow userspace to
distinguish the various errors that are currently reported as EINVAL,
i.e. irq_period < 0, too many events in a group, conflict between
exclude_* settings in a group, and PMU resource conflict in a group.
[ v2: fix a bug pointed out by Corey Ashford where error returns from
hw_perf_counter_init were not handled correctly in the case of
raw hardware events.]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Orig-LKML-Reference: <20090330171023.682428180@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-03-30 13:07:08 -04:00
|
|
|
return ERR_PTR(-ENXIO);
|
2009-01-09 04:21:55 -05:00
|
|
|
if ((s64)counter->hw_event.irq_period < 0)
|
perf_counter: make it possible for hw_perf_counter_init to return error codes
Impact: better error reporting
At present, if hw_perf_counter_init encounters an error, all it can do
is return NULL, which causes sys_perf_counter_open to return an EINVAL
error to userspace. This isn't very informative for userspace; it means
that userspace can't tell the difference between "sorry, oprofile is
already using the PMU" and "we don't support this CPU" and "this CPU
doesn't support the requested generic hardware event".
This commit uses the PTR_ERR/ERR_PTR/IS_ERR set of macros to let
hw_perf_counter_init return an error code on error rather than just NULL
if it wishes. If it does so, that error code will be returned from
sys_perf_counter_open to userspace. If it returns NULL, an EINVAL
error will be returned to userspace, as before.
This also adapts the powerpc hw_perf_counter_init to make use of this
to return ENXIO, EINVAL, EBUSY, or EOPNOTSUPP as appropriate. It would
be good to add extra error numbers in future to allow userspace to
distinguish the various errors that are currently reported as EINVAL,
i.e. irq_period < 0, too many events in a group, conflict between
exclude_* settings in a group, and PMU resource conflict in a group.
[ v2: fix a bug pointed out by Corey Ashford where error returns from
hw_perf_counter_init were not handled correctly in the case of
raw hardware events.]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Orig-LKML-Reference: <20090330171023.682428180@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-03-30 13:07:08 -04:00
|
|
|
return ERR_PTR(-EINVAL);
|
2009-03-23 13:22:06 -04:00
|
|
|
if (!perf_event_raw(&counter->hw_event)) {
|
|
|
|
ev = perf_event_id(&counter->hw_event);
|
2009-03-21 00:31:47 -04:00
|
|
|
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
|
perf_counter: make it possible for hw_perf_counter_init to return error codes
Impact: better error reporting
At present, if hw_perf_counter_init encounters an error, all it can do
is return NULL, which causes sys_perf_counter_open to return an EINVAL
error to userspace. This isn't very informative for userspace; it means
that userspace can't tell the difference between "sorry, oprofile is
already using the PMU" and "we don't support this CPU" and "this CPU
doesn't support the requested generic hardware event".
This commit uses the PTR_ERR/ERR_PTR/IS_ERR set of macros to let
hw_perf_counter_init return an error code on error rather than just NULL
if it wishes. If it does so, that error code will be returned from
sys_perf_counter_open to userspace. If it returns NULL, an EINVAL
error will be returned to userspace, as before.
This also adapts the powerpc hw_perf_counter_init to make use of this
to return ENXIO, EINVAL, EBUSY, or EOPNOTSUPP as appropriate. It would
be good to add extra error numbers in future to allow userspace to
distinguish the various errors that are currently reported as EINVAL,
i.e. irq_period < 0, too many events in a group, conflict between
exclude_* settings in a group, and PMU resource conflict in a group.
[ v2: fix a bug pointed out by Corey Ashford where error returns from
hw_perf_counter_init were not handled correctly in the case of
raw hardware events.]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Orig-LKML-Reference: <20090330171023.682428180@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-03-30 13:07:08 -04:00
|
|
|
return ERR_PTR(-EOPNOTSUPP);
|
2009-01-09 04:21:55 -05:00
|
|
|
ev = ppmu->generic_events[ev];
|
2009-03-21 00:31:47 -04:00
|
|
|
} else {
|
2009-03-23 13:22:06 -04:00
|
|
|
ev = perf_event_config(&counter->hw_event);
|
2009-01-09 04:21:55 -05:00
|
|
|
}
|
|
|
|
counter->hw.config_base = ev;
|
|
|
|
counter->hw.idx = 0;
|
|
|
|
|
perf_counters: allow users to count user, kernel and/or hypervisor events
Impact: new perf_counter feature
This extends the perf_counter_hw_event struct with bits that specify
that events in user, kernel and/or hypervisor mode should not be
counted (i.e. should be excluded), and adds code to program the PMU
mode selection bits accordingly on x86 and powerpc.
For software counters, we don't currently have the infrastructure to
distinguish which mode an event occurs in, so we currently fail the
counter initialization if the setting of the hw_event.exclude_* bits
would require us to distinguish. Context switches and CPU migrations
are currently considered to occur in kernel mode.
On x86, this changes the previous policy that only root can count
kernel events. Now non-root users can count kernel events or exclude
them. Non-root users still can't use NMI events, though. On x86 we
don't appear to have any way to control whether hypervisor events are
counted or not, so hw_event.exclude_hv is ignored.
On powerpc, the selection of whether to count events in user, kernel
and/or hypervisor mode is PMU-wide, not per-counter, so this adds a
check that the hw_event.exclude_* settings are the same as other events
on the PMU. Counters being added to a group have to have the same
settings as the other hardware counters in the group. Counters and
groups can only be enabled in hw_perf_group_sched_in or power_perf_enable
if they have the same settings as any other counters already on the
PMU. If we are not running on a hypervisor, the exclude_hv setting
is ignored (by forcing it to 0) since we can't ever get any
hypervisor events.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-02-10 22:35:35 -05:00
|
|
|
/*
|
|
|
|
* If we are not running on a hypervisor, force the
|
|
|
|
* exclude_hv bit to 0 so that we don't care what
|
2009-02-23 07:01:28 -05:00
|
|
|
* the user set it to.
|
perf_counters: allow users to count user, kernel and/or hypervisor events
Impact: new perf_counter feature
This extends the perf_counter_hw_event struct with bits that specify
that events in user, kernel and/or hypervisor mode should not be
counted (i.e. should be excluded), and adds code to program the PMU
mode selection bits accordingly on x86 and powerpc.
For software counters, we don't currently have the infrastructure to
distinguish which mode an event occurs in, so we currently fail the
counter initialization if the setting of the hw_event.exclude_* bits
would require us to distinguish. Context switches and CPU migrations
are currently considered to occur in kernel mode.
On x86, this changes the previous policy that only root can count
kernel events. Now non-root users can count kernel events or exclude
them. Non-root users still can't use NMI events, though. On x86 we
don't appear to have any way to control whether hypervisor events are
counted or not, so hw_event.exclude_hv is ignored.
On powerpc, the selection of whether to count events in user, kernel
and/or hypervisor mode is PMU-wide, not per-counter, so this adds a
check that the hw_event.exclude_* settings are the same as other events
on the PMU. Counters being added to a group have to have the same
settings as the other hardware counters in the group. Counters and
groups can only be enabled in hw_perf_group_sched_in or power_perf_enable
if they have the same settings as any other counters already on the
PMU. If we are not running on a hypervisor, the exclude_hv setting
is ignored (by forcing it to 0) since we can't ever get any
hypervisor events.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-02-10 22:35:35 -05:00
|
|
|
*/
|
|
|
|
if (!firmware_has_feature(FW_FEATURE_LPAR))
|
|
|
|
counter->hw_event.exclude_hv = 0;
|
|
|
|
|
2009-01-09 04:21:55 -05:00
|
|
|
/*
|
|
|
|
* If this is in a group, check if it can go on with all the
|
|
|
|
* other hardware counters in the group. We assume the counter
|
|
|
|
* hasn't been linked into its leader's sibling list at this point.
|
|
|
|
*/
|
|
|
|
n = 0;
|
|
|
|
if (counter->group_leader != counter) {
|
|
|
|
n = collect_events(counter->group_leader, ppmu->n_counter - 1,
|
|
|
|
ctrs, events);
|
|
|
|
if (n < 0)
|
perf_counter: make it possible for hw_perf_counter_init to return error codes
Impact: better error reporting
At present, if hw_perf_counter_init encounters an error, all it can do
is return NULL, which causes sys_perf_counter_open to return an EINVAL
error to userspace. This isn't very informative for userspace; it means
that userspace can't tell the difference between "sorry, oprofile is
already using the PMU" and "we don't support this CPU" and "this CPU
doesn't support the requested generic hardware event".
This commit uses the PTR_ERR/ERR_PTR/IS_ERR set of macros to let
hw_perf_counter_init return an error code on error rather than just NULL
if it wishes. If it does so, that error code will be returned from
sys_perf_counter_open to userspace. If it returns NULL, an EINVAL
error will be returned to userspace, as before.
This also adapts the powerpc hw_perf_counter_init to make use of this
to return ENXIO, EINVAL, EBUSY, or EOPNOTSUPP as appropriate. It would
be good to add extra error numbers in future to allow userspace to
distinguish the various errors that are currently reported as EINVAL,
i.e. irq_period < 0, too many events in a group, conflict between
exclude_* settings in a group, and PMU resource conflict in a group.
[ v2: fix a bug pointed out by Corey Ashford where error returns from
hw_perf_counter_init were not handled correctly in the case of
raw hardware events.]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Orig-LKML-Reference: <20090330171023.682428180@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-03-30 13:07:08 -04:00
|
|
|
return ERR_PTR(-EINVAL);
|
2009-01-09 04:21:55 -05:00
|
|
|
}
|
perf_counters: allow users to count user, kernel and/or hypervisor events
Impact: new perf_counter feature
This extends the perf_counter_hw_event struct with bits that specify
that events in user, kernel and/or hypervisor mode should not be
counted (i.e. should be excluded), and adds code to program the PMU
mode selection bits accordingly on x86 and powerpc.
For software counters, we don't currently have the infrastructure to
distinguish which mode an event occurs in, so we currently fail the
counter initialization if the setting of the hw_event.exclude_* bits
would require us to distinguish. Context switches and CPU migrations
are currently considered to occur in kernel mode.
On x86, this changes the previous policy that only root can count
kernel events. Now non-root users can count kernel events or exclude
them. Non-root users still can't use NMI events, though. On x86 we
don't appear to have any way to control whether hypervisor events are
counted or not, so hw_event.exclude_hv is ignored.
On powerpc, the selection of whether to count events in user, kernel
and/or hypervisor mode is PMU-wide, not per-counter, so this adds a
check that the hw_event.exclude_* settings are the same as other events
on the PMU. Counters being added to a group have to have the same
settings as the other hardware counters in the group. Counters and
groups can only be enabled in hw_perf_group_sched_in or power_perf_enable
if they have the same settings as any other counters already on the
PMU. If we are not running on a hypervisor, the exclude_hv setting
is ignored (by forcing it to 0) since we can't ever get any
hypervisor events.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-02-10 22:35:35 -05:00
|
|
|
events[n] = ev;
|
2009-03-04 22:05:57 -05:00
|
|
|
ctrs[n] = counter;
|
perf_counters: allow users to count user, kernel and/or hypervisor events
Impact: new perf_counter feature
This extends the perf_counter_hw_event struct with bits that specify
that events in user, kernel and/or hypervisor mode should not be
counted (i.e. should be excluded), and adds code to program the PMU
mode selection bits accordingly on x86 and powerpc.
For software counters, we don't currently have the infrastructure to
distinguish which mode an event occurs in, so we currently fail the
counter initialization if the setting of the hw_event.exclude_* bits
would require us to distinguish. Context switches and CPU migrations
are currently considered to occur in kernel mode.
On x86, this changes the previous policy that only root can count
kernel events. Now non-root users can count kernel events or exclude
them. Non-root users still can't use NMI events, though. On x86 we
don't appear to have any way to control whether hypervisor events are
counted or not, so hw_event.exclude_hv is ignored.
On powerpc, the selection of whether to count events in user, kernel
and/or hypervisor mode is PMU-wide, not per-counter, so this adds a
check that the hw_event.exclude_* settings are the same as other events
on the PMU. Counters being added to a group have to have the same
settings as the other hardware counters in the group. Counters and
groups can only be enabled in hw_perf_group_sched_in or power_perf_enable
if they have the same settings as any other counters already on the
PMU. If we are not running on a hypervisor, the exclude_hv setting
is ignored (by forcing it to 0) since we can't ever get any
hypervisor events.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-02-10 22:35:35 -05:00
|
|
|
if (check_excludes(ctrs, n, 1))
|
perf_counter: make it possible for hw_perf_counter_init to return error codes
Impact: better error reporting
At present, if hw_perf_counter_init encounters an error, all it can do
is return NULL, which causes sys_perf_counter_open to return an EINVAL
error to userspace. This isn't very informative for userspace; it means
that userspace can't tell the difference between "sorry, oprofile is
already using the PMU" and "we don't support this CPU" and "this CPU
doesn't support the requested generic hardware event".
This commit uses the PTR_ERR/ERR_PTR/IS_ERR set of macros to let
hw_perf_counter_init return an error code on error rather than just NULL
if it wishes. If it does so, that error code will be returned from
sys_perf_counter_open to userspace. If it returns NULL, an EINVAL
error will be returned to userspace, as before.
This also adapts the powerpc hw_perf_counter_init to make use of this
to return ENXIO, EINVAL, EBUSY, or EOPNOTSUPP as appropriate. It would
be good to add extra error numbers in future to allow userspace to
distinguish the various errors that are currently reported as EINVAL,
i.e. irq_period < 0, too many events in a group, conflict between
exclude_* settings in a group, and PMU resource conflict in a group.
[ v2: fix a bug pointed out by Corey Ashford where error returns from
hw_perf_counter_init were not handled correctly in the case of
raw hardware events.]
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Orig-LKML-Reference: <20090330171023.682428180@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-03-30 13:07:08 -04:00
|
|
|
return ERR_PTR(-EINVAL);
|
perf_counters: allow users to count user, kernel and/or hypervisor events
Impact: new perf_counter feature
This extends the perf_counter_hw_event struct with bits that specify
that events in user, kernel and/or hypervisor mode should not be
counted (i.e. should be excluded), and adds code to program the PMU
mode selection bits accordingly on x86 and powerpc.
For software counters, we don't currently have the infrastructure to
distinguish which mode an event occurs in, so we currently fail the
counter initialization if the setting of the hw_event.exclude_* bits
would require us to distinguish. Context switches and CPU migrations
are currently considered to occur in kernel mode.
On x86, this changes the previous policy that only root can count
kernel events. Now non-root users can count kernel events or exclude
them. Non-root users still can't use NMI events, though. On x86 we
don't appear to have any way to control whether hypervisor events are
counted or not, so hw_event.exclude_hv is ignored.
On powerpc, the selection of whether to count events in user, kernel
and/or hypervisor mode is PMU-wide, not per-counter, so this adds a
check that the hw_event.exclude_* settings are the same as other events
on the PMU. Counters being added to a group have to have the same
settings as the other hardware counters in the group. Counters and
groups can only be enabled in hw_perf_group_sched_in or power_perf_enable
if they have the same settings as any other counters already on the
PMU. If we are not running on a hypervisor, the exclude_hv setting
is ignored (by forcing it to 0) since we can't ever get any
hypervisor events.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2009-02-10 22:35:35 -05:00
|
|
|
if (power_check_constraints(events, n + 1))
|
2009-03-30 13:07:08 -04:00
|
|
|
return ERR_PTR(-EINVAL);
|
2009-01-09 04:21:55 -05:00
|
|
|
|
2009-02-10 22:35:35 -05:00
|
|
|
counter->hw.config = events[n];
|
2009-01-09 04:21:55 -05:00
|
|
|
atomic64_set(&counter->hw.period_left, counter->hw_event.irq_period);
|
perf_counter: powerpc: only reserve PMU hardware when we need it
Impact: cooperate with oprofile
At present, on PowerPC, if you have perf_counters compiled in, oprofile
doesn't work. There is code to allow the PMU to be shared between
competing subsystems, such as perf_counters and oprofile, but currently
the perf_counter subsystem reserves the PMU for itself at boot time,
and never releases it.
This makes perf_counter play nicely with oprofile. Now we keep a count
of how many perf_counter instances are counting hardware events, and
reserve the PMU when that count becomes non-zero, and release the PMU
when that count becomes zero. This means that it is possible to have
perf_counters compiled in and still use oprofile, as long as there are
no hardware perf_counters active. This also means that if oprofile is
active, sys_perf_counter_open will fail if the hw_event specifies a
hardware event.
To avoid races with other tasks creating and destroying perf_counters,
we use a mutex. We use atomic_inc_not_zero and atomic_add_unless to
avoid having to take the mutex unless there is a possibility of the
count going between 0 and 1.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Orig-LKML-Reference: <20090330171023.627912475@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-03-30 13:07:07 -04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* See if we need to reserve the PMU.
|
|
|
|
* If no counters are currently in use, then we have to take a
|
|
|
|
* mutex to ensure that we don't race with another task doing
|
|
|
|
* reserve_pmc_hardware or release_pmc_hardware.
|
|
|
|
*/
|
|
|
|
err = 0;
|
|
|
|
if (!atomic_inc_not_zero(&num_counters)) {
|
|
|
|
mutex_lock(&pmc_reserve_mutex);
|
|
|
|
if (atomic_read(&num_counters) == 0 &&
|
|
|
|
reserve_pmc_hardware(perf_counter_interrupt))
|
|
|
|
err = -EBUSY;
|
|
|
|
else
|
|
|
|
atomic_inc(&num_counters);
|
|
|
|
mutex_unlock(&pmc_reserve_mutex);
|
|
|
|
}
|
|
|
|
counter->destroy = hw_perf_counter_destroy;
|
|
|
|
|
|
|
|
if (err)
|
2009-03-30 13:07:08 -04:00
|
|
|
return ERR_PTR(err);
|
2009-04-29 06:47:03 -04:00
|
|
|
return &power_pmu;
|
2009-01-09 04:21:55 -05:00
|
|
|
}
|
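(The reserve/release scheme described in the "only reserve PMU hardware when we need it" message above amounts to a reference count where the mutex is only taken when the count might be crossing the 0/1 boundary. The following is a userspace sketch of that shape using C11 atomics and a pthread mutex; the names and the fake reserve/release functions are illustrative stand-ins, not the kernel's code, and the release path is kept simpler than the kernel's atomic_add_unless variant.)

#include <stdio.h>
#include <stdatomic.h>
#include <pthread.h>

static atomic_int num_users;
static pthread_mutex_t reserve_mutex = PTHREAD_MUTEX_INITIALIZER;

/* stand-ins for reserve_pmc_hardware()/release_pmc_hardware() */
static int fake_reserve(void)  { puts("hardware reserved"); return 0; }
static void fake_release(void) { puts("hardware released"); }

/* increment *v unless it is zero; returns nonzero on success */
static int inc_not_zero(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 0) {
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return 1;
	}
	return 0;
}

static int get_hardware(void)
{
	int err = 0;

	if (!inc_not_zero(&num_users)) {
		/* count may be going 0 -> 1: serialize against other threads */
		pthread_mutex_lock(&reserve_mutex);
		if (atomic_load(&num_users) == 0 && fake_reserve())
			err = -1;
		else
			atomic_fetch_add(&num_users, 1);
		pthread_mutex_unlock(&reserve_mutex);
	}
	return err;
}

static void put_hardware(void)
{
	/*
	 * Always lock here for simplicity; the kernel also skips this lock
	 * (via atomic_add_unless) when the count cannot reach zero.
	 */
	pthread_mutex_lock(&reserve_mutex);
	if (atomic_fetch_sub(&num_users, 1) == 1)
		fake_release();
	pthread_mutex_unlock(&reserve_mutex);
}

int main(void)
{
	if (get_hardware() == 0)
		put_hardware();
	return 0;
}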
|
|
|
|
|
|
|
/*
|
|
|
|
* A counter has overflowed; update its count and record
|
|
|
|
* things if requested. Note that interrupts are hard-disabled
|
|
|
|
* here so there is no possibility of being interrupted.
|
|
|
|
*/
|
|
|
|
static void record_and_restart(struct perf_counter *counter, long val,
|
2009-04-09 00:42:56 -04:00
|
|
|
struct pt_regs *regs, int nmi)
|
2009-01-09 04:21:55 -05:00
|
|
|
{
|
|
|
|
s64 prev, delta, left;
|
|
|
|
int record = 0;
|
|
|
|
|
|
|
|
/* we don't have to worry about interrupts here */
|
|
|
|
prev = atomic64_read(&counter->hw.prev_count);
|
|
|
|
delta = (val - prev) & 0xfffffffful;
|
|
|
|
atomic64_add(delta, &counter->count);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* See if the total period for this counter has expired,
|
|
|
|
* and update for the next period.
|
|
|
|
*/
|
|
|
|
val = 0;
|
|
|
|
left = atomic64_read(&counter->hw.period_left) - delta;
|
|
|
|
if (counter->hw_event.irq_period) {
|
|
|
|
if (left <= 0) {
|
|
|
|
left += counter->hw_event.irq_period;
|
|
|
|
if (left <= 0)
|
|
|
|
left = counter->hw_event.irq_period;
|
|
|
|
record = 1;
|
|
|
|
}
|
|
|
|
if (left < 0x80000000L)
|
|
|
|
val = 0x80000000L - left;
|
|
|
|
}
|
|
|
|
write_pmc(counter->hw.idx, val);
|
|
|
|
atomic64_set(&counter->hw.prev_count, val);
|
|
|
|
atomic64_set(&counter->hw.period_left, left);
|
2009-03-23 13:22:10 -04:00
|
|
|
perf_counter_update_userpage(counter);
|
2009-01-09 04:21:55 -05:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Finally record data if requested.
|
|
|
|
*/
|
2009-03-19 15:26:19 -04:00
|
|
|
if (record)
|
2009-04-09 00:42:56 -04:00
|
|
|
perf_counter_overflow(counter, nmi, regs, 0);
|
2009-01-09 04:21:55 -05:00
|
|
|
}
|
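(Two arithmetic details in record_and_restart are worth spelling out: the PMCs are 32 bits wide and the interrupt condition is the counter going negative, i.e. its most-significant bit becoming set, so masking val - prev with 0xffffffff gives the number of events since the last interrupt even across a wrap, and programming the counter to 0x80000000 - left makes it go negative again after left more events. Below is a standalone sketch of that arithmetic with made-up numbers; it is illustrative only, not kernel code.)

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t prev = 0x7ffffc18u;	/* 0x80000000 - 1000, written at the last interrupt */
	uint32_t val  = 0x80000008u;	/* value read in the handler: MSB set => overflow */
	int64_t irq_period = 1000;	/* pretend sampling period */
	int64_t period_left = 1000;	/* what was left of the period before this overflow */
	uint32_t next = 0;
	int record = 0;

	/* 32-bit subtraction plus the mask keeps the delta correct across a wrap */
	uint64_t delta = (uint64_t)(uint32_t)(val - prev) & 0xffffffffull;

	int64_t left = period_left - (int64_t)delta;
	if (left <= 0) {		/* the period has expired: start a new one */
		left += irq_period;
		if (left <= 0)
			left = irq_period;
		record = 1;
	}
	if (left < 0x80000000LL)	/* program the counter so it goes negative after `left` events */
		next = (uint32_t)(0x80000000LL - left);

	printf("delta=%llu record=%d next PMC value=0x%08x\n",
	       (unsigned long long)delta, record, (unsigned)next);
	return 0;
}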
|
|
|
|
|
|
|
/*
|
|
|
|
* Performance monitor interrupt stuff
|
|
|
|
*/
|
|
|
|
static void perf_counter_interrupt(struct pt_regs *regs)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
|
|
|
|
struct perf_counter *counter;
|
|
|
|
long val;
|
2009-03-30 13:07:02 -04:00
|
|
|
int found = 0;
|
2009-04-09 00:42:56 -04:00
|
|
|
int nmi;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If interrupts were soft-disabled when this PMU interrupt
|
|
|
|
* occurred, treat it as an NMI.
|
|
|
|
*/
|
|
|
|
nmi = !regs->softe;
|
|
|
|
if (nmi)
|
|
|
|
nmi_enter();
|
|
|
|
else
|
|
|
|
irq_enter();
|
2009-01-09 04:21:55 -05:00
|
|
|
|
|
|
|
for (i = 0; i < cpuhw->n_counters; ++i) {
|
|
|
|
counter = cpuhw->counter[i];
|
|
|
|
val = read_pmc(counter->hw.idx);
|
|
|
|
if ((int)val < 0) {
|
|
|
|
/* counter has overflowed */
|
|
|
|
found = 1;
|
2009-04-09 00:42:56 -04:00
|
|
|
record_and_restart(counter, val, regs, nmi);
|
2009-01-09 04:21:55 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* In case we didn't find and reset the counter that caused
|
|
|
|
* the interrupt, scan all counters and reset any that are
|
|
|
|
* negative, to avoid getting continual interrupts.
|
|
|
|
* Any that we processed in the previous loop will not be negative.
|
|
|
|
*/
|
|
|
|
if (!found) {
|
|
|
|
for (i = 0; i < ppmu->n_counter; ++i) {
|
|
|
|
val = read_pmc(i + 1);
|
|
|
|
if ((int)val < 0)
|
|
|
|
write_pmc(i + 1, 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Reset MMCR0 to its normal value. This will set PMXE and
|
|
|
|
* clear FC (freeze counters) and PMAO (perf mon alert occurred)
|
|
|
|
* and thus allow interrupts to occur again.
|
|
|
|
* XXX might want to use MSR.PM to keep the counters frozen until
|
|
|
|
* we get back out of this interrupt.
|
|
|
|
*/
|
|
|
|
mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);
|
|
|
|
|
2009-04-09 00:42:56 -04:00
|
|
|
if (nmi)
|
|
|
|
nmi_exit();
|
|
|
|
else
|
2009-03-19 15:26:20 -04:00
|
|
|
irq_exit();
|
2009-01-09 04:21:55 -05:00
|
|
|
}
|
|
|
|
|
2009-01-13 21:44:19 -05:00
|
|
|
void hw_perf_counter_setup(int cpu)
|
|
|
|
{
|
|
|
|
struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);
|
|
|
|
|
|
|
|
memset(cpuhw, 0, sizeof(*cpuhw));
|
|
|
|
cpuhw->mmcr[0] = MMCR0_FC;
|
|
|
|
}
|
|
|
|
|
2009-03-06 00:30:52 -05:00
|
|
|
extern struct power_pmu power4_pmu;
|
2009-01-10 00:34:07 -05:00
|
|
|
extern struct power_pmu ppc970_pmu;
|
2009-02-23 19:33:56 -05:00
|
|
|
extern struct power_pmu power5_pmu;
|
2009-03-06 00:27:10 -05:00
|
|
|
extern struct power_pmu power5p_pmu;
|
2009-01-09 05:05:35 -05:00
|
|
|
extern struct power_pmu power6_pmu;
|
2009-01-10 00:34:07 -05:00
|
|
|
|
2009-01-09 04:21:55 -05:00
|
|
|
static int init_perf_counters(void)
|
|
|
|
{
|
2009-01-10 00:34:07 -05:00
|
|
|
unsigned long pvr;
|
|
|
|
|
|
|
|
/* XXX should get this from cputable */
|
|
|
|
pvr = mfspr(SPRN_PVR);
|
|
|
|
switch (PVR_VER(pvr)) {
|
2009-03-06 00:30:52 -05:00
|
|
|
case PV_POWER4:
|
|
|
|
case PV_POWER4p:
|
|
|
|
ppmu = &power4_pmu;
|
|
|
|
break;
|
2009-01-10 00:34:07 -05:00
|
|
|
case PV_970:
|
|
|
|
case PV_970FX:
|
|
|
|
case PV_970MP:
|
|
|
|
ppmu = &ppc970_pmu;
|
|
|
|
break;
|
2009-02-23 19:33:56 -05:00
|
|
|
case PV_POWER5:
|
|
|
|
ppmu = &power5_pmu;
|
|
|
|
break;
|
2009-03-06 00:27:10 -05:00
|
|
|
case PV_POWER5p:
|
|
|
|
ppmu = &power5p_pmu;
|
|
|
|
break;
|
2009-01-09 05:05:35 -05:00
|
|
|
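/* 0x3e is the PVR version value for POWER6 */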
case 0x3e:
|
|
|
|
ppmu = &power6_pmu;
|
|
|
|
break;
|
2009-01-10 00:34:07 -05:00
|
|
|
}
|
2009-02-23 07:01:28 -05:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Use FCHV to ignore kernel events if MSR.HV is set.
|
|
|
|
*/
|
|
|
|
if (mfmsr() & MSR_HV)
|
|
|
|
freeze_counters_kernel = MMCR0_FCHV;
|
|
|
|
|
2009-01-09 04:21:55 -05:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
arch_initcall(init_perf_counters);
|
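(As a final aside, the PVR dispatch in init_perf_counters keys off the version field in the upper 16 bits of the processor version register. The fragment below decodes a made-up PVR value the same way; the sample value and the program are illustrative only, not kernel code.)

#include <stdio.h>
#include <stdint.h>

/* version field = upper 16 bits, revision = lower 16 bits of the 32-bit PVR */
#define PVR_VER(pvr)	(((pvr) >> 16) & 0xffff)
#define PVR_REV(pvr)	((pvr) & 0xffff)

int main(void)
{
	uint32_t pvr = 0x003e0201;	/* hypothetical POWER6 PVR reading */

	switch (PVR_VER(pvr)) {
	case 0x3e:
		printf("POWER6, version 0x%04x revision 0x%04x\n",
		       (unsigned)PVR_VER(pvr), (unsigned)PVR_REV(pvr));
		break;
	default:
		printf("no PMU support for PVR version 0x%04x\n",
		       (unsigned)PVR_VER(pvr));
		break;
	}
	return 0;
}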