/*
 * Performance counters:
 *
 *  Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_COUNTER_H
#define _LINUX_PERF_COUNTER_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE			= 0,
	PERF_TYPE_SOFTWARE			= 1,
	PERF_TYPE_TRACEPOINT			= 2,
	PERF_TYPE_HW_CACHE			= 3,
	PERF_TYPE_RAW				= 4,

	PERF_TYPE_MAX,				/* non-ABI */
};

/*
 * Generalized performance counter event types, used by the
 * attr.config parameter of the sys_perf_counter_open()
 * syscall:
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES		= 0,
	PERF_COUNT_HW_INSTRUCTIONS		= 1,
	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
	PERF_COUNT_HW_CACHE_MISSES		= 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_HW_BRANCH_MISSES		= 5,
	PERF_COUNT_HW_BUS_CYCLES		= 6,

	PERF_COUNT_HW_MAX,			/* non-ABI */
};
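
/*
 * Example (user-space sketch, not part of the ABI): a generalized
 * hardware event is selected by pairing attr.type with one of the
 * perf_hw_id values above, e.g.:
 *
 *	struct perf_counter_attr attr = { 0 };
 *
 *	attr.type   = PERF_TYPE_HARDWARE;
 *	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
 *	attr.size   = sizeof(attr);
 */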

/*
 * Generalized hardware cache counters:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D			= 0,
	PERF_COUNT_HW_CACHE_L1I			= 1,
	PERF_COUNT_HW_CACHE_LL			= 2,
	PERF_COUNT_HW_CACHE_DTLB		= 3,
	PERF_COUNT_HW_CACHE_ITLB		= 4,
	PERF_COUNT_HW_CACHE_BPU			= 5,

	PERF_COUNT_HW_CACHE_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ		= 0,
	PERF_COUNT_HW_CACHE_OP_WRITE		= 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH		= 2,

	PERF_COUNT_HW_CACHE_OP_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */
};
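
/*
 * Example (user-space sketch; the packing below matches what the perf
 * tools use for PERF_TYPE_HW_CACHE and is assumed here rather than
 * spelled out by this header): the three cache coordinates are packed
 * into attr.config as (id) | (op_id << 8) | (result_id << 16), so
 * L1-D read misses would be:
 *
 *	attr.type   = PERF_TYPE_HW_CACHE;
 *	attr.config = PERF_COUNT_HW_CACHE_L1D |
 *		      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */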

/*
 * Special "software" counters provided by the kernel, even if the hardware
 * does not support performance counters. These counters measure various
 * physical and sw events of the kernel (and allow the profiling of them as
 * well):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK			= 0,
	PERF_COUNT_SW_TASK_CLOCK		= 1,
	PERF_COUNT_SW_PAGE_FAULTS		= 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES		= 3,
	PERF_COUNT_SW_CPU_MIGRATIONS		= 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN		= 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ		= 6,

	PERF_COUNT_SW_MAX,			/* non-ABI */
};
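
/*
 * Example (user-space sketch): software events use the same attr
 * pairing as the hardware ones, e.g. counting context switches:
 *
 *	attr.type   = PERF_TYPE_SOFTWARE;
 *	attr.config = PERF_COUNT_SW_CONTEXT_SWITCHES;
 */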

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_counter_sample_format {
	PERF_SAMPLE_IP				= 1U << 0,
	PERF_SAMPLE_TID				= 1U << 1,
	PERF_SAMPLE_TIME			= 1U << 2,
	PERF_SAMPLE_ADDR			= 1U << 3,
	PERF_SAMPLE_READ			= 1U << 4,
	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
	PERF_SAMPLE_ID				= 1U << 6,
	PERF_SAMPLE_CPU				= 1U << 7,
	PERF_SAMPLE_PERIOD			= 1U << 8,
	PERF_SAMPLE_STREAM_ID			= 1U << 9,
	PERF_SAMPLE_RAW				= 1U << 10,

	PERF_SAMPLE_MAX = 1U << 11,		/* non-ABI */
};
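
/*
 * Example (user-space sketch): a sampling counter that records the
 * instruction pointer, pid/tid and a timestamp with every overflow
 * would set:
 *
 *	attr.sample_type   = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 *			     PERF_SAMPLE_TIME;
 *	attr.sample_period = 100000;	(i.e. one sample per 100000 events)
 */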

/*
 * The format of the data returned by read() on a perf counter fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64		value;
 *	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_RUNNING
 *	  { u64		id;           } && PERF_FORMAT_ID
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64		nr;
 *	  { u64		time_enabled; } && PERF_FORMAT_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_RUNNING
 *	  { u64		value;
 *	    { u64	id;           } && PERF_FORMAT_ID
 *	  }		cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_counter_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED		= 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING		= 1U << 1,
	PERF_FORMAT_ID				= 1U << 2,
	PERF_FORMAT_GROUP			= 1U << 3,

	PERF_FORMAT_MAX = 1U << 4,		/* non-ABI */
};
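
/*
 * Example (user-space sketch): with PERF_FORMAT_TOTAL_TIME_ENABLED and
 * PERF_FORMAT_TOTAL_TIME_RUNNING set, a counter that was multiplexed
 * off the PMU for part of the time can be scaled back to an estimate
 * of the full count:
 *
 *	u64 buf[3];	(value, time_enabled, time_running)
 *
 *	read(fd, buf, sizeof(buf));
 *	if (buf[2])
 *		scaled = buf[0] * buf[1] / buf[2];
 */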

#define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */

/*
 * Hardware event to monitor via a performance monitoring counter:
 */
struct perf_counter_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32			type;

	/*
	 * Size of the attr structure, for fwd/bwd compat.
	 */
	__u32			size;

	/*
	 * Type specific configuration information.
	 */
	__u64			config;

	union {
		__u64		sample_period;
		__u64		sample_freq;
	};

	__u64			sample_type;
	__u64			read_format;

	__u64			disabled       :  1, /* off by default        */
				inherit	       :  1, /* children inherit it   */
				pinned	       :  1, /* must always be on PMU */
				exclusive      :  1, /* only group on PMU     */
				exclude_user   :  1, /* don't count user      */
				exclude_kernel :  1, /* ditto kernel          */
				exclude_hv     :  1, /* ditto hypervisor      */
				exclude_idle   :  1, /* don't count when idle */
				mmap           :  1, /* include mmap data     */
				comm	       :  1, /* include comm data     */
				freq           :  1, /* use freq, not period  */
				inherit_stat   :  1, /* per task counts       */
				enable_on_exec :  1, /* next exec enables     */
				task           :  1, /* trace fork/exit       */

				__reserved_1   : 50;

	__u32			wakeup_events;	/* wakeup every n events */
	__u32			__reserved_2;

	__u64			__reserved_3;
};
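
/*
 * Example (user-space sketch; assumes a syscall wrapper for
 * sys_perf_counter_open() taking attr, pid, cpu, group_fd, flags):
 * a disabled, kernel-excluded cycle counter on the current task,
 * measured on any CPU:
 *
 *	struct perf_counter_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.type	    = PERF_TYPE_HARDWARE;
 *	attr.size	    = sizeof(attr);
 *	attr.config	    = PERF_COUNT_HW_CPU_CYCLES;
 *	attr.disabled	    = 1;
 *	attr.exclude_kernel = 1;
 *
 *	fd = sys_perf_counter_open(&attr, 0, -1, -1, 0);
 */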

/*
 * Ioctls that can be done on a perf counter fd:
 */
#define PERF_COUNTER_IOC_ENABLE		_IO ('$', 0)
#define PERF_COUNTER_IOC_DISABLE	_IO ('$', 1)
#define PERF_COUNTER_IOC_REFRESH	_IO ('$', 2)
#define PERF_COUNTER_IOC_RESET		_IO ('$', 3)
#define PERF_COUNTER_IOC_PERIOD		_IOW('$', 4, u64)
#define PERF_COUNTER_IOC_SET_OUTPUT	_IO ('$', 5)

enum perf_counter_ioc_flags {
	PERF_IOC_FLAG_GROUP		= 1U << 0,
};
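
/*
 * Example (user-space sketch): a counter created with attr.disabled
 * set can be reset and then switched on around the region of
 * interest; passing PERF_IOC_FLAG_GROUP as the ioctl argument is
 * assumed here to apply the operation to the whole group:
 *
 *	ioctl(fd, PERF_COUNTER_IOC_RESET, 0);
 *	ioctl(fd, PERF_COUNTER_IOC_ENABLE, 0);
 *	...	(workload)
 *	ioctl(fd, PERF_COUNTER_IOC_DISABLE, 0);
 */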

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_counter_mmap_page {
	__u32	version;		/* version number of this structure */
	__u32	compat_version;		/* lowest version this is compat with */

	/*
	 * Bits needed to read the hw counters in user-space.
	 *
	 *   u32 seq;
	 *   s64 count;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *
	 *     barrier()
	 *     if (pc->index) {
	 *       count = pmc_read(pc->index - 1);
	 *       count += pc->offset;
	 *     } else
	 *       goto regular_read;
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reasons this only works on self-monitoring
	 *       processes.
	 */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware counter identifier */
	__s64	offset;			/* add to hardware counter value */
	__u64	time_enabled;		/* time counter active */
	__u64	time_running;		/* time counter on cpu */

	/*
	 * Hole for extension of the self monitor capabilities
	 */

	__u64	__reserved[123];	/* align to 1k */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space reading the @data_head value should issue an rmb(), on
	 * SMP capable platforms, after reading this value -- see
	 * perf_counter_wakeup().
	 *
	 * When the mapping is PROT_WRITE the @data_tail value should be
	 * written by userspace to reflect the last read data. In this case
	 * the kernel will not over-write unread data.
	 */
	__u64	data_head;		/* head in the data section */
	__u64	data_tail;		/* user-space written tail */
};
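
/*
 * Example (user-space sketch): consuming the mmap()ed data buffer.
 * The control page above is followed by the data pages; the exact
 * mmap layout is assumed, not defined here:
 *
 *	struct perf_counter_mmap_page *pc = base;
 *	__u64 head = pc->data_head;
 *
 *	rmb();					(order head vs. data reads)
 *	... parse records in [pc->data_tail, head) ...
 *	pc->data_tail = head;			(PROT_WRITE mappings only)
 */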

#define PERF_EVENT_MISC_CPUMODE_MASK		(3 << 0)
#define PERF_EVENT_MISC_CPUMODE_UNKNOWN		(0 << 0)
#define PERF_EVENT_MISC_KERNEL			(1 << 0)
#define PERF_EVENT_MISC_USER			(2 << 0)
#define PERF_EVENT_MISC_HYPERVISOR		(3 << 0)

struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};

enum perf_event_type {

	/*
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 * };
	 */
	PERF_EVENT_MMAP			= 1,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				id;
	 *	u64				lost;
	 * };
	 */
	PERF_EVENT_LOST			= 2,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	char				comm[];
	 * };
	 */
	PERF_EVENT_COMM			= 3,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 * };
	 */
	PERF_EVENT_EXIT			= 4,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				stream_id;
	 * };
	 */
	PERF_EVENT_THROTTLE		= 5,
	PERF_EVENT_UNTHROTTLE		= 6,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 * };
	 */
	PERF_EVENT_FORK			= 7,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *
	 *	struct read_format		values;
	 * };
	 */
	PERF_EVENT_READ			= 8,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	{ u64			ip;	  } && PERF_SAMPLE_IP
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
	 *	{ u64			id;	  } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
	 *
	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
	 *
	 *	{ u64			nr,
	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises wrt the
	 *	# stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	{ u32			size;
	 *	  char			data[size]; } && PERF_SAMPLE_RAW
	 * };
	 */
	PERF_EVENT_SAMPLE		= 9,

	PERF_EVENT_MAX,			/* non-ABI */
};
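
/*
 * Example (user-space sketch): records in the mmap data buffer are a
 * stream of perf_event_header structures, each followed by
 * header.size - sizeof(header) bytes of type-specific payload
 * (ring-buffer wrap-around handling is omitted here):
 *
 *	struct perf_event_header *hdr = buf;
 *
 *	while ((char *)hdr < (char *)buf + len) {
 *		switch (hdr->type) {
 *		case PERF_EVENT_SAMPLE:
 *			... layout as selected by attr.sample_type ...
 *			break;
 *		case PERF_EVENT_MMAP:
 *		case PERF_EVENT_COMM:
 *			...
 *			break;
 *		}
 *		hdr = (struct perf_event_header *)((char *)hdr + hdr->size);
 *	}
 */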

enum perf_callchain_context {
	PERF_CONTEXT_HV			= (__u64)-32,
	PERF_CONTEXT_KERNEL		= (__u64)-128,
	PERF_CONTEXT_USER		= (__u64)-512,

	PERF_CONTEXT_GUEST		= (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,

	PERF_CONTEXT_MAX		= (__u64)-4095,
};
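
/*
 * Example (user-space sketch): in a PERF_SAMPLE_CALLCHAIN record the
 * ips[] array mixes return addresses with context markers; entries at
 * or above PERF_CONTEXT_MAX (as unsigned values) are assumed here to
 * mark which context the following addresses belong to:
 *
 *	for (i = 0; i < nr; i++) {
 *		if (ips[i] >= PERF_CONTEXT_MAX)
 *			context = ips[i];	(e.g. PERF_CONTEXT_KERNEL)
 *		else
 *			... ips[i] is a call-chain address in 'context' ...
 *	}
 */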

#define PERF_FLAG_FD_NO_GROUP	(1U << 0)
#define PERF_FLAG_FD_OUTPUT	(1U << 1)

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_COUNTERS
# include <asm/perf_counter.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <asm/atomic.h>

#define PERF_MAX_STACK_DEPTH		255

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
	u32				size;
	void				*data;
};

struct task_struct;

/**
 * struct hw_perf_counter - performance counter hardware details:
 */
struct hw_perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	union {
		struct { /* hardware */
			u64		config;
			unsigned long	config_base;
			unsigned long	counter_base;
			int		idx;
		};
		union { /* software */
			atomic64_t	count;
			struct hrtimer	hrtimer;
		};
	};
	atomic64_t			prev_count;
	u64				sample_period;
	u64				last_period;
	atomic64_t			period_left;
	u64				interrupts;

	u64				freq_count;
	u64				freq_interrupts;
	u64				freq_stamp;
#endif
};

struct perf_counter;

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	int (*enable)			(struct perf_counter *counter);
	void (*disable)			(struct perf_counter *counter);
	void (*read)			(struct perf_counter *counter);
	void (*unthrottle)		(struct perf_counter *counter);
};

/**
 * enum perf_counter_active_state - the states of a counter
 */
enum perf_counter_active_state {
	PERF_COUNTER_STATE_ERROR	= -2,
	PERF_COUNTER_STATE_OFF		= -1,
	PERF_COUNTER_STATE_INACTIVE	=  0,
	PERF_COUNTER_STATE_ACTIVE	=  1,
};
|
|
|
|
|
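
/*
 * Example (user-space sketch of how the ERROR state shows up; the fd and
 * variable names are illustrative):
 *
 *	u64 value;
 *	ssize_t n = read(counter_fd, &value, sizeof(value));
 *	if (n == 0) {
 *		// 0 bytes == EOF: the (pinned) group went into error state;
 *		// re-enabling the task's counters makes it readable again.
 *		prctl(PR_TASK_PERF_COUNTERS_ENABLE);
 *	}
 */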

struct file;

struct perf_mmap_data {
	struct rcu_head			rcu_head;
	int				nr_pages;	/* nr of data pages  */
	int				writable;	/* are we writable   */
	int				nr_locked;	/* nr pages mlocked  */

	atomic_t			poll;		/* POLL_ for wakeups */
	atomic_t			events;		/* event limit       */

	atomic_long_t			head;		/* write position    */
	atomic_long_t			done_head;	/* completed head    */

	atomic_t			lock;		/* concurrent writes */
	atomic_t			wakeup;		/* needs a wakeup    */
	atomic_t			lost;		/* nr records lost   */

	struct perf_counter_mmap_page	*user_page;
	void				*data_pages[0];
};
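
/*
 * Example (user-space view, illustrative; assumes the usual layout of one
 * metadata page followed by a power-of-two number of data pages):
 *
 *	size_t len  = (1 + nr_data_pages) * page_size;
 *	void  *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, counter_fd, 0);
 *	struct perf_counter_mmap_page *meta = base;	// backed by ->user_page
 *	// the sample ring buffer follows, backed by ->data_pages[]
 */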

struct perf_pending_entry {
	struct perf_pending_entry *next;
	void (*func)(struct perf_pending_entry *);
};
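
/*
 * Example (sketch): perf_pending_entry is an intrusive node, so a callback
 * recovers its owner with container_of().  For the entry embedded in
 * struct perf_counter below, that looks roughly like:
 *
 *	static void pending_callback(struct perf_pending_entry *entry)
 *	{
 *		struct perf_counter *counter =
 *			container_of(entry, struct perf_counter, pending);
 *
 *		// ... do the deferred wakeup/disable work ...
 *	}
 */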

/**
 * struct perf_counter - performance counter kernel representation:
 */
struct perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	struct list_head		list_entry;
	struct list_head		event_entry;
	struct list_head		sibling_list;
	int				nr_siblings;
	struct perf_counter		*group_leader;
	struct perf_counter		*output;
	const struct pmu		*pmu;

	enum perf_counter_active_state	state;
	atomic64_t			count;

	/*
	 * These are the total time in nanoseconds that the counter
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task counter)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the counter is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the counter is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the counter was enabled
	 * tstamp_running: the notional time when the counter was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	counter was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	struct perf_counter_attr	attr;
	struct hw_perf_counter		hw;

	struct perf_counter_context	*ctx;
	struct file			*filp;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * counters have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list.  child_list is the head
	 * of the list of counters cloned from this one for a top-level
	 * counter, or the link in that list for a child counter.
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_counter		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;
	struct perf_mmap_data		*data;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct perf_pending_entry	pending;

	atomic_t			event_limit;

	void (*destroy)(struct perf_counter *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;
#endif
};
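
/*
 * Example (user-space sketch, based on the documented semantics of
 * PERF_FORMAT_TOTAL_TIME_ENABLED / PERF_FORMAT_TOTAL_TIME_RUNNING in
 * attr.read_format): when both bits are set, read() returns the count
 * followed by the two times, which lets a multiplexed counter be scaled
 * to an estimate of the true count:
 *
 *	u64 buf[3];	// { value, time_enabled, time_running }
 *
 *	read(counter_fd, buf, sizeof(buf));
 *	u64 estimate = buf[2] ? buf[0] * buf[1] / buf[2] : 0;
 *	// time_running == 0 means the counter never got onto the CPU
 */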

/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for task counters and CPU counters as well:
 */
struct perf_counter_context {
	/*
	 * Protect the states of the counters in the list,
	 * nr_active, and the list:
	 */
	spinlock_t		lock;
	/*
	 * Protect the list of counters.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex		mutex;

	struct list_head	counter_list;
	struct list_head	event_list;
	int			nr_counters;
	int			nr_active;
	int			is_active;
	int			nr_stat;
	atomic_t		refcount;
	struct task_struct	*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64			time;
	u64			timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor: two clones
	 * are equivalent only if they have the same parent_ctx and the
	 * same generation number copied from it at clone time.
	 */
	struct perf_counter_context *parent_ctx;
	u64			parent_gen;
	u64			generation;
	int			pin_count;
	struct rcu_head		rcu_head;
};
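
/*
 * Example (sketch of the locking rule stated above; the spinlock flavour
 * and list helper are illustrative): a reader may take either ctx->mutex
 * or ctx->lock to walk counter_list, but a writer must hold both to
 * change it:
 *
 *	mutex_lock(&ctx->mutex);
 *	spin_lock_irq(&ctx->lock);
 *	list_add_tail(&counter->list_entry, &ctx->counter_list);
 *	spin_unlock_irq(&ctx->lock);
 *	mutex_unlock(&ctx->mutex);
 */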

/**
 * struct perf_cpu_context - per cpu counter context structure
 */
struct perf_cpu_context {
	struct perf_counter_context	ctx;
	struct perf_counter_context	*task_ctx;
	int				active_oncpu;
	int				max_pertask;
	int				exclusive;	/* exclusive group on this CPU? */

	/*
	 * Recursion avoidance:
	 *
	 * task, softirq, irq, nmi context
	 */
	int				recursion[4];
};
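
/*
 * Example (sketch): the recursion[] slots are per-context-level reentrancy
 * guards for the software counter path, roughly:
 *
 *	if (cpuctx->recursion[rctx])
 *		return;			// already handling an event at this level
 *	cpuctx->recursion[rctx]++;
 *	barrier();
 *	// ... emit the software counter event ...
 *	barrier();
 *	cpuctx->recursion[rctx]--;
 *
 * where rctx selects one of the four levels listed above (task, softirq,
 * irq, nmi).
 */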

#ifdef CONFIG_PERF_COUNTERS

/*
 * Set by architecture code:
 */
extern int perf_max_counters;

extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter);

extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
extern void perf_counter_task_sched_out(struct task_struct *task,
					struct task_struct *next, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
extern int perf_counter_init_task(struct task_struct *child);
extern void perf_counter_exit_task(struct task_struct *child);
extern void perf_counter_free_task(struct task_struct *task);
extern void set_perf_counter_pending(void);
extern void perf_counter_do_pending(void);
extern void perf_counter_print_debug(void);
extern void __perf_disable(void);
extern bool __perf_enable(void);
extern void perf_disable(void);
extern void perf_enable(void);
extern int perf_counter_task_disable(void);
extern int perf_counter_task_enable(void);
extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu);
extern void perf_counter_update_userpage(struct perf_counter *counter);
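
/*
 * Example (sketch): perf_disable()/perf_enable() are used as a matched,
 * nestable pair around code that must not race with the PMU, e.g. while
 * counters are being rescheduled:
 *
 *	perf_disable();
 *	// ... reprogram / reschedule counters ...
 *	perf_enable();
 */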

struct perf_sample_data {
	struct pt_regs			*regs;
	u64				addr;
	u64				period;
	struct perf_raw_record		*raw;
};

extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
				 struct perf_sample_data *data);
extern void perf_counter_output(struct perf_counter *counter, int nmi,
				struct perf_sample_data *data);
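
/*
 * Example (sketch of an overflow handler's use of perf_sample_data; which
 * fields get filled in depends on the caller, and the return-value
 * handling shown is illustrative):
 *
 *	struct perf_sample_data data = {
 *		.regs	= regs,
 *		.addr	= 0,
 *		.period	= counter->hw.last_period,
 *	};
 *
 *	if (perf_counter_overflow(counter, nmi, &data))
 *		// non-zero asks the caller to stop/disable the counter
 *		...;
 */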

/*
 * Return 1 for a software counter, 0 for a hardware counter
 */
static inline int is_software_counter(struct perf_counter *counter)
{
	return (counter->attr.type != PERF_TYPE_RAW) &&
		(counter->attr.type != PERF_TYPE_HARDWARE) &&
		(counter->attr.type != PERF_TYPE_HW_CACHE);
}

extern atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];

extern void __perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);

static inline void
perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{
	if (atomic_read(&perf_swcounter_enabled[event]))
		__perf_swcounter_event(event, nr, nmi, regs, addr);
}
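
/*
 * Example (typical call site, sketch): a fault path can count a software
 * event with a single call; the perf_swcounter_enabled[] check above keeps
 * this cheap when no such counters exist:
 *
 *	perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 */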

extern void __perf_counter_mmap(struct vm_area_struct *vma);

static inline void perf_counter_mmap(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		__perf_counter_mmap(vma);
}

extern void perf_counter_comm(struct task_struct *tsk);
extern void perf_counter_fork(struct task_struct *tsk);

extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);

extern int sysctl_perf_counter_paranoid;
extern int sysctl_perf_counter_mlock;
extern int sysctl_perf_counter_sample_rate;

extern void perf_counter_init(void);
extern void perf_tpcounter_event(int event_id, u64 addr, u64 count,
				 void *record, int entry_size);

#ifndef perf_misc_flags
#define perf_misc_flags(regs)	(user_mode(regs) ? PERF_EVENT_MISC_USER : \
				 PERF_EVENT_MISC_KERNEL)
#define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif
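
/*
 * Example (sketch): the #ifndef above lets an architecture supply its own
 * sample classification before this point, e.g. (hypothetical macro names):
 *
 *	#define perf_misc_flags(regs)		arch_perf_misc_flags(regs)
 *	#define perf_instruction_pointer(regs)	arch_perf_ip(regs)
 *
 * The defaults simply split samples into user vs. kernel via user_mode(regs).
 */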

#else
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_sched_out(struct task_struct *task,
			    struct task_struct *next, int cpu)		{ }
static inline void
perf_counter_task_tick(struct task_struct *task, int cpu)		{ }
static inline int perf_counter_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_counter_exit_task(struct task_struct *child)	{ }
static inline void perf_counter_free_task(struct task_struct *task)	{ }
static inline void perf_counter_do_pending(void)			{ }
static inline void perf_counter_print_debug(void)			{ }
static inline void perf_disable(void)					{ }
static inline void perf_enable(void)					{ }
static inline int perf_counter_task_disable(void)	{ return -EINVAL; }
static inline int perf_counter_task_enable(void)	{ return -EINVAL; }

static inline void
perf_swcounter_event(u32 event, u64 nr, int nmi,
		     struct pt_regs *regs, u64 addr)			{ }

static inline void perf_counter_mmap(struct vm_area_struct *vma)	{ }
static inline void perf_counter_comm(struct task_struct *tsk)		{ }
static inline void perf_counter_fork(struct task_struct *tsk)		{ }
static inline void perf_counter_init(void)				{ }
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_COUNTER_H */