/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>

/*
 * Constraints data
 */

/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);

/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, task_bp_pinned[HBP_NUM]);
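/*
 * Note: task_bp_pinned is not a plain counter. task_bp_pinned[n] is the
 * number of tasks that have n + 1 pinned breakpoints on this cpu, so
 * task_bp_pinned[0] counts the tasks holding a single breakpoint and
 * task_bp_pinned[HBP_NUM - 1] counts the tasks using every address register.
 */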

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

/*
 * Report the maximum number of pinned breakpoints a task
 * has in this cpu
 */
static unsigned int max_task_bp_pinned(int cpu)
{
	int i;
	unsigned int *tsk_pinned = per_cpu(task_bp_pinned, cpu);

	for (i = HBP_NUM - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
{
	if (cpu >= 0) {
		slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
		slots->pinned += max_task_bp_pinned(cpu);
		slots->flexible = per_cpu(nr_bp_flexible, cpu);

		return;
	}

	for_each_online_cpu(cpu) {
		unsigned int nr;

		nr = per_cpu(nr_cpu_bp_pinned, cpu);
		nr += max_task_bp_pinned(cpu);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = per_cpu(nr_bp_flexible, cpu);

		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}

/*
 * Add a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
{
	int count = 0;
	struct perf_event *bp;
	struct perf_event_context *ctx = tsk->perf_event_ctxp;
	unsigned int *task_bp_pinned;
	struct list_head *list;
	unsigned long flags;

	if (WARN_ONCE(!ctx, "No perf context for this task"))
		return;

	list = &ctx->event_list;

	spin_lock_irqsave(&ctx->lock, flags);

	/*
	 * The current breakpoint counter is not included in the list
	 * at the open() callback time
	 */
	list_for_each_entry(bp, list, event_entry) {
		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
			count++;
	}

	spin_unlock_irqrestore(&ctx->lock, flags);

	if (WARN_ONCE(count < 0, "No breakpoint counter found in the counter list"))
		return;

	task_bp_pinned = per_cpu(task_bp_pinned, cpu);
	if (enable) {
		task_bp_pinned[count]++;
		if (count > 0)
			task_bp_pinned[count-1]--;
	} else {
		task_bp_pinned[count]--;
		if (count > 0)
			task_bp_pinned[count-1]++;
	}
}

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void toggle_bp_slot(struct perf_event *bp, bool enable)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->ctx->task;

	/* Pinned counter task profiling */
	if (tsk) {
		if (cpu >= 0) {
			toggle_bp_task_slot(tsk, cpu, enable);
			return;
		}

		for_each_online_cpu(cpu)
			toggle_bp_task_slot(tsk, cpu, enable);
		return;
	}

	/* Pinned counter cpu profiling */
	if (enable)
		per_cpu(nr_cpu_bp_pinned, bp->cpu)++;
	else
		per_cpu(nr_cpu_bp_pinned, bp->cpu)--;
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't cover every register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible, if any, must keep
 *          one register at least (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(task_bp_pinned, *))) < HBP_NUM
 */
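
/*
 * A purely illustrative example of the pinned check above (the numbers are
 * made up; HBP_NUM is 4 on x86, for instance): if a cpu already carries
 * 2 pinned cpu-wide breakpoints, its busiest task holds 1 pinned breakpoint,
 * and at least one flexible counter exists, then the 3 pinned slots plus the
 * 1 slot that must stay free for flexible counters fill all 4 registers, so
 * reserve_bp_slot() below refuses a new pinned breakpoint with -ENOSPC.
 */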
int reserve_bp_slot(struct perf_event *bp)
{
	struct bp_busy_slots slots = {0};
	int ret = 0;

	mutex_lock(&nr_bp_mutex);

	fetch_bp_busy_slots(&slots, bp->cpu);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
		ret = -ENOSPC;
		goto end;
	}

	toggle_bp_slot(bp, true);

end:
	mutex_unlock(&nr_bp_mutex);

	return ret;
}

void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	toggle_bp_slot(bp, false);

	mutex_unlock(&nr_bp_mutex);
}

int __register_perf_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = reserve_bp_slot(bp);
	if (ret)
		return ret;

	/*
	 * Ptrace breakpoints can be temporary perf events only
	 * meant to reserve a slot. In that case, they are created disabled
	 * and we don't want to check the params right now (as we put a
	 * null addr). But perf tools create events as disabled and we want
	 * to check the params for them.
	 * This is a quick hack that will be removed soon, once we remove
	 * the tmp breakpoints from ptrace.
	 */
	if (!bp->attr.disabled || bp->callback == perf_bp_event)
		ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);

	return ret;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
	bp->callback = perf_bp_event;

	return __register_perf_hw_breakpoint(bp);
}

/*
 * Register a breakpoint bound to a task and a given cpu.
 * If cpu is -1, the breakpoint is active for the task in every cpu.
 * If the task is -1, the breakpoint is active for every task in the given
 * cpu.
 */
static struct perf_event *
register_user_hw_breakpoint_cpu(unsigned long addr,
				int len,
				int type,
				perf_callback_t triggered,
				pid_t pid,
				int cpu,
				bool active)
{
	struct perf_event_attr *attr;
	struct perf_event *bp;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return ERR_PTR(-ENOMEM);

	attr->type = PERF_TYPE_BREAKPOINT;
	attr->size = sizeof(*attr);
	attr->bp_addr = addr;
	attr->bp_len = len;
	attr->bp_type = type;
	/*
	 * Such breakpoints are used by debuggers to trigger signals when
	 * we hit the expected memory op. We can't miss such events, they
	 * must be pinned.
	 */
	attr->pinned = 1;

	if (!active)
		attr->disabled = 1;

	bp = perf_event_create_kernel_counter(attr, cpu, pid, triggered);
	kfree(attr);

	return bp;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @addr: is the memory address that triggers the breakpoint
 * @len: the length of the access to the memory (1 byte, 2 bytes etc...)
 * @type: the type of the access to the memory (read/write/exec)
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 * @active: should we activate it while registering it
 *
 */
struct perf_event *
register_user_hw_breakpoint(unsigned long addr,
			    int len,
			    int type,
			    perf_callback_t triggered,
			    struct task_struct *tsk,
			    bool active)
{
	return register_user_hw_breakpoint_cpu(addr, len, type, triggered,
					       tsk->pid, -1, active);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
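
/*
 * Illustrative usage sketch (not taken from this file): a caller such as an
 * arch ptrace implementation could request a pinned, active breakpoint for a
 * traced task with something like
 *
 *	bp = register_user_hw_breakpoint(addr, len, type, my_triggered_cb,
 *					 child, true);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 *
 * where addr/len/type are the arch-defined breakpoint parameters and
 * my_triggered_cb is a perf_callback_t supplied by the caller; these names
 * are placeholders, not references to real code.
 */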

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @addr: is the memory address that triggers the breakpoint
 * @len: the length of the access to the memory (1 byte, 2 bytes etc...)
 * @type: the type of the access to the memory (read/write/exec)
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 * @active: should we activate it while registering it
 */
struct perf_event *
modify_user_hw_breakpoint(struct perf_event *bp,
			  unsigned long addr,
			  int len,
			  int type,
			  perf_callback_t triggered,
			  struct task_struct *tsk,
			  bool active)
{
	/*
	 * FIXME: do it without unregistering
	 * - We don't want to lose our slot
	 * - If the new bp is incorrect, don't lose the older one
	 */
	unregister_hw_breakpoint(bp);

	return register_user_hw_breakpoint(addr, len, type, triggered,
					   tsk, active);
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;
	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

static struct perf_event *
register_kernel_hw_breakpoint_cpu(unsigned long addr,
				  int len,
				  int type,
				  perf_callback_t triggered,
				  int cpu,
				  bool active)
{
	return register_user_hw_breakpoint_cpu(addr, len, type, triggered,
					       -1, cpu, active);
}

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @addr: is the memory address that triggers the breakpoint
 * @len: the length of the access to the memory (1 byte, 2 bytes etc...)
 * @type: the type of the access to the memory (read/write/exec)
 * @triggered: callback to trigger when we hit the breakpoint
 * @active: should we activate it while registering it
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event **
register_wide_hw_breakpoint(unsigned long addr,
			    int len,
			    int type,
			    perf_callback_t triggered,
			    bool active)
{
	struct perf_event **cpu_events, **pevent, *bp;
	long err;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return ERR_PTR(-ENOMEM);

	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		bp = register_kernel_hw_breakpoint_cpu(addr, len, type,
					triggered, cpu, active);

		*pevent = bp;

		if (IS_ERR(bp) || !bp) {
			err = PTR_ERR(bp);
			goto fail;
		}
	}

	return cpu_events;

fail:
	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		if (IS_ERR(*pevent) || !*pevent)
			break;
		unregister_hw_breakpoint(*pevent);
	}
	free_percpu(cpu_events);
	/* return the error if any */
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event **cpu_events)
{
	int cpu;
	struct perf_event **pevent;

	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		unregister_hw_breakpoint(*pevent);
	}
	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static int __init init_hw_breakpoint(void)
{
	return register_die_notifier(&hw_breakpoint_exceptions_nb);
}
core_initcall(init_hw_breakpoint);


struct pmu perf_ops_bp = {
	.enable		= arch_install_hw_breakpoint,
	.disable	= arch_uninstall_hw_breakpoint,
	.read		= hw_breakpoint_pmu_read,
	.unthrottle	= hw_breakpoint_pmu_unthrottle
};