Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 169696
b: refs/heads/master
c: ba1c813
h: refs/heads/master
v: v3
  • Loading branch information
Frederic Weisbecker committed Nov 8, 2009
1 parent 716a34d commit a2e143e
Show file tree
Hide file tree
Showing 2 changed files with 206 additions and 7 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 24f1e32c60c45c89a997c73395b69c8af6f0a84e
refs/heads/master: ba1c813a6b9a0ef14d7112daf51270eff326f037
211 changes: 205 additions & 6 deletions trunk/kernel/hw_breakpoint.c
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,8 @@
* Copyright (C) 2007 Alan Stern
* Copyright (C) IBM Corporation, 2009
* Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
*
* Thanks to Ingo Molnar for his many suggestions.
*/

/*
Expand Down Expand Up @@ -44,24 +46,221 @@
#include <asm/debugreg.h>
#endif

/*
 * Constraints data: per-cpu accounting of how many breakpoint slots
 * are consumed, used to decide whether a new breakpoint can be
 * reserved (see reserve_bp_slot()).
 */

/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);

/*
 * Number of pinned task breakpoints in a cpu.
 * task_bp_pinned[i] counts the tasks having i + 1 pinned breakpoints
 * on this cpu (a small histogram, one bucket per possible count).
 */
static DEFINE_PER_CPU(unsigned int, task_bp_pinned[HBP_NUM]);

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

/*
 * Report the maximum number of pinned breakpoints a task
 * has in this cpu.
 */
static unsigned int max_task_bp_pinned(int cpu)
{
	int i;
	unsigned int *tsk_pinned = per_cpu(task_bp_pinned, cpu);

	/*
	 * tsk_pinned[i] counts tasks holding i + 1 pinned breakpoints on
	 * this cpu, so the highest non-empty bucket gives the maximum.
	 * Scan from the top down and stop at the first hit.
	 */
	for (i = HBP_NUM - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 *
 * NOTE(review): the cpu == -1 path only raises the fields, so it
 * assumes the caller passed a zero-initialized *slots — confirm at
 * call sites.
 */
static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
{
	if (cpu >= 0) {
		/* Single cpu: report its counts directly */
		slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu)
				+ max_task_bp_pinned(cpu);
		slots->flexible = per_cpu(nr_bp_flexible, cpu);
		return;
	}

	/* All cpus: keep the worst-case (maximum) value seen */
	for_each_online_cpu(cpu) {
		unsigned int pinned;
		unsigned int flexible;

		pinned = per_cpu(nr_cpu_bp_pinned, cpu)
			 + max_task_bp_pinned(cpu);
		if (pinned > slots->pinned)
			slots->pinned = pinned;

		flexible = per_cpu(nr_bp_flexible, cpu);
		if (flexible > slots->flexible)
			slots->flexible = flexible;
	}
}

/*
 * Add a pinned breakpoint for the given task in our constraint table
 *
 * Counts the breakpoint events already present in the task's perf
 * context, then moves the task between adjacent buckets of the
 * per-cpu task_bp_pinned histogram: enabling a bp moves the task
 * from the (count-1) bucket to the count bucket, disabling does the
 * reverse.
 */
static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
{
	int count = 0;
	struct perf_event *bp;
	struct perf_event_context *ctx = tsk->perf_event_ctxp;
	unsigned int *task_bp_pinned;
	struct list_head *list;
	unsigned long flags;

	if (WARN_ONCE(!ctx, "No perf context for this task"))
		return;

	list = &ctx->event_list;

	/* The ctx->lock protects the context's event list */
	spin_lock_irqsave(&ctx->lock, flags);

	/*
	 * The current breakpoint counter is not included in the list
	 * at the open() callback time
	 */
	list_for_each_entry(bp, list, event_entry) {
		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
			count++;
	}

	spin_unlock_irqrestore(&ctx->lock, flags);

	/*
	 * NOTE(review): count starts at 0 and is only incremented above,
	 * so this check can never fire — likely defensive leftover.
	 */
	if (WARN_ONCE(count < 0, "No breakpoint counter found in the counter list"))
		return;

	/*
	 * NOTE(review): the local task_bp_pinned shadows the per-cpu
	 * variable of the same name; this relies on per_cpu() resolving
	 * the per-cpu symbol (per_cpu__ name mangling) — confirm it still
	 * holds for the per_cpu() implementation in use.
	 */
	task_bp_pinned = per_cpu(task_bp_pinned, cpu);
	if (enable) {
		/* Task gains a bp: move it up one bucket */
		task_bp_pinned[count]++;
		if (count > 0)
			task_bp_pinned[count-1]--;
	} else {
		/* Task loses a bp: move it down one bucket */
		task_bp_pinned[count]--;
		if (count > 0)
			task_bp_pinned[count-1]++;
	}
}

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void toggle_bp_slot(struct perf_event *bp, bool enable)
{
	struct task_struct *tsk = bp->ctx->task;
	int cpu = bp->cpu;

	if (!tsk) {
		/* Pinned counter cpu profiling */
		if (enable)
			per_cpu(nr_cpu_bp_pinned, cpu)++;
		else
			per_cpu(nr_cpu_bp_pinned, cpu)--;
		return;
	}

	/* Pinned counter task profiling */
	if (cpu >= 0)
		toggle_bp_task_slot(tsk, cpu, enable);
	else
		for_each_online_cpu(cpu)
			toggle_bp_task_slot(tsk, cpu, enable);
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't cover every register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per tasks
 *          breakpoints.
 *
 *
 *  == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible, if any, must keep
 *          one register at least (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(task_bp_pinned, *))) < HBP_NUM
 */
int reserve_bp_slot(struct perf_event *bp)
{
	struct bp_busy_slots slots = {0};
	int ret = 0;

	mutex_lock(&nr_bp_mutex);

	/* Worst-case busy slots for this bp's cpu (or across all cpus) */
	fetch_bp_busy_slots(&slots, bp->cpu);

	/*
	 * Flexible counters need to keep at least one slot.
	 * Use >= rather than == so that an already over-committed set of
	 * slots is rejected too, instead of being silently accepted.
	 */
	if (slots.pinned + (!!slots.flexible) >= HBP_NUM) {
		ret = -ENOSPC;
		goto end;
	}

	/* Account the new breakpoint in the constraint table */
	toggle_bp_slot(bp, true);

end:
	mutex_unlock(&nr_bp_mutex);

	return ret;
}

/*
 * Release the slot reserved by reserve_bp_slot(): remove the
 * breakpoint from the constraint table under nr_bp_mutex.
 */
void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	toggle_bp_slot(bp, false);

	mutex_unlock(&nr_bp_mutex);
}


int __register_perf_hw_breakpoint(struct perf_event *bp)
{
int ret;
Expand Down

0 comments on commit a2e143e

Please sign in to comment.