Merge branch 'perf/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/random-tracing into perf/urgent
Ingo Molnar committed Dec 7, 2009
2 parents 3a9a0be + 5605317 commit 11a80dd
Showing 2 changed files with 47 additions and 29 deletions.
2 changes: 2 additions & 0 deletions include/linux/hw_breakpoint.h
@@ -22,6 +22,8 @@ enum {
 
 static inline void hw_breakpoint_init(struct perf_event_attr *attr)
 {
+	memset(attr, 0, sizeof(*attr));
+
 	attr->type = PERF_TYPE_BREAKPOINT;
 	attr->size = sizeof(*attr);
 	/*
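A note on this header hunk: perf_event_attr is typically declared on the kernel stack by breakpoint users, so every field that hw_breakpoint_init() does not set explicitly would otherwise carry stack garbage; the added memset() zeroes the whole structure before type and size are filled in. A minimal sketch of a typical caller under the in-kernel API of this era (register_user_hw_breakpoint() and the HW_BREAKPOINT_* constants are real; watched_var, triggered_cb and tsk are illustrative):

	struct perf_event_attr attr;
	struct perf_event *bp;

	hw_breakpoint_init(&attr);			/* now zeroes attr first */
	attr.bp_addr = (unsigned long)&watched_var;	/* address to watch */
	attr.bp_len  = HW_BREAKPOINT_LEN_4;		/* 4-byte watchpoint */
	attr.bp_type = HW_BREAKPOINT_W;			/* fire on writes */

	bp = register_user_hw_breakpoint(&attr, triggered_cb, tsk);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

Without the memset(), fields such as attr.sample_period or attr.pinned would inherit whatever happened to be on the stack at the call site.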
74 changes: 45 additions & 29 deletions kernel/hw_breakpoint.c
@@ -83,15 +83,51 @@ static unsigned int max_task_bp_pinned(int cpu)
 	return 0;
 }
 
+static int task_bp_pinned(struct task_struct *tsk)
+{
+	struct perf_event_context *ctx = tsk->perf_event_ctxp;
+	struct list_head *list;
+	struct perf_event *bp;
+	unsigned long flags;
+	int count = 0;
+
+	if (WARN_ONCE(!ctx, "No perf context for this task"))
+		return 0;
+
+	list = &ctx->event_list;
+
+	spin_lock_irqsave(&ctx->lock, flags);
+
+	/*
+	 * The current breakpoint counter is not included in the list
+	 * at the open() callback time
+	 */
+	list_for_each_entry(bp, list, event_entry) {
+		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
+			count++;
+	}
+
+	spin_unlock_irqrestore(&ctx->lock, flags);
+
+	return count;
+}
+
 /*
  * Report the number of pinned/un-pinned breakpoints we have in
  * a given cpu (cpu > -1) or in all of them (cpu = -1).
  */
-static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
+static void
+fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp)
 {
+	int cpu = bp->cpu;
+	struct task_struct *tsk = bp->ctx->task;
+
 	if (cpu >= 0) {
 		slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
-		slots->pinned += max_task_bp_pinned(cpu);
+		if (!tsk)
+			slots->pinned += max_task_bp_pinned(cpu);
+		else
+			slots->pinned += task_bp_pinned(tsk);
 		slots->flexible = per_cpu(nr_bp_flexible, cpu);
 
 		return;
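The substance of this hunk (the next hunk applies the same substitution inside the cpu == -1 loop): when the breakpoint being reserved is bound to a task (tsk != NULL), only that task's own pinned breakpoints should be charged against the slots, via the new task_bp_pinned(), rather than the worst case over all tasks that max_task_bp_pinned() reports. A hypothetical user-space model of the difference; HBP_NUM, the counts and the scenario are illustrative only:

	#include <stdio.h>

	#define HBP_NUM 4	/* e.g. the 4 debug registers on x86 */

	int main(void)
	{
		int cpu_pinned   = 1;	/* cpu-wide pinned breakpoints on this cpu */
		int busiest_task = 3;	/* some unrelated task already holds 3 */
		int target_task  = 0;	/* the task we are attaching to holds none */

		/* Old accounting: always charge the busiest task. */
		printf("old: %d/%d\n", cpu_pinned + busiest_task, HBP_NUM);	/* 4/4, refused */

		/* New accounting: a task-bound breakpoint charges its own task. */
		printf("new: %d/%d\n", cpu_pinned + target_task, HBP_NUM);	/* 1/4, fits */

		return 0;
	}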
@@ -101,7 +137,10 @@ static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
 		unsigned int nr;
 
 		nr = per_cpu(nr_cpu_bp_pinned, cpu);
-		nr += max_task_bp_pinned(cpu);
+		if (!tsk)
+			nr += max_task_bp_pinned(cpu);
+		else
+			nr += task_bp_pinned(tsk);
 
 		if (nr > slots->pinned)
 			slots->pinned = nr;
@@ -118,33 +157,10 @@ static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
  */
 static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
 {
-	int count = 0;
-	struct perf_event *bp;
-	struct perf_event_context *ctx = tsk->perf_event_ctxp;
 	unsigned int *tsk_pinned;
-	struct list_head *list;
-	unsigned long flags;
-
-	if (WARN_ONCE(!ctx, "No perf context for this task"))
-		return;
-
-	list = &ctx->event_list;
-
-	spin_lock_irqsave(&ctx->lock, flags);
-
-	/*
-	 * The current breakpoint counter is not included in the list
-	 * at the open() callback time
-	 */
-	list_for_each_entry(bp, list, event_entry) {
-		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
-			count++;
-	}
-
-	spin_unlock_irqrestore(&ctx->lock, flags);
+	int count = 0;
 
-	if (WARN_ONCE(count < 0, "No breakpoint counter found in the counter list"))
-		return;
+	count = task_bp_pinned(tsk);
 
 	tsk_pinned = per_cpu(task_bp_pinned, cpu);
 	if (enable) {
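Background for this simplification: per_cpu(task_bp_pinned, cpu) acts as a small histogram in which, going by how max_task_bp_pinned() scans it, bucket n counts the tasks currently holding n+1 pinned breakpoints on that cpu, and toggle_bp_task_slot() moves a task one bucket up or down. A stand-alone model of the enable path under that reading; the helper names and the fixed HBP_NUM are assumptions, not part of this diff:

	#include <stdio.h>

	#define HBP_NUM 4

	static unsigned int tsk_pinned[HBP_NUM];	/* bucket n: tasks with n+1 bps */

	/* Enable path: count is how many breakpoints the task already had. */
	static void toggle_enable(int count)
	{
		tsk_pinned[count]++;			/* task now holds count+1 */
		if (count > 0)
			tsk_pinned[count - 1]--;	/* and leaves its old bucket */
	}

	/* Busiest task on this cpu: highest non-empty bucket, plus one. */
	static int max_task_pinned(void)
	{
		int i;

		for (i = HBP_NUM - 1; i >= 0; i--) {
			if (tsk_pinned[i] > 0)
				return i + 1;
		}
		return 0;
	}

	int main(void)
	{
		toggle_enable(0);	/* a task takes its 1st breakpoint */
		toggle_enable(1);	/* the same task takes its 2nd */
		printf("max pinned per task: %d\n", max_task_pinned());	/* 2 */
		return 0;
	}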
@@ -233,7 +249,7 @@ int reserve_bp_slot(struct perf_event *bp)
 
 	mutex_lock(&nr_bp_mutex);
 
-	fetch_bp_busy_slots(&slots, bp->cpu);
+	fetch_bp_busy_slots(&slots, bp);
 
 	/* Flexible counters need to keep at least one slot */
 	if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
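The check kept just below the updated call is where these totals matter: with HBP_NUM hardware slots per cpu (4 on x86), a new pinned breakpoint is refused once the pinned count plus the single slot reserved for flexible counters reaches the limit. A mirror of that condition as a hypothetical helper, assuming pinned never exceeds HBP_NUM; can_reserve_pinned() is not a kernel function:

	#define HBP_NUM 4

	struct bp_busy_slots {
		unsigned int pinned;
		unsigned int flexible;
	};

	/*
	 * Flexible counters keep at least one slot, so a new pinned
	 * breakpoint fits only while pinned + (flexible ? 1 : 0)
	 * stays strictly below HBP_NUM.
	 */
	static int can_reserve_pinned(const struct bp_busy_slots *slots)
	{
		return slots->pinned + !!slots->flexible < HBP_NUM;
	}

For instance, pinned == 3 with any flexible counters active refuses a fourth pinned breakpoint, while pinned == 3 with no flexible counters still fits.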
