Commit 9f5b4b5

---
r: 205271
b: refs/heads/master
c: 45a7337
h: refs/heads/master
i:
  205269: 8024a77
  205267: f8d3f83
  205263: 82c1e01
v: v3
Frederic Weisbecker committed Jun 24, 2010
1 parent 226db98 commit 9f5b4b5
Showing 3 changed files with 46 additions and 40 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c882e0feb937af4e5b991cbd1c81536f37053e86
+refs/heads/master: 45a73372efe4a63f44aa2e1125d4a777c2fdc8d8
6 changes: 4 additions & 2 deletions trunk/include/linux/perf_event.h
@@ -533,8 +533,10 @@ struct hw_perf_event {
 			struct hrtimer	hrtimer;
 		};
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
-		/* breakpoint */
-		struct arch_hw_breakpoint	info;
+		struct { /* breakpoint */
+			struct arch_hw_breakpoint	info;
+			struct list_head		bp_list;
+		};
 #endif
 	};
 	local64_t			prev_count;
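The header change above is worth a second look: the breakpoint fields now live in an anonymous struct inside hw_perf_event's union, so both info and the new bp_list are still addressed directly as hw.info and hw.bp_list. A minimal user-space sketch of the same pattern, with toy types standing in for the kernel's arch_hw_breakpoint and list_head:

#include <stdio.h>

/* Toy stand-ins for the kernel types. */
struct arch_hw_breakpoint { unsigned long addr; };
struct list_head { struct list_head *next, *prev; };

struct hw_perf_event {
	union {
		struct { /* hardware */
			unsigned long config;
		};
		struct { /* breakpoint */
			struct arch_hw_breakpoint info;
			struct list_head bp_list;
		};
	};
};

int main(void)
{
	struct hw_perf_event hw = { .info = { .addr = 0x1000 } };

	/* Anonymous struct members are reached without naming the
	 * struct, exactly as bp->hw.info and bp->hw.bp_list are in
	 * the patch. */
	printf("info.addr=%#lx, bp_list at %p\n",
	       hw.info.addr, (void *)&hw.bp_list);
	return 0;
}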
78 changes: 41 additions & 37 deletions trunk/kernel/hw_breakpoint.c
@@ -41,6 +41,7 @@
 #include <linux/sched.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/list.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>

@@ -62,6 +63,9 @@ static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);
 
 static int nr_slots[TYPE_MAX];
 
+/* Keep track of the breakpoints attached to tasks */
+static LIST_HEAD(bp_task_head);
+
 static int constraints_initialized;
 
 /* Gather the number of total pinned and un-pinned bp in a cpuset */
@@ -103,33 +107,21 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
 	return 0;
 }
 
-static int task_bp_pinned(struct task_struct *tsk, enum bp_type_idx type)
+/*
+ * Count the number of breakpoints of the same type and same task.
+ * The given event must be not on the list.
+ */
+static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type)
 {
-	struct perf_event_context *ctx = tsk->perf_event_ctxp;
-	struct list_head *list;
-	struct perf_event *bp;
-	unsigned long flags;
+	struct perf_event_context *ctx = bp->ctx;
+	struct perf_event *iter;
 	int count = 0;
 
-	if (WARN_ONCE(!ctx, "No perf context for this task"))
-		return 0;
-
-	list = &ctx->event_list;
-
-	raw_spin_lock_irqsave(&ctx->lock, flags);
-
-	/*
-	 * The current breakpoint counter is not included in the list
-	 * at the open() callback time
-	 */
-	list_for_each_entry(bp, list, event_entry) {
-		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
-			if (find_slot_idx(bp) == type)
-				count += hw_breakpoint_weight(bp);
+	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
+		if (iter->ctx == ctx && find_slot_idx(iter) == type)
+			count += hw_breakpoint_weight(iter);
 	}
 
-	raw_spin_unlock_irqrestore(&ctx->lock, flags);
-
 	return count;
 }

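The rewritten task_bp_pinned() no longer walks the task's own event list under the ctx lock; it walks the new global bp_task_head list and filters by owning context. A rough user-space sketch of that accounting scheme — all names and types here are simplified stand-ins, not the kernel's:

#include <stdio.h>

struct bp {
	int ctx;		/* stands in for bp->ctx */
	int type;		/* stands in for find_slot_idx(bp) */
	int weight;		/* stands in for hw_breakpoint_weight(bp) */
	struct bp *next;	/* stands in for the hw.bp_list linkage */
};

static struct bp *bp_task_head;	/* the kernel uses LIST_HEAD(bp_task_head) */

/* Count the pinned weight of breakpoints sharing @ctx and @type.
 * The event being reserved must not be on the list yet, which is
 * why the patch only links it in once it is accounted. */
static int task_bp_pinned(int ctx, int type)
{
	int count = 0;
	struct bp *iter;

	for (iter = bp_task_head; iter; iter = iter->next)
		if (iter->ctx == ctx && iter->type == type)
			count += iter->weight;
	return count;
}

int main(void)
{
	struct bp a = { 1, 0, 1, 0 }, b = { 1, 0, 1, 0 }, c = { 2, 0, 1, 0 };

	a.next = &b; b.next = &c; bp_task_head = &a;
	printf("%d\n", task_bp_pinned(1, 0));	/* prints 2 */
	return 0;
}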
@@ -149,7 +141,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 		if (!tsk)
 			slots->pinned += max_task_bp_pinned(cpu, type);
 		else
-			slots->pinned += task_bp_pinned(tsk, type);
+			slots->pinned += task_bp_pinned(bp, type);
 		slots->flexible = per_cpu(nr_bp_flexible[type], cpu);
 
 		return;
@@ -162,7 +154,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 		if (!tsk)
 			nr += max_task_bp_pinned(cpu, type);
 		else
-			nr += task_bp_pinned(tsk, type);
+			nr += task_bp_pinned(bp, type);
 
 		if (nr > slots->pinned)
 			slots->pinned = nr;
@@ -188,18 +180,19 @@ fetch_this_slot(struct bp_busy_slots *slots, int weight)
 /*
  * Add a pinned breakpoint for the given task in our constraint table
  */
-static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable,
+static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable,
 				enum bp_type_idx type, int weight)
 {
 	unsigned int *tsk_pinned;
 	int old_count = 0;
 	int old_idx = 0;
 	int idx = 0;
 
-	old_count = task_bp_pinned(tsk, type);
+	old_count = task_bp_pinned(bp, type);
 	old_idx = old_count - 1;
 	idx = old_idx + weight;
 
+	/* tsk_pinned[n] is the number of tasks having n breakpoints */
 	tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
 	if (enable) {
 		tsk_pinned[idx]++;
@@ -222,23 +215,30 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
 	int cpu = bp->cpu;
 	struct task_struct *tsk = bp->ctx->task;
 
+	/* Pinned counter cpu profiling */
+	if (!tsk) {
+
+		if (enable)
+			per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
+		else
+			per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
+		return;
+	}
+
 	/* Pinned counter task profiling */
-	if (tsk) {
-		if (cpu >= 0) {
-			toggle_bp_task_slot(tsk, cpu, enable, type, weight);
-			return;
-		}
 
+	if (!enable)
+		list_del(&bp->hw.bp_list);
+
+	if (cpu >= 0) {
+		toggle_bp_task_slot(bp, cpu, enable, type, weight);
+	} else {
 		for_each_online_cpu(cpu)
-			toggle_bp_task_slot(tsk, cpu, enable, type, weight);
-		return;
+			toggle_bp_task_slot(bp, cpu, enable, type, weight);
 	}
 
-	/* Pinned counter cpu profiling */
 	if (enable)
-		per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
-	else
-		per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
+		list_add_tail(&bp->hw.bp_list, &bp_task_head);
 }

/*
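The reshuffled toggle_bp_slot() above encodes an ordering invariant: on disable the event leaves bp_task_head before the constraint table is recomputed, and on enable it joins only afterwards, so task_bp_pinned() never observes the event currently being accounted. Continuing the toy list from the previous sketch — the helper names here are invented for illustration:

static void bp_unlink(struct bp *bp)
{
	struct bp **p;

	for (p = &bp_task_head; *p; p = &(*p)->next) {
		if (*p == bp) {
			*p = bp->next;
			return;
		}
	}
}

static void bp_link(struct bp *bp)
{
	bp->next = bp_task_head;	/* the kernel appends with list_add_tail() */
	bp_task_head = bp;
}

static void toggle_bp_slot(struct bp *bp, int enable)
{
	if (!enable)
		bp_unlink(bp);	/* off the list first... */

	/* ...recompute the constraints here: task_bp_pinned(bp->ctx,
	 * bp->type) now excludes @bp on both the enable and disable
	 * paths, matching the "must be not on the list" rule above. */

	if (enable)
		bp_link(bp);	/* linked only once accounted */
}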
@@ -301,6 +301,10 @@ static int __reserve_bp_slot(struct perf_event *bp)
 	weight = hw_breakpoint_weight(bp);
 
 	fetch_bp_busy_slots(&slots, bp, type);
+	/*
+	 * Simulate the addition of this breakpoint to the constraints
+	 * and see the result.
+	 */
 	fetch_this_slot(&slots, weight);
 
 	/* Flexible counters need to keep at least one slot */
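For context on the hunk above: fetch_this_slot() folds the candidate breakpoint's weight into the freshly fetched constraint totals, so the reservation test sees the world as if the breakpoint were already installed. A condensed sketch of that reserve path — field names follow the patch, but the slot test is paraphrased from this kernel's __reserve_bp_slot(), so treat it as an approximation:

/* Assumed shape: slots->pinned and slots->flexible were just filled
 * in by fetch_bp_busy_slots(). */
struct bp_busy_slots { int pinned; int flexible; };

static int reserve_bp_slot_sketch(struct bp_busy_slots *slots,
				  int weight, int nr_slots)
{
	/* Simulate the addition of this breakpoint to the constraints
	 * and see the result. */
	slots->pinned += weight;

	/* Flexible counters need to keep at least one slot. */
	if (slots->pinned + (slots->flexible ? 1 : 0) > nr_slots)
		return -1;	/* the kernel returns -ENOSPC */

	return 0;
}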
