Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 191275
b: refs/heads/master
c: f93a205
h: refs/heads/master
i:
  191273: b4e0cfd
  191271: 955e80a
v: v3
  • Loading branch information
Frederic Weisbecker committed May 1, 2010
1 parent 504f1f3 commit 37b7ab2
Show file tree
Hide file tree
Showing 2 changed files with 46 additions and 19 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 0102752e4c9e0655b39734550d4c35327954f7f9
refs/heads/master: f93a20541134fa767e8dc4eb32e956d30b9f6b92
63 changes: 45 additions & 18 deletions trunk/kernel/hw_breakpoint.c
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,11 @@ struct bp_busy_slots {
/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

/*
 * Number of constraint slots this breakpoint consumes.
 *
 * Default weight is 1 slot per breakpoint; __weak so an architecture
 * can override this when a single breakpoint can span several slots
 * (e.g. a wide watchpoint covering multiple debug registers).
 */
__weak int hw_breakpoint_weight(struct perf_event *bp)
{
return 1;
}

static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
{
if (bp->attr.bp_type & HW_BREAKPOINT_RW)
Expand Down Expand Up @@ -124,7 +129,7 @@ static int task_bp_pinned(struct task_struct *tsk, enum bp_type_idx type)
list_for_each_entry(bp, list, event_entry) {
if (bp->attr.type == PERF_TYPE_BREAKPOINT)
if (find_slot_idx(bp) == type)
count++;
count += hw_breakpoint_weight(bp);
}

raw_spin_unlock_irqrestore(&ctx->lock, flags);
Expand Down Expand Up @@ -173,55 +178,71 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
}
}

/*
 * Account this breakpoint's own slot cost into @slots.
 *
 * Flexible events are still charged as pinned here: until we can
 * guarantee that no flexible event is ever scheduled ahead of a
 * pinned one on the same cpu, they must reserve slots the same way.
 */
static void fetch_this_slot(struct bp_busy_slots *slots, int weight)
{
	slots->pinned += weight;
}

/*
 * Add/remove a pinned breakpoint for the given task in our constraint
 * table.
 *
 * nr_task_bp_pinned[type] is a histogram: tsk_pinned[i] counts how many
 * tasks have (i + 1) pinned slots in use of this type on @cpu.  Moving
 * this task's total by @weight means incrementing the bucket for the new
 * total and decrementing the bucket for the old one (and vice versa on
 * disable).
 *
 * NOTE(review): the scraped diff interleaved the pre- and post-commit
 * bodies (duplicate signatures, stale "count" logic); this is the
 * reconstructed post-commit version.
 */
static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable,
				enum bp_type_idx type, int weight)
{
	unsigned int *tsk_pinned;
	int old_count = 0;
	int old_idx = 0;
	int idx = 0;

	old_count = task_bp_pinned(tsk, type);
	old_idx = old_count - 1;
	idx = old_idx + weight;

	tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
	if (enable) {
		tsk_pinned[idx]++;
		if (old_count > 0)
			tsk_pinned[old_idx]--;
	} else {
		tsk_pinned[idx]--;
		if (old_count > 0)
			tsk_pinned[old_idx]++;
	}
}

/*
 * Add/remove the given breakpoint in our constraint table.
 *
 * Task-bound breakpoints update the per-task pinned histogram — on one
 * cpu when the event is cpu-affine, otherwise on every online cpu.
 * Cpu-bound (system-wide) breakpoints simply adjust the per-cpu pinned
 * count by @weight.
 *
 * NOTE(review): the scraped diff interleaved the pre- and post-commit
 * bodies (duplicate signatures and call sites); this is the
 * reconstructed post-commit version.
 */
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
	       int weight)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->ctx->task;

	/* Pinned counter task profiling */
	if (tsk) {
		if (cpu >= 0) {
			toggle_bp_task_slot(tsk, cpu, enable, type, weight);
			return;
		}

		for_each_online_cpu(cpu)
			toggle_bp_task_slot(tsk, cpu, enable, type, weight);
		return;
	}

	/* Pinned counter cpu profiling */
	if (enable)
		per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
	else
		per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
}

/*
Expand Down Expand Up @@ -269,20 +290,24 @@ static int __reserve_bp_slot(struct perf_event *bp)
{
struct bp_busy_slots slots = {0};
enum bp_type_idx type;
int weight;

/* Basic checks */
if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
bp->attr.bp_type == HW_BREAKPOINT_INVALID)
return -EINVAL;

type = find_slot_idx(bp);
weight = hw_breakpoint_weight(bp);

fetch_bp_busy_slots(&slots, bp, type);
fetch_this_slot(&slots, weight);

/* Flexible counters need to keep at least one slot */
if (slots.pinned + (!!slots.flexible) == HBP_NUM)
if (slots.pinned + (!!slots.flexible) > HBP_NUM)
return -ENOSPC;

toggle_bp_slot(bp, true, type);
toggle_bp_slot(bp, true, type, weight);

return 0;
}
Expand All @@ -303,9 +328,11 @@ int reserve_bp_slot(struct perf_event *bp)
/*
 * Release the constraint slots held by @bp, undoing __reserve_bp_slot()
 * with the same type and weight.
 *
 * Must be called under nr_bp_mutex.
 *
 * NOTE(review): the scraped diff kept the stale pre-commit
 * toggle_bp_slot() call alongside the new one; this is the
 * reconstructed post-commit version.
 */
static void __release_bp_slot(struct perf_event *bp)
{
	enum bp_type_idx type;
	int weight;

	type = find_slot_idx(bp);
	weight = hw_breakpoint_weight(bp);
	toggle_bp_slot(bp, false, type, weight);
}

void release_bp_slot(struct perf_event *bp)
Expand Down

0 comments on commit 37b7ab2

Please sign in to comment.