Commit

---
r: 175416
b: refs/heads/master
c: 6ab8886
h: refs/heads/master
v: v3
Stephen Rothwell authored and Ingo Molnar committed Dec 8, 2009
1 parent 50e84a2 commit 1bd731c
Showing 2 changed files with 8 additions and 8 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 2ff6cfd70720780234fdfea636218c2a62b31287
+refs/heads/master: 6ab8886326a1b9a3a8d164d8174e3c20703a03a2
14 changes: 7 additions & 7 deletions trunk/kernel/hw_breakpoint.c
@@ -52,7 +52,7 @@
 static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);
 
 /* Number of pinned task breakpoints in a cpu */
-static DEFINE_PER_CPU(unsigned int, task_bp_pinned[HBP_NUM]);
+static DEFINE_PER_CPU(unsigned int, nr_task_bp_pinned[HBP_NUM]);
 
 /* Number of non-pinned cpu/task breakpoints in a cpu */
 static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);
@@ -73,7 +73,7 @@ static DEFINE_MUTEX(nr_bp_mutex);
 static unsigned int max_task_bp_pinned(int cpu)
 {
 	int i;
-	unsigned int *tsk_pinned = per_cpu(task_bp_pinned, cpu);
+	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
 
 	for (i = HBP_NUM -1; i >= 0; i--) {
 		if (tsk_pinned[i] > 0)
@@ -162,7 +162,7 @@ static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
 
 	count = task_bp_pinned(tsk);
 
-	tsk_pinned = per_cpu(task_bp_pinned, cpu);
+	tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
 	if (enable) {
 		tsk_pinned[count]++;
 		if (count > 0)
@@ -209,7 +209,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
  * - If attached to a single cpu, check:
  *
  *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
- *            + max(per_cpu(task_bp_pinned, cpu)))) < HBP_NUM
+ *            + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
  *
  * -> If there are already non-pinned counters in this cpu, it means
  *    there is already a free slot for them.
@@ -220,7 +220,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
  * - If attached to every cpus, check:
  *
  *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
- *            + max(per_cpu(task_bp_pinned, *)))) < HBP_NUM
+ *            + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
  *
  * -> This is roughly the same, except we check the number of per cpu
  *    bp for every cpu and we keep the max one. Same for the per tasks
@@ -232,15 +232,15 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
  * - If attached to a single cpu, check:
  *
  *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
- *            + max(per_cpu(task_bp_pinned, cpu))) < HBP_NUM
+ *            + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
  *
  * -> Same checks as before. But now the nr_bp_flexible, if any, must keep
  *    one register at least (or they will never be fed).
  *
  * - If attached to every cpus, check:
  *
  *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
- *            + max(per_cpu(task_bp_pinned, *))) < HBP_NUM
+ *            + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
  */
 int reserve_bp_slot(struct perf_event *bp)
 {
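The renamed per-cpu variables feed the slot-accounting rule spelled out in the comment block above. What follows is a minimal userspace sketch of that rule for the pinned, single-cpu case, not the kernel implementation: the HBP_NUM and NR_CPUS values, the flat array layout, and the helper names max_task_pinned() and pinned_slot_fits() are illustrative stand-ins modeled on nr_cpu_bp_pinned, nr_task_bp_pinned and nr_bp_flexible.

/*
 * Userspace model of the breakpoint slot accounting described in the
 * hw_breakpoint.c comments. Illustrative only; names and constants are
 * stand-ins, not the kernel's.
 */
#include <stdbool.h>
#include <stdio.h>

#define HBP_NUM 4	/* debug registers per cpu (4 on x86) */
#define NR_CPUS 2	/* small model, not the kernel constant */

static unsigned int nr_cpu_bp_pinned[NR_CPUS];		/* pinned cpu-wide bps per cpu */
static unsigned int nr_task_bp_pinned[NR_CPUS][HBP_NUM];/* [n-1] = #tasks pinning n bps */
static unsigned int nr_bp_flexible[NR_CPUS];		/* non-pinned bps per cpu */

/*
 * Largest number of breakpoints pinned by any single task on @cpu,
 * mirroring the top-down scan in max_task_bp_pinned().
 */
static unsigned int max_task_pinned(int cpu)
{
	for (int i = HBP_NUM - 1; i >= 0; i--)
		if (nr_task_bp_pinned[cpu][i] > 0)
			return i + 1;
	return 0;
}

/*
 * Pinned request bound to a single cpu, per the comment block:
 * ((nr_bp_flexible > 1) + nr_cpu_bp_pinned + max(nr_task_bp_pinned)) < HBP_NUM
 */
static bool pinned_slot_fits(int cpu)
{
	unsigned int used = (nr_bp_flexible[cpu] > 1)
			    + nr_cpu_bp_pinned[cpu]
			    + max_task_pinned(cpu);

	return used < HBP_NUM;
}

int main(void)
{
	/* One task already pins 2 breakpoints on cpu 0, plus 1 cpu-wide pinned bp. */
	nr_task_bp_pinned[0][1] = 1;
	nr_cpu_bp_pinned[0] = 1;

	/* 0 + 1 + 2 = 3 < 4, so one more pinned breakpoint still fits. */
	printf("cpu 0: another pinned bp fits? %s\n",
	       pinned_slot_fits(0) ? "yes" : "no");
	return 0;
}

The histogram layout sketched here (index n-1 counts the tasks pinning n breakpoints) is what lets the worst-case task be found with a single top-down scan, which is exactly the loop shown in the max_task_bp_pinned() hunk of this diff.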
