Commit 3a14ac0

---
r: 277315
b: refs/heads/master
c: bc1738f
h: refs/heads/master
i:
  277313: 4f1ced1
  277311: 1d5ae95
v: v3
Robert Richter authored and Ingo Molnar committed Dec 6, 2011
1 parent 24f5cd8 commit 3a14ac0
Showing 4 changed files with 73 additions and 6 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 1e2ad28f80b4e155678259238f51edebc19e4014
+refs/heads/master: bc1738f6ee83015f090867813dcca4d690e7917c
45 changes: 43 additions & 2 deletions trunk/arch/x86/kernel/cpu/perf_event.c
@@ -499,11 +499,16 @@ struct sched_state {
	unsigned long	used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
};

+/* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */
+#define	SCHED_STATES_MAX	2
+
struct perf_sched {
	int			max_weight;
	int			max_events;
	struct event_constraint	**constraints;
	struct sched_state	state;
+	int			saved_states;
+	struct sched_state	saved[SCHED_STATES_MAX];
};

/*
@@ -529,11 +534,34 @@ static void perf_sched_init(struct perf_sched *sched, struct event_constraint **
	sched->state.unassigned	= num;
}

+static void perf_sched_save_state(struct perf_sched *sched)
+{
+	if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX))
+		return;
+
+	sched->saved[sched->saved_states] = sched->state;
+	sched->saved_states++;
+}
+
+static bool perf_sched_restore_state(struct perf_sched *sched)
+{
+	if (!sched->saved_states)
+		return false;
+
+	sched->saved_states--;
+	sched->state = sched->saved[sched->saved_states];
+
+	/* continue with next counter: */
+	clear_bit(sched->state.counter++, sched->state.used);
+
+	return true;
+}

/*
 * Select a counter for the current event to schedule. Return true on
 * success.
 */
-static bool perf_sched_find_counter(struct perf_sched *sched)
+static bool __perf_sched_find_counter(struct perf_sched *sched)
{
	struct event_constraint *c;
	int idx;
@@ -557,6 +585,19 @@ static bool perf_sched_find_counter(struct perf_sched *sched)
	if (idx >= X86_PMC_IDX_MAX)
		return false;

+	if (c->overlap)
+		perf_sched_save_state(sched);
+
+	return true;
+}
+
+static bool perf_sched_find_counter(struct perf_sched *sched)
+{
+	while (!__perf_sched_find_counter(sched)) {
+		if (!perf_sched_restore_state(sched))
+			return false;
+	}
+
	return true;
}

@@ -1250,7 +1291,7 @@ static int __init init_hw_perf_events(void)

	unconstrained = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
-				   0, x86_pmu.num_counters);
+				   0, x86_pmu.num_counters, 0);

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
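The new helpers above implement bounded backtracking for the counter scheduler: perf_sched_save_state() pushes a snapshot of the whole scheduler state each time a counter is taken for an overlap-constrained event, and when a later event cannot be placed, perf_sched_restore_state() pops the most recent snapshot, frees the counter chosen back then, and resumes the search at the next candidate (clear_bit(sched->state.counter++, ...)). The new perf_sched_find_counter() wrapper is then a plain retry loop around the greedy __perf_sched_find_counter(). A minimal standalone sketch of the same checkpoint/retry pattern, with hypothetical names (not the kernel code):

	#include <stdbool.h>

	#define STATES_MAX 2			/* mirrors SCHED_STATES_MAX */

	struct state {
		int counter;			/* next counter index to try */
		unsigned used;			/* bitmask of counters already taken */
	};

	static struct state cur;
	static struct state saved[STATES_MAX];
	static int nsaved;

	/* checkpoint the current state before a choice we may have to revisit */
	static void save_state(void)
	{
		if (nsaved >= STATES_MAX)
			return;			/* stack full: no further undo possible */
		saved[nsaved++] = cur;
	}

	/* undo the most recent checkpointed choice and skip past that counter */
	static bool restore_state(void)
	{
		if (!nsaved)
			return false;			/* nothing to undo: scheduling fails */
		cur = saved[--nsaved];
		cur.used &= ~(1u << cur.counter);	/* free the counter chosen back then */
		cur.counter++;				/* resume the search after it */
		return true;
	}

Because at most SCHED_STATES_MAX snapshots are ever kept, the worst-case rescheduling work stays bounded even though unrestricted backtracking would be O(n!).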
30 changes: 28 additions & 2 deletions trunk/arch/x86/kernel/cpu/perf_event.h
@@ -45,6 +45,7 @@ struct event_constraint {
	u64	code;
	u64	cmask;
	int	weight;
+	int	overlap;
};

struct amd_nb {
@@ -151,15 +152,40 @@ struct cpu_hw_events {
	void	*kfree_on_online;
};

-#define __EVENT_CONSTRAINT(c, n, m, w) {\
+#define __EVENT_CONSTRAINT(c, n, m, w, o) {\
	{ .idxmsk64 = (n) },	\
	.code = (c),		\
	.cmask = (m),		\
	.weight = (w),		\
+	.overlap = (o),		\
}

#define EVENT_CONSTRAINT(c, n, m)	\
-	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
+	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0)

+/*
+ * The overlap flag marks event constraints with overlapping counter
+ * masks. This is the case if the counter mask of such an event is not
+ * a subset of any other counter mask of a constraint with an equal or
+ * higher weight, e.g.:
+ *
+ *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
+ *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
+ *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
+ *
+ * The event scheduler may not select the correct counter in the first
+ * cycle because it needs to know which subsequent events will be
+ * scheduled. It may fail to schedule the events then. So we set the
+ * overlap flag for such constraints to give the scheduler a hint which
+ * events to select for counter rescheduling.
+ *
+ * Care must be taken as the rescheduling algorithm is O(n!), which
+ * will increase scheduling cycles for an over-committed system
+ * dramatically. The number of such EVENT_CONSTRAINT_OVERLAP() macros
+ * and their counter masks must be kept at a minimum.
+ */
+#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
+	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1)

/*
 * Constraint on the Event code.
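To make the comment above concrete: take one event constrained to mask 0x09 (counters 0 and 3) and three events constrained to 0x07 (counters 0-2). A purely greedy pass puts the 0x09 event on counter 0 and then finds only two free counters left for the three 0x07 events, so scheduling fails; rescheduling the 0x09 event onto counter 3 lets all four fit. The brute-force simulation below demonstrates the effect (a sketch only; the kernel bounds the search with SCHED_STATES_MAX checkpoints rather than recursing over every choice):

	#include <stdbool.h>
	#include <stdio.h>

	/* Assign a distinct counter (a set bit in the mask) to each event,
	 * trying counters in ascending order and backtracking on failure. */
	static bool schedule_events(const unsigned *masks, int n, int i,
				    unsigned used, int *assign)
	{
		if (i == n)
			return true;				/* all events placed */

		for (int idx = 0; idx < 32; idx++) {
			if (!(masks[i] & ~used & (1u << idx)))
				continue;			/* not allowed or already taken */
			assign[i] = idx;
			if (schedule_events(masks, n, i + 1, used | (1u << idx), assign))
				return true;
			/* this placement led to a dead end: try the next counter */
		}
		return false;
	}

	int main(void)
	{
		/* one c_overlaps-style event, then three c_another1-style events */
		unsigned masks[] = { 0x09, 0x07, 0x07, 0x07 };
		int assign[4];

		if (schedule_events(masks, 4, 0, 0u, assign))
			for (int i = 0; i < 4; i++)
				printf("event %d -> counter %d\n", i, assign[i]);
		return 0;
	}

After backtracking, this prints event 0 on counter 3 and events 1-3 on counters 0-2; without the retry, the last event would have no counter left.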
2 changes: 1 addition & 1 deletion trunk/arch/x86/kernel/cpu/perf_event_amd.c
@@ -492,7 +492,7 @@ static __initconst const struct x86_pmu amd_pmu = {
static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
-static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT(0, 0x09, 0);
+static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);

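Decoded, these Family 15h counter masks select PMC0-PMC5 as follows: 0x01 = {PMC0}, 0x07 = {PMC0-2}, 0x08 = {PMC3}, 0x09 = {PMC0, PMC3}, 0x3F = {PMC0-5}, 0x38 = {PMC3-5}. The 0x09 mask overlaps both 0x07 and 0x38 without being a subset of either, which is exactly the situation described in the perf_event.h comment, so amd_f15_PMC30 is the one constraint here that needs EVENT_CONSTRAINT_OVERLAP().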
