Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 310601
b: refs/heads/master
c: b430f7c
h: refs/heads/master
i:
  310599: a862e0d
v: v3
  • Loading branch information
Peter Zijlstra authored and Ingo Molnar committed Jun 6, 2012
1 parent f265853 commit 84f44aa
Show file tree
Hide file tree
Showing 4 changed files with 67 additions and 29 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 436d03faf6961b30e13b2d0967aea9d772d6cf44
refs/heads/master: b430f7c4706aeba4270c7ab7744fc504b9315e1c
1 change: 1 addition & 0 deletions trunk/arch/x86/kernel/cpu/perf_event.c
Original file line number Diff line number Diff line change
Expand Up @@ -1496,6 +1496,7 @@ static struct cpu_hw_events *allocate_fake_cpuc(void)
if (!cpuc->shared_regs)
goto error;
}
cpuc->is_fake = 1;
return cpuc;
error:
free_fake_cpuc(cpuc);
Expand Down
1 change: 1 addition & 0 deletions trunk/arch/x86/kernel/cpu/perf_event.h
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,7 @@ struct cpu_hw_events {
struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */

unsigned int group_flag;
int is_fake;

/*
* Intel DebugStore bits
Expand Down
92 changes: 64 additions & 28 deletions trunk/arch/x86/kernel/cpu/perf_event_intel.c
Original file line number Diff line number Diff line change
Expand Up @@ -1119,27 +1119,33 @@ intel_bts_constraints(struct perf_event *event)
return NULL;
}

static bool intel_try_alt_er(struct perf_event *event, int orig_idx)
static int intel_alt_er(int idx)
{
if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
return false;
return idx;

if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) {
event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
event->hw.config |= 0x01bb;
event->hw.extra_reg.idx = EXTRA_REG_RSP_1;
event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
} else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) {
if (idx == EXTRA_REG_RSP_0)
return EXTRA_REG_RSP_1;

if (idx == EXTRA_REG_RSP_1)
return EXTRA_REG_RSP_0;

return idx;
}

/*
 * Commit an alternative extra-reg choice to the event: point the event's
 * extra_reg at @idx and rewrite the event select in hw.config to the
 * matching OFFCORE_RESPONSE encoding (0x01b7 for RSP_0, 0x01bb for RSP_1),
 * along with the corresponding MSR address.
 *
 * Must only be called for real (non-fake) cpucs, since it mutates event
 * state that validate_{group,event}() is not allowed to touch.
 */
static void intel_fixup_er(struct perf_event *event, int idx)
{
	event->hw.extra_reg.idx = idx;

	if (idx == EXTRA_REG_RSP_0) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= 0x01b7;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
	} else if (idx == EXTRA_REG_RSP_1) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= 0x01bb;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
	}
}

/*
Expand All @@ -1157,14 +1163,18 @@ __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
struct event_constraint *c = &emptyconstraint;
struct er_account *era;
unsigned long flags;
int orig_idx = reg->idx;
int idx = reg->idx;

/* already allocated shared msr */
if (reg->alloc)
/*
* reg->alloc can be set due to existing state, so for fake cpuc we
* need to ignore this, otherwise we might fail to allocate proper fake
* state for this extra reg constraint. Also see the comment below.
*/
if (reg->alloc && !cpuc->is_fake)
return NULL; /* call x86_get_event_constraint() */

again:
era = &cpuc->shared_regs->regs[reg->idx];
era = &cpuc->shared_regs->regs[idx];
/*
* we use spin_lock_irqsave() to avoid lockdep issues when
* passing a fake cpuc
Expand All @@ -1173,24 +1183,47 @@ __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,

if (!atomic_read(&era->ref) || era->config == reg->config) {

/*
* If its a fake cpuc -- as per validate_{group,event}() we
* shouldn't touch event state and we can avoid doing so
* since both will only call get_event_constraints() once
* on each event, this avoids the need for reg->alloc.
*
* Not doing the ER fixup will only result in era->reg being
* wrong, but since we won't actually try and program hardware
* this isn't a problem either.
*/
if (!cpuc->is_fake) {
if (idx != reg->idx)
intel_fixup_er(event, idx);

/*
* x86_schedule_events() can call get_event_constraints()
* multiple times on events in the case of incremental
* scheduling(). reg->alloc ensures we only do the ER
* allocation once.
*/
reg->alloc = 1;
}

/* lock in msr value */
era->config = reg->config;
era->reg = reg->reg;

/* one more user */
atomic_inc(&era->ref);

/* no need to reallocate during incremental event scheduling */
reg->alloc = 1;

/*
* need to call x86_get_event_constraint()
* to check if associated event has constraints
*/
c = NULL;
} else if (intel_try_alt_er(event, orig_idx)) {
raw_spin_unlock_irqrestore(&era->lock, flags);
goto again;
} else {
idx = intel_alt_er(idx);
if (idx != reg->idx) {
raw_spin_unlock_irqrestore(&era->lock, flags);
goto again;
}
}
raw_spin_unlock_irqrestore(&era->lock, flags);

Expand All @@ -1204,11 +1237,14 @@ __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
struct er_account *era;

/*
* only put constraint if extra reg was actually
* allocated. Also takes care of event which do
* not use an extra shared reg
* Only put constraint if extra reg was actually allocated. Also takes
* care of event which do not use an extra shared reg.
*
* Also, if this is a fake cpuc we shouldn't touch any event state
* (reg->alloc) and we don't care about leaving inconsistent cpuc state
* either since it'll be thrown out.
*/
if (!reg->alloc)
if (!reg->alloc || cpuc->is_fake)
return;

era = &cpuc->shared_regs->regs[reg->idx];
Expand Down

0 comments on commit 84f44aa

Please sign in to comment.