Commit

---
r: 234424
b: refs/heads/master
c: 41bf498
h: refs/heads/master
v: v3
Robert Richter authored and Ingo Molnar committed Feb 16, 2011
1 parent 82a7a1c commit 9a1ba1e
Showing 3 changed files with 26 additions and 16 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: d45dd923fcc620c948bd1eda16cc61426ac31646
+refs/heads/master: 41bf498949a263fa0b2d32524b89d696ac330e94
36 changes: 23 additions & 13 deletions trunk/arch/x86/kernel/cpu/perf_event.c
@@ -321,6 +321,16 @@ x86_perf_event_update(struct perf_event *event)
return new_raw_count;
}

+static inline unsigned int x86_pmu_config_addr(int index)
+{
+	return x86_pmu.eventsel + index;
+}
+
+static inline unsigned int x86_pmu_event_addr(int index)
+{
+	return x86_pmu.perfctr + index;
+}

static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);

@@ -331,26 +341,26 @@ static bool reserve_pmc_hardware(void)
int i;

for (i = 0; i < x86_pmu.num_counters; i++) {
-if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
+if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
goto perfctr_fail;
}

for (i = 0; i < x86_pmu.num_counters; i++) {
-if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
+if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
goto eventsel_fail;
}

return true;

eventsel_fail:
for (i--; i >= 0; i--)
-release_evntsel_nmi(x86_pmu.eventsel + i);
+release_evntsel_nmi(x86_pmu_config_addr(i));

i = x86_pmu.num_counters;

perfctr_fail:
for (i--; i >= 0; i--)
-release_perfctr_nmi(x86_pmu.perfctr + i);
+release_perfctr_nmi(x86_pmu_event_addr(i));

return false;
}
@@ -360,8 +370,8 @@ static void release_pmc_hardware(void)
int i;

for (i = 0; i < x86_pmu.num_counters; i++) {
-release_perfctr_nmi(x86_pmu.perfctr + i);
-release_evntsel_nmi(x86_pmu.eventsel + i);
+release_perfctr_nmi(x86_pmu_event_addr(i));
+release_evntsel_nmi(x86_pmu_config_addr(i));
}
}

@@ -382,7 +392,7 @@ static bool check_hw_exists(void)
* complain and bail.
*/
for (i = 0; i < x86_pmu.num_counters; i++) {
-reg = x86_pmu.eventsel + i;
+reg = x86_pmu_config_addr(i);
ret = rdmsrl_safe(reg, &val);
if (ret)
goto msr_fail;
@@ -407,8 +417,8 @@ static bool check_hw_exists(void)
* that don't trap on the MSR access and always return 0s.
*/
val = 0xabcdUL;
-ret = checking_wrmsrl(x86_pmu.perfctr, val);
-ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new);
+ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
+ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
if (ret || val != val_new)
goto msr_fail;

@@ -617,11 +627,11 @@ static void x86_pmu_disable_all(void)

if (!test_bit(idx, cpuc->active_mask))
continue;
-rdmsrl(x86_pmu.eventsel + idx, val);
+rdmsrl(x86_pmu_config_addr(idx), val);
if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
continue;
val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
-wrmsrl(x86_pmu.eventsel + idx, val);
+wrmsrl(x86_pmu_config_addr(idx), val);
}
}

@@ -1110,8 +1120,8 @@ void perf_event_print_debug(void)
pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);

for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
-rdmsrl(x86_pmu.perfctr + idx, pmc_count);
+rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
+rdmsrl(x86_pmu_event_addr(idx), pmc_count);

prev_left = per_cpu(pmc_prev_left[idx], cpu);

4 changes: 2 additions & 2 deletions trunk/arch/x86/kernel/cpu/perf_event_intel.c
@@ -691,8 +691,8 @@ static void intel_pmu_reset(void)
printk("clearing PMU state on CPU#%d\n", smp_processor_id());

for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
-checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
+checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
+checking_wrmsrl(x86_pmu_event_addr(idx), 0ull);
}
for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
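For context (not part of the patch): a minimal, self-contained C sketch of the index-to-MSR-address mapping that the two new helpers centralize. The base addresses below are assumptions for illustration (Intel architectural perfmon: MSR_ARCH_PERFMON_EVENTSEL0 = 0x186, MSR_ARCH_PERFMON_PERFCTR0 = 0xc1); in the kernel, x86_pmu.eventsel and x86_pmu.perfctr are filled in per CPU vendor during PMU init, and the struct and program below are purely hypothetical stand-ins.

/*
 * Standalone sketch, not kernel code: models how x86_pmu_config_addr()
 * and x86_pmu_event_addr() turn a counter index into MSR addresses.
 * The base values are assumed (Intel architectural perfmon layout).
 */
#include <stdio.h>

struct pmu_model {
	unsigned int eventsel;	/* first event-select (config) MSR */
	unsigned int perfctr;	/* first counter (event count) MSR */
	int num_counters;
};

static struct pmu_model x86_pmu = {
	.eventsel	= 0x186,	/* MSR_ARCH_PERFMON_EVENTSEL0 (assumed) */
	.perfctr	= 0xc1,		/* MSR_ARCH_PERFMON_PERFCTR0 (assumed) */
	.num_counters	= 4,
};

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + index;
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + index;
}

int main(void)
{
	int i;

	/* Print the MSR pair each counter index resolves to. */
	for (i = 0; i < x86_pmu.num_counters; i++)
		printf("counter %d: config MSR 0x%x, count MSR 0x%x\n",
		       i, x86_pmu_config_addr(i), x86_pmu_event_addr(i));
	return 0;
}

With these assumed bases the loop prints config MSRs 0x186..0x189 and counter MSRs 0xc1..0xc4. The patch itself does not change the arithmetic; it only routes the existing open-coded "base + index" calculations through the two helpers, presumably so that any later change to the address scheme needs to touch only one place.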
