
Commit

---
r: 191155
b: refs/heads/master
c: 948b1bb
h: refs/heads/master
i:
  191153: 5783cbe
  191151: 431e919
v: v3
Robert Richter authored and Ingo Molnar committed Apr 2, 2010
1 parent dadc280 commit 43a4a29
Showing 8 changed files with 66 additions and 66 deletions.
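The diff is a mechanical rename of the x86 PMU capability fields back to counter-oriented names (num_events to num_counters, num_events_fixed to num_counters_fixed, event_bits to cntval_bits, event_mask to cntval_mask) in struct x86_pmu, in the CPUID leaf 0xA unions, and at every use site in the generic, AMD, Intel, P4 and P6 PMU code.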
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: ec5e61aabeac58670691bd0613388d16697d0d81
+refs/heads/master: 948b1bb89a44561560531394c18da4a99215f772
4 changes: 2 additions & 2 deletions trunk/arch/x86/include/asm/perf_event.h
@@ -67,7 +67,7 @@
union cpuid10_eax {
struct {
unsigned int version_id:8;
-unsigned int num_events:8;
+unsigned int num_counters:8;
unsigned int bit_width:8;
unsigned int mask_length:8;
} split;
@@ -76,7 +76,7 @@ union cpuid10_eax {

union cpuid10_edx {
struct {
-unsigned int num_events_fixed:4;
+unsigned int num_counters_fixed:4;
unsigned int reserved:28;
} split;
unsigned int full;
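The renamed bitfields above describe CPUID leaf 0xA (architectural performance monitoring). As a point of reference, a minimal user-space sketch that decodes the same leaf through these unions; it assumes a GCC or Clang toolchain and its <cpuid.h> __get_cpuid() helper, and is not kernel code:

/* Sketch: decode CPUID leaf 0xA into the renamed fields (illustrative only). */
#include <cpuid.h>
#include <stdio.h>

union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:4;
		unsigned int reserved:28;
	} split;
	unsigned int full;
};

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	union cpuid10_eax ax;
	union cpuid10_edx dx;

	if (!__get_cpuid(0x0a, &eax, &ebx, &ecx, &edx))
		return 1;	/* leaf 0xA not supported */

	ax.full = eax;
	dx.full = edx;
	printf("version %u: %u generic counters, %u bits wide, %u fixed counters\n",
	       ax.split.version_id, ax.split.num_counters,
	       ax.split.bit_width, dx.split.num_counters_fixed);
	return 0;
}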
74 changes: 37 additions & 37 deletions trunk/arch/x86/kernel/cpu/perf_event.c
@@ -195,10 +195,10 @@ struct x86_pmu {
u64 (*event_map)(int);
u64 (*raw_event)(u64);
int max_events;
-int num_events;
-int num_events_fixed;
-int event_bits;
-u64 event_mask;
+int num_counters;
+int num_counters_fixed;
+int cntval_bits;
+u64 cntval_mask;
int apic;
u64 max_period;
struct event_constraint *
@@ -268,7 +268,7 @@ static u64
x86_perf_event_update(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
-int shift = 64 - x86_pmu.event_bits;
+int shift = 64 - x86_pmu.cntval_bits;
u64 prev_raw_count, new_raw_count;
int idx = hwc->idx;
s64 delta;
@@ -320,12 +320,12 @@ static bool reserve_pmc_hardware(void)
if (nmi_watchdog == NMI_LOCAL_APIC)
disable_lapic_nmi_watchdog();

-for (i = 0; i < x86_pmu.num_events; i++) {
+for (i = 0; i < x86_pmu.num_counters; i++) {
if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
goto perfctr_fail;
}

-for (i = 0; i < x86_pmu.num_events; i++) {
+for (i = 0; i < x86_pmu.num_counters; i++) {
if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
goto eventsel_fail;
}
@@ -336,7 +336,7 @@ static bool reserve_pmc_hardware(void)
for (i--; i >= 0; i--)
release_evntsel_nmi(x86_pmu.eventsel + i);

-i = x86_pmu.num_events;
+i = x86_pmu.num_counters;

perfctr_fail:
for (i--; i >= 0; i--)
@@ -352,7 +352,7 @@ static void release_pmc_hardware(void)
{
int i;

-for (i = 0; i < x86_pmu.num_events; i++) {
+for (i = 0; i < x86_pmu.num_counters; i++) {
release_perfctr_nmi(x86_pmu.perfctr + i);
release_evntsel_nmi(x86_pmu.eventsel + i);
}
@@ -547,7 +547,7 @@ static void x86_pmu_disable_all(void)
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;

-for (idx = 0; idx < x86_pmu.num_events; idx++) {
+for (idx = 0; idx < x86_pmu.num_counters; idx++) {
u64 val;

if (!test_bit(idx, cpuc->active_mask))
@@ -582,7 +582,7 @@ static void x86_pmu_enable_all(int added)
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;

-for (idx = 0; idx < x86_pmu.num_events; idx++) {
+for (idx = 0; idx < x86_pmu.num_counters; idx++) {
struct perf_event *event = cpuc->events[idx];
u64 val;

@@ -657,14 +657,14 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
* assign events to counters starting with most
* constrained events.
*/
-wmax = x86_pmu.num_events;
+wmax = x86_pmu.num_counters;

/*
* when fixed event counters are present,
* wmax is incremented by 1 to account
* for one more choice
*/
-if (x86_pmu.num_events_fixed)
+if (x86_pmu.num_counters_fixed)
wmax++;

for (w = 1, num = n; num && w <= wmax; w++) {
@@ -714,7 +714,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
struct perf_event *event;
int n, max_count;

-max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;
+max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;

/* current number of events already accepted */
n = cpuc->n_events;
@@ -904,7 +904,7 @@ x86_perf_event_set_period(struct perf_event *event)
atomic64_set(&hwc->prev_count, (u64)-left);

wrmsrl(hwc->event_base + idx,
-(u64)(-left) & x86_pmu.event_mask);
+(u64)(-left) & x86_pmu.cntval_mask);

perf_event_update_userpage(event);

@@ -987,7 +987,7 @@ void perf_event_print_debug(void)
unsigned long flags;
int cpu, idx;

-if (!x86_pmu.num_events)
+if (!x86_pmu.num_counters)
return;

local_irq_save(flags);
@@ -1011,7 +1011,7 @@ void perf_event_print_debug(void)
}
pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);

-for (idx = 0; idx < x86_pmu.num_events; idx++) {
+for (idx = 0; idx < x86_pmu.num_counters; idx++) {
rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
rdmsrl(x86_pmu.perfctr + idx, pmc_count);

@@ -1024,7 +1024,7 @@ void perf_event_print_debug(void)
pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
cpu, idx, prev_left);
}
-for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
+for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
@@ -1089,15 +1089,15 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)

cpuc = &__get_cpu_var(cpu_hw_events);

-for (idx = 0; idx < x86_pmu.num_events; idx++) {
+for (idx = 0; idx < x86_pmu.num_counters; idx++) {
if (!test_bit(idx, cpuc->active_mask))
continue;

event = cpuc->events[idx];
hwc = &event->hw;

val = x86_perf_event_update(event);
-if (val & (1ULL << (x86_pmu.event_bits - 1)))
+if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
continue;

/*
@@ -1401,46 +1401,46 @@ void __init init_hw_perf_events(void)
if (x86_pmu.quirks)
x86_pmu.quirks();

-if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
+if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
-x86_pmu.num_events, X86_PMC_MAX_GENERIC);
-x86_pmu.num_events = X86_PMC_MAX_GENERIC;
+x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
+x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
}
-x86_pmu.intel_ctrl = (1 << x86_pmu.num_events) - 1;
-perf_max_events = x86_pmu.num_events;
+x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
+perf_max_events = x86_pmu.num_counters;

-if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
+if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
-x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
-x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
+x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
+x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
}

x86_pmu.intel_ctrl |=
-((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
+((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;

perf_events_lapic_init();
register_die_notifier(&perf_event_nmi_notifier);

unconstrained = (struct event_constraint)
-__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
-0, x86_pmu.num_events);
+__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
+0, x86_pmu.num_counters);

if (x86_pmu.event_constraints) {
for_each_event_constraint(c, x86_pmu.event_constraints) {
if (c->cmask != INTEL_ARCH_FIXED_MASK)
continue;

-c->idxmsk64 |= (1ULL << x86_pmu.num_events) - 1;
-c->weight += x86_pmu.num_events;
+c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
+c->weight += x86_pmu.num_counters;
}
}

pr_info("... version: %d\n", x86_pmu.version);
pr_info("... bit width: %d\n", x86_pmu.event_bits);
pr_info("... generic registers: %d\n", x86_pmu.num_events);
pr_info("... value mask: %016Lx\n", x86_pmu.event_mask);
pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
pr_info("... generic registers: %d\n", x86_pmu.num_counters);
pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
pr_info("... max period: %016Lx\n", x86_pmu.max_period);
pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);

perf_cpu_notifier(x86_pmu_notifier);
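The renamed fields carry the counter geometry through this file: x86_perf_event_update() shifts by 64 - cntval_bits to fold a partial-width counter into a 64-bit delta, x86_perf_event_set_period() masks the programmed value with cntval_mask, and x86_pmu_handle_irq() tests bit cntval_bits - 1 to see whether a counter has overflowed. A stand-alone sketch of the delta computation, assuming the usual arithmetic right shift for signed values (plain C, not kernel code):

#include <stdint.h>

/* Mirror of the update pattern above: a counter is only cntval_bits wide,
 * so both raw reads are shifted up to bit 63 and the signed difference is
 * shifted back down, discarding the unimplemented high bits. */
static uint64_t counter_delta(uint64_t prev_raw, uint64_t new_raw, int cntval_bits)
{
	int shift = 64 - cntval_bits;	/* e.g. 64 - 48 = 16 for a 48-bit counter */
	int64_t delta;

	delta = (int64_t)(new_raw << shift) - (int64_t)(prev_raw << shift);
	delta >>= shift;
	return (uint64_t)delta;
}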
12 changes: 6 additions & 6 deletions trunk/arch/x86/kernel/cpu/perf_event_amd.c
@@ -165,7 +165,7 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
* be removed on one CPU at a time AND PMU is disabled
* when we come here
*/
-for (i = 0; i < x86_pmu.num_events; i++) {
+for (i = 0; i < x86_pmu.num_counters; i++) {
if (nb->owners[i] == event) {
cmpxchg(nb->owners+i, event, NULL);
break;
@@ -215,7 +215,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
struct amd_nb *nb = cpuc->amd_nb;
struct perf_event *old = NULL;
-int max = x86_pmu.num_events;
+int max = x86_pmu.num_counters;
int i, j, k = -1;

/*
@@ -293,7 +293,7 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
/*
* initialize all possible NB constraints
*/
-for (i = 0; i < x86_pmu.num_events; i++) {
+for (i = 0; i < x86_pmu.num_counters; i++) {
__set_bit(i, nb->event_constraints[i].idxmsk);
nb->event_constraints[i].weight = 1;
}
@@ -385,9 +385,9 @@ static __initconst struct x86_pmu amd_pmu = {
.event_map = amd_pmu_event_map,
.raw_event = amd_pmu_raw_event,
.max_events = ARRAY_SIZE(amd_perfmon_event_map),
-.num_events = 4,
-.event_bits = 48,
-.event_mask = (1ULL << 48) - 1,
+.num_counters = 4,
+.cntval_bits = 48,
+.cntval_mask = (1ULL << 48) - 1,
.apic = 1,
/* use highest bit to detect overflow */
.max_period = (1ULL << 47) - 1,
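The three renamed AMD entries above are tied together: 48-bit counters, a value mask covering exactly those bits, and a max_period kept below the top bit so that bit 47 only flips on an overflow (the "use highest bit to detect overflow" comment). A tiny consistency check, illustrative only:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const int      cntval_bits = 48;
	const uint64_t cntval_mask = (1ULL << 48) - 1;	/* .cntval_mask above */
	const uint64_t max_period  = (1ULL << 47) - 1;	/* .max_period above  */

	assert(cntval_mask == (1ULL << cntval_bits) - 1);
	assert(max_period == cntval_mask >> 1);	/* highest counter bit stays free */
	return 0;
}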
16 changes: 8 additions & 8 deletions trunk/arch/x86/kernel/cpu/perf_event_intel.c
@@ -653,20 +653,20 @@ static void intel_pmu_reset(void)
unsigned long flags;
int idx;

-if (!x86_pmu.num_events)
+if (!x86_pmu.num_counters)
return;

local_irq_save(flags);

printk("clearing PMU state on CPU#%d\n", smp_processor_id());

-for (idx = 0; idx < x86_pmu.num_events; idx++) {
+for (idx = 0; idx < x86_pmu.num_counters; idx++) {
checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
}
-for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
+for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
-}

if (ds)
ds->bts_index = ds->bts_buffer_base;

@@ -901,16 +901,16 @@ static __init int intel_pmu_init(void)
x86_pmu = intel_pmu;

x86_pmu.version = version;
-x86_pmu.num_events = eax.split.num_events;
-x86_pmu.event_bits = eax.split.bit_width;
-x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;
+x86_pmu.num_counters = eax.split.num_counters;
+x86_pmu.cntval_bits = eax.split.bit_width;
+x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;

/*
* Quirk: v2 perfmon does not report fixed-purpose events, so
* assume at least 3 events:
*/
if (version > 1)
-x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
+x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

/*
* v2 and above have a perf capabilities MSR
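One detail worth spelling out from intel_pmu_init() above: the fixed-counter count is only taken from CPUID for version > 1, and because v2 perfmon may not report fixed-purpose counters the result is clamped to at least 3. A minimal sketch of that quirk; intel_fixed_counters() is a hypothetical helper name, not a kernel function:

/* Sketch of the fixed-counter quirk above (plain C, illustrative only). */
static int intel_fixed_counters(int version, unsigned int reported)
{
	if (version <= 1)
		return 0;			/* v1: field left at its default */
	/* v2 perfmon may not report fixed-purpose counters: assume at least 3 */
	return reported > 3 ? (int)reported : 3;
}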
14 changes: 7 additions & 7 deletions trunk/arch/x86/kernel/cpu/perf_event_p4.c
@@ -483,7 +483,7 @@ static void p4_pmu_disable_all(void)
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;

-for (idx = 0; idx < x86_pmu.num_events; idx++) {
+for (idx = 0; idx < x86_pmu.num_counters; idx++) {
struct perf_event *event = cpuc->events[idx];
if (!test_bit(idx, cpuc->active_mask))
continue;
@@ -540,7 +540,7 @@ static void p4_pmu_enable_all(int added)
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;

-for (idx = 0; idx < x86_pmu.num_events; idx++) {
+for (idx = 0; idx < x86_pmu.num_counters; idx++) {
struct perf_event *event = cpuc->events[idx];
if (!test_bit(idx, cpuc->active_mask))
continue;
@@ -562,7 +562,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)

cpuc = &__get_cpu_var(cpu_hw_events);

-for (idx = 0; idx < x86_pmu.num_events; idx++) {
+for (idx = 0; idx < x86_pmu.num_counters; idx++) {

if (!test_bit(idx, cpuc->active_mask))
continue;
@@ -579,7 +579,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
p4_pmu_clear_cccr_ovf(hwc);

val = x86_perf_event_update(event);
-if (val & (1ULL << (x86_pmu.event_bits - 1)))
+if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
continue;

/*
@@ -794,10 +794,10 @@ static __initconst struct x86_pmu p4_pmu = {
* though leave it restricted at moment assuming
* HT is on
*/
-.num_events = ARCH_P4_MAX_CCCR,
+.num_counters = ARCH_P4_MAX_CCCR,
.apic = 1,
-.event_bits = 40,
-.event_mask = (1ULL << 40) - 1,
+.cntval_bits = 40,
+.cntval_mask = (1ULL << 40) - 1,
.max_period = (1ULL << 39) - 1,
.hw_config = p4_hw_config,
.schedule_events = p4_pmu_schedule_events,
6 changes: 3 additions & 3 deletions trunk/arch/x86/kernel/cpu/perf_event_p6.c
@@ -119,16 +119,16 @@ static __initconst struct x86_pmu p6_pmu = {
.apic = 1,
.max_period = (1ULL << 31) - 1,
.version = 0,
-.num_events = 2,
+.num_counters = 2,
/*
* Events have 40 bits implemented. However they are designed such
* that bits [32-39] are sign extensions of bit 31. As such the
* effective width of a event for P6-like PMU is 32 bits only.
*
* See IA-32 Intel Architecture Software developer manual Vol 3B
*/
-.event_bits = 32,
-.event_mask = (1ULL << 32) - 1,
+.cntval_bits = 32,
+.cntval_mask = (1ULL << 32) - 1,
.get_event_constraints = x86_get_event_constraints,
.event_constraints = p6_event_constraints,
};
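The comment above is the whole reason cntval_bits is 32 on P6-class PMUs: the counters implement 40 bits, but bits 32-39 are sign extensions of bit 31, so nothing outside cntval_mask carries information. A small illustration (plain C, not kernel code):

#include <assert.h>
#include <stdint.h>

/* Rebuild the 40-bit register image a P6 counter would show for a given
 * 32-bit value: bits 32-39 simply mirror bit 31. */
static uint64_t p6_sign_extend(uint32_t value32)
{
	uint64_t high = (value32 & 0x80000000u) ? 0xffULL << 32 : 0;
	return high | value32;
}

int main(void)
{
	const uint64_t cntval_mask = (1ULL << 32) - 1;	/* .cntval_mask above */

	assert(p6_sign_extend(0x80000000u) == 0xff80000000ULL);
	assert((p6_sign_extend(0x80000000u) & cntval_mask) == 0x80000000u);
	return 0;
}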
