Commit 76f014f

---
r: 312293
b: refs/heads/master
c: 90574eb
h: refs/heads/master
i:
  312291: 9810f3b
v: v3
Ingo Molnar committed Jul 5, 2012
1 parent b740f1a commit 76f014f
Showing 79 changed files with 4,531 additions and 1,071 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: ce5c1fe9a9e059b5c58f0a7e2a3e687d0efac815
+refs/heads/master: 90574ebb7e6e0f7f74636ee87315890ba88d6a4a
2 changes: 2 additions & 0 deletions trunk/arch/x86/include/asm/msr.h
@@ -237,6 +237,8 @@ do { \
 	(high) = (u32)(_l >> 32); \
 } while (0)
 
+#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))
+
 #define rdtscp(low, high, aux) \
 do { \
 	unsigned long long _val = native_read_tscp(&(aux)); \
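The new rdpmcl() returns the whole 64-bit counter value from native_read_pmc(), so callers no longer have to reassemble it from the two 32-bit halves that the existing rdpmc() macro produces. A minimal sketch of what the underlying read does (hypothetical user-space helper, not the kernel's implementation):

/*
 * Sketch: RDPMC takes the counter index in ECX and returns the
 * 64-bit counter value in EDX:EAX, as native_read_pmc() does.
 */
static inline unsigned long long read_pmc_sketch(unsigned int counter)
{
	unsigned int lo, hi;

	asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
	return ((unsigned long long)hi << 32) | lo;
}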
2 changes: 2 additions & 0 deletions trunk/arch/x86/include/asm/paravirt.h
@@ -252,6 +252,8 @@ do { \
 	high = _l >> 32; \
 } while (0)
 
+#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))
+
 static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
 {
 	return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
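The paravirt variant keeps the same rdpmcl() interface but routes the read through the pv_cpu_ops table so a hypervisor can interpose, mirroring paravirt_rdtscp() shown above. A hedged sketch of the dispatch the macro relies on (assuming a read_pmc op exists alongside read_tscp, analogous to the call above):

/* Sketch: same PVOP_CALL1 pattern as paravirt_rdtscp() above. */
static inline u64 paravirt_read_pmc_sketch(int counter)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}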
2 changes: 1 addition & 1 deletion trunk/arch/x86/include/asm/uprobes.h
@@ -48,7 +48,7 @@ struct arch_uprobe_task {
 #endif
 };
 
-extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm);
+extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
 extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
 extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
 extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
4 changes: 3 additions & 1 deletion trunk/arch/x86/kernel/cpu/Makefile
@@ -32,7 +32,9 @@ obj-$(CONFIG_PERF_EVENTS) += perf_event.o
 
 ifdef CONFIG_PERF_EVENTS
 obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd.o
-obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_p4.o perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
+obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_p4.o
+obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
+obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o
 endif
 
 obj-$(CONFIG_X86_MCE) += mcheck/
28 changes: 12 additions & 16 deletions trunk/arch/x86/kernel/cpu/perf_event.c
@@ -35,17 +35,6 @@
 
 #include "perf_event.h"
 
-#if 0
-#undef wrmsrl
-#define wrmsrl(msr, val) \
-do { \
-	trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr), \
-			(unsigned long)(val)); \
-	native_write_msr((msr), (u32)((u64)(val)), \
-			(u32)((u64)(val) >> 32)); \
-} while (0)
-#endif
-
 struct x86_pmu x86_pmu __read_mostly;
 
 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
@@ -86,7 +75,7 @@ u64 x86_perf_event_update(struct perf_event *event)
 	 */
 again:
 	prev_raw_count = local64_read(&hwc->prev_count);
-	rdmsrl(hwc->event_base, new_raw_count);
+	rdpmcl(hwc->event_base_rdpmc, new_raw_count);
 
 	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
 			new_raw_count) != prev_raw_count)
@@ -637,7 +626,7 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
 	c = sched->constraints[sched->state.event];
 
 	/* Prefer fixed purpose counters */
-	if (x86_pmu.num_counters_fixed) {
+	if (c->idxmsk64 & (~0ULL << X86_PMC_IDX_FIXED)) {
 		idx = X86_PMC_IDX_FIXED;
 		for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
 			if (!__test_and_set_bit(idx, sched->state.used))
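This changes the question from "does the CPU have fixed counters?" to "does this event's constraint permit one?": ~0ULL << X86_PMC_IDX_FIXED is a mask of every counter index at or above the fixed range, so the AND is non-zero exactly when idxmsk64 allows a fixed counter. A worked sketch (assuming X86_PMC_IDX_FIXED is 32, as in this era's headers):

/* Sketch: bits 32..63 cover the fixed-purpose counters. */
u64 fixed_mask = ~0ULL << 32;	/* 0xffffffff00000000 */
u64 gp_only    = 0xfULL;	/* GP counters 0-3 only: test fails */
u64 with_fixed = 1ULL << 32;	/* fixed counter 0: test passes */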
@@ -704,8 +693,8 @@ static bool perf_sched_next_event(struct perf_sched *sched)
 /*
  * Assign a counter for each event.
  */
-static int perf_assign_events(struct event_constraint **constraints, int n,
-			int wmin, int wmax, int *assign)
+int perf_assign_events(struct event_constraint **constraints, int n,
+			int wmin, int wmax, int *assign)
 {
 	struct perf_sched sched;
 
@@ -830,9 +819,11 @@ static inline void x86_assign_hw_event(struct perf_event *event,
 	} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
 		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
 		hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED);
+		hwc->event_base_rdpmc = (hwc->idx - X86_PMC_IDX_FIXED) | 1<<30;
 	} else {
 		hwc->config_base = x86_pmu_config_addr(hwc->idx);
 		hwc->event_base = x86_pmu_event_addr(hwc->idx);
+		hwc->event_base_rdpmc = hwc->idx;
 	}
 }
 
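The 1<<30 follows the RDPMC index encoding: bit 30 of the index selects the fixed-function counter space, with the low bits numbering the counter within it, while general-purpose counters are addressed by their index directly. A small sketch condensing the two branches above (hypothetical helper):

/*
 * Sketch: compute the RDPMC index for a counter; bit 30 selects
 * the fixed-function counters, GP counters use their index as-is.
 */
static unsigned int rdpmc_index_sketch(int idx)
{
	if (idx >= X86_PMC_IDX_FIXED)
		return (idx - X86_PMC_IDX_FIXED) | (1u << 30);
	return idx;
}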
@@ -1649,7 +1640,12 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
 			      struct device_attribute *attr,
 			      const char *buf, size_t count)
 {
-	unsigned long val = simple_strtoul(buf, NULL, 0);
+	unsigned long val;
+	ssize_t ret;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret)
+		return ret;
 
 	if (!!val != !!x86_pmu.attr_rdpmc) {
 		x86_pmu.attr_rdpmc = !!val;
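Unlike simple_strtoul(), kstrtoul() rejects trailing garbage and out-of-range input, returning -EINVAL or -ERANGE, so the sysfs store hook can refuse a malformed write instead of silently applying a truncated number. A hedged sketch of the pattern (hypothetical store function):

/*
 * Sketch: kstrtoul("1abc", 0, &val) fails with -EINVAL, whereas
 * simple_strtoul("1abc", NULL, 0) quietly returned 1.
 */
static ssize_t store_val_sketch(const char *buf, size_t count)
{
	unsigned long val;
	int ret = kstrtoul(buf, 0, &val);	/* base 0 = auto-detect */

	if (ret)
		return ret;	/* error propagates to the writer */
	/* ... apply val ... */
	return count;
}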
17 changes: 16 additions & 1 deletion trunk/arch/x86/kernel/cpu/perf_event.h
@@ -14,6 +14,18 @@
 
 #include <linux/perf_event.h>
 
+#if 0
+#undef wrmsrl
+#define wrmsrl(msr, val) \
+do { \
+	unsigned int _msr = (msr); \
+	u64 _val = (val); \
+	trace_printk("wrmsrl(%x, %Lx)\n", (unsigned int)(_msr), \
+			(unsigned long long)(_val)); \
+	native_write_msr((_msr), (u32)(_val), (u32)(_val >> 32)); \
+} while (0)
+#endif
+
 /*
  * | NHM/WSM | SNB |
  * register -------------------------------
@@ -57,7 +69,7 @@ struct amd_nb {
 };
 
 /* The maximal number of PEBS events: */
-#define MAX_PEBS_EVENTS 4
+#define MAX_PEBS_EVENTS 8
 
 /*
  * A debug store configuration.
@@ -366,6 +378,7 @@ struct x86_pmu {
 	void (*drain_pebs)(struct pt_regs *regs);
 	struct event_constraint *pebs_constraints;
 	void (*pebs_aliases)(struct perf_event *event);
+	int max_pebs_events;
 
 	/*
 	 * Intel LBR
@@ -468,6 +481,8 @@ static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
 
 void x86_pmu_enable_all(int added);
 
+int perf_assign_events(struct event_constraint **constraints, int n,
+			int wmin, int wmax, int *assign);
 int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);
 
 void x86_pmu_stop(struct perf_event *event, int flags);
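Note that the relocated #if 0 wrmsrl() override is not a verbatim move: it now snapshots msr and val into the _msr/_val temporaries, so each argument is evaluated exactly once even though the body uses it several times. A minimal illustration of the double-evaluation hazard the temporaries avoid (hypothetical macro and helpers):

/*
 * Sketch: a macro that expands its argument twice applies any side
 * effect twice; LOG_AND_USE(msr++) would increment msr two times.
 */
#define LOG_AND_USE(x)	do { log_val((x)); use_val((x)); } while (0)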
2 changes: 2 additions & 0 deletions trunk/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1800,6 +1800,8 @@ __init int intel_pmu_init(void)
 	x86_pmu.events_maskl = ebx.full;
 	x86_pmu.events_mask_len = eax.split.mask_length;
 
+	x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
+
 	/*
 	 * Quirk: v2 perfmon does not report fixed-purpose events, so
 	 * assume at least 3 events:
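min_t() clamps MAX_PEBS_EVENTS to the number of general-purpose counters after forcing both operands to a single type, keeping the comparison well-defined when the operand types differ. Roughly what the helper does (a sketch, not the kernel's exact macro):

/* Sketch: both operands are cast to the named type, then compared. */
#define min_t_sketch(type, x, y) ({		\
	type __a = (x);				\
	type __b = (y);				\
	__a < __b ? __a : __b; })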
8 changes: 4 additions & 4 deletions trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -620,7 +620,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
 	 * Should not happen, we program the threshold at 1 and do not
 	 * set a reset value.
 	 */
-	WARN_ON_ONCE(n > 1);
+	WARN_ONCE(n > 1, "bad leftover pebs %d\n", n);
 	at += n - 1;
 
 	__intel_pmu_pebs_event(event, iregs, at);
@@ -651,10 +651,10 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 	 * Should not happen, we program the threshold at 1 and do not
 	 * set a reset value.
 	 */
-	WARN_ON_ONCE(n > MAX_PEBS_EVENTS);
+	WARN_ONCE(n > x86_pmu.max_pebs_events, "Unexpected number of pebs records %d\n", n);
 
 	for ( ; at < top; at++) {
-		for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) {
+		for_each_set_bit(bit, (unsigned long *)&at->status, x86_pmu.max_pebs_events) {
 			event = cpuc->events[bit];
 			if (!test_bit(bit, cpuc->active_mask))
 				continue;
@@ -670,7 +670,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 			break;
 		}
 
-		if (!event || bit >= MAX_PEBS_EVENTS)
+		if (!event || bit >= x86_pmu.max_pebs_events)
 			continue;
 
 		__intel_pmu_pebs_event(event, iregs, at);
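The drain loop relies on for_each_set_bit() visiting only set bit positions strictly below its size argument, so swapping the MAX_PEBS_EVENTS constant for x86_pmu.max_pebs_events directly narrows the scan to counters that exist on this CPU. A self-contained illustration (hypothetical values):

/*
 * Sketch: with mask 0b101001 and limit 4, the loop visits bits 0
 * and 3 only; bit 5 is at or above the limit and is skipped.
 */
unsigned long mask = 0x29;
int bit;

for_each_set_bit(bit, &mask, 4)
	pr_info("bit %d set\n", bit);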