Commit 879b6aa
---
yaml
---
r: 182483
b: refs/heads/master
c: 38331f6
h: refs/heads/master
i:
  182481: f30730a
  182479: 5d15425
v: v3
Stephane Eranian authored and Ingo Molnar committed Feb 26, 2010
1 parent b784497 commit 879b6aa
Showing 3 changed files with 268 additions and 4 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: d76a0812ac4139ceb54daab3cc70e1bd8bd9d43a
+refs/heads/master: 38331f62c20456454eed9ebea2525f072c6f1d2e
265 changes: 262 additions & 3 deletions trunk/arch/x86/kernel/cpu/perf_event.c
@@ -80,6 +80,13 @@ struct event_constraint {
int weight;
};

struct amd_nb {
int nb_id; /* NorthBridge id */
int refcnt; /* reference count */
struct perf_event *owners[X86_PMC_IDX_MAX];
struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

struct cpu_hw_events {
struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
@@ -92,6 +99,7 @@ struct cpu_hw_events {
int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
u64 tags[X86_PMC_IDX_MAX];
struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
struct amd_nb *amd_nb;
};

#define __EVENT_CONSTRAINT(c, n, m, w) {\
@@ -153,6 +161,8 @@ struct x86_pmu {

static struct x86_pmu x86_pmu __read_mostly;

static raw_spinlock_t amd_nb_lock;

static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
.enabled = 1,
};
@@ -802,7 +812,7 @@ static u64 amd_pmu_event_map(int hw_event)

static u64 amd_pmu_raw_event(u64 hw_event)
{
-#define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL
+#define K7_EVNTSEL_EVENT_MASK 0xF000000FFULL
#define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL
#define K7_EVNTSEL_EDGE_MASK 0x000040000ULL
#define K7_EVNTSEL_INV_MASK 0x000800000ULL
@@ -2210,6 +2220,7 @@ perf_event_nmi_handler(struct notifier_block *self,
}

static struct event_constraint unconstrained;
static struct event_constraint emptyconstraint;

static struct event_constraint bts_constraint =
EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
@@ -2249,10 +2260,146 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
return &unconstrained;
}

/*
* AMD64 NorthBridge (NB) events are detected based on their event codes.
*/
static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
return (hwc->config & 0xe0) == 0xe0;
}
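
As an aside (not part of the patch), the mask test above treats any event-select value whose top three bits of the low byte are set -- i.e. 0xe0 through 0xff -- as an NB event. A tiny stand-alone C program, purely illustrative, enumerates them:

#include <stdio.h>

int main(void)
{
	unsigned int code;

	/* same test as amd_is_nb_event(), applied to the low event-select byte */
	for (code = 0; code <= 0xff; code++)
		if ((code & 0xe0) == 0xe0)
			printf("0x%02x is treated as an NB event\n", code);
	return 0;
}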

static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
struct amd_nb *nb = cpuc->amd_nb;
int i;

/*
* only care about NB events
*/
if (!(nb && amd_is_nb_event(hwc)))
return;

/*
* need to scan whole list because event may not have
* been assigned during scheduling
*
* no race condition possible because event can only
* be removed on one CPU at a time AND PMU is disabled
* when we come here
*/
for (i = 0; i < x86_pmu.num_events; i++) {
if (nb->owners[i] == event) {
cmpxchg(nb->owners+i, event, NULL);
break;
}
}
}

/*
* AMD64 NorthBridge events need special treatment because
* counter access needs to be synchronized across all cores
* of a package. Refer to BKDG section 3.12
*
* NB events are events measuring L3 cache and HyperTransport
* traffic. They are identified by an event code >= 0xe00.
* They measure events on the NorthBridge, which is shared
* by all cores on a package. NB events are counted on a
* shared set of counters. When a NB event is programmed
* in a counter, the data actually comes from a shared
* counter. Thus, access to those counters needs to be
* synchronized.
*
* We implement the synchronization such that no two cores
* can be measuring NB events using the same counters. Thus,
* we maintain a per-NB allocation table. The available slot
* is propagated using the event_constraint structure.
*
* We provide only one choice for each NB event based on
* the fact that only NB events have restrictions. Consequently,
* if a counter is available, there is a guarantee the NB event
* will be assigned to it. If no slot is available, an empty
* constraint is returned and scheduling will eventually fail
* for this event.
*
* Note that all cores attached to the same NB compete for the same
* counters to host NB events; this is why we use atomic ops. Some
* multi-chip CPUs may have more than one NB.
*
* Given that resources are allocated (cmpxchg), they must be
* eventually freed for others to use. This is accomplished by
* calling amd_put_event_constraints().
*
* Non-NB events are not impacted by this restriction.
*/
static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
-	return &unconstrained;
struct hw_perf_event *hwc = &event->hw;
struct amd_nb *nb = cpuc->amd_nb;
struct perf_event *old = NULL;
int max = x86_pmu.num_events;
int i, j, k = -1;

/*
* if not NB event or no NB, then no constraints
*/
if (!(nb && amd_is_nb_event(hwc)))
return &unconstrained;

/*
* detect if already present, if so reuse
*
* cannot merge with actual allocation
* because of possible holes
*
* event can already be present yet not assigned (in hwc->idx)
* because of successive calls to x86_schedule_events() from
* hw_perf_group_sched_in() without hw_perf_enable()
*/
for (i = 0; i < max; i++) {
/*
* keep track of first free slot
*/
if (k == -1 && !nb->owners[i])
k = i;

/* already present, reuse */
if (nb->owners[i] == event)
goto done;
}
/*
* not present, so grab a new slot
* starting either at:
*/
if (hwc->idx != -1) {
/* previous assignment */
i = hwc->idx;
} else if (k != -1) {
/* start from free slot found */
i = k;
} else {
/*
* event not found, no slot found in
* first pass, try again from the
* beginning
*/
i = 0;
}
j = i;
do {
old = cmpxchg(nb->owners+i, NULL, event);
if (!old)
break;
if (++i == max)
i = 0;
} while (i != j);
done:
if (!old)
return &nb->event_constraints[i];

return &emptyconstraint;
}
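
The following stand-alone sketch (not part of the patch) models the claim/release protocol described in the comment above, using C11 atomics in place of the kernel's cmpxchg(); the counter count and the "event" pointers are illustrative assumptions:

#include <stdatomic.h>
#include <stdio.h>

#define NUM_COUNTERS 4				/* assumed number of shared NB counters */

static _Atomic(void *) owners[NUM_COUNTERS];	/* NULL means the slot is free */

/* claim a free slot, scanning circularly from 'start'; -1 if none (empty constraint) */
static int claim_slot(void *event, int start)
{
	int i = start, j = start;

	do {
		void *expected = NULL;

		if (atomic_compare_exchange_strong(&owners[i], &expected, event))
			return i;
		if (++i == NUM_COUNTERS)
			i = 0;
	} while (i != j);

	return -1;
}

/* release a slot, but only if 'event' still owns it, like cmpxchg(owners+i, event, NULL) */
static void release_slot(void *event, int i)
{
	void *expected = event;

	atomic_compare_exchange_strong(&owners[i], &expected, NULL);
}

int main(void)
{
	int a, b;
	int slot_a = claim_slot(&a, 0);
	int slot_b = claim_slot(&b, 0);		/* cannot reuse the slot held by 'a' */

	printf("event a -> counter %d, event b -> counter %d\n", slot_a, slot_b);

	release_slot(&a, slot_a);
	release_slot(&b, slot_b);
	return 0;
}

Two cores of the same NorthBridge running this protocol can never end up owning the same slot, which is the guarantee amd_get_event_constraints() relies on before handing out nb->event_constraints[i].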

static int x86_event_sched_in(struct perf_event *event,
@@ -2465,7 +2612,8 @@ static __initconst struct x86_pmu amd_pmu = {
.apic = 1,
/* use highest bit to detect overflow */
.max_period = (1ULL << 47) - 1,
-	.get_event_constraints = amd_get_event_constraints
+	.get_event_constraints = amd_get_event_constraints,
+	.put_event_constraints = amd_put_event_constraints
};

static __init int p6_pmu_init(void)
@@ -2589,6 +2737,91 @@ static __init int intel_pmu_init(void)
return 0;
}

static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
{
struct amd_nb *nb;
int i;

nb = kmalloc(sizeof(struct amd_nb), GFP_KERNEL);
if (!nb)
return NULL;

memset(nb, 0, sizeof(*nb));
nb->nb_id = nb_id;

/*
* initialize all possible NB constraints
*/
for (i = 0; i < x86_pmu.num_events; i++) {
set_bit(i, nb->event_constraints[i].idxmsk);
nb->event_constraints[i].weight = 1;
}
return nb;
}

static void amd_pmu_cpu_online(int cpu)
{
struct cpu_hw_events *cpu1, *cpu2;
struct amd_nb *nb = NULL;
int i, nb_id;

if (boot_cpu_data.x86_max_cores < 2)
return;

/*
* function may be called too early in the
* boot process, in which case nb_id is bogus
*/
nb_id = amd_get_nb_id(cpu);
if (nb_id == BAD_APICID)
return;

cpu1 = &per_cpu(cpu_hw_events, cpu);
cpu1->amd_nb = NULL;

raw_spin_lock(&amd_nb_lock);

for_each_online_cpu(i) {
cpu2 = &per_cpu(cpu_hw_events, i);
nb = cpu2->amd_nb;
if (!nb)
continue;
if (nb->nb_id == nb_id)
goto found;
}

nb = amd_alloc_nb(cpu, nb_id);
if (!nb) {
pr_err("perf_events: failed NB allocation for CPU%d\n", cpu);
raw_spin_unlock(&amd_nb_lock);
return;
}
found:
nb->refcnt++;
cpu1->amd_nb = nb;

raw_spin_unlock(&amd_nb_lock);
}

static void amd_pmu_cpu_offline(int cpu)
{
struct cpu_hw_events *cpuhw;

if (boot_cpu_data.x86_max_cores < 2)
return;

cpuhw = &per_cpu(cpu_hw_events, cpu);

raw_spin_lock(&amd_nb_lock);

if (--cpuhw->amd_nb->refcnt == 0)
kfree(cpuhw->amd_nb);

cpuhw->amd_nb = NULL;

raw_spin_unlock(&amd_nb_lock);
}

static __init int amd_pmu_init(void)
{
/* Performance-monitoring supported from K7 and later: */
@@ -2601,6 +2834,11 @@ static __init int amd_pmu_init(void)
memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
sizeof(hw_cache_event_ids));

/*
* explicitly initialize the boot cpu, other cpus will get
* the cpu hotplug callbacks from smp_init()
*/
amd_pmu_cpu_online(smp_processor_id());
return 0;
}

@@ -2934,4 +3172,25 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
void hw_perf_event_setup_online(int cpu)
{
init_debug_store_on_cpu(cpu);

switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
amd_pmu_cpu_online(cpu);
break;
default:
return;
}
}

void hw_perf_event_setup_offline(int cpu)
{
init_debug_store_on_cpu(cpu);

switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
amd_pmu_cpu_offline(cpu);
break;
default:
return;
}
}
5 changes: 5 additions & 0 deletions trunk/kernel/perf_event.c
@@ -98,6 +98,7 @@ void __weak hw_perf_enable(void) { barrier(); }

void __weak hw_perf_event_setup(int cpu) { barrier(); }
void __weak hw_perf_event_setup_online(int cpu) { barrier(); }
void __weak hw_perf_event_setup_offline(int cpu) { barrier(); }

int __weak
hw_perf_group_sched_in(struct perf_event *group_leader,
@@ -5462,6 +5463,10 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
perf_event_exit_cpu(cpu);
break;

case CPU_DEAD:
hw_perf_event_setup_offline(cpu);
break;

default:
break;
}
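
The CPU_DEAD case works because hw_perf_event_setup_offline() is given a __weak, barrier()-only default here and overridden by the non-weak definition added to arch/x86/kernel/cpu/perf_event.c earlier in this diff. A minimal stand-alone illustration of that link-time override pattern (not from the patch; assumes GCC or Clang on an ELF target):

#include <stdio.h>

/* weak default: a no-op fallback, like the barrier() stubs in kernel/perf_event.c */
void __attribute__((weak)) hw_setup_offline(int cpu)
{
	printf("weak default ran for CPU %d\n", cpu);
}

int main(void)
{
	/*
	 * With no other definition linked in, the weak default runs.
	 * An object file providing a non-weak hw_setup_offline() would
	 * silently replace it at link time -- which is how the x86 code
	 * ends up handling this hook.
	 */
	hw_setup_offline(3);
	return 0;
}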