Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 189429
b: refs/heads/master
c: b38b24e
h: refs/heads/master
i:
  189427: a501001
v: v3
  • Loading branch information
Peter Zijlstra authored and Ingo Molnar committed Apr 2, 2010
1 parent e88c008 commit b85731d
Show file tree
Hide file tree
Showing 3 changed files with 53 additions and 37 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 85257024096a96fc5c00ce59d685f62bbed3ad95
refs/heads/master: b38b24ead33417146e051453d04bf60b8d2d7e25
8 changes: 5 additions & 3 deletions trunk/arch/x86/kernel/cpu/perf_event.c
Original file line number Diff line number Diff line change
Expand Up @@ -158,7 +158,7 @@ struct x86_pmu {
struct perf_event *event);
struct event_constraint *event_constraints;

void (*cpu_prepare)(int cpu);
int (*cpu_prepare)(int cpu);
void (*cpu_starting)(int cpu);
void (*cpu_dying)(int cpu);
void (*cpu_dead)(int cpu);
Expand Down Expand Up @@ -1333,11 +1333,12 @@ static int __cpuinit
x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
int ret = NOTIFY_OK;

switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
if (x86_pmu.cpu_prepare)
x86_pmu.cpu_prepare(cpu);
ret = x86_pmu.cpu_prepare(cpu);
break;

case CPU_STARTING:
Expand All @@ -1350,6 +1351,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
x86_pmu.cpu_dying(cpu);
break;

case CPU_UP_CANCELED:
case CPU_DEAD:
if (x86_pmu.cpu_dead)
x86_pmu.cpu_dead(cpu);
Expand All @@ -1359,7 +1361,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
break;
}

return NOTIFY_OK;
return ret;
}

static void __init pmu_check_apic(void)
Expand Down
80 changes: 47 additions & 33 deletions trunk/arch/x86/kernel/cpu/perf_event_amd.c
Original file line number Diff line number Diff line change
Expand Up @@ -137,6 +137,13 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc)
return (hwc->config & 0xe0) == 0xe0;
}

/*
 * True when this CPU's events structure carries a fully initialised
 * northbridge descriptor (allocated, and its nb_id has been assigned
 * a real value rather than the -1 placeholder).
 */
static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
	return cpuc->amd_nb && cpuc->amd_nb->nb_id != -1;
}

static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
struct perf_event *event)
{
Expand All @@ -147,7 +154,7 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
/*
* only care about NB events
*/
if (!(nb && amd_is_nb_event(hwc)))
if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
return;

/*
Expand Down Expand Up @@ -214,7 +221,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
/*
* if not NB event or no NB, then no constraints
*/
if (!(nb && amd_is_nb_event(hwc)))
if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
return &unconstrained;

/*
Expand Down Expand Up @@ -293,51 +300,55 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
return nb;
}

static void amd_pmu_cpu_online(int cpu)
/*
 * CPU_UP_PREPARE hook: allocate the per-CPU northbridge bookkeeping
 * structure before @cpu comes online.  The nb_id is set to the -1
 * placeholder here; it is presumably filled in later once the real id
 * is known (NOTE(review): confirm against the cpu_starting callback).
 *
 * Returns NOTIFY_OK on success, NOTIFY_BAD if the allocation fails.
 */
static int amd_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	/* The slot must be empty at prepare time. */
	WARN_ON_ONCE(cpuc->amd_nb);

	/* Skip the allocation entirely on parts with fewer than 2 cores. */
	if (boot_cpu_data.x86_max_cores >= 2) {
		cpuc->amd_nb = amd_alloc_nb(cpu, -1);
		if (!cpuc->amd_nb)
			return NOTIFY_BAD;
	}

	return NOTIFY_OK;
}

static void amd_pmu_cpu_starting(int cpu)
{
struct cpu_hw_events *cpu1, *cpu2;
struct amd_nb *nb = NULL;
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
struct amd_nb *nb;
int i, nb_id;

if (boot_cpu_data.x86_max_cores < 2)
return;

/*
* function may be called too early in the
* boot process, in which case nb_id is bogus
*/
nb_id = amd_get_nb_id(cpu);
if (nb_id == BAD_APICID)
return;

cpu1 = &per_cpu(cpu_hw_events, cpu);
cpu1->amd_nb = NULL;
WARN_ON_ONCE(nb_id == BAD_APICID);

raw_spin_lock(&amd_nb_lock);

for_each_online_cpu(i) {
cpu2 = &per_cpu(cpu_hw_events, i);
nb = cpu2->amd_nb;
if (!nb)
nb = per_cpu(cpu_hw_events, i).amd_nb;
if (WARN_ON_ONCE(!nb))
continue;
if (nb->nb_id == nb_id)
goto found;
}

nb = amd_alloc_nb(cpu, nb_id);
if (!nb) {
pr_err("perf_events: failed NB allocation for CPU%d\n", cpu);
raw_spin_unlock(&amd_nb_lock);
return;
if (nb->nb_id == nb_id) {
kfree(cpuc->amd_nb);
cpuc->amd_nb = nb;
break;
}
}
found:
nb->refcnt++;
cpu1->amd_nb = nb;

cpuc->amd_nb->nb_id = nb_id;
cpuc->amd_nb->refcnt++;

raw_spin_unlock(&amd_nb_lock);
}

static void amd_pmu_cpu_offline(int cpu)
static void amd_pmu_cpu_dead(int cpu)
{
struct cpu_hw_events *cpuhw;

Expand All @@ -349,8 +360,10 @@ static void amd_pmu_cpu_offline(int cpu)
raw_spin_lock(&amd_nb_lock);

if (cpuhw->amd_nb) {
if (--cpuhw->amd_nb->refcnt == 0)
kfree(cpuhw->amd_nb);
struct amd_nb *nb = cpuhw->amd_nb;

if (nb->nb_id == -1 || --nb->refcnt == 0)
kfree(nb);

cpuhw->amd_nb = NULL;
}
Expand Down Expand Up @@ -379,8 +392,9 @@ static __initconst struct x86_pmu amd_pmu = {
.get_event_constraints = amd_get_event_constraints,
.put_event_constraints = amd_put_event_constraints,

.cpu_prepare = amd_pmu_cpu_online,
.cpu_dead = amd_pmu_cpu_offline,
.cpu_prepare = amd_pmu_cpu_prepare,
.cpu_starting = amd_pmu_cpu_starting,
.cpu_dead = amd_pmu_cpu_dead,
};

static __init int amd_pmu_init(void)
Expand Down

0 comments on commit b85731d

Please sign in to comment.