Skip to content

Commit

Permalink
perf/x86/intel/uncore: Convert to hotplug state machine
Browse files Browse the repository at this point in the history
Convert the notifiers to state machine states and let the core code do the
setup for the already online CPUs. This notifier has a completely undocumented
ordering requirement versus perf hardcoded in the notifier priority. This
ordering is only required for CPU down, so that hardware migration happens
before the core is notified about the outgoing CPU.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160713153333.752695801@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
  • Loading branch information
Thomas Gleixner authored and Ingo Molnar committed Jul 14, 2016
1 parent 95ca792 commit 1a246b9
Show file tree
Hide file tree
Showing 2 changed files with 48 additions and 88 deletions.
133 changes: 45 additions & 88 deletions arch/x86/events/intel/uncore.c
Original file line number Diff line number Diff line change
Expand Up @@ -1034,7 +1034,7 @@ static void uncore_pci_exit(void)
}
}

static void uncore_cpu_dying(int cpu)
static int uncore_cpu_dying(unsigned int cpu)
{
struct intel_uncore_type *type, **types = uncore_msr_uncores;
struct intel_uncore_pmu *pmu;
Expand All @@ -1051,16 +1051,19 @@ static void uncore_cpu_dying(int cpu)
uncore_box_exit(box);
}
}
return 0;
}

static void uncore_cpu_starting(int cpu, bool init)
static int first_init;

static int uncore_cpu_starting(unsigned int cpu)
{
struct intel_uncore_type *type, **types = uncore_msr_uncores;
struct intel_uncore_pmu *pmu;
struct intel_uncore_box *box;
int i, pkg, ncpus = 1;

if (init) {
if (first_init) {
/*
* On init we get the number of online cpus in the package
* and set refcount for all of them.
Expand All @@ -1081,9 +1084,11 @@ static void uncore_cpu_starting(int cpu, bool init)
uncore_box_init(box);
}
}

return 0;
}

static int uncore_cpu_prepare(int cpu)
static int uncore_cpu_prepare(unsigned int cpu)
{
struct intel_uncore_type *type, **types = uncore_msr_uncores;
struct intel_uncore_pmu *pmu;
Expand Down Expand Up @@ -1146,13 +1151,13 @@ static void uncore_change_context(struct intel_uncore_type **uncores,
uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
}

static void uncore_event_exit_cpu(int cpu)
static int uncore_event_cpu_offline(unsigned int cpu)
{
int target;

/* Check if exiting cpu is used for collecting uncore events */
if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
return;
return 0;

/* Find a new cpu to collect uncore events */
target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
Expand All @@ -1165,9 +1170,10 @@ static void uncore_event_exit_cpu(int cpu)

uncore_change_context(uncore_msr_uncores, cpu, target);
uncore_change_context(uncore_pci_uncores, cpu, target);
return 0;
}

static void uncore_event_init_cpu(int cpu)
static int uncore_event_cpu_online(unsigned int cpu)
{
int target;

Expand All @@ -1177,50 +1183,15 @@ static void uncore_event_init_cpu(int cpu)
*/
target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu));
if (target < nr_cpu_ids)
return;
return 0;

cpumask_set_cpu(cpu, &uncore_cpu_mask);

uncore_change_context(uncore_msr_uncores, -1, cpu);
uncore_change_context(uncore_pci_uncores, -1, cpu);
return 0;
}

/*
 * Old-style CPU hotplug notifier (removed by this commit in favor of
 * cpuhp_setup_state() callbacks). Decodes the hotplug action and
 * dispatches to the per-phase uncore handlers.
 */
static int uncore_cpu_notifier(struct notifier_block *self,
unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;

/* Mask off CPU_TASKS_FROZEN so suspend/resume transitions are handled
 * the same way as normal hotplug. */
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
/* Only the prepare step can fail; propagate its errno. */
return notifier_from_errno(uncore_cpu_prepare(cpu));

case CPU_STARTING:
uncore_cpu_starting(cpu, false);
/* fallthrough: a freshly started CPU is also initialized as a
 * potential uncore event collector. */
case CPU_DOWN_FAILED:
uncore_event_init_cpu(cpu);
break;

case CPU_UP_CANCELED:
case CPU_DYING:
uncore_cpu_dying(cpu);
break;

case CPU_DOWN_PREPARE:
uncore_event_exit_cpu(cpu);
break;
}
return NOTIFY_OK;
}

static struct notifier_block uncore_cpu_nb = {
.notifier_call = uncore_cpu_notifier,
/*
 * to migrate uncore events, our notifier should be executed
 * before perf core's notifier.
 */
.priority = CPU_PRI_PERF + 1,
};

static int __init type_pmu_register(struct intel_uncore_type *type)
{
int i, ret;
Expand Down Expand Up @@ -1264,41 +1235,6 @@ static int __init uncore_cpu_init(void)
return ret;
}

/*
 * Runs on a specific CPU via smp_call_function_single() from
 * uncore_cpumask_init(); performs the first-time (init=true) starting
 * sequence for that CPU. Removed by this commit — the cpuhp core now
 * invokes the starting callback on each online CPU itself.
 */
static void __init uncore_cpu_setup(void *dummy)
{
uncore_cpu_starting(smp_processor_id(), true);
}

/* Lazy to avoid allocation of a few bytes for the normal case */
static __initdata DECLARE_BITMAP(packages, MAX_LOCAL_APIC);

/*
 * One-shot init-time walk over the already-online CPUs (removed by this
 * commit; the cpuhp state machine now performs the equivalent setup).
 * Visits the first online CPU of each physical package exactly once,
 * using the 'packages' bitmap to deduplicate, then registers the
 * old-style notifier for subsequent hotplug events.
 *
 * @msr: whether MSR uncores are in use and thus per-package allocation
 *       via uncore_cpu_prepare() is required.
 */
static int __init uncore_cpumask_init(bool msr)
{
unsigned int cpu;

for_each_online_cpu(cpu) {
unsigned int pkg = topology_logical_package_id(cpu);
int ret;

/* Only the first CPU seen in each package does the work. */
if (test_and_set_bit(pkg, packages))
continue;
/*
 * The first online cpu of each package allocates and takes
 * the refcounts for all other online cpus in that package.
 * If msrs are not enabled no allocation is required.
 */
if (msr) {
ret = uncore_cpu_prepare(cpu);
if (ret)
return ret;
}
uncore_event_init_cpu(cpu);
/* Run the starting sequence on the target CPU itself, waiting
 * for completion (last argument = 1). */
smp_call_function_single(cpu, uncore_cpu_setup, NULL, 1);
}
__register_cpu_notifier(&uncore_cpu_nb);
return 0;
}

#define X86_UNCORE_MODEL_MATCH(model, init) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }

Expand Down Expand Up @@ -1420,29 +1356,50 @@ static int __init intel_uncore_init(void)
if (cret && pret)
return -ENODEV;

cpu_notifier_register_begin();
ret = uncore_cpumask_init(!cret);
if (ret)
goto err;
cpu_notifier_register_done();
/*
* Install callbacks. Core will call them for each online cpu.
*
* The first online cpu of each package allocates and takes
* the refcounts for all other online cpus in that package.
* If msrs are not enabled no allocation is required and
* uncore_cpu_prepare() is not called for each online cpu.
*/
if (!cret) {
ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
"PERF_X86_UNCORE_PREP",
uncore_cpu_prepare, NULL);
if (ret)
goto err;
} else {
cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
"PERF_X86_UNCORE_PREP",
uncore_cpu_prepare, NULL);
}
first_init = 1;
cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
"AP_PERF_X86_UNCORE_STARTING",
uncore_cpu_starting, uncore_cpu_dying);
first_init = 0;
cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
"AP_PERF_X86_UNCORE_ONLINE",
uncore_event_cpu_online, uncore_event_cpu_offline);
return 0;

err:
/* Undo box->init_box() */
on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
uncore_types_exit(uncore_msr_uncores);
uncore_pci_exit();
cpu_notifier_register_done();
return ret;
}
module_init(intel_uncore_init);

/*
 * Module teardown.
 *
 * NOTE(review): this span is a rendered diff, so the old notifier-based
 * teardown (cpu_notifier_register_begin/__unregister_cpu_notifier/
 * cpu_notifier_register_done) and the new cpuhp_remove_state_nocalls()
 * teardown appear interleaved below; in the actual post-commit file only
 * the cpuhp_remove_state_nocalls() lines remain — confirm against the tree.
 * States are removed in reverse order of their setup in intel_uncore_init().
 */
static void __exit intel_uncore_exit(void)
{
cpu_notifier_register_begin();
__unregister_cpu_notifier(&uncore_cpu_nb);
cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
uncore_types_exit(uncore_msr_uncores);
uncore_pci_exit();
cpu_notifier_register_done();
}
module_exit(intel_uncore_exit);
3 changes: 3 additions & 0 deletions include/linux/cpuhotplug.h
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ enum cpuhp_state {
CPUHP_CREATE_THREADS,
CPUHP_PERF_PREPARE,
CPUHP_PERF_X86_PREPARE,
CPUHP_PERF_X86_UNCORE_PREP,
CPUHP_NOTIFY_PREPARE,
CPUHP_BRINGUP_CPU,
CPUHP_AP_IDLE_DEAD,
Expand All @@ -18,6 +19,7 @@ enum cpuhp_state {
CPUHP_AP_IRQ_ARMADA_CASC_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
CPUHP_AP_PERF_X86_UNCORE_STARTING,
CPUHP_AP_PERF_X86_STARTING,
CPUHP_AP_NOTIFY_STARTING,
CPUHP_AP_ONLINE,
Expand All @@ -27,6 +29,7 @@ enum cpuhp_state {
CPUHP_AP_X86_VDSO_VMA_ONLINE,
CPUHP_AP_PERF_ONLINE,
CPUHP_AP_PERF_X86_ONLINE,
CPUHP_AP_PERF_X86_UNCORE_ONLINE,
CPUHP_AP_NOTIFY_ONLINE,
CPUHP_AP_ONLINE_DYN,
CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
Expand Down

0 comments on commit 1a246b9

Please sign in to comment.