drivers/perf/fsl_imx8_ddr_perf.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2017 NXP
 * Copyright 2016 Freescale Semiconductor, Inc.
 */

#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/perf_event.h>
#include <linux/slab.h>

#define COUNTER_CNTL		0x0
#define COUNTER_READ		0x20

#define COUNTER_DPCR1		0x30

#define CNTL_OVER		0x1
#define CNTL_CLEAR		0x2
#define CNTL_EN			0x4
#define CNTL_EN_MASK		0xFFFFFFFB
#define CNTL_CLEAR_MASK		0xFFFFFFFD
#define CNTL_OVER_MASK		0xFFFFFFFE
#define CNTL_CSV_SHIFT		24
#define CNTL_CSV_MASK		(0xFF << CNTL_CSV_SHIFT)

#define EVENT_CYCLES_ID		0
#define EVENT_CYCLES_COUNTER	0
#define NUM_COUNTERS		4

#define AXI_MASKING_REVERT	0xffff0000	/* AXI_MASKING(MSB 16bits) + AXI_ID(LSB 16bits) */

#define to_ddr_pmu(p)		container_of(p, struct ddr_pmu, pmu)

#define DDR_PERF_DEV_NAME	"imx8_ddr"
#define DDR_CPUHP_CB_NAME	DDR_PERF_DEV_NAME "_perf_pmu"

static DEFINE_IDA(ddr_ida);

/* DDR Perf hardware feature */
#define DDR_CAP_AXI_ID_FILTER			0x1     /* support AXI ID filter */
#define DDR_CAP_AXI_ID_FILTER_ENHANCED		0x3     /* support enhanced AXI ID filter */

struct fsl_ddr_devtype_data {
	unsigned int quirks;    /* quirks needed for different DDR Perf core */
};

static const struct fsl_ddr_devtype_data imx8_devtype_data;

static const struct fsl_ddr_devtype_data imx8m_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER,
};

static const struct fsl_ddr_devtype_data imx8mp_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER_ENHANCED,
};

static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
	{ .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
	{ .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
	{ .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);

struct ddr_pmu {
	struct pmu pmu;
	void __iomem *base;
	unsigned int cpu;
	struct hlist_node node;
	struct device *dev;
	struct perf_event *events[NUM_COUNTERS];
	int active_events;
	enum cpuhp_state cpuhp_state;
	const struct fsl_ddr_devtype_data *devtype_data;
	int irq;
	int id;
};

enum ddr_perf_filter_capabilities {
	PERF_CAP_AXI_ID_FILTER = 0,
	PERF_CAP_AXI_ID_FILTER_ENHANCED,
	PERF_CAP_AXI_ID_FEAT_MAX,
};

static u32 ddr_perf_filter_cap_get(struct ddr_pmu *pmu, int cap)
{
	u32 quirks = pmu->devtype_data->quirks;

	switch (cap) {
	case PERF_CAP_AXI_ID_FILTER:
		return !!(quirks & DDR_CAP_AXI_ID_FILTER);
	case PERF_CAP_AXI_ID_FILTER_ENHANCED:
		quirks &= DDR_CAP_AXI_ID_FILTER_ENHANCED;
		return quirks == DDR_CAP_AXI_ID_FILTER_ENHANCED;
	default:
		WARN(1, "unknown filter cap %d\n", cap);
	}

	return 0;
}

static ssize_t ddr_perf_filter_cap_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);
	struct dev_ext_attribute *ea =
		container_of(attr, struct dev_ext_attribute, attr);
	int cap = (long)ea->var;

	return snprintf(buf, PAGE_SIZE, "%u\n",
			ddr_perf_filter_cap_get(pmu, cap));
}

#define PERF_EXT_ATTR_ENTRY(_name, _func, _var)			\
	(&((struct dev_ext_attribute) {					\
		__ATTR(_name, 0444, _func, NULL), (void *)_var		\
	}).attr.attr)

#define PERF_FILTER_EXT_ATTR_ENTRY(_name, _var)			\
	PERF_EXT_ATTR_ENTRY(_name, ddr_perf_filter_cap_show, _var)

static struct attribute *ddr_perf_filter_cap_attr[] = {
	PERF_FILTER_EXT_ATTR_ENTRY(filter, PERF_CAP_AXI_ID_FILTER),
	PERF_FILTER_EXT_ATTR_ENTRY(enhanced_filter, PERF_CAP_AXI_ID_FILTER_ENHANCED),
	NULL,
};

static struct attribute_group ddr_perf_filter_cap_attr_group = {
	.name = "caps",
	.attrs = ddr_perf_filter_cap_attr,
};
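/*
 * Through the "caps" group above, each filter capability is visible from
 * userspace. A sketch of where the files land (the instance number is an
 * assumption; the first registered PMU is typically "imx8_ddr0"):
 *
 *	/sys/bus/event_source/devices/imx8_ddr0/caps/filter
 *	/sys/bus/event_source/devices/imx8_ddr0/caps/enhanced_filter
 *
 * Each file reads back "0" or "1" according to the devtype quirks.
 */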
static ssize_t ddr_perf_cpumask_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
}

static struct device_attribute ddr_perf_cpumask_attr =
	__ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL);

static struct attribute *ddr_perf_cpumask_attrs[] = {
	&ddr_perf_cpumask_attr.attr,
	NULL,
};

static struct attribute_group ddr_perf_cpumask_attr_group = {
	.attrs = ddr_perf_cpumask_attrs,
};

static ssize_t
ddr_pmu_event_show(struct device *dev, struct device_attribute *attr,
		   char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}

#define IMX8_DDR_PMU_EVENT_ATTR(_name, _id)				\
	(&((struct perf_pmu_events_attr[]) {				\
		{ .attr = __ATTR(_name, 0444, ddr_pmu_event_show, NULL),\
		  .id = _id, }						\
	})[0].attr.attr)

static struct attribute *ddr_perf_events_attrs[] = {
	IMX8_DDR_PMU_EVENT_ATTR(cycles, EVENT_CYCLES_ID),
	IMX8_DDR_PMU_EVENT_ATTR(selfresh, 0x01),
	IMX8_DDR_PMU_EVENT_ATTR(read-accesses, 0x04),
	IMX8_DDR_PMU_EVENT_ATTR(write-accesses, 0x05),
	IMX8_DDR_PMU_EVENT_ATTR(read-queue-depth, 0x08),
	IMX8_DDR_PMU_EVENT_ATTR(write-queue-depth, 0x09),
	IMX8_DDR_PMU_EVENT_ATTR(lp-read-credit-cnt, 0x10),
	IMX8_DDR_PMU_EVENT_ATTR(hp-read-credit-cnt, 0x11),
	IMX8_DDR_PMU_EVENT_ATTR(write-credit-cnt, 0x12),
	IMX8_DDR_PMU_EVENT_ATTR(read-command, 0x20),
	IMX8_DDR_PMU_EVENT_ATTR(write-command, 0x21),
	IMX8_DDR_PMU_EVENT_ATTR(read-modify-write-command, 0x22),
	IMX8_DDR_PMU_EVENT_ATTR(hp-read, 0x23),
	IMX8_DDR_PMU_EVENT_ATTR(hp-req-nocredit, 0x24),
	IMX8_DDR_PMU_EVENT_ATTR(hp-xact-credit, 0x25),
	IMX8_DDR_PMU_EVENT_ATTR(lp-req-nocredit, 0x26),
	IMX8_DDR_PMU_EVENT_ATTR(lp-xact-credit, 0x27),
	IMX8_DDR_PMU_EVENT_ATTR(wr-xact-credit, 0x29),
	IMX8_DDR_PMU_EVENT_ATTR(read-cycles, 0x2a),
	IMX8_DDR_PMU_EVENT_ATTR(write-cycles, 0x2b),
	IMX8_DDR_PMU_EVENT_ATTR(read-write-transition, 0x30),
	IMX8_DDR_PMU_EVENT_ATTR(precharge, 0x31),
	IMX8_DDR_PMU_EVENT_ATTR(activate, 0x32),
	IMX8_DDR_PMU_EVENT_ATTR(load-mode, 0x33),
	IMX8_DDR_PMU_EVENT_ATTR(perf-mwr, 0x34),
	IMX8_DDR_PMU_EVENT_ATTR(read, 0x35),
	IMX8_DDR_PMU_EVENT_ATTR(read-activate, 0x36),
	IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37),
	IMX8_DDR_PMU_EVENT_ATTR(write, 0x38),
	IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39),
	IMX8_DDR_PMU_EVENT_ATTR(axid-read, 0x41),
	IMX8_DDR_PMU_EVENT_ATTR(axid-write, 0x42),
	NULL,
};

static struct attribute_group ddr_perf_events_attr_group = {
	.name = "events",
	.attrs = ddr_perf_events_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-7");
PMU_FORMAT_ATTR(axi_id, "config1:0-15");
PMU_FORMAT_ATTR(axi_mask, "config1:16-31");

static struct attribute *ddr_perf_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_axi_id.attr,
	&format_attr_axi_mask.attr,
	NULL,
};

static struct attribute_group ddr_perf_format_attr_group = {
	.name = "format",
	.attrs = ddr_perf_format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&ddr_perf_events_attr_group,
	&ddr_perf_format_attr_group,
	&ddr_perf_cpumask_attr_group,
	&ddr_perf_filter_cap_attr_group,
	NULL,
};
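/*
 * Userspace usage sketch for the events/format groups above (the instance
 * name "imx8_ddr0" and the axi_id/axi_mask values are illustrative
 * assumptions, not taken from this file):
 *
 *	perf stat -a -e imx8_ddr0/read-cycles/,imx8_ddr0/write-cycles/ sleep 1
 *
 * On parts with AXI ID filtering, traffic from a single master can be
 * selected via the filter fields, where axi_id occupies config1 bits 0-15
 * and axi_mask bits 16-31 as declared in the format attributes:
 *
 *	perf stat -a -e imx8_ddr0/axid-read,axi_mask=0xffff,axi_id=0x0000/ sleep 1
 */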
static bool ddr_perf_is_filtered(struct perf_event *event)
{
	return event->attr.config == 0x41 || event->attr.config == 0x42;
}

static u32 ddr_perf_filter_val(struct perf_event *event)
{
	return event->attr.config1;
}

static bool ddr_perf_filters_compatible(struct perf_event *a,
					struct perf_event *b)
{
	if (!ddr_perf_is_filtered(a))
		return true;
	if (!ddr_perf_is_filtered(b))
		return true;
	return ddr_perf_filter_val(a) == ddr_perf_filter_val(b);
}

static bool ddr_perf_is_enhanced_filtered(struct perf_event *event)
{
	unsigned int filt;
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);

	filt = pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED;
	return (filt == DDR_CAP_AXI_ID_FILTER_ENHANCED) &&
		ddr_perf_is_filtered(event);
}

static u32 ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event)
{
	int i;

	/*
	 * Always map the cycle event to counter 0: the cycles counter
	 * is dedicated to the cycle event and can't be used for any
	 * other event.
	 */
	if (event == EVENT_CYCLES_ID) {
		if (pmu->events[EVENT_CYCLES_COUNTER] == NULL)
			return EVENT_CYCLES_COUNTER;
		else
			return -ENOENT;
	}

	for (i = 1; i < NUM_COUNTERS; i++) {
		if (pmu->events[i] == NULL)
			return i;
	}

	return -ENOENT;
}

static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter)
{
	pmu->events[counter] = NULL;
}

static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
{
	struct perf_event *event = pmu->events[counter];
	void __iomem *base = pmu->base;

	/*
	 * Return bytes instead of bursts from the DDR transactions for
	 * the axid-read and axid-write events if the PMU core supports
	 * the enhanced filter.
	 */
	base += ddr_perf_is_enhanced_filtered(event) ? COUNTER_DPCR1 :
						       COUNTER_READ;
	return readl_relaxed(base + counter * 4);
}

static int ddr_perf_event_init(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct perf_event *sibling;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	if (event->cpu < 0) {
		dev_warn(pmu->dev, "Can't provide per-task data!\n");
		return -EOPNOTSUPP;
	}

	/*
	 * We must NOT create groups containing mixed PMUs, although software
	 * events are acceptable (for example to create a CCN group
	 * periodically read when a hrtimer aka cpu-clock leader triggers).
	 */
	if (event->group_leader->pmu != event->pmu &&
			!is_software_event(event->group_leader))
		return -EINVAL;

	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
		if (!ddr_perf_filters_compatible(event, event->group_leader))
			return -EINVAL;
		for_each_sibling_event(sibling, event->group_leader) {
			if (!ddr_perf_filters_compatible(event, sibling))
				return -EINVAL;
		}
	}

	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling->pmu != event->pmu &&
				!is_software_event(sibling))
			return -EINVAL;
	}

	event->cpu = pmu->cpu;
	hwc->idx = -1;

	return 0;
}

static void ddr_perf_event_update(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;
	int counter = hwc->idx;

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = ddr_perf_read_counter(pmu, counter);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			new_raw_count) != prev_raw_count);

	delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;

	local64_add(delta, &event->count);
}
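/*
 * Worked example for the 32-bit wrap handling above (values illustrative):
 * the hardware counters are 32 bits wide, so with prev = 0xFFFFFFF0 and
 * new = 0x00000010 after a wrap, the unmasked 64-bit difference is
 * 0xFFFFFFFF00000020; masking with 0xFFFFFFFF recovers the true
 * increment of 0x20.
 */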
static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
				  int counter, bool enable)
{
	u8 reg = counter * 4 + COUNTER_CNTL;
	int val;

	if (enable) {
		/*
		 * The cycle counter is special: clearing it requires
		 * writing 0 and then 1 to its CLEAR bit. The other
		 * counters only need a 0 written to CLEAR, which the
		 * hardware then flips back to 1. The enable flow below
		 * is harmless for all counters.
		 */
		writel(0, pmu->base + reg);
		val = CNTL_EN | CNTL_CLEAR;
		val |= FIELD_PREP(CNTL_CSV_MASK, config);
		writel(val, pmu->base + reg);
	} else {
		/* Disable counter */
		val = readl_relaxed(pmu->base + reg) & CNTL_EN_MASK;
		writel(val, pmu->base + reg);
	}
}
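/*
 * For reference, the per-counter CNTL layout implied by the defines at the
 * top of this file (bit positions derived from the masks, not from a
 * datasheet):
 *
 *	bit 0		OVER   - overflow flag
 *	bit 1		CLEAR  - counter clear
 *	bit 2		EN     - counter enable
 *	bits 24-31	CSV    - event code selected for this counter
 */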
static void ddr_perf_event_start(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	local64_set(&hwc->prev_count, 0);

	ddr_perf_counter_enable(pmu, event->attr.config, counter, true);

	hwc->state = 0;
}

static int ddr_perf_event_add(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter;
	int cfg = event->attr.config;
	int cfg1 = event->attr.config1;

	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
		int i;

		for (i = 1; i < NUM_COUNTERS; i++) {
			if (pmu->events[i] &&
			    !ddr_perf_filters_compatible(event, pmu->events[i]))
				return -EINVAL;
		}

		if (ddr_perf_is_filtered(event)) {
			/* revert axi id masking(axi_mask) value */
			cfg1 ^= AXI_MASKING_REVERT;
			writel(cfg1, pmu->base + COUNTER_DPCR1);
		}
	}

	counter = ddr_perf_alloc_counter(pmu, cfg);
	if (counter < 0) {
		dev_dbg(pmu->dev, "There are not enough counters\n");
		return -EOPNOTSUPP;
	}

	pmu->events[counter] = event;
	pmu->active_events++;
	hwc->idx = counter;

	hwc->state |= PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		ddr_perf_event_start(event, flags);

	return 0;
}

static void ddr_perf_event_stop(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	ddr_perf_counter_enable(pmu, event->attr.config, counter, false);
	ddr_perf_event_update(event);

	hwc->state |= PERF_HES_STOPPED;
}

static void ddr_perf_event_del(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	ddr_perf_event_stop(event, PERF_EF_UPDATE);

	ddr_perf_free_counter(pmu, counter);
	pmu->active_events--;
	hwc->idx = -1;
}

static void ddr_perf_pmu_enable(struct pmu *pmu)
{
	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);

	/* enable cycle counter if cycles is not in the active event list */
	if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
		ddr_perf_counter_enable(ddr_pmu,
				      EVENT_CYCLES_ID,
				      EVENT_CYCLES_COUNTER,
				      true);
}

static void ddr_perf_pmu_disable(struct pmu *pmu)
{
	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);

	if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
		ddr_perf_counter_enable(ddr_pmu,
				      EVENT_CYCLES_ID,
				      EVENT_CYCLES_COUNTER,
				      false);
}

static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
			 struct device *dev)
{
	*pmu = (struct ddr_pmu) {
		.pmu = (struct pmu) {
			.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
			.task_ctx_nr = perf_invalid_context,
			.attr_groups = attr_groups,
			.event_init  = ddr_perf_event_init,
			.add	     = ddr_perf_event_add,
			.del	     = ddr_perf_event_del,
			.start	     = ddr_perf_event_start,
			.stop	     = ddr_perf_event_stop,
			.read	     = ddr_perf_event_update,
			.pmu_enable  = ddr_perf_pmu_enable,
			.pmu_disable = ddr_perf_pmu_disable,
		},
		.base = base,
		.dev = dev,
	};

	pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL);
	return pmu->id;
}

static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
{
	int i;
	struct ddr_pmu *pmu = (struct ddr_pmu *) p;
	struct perf_event *event, *cycle_event = NULL;

	/* all counters stop if the cycle counter is disabled */
	ddr_perf_counter_enable(pmu,
			      EVENT_CYCLES_ID,
			      EVENT_CYCLES_COUNTER,
			      false);

	/*
	 * When the cycle counter overflows, all counters are stopped,
	 * and an IRQ is raised. If any other counter overflows, it
	 * continues counting, and no IRQ is raised.
	 *
	 * Cycles occur at least 4 times as often as other events, so we
	 * can update all events on a cycle counter overflow and not
	 * lose events.
	 */
	for (i = 0; i < NUM_COUNTERS; i++) {

		if (!pmu->events[i])
			continue;

		event = pmu->events[i];

		ddr_perf_event_update(event);

		if (event->hw.idx == EVENT_CYCLES_COUNTER)
			cycle_event = event;
	}

	ddr_perf_counter_enable(pmu,
			      EVENT_CYCLES_ID,
			      EVENT_CYCLES_COUNTER,
			      true);

	if (cycle_event)
		ddr_perf_event_update(cycle_event);

	return IRQ_HANDLED;
}

static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
	int target;

	if (cpu != pmu->cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&pmu->pmu, cpu, target);
	pmu->cpu = target;

	WARN_ON(irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu)));

	return 0;
}

static int ddr_perf_probe(struct platform_device *pdev)
{
	struct ddr_pmu *pmu;
	struct device_node *np;
	void __iomem *base;
	char *name;
	int num;
	int ret;
	int irq;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	np = pdev->dev.of_node;

	pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	num = ddr_perf_init(pmu, base, &pdev->dev);

	platform_set_drvdata(pdev, pmu);

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d",
			      num);
	if (!name)
		return -ENOMEM;

	pmu->devtype_data = of_device_get_match_data(&pdev->dev);

	pmu->cpu = raw_smp_processor_id();
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      DDR_CPUHP_CB_NAME,
				      NULL,
				      ddr_perf_offline_cpu);

	if (ret < 0) {
		dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n");
		goto cpuhp_state_err;
	}

	pmu->cpuhp_state = ret;

	/* Register the pmu instance for cpu hotplug */
	ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
	if (ret) {
		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
		goto cpuhp_instance_err;
	}

	/* Request irq */
	irq = of_irq_get(np, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "Failed to get irq: %d", irq);
		ret = irq;
		goto ddr_perf_err;
	}

	ret = devm_request_irq(&pdev->dev, irq,
			       ddr_perf_irq_handler,
			       IRQF_NOBALANCING | IRQF_NO_THREAD,
			       DDR_CPUHP_CB_NAME,
			       pmu);
	if (ret < 0) {
		dev_err(&pdev->dev, "Request irq failed: %d", ret);
		goto ddr_perf_err;
	}

	pmu->irq = irq;
	ret = irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu));
	if (ret) {
		dev_err(pmu->dev, "Failed to set interrupt affinity!\n");
		goto ddr_perf_err;
	}

	ret = perf_pmu_register(&pmu->pmu, name, -1);
	if (ret)
		goto ddr_perf_err;

	return 0;

ddr_perf_err:
	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
cpuhp_instance_err:
	cpuhp_remove_multi_state(pmu->cpuhp_state);
cpuhp_state_err:
	ida_simple_remove(&ddr_ida, pmu->id);
	dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
	return ret;
}

static int ddr_perf_remove(struct platform_device *pdev)
{
	struct ddr_pmu *pmu = platform_get_drvdata(pdev);

	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
	cpuhp_remove_multi_state(pmu->cpuhp_state);
	irq_set_affinity_hint(pmu->irq, NULL);

	perf_pmu_unregister(&pmu->pmu);

	ida_simple_remove(&ddr_ida, pmu->id);
	return 0;
}

static struct platform_driver imx_ddr_pmu_driver = {
	.driver         = {
		.name   = "imx-ddr-pmu",
		.of_match_table = imx_ddr_pmu_dt_ids,
	},
	.probe          = ddr_perf_probe,
	.remove         = ddr_perf_remove,
};

module_platform_driver(imx_ddr_pmu_driver);
MODULE_LICENSE("GPL v2");
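/*
 * Example device tree node this driver binds against (the unit address,
 * register size, and interrupt number are illustrative placeholders, not
 * taken from this file; only the compatible string comes from the match
 * table above):
 *
 *	ddr-pmu@5c020000 {
 *		compatible = "fsl,imx8m-ddr-pmu";
 *		reg = <0x5c020000 0x10000>;
 *		interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
 *	};
 */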