From 9529279897af15e9eab4f038b8381a0987f21747 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 15 Apr 2011 11:14:38 +0100 Subject: [PATCH] --- yaml --- r: 270114 b: refs/heads/master c: a6c93afed38c242ccf4ec5bcb5ff26ff2521cf36 h: refs/heads/master v: v3 --- [refs] | 2 +- trunk/arch/arm/include/asm/hw_breakpoint.h | 2 - trunk/arch/arm/kernel/hw_breakpoint.c | 270 ++++++++------------- trunk/arch/arm/kernel/perf_event.c | 2 +- trunk/arch/arm/kernel/perf_event_v6.c | 12 +- trunk/arch/arm/kernel/perf_event_v7.c | 16 +- trunk/arch/arm/kernel/perf_event_xscale.c | 12 +- 7 files changed, 127 insertions(+), 189 deletions(-) diff --git a/[refs] b/[refs] index 28281458c1ce..1f746e5a8094 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: d12443363e590461655d4e9ccc31e40ad9078283 +refs/heads/master: a6c93afed38c242ccf4ec5bcb5ff26ff2521cf36 diff --git a/trunk/arch/arm/include/asm/hw_breakpoint.h b/trunk/arch/arm/include/asm/hw_breakpoint.h index c190bc992f0e..f389b2704d82 100644 --- a/trunk/arch/arm/include/asm/hw_breakpoint.h +++ b/trunk/arch/arm/include/asm/hw_breakpoint.h @@ -50,7 +50,6 @@ static inline void decode_ctrl_reg(u32 reg, #define ARM_DEBUG_ARCH_V6_1 2 #define ARM_DEBUG_ARCH_V7_ECP14 3 #define ARM_DEBUG_ARCH_V7_MM 4 -#define ARM_DEBUG_ARCH_V7_1 5 /* Breakpoint */ #define ARM_BREAKPOINT_EXECUTE 0 @@ -58,7 +57,6 @@ static inline void decode_ctrl_reg(u32 reg, /* Watchpoints */ #define ARM_BREAKPOINT_LOAD 1 #define ARM_BREAKPOINT_STORE 2 -#define ARM_FSR_ACCESS_MASK (1 << 11) /* Privilege Levels */ #define ARM_BREAKPOINT_PRIV 1 diff --git a/trunk/arch/arm/kernel/hw_breakpoint.c b/trunk/arch/arm/kernel/hw_breakpoint.c index 5a46225f007e..a927ca1f5566 100644 --- a/trunk/arch/arm/kernel/hw_breakpoint.c +++ b/trunk/arch/arm/kernel/hw_breakpoint.c @@ -45,6 +45,7 @@ static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]); /* Number of BRP/WRP registers on this CPU. */ static int core_num_brps; +static int core_num_reserved_brps; static int core_num_wrps; /* Debug architecture version. */ @@ -136,11 +137,10 @@ static u8 get_debug_arch(void) u32 didr; /* Do we implement the extended CPUID interface? */ - if (((read_cpuid_id() >> 16) & 0xf) != 0xf) { - pr_warning("CPUID feature registers not supported. " - "Assuming v6 debug is present.\n"); + if (WARN_ONCE((((read_cpuid_id() >> 16) & 0xf) != 0xf), + "CPUID feature registers not supported. " + "Assuming v6 debug is present.\n")) return ARM_DEBUG_ARCH_V6; - } ARM_DBG_READ(c0, 0, didr); return (didr >> 16) & 0xf; @@ -154,21 +154,10 @@ u8 arch_get_debug_arch(void) static int debug_arch_supported(void) { u8 arch = get_debug_arch(); - - /* We don't support the memory-mapped interface. */ - return (arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14) || - arch >= ARM_DEBUG_ARCH_V7_1; -} - -/* Determine number of WRP registers available. */ -static int get_num_wrp_resources(void) -{ - u32 didr; - ARM_DBG_READ(c0, 0, didr); - return ((didr >> 28) & 0xf) + 1; + return arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14; } -/* Determine number of BRP registers available. */ +/* Determine number of BRP register available. */ static int get_num_brp_resources(void) { u32 didr; @@ -187,10 +176,9 @@ static int core_has_mismatch_brps(void) static int get_num_wrps(void) { /* - * On debug architectures prior to 7.1, when a watchpoint fires, the - * only way to work out which watchpoint it was is by disassembling - * the faulting instruction and working out the address of the memory - * access. 
+ * FIXME: When a watchpoint fires, the only way to work out which + * watchpoint it was is by disassembling the faulting instruction + * and working out the address of the memory access. * * Furthermore, we can only do this if the watchpoint was precise * since imprecise watchpoints prevent us from calculating register @@ -204,17 +192,36 @@ static int get_num_wrps(void) * [the ARM ARM states that the DFAR is UNKNOWN, but experience shows * that it is set on some implementations]. */ - if (get_debug_arch() < ARM_DEBUG_ARCH_V7_1) - return 1; - return get_num_wrp_resources(); +#if 0 + int wrps; + u32 didr; + ARM_DBG_READ(c0, 0, didr); + wrps = ((didr >> 28) & 0xf) + 1; +#endif + int wrps = 1; + + if (core_has_mismatch_brps() && wrps >= get_num_brp_resources()) + wrps = get_num_brp_resources() - 1; + + return wrps; +} + +/* We reserve one breakpoint for each watchpoint. */ +static int get_num_reserved_brps(void) +{ + if (core_has_mismatch_brps()) + return get_num_wrps(); + return 0; } /* Determine number of usable BRPs available. */ static int get_num_brps(void) { int brps = get_num_brp_resources(); - return core_has_mismatch_brps() ? brps - 1 : brps; + if (core_has_mismatch_brps()) + brps -= get_num_reserved_brps(); + return brps; } /* @@ -232,7 +239,7 @@ static int enable_monitor_mode(void) /* Ensure that halting mode is disabled. */ if (WARN_ONCE(dscr & ARM_DSCR_HDBGEN, - "halting debug mode enabled. Unable to access hardware resources.\n")) { + "halting debug mode enabled. Unable to access hardware resources.\n")) { ret = -EPERM; goto out; } @@ -248,7 +255,6 @@ static int enable_monitor_mode(void) ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN)); break; case ARM_DEBUG_ARCH_V7_ECP14: - case ARM_DEBUG_ARCH_V7_1: ARM_DBG_WRITE(c2, 2, (dscr | ARM_DSCR_MDBGEN)); break; default: @@ -340,10 +346,24 @@ int arch_install_hw_breakpoint(struct perf_event *bp) val_base = ARM_BASE_BVR; slots = (struct perf_event **)__get_cpu_var(bp_on_reg); max_slots = core_num_brps; + if (info->step_ctrl.enabled) { + /* Override the breakpoint data with the step data. */ + addr = info->trigger & ~0x3; + ctrl = encode_ctrl_reg(info->step_ctrl); + } } else { /* Watchpoint */ - ctrl_base = ARM_BASE_WCR; - val_base = ARM_BASE_WVR; + if (info->step_ctrl.enabled) { + /* Install into the reserved breakpoint region. */ + ctrl_base = ARM_BASE_BCR + core_num_brps; + val_base = ARM_BASE_BVR + core_num_brps; + /* Override the watchpoint data with the step data. */ + addr = info->trigger & ~0x3; + ctrl = encode_ctrl_reg(info->step_ctrl); + } else { + ctrl_base = ARM_BASE_WCR; + val_base = ARM_BASE_WVR; + } slots = (struct perf_event **)__get_cpu_var(wp_on_reg); max_slots = core_num_wrps; } @@ -362,17 +382,6 @@ int arch_install_hw_breakpoint(struct perf_event *bp) goto out; } - /* Override the breakpoint data with the step data. */ - if (info->step_ctrl.enabled) { - addr = info->trigger & ~0x3; - ctrl = encode_ctrl_reg(info->step_ctrl); - if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE) { - i = 0; - ctrl_base = ARM_BASE_BCR + core_num_brps; - val_base = ARM_BASE_BVR + core_num_brps; - } - } - /* Setup the address register. 
*/ write_wb_reg(val_base + i, addr); @@ -396,7 +405,10 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) max_slots = core_num_brps; } else { /* Watchpoint */ - base = ARM_BASE_WCR; + if (info->step_ctrl.enabled) + base = ARM_BASE_BCR + core_num_brps; + else + base = ARM_BASE_WCR; slots = (struct perf_event **)__get_cpu_var(wp_on_reg); max_slots = core_num_wrps; } @@ -414,13 +426,6 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n")) return; - /* Ensure that we disable the mismatch breakpoint. */ - if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE && - info->step_ctrl.enabled) { - i = 0; - base = ARM_BASE_BCR + core_num_brps; - } - /* Reset the control register. */ write_wb_reg(base + i, 0); } @@ -627,9 +632,10 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) * we can use the mismatch feature as a poor-man's hardware * single-step, but this only works for per-task breakpoints. */ - if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) || - !core_has_mismatch_brps() || !bp->hw.bp_target)) { - pr_warning("overflow handler required but none found\n"); + if (WARN_ONCE(!bp->overflow_handler && + (arch_check_bp_in_kernelspace(bp) || !core_has_mismatch_brps() + || !bp->hw.bp_target), + "overflow handler required but none found\n")) { ret = -EINVAL; } out: @@ -660,62 +666,34 @@ static void disable_single_step(struct perf_event *bp) arch_install_hw_breakpoint(bp); } -static void watchpoint_handler(unsigned long addr, unsigned int fsr, - struct pt_regs *regs) +static void watchpoint_handler(unsigned long unknown, struct pt_regs *regs) { - int i, access; - u32 val, ctrl_reg, alignment_mask; + int i; struct perf_event *wp, **slots; struct arch_hw_breakpoint *info; - struct arch_hw_breakpoint_ctrl ctrl; slots = (struct perf_event **)__get_cpu_var(wp_on_reg); + /* Without a disassembler, we can only handle 1 watchpoint. */ + BUG_ON(core_num_wrps > 1); + for (i = 0; i < core_num_wrps; ++i) { rcu_read_lock(); wp = slots[i]; - if (wp == NULL) - goto unlock; + if (wp == NULL) { + rcu_read_unlock(); + continue; + } - info = counter_arch_bp(wp); /* - * The DFAR is an unknown value on debug architectures prior - * to 7.1. Since we only allow a single watchpoint on these - * older CPUs, we can set the trigger to the lowest possible - * faulting address. + * The DFAR is an unknown value. Since we only allow a + * single watchpoint, we can set the trigger to the lowest + * possible faulting address. */ - if (debug_arch < ARM_DEBUG_ARCH_V7_1) { - BUG_ON(i > 0); - info->trigger = wp->attr.bp_addr; - } else { - if (info->ctrl.len == ARM_BREAKPOINT_LEN_8) - alignment_mask = 0x7; - else - alignment_mask = 0x3; - - /* Check if the watchpoint value matches. */ - val = read_wb_reg(ARM_BASE_WVR + i); - if (val != (addr & ~alignment_mask)) - goto unlock; - - /* Possible match, check the byte address select. */ - ctrl_reg = read_wb_reg(ARM_BASE_WCR + i); - decode_ctrl_reg(ctrl_reg, &ctrl); - if (!((1 << (addr & alignment_mask)) & ctrl.len)) - goto unlock; - - /* Check that the access type matches. */ - access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W : - HW_BREAKPOINT_R; - if (!(access & hw_breakpoint_type(wp))) - goto unlock; - - /* We have a winner. 
*/ - info->trigger = addr; - } - + info = counter_arch_bp(wp); + info->trigger = wp->attr.bp_addr; pr_debug("watchpoint fired: address = 0x%x\n", info->trigger); perf_bp_event(wp, regs); @@ -727,7 +705,6 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr, if (!wp->overflow_handler) enable_single_step(wp, instruction_pointer(regs)); -unlock: rcu_read_unlock(); } } @@ -740,7 +717,7 @@ static void watchpoint_single_step_handler(unsigned long pc) slots = (struct perf_event **)__get_cpu_var(wp_on_reg); - for (i = 0; i < core_num_wrps; ++i) { + for (i = 0; i < core_num_reserved_brps; ++i) { rcu_read_lock(); wp = slots[i]; @@ -843,7 +820,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, case ARM_ENTRY_ASYNC_WATCHPOINT: WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n"); case ARM_ENTRY_SYNC_WATCHPOINT: - watchpoint_handler(addr, fsr, regs); + watchpoint_handler(addr, regs); break; default: ret = 1; /* Unhandled fault. */ @@ -857,31 +834,11 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, /* * One-time initialisation. */ -static cpumask_t debug_err_mask; - -static int debug_reg_trap(struct pt_regs *regs, unsigned int instr) +static void reset_ctrl_regs(void *info) { - int cpu = smp_processor_id(); - - pr_warning("Debug register access (0x%x) caused undefined instruction on CPU %d\n", - instr, cpu); - - /* Set the error flag for this CPU and skip the faulting instruction. */ - cpumask_set_cpu(cpu, &debug_err_mask); - instruction_pointer(regs) += 4; - return 0; -} - -static struct undef_hook debug_reg_hook = { - .instr_mask = 0x0fe80f10, - .instr_val = 0x0e000e10, - .fn = debug_reg_trap, -}; - -static void reset_ctrl_regs(void *unused) -{ - int i, raw_num_brps, err = 0, cpu = smp_processor_id(); + int i, cpu = smp_processor_id(); u32 dbg_power; + cpumask_t *cpumask = info; /* * v7 debug contains save and restore registers so that debug state @@ -891,52 +848,38 @@ static void reset_ctrl_regs(void *unused) * Access Register to avoid taking undefined instruction exceptions * later on. */ - switch (debug_arch) { - case ARM_DEBUG_ARCH_V7_ECP14: + if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) { /* * Ensure sticky power-down is clear (i.e. debug logic is * powered up). */ asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power)); - if ((dbg_power & 0x1) == 0) - err = -EPERM; - break; - case ARM_DEBUG_ARCH_V7_1: + if ((dbg_power & 0x1) == 0) { + pr_warning("CPU %d debug is powered down!\n", cpu); + cpumask_or(cpumask, cpumask, cpumask_of(cpu)); + return; + } + /* - * Ensure the OS double lock is clear. + * Unconditionally clear the lock by writing a value + * other than 0xC5ACCE55 to the access register. */ - asm volatile("mrc p14, 0, %0, c1, c3, 4" : "=r" (dbg_power)); - if ((dbg_power & 0x1) == 1) - err = -EPERM; - break; - } + asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0)); + isb(); - if (err) { - pr_warning("CPU %d debug is powered down!\n", cpu); - cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu)); - return; + /* + * Clear any configured vector-catch events before + * enabling monitor mode. + */ + asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0)); + isb(); } - /* - * Unconditionally clear the lock by writing a value - * other than 0xC5ACCE55 to the access register. - */ - asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0)); - isb(); - - /* - * Clear any configured vector-catch events before - * enabling monitor mode. 
- */ - asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0)); - isb(); - if (enable_monitor_mode()) return; /* We must also reset any reserved registers. */ - raw_num_brps = get_num_brp_resources(); - for (i = 0; i < raw_num_brps; ++i) { + for (i = 0; i < core_num_brps + core_num_reserved_brps; ++i) { write_wb_reg(ARM_BASE_BCR + i, 0UL); write_wb_reg(ARM_BASE_BVR + i, 0UL); } @@ -952,7 +895,6 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self, { if (action == CPU_ONLINE) smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1); - return NOTIFY_OK; } @@ -963,6 +905,7 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = { static int __init arch_hw_breakpoint_init(void) { u32 dscr; + cpumask_t cpumask = { CPU_BITS_NONE }; debug_arch = get_debug_arch(); @@ -973,31 +916,28 @@ static int __init arch_hw_breakpoint_init(void) /* Determine how many BRPs/WRPs are available. */ core_num_brps = get_num_brps(); + core_num_reserved_brps = get_num_reserved_brps(); core_num_wrps = get_num_wrps(); - /* - * We need to tread carefully here because DBGSWENABLE may be - * driven low on this core and there isn't an architected way to - * determine that. - */ - register_undef_hook(&debug_reg_hook); + pr_info("found %d breakpoint and %d watchpoint registers.\n", + core_num_brps + core_num_reserved_brps, core_num_wrps); + + if (core_num_reserved_brps) + pr_info("%d breakpoint(s) reserved for watchpoint " + "single-step.\n", core_num_reserved_brps); /* * Reset the breakpoint resources. We assume that a halting * debugger will leave the world in a nice state for us. */ - on_each_cpu(reset_ctrl_regs, NULL, 1); - unregister_undef_hook(&debug_reg_hook); - if (!cpumask_empty(&debug_err_mask)) { + on_each_cpu(reset_ctrl_regs, &cpumask, 1); + if (!cpumask_empty(&cpumask)) { core_num_brps = 0; + core_num_reserved_brps = 0; core_num_wrps = 0; return 0; } - pr_info("found %d " "%s" "breakpoint and %d watchpoint registers.\n", - core_num_brps, core_has_mismatch_brps() ? "(+1 reserved) " : - "", core_num_wrps); - ARM_DBG_READ(c1, 0, dscr); if (dscr & ARM_DSCR_HDBGEN) { max_watchpoint_len = 4; diff --git a/trunk/arch/arm/kernel/perf_event.c b/trunk/arch/arm/kernel/perf_event.c index 53c9c2610cbc..73049963bf48 100644 --- a/trunk/arch/arm/kernel/perf_event.c +++ b/trunk/arch/arm/kernel/perf_event.c @@ -90,7 +90,7 @@ struct arm_pmu { }; /* Set at runtime when we know what CPU type we are. */ -static const struct arm_pmu *armpmu; +static struct arm_pmu *armpmu; enum arm_perf_pmu_ids armpmu_get_pmu_id(void) diff --git a/trunk/arch/arm/kernel/perf_event_v6.c b/trunk/arch/arm/kernel/perf_event_v6.c index dd7f3b9f4cb3..d1abe97ae08a 100644 --- a/trunk/arch/arm/kernel/perf_event_v6.c +++ b/trunk/arch/arm/kernel/perf_event_v6.c @@ -635,7 +635,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc, raw_spin_unlock_irqrestore(&pmu_lock, flags); } -static const struct arm_pmu armv6pmu = { +static struct arm_pmu armv6pmu = { .id = ARM_PERF_PMU_ID_V6, .name = "v6", .handle_irq = armv6pmu_handle_irq, @@ -653,7 +653,7 @@ static const struct arm_pmu armv6pmu = { .max_period = (1LLU << 32) - 1, }; -static const struct arm_pmu *__init armv6pmu_init(void) +static struct arm_pmu *__init armv6pmu_init(void) { return &armv6pmu; } @@ -665,7 +665,7 @@ static const struct arm_pmu *__init armv6pmu_init(void) * disable the interrupt reporting and update the event. When unthrottling we * reset the period and enable the interrupt reporting. 
*/ -static const struct arm_pmu armv6mpcore_pmu = { +static struct arm_pmu armv6mpcore_pmu = { .id = ARM_PERF_PMU_ID_V6MP, .name = "v6mpcore", .handle_irq = armv6pmu_handle_irq, @@ -683,17 +683,17 @@ static const struct arm_pmu armv6mpcore_pmu = { .max_period = (1LLU << 32) - 1, }; -static const struct arm_pmu *__init armv6mpcore_pmu_init(void) +static struct arm_pmu *__init armv6mpcore_pmu_init(void) { return &armv6mpcore_pmu; } #else -static const struct arm_pmu *__init armv6pmu_init(void) +static struct arm_pmu *__init armv6pmu_init(void) { return NULL; } -static const struct arm_pmu *__init armv6mpcore_pmu_init(void) +static struct arm_pmu *__init armv6mpcore_pmu_init(void) { return NULL; } diff --git a/trunk/arch/arm/kernel/perf_event_v7.c b/trunk/arch/arm/kernel/perf_event_v7.c index 4c851834f68e..74f9119906d0 100644 --- a/trunk/arch/arm/kernel/perf_event_v7.c +++ b/trunk/arch/arm/kernel/perf_event_v7.c @@ -1188,7 +1188,7 @@ static u32 __init armv7_read_num_pmnc_events(void) return nb_cnt + 1; } -static const struct arm_pmu *__init armv7_a8_pmu_init(void) +static struct arm_pmu *__init armv7_a8_pmu_init(void) { armv7pmu.id = ARM_PERF_PMU_ID_CA8; armv7pmu.name = "ARMv7 Cortex-A8"; @@ -1198,7 +1198,7 @@ static const struct arm_pmu *__init armv7_a8_pmu_init(void) return &armv7pmu; } -static const struct arm_pmu *__init armv7_a9_pmu_init(void) +static struct arm_pmu *__init armv7_a9_pmu_init(void) { armv7pmu.id = ARM_PERF_PMU_ID_CA9; armv7pmu.name = "ARMv7 Cortex-A9"; @@ -1208,7 +1208,7 @@ static const struct arm_pmu *__init armv7_a9_pmu_init(void) return &armv7pmu; } -static const struct arm_pmu *__init armv7_a5_pmu_init(void) +static struct arm_pmu *__init armv7_a5_pmu_init(void) { armv7pmu.id = ARM_PERF_PMU_ID_CA5; armv7pmu.name = "ARMv7 Cortex-A5"; @@ -1218,7 +1218,7 @@ static const struct arm_pmu *__init armv7_a5_pmu_init(void) return &armv7pmu; } -static const struct arm_pmu *__init armv7_a15_pmu_init(void) +static struct arm_pmu *__init armv7_a15_pmu_init(void) { armv7pmu.id = ARM_PERF_PMU_ID_CA15; armv7pmu.name = "ARMv7 Cortex-A15"; @@ -1228,22 +1228,22 @@ static const struct arm_pmu *__init armv7_a15_pmu_init(void) return &armv7pmu; } #else -static const struct arm_pmu *__init armv7_a8_pmu_init(void) +static struct arm_pmu *__init armv7_a8_pmu_init(void) { return NULL; } -static const struct arm_pmu *__init armv7_a9_pmu_init(void) +static struct arm_pmu *__init armv7_a9_pmu_init(void) { return NULL; } -static const struct arm_pmu *__init armv7_a5_pmu_init(void) +static struct arm_pmu *__init armv7_a5_pmu_init(void) { return NULL; } -static const struct arm_pmu *__init armv7_a15_pmu_init(void) +static struct arm_pmu *__init armv7_a15_pmu_init(void) { return NULL; } diff --git a/trunk/arch/arm/kernel/perf_event_xscale.c b/trunk/arch/arm/kernel/perf_event_xscale.c index 3c4397491d08..730b1c299868 100644 --- a/trunk/arch/arm/kernel/perf_event_xscale.c +++ b/trunk/arch/arm/kernel/perf_event_xscale.c @@ -424,7 +424,7 @@ xscale1pmu_write_counter(int counter, u32 val) } } -static const struct arm_pmu xscale1pmu = { +static struct arm_pmu xscale1pmu = { .id = ARM_PERF_PMU_ID_XSCALE1, .name = "xscale1", .handle_irq = xscale1pmu_handle_irq, @@ -442,7 +442,7 @@ static const struct arm_pmu xscale1pmu = { .max_period = (1LLU << 32) - 1, }; -static const struct arm_pmu *__init xscale1pmu_init(void) +static struct arm_pmu *__init xscale1pmu_init(void) { return &xscale1pmu; } @@ -786,7 +786,7 @@ xscale2pmu_write_counter(int counter, u32 val) } } -static const struct arm_pmu xscale2pmu = { 
+static struct arm_pmu xscale2pmu = { .id = ARM_PERF_PMU_ID_XSCALE2, .name = "xscale2", .handle_irq = xscale2pmu_handle_irq, @@ -804,17 +804,17 @@ static const struct arm_pmu xscale2pmu = { .max_period = (1LLU << 32) - 1, }; -static const struct arm_pmu *__init xscale2pmu_init(void) +static struct arm_pmu *__init xscale2pmu_init(void) { return &xscale2pmu; } #else -static const struct arm_pmu *__init xscale1pmu_init(void) +static struct arm_pmu *__init xscale1pmu_init(void) { return NULL; } -static const struct arm_pmu *__init xscale2pmu_init(void) +static struct arm_pmu *__init xscale2pmu_init(void) { return NULL; }