Commit 03043f6
---
r: 225919
b: refs/heads/master
c: 961ec6d
h: refs/heads/master
i:
  225917: bdecab4
  225915: 2a9eb21
  225911: 63499ff
  225903: 4faeba9
  225887: e5b463e
  225855: 79e4c24
  225791: 1cb66b2
v: v3
Will Deacon authored and Russell King committed Dec 4, 2010
1 parent 8569ba4 commit 03043f6
Showing 5 changed files with 36 additions and 36 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 4d6b7a779be34e1df296abc1dc555134a8cf34af
+refs/heads/master: 961ec6daa7b14f376c30d447a830fa4783a2112c
2 changes: 1 addition & 1 deletion trunk/arch/arm/kernel/perf_event.c
@@ -32,7 +32,7 @@ static struct platform_device *pmu_device;
* Hardware lock to serialize accesses to PMU registers. Needed for the
* read/modify/write sequences.
*/
-static DEFINE_SPINLOCK(pmu_lock);
+static DEFINE_RAW_SPINLOCK(pmu_lock);

/*
* ARMv6 supports a maximum of 3 events, starting from index 1. If we add
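Note: the comment above explains why pmu_lock exists: every PMU control-register update is a read/modify/write sequence, and the lock keeps concurrent updaters from clobbering each other's bits. The sketch below is a minimal illustration of that pattern using the raw spinlock this commit switches to; example_pmu_update, example_pmu_lock and the accessor callbacks are invented for illustration and are not functions from this tree. Unlike a plain spinlock, a raw_spinlock_t remains a busy-waiting, non-preemptible lock even on PREEMPT_RT kernels, so the critical section cannot be scheduled away between the read and the write.

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RAW_SPINLOCK(example_pmu_lock);

/*
 * Clear 'mask' and set 'bits' in a PMU control register, serialized
 * against other updaters by the raw spinlock. IRQs are disabled locally
 * so the PMU interrupt handler cannot interleave on this CPU either.
 */
static void example_pmu_update(u32 mask, u32 bits,
			       u32 (*read_ctrl)(void),
			       void (*write_ctrl)(u32))
{
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&example_pmu_lock, flags);
	val = read_ctrl();	/* read   */
	val &= ~mask;		/* modify */
	val |= bits;
	write_ctrl(val);	/* write  */
	raw_spin_unlock_irqrestore(&example_pmu_lock, flags);
}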
20 changes: 10 additions & 10 deletions trunk/arch/arm/kernel/perf_event_v6.c
@@ -426,12 +426,12 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
* Mask out the current event and set the counter to count the event
* that we're interested in.
*/
-spin_lock_irqsave(&pmu_lock, flags);
+raw_spin_lock_irqsave(&pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~mask;
val |= evt;
armv6_pmcr_write(val);
-spin_unlock_irqrestore(&pmu_lock, flags);
+raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static irqreturn_t
@@ -500,23 +500,23 @@ armv6pmu_start(void)
{
unsigned long flags, val;

-spin_lock_irqsave(&pmu_lock, flags);
+raw_spin_lock_irqsave(&pmu_lock, flags);
val = armv6_pmcr_read();
val |= ARMV6_PMCR_ENABLE;
armv6_pmcr_write(val);
-spin_unlock_irqrestore(&pmu_lock, flags);
+raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static void
armv6pmu_stop(void)
{
unsigned long flags, val;

-spin_lock_irqsave(&pmu_lock, flags);
+raw_spin_lock_irqsave(&pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~ARMV6_PMCR_ENABLE;
armv6_pmcr_write(val);
-spin_unlock_irqrestore(&pmu_lock, flags);
+raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static int
@@ -570,12 +570,12 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
* of ETM bus signal assertion cycles. The external reporting should
* be disabled and so this should never increment.
*/
-spin_lock_irqsave(&pmu_lock, flags);
+raw_spin_lock_irqsave(&pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~mask;
val |= evt;
armv6_pmcr_write(val);
-spin_unlock_irqrestore(&pmu_lock, flags);
+raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static void
@@ -599,12 +599,12 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
* Unlike UP ARMv6, we don't have a way of stopping the counters. We
* simply disable the interrupt reporting.
*/
-spin_lock_irqsave(&pmu_lock, flags);
+raw_spin_lock_irqsave(&pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~mask;
val |= evt;
armv6_pmcr_write(val);
-spin_unlock_irqrestore(&pmu_lock, flags);
+raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static const struct arm_pmu armv6pmu = {
16 changes: 8 additions & 8 deletions trunk/arch/arm/kernel/perf_event_v7.c
@@ -689,7 +689,7 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
* Enable counter and interrupt, and set the counter to count
* the event that we're interested in.
*/
-spin_lock_irqsave(&pmu_lock, flags);
+raw_spin_lock_irqsave(&pmu_lock, flags);

/*
* Disable counter
@@ -713,7 +713,7 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
*/
armv7_pmnc_enable_counter(idx);

-spin_unlock_irqrestore(&pmu_lock, flags);
+raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
@@ -723,7 +723,7 @@ static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
/*
* Disable counter and interrupt
*/
-spin_lock_irqsave(&pmu_lock, flags);
+raw_spin_lock_irqsave(&pmu_lock, flags);

/*
* Disable counter
@@ -735,7 +735,7 @@ static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
*/
armv7_pmnc_disable_intens(idx);

-spin_unlock_irqrestore(&pmu_lock, flags);
+raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
@@ -805,20 +805,20 @@ static void armv7pmu_start(void)
{
unsigned long flags;

-spin_lock_irqsave(&pmu_lock, flags);
+raw_spin_lock_irqsave(&pmu_lock, flags);
/* Enable all counters */
armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
-spin_unlock_irqrestore(&pmu_lock, flags);
+raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static void armv7pmu_stop(void)
{
unsigned long flags;

-spin_lock_irqsave(&pmu_lock, flags);
+raw_spin_lock_irqsave(&pmu_lock, flags);
/* Disable all counters */
armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
-spin_unlock_irqrestore(&pmu_lock, flags);
+raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
32 changes: 16 additions & 16 deletions trunk/arch/arm/kernel/perf_event_xscale.c
@@ -291,12 +291,12 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
return;
}

-spin_lock_irqsave(&pmu_lock, flags);
+raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val &= ~mask;
val |= evt;
xscale1pmu_write_pmnc(val);
-spin_unlock_irqrestore(&pmu_lock, flags);
+raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static void
@@ -322,12 +322,12 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
return;
}

-spin_lock_irqsave(&pmu_lock, flags);
+raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val &= ~mask;
val |= evt;
xscale1pmu_write_pmnc(val);
-spin_unlock_irqrestore(&pmu_lock, flags);
+raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static int
@@ -355,23 +355,23 @@ xscale1pmu_start(void)
{
unsigned long flags, val;

-spin_lock_irqsave(&pmu_lock, flags);
+raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val |= XSCALE_PMU_ENABLE;
xscale1pmu_write_pmnc(val);
-spin_unlock_irqrestore(&pmu_lock, flags);
+raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static void
xscale1pmu_stop(void)
{
unsigned long flags, val;

-spin_lock_irqsave(&pmu_lock, flags);
+raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val &= ~XSCALE_PMU_ENABLE;
xscale1pmu_write_pmnc(val);
-spin_unlock_irqrestore(&pmu_lock, flags);
+raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static inline u32
@@ -635,10 +635,10 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
return;
}

-spin_lock_irqsave(&pmu_lock, flags);
+raw_spin_lock_irqsave(&pmu_lock, flags);
xscale2pmu_write_event_select(evtsel);
xscale2pmu_write_int_enable(ien);
-spin_unlock_irqrestore(&pmu_lock, flags);
+raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static void
@@ -678,10 +678,10 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
return;
}

-spin_lock_irqsave(&pmu_lock, flags);
+raw_spin_lock_irqsave(&pmu_lock, flags);
xscale2pmu_write_event_select(evtsel);
xscale2pmu_write_int_enable(ien);
-spin_unlock_irqrestore(&pmu_lock, flags);
+raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static int
@@ -705,23 +705,23 @@ xscale2pmu_start(void)
{
unsigned long flags, val;

-spin_lock_irqsave(&pmu_lock, flags);
+raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
val |= XSCALE_PMU_ENABLE;
xscale2pmu_write_pmnc(val);
-spin_unlock_irqrestore(&pmu_lock, flags);
+raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static void
xscale2pmu_stop(void)
{
unsigned long flags, val;

-spin_lock_irqsave(&pmu_lock, flags);
+raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale2pmu_read_pmnc();
val &= ~XSCALE_PMU_ENABLE;
xscale2pmu_write_pmnc(val);
-spin_unlock_irqrestore(&pmu_lock, flags);
+raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static inline u32
