Commit 090791c

---
yaml
---
r: 288325
b: refs/heads/master
c: 902c6a9
h: refs/heads/master
i:
  288323: 6df0726
v: v3

Jesper Juhl authored and Alasdair G Kergon committed Mar 7, 2012
1 parent 2a340e9 commit 090791c
Showing 168 changed files with 615 additions and 867 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 4f262acfde22b63498b5e4f165e53d3bb4e96400
+refs/heads/master: 902c6a96a7cb9c50d2a8aed1788efad0a5d8f04c
3 changes: 1 addition & 2 deletions trunk/Documentation/input/alps.txt
@@ -13,8 +13,7 @@ Detection
 
 All ALPS touchpads should respond to the "E6 report" command sequence:
 E8-E6-E6-E6-E9. An ALPS touchpad should respond with either 00-00-0A or
-00-00-64 if no buttons are pressed. The bits 0-2 of the first byte will be 1s
-if some buttons are pressed.
+00-00-64.
 
 If the E6 report is successful, the touchpad model is identified using the "E7
 report" sequence: E8-E7-E7-E7-E9. The response is the model signature and is
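The probe sequence described above maps directly onto the kernel's PS/2 command layer. The following is a hypothetical sketch, not the in-tree alps.c code: it assumes the usual psmouse driver context (linux/libps2.h plus the PSMOUSE_CMD_* constants from drivers/input/mouse/psmouse.h), and alps_e6_probe_sketch is an illustrative name.

    /*
     * Hypothetical sketch of the E6 report (E8-E6-E6-E6-E9), loosely
     * modelled on drivers/input/mouse/alps.c.  Returns 0 if the device
     * answers like an ALPS touchpad.
     */
    static int alps_e6_probe_sketch(struct psmouse *psmouse)
    {
            struct ps2dev *ps2dev = &psmouse->ps2dev;
            unsigned char param[4];

            param[0] = 0;
            if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES) ||     /* E8 */
                ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||  /* E6 */
                ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||  /* E6 */
                ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11))    /* E6 */
                    return -EIO;

            param[0] = param[1] = param[2] = 0;
            if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))      /* E9 */
                    return -EIO;

            /* Expect 00-00-0A or 00-00-64, per the documentation above. */
            if (param[0] || param[1] ||
                (param[2] != 0x0a && param[2] != 0x64))
                    return -ENODEV;

            return 0;
    }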
6 changes: 0 additions & 6 deletions trunk/Documentation/kernel-parameters.txt
@@ -2211,12 +2211,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
                         default: off.
 
-printk.always_kmsg_dump=
-                        Trigger kmsg_dump for cases other than kernel oops or
-                        panics
-                        Format: <bool> (1/Y/y=enable, 0/N/n=disable)
-                        default: disabled
-
 printk.time=            Show timing data prefixed to each printk message line
                         Format: <bool> (1/Y/y=enable, 0/N/n=disable)
 
4 changes: 2 additions & 2 deletions trunk/MAINTAINERS
@@ -1310,15 +1310,15 @@ F:      drivers/atm/
 F:      include/linux/atm*
 
 ATMEL AT91 MCI DRIVER
-M:      Ludovic Desroches <ludovic.desroches@atmel.com>
+M:      Nicolas Ferre <nicolas.ferre@atmel.com>
 L:      linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:      http://www.atmel.com/products/AT91/
 W:      http://www.at91.com/
 S:      Maintained
 F:      drivers/mmc/host/at91_mci.c
 
 ATMEL AT91 / AT32 MCI DRIVER
-M:      Ludovic Desroches <ludovic.desroches@atmel.com>
+M:      Nicolas Ferre <nicolas.ferre@atmel.com>
 S:      Maintained
 F:      drivers/mmc/host/atmel-mci.c
 F:      drivers/mmc/host/atmel-mci-regs.h
2 changes: 1 addition & 1 deletion trunk/arch/alpha/include/asm/futex.h
@@ -108,7 +108,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
         "       lda     $31,3b-2b(%0)\n"
         "       .previous\n"
         :       "+r"(ret), "=&r"(prev), "=&r"(cmp)
-        :       "r"(uaddr), "r"((long)(int)oldval), "r"(newval)
+        :       "r"(uaddr), "r"((long)oldval), "r"(newval)
         :       "memory");
 
         *uval = prev;
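The only disagreement between the two lines above is how the 32-bit oldval widens to the 64-bit register operand: a plain (long) cast of a u32 zero-extends, while going through (int) first sign-extends, and Alpha's ldl_l loads its 32-bit operand sign-extended into the register the inline asm compares against. A standalone user-space illustration (plain C, assuming a 64-bit long):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t oldval = 0x80000000u;          /* bit 31 set */

            long zext = (long)oldval;               /* zero-extends: 0x0000000080000000 */
            long sext = (long)(int)oldval;          /* sign-extends: 0xffffffff80000000 */

            /* Only the sign-extended form matches what a sign-extending
             * 32-bit load (Alpha ldl_l) produces for this value. */
            printf("zext=%016lx sext=%016lx\n", zext, sext);
            return 0;
    }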
2 changes: 1 addition & 1 deletion trunk/arch/arm/Kconfig
@@ -1280,7 +1280,7 @@ config ARM_ERRATA_743622
         depends on CPU_V7
         help
           This option enables the workaround for the 743622 Cortex-A9
-          (r2p*) erratum. Under very rare conditions, a faulty
+          (r2p0..r2p2) erratum. Under very rare conditions, a faulty
           optimisation in the Cortex-A9 Store Buffer may lead to data
           corruption. This workaround sets a specific bit in the diagnostic
           register of the Cortex-A9 which disables the Store Buffer
2 changes: 1 addition & 1 deletion trunk/arch/arm/include/asm/pmu.h
@@ -134,7 +134,7 @@ int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);
 
 u64 armpmu_event_update(struct perf_event *event,
                         struct hw_perf_event *hwc,
-                        int idx);
+                        int idx, int overflow);
 
 int armpmu_event_set_period(struct perf_event *event,
                             struct hw_perf_event *hwc,
1 change: 0 additions & 1 deletion trunk/arch/arm/kernel/ecard.c
@@ -242,7 +242,6 @@ static void ecard_init_pgtables(struct mm_struct *mm)
 
         memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));
 
-        vma.vm_flags = VM_EXEC;
         vma.vm_mm = mm;
 
         flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
45 changes: 11 additions & 34 deletions trunk/arch/arm/kernel/perf_event.c
@@ -180,7 +180,7 @@ armpmu_event_set_period(struct perf_event *event,
 u64
 armpmu_event_update(struct perf_event *event,
                     struct hw_perf_event *hwc,
-                    int idx)
+                    int idx, int overflow)
 {
         struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
         u64 delta, prev_raw_count, new_raw_count;
@@ -193,7 +193,7 @@ armpmu_event_update(struct perf_event *event,
                              new_raw_count) != prev_raw_count)
                 goto again;
 
-        delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
+        new_raw_count &= armpmu->max_period;
+        prev_raw_count &= armpmu->max_period;
+
+        if (overflow)
+                delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
+        else
+                delta = new_raw_count - prev_raw_count;
 
         local64_add(delta, &event->count);
         local64_sub(delta, &hwc->period_left);
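Plugging illustrative numbers into the two branches above, assuming a 32-bit counter so that max_period == 0xffffffff: a counter last read at 0xfffffff0 that wraps and is read again as 0x0000000f after the overflow interrupt has really counted 31 events. A standalone check (made-up values, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t max_period = 0xffffffffULL;    /* 32-bit counter */
            uint64_t prev = 0xfffffff0ULL;          /* last raw count */
            uint64_t now  = 0x0000000fULL;          /* raw count after the wrap */

            /* overflow path: count the events on both sides of the wrap */
            uint64_t delta = max_period - prev + now + 1;

            printf("delta = %llu\n", (unsigned long long)delta);  /* 31 */
            return 0;
    }

The non-overflow branch, delta = new - prev, is the plain read path and is only meaningful when the counter has not wrapped since the previous read.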
@@ -210,7 +216,7 @@ armpmu_read(struct perf_event *event)
         if (hwc->idx < 0)
                 return;
 
-        armpmu_event_update(event, hwc, hwc->idx);
+        armpmu_event_update(event, hwc, hwc->idx, 0);
 }
 
 static void
@@ -226,7 +232,7 @@ armpmu_stop(struct perf_event *event, int flags)
         if (!(hwc->state & PERF_HES_STOPPED)) {
                 armpmu->disable(hwc, hwc->idx);
                 barrier(); /* why? */
-                armpmu_event_update(event, hwc, hwc->idx);
+                armpmu_event_update(event, hwc, hwc->idx, 0);
                 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
         }
 }
@@ -512,13 +518,7 @@ __hw_perf_event_init(struct perf_event *event)
         hwc->config_base |= (unsigned long)mapping;
 
         if (!hwc->sample_period) {
-                /*
-                 * For non-sampling runs, limit the sample_period to half
-                 * of the counter width. That way, the new counter value
-                 * is far less likely to overtake the previous one unless
-                 * you have some serious IRQ latency issues.
-                 */
-                hwc->sample_period  = armpmu->max_period >> 1;
+                hwc->sample_period  = armpmu->max_period;
                 hwc->last_period    = hwc->sample_period;
                 local64_set(&hwc->period_left, hwc->sample_period);
         }
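The deleted comment encodes an arithmetic hazard that is easy to see with numbers: when the whole counter width is used as the period, a sufficiently delayed interrupt lets the hardware counter wrap past the previously read value, and the plain new - prev path in armpmu_event_update then underflows. A standalone illustration with made-up values (assuming a 32-bit counter):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t prev = 0xfffffffeULL;  /* last raw count */
            uint64_t now  = 0x00000002ULL;  /* counter wrapped past prev */

            /* plain (non-overflow) read: subtraction underflows */
            printf("delta = %#llx\n",
                   (unsigned long long)(now - prev)); /* 0xffffffff00000004 */
            return 0;
    }

Halving the period, as in the removed hunk, keeps half the counter range as headroom so the new value cannot legitimately overtake the old one between reads.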
@@ -679,28 +679,6 @@ static void __init cpu_pmu_init(struct arm_pmu *armpmu)
         armpmu->type = ARM_PMU_DEVICE_CPU;
 }
 
-/*
- * PMU hardware loses all context when a CPU goes offline.
- * When a CPU is hotplugged back in, since some hardware registers are
- * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
- * junk values out of them.
- */
-static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
-                                    unsigned long action, void *hcpu)
-{
-        if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
-                return NOTIFY_DONE;
-
-        if (cpu_pmu && cpu_pmu->reset)
-                cpu_pmu->reset(NULL);
-
-        return NOTIFY_OK;
-}
-
-static struct notifier_block __cpuinitdata pmu_cpu_notifier = {
-        .notifier_call = pmu_cpu_notify,
-};
-
 /*
  * CPU PMU identification and registration.
  */
@@ -752,7 +730,6 @@ init_hw_perf_events(void)
                 pr_info("enabled with %s PMU driver, %d counters available\n",
                         cpu_pmu->name, cpu_pmu->num_events);
                 cpu_pmu_init(cpu_pmu);
-                register_cpu_notifier(&pmu_cpu_notifier);
                 armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
         } else {
                 pr_info("no hardware support available\n");
22 changes: 19 additions & 3 deletions trunk/arch/arm/kernel/perf_event_v6.c
@@ -467,6 +467,23 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
         raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
+static int counter_is_active(unsigned long pmcr, int idx)
+{
+        unsigned long mask = 0;
+        if (idx == ARMV6_CYCLE_COUNTER)
+                mask = ARMV6_PMCR_CCOUNT_IEN;
+        else if (idx == ARMV6_COUNTER0)
+                mask = ARMV6_PMCR_COUNT0_IEN;
+        else if (idx == ARMV6_COUNTER1)
+                mask = ARMV6_PMCR_COUNT1_IEN;
+
+        if (mask)
+                return pmcr & mask;
+
+        WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+        return 0;
+}
+
 static irqreturn_t
 armv6pmu_handle_irq(int irq_num,
                     void *dev)
@@ -496,8 +513,7 @@ armv6pmu_handle_irq,
                 struct perf_event *event = cpuc->events[idx];
                 struct hw_perf_event *hwc;
 
-                /* Ignore if we don't have an event. */
-                if (!event)
+                if (!counter_is_active(pmcr, idx))
                         continue;
 
                 /*
@@ -508,7 +524,7 @@ armv6pmu_handle_irq,
                         continue;
 
                 hwc = &event->hw;
-                armpmu_event_update(event, hwc, idx);
+                armpmu_event_update(event, hwc, idx, 1);
                 data.period = event->hw.last_period;
                 if (!armpmu_event_set_period(event, hwc, idx))
                         continue;
11 changes: 1 addition & 10 deletions trunk/arch/arm/kernel/perf_event_v7.c
@@ -809,11 +809,6 @@ static inline int armv7_pmnc_disable_intens(int idx)
 
         counter = ARMV7_IDX_TO_COUNTER(idx);
         asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
-        isb();
-        /* Clear the overflow flag in case an interrupt is pending. */
-        asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
-        isb();
-
         return idx;
 }

@@ -960,10 +955,6 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
                 struct perf_event *event = cpuc->events[idx];
                 struct hw_perf_event *hwc;
 
-                /* Ignore if we don't have an event. */
-                if (!event)
-                        continue;
-
                 /*
                  * We have a single interrupt for all counters. Check that
                  * each counter has overflowed before we process it.
@@ -972,7 +963,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
                         continue;
 
                 hwc = &event->hw;
-                armpmu_event_update(event, hwc, idx);
+                armpmu_event_update(event, hwc, idx, 1);
                 data.period = event->hw.last_period;
                 if (!armpmu_event_set_period(event, hwc, idx))
                         continue;
20 changes: 4 additions & 16 deletions trunk/arch/arm/kernel/perf_event_xscale.c
@@ -255,14 +255,11 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
                 struct perf_event *event = cpuc->events[idx];
                 struct hw_perf_event *hwc;
 
-                if (!event)
-                        continue;
-
                 if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
                         continue;
 
                 hwc = &event->hw;
-                armpmu_event_update(event, hwc, idx);
+                armpmu_event_update(event, hwc, idx, 1);
                 data.period = event->hw.last_period;
                 if (!armpmu_event_set_period(event, hwc, idx))
                         continue;
@@ -595,14 +592,11 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
                 struct perf_event *event = cpuc->events[idx];
                 struct hw_perf_event *hwc;
 
-                if (!event)
-                        continue;
-
-                if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
+                if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
                         continue;
 
                 hwc = &event->hw;
-                armpmu_event_update(event, hwc, idx);
+                armpmu_event_update(event, hwc, idx, 1);
                 data.period = event->hw.last_period;
                 if (!armpmu_event_set_period(event, hwc, idx))
                         continue;
@@ -669,7 +663,7 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 static void
 xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
-        unsigned long flags, ien, evtsel, of_flags;
+        unsigned long flags, ien, evtsel;
         struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
         ien = xscale2pmu_read_int_enable();
@@ -678,31 +672,26 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
         switch (idx) {
         case XSCALE_CYCLE_COUNTER:
                 ien &= ~XSCALE2_CCOUNT_INT_EN;
-                of_flags = XSCALE2_CCOUNT_OVERFLOW;
                 break;
         case XSCALE_COUNTER0:
                 ien &= ~XSCALE2_COUNT0_INT_EN;
                 evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
                 evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
-                of_flags = XSCALE2_COUNT0_OVERFLOW;
                 break;
         case XSCALE_COUNTER1:
                 ien &= ~XSCALE2_COUNT1_INT_EN;
                 evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
                 evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
-                of_flags = XSCALE2_COUNT1_OVERFLOW;
                 break;
         case XSCALE_COUNTER2:
                 ien &= ~XSCALE2_COUNT2_INT_EN;
                 evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
                 evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
-                of_flags = XSCALE2_COUNT2_OVERFLOW;
                 break;
         case XSCALE_COUNTER3:
                 ien &= ~XSCALE2_COUNT3_INT_EN;
                 evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
                 evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
-                of_flags = XSCALE2_COUNT3_OVERFLOW;
                 break;
         default:
                 WARN_ONCE(1, "invalid counter number (%d)\n", idx);
@@ -712,7 +701,6 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
         raw_spin_lock_irqsave(&events->pmu_lock, flags);
         xscale2pmu_write_event_select(evtsel);
         xscale2pmu_write_int_enable(ien);
-        xscale2pmu_write_overflow_flags(of_flags);
         raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

19 changes: 9 additions & 10 deletions trunk/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -38,6 +38,10 @@
 #if defined(CONFIG_AT_HDMAC) || defined(CONFIG_AT_HDMAC_MODULE)
 static u64 hdmac_dmamask = DMA_BIT_MASK(32);
 
+static struct at_dma_platform_data atdma_pdata = {
+        .nr_channels = 8,
+};
+
 static struct resource hdmac_resources[] = {
         [0] = {
                 .start = AT91SAM9G45_BASE_DMA,
@@ -52,27 +56,22 @@ static struct resource hdmac_resources[] = {
 };
 
 static struct platform_device at_hdmac_device = {
-        .name           = "at91sam9g45_dma",
+        .name           = "at_hdmac",
         .id             = -1,
         .dev            = {
                 .dma_mask               = &hdmac_dmamask,
                 .coherent_dma_mask      = DMA_BIT_MASK(32),
+                .platform_data          = &atdma_pdata,
         },
         .resource       = hdmac_resources,
         .num_resources  = ARRAY_SIZE(hdmac_resources),
 };
 
 void __init at91_add_device_hdmac(void)
 {
-#if defined(CONFIG_OF)
-        struct device_node *of_node =
-                of_find_node_by_name(NULL, "dma-controller");
-
-        if (of_node)
-                of_node_put(of_node);
-        else
-#endif
-                platform_device_register(&at_hdmac_device);
+        dma_cap_set(DMA_MEMCPY, atdma_pdata.cap_mask);
+        dma_cap_set(DMA_SLAVE, atdma_pdata.cap_mask);
+        platform_device_register(&at_hdmac_device);
 }
 #else
 void __init at91_add_device_hdmac(void) {}
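For context on the plumbing above: platform_device_register() hands at_hdmac_device to the driver core, and the DMA controller driver can then pick up atdma_pdata from dev.platform_data in its probe routine. A simplified, hypothetical sketch (the in-tree consumer is drivers/dma/at_hdmac.c; at_dma_probe_sketch is an illustrative name):

    static int at_dma_probe_sketch(struct platform_device *pdev)
    {
            struct at_dma_platform_data *pdata;

            /* board code attached atdma_pdata via at_hdmac_device.dev above */
            pdata = pdev->dev.platform_data;
            if (!pdata)
                    return -ENODEV;

            /* nr_channels == 8 and the DMA_MEMCPY/DMA_SLAVE capability bits
             * set by dma_cap_set() in at91_add_device_hdmac() arrive here */
            dev_info(&pdev->dev, "%d channels\n", pdata->nr_channels);
            return 0;
    }

The dma_cap_set() calls advertise the controller's capabilities in pdata->cap_mask so the dmaengine core can match channel requests against them.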
(diff truncated: 168 changed files in total; remaining files not shown)
