Commit

---
yaml
---
r: 262847
b: refs/heads/master
c: c44efba
h: refs/heads/master
i:
  262845: 8a5c7cd
  262843: 11cc138
  262839: f8698ce
  262831: 55c6f24
  262815: 50bdcb3
  262783: 9e33043
v: v3
Linus Torvalds committed Aug 13, 2011
1 parent ad9f843 commit 45f9ff5
Showing 138 changed files with 8,002 additions and 7,266 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 696314cf53b7b2d1f91af6da259533eb9611e7bf
refs/heads/master: c44efbaa0ef3cf53fbf6bb57c33d4f199b15f2da
29 changes: 29 additions & 0 deletions trunk/Documentation/networking/bonding.txt
@@ -238,6 +238,18 @@ ad_select

This option was added in bonding version 3.4.0.

all_slaves_active

Specifies that duplicate frames (received on inactive ports) should be
dropped (0) or delivered (1).

Normally, bonding will drop duplicate frames (received on inactive
ports), which is desirable for most users. However, in some
situations it is useful to have duplicate frames delivered.

The default value is 0 (drop duplicate frames received on inactive
ports).

arp_interval

Specifies the ARP link monitoring frequency in milliseconds.
@@ -433,6 +445,23 @@ miimon
determined. See the High Availability section for additional
information. The default value is 0.

min_links

Specifies the minimum number of links that must be active before
asserting carrier. It is similar to the Cisco EtherChannel min-links
feature. This allows setting the minimum number of member ports that
must be up (link-up state) before marking the bond device as up
(carrier on). This is useful for situations where higher level services
such as clustering want to ensure a minimum number of low bandwidth
links are active before switchover. This option only affects 802.3ad
mode.

The default value is 0. This will cause carrier to be asserted (for
802.3ad mode) whenever there is an active aggregator, regardless of the
number of available links in that aggregator. Note that, because an
aggregator cannot be active without at least one available link,
setting this option to 0 or to 1 has the exact same effect.

mode

Specifies one of the bonding policies. The default is
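
The two new bonding options documented above (all_slaves_active and
min_links) are also exposed through the bonding sysfs interface on
kernels that carry this patch. The following is a minimal userspace
sketch, not part of this commit, and it assumes a bond device named
bond0 already exists:

    /*
     * Illustrative only: configure the new options at runtime via sysfs.
     * Assumes a bond named "bond0" already exists.
     */
    #include <stdio.h>

    static int write_bonding_opt(const char *opt, const char *val)
    {
        char path[128];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/class/net/bond0/bonding/%s", opt);
        f = fopen(path, "w");
        if (!f) {
            perror(path);
            return -1;
        }
        fputs(val, f);
        fclose(f);
        return 0;
    }

    int main(void)
    {
        /* 802.3ad: require two active members before asserting carrier. */
        write_bonding_opt("min_links", "2");
        /* Deliver, instead of dropping, frames seen on inactive ports. */
        write_bonding_opt("all_slaves_active", "1");
        return 0;
    }

The same values can also be passed as module parameters when the
bonding driver is loaded.
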
371 changes: 371 additions & 0 deletions trunk/Documentation/networking/scaling.txt

Large diffs are not rendered by default.

6 changes: 3 additions & 3 deletions trunk/MAINTAINERS
@@ -3905,9 +3905,9 @@ F: arch/powerpc/platforms/powermac/
F: drivers/macintosh/

LINUX FOR POWERPC EMBEDDED MPC5XXX
M: Grant Likely <grant.likely@secretlab.ca>
M: Anatolij Gustschin <agust@denx.de>
L: linuxppc-dev@lists.ozlabs.org
T: git git://git.secretlab.ca/git/linux-2.6.git
T: git git://git.denx.de/linux-2.6-agust.git
S: Maintained
F: arch/powerpc/platforms/512x/
F: arch/powerpc/platforms/52xx/
@@ -7357,7 +7357,7 @@ THE REST
M: Linus Torvalds <torvalds@linux-foundation.org>
L: linux-kernel@vger.kernel.org
Q: http://patchwork.kernel.org/project/LKML/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
S: Buried alive in reporters
F: *
F: */
3 changes: 1 addition & 2 deletions trunk/arch/arm/Kconfig
@@ -195,8 +195,7 @@ config VECTORS_BASE
The base address of exception vectors.

config ARM_PATCH_PHYS_VIRT
bool "Patch physical to virtual translations at runtime (EXPERIMENTAL)"
depends on EXPERIMENTAL
bool "Patch physical to virtual translations at runtime"
depends on !XIP_KERNEL && MMU
depends on !ARCH_REALVIEW || !SPARSEMEM
help
4 changes: 4 additions & 0 deletions trunk/arch/arm/kernel/module.c
@@ -323,7 +323,11 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
#endif
s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
if (s && !is_smp())
#ifdef CONFIG_SMP_ON_UP
fixup_smp((void *)s->sh_addr, s->sh_size);
#else
return -EINVAL;
#endif
return 0;
}

18 changes: 9 additions & 9 deletions trunk/arch/arm/mach-s3c64xx/mach-crag6410.c
@@ -65,7 +65,7 @@
#include <plat/iic.h>
#include <plat/pm.h>

#include <sound/wm8915.h>
#include <sound/wm8996.h>
#include <sound/wm8962.h>
#include <sound/wm9081.h>

@@ -614,7 +614,7 @@ static struct wm831x_pdata glenfarclas_pmic_pdata __initdata = {
.disable_touch = true,
};

static struct wm8915_retune_mobile_config wm8915_retune[] = {
static struct wm8996_retune_mobile_config wm8996_retune[] = {
{
.name = "Sub LPF",
.rate = 48000,
@@ -635,12 +635,12 @@ static struct wm8915_retune_mobile_config wm8915_retune[] = {
},
};

static struct wm8915_pdata wm8915_pdata __initdata = {
static struct wm8996_pdata wm8996_pdata __initdata = {
.ldo_ena = S3C64XX_GPN(7),
.gpio_base = CODEC_GPIO_BASE,
.micdet_def = 1,
.inl_mode = WM8915_DIFFERRENTIAL_1,
.inr_mode = WM8915_DIFFERRENTIAL_1,
.inl_mode = WM8996_DIFFERRENTIAL_1,
.inr_mode = WM8996_DIFFERRENTIAL_1,

.irq_flags = IRQF_TRIGGER_RISING,

@@ -652,8 +652,8 @@ static struct wm8915_pdata wm8915_pdata __initdata = {
0x020e, /* GPIO5 == CLKOUT */
},

.retune_mobile_cfgs = wm8915_retune,
.num_retune_mobile_cfgs = ARRAY_SIZE(wm8915_retune),
.retune_mobile_cfgs = wm8996_retune,
.num_retune_mobile_cfgs = ARRAY_SIZE(wm8996_retune),
};

static struct wm8962_pdata wm8962_pdata __initdata = {
@@ -679,8 +679,8 @@ static struct i2c_board_info i2c_devs1[] __initdata = {
.platform_data = &glenfarclas_pmic_pdata },

{ I2C_BOARD_INFO("wm1250-ev1", 0x27) },
{ I2C_BOARD_INFO("wm8915", 0x1a),
.platform_data = &wm8915_pdata,
{ I2C_BOARD_INFO("wm8996", 0x1a),
.platform_data = &wm8996_pdata,
.irq = GLENFARCLAS_PMIC_IRQ_BASE + WM831X_IRQ_GPIO_2,
},
{ I2C_BOARD_INFO("wm9081", 0x6c),
56 changes: 41 additions & 15 deletions trunk/arch/arm/mm/alignment.c
@@ -22,6 +22,7 @@
#include <linux/sched.h>
#include <linux/uaccess.h>

#include <asm/system.h>
#include <asm/unaligned.h>

#include "fault.h"
@@ -95,6 +96,33 @@ static const char *usermode_action[] = {
"signal+warn"
};

/* Return true if and only if the ARMv6 unaligned access model is in use. */
static bool cpu_is_v6_unaligned(void)
{
return cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U);
}

static int safe_usermode(int new_usermode, bool warn)
{
/*
* ARMv6 and later CPUs can perform unaligned accesses for
* most single load and store instructions up to word size.
* LDM, STM, LDRD and STRD still need to be handled.
*
* Ignoring the alignment fault is not an option on these
* CPUs since we spin re-faulting the instruction without
* making any progress.
*/
if (cpu_is_v6_unaligned() && !(new_usermode & (UM_FIXUP | UM_SIGNAL))) {
new_usermode |= UM_FIXUP;

if (warn)
printk(KERN_WARNING "alignment: ignoring faults is unsafe on this CPU. Defaulting to fixup mode.\n");
}

return new_usermode;
}

static int alignment_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "User:\t\t%lu\n", ai_user);
@@ -125,7 +153,7 @@ static ssize_t alignment_proc_write(struct file *file, const char __user *buffer
if (get_user(mode, buffer))
return -EFAULT;
if (mode >= '0' && mode <= '5')
ai_usermode = mode - '0';
ai_usermode = safe_usermode(mode - '0', true);
}
return count;
}
@@ -886,9 +914,16 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (ai_usermode & UM_FIXUP)
goto fixup;

if (ai_usermode & UM_SIGNAL)
force_sig(SIGBUS, current);
else {
if (ai_usermode & UM_SIGNAL) {
siginfo_t si;

si.si_signo = SIGBUS;
si.si_errno = 0;
si.si_code = BUS_ADRALN;
si.si_addr = (void __user *)addr;

force_sig_info(si.si_signo, &si, current);
} else {
/*
* We're about to disable the alignment trap and return to
* user space. But if an interrupt occurs before actually
@@ -926,20 +961,11 @@ static int __init alignment_init(void)
return -ENOMEM;
#endif

/*
* ARMv6 and later CPUs can perform unaligned accesses for
* most single load and store instructions up to word size.
* LDM, STM, LDRD and STRD still need to be handled.
*
* Ignoring the alignment fault is not an option on these
* CPUs since we spin re-faulting the instruction without
* making any progress.
*/
if (cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U)) {
if (cpu_is_v6_unaligned()) {
cr_alignment &= ~CR_A;
cr_no_alignment &= ~CR_A;
set_cr(cr_alignment);
ai_usermode = UM_FIXUP;
ai_usermode = safe_usermode(ai_usermode, false);
}

hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN,
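
Since do_alignment() above now raises SIGBUS through force_sig_info()
with si_code set to BUS_ADRALN and si_addr set to the faulting address,
a userspace handler can report exactly where an unaligned
LDM/STM/LDRD/STRD trapped, once /proc/cpu/alignment is put into one of
the signal modes (values 4 or 5 accepted by alignment_proc_write()). A
rough illustrative sketch, not taken from the kernel tree:

    /*
     * Illustrative only; not from the kernel tree.  With /proc/cpu/alignment
     * in a "signal" mode, the kernel now delivers SIGBUS with
     * si_code == BUS_ADRALN and the faulting address in si_addr.
     */
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static void bus_handler(int sig, siginfo_t *info, void *ctx)
    {
        (void)sig;
        (void)ctx;
        /* fprintf() is not async-signal-safe, but fine for a demo that exits. */
        if (info->si_code == BUS_ADRALN)
            fprintf(stderr, "unaligned access at %p\n", info->si_addr);
        _exit(1);
    }

    int main(void)
    {
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = bus_handler;
        sa.sa_flags = SA_SIGINFO;
        sigaction(SIGBUS, &sa, NULL);

        /* ... run code that may trap on an unaligned LDM/STM/LDRD/STRD ... */
        return 0;
    }
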
2 changes: 1 addition & 1 deletion trunk/arch/arm/mm/init.c
@@ -441,7 +441,7 @@ static inline int free_area(unsigned long pfn, unsigned long end, char *s)
static inline void poison_init_mem(void *s, size_t count)
{
u32 *p = (u32 *)s;
while ((count = count - 4))
for (; count != 0; count -= 4)
*p++ = 0xe7fddef0;
}

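
The poison_init_mem() change above replaces a decrement-then-test while
loop with a plain for loop. A small standalone sketch, hypothetical and
not kernel code, shows what the corrected loop guarantees:

    /*
     * Illustrative only; mirrors the new poison_init_mem() loop.  The old
     * `while ((count = count - 4))` form decremented before storing, so it
     * poisoned one word too few, and for count == 0 the subtraction wrapped
     * and the loop ran far past the region.
     */
    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    static size_t poison(uint32_t *p, size_t count)
    {
        size_t words = 0;

        for (; count != 0; count -= 4) {   /* same shape as the new kernel loop */
            *p++ = 0xe7fddef0;             /* ARM undefined-instruction pattern */
            words++;
        }
        return words;
    }

    int main(void)
    {
        uint32_t buf[4];

        assert(poison(buf, sizeof(buf)) == 4);  /* whole region is poisoned */
        assert(poison(buf, 0) == 0);            /* empty region is a no-op */
        return 0;
    }
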
3 changes: 2 additions & 1 deletion trunk/arch/arm/mm/proc-arm946.S
@@ -410,6 +410,7 @@ __arm946_proc_info:
.long 0x41009460
.long 0xff00fff0
.long 0
.long 0
b __arm946_setup
.long cpu_arch_name
.long cpu_elf_name
@@ -418,6 +419,6 @@ __arm946_proc_info:
.long arm946_processor_functions
.long 0
.long 0
.long arm940_cache_fns
.long arm946_cache_fns
.size __arm946_proc_info, . - __arm946_proc_info

2 changes: 0 additions & 2 deletions trunk/arch/powerpc/include/asm/jump_label.h
@@ -22,7 +22,6 @@ static __always_inline bool arch_static_branch(struct jump_label_key *key)
asm goto("1:\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align 4\n\t"
JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
".popsection \n\t"
: : "i" (key) : : l_yes);
@@ -41,7 +40,6 @@ struct jump_entry {
jump_label_t code;
jump_label_t target;
jump_label_t key;
jump_label_t pad;
};

#endif /* _ASM_POWERPC_JUMP_LABEL_H */
10 changes: 0 additions & 10 deletions trunk/arch/powerpc/include/asm/kdump.h
@@ -3,17 +3,7 @@

#include <asm/page.h>

/*
* If CONFIG_RELOCATABLE is enabled we can place the kdump kernel anywhere.
* To keep enough space in the RMO for the first stage kernel on 64bit, we
* place it at 64MB. If CONFIG_RELOCATABLE is not enabled we must place
* the second stage at 32MB.
*/
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC64)
#define KDUMP_KERNELBASE 0x4000000
#else
#define KDUMP_KERNELBASE 0x2000000
#endif

/* How many bytes to reserve at zero for kdump. The reserve limit should
* be greater or equal to the trampoline's end address.
8 changes: 5 additions & 3 deletions trunk/arch/powerpc/include/asm/reg.h
@@ -1003,7 +1003,6 @@
#define PV_970 0x0039
#define PV_POWER5 0x003A
#define PV_POWER5p 0x003B
#define PV_POWER7 0x003F
#define PV_970FX 0x003C
#define PV_POWER6 0x003E
#define PV_POWER7 0x003F
@@ -1024,13 +1023,16 @@
#define mtmsrd(v) __mtmsrd((v), 0)
#define mtmsr(v) mtmsrd(v)
#else
#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v) : "memory")
#define mtmsr(v) asm volatile("mtmsr %0" : \
: "r" ((unsigned long)(v)) \
: "memory")
#endif

#define mfspr(rn) ({unsigned long rval; \
asm volatile("mfspr %0," __stringify(rn) \
: "=r" (rval)); rval;})
#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v)\
#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : \
: "r" ((unsigned long)(v)) \
: "memory")

#ifdef __powerpc64__
11 changes: 6 additions & 5 deletions trunk/arch/powerpc/kernel/cputable.c
@@ -2051,7 +2051,8 @@ static struct cpu_spec __initdata cpu_specs[] = {

static struct cpu_spec the_cpu_spec;

static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s)
static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
struct cpu_spec *s)
{
struct cpu_spec *t = &the_cpu_spec;
struct cpu_spec old;
@@ -2114,6 +2115,8 @@ static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s)
t->cpu_setup(offset, t);
}
#endif /* CONFIG_PPC64 || CONFIG_BOOKE */

return t;
}

struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
@@ -2124,10 +2127,8 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
s = PTRRELOC(s);

for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++) {
if ((pvr & s->pvr_mask) == s->pvr_value) {
setup_cpu_spec(offset, s);
return s;
}
if ((pvr & s->pvr_mask) == s->pvr_value)
return setup_cpu_spec(offset, s);
}

BUG();
2 changes: 2 additions & 0 deletions trunk/arch/powerpc/kernel/iomap.c
@@ -117,6 +117,7 @@ void ioport_unmap(void __iomem *addr)
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);

#ifdef CONFIG_PCI
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
{
resource_size_t start = pci_resource_start(dev, bar);
@@ -146,3 +147,4 @@ void pci_iounmap(struct pci_dev *dev, void __iomem *addr)

EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);
#endif /* CONFIG_PCI */
10 changes: 7 additions & 3 deletions trunk/arch/powerpc/kernel/machine_kexec.c
@@ -136,12 +136,16 @@ void __init reserve_crashkernel(void)
crashk_res.start = KDUMP_KERNELBASE;
#else
if (!crashk_res.start) {
#ifdef CONFIG_PPC64
/*
* unspecified address, choose a region of specified size
* can overlap with initrd (ignoring corruption when retained)
* ppc64 requires kernel and some stacks to be in first segemnt
* On 64bit we split the RMO in half but cap it at half of
* a small SLB (128MB) since the crash kernel needs to place
* itself and some stacks to be in the first segment.
*/
crashk_res.start = min(0x80000000ULL, (ppc64_rma_size / 2));
#else
crashk_res.start = KDUMP_KERNELBASE;
#endif
}

crash_base = PAGE_ALIGN(crashk_res.start);
(Diffs for the remaining changed files are not shown.)