diff --git a/[refs] b/[refs]
index c963a70308ee..4aaad6ca9ee9 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 0f177f873975fe7ece64d334887e83e081d88dec
+refs/heads/master: 73053d973dd6f56472309cffa5a5d15a62dd6f96
diff --git a/trunk/Documentation/DocBook/device-drivers.tmpl b/trunk/Documentation/DocBook/device-drivers.tmpl
index c36892c072da..7514dbf0a679 100644
--- a/trunk/Documentation/DocBook/device-drivers.tmpl
+++ b/trunk/Documentation/DocBook/device-drivers.tmpl
@@ -227,7 +227,7 @@ X!Isound/sound_firmware.c
16x50 UART Driver
!Edrivers/tty/serial/serial_core.c
-!Edrivers/tty/serial/8250/8250_core.c
+!Edrivers/tty/serial/8250/8250.c
diff --git a/trunk/Documentation/scsi/LICENSE.qla2xxx b/trunk/Documentation/scsi/LICENSE.qla2xxx
index 5020b7b5a244..27a91cf43d6d 100644
--- a/trunk/Documentation/scsi/LICENSE.qla2xxx
+++ b/trunk/Documentation/scsi/LICENSE.qla2xxx
@@ -1,4 +1,4 @@
-Copyright (c) 2003-2013 QLogic Corporation
+Copyright (c) 2003-2012 QLogic Corporation
QLogic Linux FC-FCoE Driver
This program includes a device driver for Linux 3.x.
diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS
index 8bdd7a7ef2f4..456c01c5dc6d 100644
--- a/trunk/MAINTAINERS
+++ b/trunk/MAINTAINERS
@@ -6631,7 +6631,7 @@ S: Supported
F: fs/reiserfs/
REGISTER MAP ABSTRACTION
-M: Mark Brown
+M: Mark Brown
T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap.git
S: Supported
F: drivers/base/regmap/
@@ -6957,6 +6957,7 @@ F: drivers/scsi/st*
SCTP PROTOCOL
M: Vlad Yasevich
+M: Sridhar Samudrala
M: Neil Horman
L: linux-sctp@vger.kernel.org
W: http://lksctp.sourceforge.net
@@ -7379,7 +7380,7 @@ F: sound/
SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC)
M: Liam Girdwood
-M: Mark Brown
+M: Mark Brown
T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
W: http://alsa-project.org/main/index.php/ASoC
@@ -7468,7 +7469,7 @@ F: drivers/clk/spear/
SPI SUBSYSTEM
M: Grant Likely
-M: Mark Brown
+M: Mark Brown
L: spi-devel-general@lists.sourceforge.net
Q: http://patchwork.kernel.org/project/spi-devel-general/list/
T: git git://git.secretlab.ca/git/linux-2.6.git
@@ -8713,7 +8714,7 @@ F: drivers/scsi/vmw_pvscsi.h
VOLTAGE AND CURRENT REGULATOR FRAMEWORK
M: Liam Girdwood
-M: Mark Brown
+M: Mark Brown
W: http://opensource.wolfsonmicro.com/node/15
W: http://www.slimlogic.co.uk/?p=48
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lrg/regulator.git
diff --git a/trunk/Makefile b/trunk/Makefile
index ac509b25b701..6db672b15bda 100644
--- a/trunk/Makefile
+++ b/trunk/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 9
SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc6
NAME = Unicycling Gorilla
# *DOCUMENTATION*
@@ -513,8 +513,7 @@ ifeq ($(KBUILD_EXTMOD),)
# Carefully list dependencies so we do not try to build scripts twice
# in parallel
PHONY += scripts
-scripts: scripts_basic include/config/auto.conf include/config/tristate.conf \
- asm-generic
+scripts: scripts_basic include/config/auto.conf include/config/tristate.conf
$(Q)$(MAKE) $(build)=$(@)
# Objects we will link into vmlinux / subdirs we need to visit
diff --git a/trunk/arch/arc/include/asm/irqflags.h b/trunk/arch/arc/include/asm/irqflags.h
index eac071668201..ccd84806b62f 100644
--- a/trunk/arch/arc/include/asm/irqflags.h
+++ b/trunk/arch/arc/include/asm/irqflags.h
@@ -39,7 +39,7 @@ static inline long arch_local_irq_save(void)
" flag.nz %0 \n"
: "=r"(temp), "=r"(flags)
: "n"((STATUS_E1_MASK | STATUS_E2_MASK))
- : "memory", "cc");
+ : "cc");
return flags;
}
@@ -53,8 +53,7 @@ static inline void arch_local_irq_restore(unsigned long flags)
__asm__ __volatile__(
" flag %0 \n"
:
- : "r"(flags)
- : "memory");
+ : "r"(flags));
}
/*
@@ -74,8 +73,7 @@ static inline void arch_local_irq_disable(void)
" and %0, %0, %1 \n"
" flag %0 \n"
: "=&r"(temp)
- : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))
- : "memory");
+ : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK)));
}
/*
@@ -87,9 +85,7 @@ static inline long arch_local_save_flags(void)
__asm__ __volatile__(
" lr %0, [status32] \n"
- : "=&r"(temp)
- :
- : "memory");
+ : "=&r"(temp));
return temp;
}
diff --git a/trunk/arch/arm/include/asm/pgtable-3level.h b/trunk/arch/arm/include/asm/pgtable-3level.h
index 86b8fe398b95..6ef8afd1b64c 100644
--- a/trunk/arch/arm/include/asm/pgtable-3level.h
+++ b/trunk/arch/arm/include/asm/pgtable-3level.h
@@ -111,7 +111,7 @@
#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
-#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
+#define L_PTE_S2_RDWR (_AT(pteval_t, 2) << 6) /* HAP[2:1] */
/*
* Hyp-mode PL2 PTE definitions for LPAE.
diff --git a/trunk/arch/arm/kvm/arm.c b/trunk/arch/arm/kvm/arm.c
index c1fe498983ac..5a936988eb24 100644
--- a/trunk/arch/arm/kvm/arm.c
+++ b/trunk/arch/arm/kvm/arm.c
@@ -201,7 +201,6 @@ int kvm_dev_ioctl_check_extension(long ext)
break;
case KVM_CAP_ARM_SET_DEVICE_ADDR:
r = 1;
- break;
case KVM_CAP_NR_VCPUS:
r = num_online_cpus();
break;
diff --git a/trunk/arch/arm/kvm/coproc.c b/trunk/arch/arm/kvm/coproc.c
index 7bed7556077a..4ea9a982269c 100644
--- a/trunk/arch/arm/kvm/coproc.c
+++ b/trunk/arch/arm/kvm/coproc.c
@@ -79,11 +79,11 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
u32 val;
int cpu;
+ cpu = get_cpu();
+
if (!p->is_write)
return read_from_write_only(vcpu, p);
- cpu = get_cpu();
-
cpumask_setall(&vcpu->arch.require_dcache_flush);
cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
diff --git a/trunk/arch/arm/mach-highbank/hotplug.c b/trunk/arch/arm/mach-highbank/hotplug.c
index f30c52843396..890cae23c12a 100644
--- a/trunk/arch/arm/mach-highbank/hotplug.c
+++ b/trunk/arch/arm/mach-highbank/hotplug.c
@@ -28,13 +28,11 @@ extern void secondary_startup(void);
*/
void __ref highbank_cpu_die(unsigned int cpu)
{
- flush_cache_all();
-
highbank_set_cpu_jump(cpu, phys_to_virt(0));
- highbank_set_core_pwr();
- cpu_do_idle();
+ flush_cache_louis();
+ highbank_set_core_pwr();
- /* We should never return from idle */
- panic("highbank: cpu %d unexpectedly exit from shutdown\n", cpu);
+ while (1)
+ cpu_do_idle();
}
diff --git a/trunk/arch/arm/mm/proc-arm920.S b/trunk/arch/arm/mm/proc-arm920.S
index 2556cf1c2da1..2c3b9421ab5e 100644
--- a/trunk/arch/arm/mm/proc-arm920.S
+++ b/trunk/arch/arm/mm/proc-arm920.S
@@ -387,7 +387,7 @@ ENTRY(cpu_arm920_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl cpu_arm920_suspend_size
.equ cpu_arm920_suspend_size, 4 * 3
-#ifdef CONFIG_ARM_CPU_SUSPEND
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_arm920_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c13, c0, 0 @ PID
diff --git a/trunk/arch/arm/mm/proc-arm926.S b/trunk/arch/arm/mm/proc-arm926.S
index 344c8a548cc0..f1803f7e2972 100644
--- a/trunk/arch/arm/mm/proc-arm926.S
+++ b/trunk/arch/arm/mm/proc-arm926.S
@@ -402,7 +402,7 @@ ENTRY(cpu_arm926_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl cpu_arm926_suspend_size
.equ cpu_arm926_suspend_size, 4 * 3
-#ifdef CONFIG_ARM_CPU_SUSPEND
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_arm926_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c13, c0, 0 @ PID
diff --git a/trunk/arch/arm/mm/proc-mohawk.S b/trunk/arch/arm/mm/proc-mohawk.S
index 0b60dd3d742a..82f9cdc751d6 100644
--- a/trunk/arch/arm/mm/proc-mohawk.S
+++ b/trunk/arch/arm/mm/proc-mohawk.S
@@ -350,7 +350,7 @@ ENTRY(cpu_mohawk_set_pte_ext)
.globl cpu_mohawk_suspend_size
.equ cpu_mohawk_suspend_size, 4 * 6
-#ifdef CONFIG_ARM_CPU_SUSPEND
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_mohawk_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/trunk/arch/arm/mm/proc-sa1100.S b/trunk/arch/arm/mm/proc-sa1100.S
index d92dfd081429..3aa0da11fd84 100644
--- a/trunk/arch/arm/mm/proc-sa1100.S
+++ b/trunk/arch/arm/mm/proc-sa1100.S
@@ -172,7 +172,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
.globl cpu_sa1100_suspend_size
.equ cpu_sa1100_suspend_size, 4 * 3
-#ifdef CONFIG_ARM_CPU_SUSPEND
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_sa1100_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c3, c0, 0 @ domain ID
diff --git a/trunk/arch/arm/mm/proc-v6.S b/trunk/arch/arm/mm/proc-v6.S
index 5c07ee4fe3eb..bcaaa8de9325 100644
--- a/trunk/arch/arm/mm/proc-v6.S
+++ b/trunk/arch/arm/mm/proc-v6.S
@@ -138,7 +138,7 @@ ENTRY(cpu_v6_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
.globl cpu_v6_suspend_size
.equ cpu_v6_suspend_size, 4 * 6
-#ifdef CONFIG_ARM_CPU_SUSPEND
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_v6_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
diff --git a/trunk/arch/arm/mm/proc-xsc3.S b/trunk/arch/arm/mm/proc-xsc3.S
index e8efd83b6f25..eb93d6487f35 100644
--- a/trunk/arch/arm/mm/proc-xsc3.S
+++ b/trunk/arch/arm/mm/proc-xsc3.S
@@ -413,7 +413,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
.globl cpu_xsc3_suspend_size
.equ cpu_xsc3_suspend_size, 4 * 6
-#ifdef CONFIG_ARM_CPU_SUSPEND
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_xsc3_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/trunk/arch/arm/mm/proc-xscale.S b/trunk/arch/arm/mm/proc-xscale.S
index e766f889bfd6..25510361aa18 100644
--- a/trunk/arch/arm/mm/proc-xscale.S
+++ b/trunk/arch/arm/mm/proc-xscale.S
@@ -528,7 +528,7 @@ ENTRY(cpu_xscale_set_pte_ext)
.globl cpu_xscale_suspend_size
.equ cpu_xscale_suspend_size, 4 * 6
-#ifdef CONFIG_ARM_CPU_SUSPEND
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_xscale_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/trunk/arch/avr32/include/asm/io.h b/trunk/arch/avr32/include/asm/io.h
index fc6483f83ccc..cf60d0a9f176 100644
--- a/trunk/arch/avr32/include/asm/io.h
+++ b/trunk/arch/avr32/include/asm/io.h
@@ -165,10 +165,6 @@ BUILDIO_IOPORT(l, u32)
#define readw_be __raw_readw
#define readl_be __raw_readl
-#define writeb_relaxed writeb
-#define writew_relaxed writew
-#define writel_relaxed writel
-
#define writeb_be __raw_writeb
#define writew_be __raw_writew
#define writel_be __raw_writel
diff --git a/trunk/arch/c6x/include/asm/irqflags.h b/trunk/arch/c6x/include/asm/irqflags.h
index 2c71d5634ec2..cf78e09e18c3 100644
--- a/trunk/arch/c6x/include/asm/irqflags.h
+++ b/trunk/arch/c6x/include/asm/irqflags.h
@@ -27,7 +27,7 @@ static inline unsigned long arch_local_save_flags(void)
/* set interrupt enabled status */
static inline void arch_local_irq_restore(unsigned long flags)
{
- asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags) : "memory");
+ asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags));
}
/* unconditionally enable interrupts */
diff --git a/trunk/arch/ia64/kernel/palinfo.c b/trunk/arch/ia64/kernel/palinfo.c
index 79521d5499f9..77597e5ea60a 100644
--- a/trunk/arch/ia64/kernel/palinfo.c
+++ b/trunk/arch/ia64/kernel/palinfo.c
@@ -849,6 +849,17 @@ static palinfo_entry_t palinfo_entries[]={
#define NR_PALINFO_ENTRIES (int) ARRAY_SIZE(palinfo_entries)
+/*
+ * this array is used to keep track of the proc entries we create. This is
+ * required in the module mode when we need to remove all entries. The procfs code
+ * does not do recursion of deletion
+ *
+ * Notes:
+ * - +1 accounts for the cpuN directory entry in /proc/pal
+ */
+#define NR_PALINFO_PROC_ENTRIES (NR_CPUS*(NR_PALINFO_ENTRIES+1))
+
+static struct proc_dir_entry *palinfo_proc_entries[NR_PALINFO_PROC_ENTRIES];
static struct proc_dir_entry *palinfo_dir;
/*
@@ -960,32 +971,60 @@ palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, voi
static void __cpuinit
create_palinfo_proc_entries(unsigned int cpu)
{
+# define CPUSTR "cpu%d"
+
pal_func_cpu_u_t f;
+ struct proc_dir_entry **pdir;
struct proc_dir_entry *cpu_dir;
int j;
- char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */
- sprintf(cpustr, "cpu%d", cpu);
+ char cpustr[sizeof(CPUSTR)];
+
+
+ /*
+ * we keep track of created entries in a depth-first order for
+ * cleanup purposes. Each entry is stored into palinfo_proc_entries
+ */
+ sprintf(cpustr,CPUSTR, cpu);
cpu_dir = proc_mkdir(cpustr, palinfo_dir);
- if (!cpu_dir)
- return;
f.req_cpu = cpu;
+ /*
+ * Compute the location to store per cpu entries
+ * We dont store the top level entry in this list, but
+ * remove it finally after removing all cpu entries.
+ */
+ pdir = &palinfo_proc_entries[cpu*(NR_PALINFO_ENTRIES+1)];
+ *pdir++ = cpu_dir;
for (j=0; j < NR_PALINFO_ENTRIES; j++) {
f.func_id = j;
- create_proc_read_entry(
- palinfo_entries[j].name, 0, cpu_dir,
- palinfo_read_entry, (void *)f.value);
+ *pdir = create_proc_read_entry(
+ palinfo_entries[j].name, 0, cpu_dir,
+ palinfo_read_entry, (void *)f.value);
+ pdir++;
}
}
static void
remove_palinfo_proc_entries(unsigned int hcpu)
{
- char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */
- sprintf(cpustr, "cpu%d", hcpu);
- remove_proc_subtree(cpustr, palinfo_dir);
+ int j;
+ struct proc_dir_entry *cpu_dir, **pdir;
+
+ pdir = &palinfo_proc_entries[hcpu*(NR_PALINFO_ENTRIES+1)];
+ cpu_dir = *pdir;
+ *pdir++=NULL;
+ for (j=0; j < (NR_PALINFO_ENTRIES); j++) {
+ if ((*pdir)) {
+ remove_proc_entry ((*pdir)->name, cpu_dir);
+ *pdir ++= NULL;
+ }
+ }
+
+ if (cpu_dir) {
+ remove_proc_entry(cpu_dir->name, palinfo_dir);
+ }
}
static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
@@ -1019,8 +1058,6 @@ palinfo_init(void)
printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION);
palinfo_dir = proc_mkdir("pal", NULL);
- if (!palinfo_dir)
- return -ENOMEM;
/* Create palinfo dirs in /proc for all online cpus */
for_each_online_cpu(i) {
@@ -1036,8 +1073,22 @@ palinfo_init(void)
static void __exit
palinfo_exit(void)
{
+ int i = 0;
+
+ /* remove all nodes: depth first pass. Could optimize this */
+ for_each_online_cpu(i) {
+ remove_palinfo_proc_entries(i);
+ }
+
+ /*
+ * Remove the top level entry finally
+ */
+ remove_proc_entry(palinfo_dir->name, NULL);
+
+ /*
+ * Unregister from cpu notifier callbacks
+ */
unregister_hotcpu_notifier(&palinfo_cpu_notifier);
- remove_proc_subtree("pal", NULL);
}
module_init(palinfo_init);
diff --git a/trunk/arch/m68k/include/asm/gpio.h b/trunk/arch/m68k/include/asm/gpio.h
index 8cc83431805b..4395ffc51fdb 100644
--- a/trunk/arch/m68k/include/asm/gpio.h
+++ b/trunk/arch/m68k/include/asm/gpio.h
@@ -86,24 +86,4 @@ static inline int gpio_cansleep(unsigned gpio)
return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio);
}
-static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
-{
- int err;
-
- err = gpio_request(gpio, label);
- if (err)
- return err;
-
- if (flags & GPIOF_DIR_IN)
- err = gpio_direction_input(gpio);
- else
- err = gpio_direction_output(gpio,
- (flags & GPIOF_INIT_HIGH) ? 1 : 0);
-
- if (err)
- gpio_free(gpio);
-
- return err;
-}
-
#endif
diff --git a/trunk/arch/powerpc/kernel/entry_64.S b/trunk/arch/powerpc/kernel/entry_64.S
index 04d69c4a5ac2..256c5bf0adb7 100644
--- a/trunk/arch/powerpc/kernel/entry_64.S
+++ b/trunk/arch/powerpc/kernel/entry_64.S
@@ -304,7 +304,7 @@ syscall_exit_work:
subi r12,r12,TI_FLAGS
4: /* Anything else left to do? */
- SET_DEFAULT_THREAD_PPR(r3, r10) /* Set thread.ppr = 3 */
+ SET_DEFAULT_THREAD_PPR(r3, r9) /* Set thread.ppr = 3 */
andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
beq .ret_from_except_lite
@@ -657,7 +657,7 @@ resume_kernel:
/* Clear _TIF_EMULATE_STACK_STORE flag */
lis r11,_TIF_EMULATE_STACK_STORE@h
addi r5,r9,TI_FLAGS
-0: ldarx r4,0,r5
+ ldarx r4,0,r5
andc r4,r4,r11
stdcx. r4,0,r5
bne- 0b
diff --git a/trunk/arch/powerpc/kernel/process.c b/trunk/arch/powerpc/kernel/process.c
index 16e77a81ab4f..59dd545fdde1 100644
--- a/trunk/arch/powerpc/kernel/process.c
+++ b/trunk/arch/powerpc/kernel/process.c
@@ -555,12 +555,10 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new)
new->thread.regs->msr |=
(MSR_FP | new->thread.fpexc_mode);
}
-#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) {
do_load_up_transact_altivec(&new->thread);
new->thread.regs->msr |= MSR_VEC;
}
-#endif
/* We may as well turn on VSX too since all the state is restored now */
if (msr & MSR_VSX)
new->thread.regs->msr |= MSR_VSX;
diff --git a/trunk/arch/powerpc/kernel/signal_32.c b/trunk/arch/powerpc/kernel/signal_32.c
index 95068bf569ad..3acb28e245b4 100644
--- a/trunk/arch/powerpc/kernel/signal_32.c
+++ b/trunk/arch/powerpc/kernel/signal_32.c
@@ -866,12 +866,10 @@ static long restore_tm_user_regs(struct pt_regs *regs,
do_load_up_transact_fpu(&current->thread);
regs->msr |= (MSR_FP | current->thread.fpexc_mode);
}
-#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) {
do_load_up_transact_altivec(&current->thread);
regs->msr |= MSR_VEC;
}
-#endif
return 0;
}
diff --git a/trunk/arch/powerpc/kernel/signal_64.c b/trunk/arch/powerpc/kernel/signal_64.c
index c1794286098c..995f8543cb57 100644
--- a/trunk/arch/powerpc/kernel/signal_64.c
+++ b/trunk/arch/powerpc/kernel/signal_64.c
@@ -522,12 +522,10 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
do_load_up_transact_fpu(&current->thread);
regs->msr |= (MSR_FP | current->thread.fpexc_mode);
}
-#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) {
do_load_up_transact_altivec(&current->thread);
regs->msr |= MSR_VEC;
}
-#endif
return err;
}
diff --git a/trunk/arch/powerpc/kernel/tm.S b/trunk/arch/powerpc/kernel/tm.S
index 2da67e7a16d5..84dbace657ce 100644
--- a/trunk/arch/powerpc/kernel/tm.S
+++ b/trunk/arch/powerpc/kernel/tm.S
@@ -309,7 +309,6 @@ _GLOBAL(tm_recheckpoint)
or r5, r6, r5 /* Set MSR.FP+.VSX/.VEC */
mtmsr r5
-#ifdef CONFIG_ALTIVEC
/* FP and VEC registers: These are recheckpointed from thread.fpr[]
* and thread.vr[] respectively. The thread.transact_fpr[] version
* is more modern, and will be loaded subsequently by any FPUnavailable
@@ -324,7 +323,6 @@ _GLOBAL(tm_recheckpoint)
REST_32VRS(0, r5, r3) /* r5 scratch, r3 THREAD ptr */
ld r5, THREAD_VRSAVE(r3)
mtspr SPRN_VRSAVE, r5
-#endif
dont_restore_vec:
andi. r0, r4, MSR_FP
diff --git a/trunk/arch/powerpc/kvm/e500.h b/trunk/arch/powerpc/kvm/e500.h
index 33db48a8ce24..41cefd43655f 100644
--- a/trunk/arch/powerpc/kvm/e500.h
+++ b/trunk/arch/powerpc/kvm/e500.h
@@ -26,20 +26,17 @@
#define E500_PID_NUM 3
#define E500_TLB_NUM 2
-/* entry is mapped somewhere in host TLB */
-#define E500_TLB_VALID (1 << 0)
-/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
-#define E500_TLB_BITMAP (1 << 1)
-/* TLB1 entry is mapped by host TLB0 */
+#define E500_TLB_VALID 1
+#define E500_TLB_BITMAP 2
#define E500_TLB_TLB0 (1 << 2)
struct tlbe_ref {
- pfn_t pfn; /* valid only for TLB0, except briefly */
- unsigned int flags; /* E500_TLB_* */
+ pfn_t pfn;
+ unsigned int flags; /* E500_TLB_* */
};
struct tlbe_priv {
- struct tlbe_ref ref;
+ struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
};
#ifdef CONFIG_KVM_E500V2
@@ -66,6 +63,17 @@ struct kvmppc_vcpu_e500 {
unsigned int gtlb_nv[E500_TLB_NUM];
+ /*
+ * information associated with each host TLB entry --
+ * TLB1 only for now. If/when guest TLB1 entries can be
+ * mapped with host TLB0, this will be used for that too.
+ *
+ * We don't want to use this for guest TLB0 because then we'd
+ * have the overhead of doing the translation again even if
+ * the entry is still in the guest TLB (e.g. we swapped out
+ * and back, and our host TLB entries got evicted).
+ */
+ struct tlbe_ref *tlb_refs[E500_TLB_NUM];
unsigned int host_tlb1_nv;
u32 svr;
diff --git a/trunk/arch/powerpc/kvm/e500_mmu_host.c b/trunk/arch/powerpc/kvm/e500_mmu_host.c
index 1c6a9d729df4..a222edfb9a9b 100644
--- a/trunk/arch/powerpc/kvm/e500_mmu_host.c
+++ b/trunk/arch/powerpc/kvm/e500_mmu_host.c
@@ -193,11 +193,8 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;
/* Don't bother with unmapped entries */
- if (!(ref->flags & E500_TLB_VALID)) {
- WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
- "%s: flags %x\n", __func__, ref->flags);
- WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
- }
+ if (!(ref->flags & E500_TLB_VALID))
+ return;
if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
@@ -251,7 +248,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
pfn_t pfn)
{
ref->pfn = pfn;
- ref->flags |= E500_TLB_VALID;
+ ref->flags = E500_TLB_VALID;
if (tlbe_is_writable(gtlbe))
kvm_set_pfn_dirty(pfn);
@@ -260,7 +257,6 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
if (ref->flags & E500_TLB_VALID) {
- /* FIXME: don't log bogus pfn for TLB1 */
trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
ref->flags = 0;
}
@@ -278,23 +274,36 @@ static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
- int tlbsel;
+ int tlbsel = 0;
int i;
- for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
- for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
- struct tlbe_ref *ref =
- &vcpu_e500->gtlb_priv[tlbsel][i].ref;
- kvmppc_e500_ref_release(ref);
- }
+ for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
+ struct tlbe_ref *ref =
+ &vcpu_e500->gtlb_priv[tlbsel][i].ref;
+ kvmppc_e500_ref_release(ref);
}
}
-void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
+static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
- struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ int stlbsel = 1;
+ int i;
+
kvmppc_e500_tlbil_all(vcpu_e500);
+
+ for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
+ struct tlbe_ref *ref =
+ &vcpu_e500->tlb_refs[stlbsel][i];
+ kvmppc_e500_ref_release(ref);
+ }
+
clear_tlb_privs(vcpu_e500);
+}
+
+void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ clear_tlb_refs(vcpu_e500);
clear_tlb1_bitmap(vcpu_e500);
}
@@ -449,6 +458,8 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
}
+ /* Drop old ref and setup new one. */
+ kvmppc_e500_ref_release(ref);
kvmppc_e500_ref_setup(ref, gtlbe, pfn);
kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -496,15 +507,14 @@ static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
vcpu_e500->host_tlb1_nv = 0;
+ vcpu_e500->tlb_refs[1][sesel] = *ref;
+ vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
+ vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
- unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
+ unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel];
vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
}
-
- vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
- vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
- vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
- WARN_ON(!(ref->flags & E500_TLB_VALID));
+ vcpu_e500->h2g_tlb1_rmap[sesel] = esel;
return sesel;
}
@@ -516,12 +526,13 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
- struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
+ struct tlbe_ref ref;
int sesel;
int r;
+ ref.flags = 0;
r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
- ref);
+ &ref);
if (r)
return r;
@@ -533,7 +544,7 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
}
/* Otherwise map into TLB1 */
- sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
+ sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel);
write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);
return 0;
@@ -554,7 +565,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
case 0:
priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
- /* Triggers after clear_tlb_privs or on initial mapping */
+ /* Triggers after clear_tlb_refs or on initial mapping */
if (!(priv->ref.flags & E500_TLB_VALID)) {
kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
} else {
@@ -654,16 +665,35 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
host_tlb_params[0].entries / host_tlb_params[0].ways;
host_tlb_params[1].sets = 1;
+ vcpu_e500->tlb_refs[0] =
+ kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
+ GFP_KERNEL);
+ if (!vcpu_e500->tlb_refs[0])
+ goto err;
+
+ vcpu_e500->tlb_refs[1] =
+ kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
+ GFP_KERNEL);
+ if (!vcpu_e500->tlb_refs[1])
+ goto err;
+
vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
host_tlb_params[1].entries,
GFP_KERNEL);
if (!vcpu_e500->h2g_tlb1_rmap)
- return -EINVAL;
+ goto err;
return 0;
+
+err:
+ kfree(vcpu_e500->tlb_refs[0]);
+ kfree(vcpu_e500->tlb_refs[1]);
+ return -EINVAL;
}
void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
kfree(vcpu_e500->h2g_tlb1_rmap);
+ kfree(vcpu_e500->tlb_refs[0]);
+ kfree(vcpu_e500->tlb_refs[1]);
}
diff --git a/trunk/arch/powerpc/kvm/e500mc.c b/trunk/arch/powerpc/kvm/e500mc.c
index 2f4baa074b2e..1f89d26e65fb 100644
--- a/trunk/arch/powerpc/kvm/e500mc.c
+++ b/trunk/arch/powerpc/kvm/e500mc.c
@@ -108,8 +108,6 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
}
-static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu);
-
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -138,11 +136,8 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
mtspr(SPRN_GESR, vcpu->arch.shared->esr);
- if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
- __get_cpu_var(last_vcpu_on_cpu) != vcpu) {
+ if (vcpu->arch.oldpir != mfspr(SPRN_PIR))
kvmppc_e500_tlbil_all(vcpu_e500);
- __get_cpu_var(last_vcpu_on_cpu) = vcpu;
- }
kvmppc_load_guest_fp(vcpu);
}
diff --git a/trunk/arch/powerpc/platforms/pseries/lpar.c b/trunk/arch/powerpc/platforms/pseries/lpar.c
index 299731e9036b..0da39fed355a 100644
--- a/trunk/arch/powerpc/platforms/pseries/lpar.c
+++ b/trunk/arch/powerpc/platforms/pseries/lpar.c
@@ -186,13 +186,7 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
(0x1UL << 4), &dummy1, &dummy2);
if (lpar_rc == H_SUCCESS)
return i;
-
- /*
- * The test for adjunct partition is performed before the
- * ANDCOND test. H_RESOURCE may be returned, so we need to
- * check for that as well.
- */
- BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);
+ BUG_ON(lpar_rc != H_NOT_FOUND);
slot_offset++;
slot_offset &= 0x7;
diff --git a/trunk/arch/s390/include/asm/io.h b/trunk/arch/s390/include/asm/io.h
index 379d96e2105e..27cb32185ce1 100644
--- a/trunk/arch/s390/include/asm/io.h
+++ b/trunk/arch/s390/include/asm/io.h
@@ -50,6 +50,10 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
#define ioremap_nocache(addr, size) ioremap(addr, size)
#define ioremap_wc ioremap_nocache
+/* TODO: s390 cannot support io_remap_pfn_range... */
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
{
return (void __iomem *) offset;
diff --git a/trunk/arch/s390/include/asm/pgtable.h b/trunk/arch/s390/include/asm/pgtable.h
index 3cb47cf02530..4a5443118cfb 100644
--- a/trunk/arch/s390/include/asm/pgtable.h
+++ b/trunk/arch/s390/include/asm/pgtable.h
@@ -57,10 +57,6 @@ extern unsigned long zero_page_mask;
(((unsigned long)(vaddr)) &zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
-/* TODO: s390 cannot support io_remap_pfn_range... */
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
- remap_pfn_range(vma, vaddr, pfn, size, prot)
-
#endif /* !__ASSEMBLY__ */
/*
diff --git a/trunk/arch/tile/include/asm/irqflags.h b/trunk/arch/tile/include/asm/irqflags.h
index c96f9bbb760d..241c0bb60b12 100644
--- a/trunk/arch/tile/include/asm/irqflags.h
+++ b/trunk/arch/tile/include/asm/irqflags.h
@@ -40,15 +40,7 @@
#include
#include
-/*
- * Set and clear kernel interrupt masks.
- *
- * NOTE: __insn_mtspr() is a compiler builtin marked as a memory
- * clobber. We rely on it being equivalent to a compiler barrier in
- * this code since arch_local_irq_save() and friends must act as
- * compiler barriers. This compiler semantic is baked into enough
- * places that the compiler will maintain it going forward.
- */
+/* Set and clear kernel interrupt masks. */
#if CHIP_HAS_SPLIT_INTR_MASK()
#if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32
# error Fix assumptions about which word various interrupts are in
diff --git a/trunk/arch/x86/include/asm/paravirt.h b/trunk/arch/x86/include/asm/paravirt.h
index 7361e47db79f..5edd1742cfd0 100644
--- a/trunk/arch/x86/include/asm/paravirt.h
+++ b/trunk/arch/x86/include/asm/paravirt.h
@@ -703,10 +703,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}
-static inline void arch_flush_lazy_mmu_mode(void)
-{
- PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
-}
+void arch_flush_lazy_mmu_mode(void);
static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
phys_addr_t phys, pgprot_t flags)
diff --git a/trunk/arch/x86/include/asm/paravirt_types.h b/trunk/arch/x86/include/asm/paravirt_types.h
index b3b0ec1dac86..142236ed83af 100644
--- a/trunk/arch/x86/include/asm/paravirt_types.h
+++ b/trunk/arch/x86/include/asm/paravirt_types.h
@@ -91,7 +91,6 @@ struct pv_lazy_ops {
/* Set deferred update mode, used for batching operations. */
void (*enter)(void);
void (*leave)(void);
- void (*flush)(void);
};
struct pv_time_ops {
@@ -680,7 +679,6 @@ void paravirt_end_context_switch(struct task_struct *next);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
-void paravirt_flush_lazy_mmu(void);
void _paravirt_nop(void);
u32 _paravirt_ident_32(u32);
diff --git a/trunk/arch/x86/include/asm/tlb.h b/trunk/arch/x86/include/asm/tlb.h
index c7797307fc2b..4fef20773b8f 100644
--- a/trunk/arch/x86/include/asm/tlb.h
+++ b/trunk/arch/x86/include/asm/tlb.h
@@ -7,7 +7,7 @@
#define tlb_flush(tlb) \
{ \
- if (!tlb->fullmm && !tlb->need_flush_all) \
+ if (tlb->fullmm == 0) \
flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \
else \
flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \
diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c b/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 26830f3af0df..b05a575d56f4 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -314,11 +314,10 @@ int intel_pmu_drain_bts_buffer(void)
if (top <= at)
return 0;
- memset(&regs, 0, sizeof(regs));
-
ds->bts_index = ds->bts_buffer_base;
perf_sample_data_init(&data, 0, event->hw.last_period);
+ regs.ip = 0;
/*
* Prepare a generic sample, i.e. fill in the invariant fields.
diff --git a/trunk/arch/x86/kernel/paravirt.c b/trunk/arch/x86/kernel/paravirt.c
index 8bfb335f74bb..17fff18a1031 100644
--- a/trunk/arch/x86/kernel/paravirt.c
+++ b/trunk/arch/x86/kernel/paravirt.c
@@ -263,18 +263,6 @@ void paravirt_leave_lazy_mmu(void)
leave_lazy(PARAVIRT_LAZY_MMU);
}
-void paravirt_flush_lazy_mmu(void)
-{
- preempt_disable();
-
- if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
- arch_leave_lazy_mmu_mode();
- arch_enter_lazy_mmu_mode();
- }
-
- preempt_enable();
-}
-
void paravirt_start_context_switch(struct task_struct *prev)
{
BUG_ON(preemptible());
@@ -304,6 +292,18 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
return this_cpu_read(paravirt_lazy_mode);
}
+void arch_flush_lazy_mmu_mode(void)
+{
+ preempt_disable();
+
+ if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+ arch_leave_lazy_mmu_mode();
+ arch_enter_lazy_mmu_mode();
+ }
+
+ preempt_enable();
+}
+
struct pv_info pv_info = {
.name = "bare hardware",
.paravirt_enabled = 0,
@@ -475,7 +475,6 @@ struct pv_mmu_ops pv_mmu_ops = {
.lazy_mode = {
.enter = paravirt_nop,
.leave = paravirt_nop,
- .flush = paravirt_nop,
},
.set_fixmap = native_set_fixmap,
diff --git a/trunk/arch/x86/lguest/boot.c b/trunk/arch/x86/lguest/boot.c
index 7114c63f047d..1cbd89ca5569 100644
--- a/trunk/arch/x86/lguest/boot.c
+++ b/trunk/arch/x86/lguest/boot.c
@@ -1334,7 +1334,6 @@ __init void lguest_init(void)
pv_mmu_ops.read_cr3 = lguest_read_cr3;
pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
- pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu;
pv_mmu_ops.pte_update = lguest_pte_update;
pv_mmu_ops.pte_update_defer = lguest_pte_update;
diff --git a/trunk/arch/x86/mm/fault.c b/trunk/arch/x86/mm/fault.c
index 0e883364abb5..2b97525246d4 100644
--- a/trunk/arch/x86/mm/fault.c
+++ b/trunk/arch/x86/mm/fault.c
@@ -378,12 +378,10 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
if (pgd_none(*pgd_ref))
return -1;
- if (pgd_none(*pgd)) {
+ if (pgd_none(*pgd))
set_pgd(pgd, *pgd_ref);
- arch_flush_lazy_mmu_mode();
- } else {
+ else
BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
- }
/*
* Below here mismatches are bugs because these lower tables
diff --git a/trunk/arch/x86/mm/pageattr-test.c b/trunk/arch/x86/mm/pageattr-test.c
index 0e38951e65eb..b0086567271c 100644
--- a/trunk/arch/x86/mm/pageattr-test.c
+++ b/trunk/arch/x86/mm/pageattr-test.c
@@ -68,7 +68,7 @@ static int print_split(struct split_state *s)
s->gpg++;
i += GPS/PAGE_SIZE;
} else if (level == PG_LEVEL_2M) {
- if ((pte_val(*pte) & _PAGE_PRESENT) && !(pte_val(*pte) & _PAGE_PSE)) {
+ if (!(pte_val(*pte) & _PAGE_PSE)) {
printk(KERN_ERR
"%lx level %d but not PSE %Lx\n",
addr, level, (u64)pte_val(*pte));
diff --git a/trunk/arch/x86/mm/pageattr.c b/trunk/arch/x86/mm/pageattr.c
index fb4e73ec24d8..091934e1d0d9 100644
--- a/trunk/arch/x86/mm/pageattr.c
+++ b/trunk/arch/x86/mm/pageattr.c
@@ -467,7 +467,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
* We are safe now. Check whether the new pgprot is the same:
*/
old_pte = *kpte;
- old_prot = req_prot = pte_pgprot(old_pte);
+ old_prot = new_prot = req_prot = pte_pgprot(old_pte);
pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
@@ -478,12 +478,12 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
* a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
* for the ancient hardware that doesn't support it.
*/
- if (pgprot_val(req_prot) & _PAGE_PRESENT)
- pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
+ if (pgprot_val(new_prot) & _PAGE_PRESENT)
+ pgprot_val(new_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
else
- pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
+ pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
- req_prot = canon_pgprot(req_prot);
+ new_prot = canon_pgprot(new_prot);
/*
* old_pte points to the large page base address. So we need
@@ -1413,8 +1413,6 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
* but that can deadlock->flush only current cpu:
*/
__flush_tlb_all();
-
- arch_flush_lazy_mmu_mode();
}
#ifdef CONFIG_HIBERNATION
diff --git a/trunk/arch/x86/mm/pgtable.c b/trunk/arch/x86/mm/pgtable.c
index 17fda6a8b3c2..193350b51f90 100644
--- a/trunk/arch/x86/mm/pgtable.c
+++ b/trunk/arch/x86/mm/pgtable.c
@@ -58,13 +58,6 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
- /*
- * NOTE! For PAE, any changes to the top page-directory-pointer-table
- * entries need a full cr3 reload to flush.
- */
-#ifdef CONFIG_X86_PAE
- tlb->need_flush_all = 1;
-#endif
tlb_remove_page(tlb, virt_to_page(pmd));
}
diff --git a/trunk/arch/x86/xen/mmu.c b/trunk/arch/x86/xen/mmu.c
index e006c18d288a..6afbb2ca9a0a 100644
--- a/trunk/arch/x86/xen/mmu.c
+++ b/trunk/arch/x86/xen/mmu.c
@@ -1748,18 +1748,14 @@ static void *m2v(phys_addr_t maddr)
}
/* Set the page permissions on an identity-mapped pages */
-static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
+static void set_page_prot(void *addr, pgprot_t prot)
{
unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
pte_t pte = pfn_pte(pfn, prot);
- if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
+ if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
BUG();
}
-static void set_page_prot(void *addr, pgprot_t prot)
-{
- return set_page_prot_flags(addr, prot, UVMF_NONE);
-}
#ifdef CONFIG_X86_32
static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
{
@@ -1843,12 +1839,12 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
unsigned long addr)
{
if (*pt_base == PFN_DOWN(__pa(addr))) {
- set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
+ set_page_prot((void *)addr, PAGE_KERNEL);
clear_page((void *)addr);
(*pt_base)++;
}
if (*pt_end == PFN_DOWN(__pa(addr))) {
- set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
+ set_page_prot((void *)addr, PAGE_KERNEL);
clear_page((void *)addr);
(*pt_end)--;
}
@@ -2200,7 +2196,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
.lazy_mode = {
.enter = paravirt_enter_lazy_mmu,
.leave = xen_leave_lazy_mmu,
- .flush = paravirt_flush_lazy_mmu,
},
.set_fixmap = xen_set_fixmap,
diff --git a/trunk/block/blk-core.c b/trunk/block/blk-core.c
index 7c288358a745..074b758efc42 100644
--- a/trunk/block/blk-core.c
+++ b/trunk/block/blk-core.c
@@ -39,7 +39,6 @@
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
-EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
DEFINE_IDA(blk_queue_ida);
diff --git a/trunk/block/blk-sysfs.c b/trunk/block/blk-sysfs.c
index 5efc5a647183..6206a934eb8c 100644
--- a/trunk/block/blk-sysfs.c
+++ b/trunk/block/blk-sysfs.c
@@ -229,8 +229,6 @@ queue_store_##name(struct request_queue *q, const char *page, size_t count) \
unsigned long val; \
ssize_t ret; \
ret = queue_var_store(&val, page, count); \
- if (ret < 0) \
- return ret; \
if (neg) \
val = !val; \
\
diff --git a/trunk/block/partition-generic.c b/trunk/block/partition-generic.c
index 789cdea05893..ae95ee6a58aa 100644
--- a/trunk/block/partition-generic.c
+++ b/trunk/block/partition-generic.c
@@ -257,6 +257,7 @@ void delete_partition(struct gendisk *disk, int partno)
hd_struct_put(part);
}
+EXPORT_SYMBOL(delete_partition);
static ssize_t whole_disk_show(struct device *dev,
struct device_attribute *attr, char *buf)
diff --git a/trunk/crypto/gcm.c b/trunk/crypto/gcm.c
index 13ccbda34ff9..137ad1ec5438 100644
--- a/trunk/crypto/gcm.c
+++ b/trunk/crypto/gcm.c
@@ -44,7 +44,6 @@ struct crypto_rfc4543_ctx {
struct crypto_rfc4543_req_ctx {
u8 auth_tag[16];
- u8 assocbuf[32];
struct scatterlist cipher[1];
struct scatterlist payload[2];
struct scatterlist assoc[2];
@@ -1134,19 +1133,9 @@ static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2);
assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);
- if (req->assoc->length == req->assoclen) {
- sg_init_table(assoc, 2);
- sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
- req->assoc->offset);
- } else {
- BUG_ON(req->assoclen > sizeof(rctx->assocbuf));
-
- scatterwalk_map_and_copy(rctx->assocbuf, req->assoc, 0,
- req->assoclen, 0);
-
- sg_init_table(assoc, 2);
- sg_set_buf(assoc, rctx->assocbuf, req->assoclen);
- }
+ sg_init_table(assoc, 2);
+ sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
+ req->assoc->offset);
scatterwalk_crypto_chain(assoc, payload, 0, 2);
aead_request_set_tfm(subreq, ctx->child);
diff --git a/trunk/drivers/ata/ata_piix.c b/trunk/drivers/ata/ata_piix.c
index 2f48123d74c4..ffdd32d22602 100644
--- a/trunk/drivers/ata/ata_piix.c
+++ b/trunk/drivers/ata/ata_piix.c
@@ -150,7 +150,6 @@ enum piix_controller_ids {
tolapai_sata,
piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */
ich8_sata_snb,
- ich8_2port_sata_snb,
};
struct piix_map_db {
@@ -305,7 +304,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
/* SATA Controller IDE (Lynx Point) */
{ 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (Lynx Point) */
- { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
+ { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (Lynx Point) */
{ 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (Lynx Point-LP) */
@@ -440,7 +439,6 @@ static const struct piix_map_db *piix_map_db_table[] = {
[ich8m_apple_sata] = &ich8m_apple_map_db,
[tolapai_sata] = &tolapai_map_db,
[ich8_sata_snb] = &ich8_map_db,
- [ich8_2port_sata_snb] = &ich8_2port_map_db,
};
static struct pci_bits piix_enable_bits[] = {
@@ -1244,16 +1242,6 @@ static struct ata_port_info piix_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &piix_sata_ops,
},
-
- [ich8_2port_sata_snb] =
- {
- .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR
- | PIIX_FLAG_PIO16,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &piix_sata_ops,
- },
};
#define AHCI_PCI_BAR 5
diff --git a/trunk/drivers/ata/libata-core.c b/trunk/drivers/ata/libata-core.c
index 63c743baf920..497adea1f0d6 100644
--- a/trunk/drivers/ata/libata-core.c
+++ b/trunk/drivers/ata/libata-core.c
@@ -2329,7 +2329,7 @@ int ata_dev_configure(struct ata_device *dev)
* from SATA Settings page of Identify Device Data Log.
*/
if (ata_id_has_devslp(dev->id)) {
- u8 *sata_setting = ap->sector_buf;
+ u8 sata_setting[ATA_SECT_SIZE];
int i, j;
dev->flags |= ATA_DFLAG_DEVSLP;
@@ -2439,9 +2439,6 @@ int ata_dev_configure(struct ata_device *dev)
dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
dev->max_sectors);
- if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
- dev->max_sectors = ATA_MAX_SECTORS_LBA48;
-
if (ap->ops->dev_config)
ap->ops->dev_config(dev);
@@ -4103,7 +4100,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* Weird ATAPI devices */
{ "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
{ "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
- { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
/* Devices we expect to fail diagnostics */
diff --git a/trunk/drivers/ata/libata-scsi.c b/trunk/drivers/ata/libata-scsi.c
index ff44787e5a45..318b41358187 100644
--- a/trunk/drivers/ata/libata-scsi.c
+++ b/trunk/drivers/ata/libata-scsi.c
@@ -532,8 +532,8 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
struct scsi_sense_hdr sshdr;
scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
&sshdr);
- if (sshdr.sense_key == RECOVERED_ERROR &&
- sshdr.asc == 0 && sshdr.ascq == 0x1d)
+ if (sshdr.sense_key == 0 &&
+ sshdr.asc == 0 && sshdr.ascq == 0)
cmd_result &= ~SAM_STAT_CHECK_CONDITION;
}
@@ -618,8 +618,8 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
struct scsi_sense_hdr sshdr;
scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
&sshdr);
- if (sshdr.sense_key == RECOVERED_ERROR &&
- sshdr.asc == 0 && sshdr.ascq == 0x1d)
+ if (sshdr.sense_key == 0 &&
+ sshdr.asc == 0 && sshdr.ascq == 0)
cmd_result &= ~SAM_STAT_CHECK_CONDITION;
}
diff --git a/trunk/drivers/base/regmap/regmap.c b/trunk/drivers/base/regmap/regmap.c
index 58cfb3232428..d34adef1e63e 100644
--- a/trunk/drivers/base/regmap/regmap.c
+++ b/trunk/drivers/base/regmap/regmap.c
@@ -943,8 +943,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
unsigned int ival;
int val_bytes = map->format.val_bytes;
for (i = 0; i < val_len / val_bytes; i++) {
- memcpy(map->work_buf, val + (i * val_bytes), val_bytes);
- ival = map->format.parse_val(map->work_buf);
+ ival = map->format.parse_val(val + (i * val_bytes));
ret = regcache_write(map, reg + (i * map->reg_stride),
ival);
if (ret) {
diff --git a/trunk/drivers/block/loop.c b/trunk/drivers/block/loop.c
index dfe758382eaf..2c127f9c3f3b 100644
--- a/trunk/drivers/block/loop.c
+++ b/trunk/drivers/block/loop.c
@@ -1051,12 +1051,29 @@ static int loop_clr_fd(struct loop_device *lo)
lo->lo_state = Lo_unbound;
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
- if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
- ioctl_by_bdev(bdev, BLKRRPART, 0);
lo->lo_flags = 0;
if (!part_shift)
lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
mutex_unlock(&lo->lo_ctl_mutex);
+
+ /*
+ * Remove all partitions, since BLKRRPART won't remove user
+ * added partitions when max_part=0
+ */
+ if (bdev) {
+ struct disk_part_iter piter;
+ struct hd_struct *part;
+
+ mutex_lock_nested(&bdev->bd_mutex, 1);
+ invalidate_partition(bdev->bd_disk, 0);
+ disk_part_iter_init(&piter, bdev->bd_disk,
+ DISK_PITER_INCL_EMPTY);
+ while ((part = disk_part_iter_next(&piter)))
+ delete_partition(bdev->bd_disk, part->partno);
+ disk_part_iter_exit(&piter);
+ mutex_unlock(&bdev->bd_mutex);
+ }
+
/*
* Need not hold lo_ctl_mutex to fput backing file.
* Calling fput holding lo_ctl_mutex triggers a circular
diff --git a/trunk/drivers/block/mtip32xx/mtip32xx.c b/trunk/drivers/block/mtip32xx/mtip32xx.c
index 32c678028e53..92250af84e7d 100644
--- a/trunk/drivers/block/mtip32xx/mtip32xx.c
+++ b/trunk/drivers/block/mtip32xx/mtip32xx.c
@@ -81,17 +81,12 @@
/* Device instance number, incremented each time a device is probed. */
static int instance;
-struct list_head online_list;
-struct list_head removing_list;
-spinlock_t dev_lock;
-
/*
* Global variable used to hold the major block device number
* allocated in mtip_init().
*/
static int mtip_major;
static struct dentry *dfs_parent;
-static struct dentry *dfs_device_status;
static u32 cpu_use[NR_CPUS];
@@ -248,31 +243,40 @@ static inline void release_slot(struct mtip_port *port, int tag)
/*
* Reset the HBA (without sleeping)
*
+ * Just like hba_reset, except does not call sleep, so can be
+ * run from interrupt/tasklet context.
+ *
* @dd Pointer to the driver data structure.
*
* return value
* 0 The reset was successful.
* -1 The HBA Reset bit did not clear.
*/
-static int mtip_hba_reset(struct driver_data *dd)
+static int hba_reset_nosleep(struct driver_data *dd)
{
unsigned long timeout;
+ /* Chip quirk: quiesce any chip function */
+ mdelay(10);
+
/* Set the reset bit */
writel(HOST_RESET, dd->mmio + HOST_CTL);
/* Flush */
readl(dd->mmio + HOST_CTL);
- /* Spin for up to 2 seconds, waiting for reset acknowledgement */
- timeout = jiffies + msecs_to_jiffies(2000);
- do {
- mdelay(10);
- if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
- return -1;
+ /*
+ * Wait 10ms then spin for up to 1 second
+ * waiting for reset acknowledgement
+ */
+ timeout = jiffies + msecs_to_jiffies(1000);
+ mdelay(10);
+ while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
+ && time_before(jiffies, timeout))
+ mdelay(1);
- } while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
- && time_before(jiffies, timeout));
+ if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
+ return -1;
if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
return -1;
@@ -477,7 +481,7 @@ static void mtip_restart_port(struct mtip_port *port)
dev_warn(&port->dd->pdev->dev,
"PxCMD.CR not clear, escalating reset\n");
- if (mtip_hba_reset(port->dd))
+ if (hba_reset_nosleep(port->dd))
dev_err(&port->dd->pdev->dev,
"HBA reset escalation failed.\n");
@@ -523,26 +527,6 @@ static void mtip_restart_port(struct mtip_port *port)
}
-static int mtip_device_reset(struct driver_data *dd)
-{
- int rv = 0;
-
- if (mtip_check_surprise_removal(dd->pdev))
- return 0;
-
- if (mtip_hba_reset(dd) < 0)
- rv = -EFAULT;
-
- mdelay(1);
- mtip_init_port(dd->port);
- mtip_start_port(dd->port);
-
- /* Enable interrupts on the HBA. */
- writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
- dd->mmio + HOST_CTL);
- return rv;
-}
-
/*
* Helper function for tag logging
*/
@@ -648,7 +632,7 @@ static void mtip_timeout_function(unsigned long int data)
if (cmdto_cnt) {
print_tags(port->dd, "timed out", tagaccum, cmdto_cnt);
if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
- mtip_device_reset(port->dd);
+ mtip_restart_port(port);
wake_up_interruptible(&port->svc_wait);
}
clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
@@ -1299,11 +1283,11 @@ static int mtip_exec_internal_command(struct mtip_port *port,
int rv = 0, ready2go = 1;
struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL];
unsigned long to;
- struct driver_data *dd = port->dd;
/* Make sure the buffer is 8 byte aligned. This is asic specific. */
if (buffer & 0x00000007) {
- dev_err(&dd->pdev->dev, "SG buffer is not 8 byte aligned\n");
+ dev_err(&port->dd->pdev->dev,
+ "SG buffer is not 8 byte aligned\n");
return -EFAULT;
}
@@ -1316,21 +1300,23 @@ static int mtip_exec_internal_command(struct mtip_port *port,
mdelay(100);
} while (time_before(jiffies, to));
if (!ready2go) {
- dev_warn(&dd->pdev->dev,
+ dev_warn(&port->dd->pdev->dev,
"Internal cmd active. new cmd [%02X]\n", fis->command);
return -EBUSY;
}
set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
port->ic_pause_timer = 0;
- clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
- clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
+ if (fis->command == ATA_CMD_SEC_ERASE_UNIT)
+ clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
+ else if (fis->command == ATA_CMD_DOWNLOAD_MICRO)
+ clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
if (atomic == GFP_KERNEL) {
if (fis->command != ATA_CMD_STANDBYNOW1) {
/* wait for io to complete if non atomic */
if (mtip_quiesce_io(port, 5000) < 0) {
- dev_warn(&dd->pdev->dev,
+ dev_warn(&port->dd->pdev->dev,
"Failed to quiesce IO\n");
release_slot(port, MTIP_TAG_INTERNAL);
clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
@@ -1375,84 +1361,58 @@ static int mtip_exec_internal_command(struct mtip_port *port,
/* Issue the command to the hardware */
mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
+ /* Poll if atomic, wait_for_completion otherwise */
if (atomic == GFP_KERNEL) {
/* Wait for the command to complete or timeout. */
- if (wait_for_completion_interruptible_timeout(
+ if (wait_for_completion_timeout(
&wait,
- msecs_to_jiffies(timeout)) <= 0) {
- if (rv == -ERESTARTSYS) { /* interrupted */
- dev_err(&dd->pdev->dev,
- "Internal command [%02X] was interrupted after %lu ms\n",
- fis->command, timeout);
- rv = -EINTR;
- goto exec_ic_exit;
- } else if (rv == 0) /* timeout */
- dev_err(&dd->pdev->dev,
- "Internal command did not complete [%02X] within timeout of %lu ms\n",
- fis->command, timeout);
- else
- dev_err(&dd->pdev->dev,
- "Internal command [%02X] wait returned code [%d] after %lu ms - unhandled\n",
- fis->command, rv, timeout);
-
- if (mtip_check_surprise_removal(dd->pdev) ||
+ msecs_to_jiffies(timeout)) == 0) {
+ dev_err(&port->dd->pdev->dev,
+ "Internal command did not complete [%d] "
+ "within timeout of %lu ms\n",
+ atomic, timeout);
+ if (mtip_check_surprise_removal(port->dd->pdev) ||
test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
- &dd->dd_flag)) {
- dev_err(&dd->pdev->dev,
- "Internal command [%02X] wait returned due to SR\n",
- fis->command);
+ &port->dd->dd_flag)) {
rv = -ENXIO;
goto exec_ic_exit;
}
- mtip_device_reset(dd); /* recover from timeout issue */
rv = -EAGAIN;
- goto exec_ic_exit;
}
} else {
- u32 hba_stat, port_stat;
-
/* Spin for checking if command still outstanding */
timeout = jiffies + msecs_to_jiffies(timeout);
while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL])
& (1 << MTIP_TAG_INTERNAL))
&& time_before(jiffies, timeout)) {
- if (mtip_check_surprise_removal(dd->pdev)) {
+ if (mtip_check_surprise_removal(port->dd->pdev)) {
rv = -ENXIO;
goto exec_ic_exit;
}
if ((fis->command != ATA_CMD_STANDBYNOW1) &&
test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
- &dd->dd_flag)) {
+ &port->dd->dd_flag)) {
rv = -ENXIO;
goto exec_ic_exit;
}
- port_stat = readl(port->mmio + PORT_IRQ_STAT);
- if (!port_stat)
- continue;
-
- if (port_stat & PORT_IRQ_ERR) {
- dev_err(&dd->pdev->dev,
- "Internal command [%02X] failed\n",
- fis->command);
- mtip_device_reset(dd);
- rv = -EIO;
- goto exec_ic_exit;
- } else {
- writel(port_stat, port->mmio + PORT_IRQ_STAT);
- hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
- if (hba_stat)
- writel(hba_stat,
- dd->mmio + HOST_IRQ_STAT);
+ if (readl(port->mmio + PORT_IRQ_STAT) & PORT_IRQ_ERR) {
+ atomic_inc(&int_cmd->active); /* error */
+ break;
}
- break;
}
}
+ if (atomic_read(&int_cmd->active) > 1) {
+ dev_err(&port->dd->pdev->dev,
+ "Internal command [%02X] failed\n", fis->command);
+ rv = -EIO;
+ }
if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
& (1 << MTIP_TAG_INTERNAL)) {
rv = -ENXIO;
- if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
- mtip_device_reset(dd);
+ if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+ &port->dd->dd_flag)) {
+ mtip_restart_port(port);
rv = -EAGAIN;
}
}
@@ -1764,8 +1724,7 @@ static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
* -EINVAL Invalid parameters passed in, trim not supported
* -EIO Error submitting trim request to hw
*/
-static int mtip_send_trim(struct driver_data *dd, unsigned int lba,
- unsigned int len)
+static int mtip_send_trim(struct driver_data *dd, unsigned int lba, unsigned int len)
{
int i, rv = 0;
u64 tlba, tlen, sect_left;
@@ -1851,6 +1810,45 @@ static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors)
return (bool) !!port->identify_valid;
}
+/*
+ * Reset the HBA.
+ *
+ * Resets the HBA by setting the HBA Reset bit in the Global
+ * HBA Control register. After setting the HBA Reset bit the
+ * function waits for 1 second before reading the HBA Reset
+ * bit to make sure it has cleared. If HBA Reset is not clear
+ * an error is returned. Cannot be used in non-blockable
+ * context.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0 The reset was successful.
+ * -1 The HBA Reset bit did not clear.
+ */
+static int mtip_hba_reset(struct driver_data *dd)
+{
+ mtip_deinit_port(dd->port);
+
+ /* Set the reset bit */
+ writel(HOST_RESET, dd->mmio + HOST_CTL);
+
+ /* Flush */
+ readl(dd->mmio + HOST_CTL);
+
+ /* Wait for reset to clear */
+ ssleep(1);
+
+ /* Check the bit has cleared */
+ if (readl(dd->mmio + HOST_CTL) & HOST_RESET) {
+ dev_err(&dd->pdev->dev,
+ "Reset bit did not clear.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
/*
* Display the identify command data.
*
@@ -2712,100 +2710,6 @@ static ssize_t mtip_hw_show_status(struct device *dev,
static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
-/* debugsfs entries */
-
-static ssize_t show_device_status(struct device_driver *drv, char *buf)
-{
- int size = 0;
- struct driver_data *dd, *tmp;
- unsigned long flags;
- char id_buf[42];
- u16 status = 0;
-
- spin_lock_irqsave(&dev_lock, flags);
- size += sprintf(&buf[size], "Devices Present:\n");
- list_for_each_entry_safe(dd, tmp, &online_list, online_list) {
- if (dd->pdev) {
- if (dd->port &&
- dd->port->identify &&
- dd->port->identify_valid) {
- strlcpy(id_buf,
- (char *) (dd->port->identify + 10), 21);
- status = *(dd->port->identify + 141);
- } else {
- memset(id_buf, 0, 42);
- status = 0;
- }
-
- if (dd->port &&
- test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
- size += sprintf(&buf[size],
- " device %s %s (ftl rebuild %d %%)\n",
- dev_name(&dd->pdev->dev),
- id_buf,
- status);
- } else {
- size += sprintf(&buf[size],
- " device %s %s\n",
- dev_name(&dd->pdev->dev),
- id_buf);
- }
- }
- }
-
- size += sprintf(&buf[size], "Devices Being Removed:\n");
- list_for_each_entry_safe(dd, tmp, &removing_list, remove_list) {
- if (dd->pdev) {
- if (dd->port &&
- dd->port->identify &&
- dd->port->identify_valid) {
- strlcpy(id_buf,
- (char *) (dd->port->identify+10), 21);
- status = *(dd->port->identify + 141);
- } else {
- memset(id_buf, 0, 42);
- status = 0;
- }
-
- if (dd->port &&
- test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
- size += sprintf(&buf[size],
- " device %s %s (ftl rebuild %d %%)\n",
- dev_name(&dd->pdev->dev),
- id_buf,
- status);
- } else {
- size += sprintf(&buf[size],
- " device %s %s\n",
- dev_name(&dd->pdev->dev),
- id_buf);
- }
- }
- }
- spin_unlock_irqrestore(&dev_lock, flags);
-
- return size;
-}
-
-static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf,
- size_t len, loff_t *offset)
-{
- int size = *offset;
- char buf[MTIP_DFS_MAX_BUF_SIZE];
-
- if (!len || *offset)
- return 0;
-
- size += show_device_status(NULL, buf);
-
- *offset = size <= len ? size : len;
- size = copy_to_user(ubuf, buf, *offset);
- if (size)
- return -EFAULT;
-
- return *offset;
-}
-
static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
size_t len, loff_t *offset)
{
@@ -2900,13 +2804,6 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
return *offset;
}
-static const struct file_operations mtip_device_status_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = mtip_hw_read_device_status,
- .llseek = no_llseek,
-};
-
static const struct file_operations mtip_regs_fops = {
.owner = THIS_MODULE,
.open = simple_open,
@@ -4264,7 +4161,6 @@ static int mtip_pci_probe(struct pci_dev *pdev,
const struct cpumask *node_mask;
int cpu, i = 0, j = 0;
int my_node = NUMA_NO_NODE;
- unsigned long flags;
/* Allocate memory for this devices private data. */
my_node = pcibus_to_node(pdev->bus);
@@ -4322,9 +4218,6 @@ static int mtip_pci_probe(struct pci_dev *pdev,
dd->pdev = pdev;
dd->numa_node = my_node;
- INIT_LIST_HEAD(&dd->online_list);
- INIT_LIST_HEAD(&dd->remove_list);
-
memset(dd->workq_name, 0, 32);
snprintf(dd->workq_name, 31, "mtipq%d", dd->instance);
@@ -4412,14 +4305,6 @@ static int mtip_pci_probe(struct pci_dev *pdev,
instance++;
if (rv != MTIP_FTL_REBUILD_MAGIC)
set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
- else
- rv = 0; /* device in rebuild state, return 0 from probe */
-
- /* Add to online list even if in ftl rebuild */
- spin_lock_irqsave(&dev_lock, flags);
- list_add(&dd->online_list, &online_list);
- spin_unlock_irqrestore(&dev_lock, flags);
-
goto done;
block_initialize_err:
@@ -4453,15 +4338,9 @@ static void mtip_pci_remove(struct pci_dev *pdev)
{
struct driver_data *dd = pci_get_drvdata(pdev);
int counter = 0;
- unsigned long flags;
set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
- spin_lock_irqsave(&dev_lock, flags);
- list_del_init(&dd->online_list);
- list_add(&dd->remove_list, &removing_list);
- spin_unlock_irqrestore(&dev_lock, flags);
-
if (mtip_check_surprise_removal(pdev)) {
while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) {
counter++;
@@ -4487,10 +4366,6 @@ static void mtip_pci_remove(struct pci_dev *pdev)
pci_disable_msi(pdev);
- spin_lock_irqsave(&dev_lock, flags);
- list_del_init(&dd->remove_list);
- spin_unlock_irqrestore(&dev_lock, flags);
-
kfree(dd);
pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
}
@@ -4638,11 +4513,6 @@ static int __init mtip_init(void)
pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n");
- spin_lock_init(&dev_lock);
-
- INIT_LIST_HEAD(&online_list);
- INIT_LIST_HEAD(&removing_list);
-
/* Allocate a major block device number to use with this driver. */
error = register_blkdev(0, MTIP_DRV_NAME);
if (error <= 0) {
@@ -4652,18 +4522,11 @@ static int __init mtip_init(void)
}
mtip_major = error;
- dfs_parent = debugfs_create_dir("rssd", NULL);
- if (IS_ERR_OR_NULL(dfs_parent)) {
- pr_warn("Error creating debugfs parent\n");
- dfs_parent = NULL;
- }
- if (dfs_parent) {
- dfs_device_status = debugfs_create_file("device_status",
- S_IRUGO, dfs_parent, NULL,
- &mtip_device_status_fops);
- if (IS_ERR_OR_NULL(dfs_device_status)) {
- pr_err("Error creating device_status node\n");
- dfs_device_status = NULL;
+ if (!dfs_parent) {
+ dfs_parent = debugfs_create_dir("rssd", NULL);
+ if (IS_ERR_OR_NULL(dfs_parent)) {
+ pr_warn("Error creating debugfs parent\n");
+ dfs_parent = NULL;
}
}
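The mtip_init() change above reduces to creating the "rssd" debugfs directory and, on the removed side, a read-only "device_status" file beneath it. A self-contained sketch of that pattern with hypothetical names, using only debugfs/VFS helpers present in this tree:

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/fs.h>

static struct dentry *example_dir;

static ssize_t example_read(struct file *f, char __user *ubuf,
			    size_t len, loff_t *offset)
{
	static const char msg[] = "ok\n";

	return simple_read_from_buffer(ubuf, len, offset, msg, sizeof(msg) - 1);
}

static const struct file_operations example_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= example_read,
	.llseek	= no_llseek,
};

static int __init example_init(void)
{
	example_dir = debugfs_create_dir("rssd_example", NULL);
	if (!IS_ERR_OR_NULL(example_dir))
		debugfs_create_file("device_status", S_IRUGO, example_dir,
				    NULL, &example_fops);
	return 0;
}
module_init(example_init);
MODULE_LICENSE("GPL");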
diff --git a/trunk/drivers/block/mtip32xx/mtip32xx.h b/trunk/drivers/block/mtip32xx/mtip32xx.h
index 8e8334c9dd0f..3bffff5f670c 100644
--- a/trunk/drivers/block/mtip32xx/mtip32xx.h
+++ b/trunk/drivers/block/mtip32xx/mtip32xx.h
@@ -129,9 +129,9 @@ enum {
MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */
MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */
MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcde */
- MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) |
- (1 << MTIP_PF_EH_ACTIVE_BIT) |
- (1 << MTIP_PF_SE_ACTIVE_BIT) |
+ MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) | \
+ (1 << MTIP_PF_EH_ACTIVE_BIT) | \
+ (1 << MTIP_PF_SE_ACTIVE_BIT) | \
(1 << MTIP_PF_DM_ACTIVE_BIT)),
MTIP_PF_SVC_THD_ACTIVE_BIT = 4,
@@ -144,9 +144,9 @@ enum {
MTIP_DDF_REMOVE_PENDING_BIT = 1,
MTIP_DDF_OVER_TEMP_BIT = 2,
MTIP_DDF_WRITE_PROTECT_BIT = 3,
- MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
- (1 << MTIP_DDF_SEC_LOCK_BIT) |
- (1 << MTIP_DDF_OVER_TEMP_BIT) |
+ MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \
+ (1 << MTIP_DDF_SEC_LOCK_BIT) | \
+ (1 << MTIP_DDF_OVER_TEMP_BIT) | \
(1 << MTIP_DDF_WRITE_PROTECT_BIT)),
MTIP_DDF_CLEANUP_BIT = 5,
@@ -180,7 +180,7 @@ struct mtip_work {
#define MTIP_TRIM_TIMEOUT_MS 240000
#define MTIP_MAX_TRIM_ENTRIES 8
-#define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8
+#define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8
struct mtip_trim_entry {
u32 lba; /* starting lba of region */
@@ -501,10 +501,6 @@ struct driver_data {
atomic_t irq_workers_active;
int isr_binding;
-
- struct list_head online_list; /* linkage for online list */
-
- struct list_head remove_list; /* linkage for removing list */
};
#endif
diff --git a/trunk/drivers/block/rbd.c b/trunk/drivers/block/rbd.c
index b7b7a88d9f68..f556f8a8b3f9 100644
--- a/trunk/drivers/block/rbd.c
+++ b/trunk/drivers/block/rbd.c
@@ -1742,10 +1742,9 @@ static int rbd_img_request_submit(struct rbd_img_request *img_request)
struct rbd_device *rbd_dev = img_request->rbd_dev;
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_obj_request *obj_request;
- struct rbd_obj_request *next_obj_request;
dout("%s: img %p\n", __func__, img_request);
- for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
+ for_each_obj_request(img_request, obj_request) {
int ret;
obj_request->callback = rbd_img_obj_callback;
diff --git a/trunk/drivers/cpufreq/intel_pstate.c b/trunk/drivers/cpufreq/intel_pstate.c
index 6133ef5cf671..ad72922919ed 100644
--- a/trunk/drivers/cpufreq/intel_pstate.c
+++ b/trunk/drivers/cpufreq/intel_pstate.c
@@ -502,6 +502,7 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
sample_time = cpu->pstate_policy->sample_rate_ms;
delay = msecs_to_jiffies(sample_time);
+ delay -= jiffies % delay;
mod_timer_pinned(&cpu->timer, jiffies + delay);
}
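The added line aligns the timer target with a multiple of the sample period, so successive samples land on a fixed grid rather than drifting. For example, with a 10-jiffy period and jiffies % 10 == 3, the timer is armed 7 jiffies out. A sketch of the arithmetic with assumed values:

	unsigned long delay = msecs_to_jiffies(10);	/* 10 jiffies at HZ=1000 */

	delay -= jiffies % delay;			/* e.g. 10 - 3 = 7 jiffies */
	mod_timer_pinned(&cpu->timer, jiffies + delay);	/* expiry on a 10-jiffy boundary */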
diff --git a/trunk/drivers/crypto/ux500/cryp/cryp_core.c b/trunk/drivers/crypto/ux500/cryp/cryp_core.c
index 22c9063e0120..8bc5fef07e7a 100644
--- a/trunk/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/trunk/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1750,7 +1750,7 @@ static struct platform_driver cryp_driver = {
.shutdown = ux500_cryp_shutdown,
.driver = {
.owner = THIS_MODULE,
- .name = "cryp1",
+ .name = "cryp1"
.pm = &ux500_cryp_pm,
}
};
diff --git a/trunk/drivers/dma/at_hdmac.c b/trunk/drivers/dma/at_hdmac.c
index 88cfc61329d2..6e13f262139a 100644
--- a/trunk/drivers/dma/at_hdmac.c
+++ b/trunk/drivers/dma/at_hdmac.c
@@ -310,6 +310,8 @@ static void atc_complete_all(struct at_dma_chan *atchan)
dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
+ BUG_ON(atc_chan_is_enabled(atchan));
+
/*
* Submit queued descriptors ASAP, i.e. before we go through
* the completed ones.
@@ -366,9 +368,6 @@ static void atc_advance_work(struct at_dma_chan *atchan)
{
dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
- if (atc_chan_is_enabled(atchan))
- return;
-
if (list_empty(&atchan->active_list) ||
list_is_singular(&atchan->active_list)) {
atc_complete_all(atchan);
@@ -1079,7 +1078,9 @@ static void atc_issue_pending(struct dma_chan *chan)
return;
spin_lock_irqsave(&atchan->lock, flags);
- atc_advance_work(atchan);
+ if (!atc_chan_is_enabled(atchan)) {
+ atc_advance_work(atchan);
+ }
spin_unlock_irqrestore(&atchan->lock, flags);
}
diff --git a/trunk/drivers/dma/omap-dma.c b/trunk/drivers/dma/omap-dma.c
index 08b43bf37158..c4b4fd2acc42 100644
--- a/trunk/drivers/dma/omap-dma.c
+++ b/trunk/drivers/dma/omap-dma.c
@@ -276,20 +276,12 @@ static void omap_dma_issue_pending(struct dma_chan *chan)
spin_lock_irqsave(&c->vc.lock, flags);
if (vchan_issue_pending(&c->vc) && !c->desc) {
- /*
- * c->cyclic is used only by audio and in this case the DMA need
- * to be started without delay.
- */
- if (!c->cyclic) {
- struct omap_dmadev *d = to_omap_dma_dev(chan->device);
- spin_lock(&d->lock);
- if (list_empty(&c->node))
- list_add_tail(&c->node, &d->pending);
- spin_unlock(&d->lock);
- tasklet_schedule(&d->task);
- } else {
- omap_dma_start_desc(c);
- }
+ struct omap_dmadev *d = to_omap_dma_dev(chan->device);
+ spin_lock(&d->lock);
+ if (list_empty(&c->node))
+ list_add_tail(&c->node, &d->pending);
+ spin_unlock(&d->lock);
+ tasklet_schedule(&d->task);
}
spin_unlock_irqrestore(&c->vc.lock, flags);
}
diff --git a/trunk/drivers/dma/pl330.c b/trunk/drivers/dma/pl330.c
index 5dbc5946c4c3..718153122759 100644
--- a/trunk/drivers/dma/pl330.c
+++ b/trunk/drivers/dma/pl330.c
@@ -2882,7 +2882,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
struct dma_pl330_platdata *pdat;
struct dma_pl330_dmac *pdmac;
- struct dma_pl330_chan *pch, *_p;
+ struct dma_pl330_chan *pch;
struct pl330_info *pi;
struct dma_device *pd;
struct resource *res;
@@ -2984,16 +2984,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
ret = dma_async_device_register(pd);
if (ret) {
dev_err(&adev->dev, "unable to register DMAC\n");
- goto probe_err3;
- }
-
- if (adev->dev.of_node) {
- ret = of_dma_controller_register(adev->dev.of_node,
- of_dma_pl330_xlate, pdmac);
- if (ret) {
- dev_err(&adev->dev,
- "unable to register DMA to the generic DT DMA helpers\n");
- }
+ goto probe_err2;
}
dev_info(&adev->dev,
@@ -3004,21 +2995,16 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
pi->pcfg.num_peri, pi->pcfg.num_events);
- return 0;
-probe_err3:
- amba_set_drvdata(adev, NULL);
-
- /* Idle the DMAC */
- list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
- chan.device_node) {
+ ret = of_dma_controller_register(adev->dev.of_node,
+ of_dma_pl330_xlate, pdmac);
+ if (ret) {
+ dev_err(&adev->dev,
+ "unable to register DMA to the generic DT DMA helpers\n");
+ goto probe_err2;
+ }
- /* Remove the channel */
- list_del(&pch->chan.device_node);
+ return 0;
- /* Flush the channel */
- pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
- pl330_free_chan_resources(&pch->chan);
- }
probe_err2:
pl330_del(pi);
probe_err1:
@@ -3037,10 +3023,8 @@ static int pl330_remove(struct amba_device *adev)
if (!pdmac)
return 0;
- if (adev->dev.of_node)
- of_dma_controller_free(adev->dev.of_node);
+ of_dma_controller_free(adev->dev.of_node);
- dma_async_device_unregister(&pdmac->ddma);
amba_set_drvdata(adev, NULL);
/* Idle the DMAC */
diff --git a/trunk/drivers/gpio/gpio-pca953x.c b/trunk/drivers/gpio/gpio-pca953x.c
index 9391cf16e990..24059462c87f 100644
--- a/trunk/drivers/gpio/gpio-pca953x.c
+++ b/trunk/drivers/gpio/gpio-pca953x.c
@@ -575,7 +575,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
chip->gpio_chip.ngpio,
irq_base,
&pca953x_irq_simple_ops,
- chip);
+ NULL);
if (!chip->domain)
return -ENODEV;
diff --git a/trunk/drivers/gpu/drm/drm_fb_helper.c b/trunk/drivers/gpu/drm/drm_fb_helper.c
index 892ff9f95975..59d6b9bf204b 100644
--- a/trunk/drivers/gpu/drm/drm_fb_helper.c
+++ b/trunk/drivers/gpu/drm/drm_fb_helper.c
@@ -1544,10 +1544,10 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
if (!fb_helper->fb)
return 0;
- mutex_lock(&fb_helper->dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
if (!drm_fb_helper_is_bound(fb_helper)) {
fb_helper->delayed_hotplug = true;
- mutex_unlock(&fb_helper->dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return 0;
}
DRM_DEBUG_KMS("\n");
@@ -1558,11 +1558,9 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
max_height);
- mutex_unlock(&fb_helper->dev->mode_config.mutex);
-
- drm_modeset_lock_all(dev);
drm_setup_crtcs(fb_helper);
drm_modeset_unlock_all(dev);
+
drm_fb_helper_set_par(fb_helper->fbdev);
return 0;
diff --git a/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c b/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c
index 78d8e919509f..fe22bb780e1d 100644
--- a/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -751,6 +751,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
int i;
unsigned char misc = 0;
unsigned char ext_vga[6];
+ unsigned char ext_vga_index24;
+ unsigned char dac_index90 = 0;
u8 bppshift;
static unsigned char dacvalue[] = {
@@ -801,6 +803,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
option2 = 0x0000b000;
break;
case G200_ER:
+ dac_index90 = 0;
break;
}
@@ -849,8 +852,10 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
WREG_DAC(i, dacvalue[i]);
}
- if (mdev->type == G200_ER)
- WREG_DAC(0x90, 0);
+ if (mdev->type == G200_ER) {
+ WREG_DAC(0x90, dac_index90);
+ }
+
if (option)
pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option);
@@ -947,6 +952,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
if (mdev->type == G200_WB)
ext_vga[1] |= 0x88;
+ ext_vga_index24 = 0x05;
+
/* Set pixel clocks */
misc = 0x2d;
WREG8(MGA_MISC_OUT, misc);
@@ -958,7 +965,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
}
if (mdev->type == G200_ER)
- WREG_ECRT(0x24, 0x5);
+ WREG_ECRT(24, ext_vga_index24);
if (mdev->type == G200_EV) {
WREG_ECRT(6, 0);
diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_display.c b/trunk/drivers/gpu/drm/nouveau/nv50_display.c
index 1ddc03e51bf4..7f0e6c3f37d1 100644
--- a/trunk/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/trunk/drivers/gpu/drm/nouveau/nv50_display.c
@@ -479,7 +479,7 @@ nv50_display_flip_wait(void *data)
{
struct nv50_display_flip *flip = data;
if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) ==
- flip->chan->data)
+ flip->chan->data);
return true;
usleep_range(1, 2);
return false;
diff --git a/trunk/drivers/gpu/drm/udl/udl_connector.c b/trunk/drivers/gpu/drm/udl/udl_connector.c
index b44d548c56f8..fe5cdbcf2636 100644
--- a/trunk/drivers/gpu/drm/udl/udl_connector.c
+++ b/trunk/drivers/gpu/drm/udl/udl_connector.c
@@ -61,10 +61,6 @@ static int udl_get_modes(struct drm_connector *connector)
int ret;
edid = (struct edid *)udl_get_edid(udl);
- if (!edid) {
- drm_mode_connector_update_edid_property(connector, NULL);
- return 0;
- }
/*
* We only read the main block, but if the monitor reports extension
diff --git a/trunk/drivers/hwspinlock/hwspinlock_core.c b/trunk/drivers/hwspinlock/hwspinlock_core.c
index 461a0d739d75..db713c0dfba4 100644
--- a/trunk/drivers/hwspinlock/hwspinlock_core.c
+++ b/trunk/drivers/hwspinlock/hwspinlock_core.c
@@ -416,8 +416,6 @@ static int __hwspin_lock_request(struct hwspinlock *hwlock)
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "%s: can't power on device\n", __func__);
- pm_runtime_put_noidle(dev);
- module_put(dev->driver->owner);
return ret;
}
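The removed lines in __hwspin_lock_request() implement the usual pm_runtime error path: pm_runtime_get_sync() raises the device usage count even when it fails, so the failure branch has to drop it again and release the module reference taken earlier. The pattern in isolation (fragment mirroring the removed branch):

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "%s: can't power on device\n", __func__);
		pm_runtime_put_noidle(dev);	/* get_sync bumped the count even on failure */
		module_put(dev->driver->owner);	/* balance the earlier try_module_get() */
		return ret;
	}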
diff --git a/trunk/drivers/idle/intel_idle.c b/trunk/drivers/idle/intel_idle.c
index 1a38dd7dfe4e..5d6675013864 100644
--- a/trunk/drivers/idle/intel_idle.c
+++ b/trunk/drivers/idle/intel_idle.c
@@ -465,7 +465,6 @@ static const struct x86_cpu_id intel_idle_ids[] = {
ICPU(0x3c, idle_cpu_hsw),
ICPU(0x3f, idle_cpu_hsw),
ICPU(0x45, idle_cpu_hsw),
- ICPU(0x46, idle_cpu_hsw),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
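The table above is matched against the running CPU at probe time; each ICPU() entry names a supported model. A sketch of how such an x86_cpu_id table is consumed (hypothetical probe function, x86_match_cpu() as used by this driver):

#include <asm/cpu_device_id.h>

static int __init example_probe(void)
{
	const struct x86_cpu_id *id = x86_match_cpu(intel_idle_ids);

	if (!id)
		return -ENODEV;	/* CPU model not listed in the table */
	return 0;
}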
diff --git a/trunk/drivers/input/tablet/wacom_wac.c b/trunk/drivers/input/tablet/wacom_wac.c
index 0bfd8cf25200..1daa97913b7d 100644
--- a/trunk/drivers/input/tablet/wacom_wac.c
+++ b/trunk/drivers/input/tablet/wacom_wac.c
@@ -359,7 +359,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
case 0x802: /* Intuos4 General Pen */
case 0x804: /* Intuos4 Marker Pen */
case 0x40802: /* Intuos4 Classic Pen */
- case 0x18802: /* DTH2242 Grip Pen */
+ case 0x18803: /* DTH2242 Grip Pen */
case 0x022:
wacom->tool[idx] = BTN_TOOL_PEN;
break;
@@ -1912,7 +1912,7 @@ static const struct wacom_features wacom_features_0xBB =
{ "Wacom Intuos4 12x19", WACOM_PKGLEN_INTUOS, 97536, 60960, 2047,
63, INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0xBC =
- { "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40640, 25400, 2047,
+ { "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40840, 25400, 2047,
63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0x26 =
{ "Wacom Intuos5 touch S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047,
@@ -2144,7 +2144,7 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x44) },
{ USB_DEVICE_WACOM(0x45) },
{ USB_DEVICE_WACOM(0x59) },
- { USB_DEVICE_DETAILED(0x5D, USB_CLASS_HID, 0, 0) },
+ { USB_DEVICE_WACOM(0x5D) },
{ USB_DEVICE_WACOM(0xB0) },
{ USB_DEVICE_WACOM(0xB1) },
{ USB_DEVICE_WACOM(0xB2) },
@@ -2209,7 +2209,7 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x47) },
{ USB_DEVICE_WACOM(0xF4) },
{ USB_DEVICE_WACOM(0xF8) },
- { USB_DEVICE_DETAILED(0xF6, USB_CLASS_HID, 0, 0) },
+ { USB_DEVICE_WACOM(0xF6) },
{ USB_DEVICE_WACOM(0xFA) },
{ USB_DEVICE_LENOVO(0x6004) },
{ }
diff --git a/trunk/drivers/irqchip/irq-gic.c b/trunk/drivers/irqchip/irq-gic.c
index fc6aebf1e4b2..a32e0d5aa45f 100644
--- a/trunk/drivers/irqchip/irq-gic.c
+++ b/trunk/drivers/irqchip/irq-gic.c
@@ -236,8 +236,7 @@ static int gic_retrigger(struct irq_data *d)
if (gic_arch_extn.irq_retrigger)
return gic_arch_extn.irq_retrigger(d);
- /* the genirq layer expects 0 if we can't retrigger in hardware */
- return 0;
+ return -ENXIO;
}
#ifdef CONFIG_SMP
diff --git a/trunk/drivers/md/dm.c b/trunk/drivers/md/dm.c
index 9a0bdad9ad8f..7e469260fe5e 100644
--- a/trunk/drivers/md/dm.c
+++ b/trunk/drivers/md/dm.c
@@ -611,7 +611,6 @@ static void dec_pending(struct dm_io *io, int error)
queue_io(md, bio);
} else {
/* done with normal IO or empty flush */
- trace_block_bio_complete(md->queue, bio, io_error);
bio_endio(bio, io_error);
}
}
diff --git a/trunk/drivers/md/raid5.c b/trunk/drivers/md/raid5.c
index f4e87bfc7567..24909eb13fec 100644
--- a/trunk/drivers/md/raid5.c
+++ b/trunk/drivers/md/raid5.c
@@ -184,8 +184,6 @@ static void return_io(struct bio *return_bi)
return_bi = bi->bi_next;
bi->bi_next = NULL;
bi->bi_size = 0;
- trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
- bi, 0);
bio_endio(bi, 0);
bi = return_bi;
}
@@ -3916,8 +3914,6 @@ static void raid5_align_endio(struct bio *bi, int error)
rdev_dec_pending(rdev, conf->mddev);
if (!error && uptodate) {
- trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
- raid_bi, 0);
bio_endio(raid_bi, 0);
if (atomic_dec_and_test(&conf->active_aligned_reads))
wake_up(&conf->wait_for_stripe);
@@ -4386,8 +4382,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
if ( rw == WRITE )
md_write_end(mddev);
- trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
- bi, 0);
bio_endio(bi, 0);
}
}
@@ -4764,11 +4758,8 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
handled++;
}
remaining = raid5_dec_bi_active_stripes(raid_bio);
- if (remaining == 0) {
- trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
- raid_bio, 0);
+ if (remaining == 0)
bio_endio(raid_bio, 0);
- }
if (atomic_dec_and_test(&conf->active_aligned_reads))
wake_up(&conf->wait_for_stripe);
return handled;
diff --git a/trunk/drivers/misc/vmw_vmci/Kconfig b/trunk/drivers/misc/vmw_vmci/Kconfig
index ea98f7e9ccd1..39c2ecadb273 100644
--- a/trunk/drivers/misc/vmw_vmci/Kconfig
+++ b/trunk/drivers/misc/vmw_vmci/Kconfig
@@ -4,7 +4,7 @@
config VMWARE_VMCI
tristate "VMware VMCI Driver"
- depends on X86 && PCI && NET
+ depends on X86 && PCI
help
This is VMware's Virtual Machine Communication Interface. It enables
high-speed communication between host and guest in a virtual
diff --git a/trunk/drivers/net/bonding/bond_main.c b/trunk/drivers/net/bonding/bond_main.c
index a61a760484f7..171b10f167a5 100644
--- a/trunk/drivers/net/bonding/bond_main.c
+++ b/trunk/drivers/net/bonding/bond_main.c
@@ -1906,7 +1906,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
write_unlock_bh(&bond->lock);
err_close:
- slave_dev->priv_flags &= ~IFF_BONDING;
dev_close(slave_dev);
err_unset_master:
@@ -3169,20 +3168,11 @@ static int bond_slave_netdev_event(unsigned long event,
struct net_device *slave_dev)
{
struct slave *slave = bond_slave_get_rtnl(slave_dev);
- struct bonding *bond;
- struct net_device *bond_dev;
+ struct bonding *bond = slave->bond;
+ struct net_device *bond_dev = slave->bond->dev;
u32 old_speed;
u8 old_duplex;
- /* A netdev event can be generated while enslaving a device
- * before netdev_rx_handler_register is called in which case
- * slave will be NULL
- */
- if (!slave)
- return NOTIFY_DONE;
- bond_dev = slave->bond->dev;
- bond = slave->bond;
-
switch (event) {
case NETDEV_UNREGISTER:
if (bond->setup_by_slave)
@@ -4856,18 +4846,9 @@ static int __net_init bond_net_init(struct net *net)
static void __net_exit bond_net_exit(struct net *net)
{
struct bond_net *bn = net_generic(net, bond_net_id);
- struct bonding *bond, *tmp_bond;
- LIST_HEAD(list);
bond_destroy_sysfs(bn);
bond_destroy_proc_dir(bn);
-
- /* Kill off any bonds created after unregistering bond rtnl ops */
- rtnl_lock();
- list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
- unregister_netdevice_queue(bond->dev, &list);
- unregister_netdevice_many(&list);
- rtnl_unlock();
}
static struct pernet_operations bond_net_ops = {
@@ -4921,8 +4902,8 @@ static void __exit bonding_exit(void)
bond_destroy_debugfs();
- rtnl_link_unregister(&bond_link_ops);
unregister_pernet_subsys(&bond_net_ops);
+ rtnl_link_unregister(&bond_link_ops);
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
diff --git a/trunk/drivers/net/can/mcp251x.c b/trunk/drivers/net/can/mcp251x.c
index 9aa0c64c33c8..f32b9fc6a983 100644
--- a/trunk/drivers/net/can/mcp251x.c
+++ b/trunk/drivers/net/can/mcp251x.c
@@ -929,7 +929,6 @@ static int mcp251x_open(struct net_device *net)
struct mcp251x_priv *priv = netdev_priv(net);
struct spi_device *spi = priv->spi;
struct mcp251x_platform_data *pdata = spi->dev.platform_data;
- unsigned long flags;
int ret;
ret = open_candev(net);
@@ -946,14 +945,9 @@ static int mcp251x_open(struct net_device *net)
priv->tx_skb = NULL;
priv->tx_len = 0;
- flags = IRQF_ONESHOT;
- if (pdata->irq_flags)
- flags |= pdata->irq_flags;
- else
- flags |= IRQF_TRIGGER_FALLING;
-
ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
- flags, DEVICE_NAME, priv);
+ pdata->irq_flags ? pdata->irq_flags : IRQF_TRIGGER_FALLING,
+ DEVICE_NAME, priv);
if (ret) {
dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
if (pdata->transceiver_enable)
diff --git a/trunk/drivers/net/can/sja1000/sja1000_of_platform.c b/trunk/drivers/net/can/sja1000/sja1000_of_platform.c
index 8e0c4a001939..6433b81256cd 100644
--- a/trunk/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/trunk/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -96,8 +96,8 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
struct net_device *dev;
struct sja1000_priv *priv;
struct resource res;
- u32 prop;
- int err, irq, res_size;
+ const u32 *prop;
+ int err, irq, res_size, prop_size;
void __iomem *base;
err = of_address_to_resource(np, 0, &res);
@@ -138,27 +138,27 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
priv->read_reg = sja1000_ofp_read_reg;
priv->write_reg = sja1000_ofp_write_reg;
- err = of_property_read_u32(np, "nxp,external-clock-frequency", &prop);
- if (!err)
- priv->can.clock.freq = prop / 2;
+ prop = of_get_property(np, "nxp,external-clock-frequency", &prop_size);
+ if (prop && (prop_size == sizeof(u32)))
+ priv->can.clock.freq = *prop / 2;
else
priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */
- err = of_property_read_u32(np, "nxp,tx-output-mode", &prop);
- if (!err)
- priv->ocr |= prop & OCR_MODE_MASK;
+ prop = of_get_property(np, "nxp,tx-output-mode", &prop_size);
+ if (prop && (prop_size == sizeof(u32)))
+ priv->ocr |= *prop & OCR_MODE_MASK;
else
priv->ocr |= OCR_MODE_NORMAL; /* default */
- err = of_property_read_u32(np, "nxp,tx-output-config", &prop);
- if (!err)
- priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK;
+ prop = of_get_property(np, "nxp,tx-output-config", &prop_size);
+ if (prop && (prop_size == sizeof(u32)))
+ priv->ocr |= (*prop << OCR_TX_SHIFT) & OCR_TX_MASK;
else
priv->ocr |= OCR_TX0_PULLDOWN; /* default */
- err = of_property_read_u32(np, "nxp,clock-out-frequency", &prop);
- if (!err && prop) {
- u32 divider = priv->can.clock.freq * 2 / prop;
+ prop = of_get_property(np, "nxp,clock-out-frequency", &prop_size);
+ if (prop && (prop_size == sizeof(u32)) && *prop) {
+ u32 divider = priv->can.clock.freq * 2 / *prop;
if (divider > 1)
priv->cdr |= divider / 2 - 1;
@@ -168,7 +168,8 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
priv->cdr |= CDR_CLK_OFF; /* default */
}
- if (!of_property_read_bool(np, "nxp,no-comparator-bypass"))
+ prop = of_get_property(np, "nxp,no-comparator-bypass", NULL);
+ if (!prop)
priv->cdr |= CDR_CBP; /* default */
priv->irq_flags = IRQF_SHARED;
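Both sides of the sja1000 hunk read optional u32 properties from the device tree: of_property_read_u32() validates the property length and converts byte order internally, while of_get_property() hands back the raw property and leaves both checks to the caller. A short sketch of the helper style; example_clock_freq() is a hypothetical wrapper:

#include <linux/of.h>

static u32 example_clock_freq(struct device_node *np, u32 fallback)
{
	u32 prop;

	/* returns 0 on success, -EINVAL/-ENODATA/-EOVERFLOW otherwise */
	if (!of_property_read_u32(np, "nxp,external-clock-frequency", &prop))
		return prop / 2;	/* oscillator runs at twice the CAN clock */

	return fallback;		/* e.g. SJA1000_OFP_CAN_CLOCK */
}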
diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 0283f343b0d1..77ebae0ac64a 100644
--- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -13437,7 +13437,13 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
{
struct bnx2x *bp = params->bp;
u16 base_page, next_page, not_kr2_device, lane;
- int sigdet;
+ int sigdet = bnx2x_warpcore_get_sigdet(phy, params);
+
+ if (!sigdet) {
+ if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE))
+ bnx2x_kr2_recovery(params, vars, phy);
+ return;
+ }
/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
* since some switches tend to reinit the AN process and clear the
@@ -13448,16 +13454,6 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
vars->check_kr2_recovery_cnt--;
return;
}
-
- sigdet = bnx2x_warpcore_get_sigdet(phy, params);
- if (!sigdet) {
- if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
- bnx2x_kr2_recovery(params, vars, phy);
- DP(NETIF_MSG_LINK, "No sigdet\n");
- }
- return;
- }
-
lane = bnx2x_get_warpcore_lane(phy, params);
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, lane);
diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 8e58da909f5c..e81a747ea8ce 100644
--- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -4947,7 +4947,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
q);
}
- if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
+ if (!NO_FCOE(bp)) {
fp = &bp->fp[FCOE_IDX(bp)];
queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
@@ -13354,7 +13354,6 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
RCU_INIT_POINTER(bp->cnic_ops, NULL);
mutex_unlock(&bp->cnic_mutex);
synchronize_rcu();
- bp->cnic_enabled = false;
kfree(bp->cnic_kwq);
bp->cnic_kwq = NULL;
diff --git a/trunk/drivers/net/ethernet/emulex/benet/be_main.c b/trunk/drivers/net/ethernet/emulex/benet/be_main.c
index 2886c9b63f90..08e54f3d288b 100644
--- a/trunk/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/trunk/drivers/net/ethernet/emulex/benet/be_main.c
@@ -759,9 +759,8 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
if (vlan_tx_tag_present(skb)) {
vlan_tag = be_get_tx_vlan_tag(adapter, skb);
- skb = __vlan_put_tag(skb, vlan_tag);
- if (skb)
- skb->vlan_tci = 0;
+ __vlan_put_tag(skb, vlan_tag);
+ skb->vlan_tci = 0;
}
return skb;
diff --git a/trunk/drivers/net/ethernet/freescale/fec.c b/trunk/drivers/net/ethernet/freescale/fec.c
index 73195f643c9c..f292c3aa423f 100644
--- a/trunk/drivers/net/ethernet/freescale/fec.c
+++ b/trunk/drivers/net/ethernet/freescale/fec.c
@@ -1002,7 +1002,6 @@ static void fec_enet_adjust_link(struct net_device *ndev)
} else {
if (fep->link) {
fec_stop(ndev);
- fep->link = phy_dev->link;
status_change = 1;
}
}
diff --git a/trunk/drivers/net/ethernet/intel/e100.c b/trunk/drivers/net/ethernet/intel/e100.c
index d2bea3f07c73..ec800b093e7e 100644
--- a/trunk/drivers/net/ethernet/intel/e100.c
+++ b/trunk/drivers/net/ethernet/intel/e100.c
@@ -870,7 +870,7 @@ static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
}
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
- int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
+ void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
struct cb *cb;
unsigned long flags;
@@ -888,13 +888,10 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
nic->cbs_avail--;
cb->skb = skb;
- err = cb_prepare(nic, cb, skb);
- if (err)
- goto err_unlock;
-
if (unlikely(!nic->cbs_avail))
err = -ENOSPC;
+ cb_prepare(nic, cb, skb);
/* Order is important otherwise we'll be in a race with h/w:
* set S-bit in current first, then clear S-bit in previous. */
@@ -1094,7 +1091,7 @@ static void e100_get_defaults(struct nic *nic)
nic->mii.mdio_write = mdio_write;
}
-static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
struct config *config = &cb->u.config;
u8 *c = (u8 *)config;
@@ -1184,7 +1181,6 @@ static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
"[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
- return 0;
}
/*************************************************************************
@@ -1335,7 +1331,7 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
return fw;
}
-static int e100_setup_ucode(struct nic *nic, struct cb *cb,
+static void e100_setup_ucode(struct nic *nic, struct cb *cb,
struct sk_buff *skb)
{
const struct firmware *fw = (void *)skb;
@@ -1362,7 +1358,6 @@ static int e100_setup_ucode(struct nic *nic, struct cb *cb,
cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);
cb->command = cpu_to_le16(cb_ucode | cb_el);
- return 0;
}
static inline int e100_load_ucode_wait(struct nic *nic)
@@ -1405,20 +1400,18 @@ static inline int e100_load_ucode_wait(struct nic *nic)
return err;
}
-static int e100_setup_iaaddr(struct nic *nic, struct cb *cb,
+static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
struct sk_buff *skb)
{
cb->command = cpu_to_le16(cb_iaaddr);
memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
- return 0;
}
-static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
cb->command = cpu_to_le16(cb_dump);
cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
offsetof(struct mem, dump_buf));
- return 0;
}
static int e100_phy_check_without_mii(struct nic *nic)
@@ -1588,7 +1581,7 @@ static int e100_hw_init(struct nic *nic)
return 0;
}
-static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
struct net_device *netdev = nic->netdev;
struct netdev_hw_addr *ha;
@@ -1603,7 +1596,6 @@ static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
ETH_ALEN);
}
- return 0;
}
static void e100_set_multicast_list(struct net_device *netdev)
@@ -1764,18 +1756,11 @@ static void e100_watchdog(unsigned long data)
round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}
-static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
+static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
struct sk_buff *skb)
{
- dma_addr_t dma_addr;
cb->command = nic->tx_command;
- dma_addr = pci_map_single(nic->pdev,
- skb->data, skb->len, PCI_DMA_TODEVICE);
- /* If we can't map the skb, have the upper layer try later */
- if (pci_dma_mapping_error(nic->pdev, dma_addr))
- return -ENOMEM;
-
/*
* Use the last 4 bytes of the SKB payload packet as the CRC, used for
* testing, ie sending frames with bad CRC.
@@ -1792,10 +1777,11 @@ static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
cb->u.tcb.tcb_byte_count = 0;
cb->u.tcb.threshold = nic->tx_threshold;
cb->u.tcb.tbd_count = 1;
- cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
+ cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
+ skb->data, skb->len, PCI_DMA_TODEVICE));
+ /* check for mapping failure? */
cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
skb_tx_timestamp(skb);
- return 0;
}
static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
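The e100_xmit_prepare() hunk swaps an int-returning prepare callback, which checks the DMA mapping before using it, for a void one that maps unconditionally. For reference, the checked form that the removed lines implement (fragment, legacy PCI DMA API as used elsewhere in this driver):

	dma_addr_t dma_addr;

	dma_addr = pci_map_single(nic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(nic->pdev, dma_addr))
		return -ENOMEM;		/* let the upper layer try the skb again later */

	cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);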
diff --git a/trunk/drivers/net/ethernet/marvell/Kconfig b/trunk/drivers/net/ethernet/marvell/Kconfig
index 434e33c527df..edfba9370922 100644
--- a/trunk/drivers/net/ethernet/marvell/Kconfig
+++ b/trunk/drivers/net/ethernet/marvell/Kconfig
@@ -33,7 +33,6 @@ config MV643XX_ETH
config MVMDIO
tristate "Marvell MDIO interface support"
- select PHYLIB
---help---
This driver supports the MDIO interface found in the network
interface units of the Marvell EBU SoCs (Kirkwood, Orion5x,
@@ -46,6 +45,7 @@ config MVMDIO
config MVNETA
tristate "Marvell Armada 370/XP network interface support"
depends on MACH_ARMADA_370_XP
+ select PHYLIB
select MVMDIO
---help---
This driver supports the network interface units in the
diff --git a/trunk/drivers/net/ethernet/marvell/mvneta.c b/trunk/drivers/net/ethernet/marvell/mvneta.c
index a47a097c21e1..cd345b8969bc 100644
--- a/trunk/drivers/net/ethernet/marvell/mvneta.c
+++ b/trunk/drivers/net/ethernet/marvell/mvneta.c
@@ -374,6 +374,7 @@ static int rxq_number = 8;
static int txq_number = 8;
static int rxq_def;
+static int txq_def;
#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"
@@ -1474,8 +1475,7 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
{
struct mvneta_port *pp = netdev_priv(dev);
- u16 txq_id = skb_get_queue_mapping(skb);
- struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
+ struct mvneta_tx_queue *txq = &pp->txqs[txq_def];
struct mvneta_tx_desc *tx_desc;
struct netdev_queue *nq;
int frags = 0;
@@ -1485,7 +1485,7 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
goto out;
frags = skb_shinfo(skb)->nr_frags + 1;
- nq = netdev_get_tx_queue(dev, txq_id);
+ nq = netdev_get_tx_queue(dev, txq_def);
/* Get a descriptor for the first part of the packet */
tx_desc = mvneta_txq_next_desc_get(txq);
@@ -2689,7 +2689,7 @@ static int mvneta_probe(struct platform_device *pdev)
return -EINVAL;
}
- dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
+ dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8);
if (!dev)
return -ENOMEM;
@@ -2771,17 +2771,16 @@ static int mvneta_probe(struct platform_device *pdev)
netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);
- dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
- dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
- dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
- dev->priv_flags |= IFF_UNICAST_FLT;
-
err = register_netdev(dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to register\n");
goto err_deinit;
}
+ dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
netdev_info(dev, "mac: %pM\n", dev->dev_addr);
platform_set_drvdata(pdev, pp->dev);
@@ -2844,3 +2843,4 @@ module_param(rxq_number, int, S_IRUGO);
module_param(txq_number, int, S_IRUGO);
module_param(rxq_def, int, S_IRUGO);
+module_param(txq_def, int, S_IRUGO);
diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 5ef328af61d0..987fb6f8adc3 100644
--- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -200,10 +200,10 @@ static ssize_t qlcnic_store_beacon(struct device *dev,
}
err = qlcnic_config_led(adapter, b_state, b_rate);
- if (!err) {
+ if (!err)
err = len;
+ else
ahw->beacon_state = b_state;
- }
if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/trunk/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 50617c5a0bdb..0c74a702d461 100644
--- a/trunk/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/trunk/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -149,7 +149,6 @@ void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
{
writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
- writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_IPC_INTR_MASK);
}
/* This reads the MAC core counters (if actually supported).
diff --git a/trunk/drivers/net/ethernet/ti/cpsw.c b/trunk/drivers/net/ethernet/ti/cpsw.c
index 4781d3d8e182..80cad06e5eb2 100644
--- a/trunk/drivers/net/ethernet/ti/cpsw.c
+++ b/trunk/drivers/net/ethernet/ti/cpsw.c
@@ -1380,7 +1380,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
if (data->dual_emac) {
- if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
+ if (of_property_read_u32(node, "dual_emac_res_vlan",
&prop)) {
pr_err("Missing dual_emac_res_vlan in DT.\n");
slave_data->dual_emac_res_vlan = i+1;
diff --git a/trunk/drivers/net/hyperv/netvsc.c b/trunk/drivers/net/hyperv/netvsc.c
index f5f0f09e4cc5..1cd77483da50 100644
--- a/trunk/drivers/net/hyperv/netvsc.c
+++ b/trunk/drivers/net/hyperv/netvsc.c
@@ -470,10 +470,8 @@ static void netvsc_send_completion(struct hv_device *device,
packet->trans_id;
/* Notify the layer above us */
- if (nvsc_packet)
- nvsc_packet->completion.send.send_completion(
- nvsc_packet->completion.send.
- send_completion_ctx);
+ nvsc_packet->completion.send.send_completion(
+ nvsc_packet->completion.send.send_completion_ctx);
num_outstanding_sends =
atomic_dec_return(&net_device->num_outstanding_sends);
@@ -500,7 +498,6 @@ int netvsc_send(struct hv_device *device,
int ret = 0;
struct nvsp_message sendMessage;
struct net_device *ndev;
- u64 req_id;
net_device = get_outbound_net_device(device);
if (!net_device)
@@ -521,24 +518,20 @@ int netvsc_send(struct hv_device *device,
0xFFFFFFFF;
sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
- if (packet->completion.send.send_completion)
- req_id = (u64)packet;
- else
- req_id = 0;
-
if (packet->page_buf_cnt) {
ret = vmbus_sendpacket_pagebuffer(device->channel,
packet->page_buf,
packet->page_buf_cnt,
&sendMessage,
sizeof(struct nvsp_message),
- req_id);
+ (unsigned long)packet);
} else {
ret = vmbus_sendpacket(device->channel, &sendMessage,
sizeof(struct nvsp_message),
- req_id,
+ (unsigned long)packet,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+
}
if (ret == 0) {
diff --git a/trunk/drivers/net/hyperv/netvsc_drv.c b/trunk/drivers/net/hyperv/netvsc_drv.c
index 8341b62e5521..5f85205cd12b 100644
--- a/trunk/drivers/net/hyperv/netvsc_drv.c
+++ b/trunk/drivers/net/hyperv/netvsc_drv.c
@@ -241,11 +241,13 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
if (status == 1) {
netif_carrier_on(net);
+ netif_wake_queue(net);
ndev_ctx = netdev_priv(net);
schedule_delayed_work(&ndev_ctx->dwork, 0);
schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
} else {
netif_carrier_off(net);
+ netif_tx_disable(net);
}
}
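The netvsc_linkstatus_callback() hunk pairs the carrier state change with explicitly waking or disabling the device's TX queue. The pairing in isolation (sketch; link_up stands in for the status test):

	if (link_up) {
		netif_carrier_on(net);
		netif_wake_queue(net);		/* restart queued transmissions */
	} else {
		netif_carrier_off(net);
		netif_tx_disable(net);		/* stop TX while the link is down */
	}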
diff --git a/trunk/drivers/net/hyperv/rndis_filter.c b/trunk/drivers/net/hyperv/rndis_filter.c
index 0775f0aefd1e..2b657d4d63a8 100644
--- a/trunk/drivers/net/hyperv/rndis_filter.c
+++ b/trunk/drivers/net/hyperv/rndis_filter.c
@@ -61,6 +61,9 @@ struct rndis_request {
static void rndis_filter_send_completion(void *ctx);
+static void rndis_filter_send_request_completion(void *ctx);
+
+
static struct rndis_device *get_rndis_device(void)
{
@@ -238,7 +241,10 @@ static int rndis_filter_send_request(struct rndis_device *dev,
packet->page_buf[0].len;
}
- packet->completion.send.send_completion = NULL;
+ packet->completion.send.send_completion_ctx = req;/* packet; */
+ packet->completion.send.send_completion =
+ rndis_filter_send_request_completion;
+ packet->completion.send.send_completion_tid = (unsigned long)dev;
ret = netvsc_send(dev->net_dev->dev, packet);
return ret;
@@ -993,3 +999,9 @@ static void rndis_filter_send_completion(void *ctx)
/* Pass it back to the original handler */
filter_pkt->completion(filter_pkt->completion_ctx);
}
+
+
+static void rndis_filter_send_request_completion(void *ctx)
+{
+ /* Noop */
+}
diff --git a/trunk/drivers/net/tun.c b/trunk/drivers/net/tun.c
index 729ed533bb33..b7c457adc0dc 100644
--- a/trunk/drivers/net/tun.c
+++ b/trunk/drivers/net/tun.c
@@ -1594,7 +1594,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (tun->flags & TUN_TAP_MQ &&
(tun->numqueues + tun->numdisabled > 1))
- return -EBUSY;
+ return err;
}
else {
char *name;
diff --git a/trunk/drivers/net/usb/cdc_mbim.c b/trunk/drivers/net/usb/cdc_mbim.c
index 6bd91676d2cb..16c842997291 100644
--- a/trunk/drivers/net/usb/cdc_mbim.c
+++ b/trunk/drivers/net/usb/cdc_mbim.c
@@ -134,7 +134,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
goto error;
if (skb) {
- if (skb->len <= ETH_HLEN)
+ if (skb->len <= sizeof(ETH_HLEN))
goto error;
/* mapping VLANs to MBIM sessions:
diff --git a/trunk/drivers/net/wireless/ath/ath9k/main.c b/trunk/drivers/net/wireless/ath/ath9k/main.c
index 988372d218a4..6e66f9c6782b 100644
--- a/trunk/drivers/net/wireless/ath/ath9k/main.c
+++ b/trunk/drivers/net/wireless/ath/ath9k/main.c
@@ -280,10 +280,6 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
if (r) {
ath_err(common,
"Unable to reset channel, reset status %d\n", r);
-
- ath9k_hw_enable_interrupts(ah);
- ath9k_queue_reset(sc, RESET_TYPE_BB_HANG);
-
goto out;
}
diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 35fc68be158d..4469321c0eb3 100644
--- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -3317,15 +3317,15 @@ static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
goto err;
}
+ /* External image takes precedence if specified */
if (brcmf_sdbrcm_download_code_file(bus)) {
brcmf_err("dongle image file download failed\n");
goto err;
}
- if (brcmf_sdbrcm_download_nvram(bus)) {
+ /* External nvram takes precedence if specified */
+ if (brcmf_sdbrcm_download_nvram(bus))
brcmf_err("dongle nvram file download failed\n");
- goto err;
- }
/* Take arm out of reset */
if (brcmf_sdbrcm_download_state(bus, false)) {
diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index ec46ffff5409..2af9c0f0798d 100644
--- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -1891,10 +1891,8 @@ static s32
brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
u8 key_idx, const u8 *mac_addr, struct key_params *params)
{
- struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_wsec_key key;
s32 err = 0;
- u8 keybuf[8];
memset(&key, 0, sizeof(key));
key.index = (u32) key_idx;
@@ -1918,9 +1916,8 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
brcmf_dbg(CONN, "Setting the key index %d\n", key.index);
memcpy(key.data, params->key, key.len);
- if ((ifp->vif->mode != WL_MODE_AP) &&
- (params->cipher == WLAN_CIPHER_SUITE_TKIP)) {
- brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
+ if (params->cipher == WLAN_CIPHER_SUITE_TKIP) {
+ u8 keybuf[8];
memcpy(keybuf, &key.data[24], sizeof(keybuf));
memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
memcpy(&key.data[16], keybuf, sizeof(keybuf));
@@ -2016,7 +2013,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
break;
case WLAN_CIPHER_SUITE_TKIP:
if (ifp->vif->mode != WL_MODE_AP) {
- brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
+ brcmf_dbg(CONN, "Swapping key\n");
memcpy(keybuf, &key.data[24], sizeof(keybuf));
memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
memcpy(&key.data[16], keybuf, sizeof(keybuf));
@@ -2121,7 +2118,8 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
err = -EAGAIN;
goto done;
}
- if (wsec & WEP_ENABLED) {
+ switch (wsec & ~SES_OW_ENABLED) {
+ case WEP_ENABLED:
sec = &profile->sec;
if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
params.cipher = WLAN_CIPHER_SUITE_WEP40;
@@ -2130,13 +2128,16 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
params.cipher = WLAN_CIPHER_SUITE_WEP104;
brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");
}
- } else if (wsec & TKIP_ENABLED) {
+ break;
+ case TKIP_ENABLED:
params.cipher = WLAN_CIPHER_SUITE_TKIP;
brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n");
- } else if (wsec & AES_ENABLED) {
+ break;
+ case AES_ENABLED:
params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n");
- } else {
+ break;
+ default:
brcmf_err("Invalid algo (0x%x)\n", wsec);
err = -EINVAL;
goto done;
@@ -3823,9 +3824,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
- s32 err;
+ s32 err = -EPERM;
struct brcmf_fil_bss_enable_le bss_enable;
- struct brcmf_join_params join_params;
brcmf_dbg(TRACE, "Enter\n");
@@ -3833,21 +3833,16 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
/* Due to most likely deauths outstanding we sleep */
/* first to make sure they get processed by fw. */
msleep(400);
-
- memset(&join_params, 0, sizeof(join_params));
- err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
- &join_params, sizeof(join_params));
- if (err < 0)
- brcmf_err("SET SSID error (%d)\n", err);
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0);
- if (err < 0)
- brcmf_err("BRCMF_C_UP error %d\n", err);
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0);
- if (err < 0)
+ if (err < 0) {
brcmf_err("setting AP mode failed %d\n", err);
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 0);
- if (err < 0)
- brcmf_err("setting INFRA mode failed %d\n", err);
+ goto exit;
+ }
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0);
+ if (err < 0) {
+ brcmf_err("BRCMF_C_UP error %d\n", err);
+ goto exit;
+ }
} else {
bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx);
bss_enable.enable = cpu_to_le32(0);
@@ -3860,6 +3855,7 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
+exit:
return err;
}
diff --git a/trunk/drivers/net/wireless/mwifiex/scan.c b/trunk/drivers/net/wireless/mwifiex/scan.c
index e7f6deaf715e..d215b4d3c51b 100644
--- a/trunk/drivers/net/wireless/mwifiex/scan.c
+++ b/trunk/drivers/net/wireless/mwifiex/scan.c
@@ -1393,10 +1393,8 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
queue_work(adapter->workqueue, &adapter->main_work);
/* Perform internal scan synchronously */
- if (!priv->scan_request) {
- dev_dbg(adapter->dev, "wait internal scan\n");
+ if (!priv->scan_request)
mwifiex_wait_queue_complete(adapter, cmd_node);
- }
} else {
spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
flags);
@@ -1795,12 +1793,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
/* Need to indicate IOCTL complete */
if (adapter->curr_cmd->wait_q_enabled) {
adapter->cmd_wait_q.status = 0;
- if (!priv->scan_request) {
- dev_dbg(adapter->dev,
- "complete internal scan\n");
- mwifiex_complete_cmd(adapter,
- adapter->curr_cmd);
- }
+ mwifiex_complete_cmd(adapter, adapter->curr_cmd);
}
if (priv->report_scan_result)
priv->report_scan_result = false;
diff --git a/trunk/drivers/net/wireless/rt2x00/Kconfig b/trunk/drivers/net/wireless/rt2x00/Kconfig
index 76cd47eb901e..2bf4efa33186 100644
--- a/trunk/drivers/net/wireless/rt2x00/Kconfig
+++ b/trunk/drivers/net/wireless/rt2x00/Kconfig
@@ -20,7 +20,6 @@ if RT2X00
config RT2400PCI
tristate "Ralink rt2400 (PCI/PCMCIA) support"
depends on PCI
- select RT2X00_LIB_MMIO
select RT2X00_LIB_PCI
select EEPROM_93CX6
---help---
@@ -32,7 +31,6 @@ config RT2400PCI
config RT2500PCI
tristate "Ralink rt2500 (PCI/PCMCIA) support"
depends on PCI
- select RT2X00_LIB_MMIO
select RT2X00_LIB_PCI
select EEPROM_93CX6
---help---
@@ -45,7 +43,6 @@ config RT61PCI
tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support"
depends on PCI
select RT2X00_LIB_PCI
- select RT2X00_LIB_MMIO
select RT2X00_LIB_FIRMWARE
select RT2X00_LIB_CRYPTO
select CRC_ITU_T
@@ -60,7 +57,6 @@ config RT2800PCI
tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support"
depends on PCI || SOC_RT288X || SOC_RT305X
select RT2800_LIB
- select RT2X00_LIB_MMIO
select RT2X00_LIB_PCI if PCI
select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X
select RT2X00_LIB_FIRMWARE
@@ -189,9 +185,6 @@ endif
config RT2800_LIB
tristate
-config RT2X00_LIB_MMIO
- tristate
-
config RT2X00_LIB_PCI
tristate
select RT2X00_LIB
diff --git a/trunk/drivers/net/wireless/rt2x00/Makefile b/trunk/drivers/net/wireless/rt2x00/Makefile
index f069d8bc5b67..349d5b8284a4 100644
--- a/trunk/drivers/net/wireless/rt2x00/Makefile
+++ b/trunk/drivers/net/wireless/rt2x00/Makefile
@@ -9,7 +9,6 @@ rt2x00lib-$(CONFIG_RT2X00_LIB_FIRMWARE) += rt2x00firmware.o
rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS) += rt2x00leds.o
obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o
-obj-$(CONFIG_RT2X00_LIB_MMIO) += rt2x00mmio.o
obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o
obj-$(CONFIG_RT2X00_LIB_SOC) += rt2x00soc.o
obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o
diff --git a/trunk/drivers/net/wireless/rt2x00/rt2400pci.c b/trunk/drivers/net/wireless/rt2x00/rt2400pci.c
index dcfb54e0c516..221beaaa83f1 100644
--- a/trunk/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/trunk/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -34,7 +34,6 @@
#include
#include "rt2x00.h"
-#include "rt2x00mmio.h"
#include "rt2x00pci.h"
#include "rt2400pci.h"
diff --git a/trunk/drivers/net/wireless/rt2x00/rt2500pci.c b/trunk/drivers/net/wireless/rt2x00/rt2500pci.c
index e1d2dc9ed28a..39edc59e8d03 100644
--- a/trunk/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/trunk/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -34,7 +34,6 @@
#include
#include "rt2x00.h"
-#include "rt2x00mmio.h"
#include "rt2x00pci.h"
#include "rt2500pci.h"
diff --git a/trunk/drivers/net/wireless/rt2x00/rt2800pci.c b/trunk/drivers/net/wireless/rt2x00/rt2800pci.c
index ba5a05625aaa..ded73da4de0b 100644
--- a/trunk/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/trunk/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -41,7 +41,6 @@
#include
#include "rt2x00.h"
-#include "rt2x00mmio.h"
#include "rt2x00pci.h"
#include "rt2x00soc.h"
#include "rt2800lib.h"
diff --git a/trunk/drivers/net/wireless/rt2x00/rt2x00mmio.c b/trunk/drivers/net/wireless/rt2x00/rt2x00mmio.c
deleted file mode 100644
index d84a680ba0c9..000000000000
--- a/trunk/drivers/net/wireless/rt2x00/rt2x00mmio.c
+++ /dev/null
@@ -1,216 +0,0 @@
-/*
- Copyright (C) 2004 - 2009 Ivo van Doorn
-
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-/*
- Module: rt2x00mmio
- Abstract: rt2x00 generic mmio device routines.
- */
-
-#include
-#include
-#include
-#include
-
-#include "rt2x00.h"
-#include "rt2x00mmio.h"
-
-/*
- * Register access.
- */
-int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset,
- const struct rt2x00_field32 field,
- u32 *reg)
-{
- unsigned int i;
-
- if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
- return 0;
-
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2x00pci_register_read(rt2x00dev, offset, reg);
- if (!rt2x00_get_field32(*reg, field))
- return 1;
- udelay(REGISTER_BUSY_DELAY);
- }
-
- printk_once(KERN_ERR "%s() Indirect register access failed: "
- "offset=0x%.08x, value=0x%.08x\n", __func__, offset, *reg);
- *reg = ~0;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);
-
-bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
-{
- struct data_queue *queue = rt2x00dev->rx;
- struct queue_entry *entry;
- struct queue_entry_priv_pci *entry_priv;
- struct skb_frame_desc *skbdesc;
- int max_rx = 16;
-
- while (--max_rx) {
- entry = rt2x00queue_get_entry(queue, Q_INDEX);
- entry_priv = entry->priv_data;
-
- if (rt2x00dev->ops->lib->get_entry_state(entry))
- break;
-
- /*
- * Fill in desc fields of the skb descriptor
- */
- skbdesc = get_skb_frame_desc(entry->skb);
- skbdesc->desc = entry_priv->desc;
- skbdesc->desc_len = entry->queue->desc_size;
-
- /*
- * DMA is already done, notify rt2x00lib that
- * it finished successfully.
- */
- rt2x00lib_dmastart(entry);
- rt2x00lib_dmadone(entry);
-
- /*
- * Send the frame to rt2x00lib for further processing.
- */
- rt2x00lib_rxdone(entry, GFP_ATOMIC);
- }
-
- return !max_rx;
-}
-EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
-
-void rt2x00pci_flush_queue(struct data_queue *queue, bool drop)
-{
- unsigned int i;
-
- for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
- msleep(10);
-}
-EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue);
-
-/*
- * Device initialization handlers.
- */
-static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
- struct data_queue *queue)
-{
- struct queue_entry_priv_pci *entry_priv;
- void *addr;
- dma_addr_t dma;
- unsigned int i;
-
- /*
- * Allocate DMA memory for descriptor and buffer.
- */
- addr = dma_alloc_coherent(rt2x00dev->dev,
- queue->limit * queue->desc_size,
- &dma, GFP_KERNEL);
- if (!addr)
- return -ENOMEM;
-
- memset(addr, 0, queue->limit * queue->desc_size);
-
- /*
- * Initialize all queue entries to contain valid addresses.
- */
- for (i = 0; i < queue->limit; i++) {
- entry_priv = queue->entries[i].priv_data;
- entry_priv->desc = addr + i * queue->desc_size;
- entry_priv->desc_dma = dma + i * queue->desc_size;
- }
-
- return 0;
-}
-
-static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
- struct data_queue *queue)
-{
- struct queue_entry_priv_pci *entry_priv =
- queue->entries[0].priv_data;
-
- if (entry_priv->desc)
- dma_free_coherent(rt2x00dev->dev,
- queue->limit * queue->desc_size,
- entry_priv->desc, entry_priv->desc_dma);
- entry_priv->desc = NULL;
-}
-
-int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
-{
- struct data_queue *queue;
- int status;
-
- /*
- * Allocate DMA
- */
- queue_for_each(rt2x00dev, queue) {
- status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
- if (status)
- goto exit;
- }
-
- /*
- * Register interrupt handler.
- */
- status = request_irq(rt2x00dev->irq,
- rt2x00dev->ops->lib->irq_handler,
- IRQF_SHARED, rt2x00dev->name, rt2x00dev);
- if (status) {
- ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
- rt2x00dev->irq, status);
- goto exit;
- }
-
- return 0;
-
-exit:
- queue_for_each(rt2x00dev, queue)
- rt2x00pci_free_queue_dma(rt2x00dev, queue);
-
- return status;
-}
-EXPORT_SYMBOL_GPL(rt2x00pci_initialize);
-
-void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
-{
- struct data_queue *queue;
-
- /*
- * Free irq line.
- */
- free_irq(rt2x00dev->irq, rt2x00dev);
-
- /*
- * Free DMA
- */
- queue_for_each(rt2x00dev, queue)
- rt2x00pci_free_queue_dma(rt2x00dev, queue);
-}
-EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);
-
-/*
- * rt2x00mmio module information.
- */
-MODULE_AUTHOR(DRV_PROJECT);
-MODULE_VERSION(DRV_VERSION);
-MODULE_DESCRIPTION("rt2x00 mmio library");
-MODULE_LICENSE("GPL");
diff --git a/trunk/drivers/net/wireless/rt2x00/rt2x00mmio.h b/trunk/drivers/net/wireless/rt2x00/rt2x00mmio.h
deleted file mode 100644
index 4ecaf60175bf..000000000000
--- a/trunk/drivers/net/wireless/rt2x00/rt2x00mmio.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- Copyright (C) 2004 - 2009 Ivo van Doorn
-
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-/*
- Module: rt2x00mmio
- Abstract: Data structures for the rt2x00mmio module.
- */
-
-#ifndef RT2X00MMIO_H
-#define RT2X00MMIO_H
-
-#include
-
-/*
- * Register access.
- */
-static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset,
- u32 *value)
-{
- *value = readl(rt2x00dev->csr.base + offset);
-}
-
-static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset,
- void *value, const u32 length)
-{
- memcpy_fromio(value, rt2x00dev->csr.base + offset, length);
-}
-
-static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset,
- u32 value)
-{
- writel(value, rt2x00dev->csr.base + offset);
-}
-
-static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset,
- const void *value,
- const u32 length)
-{
- __iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2);
-}
-
-/**
- * rt2x00pci_regbusy_read - Read from register with busy check
- * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
- * @offset: Register offset
- * @field: Field to check if register is busy
- * @reg: Pointer to where register contents should be stored
- *
- * This function will read the given register, and checks if the
- * register is busy. If it is, it will sleep for a couple of
- * microseconds before reading the register again. If the register
- * is not read after a certain timeout, this function will return
- * FALSE.
- */
-int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset,
- const struct rt2x00_field32 field,
- u32 *reg);
-
-/**
- * struct queue_entry_priv_pci: Per entry PCI specific information
- *
- * @desc: Pointer to device descriptor
- * @desc_dma: DMA pointer to &desc.
- * @data: Pointer to device's entry memory.
- * @data_dma: DMA pointer to &data.
- */
-struct queue_entry_priv_pci {
- __le32 *desc;
- dma_addr_t desc_dma;
-};
-
-/**
- * rt2x00pci_rxdone - Handle RX done events
- * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
- *
- * Returns true if there are still rx frames pending and false if all
- * pending rx frames were processed.
- */
-bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev);
-
-/**
- * rt2x00pci_flush_queue - Flush data queue
- * @queue: Data queue to stop
- * @drop: True to drop all pending frames.
- *
- * This will wait for a maximum of 100ms, waiting for the queues
- * to become empty.
- */
-void rt2x00pci_flush_queue(struct data_queue *queue, bool drop);
-
-/*
- * Device initialization handlers.
- */
-int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev);
-void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev);
-
-#endif /* RT2X00MMIO_H */
diff --git a/trunk/drivers/net/wireless/rt2x00/rt2x00pci.c b/trunk/drivers/net/wireless/rt2x00/rt2x00pci.c
index e87865e33113..a0c8caef3b0a 100644
--- a/trunk/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/trunk/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -32,6 +32,182 @@
#include "rt2x00.h"
#include "rt2x00pci.h"
+/*
+ * Register access.
+ */
+int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset,
+ const struct rt2x00_field32 field,
+ u32 *reg)
+{
+ unsigned int i;
+
+ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
+ return 0;
+
+ for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ rt2x00pci_register_read(rt2x00dev, offset, reg);
+ if (!rt2x00_get_field32(*reg, field))
+ return 1;
+ udelay(REGISTER_BUSY_DELAY);
+ }
+
+ ERROR(rt2x00dev, "Indirect register access failed: "
+ "offset=0x%.08x, value=0x%.08x\n", offset, *reg);
+ *reg = ~0;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);
+
+bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue = rt2x00dev->rx;
+ struct queue_entry *entry;
+ struct queue_entry_priv_pci *entry_priv;
+ struct skb_frame_desc *skbdesc;
+ int max_rx = 16;
+
+ while (--max_rx) {
+ entry = rt2x00queue_get_entry(queue, Q_INDEX);
+ entry_priv = entry->priv_data;
+
+ if (rt2x00dev->ops->lib->get_entry_state(entry))
+ break;
+
+ /*
+ * Fill in desc fields of the skb descriptor
+ */
+ skbdesc = get_skb_frame_desc(entry->skb);
+ skbdesc->desc = entry_priv->desc;
+ skbdesc->desc_len = entry->queue->desc_size;
+
+ /*
+ * DMA is already done, notify rt2x00lib that
+ * it finished successfully.
+ */
+ rt2x00lib_dmastart(entry);
+ rt2x00lib_dmadone(entry);
+
+ /*
+ * Send the frame to rt2x00lib for further processing.
+ */
+ rt2x00lib_rxdone(entry, GFP_ATOMIC);
+ }
+
+ return !max_rx;
+}
+EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
+
+void rt2x00pci_flush_queue(struct data_queue *queue, bool drop)
+{
+ unsigned int i;
+
+ for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
+ msleep(10);
+}
+EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue);
+
+/*
+ * Device initialization handlers.
+ */
+static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
+ struct data_queue *queue)
+{
+ struct queue_entry_priv_pci *entry_priv;
+ void *addr;
+ dma_addr_t dma;
+ unsigned int i;
+
+ /*
+ * Allocate DMA memory for descriptor and buffer.
+ */
+ addr = dma_alloc_coherent(rt2x00dev->dev,
+ queue->limit * queue->desc_size,
+ &dma, GFP_KERNEL);
+ if (!addr)
+ return -ENOMEM;
+
+ memset(addr, 0, queue->limit * queue->desc_size);
+
+ /*
+ * Initialize all queue entries to contain valid addresses.
+ */
+ for (i = 0; i < queue->limit; i++) {
+ entry_priv = queue->entries[i].priv_data;
+ entry_priv->desc = addr + i * queue->desc_size;
+ entry_priv->desc_dma = dma + i * queue->desc_size;
+ }
+
+ return 0;
+}
+
+static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
+ struct data_queue *queue)
+{
+ struct queue_entry_priv_pci *entry_priv =
+ queue->entries[0].priv_data;
+
+ if (entry_priv->desc)
+ dma_free_coherent(rt2x00dev->dev,
+ queue->limit * queue->desc_size,
+ entry_priv->desc, entry_priv->desc_dma);
+ entry_priv->desc = NULL;
+}
+
+int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+ int status;
+
+ /*
+ * Allocate DMA
+ */
+ queue_for_each(rt2x00dev, queue) {
+ status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
+ if (status)
+ goto exit;
+ }
+
+ /*
+ * Register interrupt handler.
+ */
+ status = request_irq(rt2x00dev->irq,
+ rt2x00dev->ops->lib->irq_handler,
+ IRQF_SHARED, rt2x00dev->name, rt2x00dev);
+ if (status) {
+ ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
+ rt2x00dev->irq, status);
+ goto exit;
+ }
+
+ return 0;
+
+exit:
+ queue_for_each(rt2x00dev, queue)
+ rt2x00pci_free_queue_dma(rt2x00dev, queue);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(rt2x00pci_initialize);
+
+void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+
+ /*
+ * Free irq line.
+ */
+ free_irq(rt2x00dev->irq, rt2x00dev);
+
+ /*
+ * Free DMA
+ */
+ queue_for_each(rt2x00dev, queue)
+ rt2x00pci_free_queue_dma(rt2x00dev, queue);
+}
+EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);
+
/*
* PCI driver handlers.
*/
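
The rt2x00pci.c additions above re-add the descriptor-ring and IRQ setup that the deleted rt2x00mmio.c carried. The core pattern is a single dma_alloc_coherent() call for the whole ring, with per-entry CPU and bus addresses derived by slicing that allocation at desc_size strides. A minimal sketch of the pattern, using made-up demo_* names rather than the real rt2x00 structures:

/*
 * Sketch of the ring-slicing pattern used by rt2x00pci_alloc_queue_dma()
 * above: one coherent allocation, per-entry pointers at desc_size strides.
 * struct demo_entry and demo_ring_alloc() are illustrative, not kernel API.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>

struct demo_entry {
	void		*desc;		/* CPU address of this entry's descriptor */
	dma_addr_t	desc_dma;	/* bus address handed to the device */
};

static int demo_ring_alloc(struct device *dev, struct demo_entry *entries,
			   unsigned int limit, unsigned int desc_size)
{
	dma_addr_t dma;
	void *addr;
	unsigned int i;

	addr = dma_alloc_coherent(dev, limit * desc_size, &dma, GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	memset(addr, 0, limit * desc_size);	/* start with zeroed descriptors */

	for (i = 0; i < limit; i++) {
		entries[i].desc     = addr + i * desc_size;
		entries[i].desc_dma = dma  + i * desc_size;
	}
	return 0;
}

Freeing follows the inverse shape, as in rt2x00pci_free_queue_dma() above: one dma_free_coherent() of limit * desc_size using the first entry's desc/desc_dma pair.
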
diff --git a/trunk/drivers/net/wireless/rt2x00/rt2x00pci.h b/trunk/drivers/net/wireless/rt2x00/rt2x00pci.h
index 60d90b20f8b9..e2c99f2b9a14 100644
--- a/trunk/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/trunk/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -35,6 +35,94 @@
*/
#define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops)
+/*
+ * Register access.
+ */
+static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset,
+ u32 *value)
+{
+ *value = readl(rt2x00dev->csr.base + offset);
+}
+
+static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset,
+ void *value, const u32 length)
+{
+ memcpy_fromio(value, rt2x00dev->csr.base + offset, length);
+}
+
+static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset,
+ u32 value)
+{
+ writel(value, rt2x00dev->csr.base + offset);
+}
+
+static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset,
+ const void *value,
+ const u32 length)
+{
+ __iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2);
+}
+
+/**
+ * rt2x00pci_regbusy_read - Read from register with busy check
+ * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
+ * @offset: Register offset
+ * @field: Field to check if register is busy
+ * @reg: Pointer to where register contents should be stored
+ *
+ * This function will read the given register, and checks if the
+ * register is busy. If it is, it will sleep for a couple of
+ * microseconds before reading the register again. If the register
+ * is not read after a certain timeout, this function will return
+ * FALSE.
+ */
+int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset,
+ const struct rt2x00_field32 field,
+ u32 *reg);
+
+/**
+ * struct queue_entry_priv_pci: Per entry PCI specific information
+ *
+ * @desc: Pointer to device descriptor
+ * @desc_dma: DMA pointer to &desc.
+ * @data: Pointer to device's entry memory.
+ * @data_dma: DMA pointer to &data.
+ */
+struct queue_entry_priv_pci {
+ __le32 *desc;
+ dma_addr_t desc_dma;
+};
+
+/**
+ * rt2x00pci_rxdone - Handle RX done events
+ * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
+ *
+ * Returns true if there are still rx frames pending and false if all
+ * pending rx frames were processed.
+ */
+bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev);
+
+/**
+ * rt2x00pci_flush_queue - Flush data queue
+ * @queue: Data queue to stop
+ * @drop: True to drop all pending frames.
+ *
+ * This will wait for a maximum of 100ms, waiting for the queues
+ * to become empty.
+ */
+void rt2x00pci_flush_queue(struct data_queue *queue, bool drop);
+
+/*
+ * Device initialization handlers.
+ */
+int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev);
+void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev);
+
/*
* PCI driver handlers.
*/
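
The rt2x00pci.h additions above restore the MMIO access API that the deleted rt2x00mmio.h provided: thin readl()/writel() wrappers plus a busy-polled read that returns 1 once the named field clears and 0 on timeout. A hedged usage sketch; DEMO_CSR and DEMO_CSR_BUSY below are invented register/field definitions, not part of any real rt2x00 register map:

/*
 * Illustrative only: DEMO_CSR is an invented register offset and DEMO_CSR_BUSY
 * an invented struct rt2x00_field32 (normally built with FIELD32()).
 */
static int demo_kick_and_wait(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;

	rt2x00pci_register_read(rt2x00dev, DEMO_CSR, &reg);
	rt2x00pci_register_write(rt2x00dev, DEMO_CSR, reg | 0x1);

	/* Poll until the busy field clears; 0 means the timeout expired. */
	if (!rt2x00pci_regbusy_read(rt2x00dev, DEMO_CSR, DEMO_CSR_BUSY, &reg))
		return -EBUSY;

	return 0;
}
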
diff --git a/trunk/drivers/net/wireless/rt2x00/rt61pci.c b/trunk/drivers/net/wireless/rt2x00/rt61pci.c
index 9e3c8ff53e3f..f95792cfcf89 100644
--- a/trunk/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/trunk/drivers/net/wireless/rt2x00/rt61pci.c
@@ -35,7 +35,6 @@
#include
#include "rt2x00.h"
-#include "rt2x00mmio.h"
#include "rt2x00pci.h"
#include "rt61pci.h"
diff --git a/trunk/drivers/platform/x86/hp-wmi.c b/trunk/drivers/platform/x86/hp-wmi.c
index 1a779bbfb87d..45cacf79f3a7 100644
--- a/trunk/drivers/platform/x86/hp-wmi.c
+++ b/trunk/drivers/platform/x86/hp-wmi.c
@@ -134,6 +134,7 @@ static const struct key_entry hp_wmi_keymap[] = {
{ KE_KEY, 0x2142, { KEY_MEDIA } },
{ KE_KEY, 0x213b, { KEY_INFO } },
{ KE_KEY, 0x2169, { KEY_DIRECTION } },
+ { KE_KEY, 0x216a, { KEY_SETUP } },
{ KE_KEY, 0x231b, { KEY_HELP } },
{ KE_END, 0 }
};
@@ -924,6 +925,9 @@ static int __init hp_wmi_init(void)
err = hp_wmi_input_setup();
if (err)
return err;
+
+ //Enable magic for hotkeys that run on the SMBus
+ ec_write(0xe6,0x6e);
}
if (bios_capable) {
diff --git a/trunk/drivers/platform/x86/thinkpad_acpi.c b/trunk/drivers/platform/x86/thinkpad_acpi.c
index edec135b1685..9a907567f41e 100644
--- a/trunk/drivers/platform/x86/thinkpad_acpi.c
+++ b/trunk/drivers/platform/x86/thinkpad_acpi.c
@@ -1964,6 +1964,9 @@ struct tp_nvram_state {
/* kthread for the hotkey poller */
static struct task_struct *tpacpi_hotkey_task;
+/* Acquired while the poller kthread is running, use to sync start/stop */
+static struct mutex hotkey_thread_mutex;
+
/*
* Acquire mutex to write poller control variables as an
* atomic block.
@@ -2459,6 +2462,8 @@ static int hotkey_kthread(void *data)
unsigned int poll_freq;
bool was_frozen;
+ mutex_lock(&hotkey_thread_mutex);
+
if (tpacpi_lifecycle == TPACPI_LIFE_EXITING)
goto exit;
@@ -2518,6 +2523,7 @@ static int hotkey_kthread(void *data)
}
exit:
+ mutex_unlock(&hotkey_thread_mutex);
return 0;
}
@@ -2527,6 +2533,9 @@ static void hotkey_poll_stop_sync(void)
if (tpacpi_hotkey_task) {
kthread_stop(tpacpi_hotkey_task);
tpacpi_hotkey_task = NULL;
+ mutex_lock(&hotkey_thread_mutex);
+ /* at this point, the thread did exit */
+ mutex_unlock(&hotkey_thread_mutex);
}
}
@@ -3225,6 +3234,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
mutex_init(&hotkey_mutex);
#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ mutex_init(&hotkey_thread_mutex);
mutex_init(&hotkey_thread_data_mutex);
#endif
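
The thinkpad_acpi hunks above use hotkey_thread_mutex purely as a synchronization point with the poller's exit path: the kthread holds the mutex for its whole run, so the lock/unlock pair in hotkey_poll_stop_sync() cannot complete until hotkey_kthread() has released the mutex on its way out. A minimal sketch of the same idiom with hypothetical names:

/*
 * "Mutex as thread-exit barrier" sketch; demo_thread_mutex, demo_task and
 * demo_thread_fn() are illustrative, not thinkpad_acpi symbols.
 */
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_thread_mutex);
static struct task_struct *demo_task;

static int demo_thread_fn(void *data)
{
	mutex_lock(&demo_thread_mutex);		/* held while the thread runs */
	while (!kthread_should_stop())
		msleep_interruptible(1000);
	mutex_unlock(&demo_thread_mutex);
	return 0;
}

static void demo_thread_stop_sync(void)
{
	if (demo_task) {
		kthread_stop(demo_task);
		demo_task = NULL;
		/* Cannot be taken until demo_thread_fn() has dropped it. */
		mutex_lock(&demo_thread_mutex);
		mutex_unlock(&demo_thread_mutex);
	}
}
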
diff --git a/trunk/drivers/remoteproc/Kconfig b/trunk/drivers/remoteproc/Kconfig
index c6d77e20622c..cc1f7bf53fd0 100644
--- a/trunk/drivers/remoteproc/Kconfig
+++ b/trunk/drivers/remoteproc/Kconfig
@@ -4,7 +4,7 @@ menu "Remoteproc drivers"
config REMOTEPROC
tristate
depends on HAS_DMA
- select FW_LOADER
+ select FW_CONFIG
select VIRTIO
config OMAP_REMOTEPROC
diff --git a/trunk/drivers/remoteproc/remoteproc_core.c b/trunk/drivers/remoteproc/remoteproc_core.c
index 8edb4aed5d36..29387df4bfc9 100644
--- a/trunk/drivers/remoteproc/remoteproc_core.c
+++ b/trunk/drivers/remoteproc/remoteproc_core.c
@@ -217,7 +217,7 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
* TODO: support predefined notifyids (via resource table)
*/
ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL);
- if (ret < 0) {
+ if (ret) {
dev_err(dev, "idr_alloc failed: %d\n", ret);
dma_free_coherent(dev->parent, size, va, dma);
return ret;
@@ -366,12 +366,10 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
/* it is now safe to add the virtio device */
ret = rproc_add_virtio_dev(rvdev, rsc->id);
if (ret)
- goto remove_rvdev;
+ goto free_rvdev;
return 0;
-remove_rvdev:
- list_del(&rvdev->node);
free_rvdev:
kfree(rvdev);
return ret;
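
One API note on the first remoteproc_core.c hunk above: idr_alloc() returns the newly allocated ID, zero or positive on success, and a negative errno on failure, so the error check needs to be "ret < 0"; testing "if (ret)" would misreport every non-zero ID as a failure. A short sketch of the intended usage with illustrative names:

/*
 * idr_alloc() error-handling sketch; demo_idr and demo_register() are
 * illustrative, not remoteproc symbols.
 */
#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/kernel.h>

static DEFINE_IDR(demo_idr);

static int demo_register(void *obj)
{
	int id;

	id = idr_alloc(&demo_idr, obj, 0, 0, GFP_KERNEL);
	if (id < 0) {			/* negative errno; valid IDs may well be > 0 */
		pr_err("idr_alloc failed: %d\n", id);
		return id;
	}

	return id;	/* caller keeps the id for later idr_find()/idr_remove() */
}
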
diff --git a/trunk/drivers/remoteproc/ste_modem_rproc.c b/trunk/drivers/remoteproc/ste_modem_rproc.c
index fb95c4220052..a7743c069339 100644
--- a/trunk/drivers/remoteproc/ste_modem_rproc.c
+++ b/trunk/drivers/remoteproc/ste_modem_rproc.c
@@ -240,8 +240,6 @@ static int sproc_drv_remove(struct platform_device *pdev)
/* Unregister as remoteproc device */
rproc_del(sproc->rproc);
- dma_free_coherent(sproc->rproc->dev.parent, SPROC_FW_SIZE,
- sproc->fw_addr, sproc->fw_dma_addr);
rproc_put(sproc->rproc);
mdev->drv_data = NULL;
@@ -299,13 +297,10 @@ static int sproc_probe(struct platform_device *pdev)
/* Register as a remoteproc device */
err = rproc_add(rproc);
if (err)
- goto free_mem;
+ goto free_rproc;
return 0;
-free_mem:
- dma_free_coherent(rproc->dev.parent, SPROC_FW_SIZE,
- sproc->fw_addr, sproc->fw_dma_addr);
free_rproc:
/* Reset device data upon error */
mdev->drv_data = NULL;
diff --git a/trunk/drivers/s390/net/qeth_core.h b/trunk/drivers/s390/net/qeth_core.h
index 6ccb7457746b..8c0622399fcd 100644
--- a/trunk/drivers/s390/net/qeth_core.h
+++ b/trunk/drivers/s390/net/qeth_core.h
@@ -769,7 +769,6 @@ struct qeth_card {
unsigned long thread_start_mask;
unsigned long thread_allowed_mask;
unsigned long thread_running_mask;
- struct task_struct *recovery_task;
spinlock_t ip_lock;
struct list_head ip_list;
struct list_head *ip_tbd_list;
@@ -863,8 +862,6 @@ extern struct qeth_card_list_struct qeth_core_card_list;
extern struct kmem_cache *qeth_core_header_cache;
extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
-void qeth_set_recovery_task(struct qeth_card *);
-void qeth_clear_recovery_task(struct qeth_card *);
void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
int qeth_threads_running(struct qeth_card *, unsigned long);
int qeth_wait_for_threads(struct qeth_card *, unsigned long);
diff --git a/trunk/drivers/s390/net/qeth_core_main.c b/trunk/drivers/s390/net/qeth_core_main.c
index 451f92020599..0d73a999983d 100644
--- a/trunk/drivers/s390/net/qeth_core_main.c
+++ b/trunk/drivers/s390/net/qeth_core_main.c
@@ -177,23 +177,6 @@ const char *qeth_get_cardname_short(struct qeth_card *card)
return "n/a";
}
-void qeth_set_recovery_task(struct qeth_card *card)
-{
- card->recovery_task = current;
-}
-EXPORT_SYMBOL_GPL(qeth_set_recovery_task);
-
-void qeth_clear_recovery_task(struct qeth_card *card)
-{
- card->recovery_task = NULL;
-}
-EXPORT_SYMBOL_GPL(qeth_clear_recovery_task);
-
-static bool qeth_is_recovery_task(const struct qeth_card *card)
-{
- return card->recovery_task == current;
-}
-
void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
int clear_start_mask)
{
@@ -222,8 +205,6 @@ EXPORT_SYMBOL_GPL(qeth_threads_running);
int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
{
- if (qeth_is_recovery_task(card))
- return 0;
return wait_event_interruptible(card->wait_q,
qeth_threads_running(card, threads) == 0);
}
diff --git a/trunk/drivers/s390/net/qeth_l2_main.c b/trunk/drivers/s390/net/qeth_l2_main.c
index 155b101bd730..d690166efeaf 100644
--- a/trunk/drivers/s390/net/qeth_l2_main.c
+++ b/trunk/drivers/s390/net/qeth_l2_main.c
@@ -1143,7 +1143,6 @@ static int qeth_l2_recover(void *ptr)
QETH_CARD_TEXT(card, 2, "recover2");
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
- qeth_set_recovery_task(card);
__qeth_l2_set_offline(card->gdev, 1);
rc = __qeth_l2_set_online(card->gdev, 1);
if (!rc)
@@ -1154,7 +1153,6 @@ static int qeth_l2_recover(void *ptr)
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
}
- qeth_clear_recovery_task(card);
qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
return 0;
diff --git a/trunk/drivers/s390/net/qeth_l3_main.c b/trunk/drivers/s390/net/qeth_l3_main.c
index 1f7edf1b26c3..8710337dab3e 100644
--- a/trunk/drivers/s390/net/qeth_l3_main.c
+++ b/trunk/drivers/s390/net/qeth_l3_main.c
@@ -3515,7 +3515,6 @@ static int qeth_l3_recover(void *ptr)
QETH_CARD_TEXT(card, 2, "recover2");
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
- qeth_set_recovery_task(card);
__qeth_l3_set_offline(card->gdev, 1);
rc = __qeth_l3_set_online(card->gdev, 1);
if (!rc)
@@ -3526,7 +3525,6 @@ static int qeth_l3_recover(void *ptr)
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
}
- qeth_clear_recovery_task(card);
qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
return 0;
diff --git a/trunk/drivers/scsi/ibmvscsi/ibmvscsi.c b/trunk/drivers/scsi/ibmvscsi/ibmvscsi.c
index d0fa4b6c551f..a044f593e8b9 100644
--- a/trunk/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/trunk/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1899,8 +1899,8 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
sdev->allow_restart = 1;
blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
}
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
return 0;
}
diff --git a/trunk/drivers/scsi/ipr.c b/trunk/drivers/scsi/ipr.c
index 2197b57fb225..f328089a1060 100644
--- a/trunk/drivers/scsi/ipr.c
+++ b/trunk/drivers/scsi/ipr.c
@@ -5148,7 +5148,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
ipr_trace;
}
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
if (!ipr_is_naca_model(res))
res->needs_sync_complete = 1;
@@ -9349,10 +9349,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->intr_flag == IPR_USE_MSIX)
- rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
- else
- rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
+ rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
if (rc) {
dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
return rc;
@@ -9374,10 +9371,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->intr_flag == IPR_USE_MSIX)
- free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
- else
- free_irq(pdev->irq, ioa_cfg);
+ free_irq(pdev->irq, ioa_cfg);
LEAVE;
@@ -9728,7 +9722,6 @@ static void __ipr_remove(struct pci_dev *pdev)
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
flush_work(&ioa_cfg->work_q);
- INIT_LIST_HEAD(&ioa_cfg->used_res_q);
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
spin_lock(&ipr_driver_lock);
diff --git a/trunk/drivers/scsi/libsas/sas_expander.c b/trunk/drivers/scsi/libsas/sas_expander.c
index 55cbd0180159..aec2e0da5016 100644
--- a/trunk/drivers/scsi/libsas/sas_expander.c
+++ b/trunk/drivers/scsi/libsas/sas_expander.c
@@ -235,17 +235,6 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
linkrate = phy->linkrate;
memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
- /* Handle vacant phy - rest of dr data is not valid so skip it */
- if (phy->phy_state == PHY_VACANT) {
- memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
- phy->attached_dev_type = NO_DEVICE;
- if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
- phy->phy_id = phy_id;
- goto skip;
- } else
- goto out;
- }
-
phy->attached_dev_type = to_dev_type(dr);
if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))
goto out;
@@ -283,7 +272,6 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
phy->phy->maximum_linkrate = dr->pmax_linkrate;
phy->phy->negotiated_linkrate = phy->linkrate;
- skip:
if (new_phy)
if (sas_phy_add(phy->phy)) {
sas_phy_free(phy->phy);
@@ -400,7 +388,7 @@ int sas_ex_phy_discover(struct domain_device *dev, int single)
if (!disc_req)
return -ENOMEM;
- disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
+ disc_resp = alloc_smp_req(DISCOVER_RESP_SIZE);
if (!disc_resp) {
kfree(disc_req);
return -ENOMEM;
diff --git a/trunk/drivers/scsi/lpfc/lpfc_sli.c b/trunk/drivers/scsi/lpfc/lpfc_sli.c
index d43faf34c1e2..74b67d98e952 100644
--- a/trunk/drivers/scsi/lpfc/lpfc_sli.c
+++ b/trunk/drivers/scsi/lpfc/lpfc_sli.c
@@ -438,12 +438,11 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
struct lpfc_rqe *temp_hrqe;
struct lpfc_rqe *temp_drqe;
struct lpfc_register doorbell;
- int put_index;
+ int put_index = hq->host_index;
/* sanity check on queue memory */
if (unlikely(!hq) || unlikely(!dq))
return -ENOMEM;
- put_index = hq->host_index;
temp_hrqe = hq->qe[hq->host_index].rqe;
temp_drqe = dq->qe[dq->host_index].rqe;
diff --git a/trunk/drivers/scsi/qla2xxx/qla_attr.c b/trunk/drivers/scsi/qla2xxx/qla_attr.c
index b3db9dcc2619..1d82eef4e1eb 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_attr.c
+++ b/trunk/drivers/scsi/qla2xxx/qla_attr.c
@@ -1938,6 +1938,11 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
"Timer for the VP[%d] has stopped\n", vha->vp_idx);
}
+ /* No pending activities shall be there on the vha now */
+ if (ql2xextended_error_logging & ql_dbg_user)
+ msleep(random32()%10); /* Just to see if something falls on
+ * the net we have placed below */
+
BUG_ON(atomic_read(&vha->vref_count));
qla2x00_free_fcports(vha);
diff --git a/trunk/drivers/scsi/qla2xxx/qla_dbg.c b/trunk/drivers/scsi/qla2xxx/qla_dbg.c
index fbc305f1c87c..1626de52e32a 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/trunk/drivers/scsi/qla2xxx/qla_dbg.c
@@ -15,7 +15,6 @@
* | Mailbox commands | 0x115b | 0x111a-0x111b |
* | | | 0x112c-0x112e |
* | | | 0x113a |
- * | | | 0x1155-0x1158 |
* | Device Discovery | 0x2087 | 0x2020-0x2022, |
* | | | 0x2016 |
* | Queue Command and IO tracing | 0x3031 | 0x3006-0x300b |
@@ -402,7 +401,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
void *ring;
} aq, *aqp;
- if (!ha->tgt.atio_ring)
+ if (!ha->tgt.atio_q_length)
return ptr;
num_queues = 1;
diff --git a/trunk/drivers/scsi/qla2xxx/qla_def.h b/trunk/drivers/scsi/qla2xxx/qla_def.h
index 65c5ff75936b..c6509911772b 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_def.h
+++ b/trunk/drivers/scsi/qla2xxx/qla_def.h
@@ -863,6 +863,7 @@ typedef struct {
#define MBX_1 BIT_1
#define MBX_0 BIT_0
+#define RNID_TYPE_SET_VERSION 0x9
#define RNID_TYPE_ASIC_TEMP 0xC
/*
diff --git a/trunk/drivers/scsi/qla2xxx/qla_gbl.h b/trunk/drivers/scsi/qla2xxx/qla_gbl.h
index b310fa97b545..eb3ca21a7f17 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/trunk/drivers/scsi/qla2xxx/qla_gbl.h
@@ -357,6 +357,9 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *, dma_addr_t, uint16_t , uint16_t *,
extern int
qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *);
+extern int
+qla2x00_set_driver_version(scsi_qla_host_t *, char *);
+
extern int
qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
uint16_t, uint16_t, uint16_t, uint16_t);
diff --git a/trunk/drivers/scsi/qla2xxx/qla_init.c b/trunk/drivers/scsi/qla2xxx/qla_init.c
index b59203393cb2..edf4d14a1335 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_init.c
+++ b/trunk/drivers/scsi/qla2xxx/qla_init.c
@@ -619,6 +619,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
qla24xx_read_fcp_prio_cfg(vha);
+ qla2x00_set_driver_version(vha, QLA2XXX_VERSION);
+
return (rval);
}
@@ -1397,7 +1399,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
mq_size += ha->max_rsp_queues *
(rsp->length * sizeof(response_t));
}
- if (ha->tgt.atio_ring)
+ if (ha->tgt.atio_q_length)
mq_size += ha->tgt.atio_q_length * sizeof(request_t);
/* Allocate memory for Fibre Channel Event Buffer. */
if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
diff --git a/trunk/drivers/scsi/qla2xxx/qla_mbx.c b/trunk/drivers/scsi/qla2xxx/qla_mbx.c
index 43345af56431..186dd59ce4fa 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/trunk/drivers/scsi/qla2xxx/qla_mbx.c
@@ -3866,6 +3866,64 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
return rval;
}
+int
+qla2x00_set_driver_version(scsi_qla_host_t *vha, char *version)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int len;
+ uint16_t dwlen;
+ uint8_t *str;
+ dma_addr_t str_dma;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_FWI2_CAPABLE(ha) || IS_QLA82XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1155,
+ "Entered %s.\n", __func__);
+
+ str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
+ if (!str) {
+ ql_log(ql_log_warn, vha, 0x1156,
+ "Failed to allocate driver version param.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ memcpy(str, "\x7\x3\x11\x0", 4);
+ dwlen = str[0];
+ len = dwlen * sizeof(uint32_t) - 4;
+ memset(str + 4, 0, len);
+ if (len > strlen(version))
+ len = strlen(version);
+ memcpy(str + 4, version, len);
+
+ mcp->mb[0] = MBC_SET_RNID_PARAMS;
+ mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
+ mcp->mb[2] = MSW(LSD(str_dma));
+ mcp->mb[3] = LSW(LSD(str_dma));
+ mcp->mb[6] = MSW(MSD(str_dma));
+ mcp->mb[7] = LSW(MSD(str_dma));
+ mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1157,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1158,
+ "Done %s.\n", __func__);
+ }
+
+ dma_pool_free(ha->s_dma_pool, str, str_dma);
+
+ return rval;
+}
+
static int
qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
{
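
In qla2x00_set_driver_version() above, the parameter buffer is a 7-dword block whose first byte carries its own length in dwords, leaving 24 bytes after the 4-byte header for the version string, and the buffer's 64-bit DMA address is scattered across four 16-bit mailbox registers with the driver's LSW/MSW/LSD/MSD helpers. A hedged sketch of that address splitting; the macro bodies below mirror the usual qla2xxx definitions but are spelled out here purely for illustration:

/*
 * Illustrative re-statement of the mailbox address packing above;
 * demo_load_dma_addr() and these macro spellings are not the driver's own.
 */
#include <linux/types.h>

#define DEMO_LSW(x)	((u16)(x))
#define DEMO_MSW(x)	((u16)((x) >> 16))
#define DEMO_LSD(x)	((u32)((u64)(x)))
#define DEMO_MSD(x)	((u32)(((u64)(x)) >> 32))

static void demo_load_dma_addr(u16 *mb, dma_addr_t dma)
{
	mb[2] = DEMO_MSW(DEMO_LSD(dma));	/* address bits 31..16 */
	mb[3] = DEMO_LSW(DEMO_LSD(dma));	/* address bits 15..0  */
	mb[6] = DEMO_MSW(DEMO_MSD(dma));	/* address bits 63..48 */
	mb[7] = DEMO_LSW(DEMO_MSD(dma));	/* address bits 47..32 */
}
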
diff --git a/trunk/drivers/scsi/qla2xxx/qla_version.h b/trunk/drivers/scsi/qla2xxx/qla_version.h
index ec54036d1e12..2b6e478d9e33 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_version.h
+++ b/trunk/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.04.00.13-k"
+#define QLA2XXX_VERSION "8.04.00.08-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 4
diff --git a/trunk/drivers/scsi/st.c b/trunk/drivers/scsi/st.c
index 2a32036a9404..86974471af68 100644
--- a/trunk/drivers/scsi/st.c
+++ b/trunk/drivers/scsi/st.c
@@ -4112,10 +4112,6 @@ static int st_probe(struct device *dev)
tpnt->disk = disk;
disk->private_data = &tpnt->driver;
disk->queue = SDp->request_queue;
- /* SCSI tape doesn't register this gendisk via add_disk(). Manually
- * take queue reference that release_disk() expects. */
- if (!blk_get_queue(disk->queue))
- goto out_put_disk;
tpnt->driver = &st_template;
tpnt->device = SDp;
@@ -4189,7 +4185,7 @@ static int st_probe(struct device *dev)
idr_preload_end();
if (error < 0) {
pr_warn("st: idr allocation failed: %d\n", error);
- goto out_put_queue;
+ goto out_put_disk;
}
tpnt->index = error;
sprintf(disk->disk_name, "st%d", tpnt->index);
@@ -4215,8 +4211,6 @@ static int st_probe(struct device *dev)
spin_lock(&st_index_lock);
idr_remove(&st_index_idr, tpnt->index);
spin_unlock(&st_index_lock);
-out_put_queue:
- blk_put_queue(disk->queue);
out_put_disk:
put_disk(disk);
kfree(tpnt);
diff --git a/trunk/drivers/target/target_core_alua.c b/trunk/drivers/target/target_core_alua.c
index cbe48ab41745..ff1c5ee352cb 100644
--- a/trunk/drivers/target/target_core_alua.c
+++ b/trunk/drivers/target/target_core_alua.c
@@ -409,7 +409,6 @@ static inline int core_alua_state_standby(
case REPORT_LUNS:
case RECEIVE_DIAGNOSTIC:
case SEND_DIAGNOSTIC:
- return 0;
case MAINTENANCE_IN:
switch (cdb[1] & 0x1f) {
case MI_REPORT_TARGET_PGS:
@@ -452,7 +451,6 @@ static inline int core_alua_state_unavailable(
switch (cdb[0]) {
case INQUIRY:
case REPORT_LUNS:
- return 0;
case MAINTENANCE_IN:
switch (cdb[1] & 0x1f) {
case MI_REPORT_TARGET_PGS:
@@ -493,7 +491,6 @@ static inline int core_alua_state_transition(
switch (cdb[0]) {
case INQUIRY:
case REPORT_LUNS:
- return 0;
case MAINTENANCE_IN:
switch (cdb[1] & 0x1f) {
case MI_REPORT_TARGET_PGS:
diff --git a/trunk/drivers/tty/mxser.c b/trunk/drivers/tty/mxser.c
index 302909ccf183..484b6a3c9b03 100644
--- a/trunk/drivers/tty/mxser.c
+++ b/trunk/drivers/tty/mxser.c
@@ -2643,9 +2643,9 @@ static int mxser_probe(struct pci_dev *pdev,
mxvar_sdriver, brd->idx + i, &pdev->dev);
if (IS_ERR(tty_dev)) {
retval = PTR_ERR(tty_dev);
- for (; i > 0; i--)
+ for (i--; i >= 0; i--)
tty_unregister_device(mxvar_sdriver,
- brd->idx + i - 1);
+ brd->idx + i);
goto err_relbrd;
}
}
@@ -2751,9 +2751,9 @@ static int __init mxser_module_init(void)
tty_dev = tty_port_register_device(&brd->ports[i].port,
mxvar_sdriver, brd->idx + i, NULL);
if (IS_ERR(tty_dev)) {
- for (; i > 0; i--)
+ for (i--; i >= 0; i--)
tty_unregister_device(mxvar_sdriver,
- brd->idx + i - 1);
+ brd->idx + i);
for (i = 0; i < brd->info->nports; i++)
tty_port_destroy(&brd->ports[i].port);
free_irq(brd->irq, brd);
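
The two mxser hunks above rewrite the same unwind loop; both forms are equivalent: when registration fails at index i, only devices 0..i-1 were registered, so exactly those must be unregistered, in reverse. A generic sketch of the idiom with placeholder helpers:

/*
 * Unwind-on-partial-failure sketch; demo_register()/demo_unregister() stand in
 * for tty_port_register_device()/tty_unregister_device().
 */
static int demo_register(unsigned int idx);
static void demo_unregister(unsigned int idx);

static int demo_register_all(unsigned int nports)
{
	unsigned int i;
	int err;

	for (i = 0; i < nports; i++) {
		err = demo_register(i);
		if (err) {
			/* entries 0..i-1 succeeded; release them in reverse */
			while (i--)
				demo_unregister(i);
			return err;
		}
	}
	return 0;
}
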
diff --git a/trunk/drivers/tty/serial/8250/8250_pnp.c b/trunk/drivers/tty/serial/8250/8250_pnp.c
index 35d9ab95c5cb..b3455a970a1d 100644
--- a/trunk/drivers/tty/serial/8250/8250_pnp.c
+++ b/trunk/drivers/tty/serial/8250/8250_pnp.c
@@ -429,6 +429,7 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
{
struct uart_8250_port uart;
int ret, line, flags = dev_id->driver_data;
+ struct resource *res = NULL;
if (flags & UNKNOWN_DEV) {
ret = serial_pnp_guess_board(dev);
@@ -439,11 +440,12 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
memset(&uart, 0, sizeof(uart));
if (pnp_irq_valid(dev, 0))
uart.port.irq = pnp_irq(dev, 0);
- if ((flags & CIR_PORT) && pnp_port_valid(dev, 2)) {
- uart.port.iobase = pnp_port_start(dev, 2);
- uart.port.iotype = UPIO_PORT;
- } else if (pnp_port_valid(dev, 0)) {
- uart.port.iobase = pnp_port_start(dev, 0);
+ if ((flags & CIR_PORT) && pnp_port_valid(dev, 2))
+ res = pnp_get_resource(dev, IORESOURCE_IO, 2);
+ else if (pnp_port_valid(dev, 0))
+ res = pnp_get_resource(dev, IORESOURCE_IO, 0);
+ if (pnp_resource_enabled(res)) {
+ uart.port.iobase = res->start;
uart.port.iotype = UPIO_PORT;
} else if (pnp_mem_valid(dev, 0)) {
uart.port.mapbase = pnp_mem_start(dev, 0);
diff --git a/trunk/drivers/tty/serial/omap-serial.c b/trunk/drivers/tty/serial/omap-serial.c
index 30d4f7a783cd..4dc41408ecb7 100644
--- a/trunk/drivers/tty/serial/omap-serial.c
+++ b/trunk/drivers/tty/serial/omap-serial.c
@@ -886,17 +886,6 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
/* FIFO ENABLE, DMA MODE */
- up->scr |= OMAP_UART_SCR_RX_TRIG_GRANU1_MASK;
- /*
- * NOTE: Setting OMAP_UART_SCR_RX_TRIG_GRANU1_MASK
- * sets Enables the granularity of 1 for TRIGGER RX
- * level. Along with setting RX FIFO trigger level
- * to 1 (as noted below, 16 characters) and TLR[3:0]
- * to zero this will result RX FIFO threshold level
- * to 1 character, instead of 16 as noted in comment
- * below.
- */
-
/* Set receive FIFO threshold to 16 characters and
* transmit FIFO threshold to 16 spaces
*/
diff --git a/trunk/drivers/vfio/pci/vfio_pci.c b/trunk/drivers/vfio/pci/vfio_pci.c
index 7abc5c81af2c..8189cb6a86af 100644
--- a/trunk/drivers/vfio/pci/vfio_pci.c
+++ b/trunk/drivers/vfio/pci/vfio_pci.c
@@ -346,7 +346,6 @@ static long vfio_pci_ioctl(void *device_data,
if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
size_t size;
- int max = vfio_pci_get_irq_count(vdev, hdr.index);
if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
size = sizeof(uint8_t);
@@ -356,7 +355,7 @@ static long vfio_pci_ioctl(void *device_data,
return -EINVAL;
if (hdr.argsz - minsz < hdr.count * size ||
- hdr.start >= max || hdr.start + hdr.count > max)
+ hdr.count > vfio_pci_get_irq_count(vdev, hdr.index))
return -EINVAL;
data = memdup_user((void __user *)(arg + minsz),
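
The vfio hunk above toggles between a weaker and a stricter bounds check on the user-supplied IRQ range: the stricter form verifies both that hdr.start lies below the number of interrupts and that the whole [start, start + count) window fits. A hedged sketch of such a range check, written so the addition cannot overflow (a slight variation on the form in the hunk):

/*
 * Range-validation sketch; demo_range_ok() is illustrative, not vfio API.
 */
#include <linux/types.h>

static bool demo_range_ok(u32 start, u32 count, u32 max)
{
	if (start >= max)
		return false;
	if (count > max - start)	/* same as start + count > max, without the overflow */
		return false;
	return true;
}
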
diff --git a/trunk/drivers/vhost/tcm_vhost.c b/trunk/drivers/vhost/tcm_vhost.c
index 957a0b98a5d9..2968b4934659 100644
--- a/trunk/drivers/vhost/tcm_vhost.c
+++ b/trunk/drivers/vhost/tcm_vhost.c
@@ -74,8 +74,9 @@ enum {
struct vhost_scsi {
/* Protected by vhost_scsi->dev.mutex */
- struct tcm_vhost_tpg **vs_tpg;
+ struct tcm_vhost_tpg *vs_tpg[VHOST_SCSI_MAX_TARGET];
char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
+ bool vs_endpoint;
struct vhost_dev dev;
struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ];
@@ -578,27 +579,9 @@ static void tcm_vhost_submission_work(struct work_struct *work)
}
}
-static void vhost_scsi_send_bad_target(struct vhost_scsi *vs,
- struct vhost_virtqueue *vq, int head, unsigned out)
-{
- struct virtio_scsi_cmd_resp __user *resp;
- struct virtio_scsi_cmd_resp rsp;
- int ret;
-
- memset(&rsp, 0, sizeof(rsp));
- rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
- resp = vq->iov[out].iov_base;
- ret = __copy_to_user(resp, &rsp, sizeof(rsp));
- if (!ret)
- vhost_add_used_and_signal(&vs->dev, vq, head, 0);
- else
- pr_err("Faulted on virtio_scsi_cmd_resp\n");
-}
-
static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
struct vhost_virtqueue *vq)
{
- struct tcm_vhost_tpg **vs_tpg;
struct virtio_scsi_cmd_req v_req;
struct tcm_vhost_tpg *tv_tpg;
struct tcm_vhost_cmd *tv_cmd;
@@ -607,16 +590,8 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
int head, ret;
u8 target;
- /*
- * We can handle the vq only after the endpoint is setup by calling the
- * VHOST_SCSI_SET_ENDPOINT ioctl.
- *
- * TODO: Check that we are running from vhost_worker which acts
- * as read-side critical section for vhost kind of RCU.
- * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h
- */
- vs_tpg = rcu_dereference_check(vq->private_data, 1);
- if (!vs_tpg)
+ /* Must use ioctl VHOST_SCSI_SET_ENDPOINT */
+ if (unlikely(!vs->vs_endpoint))
return;
mutex_lock(&vq->mutex);
@@ -686,11 +661,23 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
/* Extract the tpgt */
target = v_req.lun[1];
- tv_tpg = ACCESS_ONCE(vs_tpg[target]);
+ tv_tpg = vs->vs_tpg[target];
/* Target does not exist, fail the request */
if (unlikely(!tv_tpg)) {
- vhost_scsi_send_bad_target(vs, vq, head, out);
+ struct virtio_scsi_cmd_resp __user *resp;
+ struct virtio_scsi_cmd_resp rsp;
+
+ memset(&rsp, 0, sizeof(rsp));
+ rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
+ resp = vq->iov[out].iov_base;
+ ret = __copy_to_user(resp, &rsp, sizeof(rsp));
+ if (!ret)
+ vhost_add_used_and_signal(&vs->dev,
+ vq, head, 0);
+ else
+ pr_err("Faulted on virtio_scsi_cmd_resp\n");
+
continue;
}
@@ -703,13 +690,22 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
if (IS_ERR(tv_cmd)) {
vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
PTR_ERR(tv_cmd));
- goto err_cmd;
+ break;
}
pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
": %d\n", tv_cmd, exp_data_len, data_direction);
tv_cmd->tvc_vhost = vs;
tv_cmd->tvc_vq = vq;
+
+ if (unlikely(vq->iov[out].iov_len !=
+ sizeof(struct virtio_scsi_cmd_resp))) {
+ vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
+ " bytes, out: %d, in: %d\n",
+ vq->iov[out].iov_len, out, in);
+ break;
+ }
+
tv_cmd->tvc_resp = vq->iov[out].iov_base;
/*
@@ -729,7 +725,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
scsi_command_size(tv_cmd->tvc_cdb),
TCM_VHOST_MAX_CDB_SIZE);
- goto err_free;
+ break; /* TODO */
}
tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
@@ -742,7 +738,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
data_direction == DMA_TO_DEVICE);
if (unlikely(ret)) {
vq_err(vq, "Failed to map iov to sgl\n");
- goto err_free;
+ break; /* TODO */
}
}
@@ -763,13 +759,6 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
}
mutex_unlock(&vq->mutex);
- return;
-
-err_free:
- vhost_scsi_free_cmd(tv_cmd);
-err_cmd:
- vhost_scsi_send_bad_target(vs, vq, head, out);
- mutex_unlock(&vq->mutex);
}
static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
@@ -791,20 +780,6 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
vhost_scsi_handle_vq(vs, vq);
}
-static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
-{
- vhost_poll_flush(&vs->dev.vqs[index].poll);
-}
-
-static void vhost_scsi_flush(struct vhost_scsi *vs)
-{
- int i;
-
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
- vhost_scsi_flush_vq(vs, i);
- vhost_work_flush(&vs->dev, &vs->vs_completion_work);
-}
-
/*
* Called from vhost_scsi_ioctl() context to walk the list of available
* tcm_vhost_tpg with an active struct tcm_vhost_nexus
@@ -815,10 +790,8 @@ static int vhost_scsi_set_endpoint(
{
struct tcm_vhost_tport *tv_tport;
struct tcm_vhost_tpg *tv_tpg;
- struct tcm_vhost_tpg **vs_tpg;
- struct vhost_virtqueue *vq;
- int index, ret, i, len;
bool match = false;
+ int index, ret;
mutex_lock(&vs->dev.mutex);
/* Verify that ring has been setup correctly. */
@@ -830,15 +803,6 @@ static int vhost_scsi_set_endpoint(
}
}
- len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
- vs_tpg = kzalloc(len, GFP_KERNEL);
- if (!vs_tpg) {
- mutex_unlock(&vs->dev.mutex);
- return -ENOMEM;
- }
- if (vs->vs_tpg)
- memcpy(vs_tpg, vs->vs_tpg, len);
-
mutex_lock(&tcm_vhost_mutex);
list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
mutex_lock(&tv_tpg->tv_tpg_mutex);
@@ -853,15 +817,14 @@ static int vhost_scsi_set_endpoint(
tv_tport = tv_tpg->tport;
if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
- if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
+ if (vs->vs_tpg[tv_tpg->tport_tpgt]) {
mutex_unlock(&tv_tpg->tv_tpg_mutex);
mutex_unlock(&tcm_vhost_mutex);
mutex_unlock(&vs->dev.mutex);
- kfree(vs_tpg);
return -EEXIST;
}
tv_tpg->tv_tpg_vhost_count++;
- vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
+ vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
smp_mb__after_atomic_inc();
match = true;
}
@@ -872,27 +835,12 @@ static int vhost_scsi_set_endpoint(
if (match) {
memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
sizeof(vs->vs_vhost_wwpn));
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
- vq = &vs->vqs[i];
- /* Flushing the vhost_work acts as synchronize_rcu */
- mutex_lock(&vq->mutex);
- rcu_assign_pointer(vq->private_data, vs_tpg);
- vhost_init_used(vq);
- mutex_unlock(&vq->mutex);
- }
+ vs->vs_endpoint = true;
ret = 0;
} else {
ret = -EEXIST;
}
- /*
- * Act as synchronize_rcu to make sure access to
- * old vs->vs_tpg is finished.
- */
- vhost_scsi_flush(vs);
- kfree(vs->vs_tpg);
- vs->vs_tpg = vs_tpg;
-
mutex_unlock(&vs->dev.mutex);
return ret;
}
@@ -903,8 +851,6 @@ static int vhost_scsi_clear_endpoint(
{
struct tcm_vhost_tport *tv_tport;
struct tcm_vhost_tpg *tv_tpg;
- struct vhost_virtqueue *vq;
- bool match = false;
int index, ret, i;
u8 target;
@@ -916,14 +862,9 @@ static int vhost_scsi_clear_endpoint(
goto err_dev;
}
}
-
- if (!vs->vs_tpg) {
- mutex_unlock(&vs->dev.mutex);
- return 0;
- }
-
for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
target = i;
+
tv_tpg = vs->vs_tpg[target];
if (!tv_tpg)
continue;
@@ -945,27 +886,10 @@ static int vhost_scsi_clear_endpoint(
}
tv_tpg->tv_tpg_vhost_count--;
vs->vs_tpg[target] = NULL;
- match = true;
+ vs->vs_endpoint = false;
mutex_unlock(&tv_tpg->tv_tpg_mutex);
}
- if (match) {
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
- vq = &vs->vqs[i];
- /* Flushing the vhost_work acts as synchronize_rcu */
- mutex_lock(&vq->mutex);
- rcu_assign_pointer(vq->private_data, NULL);
- mutex_unlock(&vq->mutex);
- }
- }
- /*
- * Act as synchronize_rcu to make sure access to
- * old vs->vs_tpg is finished.
- */
- vhost_scsi_flush(vs);
- kfree(vs->vs_tpg);
- vs->vs_tpg = NULL;
mutex_unlock(&vs->dev.mutex);
-
return 0;
err_tpg:
@@ -975,24 +899,6 @@ static int vhost_scsi_clear_endpoint(
return ret;
}
-static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
-{
- if (features & ~VHOST_SCSI_FEATURES)
- return -EOPNOTSUPP;
-
- mutex_lock(&vs->dev.mutex);
- if ((features & (1 << VHOST_F_LOG_ALL)) &&
- !vhost_log_access_ok(&vs->dev)) {
- mutex_unlock(&vs->dev.mutex);
- return -EFAULT;
- }
- vs->dev.acked_features = features;
- smp_wmb();
- vhost_scsi_flush(vs);
- mutex_unlock(&vs->dev.mutex);
- return 0;
-}
-
static int vhost_scsi_open(struct inode *inode, struct file *f)
{
struct vhost_scsi *s;
@@ -1033,6 +939,38 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
return 0;
}
+static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
+{
+ vhost_poll_flush(&vs->dev.vqs[index].poll);
+}
+
+static void vhost_scsi_flush(struct vhost_scsi *vs)
+{
+ int i;
+
+ for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
+ vhost_scsi_flush_vq(vs, i);
+ vhost_work_flush(&vs->dev, &vs->vs_completion_work);
+}
+
+static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
+{
+ if (features & ~VHOST_SCSI_FEATURES)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&vs->dev.mutex);
+ if ((features & (1 << VHOST_F_LOG_ALL)) &&
+ !vhost_log_access_ok(&vs->dev)) {
+ mutex_unlock(&vs->dev.mutex);
+ return -EFAULT;
+ }
+ vs->dev.acked_features = features;
+ smp_wmb();
+ vhost_scsi_flush(vs);
+ mutex_unlock(&vs->dev.mutex);
+ return 0;
+}
+
static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
unsigned long arg)
{
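
The larger tcm_vhost hunks above switch between two ways of publishing the target table to the I/O path: a fixed vs_tpg[] array guarded by a vs_endpoint flag, versus an RCU-style scheme in which a freshly built array is published into each vq->private_data with rcu_assign_pointer() and old readers are drained by flushing the vhost work (the flush standing in for synchronize_rcu()). A hedged sketch of the publish-and-drain half of that scheme; all demo_* types and the no-op demo_flush() below are illustrative stand-ins:

/*
 * Publish-and-drain sketch mirroring the removed vhost_scsi_set_endpoint()
 * path; demo_* names are invented, demo_flush() stands in for
 * vhost_poll_flush()/vhost_work_flush().
 */
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#define DEMO_MAX_VQ	4

struct demo_vq {
	struct mutex	mutex;
	void __rcu	*private_data;	/* current target table, read by handlers */
};

struct demo_dev {
	struct demo_vq	vqs[DEMO_MAX_VQ];
	void		**tbl;		/* last table we published */
};

static void demo_flush(struct demo_dev *dev)
{
	/* In the real driver: flush every vq poller plus the completion work. */
}

static void demo_publish(struct demo_dev *dev, void **new_tbl)
{
	int i;

	for (i = 0; i < DEMO_MAX_VQ; i++) {
		mutex_lock(&dev->vqs[i].mutex);
		rcu_assign_pointer(dev->vqs[i].private_data, new_tbl);
		mutex_unlock(&dev->vqs[i].mutex);
	}

	demo_flush(dev);	/* acts as synchronize_rcu() for readers of the old table */

	kfree(dev->tbl);	/* no handler can still reference it */
	dev->tbl = new_tbl;
}
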
diff --git a/trunk/drivers/video/mmp/core.c b/trunk/drivers/video/mmp/core.c
index 84de2632857a..9ed83419038b 100644
--- a/trunk/drivers/video/mmp/core.c
+++ b/trunk/drivers/video/mmp/core.c
@@ -252,5 +252,7 @@ void mmp_unregister_path(struct mmp_path *path)
kfree(path);
mutex_unlock(&disp_lock);
+
+ dev_info(path->dev, "de-register %s\n", path->name);
}
EXPORT_SYMBOL_GPL(mmp_unregister_path);
diff --git a/trunk/drivers/watchdog/Kconfig b/trunk/drivers/watchdog/Kconfig
index e89fc3133972..9fcc70c11cea 100644
--- a/trunk/drivers/watchdog/Kconfig
+++ b/trunk/drivers/watchdog/Kconfig
@@ -117,7 +117,7 @@ config ARM_SP805_WATCHDOG
config AT91RM9200_WATCHDOG
tristate "AT91RM9200 watchdog"
- depends on ARCH_AT91RM9200
+ depends on ARCH_AT91
help
Watchdog timer embedded into AT91RM9200 chips. This will reboot your
system when the timeout is reached.
diff --git a/trunk/drivers/xen/events.c b/trunk/drivers/xen/events.c
index 2647ad8e1f19..aa85881d17b2 100644
--- a/trunk/drivers/xen/events.c
+++ b/trunk/drivers/xen/events.c
@@ -1316,7 +1316,7 @@ static void __xen_evtchn_do_upcall(void)
{
int start_word_idx, start_bit_idx;
int word_idx, bit_idx;
- int i, irq;
+ int i;
int cpu = get_cpu();
struct shared_info *s = HYPERVISOR_shared_info;
struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
@@ -1324,8 +1324,6 @@ static void __xen_evtchn_do_upcall(void)
do {
xen_ulong_t pending_words;
- xen_ulong_t pending_bits;
- struct irq_desc *desc;
vcpu_info->evtchn_upcall_pending = 0;
@@ -1337,17 +1335,6 @@ static void __xen_evtchn_do_upcall(void)
* selector flag. xchg_xen_ulong must contain an
* appropriate barrier.
*/
- if ((irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER]) != -1) {
- int evtchn = evtchn_from_irq(irq);
- word_idx = evtchn / BITS_PER_LONG;
- pending_bits = evtchn % BITS_PER_LONG;
- if (active_evtchns(cpu, s, word_idx) & (1ULL << pending_bits)) {
- desc = irq_to_desc(irq);
- if (desc)
- generic_handle_irq_desc(irq, desc);
- }
- }
-
pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);
start_word_idx = __this_cpu_read(current_word_idx);
@@ -1356,6 +1343,7 @@ static void __xen_evtchn_do_upcall(void)
word_idx = start_word_idx;
for (i = 0; pending_words != 0; i++) {
+ xen_ulong_t pending_bits;
xen_ulong_t words;
words = MASK_LSBS(pending_words, word_idx);
@@ -1384,7 +1372,8 @@ static void __xen_evtchn_do_upcall(void)
do {
xen_ulong_t bits;
- int port;
+ int port, irq;
+ struct irq_desc *desc;
bits = MASK_LSBS(pending_bits, bit_idx);
diff --git a/trunk/fs/binfmt_elf.c b/trunk/fs/binfmt_elf.c
index 86af964c2425..3939829f6c5c 100644
--- a/trunk/fs/binfmt_elf.c
+++ b/trunk/fs/binfmt_elf.c
@@ -1137,7 +1137,6 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
goto whole;
if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
goto whole;
- return 0;
}
/* Do not dump I/O mapped devices or special mappings */
diff --git a/trunk/fs/bio.c b/trunk/fs/bio.c
index b96fc6ce4855..bb5768f59b32 100644
--- a/trunk/fs/bio.c
+++ b/trunk/fs/bio.c
@@ -1428,6 +1428,8 @@ void bio_endio(struct bio *bio, int error)
else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
error = -EIO;
+ trace_block_bio_complete(bio, error);
+
if (bio->bi_end_io)
bio->bi_end_io(bio, error);
}
diff --git a/trunk/fs/btrfs/tree-log.c b/trunk/fs/btrfs/tree-log.c
index ef96381569a4..451fad96ecd1 100644
--- a/trunk/fs/btrfs/tree-log.c
+++ b/trunk/fs/btrfs/tree-log.c
@@ -317,7 +317,6 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
unsigned long src_ptr;
unsigned long dst_ptr;
int overwrite_root = 0;
- bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
overwrite_root = 1;
@@ -327,9 +326,6 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
/* look for the key in the destination tree */
ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
- if (ret < 0)
- return ret;
-
if (ret == 0) {
char *src_copy;
char *dst_copy;
@@ -371,30 +367,6 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
return 0;
}
- /*
- * We need to load the old nbytes into the inode so when we
- * replay the extents we've logged we get the right nbytes.
- */
- if (inode_item) {
- struct btrfs_inode_item *item;
- u64 nbytes;
-
- item = btrfs_item_ptr(path->nodes[0], path->slots[0],
- struct btrfs_inode_item);
- nbytes = btrfs_inode_nbytes(path->nodes[0], item);
- item = btrfs_item_ptr(eb, slot,
- struct btrfs_inode_item);
- btrfs_set_inode_nbytes(eb, item, nbytes);
- }
- } else if (inode_item) {
- struct btrfs_inode_item *item;
-
- /*
- * New inode, set nbytes to 0 so that the nbytes comes out
- * properly when we replay the extents.
- */
- item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
- btrfs_set_inode_nbytes(eb, item, 0);
}
insert:
btrfs_release_path(path);
@@ -514,7 +486,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
int found_type;
u64 extent_end;
u64 start = key->offset;
- u64 nbytes = 0;
+ u64 saved_nbytes;
struct btrfs_file_extent_item *item;
struct inode *inode = NULL;
unsigned long size;
@@ -524,19 +496,10 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
found_type = btrfs_file_extent_type(eb, item);
if (found_type == BTRFS_FILE_EXTENT_REG ||
- found_type == BTRFS_FILE_EXTENT_PREALLOC) {
- nbytes = btrfs_file_extent_num_bytes(eb, item);
- extent_end = start + nbytes;
-
- /*
- * We don't add to the inodes nbytes if we are prealloc or a
- * hole.
- */
- if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
- nbytes = 0;
- } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
+ found_type == BTRFS_FILE_EXTENT_PREALLOC)
+ extent_end = start + btrfs_file_extent_num_bytes(eb, item);
+ else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
size = btrfs_file_extent_inline_len(eb, item);
- nbytes = btrfs_file_extent_ram_bytes(eb, item);
extent_end = ALIGN(start + size, root->sectorsize);
} else {
ret = 0;
@@ -585,6 +548,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
}
btrfs_release_path(path);
+ saved_nbytes = inode_get_bytes(inode);
/* drop any overlapping extents */
ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
BUG_ON(ret);
@@ -671,7 +635,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
BUG_ON(ret);
}
- inode_add_bytes(inode, nbytes);
+ inode_set_bytes(inode, saved_nbytes);
ret = btrfs_update_inode(trans, root, inode);
out:
if (inode)
diff --git a/trunk/fs/cifs/connect.c b/trunk/fs/cifs/connect.c
index 21b3a291c327..991c63c6bdd0 100644
--- a/trunk/fs/cifs/connect.c
+++ b/trunk/fs/cifs/connect.c
@@ -1575,24 +1575,14 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
}
break;
case Opt_blank_pass:
+ vol->password = NULL;
+ break;
+ case Opt_pass:
/* passwords have to be handled differently
* to allow the character used for deliminator
* to be passed within them
*/
- /*
- * Check if this is a case where the password
- * starts with a delimiter
- */
- tmp_end = strchr(data, '=');
- tmp_end++;
- if (!(tmp_end < end && tmp_end[1] == delim)) {
- /* No it is not. Set the password to NULL */
- vol->password = NULL;
- break;
- }
- /* Yes it is. Drop down to Opt_pass below.*/
- case Opt_pass:
/* Obtain the value string */
value = strchr(data, '=');
value++;
diff --git a/trunk/fs/ecryptfs/miscdev.c b/trunk/fs/ecryptfs/miscdev.c
index e4141f257495..412e6eda25f8 100644
--- a/trunk/fs/ecryptfs/miscdev.c
+++ b/trunk/fs/ecryptfs/miscdev.c
@@ -80,6 +80,13 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)
int rc;
mutex_lock(&ecryptfs_daemon_hash_mux);
+ rc = try_module_get(THIS_MODULE);
+ if (rc == 0) {
+ rc = -EIO;
+ printk(KERN_ERR "%s: Error attempting to increment module use "
+ "count; rc = [%d]\n", __func__, rc);
+ goto out_unlock_daemon_list;
+ }
rc = ecryptfs_find_daemon_by_euid(&daemon);
if (!rc) {
rc = -EINVAL;
@@ -89,7 +96,7 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)
if (rc) {
printk(KERN_ERR "%s: Error attempting to spawn daemon; "
"rc = [%d]\n", __func__, rc);
- goto out_unlock_daemon_list;
+ goto out_module_put_unlock_daemon_list;
}
mutex_lock(&daemon->mux);
if (daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN) {
@@ -101,6 +108,9 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)
atomic_inc(&ecryptfs_num_miscdev_opens);
out_unlock_daemon:
mutex_unlock(&daemon->mux);
+out_module_put_unlock_daemon_list:
+ if (rc)
+ module_put(THIS_MODULE);
out_unlock_daemon_list:
mutex_unlock(&ecryptfs_daemon_hash_mux);
return rc;
@@ -137,6 +147,7 @@ ecryptfs_miscdev_release(struct inode *inode, struct file *file)
"bug.\n", __func__, rc);
BUG();
}
+ module_put(THIS_MODULE);
return rc;
}
@@ -460,7 +471,6 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf,
static const struct file_operations ecryptfs_miscdev_fops = {
- .owner = THIS_MODULE,
.open = ecryptfs_miscdev_open,
.poll = ecryptfs_miscdev_poll,
.read = ecryptfs_miscdev_read,
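
The ecryptfs hunks trade between two ways of keeping the module pinned while /dev/ecryptfs is open: setting .owner = THIS_MODULE in the file_operations, which makes the VFS hold a module reference for the lifetime of every open file, or taking and dropping that reference by hand with try_module_get()/module_put() in open and release. A minimal sketch of the declarative form with a hypothetical misc device:

/*
 * .owner = THIS_MODULE sketch; demo_open/demo_release/demo_misc are
 * illustrative names, not ecryptfs symbols.
 */
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

static int demo_open(struct inode *inode, struct file *file)
{
	return 0;
}

static int demo_release(struct inode *inode, struct file *file)
{
	return 0;
}

static const struct file_operations demo_fops = {
	.owner		= THIS_MODULE,	/* VFS pins the module while files are open */
	.open		= demo_open,
	.release	= demo_release,
};

static struct miscdevice demo_misc = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "demo",
	.fops	= &demo_fops,
};

Registering demo_misc with misc_register() in module init and misc_deregister() in exit completes the picture; with .owner set there is no need for a manual try_module_get() in open.
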
diff --git a/trunk/fs/hfsplus/extents.c b/trunk/fs/hfsplus/extents.c
index fe0a76213d9e..a94f0f779d5e 100644
--- a/trunk/fs/hfsplus/extents.c
+++ b/trunk/fs/hfsplus/extents.c
@@ -533,7 +533,7 @@ void hfsplus_file_truncate(struct inode *inode)
struct address_space *mapping = inode->i_mapping;
struct page *page;
void *fsdata;
- loff_t size = inode->i_size;
+ u32 size = inode->i_size;
res = pagecache_write_begin(NULL, mapping, size, 0,
AOP_FLAG_UNINTERRUPTIBLE,
diff --git a/trunk/fs/hugetlbfs/inode.c b/trunk/fs/hugetlbfs/inode.c
index 523464e62849..84e3d856e91d 100644
--- a/trunk/fs/hugetlbfs/inode.c
+++ b/trunk/fs/hugetlbfs/inode.c
@@ -110,7 +110,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
* way when do_mmap_pgoff unwinds (may be important on powerpc
* and ia64).
*/
- vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
+ vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = &hugetlb_vm_ops;
if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
diff --git a/trunk/fs/inode.c b/trunk/fs/inode.c
index a898b3d43ccf..f5f7c06c36fb 100644
--- a/trunk/fs/inode.c
+++ b/trunk/fs/inode.c
@@ -725,7 +725,7 @@ void prune_icache_sb(struct super_block *sb, int nr_to_scan)
* inode to the back of the list so we don't spin on it.
*/
if (!spin_trylock(&inode->i_lock)) {
- list_move(&inode->i_lru, &sb->s_inode_lru);
+ list_move_tail(&inode->i_lru, &sb->s_inode_lru);
continue;
}
diff --git a/trunk/fs/namespace.c b/trunk/fs/namespace.c
index 341d3f564082..d581e45c0a9f 100644
--- a/trunk/fs/namespace.c
+++ b/trunk/fs/namespace.c
@@ -1690,7 +1690,7 @@ static int do_loopback(struct path *path, const char *old_name,
if (IS_ERR(mnt)) {
err = PTR_ERR(mnt);
- goto out2;
+ goto out;
}
err = graft_tree(mnt, path);
diff --git a/trunk/fs/nfs/nfs4client.c b/trunk/fs/nfs/nfs4client.c
index 66b6664dcd4c..ac4fc9a8fdbc 100644
--- a/trunk/fs/nfs/nfs4client.c
+++ b/trunk/fs/nfs/nfs4client.c
@@ -300,7 +300,7 @@ int nfs40_walk_client_list(struct nfs_client *new,
struct rpc_cred *cred)
{
struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id);
- struct nfs_client *pos, *prev = NULL;
+ struct nfs_client *pos, *n, *prev = NULL;
struct nfs4_setclientid_res clid = {
.clientid = new->cl_clientid,
.confirm = new->cl_confirm,
@@ -308,23 +308,10 @@ int nfs40_walk_client_list(struct nfs_client *new,
int status = -NFS4ERR_STALE_CLIENTID;
spin_lock(&nn->nfs_client_lock);
- list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
+ list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) {
/* If "pos" isn't marked ready, we can't trust the
* remaining fields in "pos" */
- if (pos->cl_cons_state > NFS_CS_READY) {
- atomic_inc(&pos->cl_count);
- spin_unlock(&nn->nfs_client_lock);
-
- if (prev)
- nfs_put_client(prev);
- prev = pos;
-
- status = nfs_wait_client_init_complete(pos);
- spin_lock(&nn->nfs_client_lock);
- if (status < 0)
- continue;
- }
- if (pos->cl_cons_state != NFS_CS_READY)
+ if (pos->cl_cons_state < NFS_CS_READY)
continue;
if (pos->rpc_ops != new->rpc_ops)
@@ -436,16 +423,16 @@ int nfs41_walk_client_list(struct nfs_client *new,
struct rpc_cred *cred)
{
struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id);
- struct nfs_client *pos, *prev = NULL;
+ struct nfs_client *pos, *n, *prev = NULL;
int status = -NFS4ERR_STALE_CLIENTID;
spin_lock(&nn->nfs_client_lock);
- list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
+ list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) {
/* If "pos" isn't marked ready, we can't trust the
* remaining fields in "pos", especially the client
* ID and serverowner fields. Wait for CREATE_SESSION
* to finish. */
- if (pos->cl_cons_state > NFS_CS_READY) {
+ if (pos->cl_cons_state < NFS_CS_READY) {
atomic_inc(&pos->cl_count);
spin_unlock(&nn->nfs_client_lock);
@@ -453,17 +440,18 @@ int nfs41_walk_client_list(struct nfs_client *new,
nfs_put_client(prev);
prev = pos;
+ nfs4_schedule_lease_recovery(pos);
status = nfs_wait_client_init_complete(pos);
- if (status == 0) {
- nfs4_schedule_lease_recovery(pos);
- status = nfs4_wait_clnt_recover(pos);
+ if (status < 0) {
+ nfs_put_client(pos);
+ spin_lock(&nn->nfs_client_lock);
+ continue;
}
+ status = pos->cl_cons_state;
spin_lock(&nn->nfs_client_lock);
if (status < 0)
continue;
}
- if (pos->cl_cons_state != NFS_CS_READY)
- continue;
if (pos->rpc_ops != new->rpc_ops)
continue;
@@ -481,18 +469,17 @@ int nfs41_walk_client_list(struct nfs_client *new,
continue;
atomic_inc(&pos->cl_count);
- *result = pos;
- status = 0;
+ spin_unlock(&nn->nfs_client_lock);
dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n",
__func__, pos, atomic_read(&pos->cl_count));
- break;
+
+ *result = pos;
+ return 0;
}
/* No matching nfs_client found. */
spin_unlock(&nn->nfs_client_lock);
dprintk("NFS: <-- %s status = %d\n", __func__, status);
- if (prev)
- nfs_put_client(prev);
return status;
}
#endif /* CONFIG_NFS_V4_1 */
diff --git a/trunk/fs/nfs/nfs4proc.c b/trunk/fs/nfs/nfs4proc.c
index 0ad025eb523b..26431cf62ddb 100644
--- a/trunk/fs/nfs/nfs4proc.c
+++ b/trunk/fs/nfs/nfs4proc.c
@@ -1046,7 +1046,6 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
/* Save the delegation */
nfs4_stateid_copy(&stateid, &delegation->stateid);
rcu_read_unlock();
- nfs_release_seqid(opendata->o_arg.seqid);
ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
if (ret != 0)
goto out;
diff --git a/trunk/fs/nfs/nfs4state.c b/trunk/fs/nfs/nfs4state.c
index d41a3518509f..6ace365c6334 100644
--- a/trunk/fs/nfs/nfs4state.c
+++ b/trunk/fs/nfs/nfs4state.c
@@ -1886,13 +1886,7 @@ int nfs4_discover_server_trunking(struct nfs_client *clp,
status = PTR_ERR(clnt);
break;
}
- /* Note: this is safe because we haven't yet marked the
- * client as ready, so we are the only user of
- * clp->cl_rpcclient
- */
- clnt = xchg(&clp->cl_rpcclient, clnt);
- rpc_shutdown_client(clnt);
- clnt = clp->cl_rpcclient;
+ clp->cl_rpcclient = clnt;
goto again;
case -NFS4ERR_MINOR_VERS_MISMATCH:
diff --git a/trunk/fs/proc/array.c b/trunk/fs/proc/array.c
index cbd0f1b324b9..f7ed9ee46eb9 100644
--- a/trunk/fs/proc/array.c
+++ b/trunk/fs/proc/array.c
@@ -143,7 +143,6 @@ static const char * const task_state_array[] = {
"x (dead)", /* 64 */
"K (wakekill)", /* 128 */
"W (waking)", /* 256 */
- "P (parked)", /* 512 */
};
static inline const char *get_task_state(struct task_struct *tsk)
diff --git a/trunk/fs/proc/generic.c b/trunk/fs/proc/generic.c
index 21e1a8f1659d..4b3b3ffb52f1 100644
--- a/trunk/fs/proc/generic.c
+++ b/trunk/fs/proc/generic.c
@@ -755,8 +755,37 @@ void pde_put(struct proc_dir_entry *pde)
free_proc_entry(pde);
}
-static void entry_rundown(struct proc_dir_entry *de)
+/*
+ * Remove a /proc entry and free it if it's not currently in use.
+ */
+void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
{
+ struct proc_dir_entry **p;
+ struct proc_dir_entry *de = NULL;
+ const char *fn = name;
+ unsigned int len;
+
+ spin_lock(&proc_subdir_lock);
+ if (__xlate_proc_name(name, &parent, &fn) != 0) {
+ spin_unlock(&proc_subdir_lock);
+ return;
+ }
+ len = strlen(fn);
+
+ for (p = &parent->subdir; *p; p=&(*p)->next ) {
+ if (proc_match(len, fn, *p)) {
+ de = *p;
+ *p = de->next;
+ de->next = NULL;
+ break;
+ }
+ }
+ spin_unlock(&proc_subdir_lock);
+ if (!de) {
+ WARN(1, "name '%s'\n", name);
+ return;
+ }
+
spin_lock(&de->pde_unload_lock);
/*
* Stop accepting new callers into module. If you're
@@ -788,40 +817,6 @@ static void entry_rundown(struct proc_dir_entry *de)
spin_lock(&de->pde_unload_lock);
}
spin_unlock(&de->pde_unload_lock);
-}
-
-/*
- * Remove a /proc entry and free it if it's not currently in use.
- */
-void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
-{
- struct proc_dir_entry **p;
- struct proc_dir_entry *de = NULL;
- const char *fn = name;
- unsigned int len;
-
- spin_lock(&proc_subdir_lock);
- if (__xlate_proc_name(name, &parent, &fn) != 0) {
- spin_unlock(&proc_subdir_lock);
- return;
- }
- len = strlen(fn);
-
- for (p = &parent->subdir; *p; p=&(*p)->next ) {
- if (proc_match(len, fn, *p)) {
- de = *p;
- *p = de->next;
- de->next = NULL;
- break;
- }
- }
- spin_unlock(&proc_subdir_lock);
- if (!de) {
- WARN(1, "name '%s'\n", name);
- return;
- }
-
- entry_rundown(de);
if (S_ISDIR(de->mode))
parent->nlink--;
@@ -832,57 +827,3 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
pde_put(de);
}
EXPORT_SYMBOL(remove_proc_entry);
-
-int remove_proc_subtree(const char *name, struct proc_dir_entry *parent)
-{
- struct proc_dir_entry **p;
- struct proc_dir_entry *root = NULL, *de, *next;
- const char *fn = name;
- unsigned int len;
-
- spin_lock(&proc_subdir_lock);
- if (__xlate_proc_name(name, &parent, &fn) != 0) {
- spin_unlock(&proc_subdir_lock);
- return -ENOENT;
- }
- len = strlen(fn);
-
- for (p = &parent->subdir; *p; p=&(*p)->next ) {
- if (proc_match(len, fn, *p)) {
- root = *p;
- *p = root->next;
- root->next = NULL;
- break;
- }
- }
- if (!root) {
- spin_unlock(&proc_subdir_lock);
- return -ENOENT;
- }
- de = root;
- while (1) {
- next = de->subdir;
- if (next) {
- de->subdir = next->next;
- next->next = NULL;
- de = next;
- continue;
- }
- spin_unlock(&proc_subdir_lock);
-
- entry_rundown(de);
- next = de->parent;
- if (S_ISDIR(de->mode))
- next->nlink--;
- de->nlink = 0;
- if (de == root)
- break;
- pde_put(de);
-
- spin_lock(&proc_subdir_lock);
- de = next;
- }
- pde_put(root);
- return 0;
-}
-EXPORT_SYMBOL(remove_proc_subtree);
diff --git a/trunk/include/asm-generic/tlb.h b/trunk/include/asm-generic/tlb.h
index b1b1fa6ffffe..25f01d0bc149 100644
--- a/trunk/include/asm-generic/tlb.h
+++ b/trunk/include/asm-generic/tlb.h
@@ -99,12 +99,7 @@ struct mmu_gather {
unsigned int need_flush : 1, /* Did free PTEs */
fast_mode : 1; /* No batching */
- /* we are in the middle of an operation to clear
- * a full mm and can make some optimizations */
- unsigned int fullmm : 1,
- /* we have performed an operation which
- * requires a complete flush of the tlb */
- need_flush_all : 1;
+ unsigned int fullmm;
struct mmu_gather_batch *active;
struct mmu_gather_batch local;
diff --git a/trunk/include/linux/ata.h b/trunk/include/linux/ata.h
index ee0bd9524055..8f7a3d68371a 100644
--- a/trunk/include/linux/ata.h
+++ b/trunk/include/linux/ata.h
@@ -954,7 +954,7 @@ static inline int atapi_cdb_len(const u16 *dev_id)
}
}
-static inline int atapi_command_packet_set(const u16 *dev_id)
+static inline bool atapi_command_packet_set(const u16 *dev_id)
{
return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f;
}
diff --git a/trunk/include/linux/blktrace_api.h b/trunk/include/linux/blktrace_api.h
index 7c2e030e72f1..0ea61e07a91c 100644
--- a/trunk/include/linux/blktrace_api.h
+++ b/trunk/include/linux/blktrace_api.h
@@ -12,6 +12,7 @@
struct blk_trace {
int trace_state;
+ bool rq_based;
struct rchan *rchan;
unsigned long __percpu *sequence;
unsigned char __percpu *msg_data;
diff --git a/trunk/include/linux/capability.h b/trunk/include/linux/capability.h
index d9a4f7f40f32..98503b792369 100644
--- a/trunk/include/linux/capability.h
+++ b/trunk/include/linux/capability.h
@@ -35,7 +35,6 @@ struct cpu_vfs_cap_data {
#define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t))
-struct file;
struct inode;
struct dentry;
struct user_namespace;
@@ -212,7 +211,6 @@ extern bool capable(int cap);
extern bool ns_capable(struct user_namespace *ns, int cap);
extern bool nsown_capable(int cap);
extern bool inode_capable(const struct inode *inode, int cap);
-extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
/* audit system wants to get cap info from files as well */
extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
diff --git a/trunk/include/linux/ftrace.h b/trunk/include/linux/ftrace.h
index 52da2a250795..e5ca8ef50e9b 100644
--- a/trunk/include/linux/ftrace.h
+++ b/trunk/include/linux/ftrace.h
@@ -89,7 +89,6 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
* that the call back has its own recursion protection. If it does
* not set this, then the ftrace infrastructure will add recursion
* protection for the caller.
- * STUB - The ftrace_ops is just a place holder.
*/
enum {
FTRACE_OPS_FL_ENABLED = 1 << 0,
@@ -99,7 +98,6 @@ enum {
FTRACE_OPS_FL_SAVE_REGS = 1 << 4,
FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5,
FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6,
- FTRACE_OPS_FL_STUB = 1 << 7,
};
struct ftrace_ops {
@@ -396,6 +394,7 @@ ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
size_t cnt, loff_t *ppos);
+loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int whence);
int ftrace_regex_release(struct inode *inode, struct file *file);
void __init
@@ -568,8 +567,6 @@ static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
#endif /* CONFIG_DYNAMIC_FTRACE */
-loff_t ftrace_filter_lseek(struct file *file, loff_t offset, int whence);
-
/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);
diff --git a/trunk/include/linux/libata.h b/trunk/include/linux/libata.h
index eae7a053dc51..91c9d109e5f1 100644
--- a/trunk/include/linux/libata.h
+++ b/trunk/include/linux/libata.h
@@ -398,7 +398,6 @@ enum {
ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */
ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */
ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */
- ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
diff --git a/trunk/include/linux/mm.h b/trunk/include/linux/mm.h
index e2091b88d24c..e19ff30ad0a2 100644
--- a/trunk/include/linux/mm.h
+++ b/trunk/include/linux/mm.h
@@ -1611,8 +1611,6 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
-int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
-
struct page *follow_page_mask(struct vm_area_struct *vma,
unsigned long address, unsigned int foll_flags,
diff --git a/trunk/include/linux/netfilter/ipset/ip_set_ahash.h b/trunk/include/linux/netfilter/ipset/ip_set_ahash.h
index 0214c4c146fa..01d25e6fc792 100644
--- a/trunk/include/linux/netfilter/ipset/ip_set_ahash.h
+++ b/trunk/include/linux/netfilter/ipset/ip_set_ahash.h
@@ -291,7 +291,6 @@ ip_set_hash_destroy(struct ip_set *set)
#define type_pf_data_tlist TOKEN(TYPE, PF, _data_tlist)
#define type_pf_data_next TOKEN(TYPE, PF, _data_next)
#define type_pf_data_flags TOKEN(TYPE, PF, _data_flags)
-#define type_pf_data_reset_flags TOKEN(TYPE, PF, _data_reset_flags)
#ifdef IP_SET_HASH_WITH_NETS
#define type_pf_data_match TOKEN(TYPE, PF, _data_match)
#else
@@ -386,9 +385,9 @@ type_pf_resize(struct ip_set *set, bool retried)
struct ip_set_hash *h = set->data;
struct htable *t, *orig = h->table;
u8 htable_bits = orig->htable_bits;
- struct type_pf_elem *data;
+ const struct type_pf_elem *data;
struct hbucket *n, *m;
- u32 i, j, flags = 0;
+ u32 i, j;
int ret;
retry:
@@ -413,16 +412,9 @@ type_pf_resize(struct ip_set *set, bool retried)
n = hbucket(orig, i);
for (j = 0; j < n->pos; j++) {
data = ahash_data(n, j);
-#ifdef IP_SET_HASH_WITH_NETS
- flags = 0;
- type_pf_data_reset_flags(data, &flags);
-#endif
m = hbucket(t, HKEY(data, h->initval, htable_bits));
- ret = type_pf_elem_add(m, data, AHASH_MAX(h), flags);
+ ret = type_pf_elem_add(m, data, AHASH_MAX(h), 0);
if (ret < 0) {
-#ifdef IP_SET_HASH_WITH_NETS
- type_pf_data_flags(data, flags);
-#endif
read_unlock_bh(&set->lock);
ahash_destroy(t);
if (ret == -EAGAIN)
@@ -844,9 +836,9 @@ type_pf_tresize(struct ip_set *set, bool retried)
struct ip_set_hash *h = set->data;
struct htable *t, *orig = h->table;
u8 htable_bits = orig->htable_bits;
- struct type_pf_elem *data;
+ const struct type_pf_elem *data;
struct hbucket *n, *m;
- u32 i, j, flags = 0;
+ u32 i, j;
int ret;
/* Try to cleanup once */
@@ -881,17 +873,10 @@ type_pf_tresize(struct ip_set *set, bool retried)
n = hbucket(orig, i);
for (j = 0; j < n->pos; j++) {
data = ahash_tdata(n, j);
-#ifdef IP_SET_HASH_WITH_NETS
- flags = 0;
- type_pf_data_reset_flags(data, &flags);
-#endif
m = hbucket(t, HKEY(data, h->initval, htable_bits));
- ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), flags,
- ip_set_timeout_get(type_pf_data_timeout(data)));
+ ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), 0,
+ ip_set_timeout_get(type_pf_data_timeout(data)));
if (ret < 0) {
-#ifdef IP_SET_HASH_WITH_NETS
- type_pf_data_flags(data, flags);
-#endif
read_unlock_bh(&set->lock);
ahash_destroy(t);
if (ret == -EAGAIN)
@@ -1202,7 +1187,6 @@ type_pf_gc_init(struct ip_set *set)
#undef type_pf_data_tlist
#undef type_pf_data_next
#undef type_pf_data_flags
-#undef type_pf_data_reset_flags
#undef type_pf_data_match
#undef type_pf_elem
diff --git a/trunk/include/linux/preempt.h b/trunk/include/linux/preempt.h
index 87a03c746f17..5a710b9c578e 100644
--- a/trunk/include/linux/preempt.h
+++ b/trunk/include/linux/preempt.h
@@ -93,20 +93,14 @@ do { \
#else /* !CONFIG_PREEMPT_COUNT */
-/*
- * Even if we don't have any preemption, we need preempt disable/enable
- * to be barriers, so that we don't have things like get_user/put_user
- * that can cause faults and scheduling migrate into our preempt-protected
- * region.
- */
-#define preempt_disable() barrier()
-#define sched_preempt_enable_no_resched() barrier()
-#define preempt_enable_no_resched() barrier()
-#define preempt_enable() barrier()
-
-#define preempt_disable_notrace() barrier()
-#define preempt_enable_no_resched_notrace() barrier()
-#define preempt_enable_notrace() barrier()
+#define preempt_disable() do { } while (0)
+#define sched_preempt_enable_no_resched() do { } while (0)
+#define preempt_enable_no_resched() do { } while (0)
+#define preempt_enable() do { } while (0)
+
+#define preempt_disable_notrace() do { } while (0)
+#define preempt_enable_no_resched_notrace() do { } while (0)
+#define preempt_enable_notrace() do { } while (0)
#endif /* CONFIG_PREEMPT_COUNT */
diff --git a/trunk/include/linux/proc_fs.h b/trunk/include/linux/proc_fs.h
index 94dfb2aa5533..8307f2f94d86 100644
--- a/trunk/include/linux/proc_fs.h
+++ b/trunk/include/linux/proc_fs.h
@@ -117,7 +117,6 @@ struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
const struct file_operations *proc_fops,
void *data);
extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent);
-extern int remove_proc_subtree(const char *name, struct proc_dir_entry *parent);
struct pid_namespace;
@@ -203,7 +202,6 @@ static inline struct proc_dir_entry *proc_create_data(const char *name,
return NULL;
}
#define remove_proc_entry(name, parent) do {} while (0)
-#define remove_proc_subtree(name, parent) do {} while (0)
static inline struct proc_dir_entry *proc_symlink(const char *name,
struct proc_dir_entry *parent,const char *dest) {return NULL;}
diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h
index e692a022527b..d35d2b6ddbfb 100644
--- a/trunk/include/linux/sched.h
+++ b/trunk/include/linux/sched.h
@@ -163,10 +163,9 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
#define TASK_DEAD 64
#define TASK_WAKEKILL 128
#define TASK_WAKING 256
-#define TASK_PARKED 512
-#define TASK_STATE_MAX 1024
+#define TASK_STATE_MAX 512
-#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
+#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
extern char ___assert_task_state[1 - 2*!!(
sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
diff --git a/trunk/include/linux/security.h b/trunk/include/linux/security.h
index 032c366ef1c6..eee7478cda70 100644
--- a/trunk/include/linux/security.h
+++ b/trunk/include/linux/security.h
@@ -1012,10 +1012,6 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* This hook can be used by the module to update any security state
* associated with the TUN device's security structure.
* @security pointer to the TUN devices's security structure.
- * @skb_owned_by:
- * This hook sets the packet's owning sock.
- * @skb is the packet.
- * @sk the sock which owns the packet.
*
* Security hooks for XFRM operations.
*
@@ -1642,7 +1638,6 @@ struct security_operations {
int (*tun_dev_attach_queue) (void *security);
int (*tun_dev_attach) (struct sock *sk, void *security);
int (*tun_dev_open) (void *security);
- void (*skb_owned_by) (struct sk_buff *skb, struct sock *sk);
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -2593,8 +2588,6 @@ int security_tun_dev_attach_queue(void *security);
int security_tun_dev_attach(struct sock *sk, void *security);
int security_tun_dev_open(void *security);
-void security_skb_owned_by(struct sk_buff *skb, struct sock *sk);
-
#else /* CONFIG_SECURITY_NETWORK */
static inline int security_unix_stream_connect(struct sock *sock,
struct sock *other,
@@ -2786,11 +2779,6 @@ static inline int security_tun_dev_open(void *security)
{
return 0;
}
-
-static inline void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
-{
-}
-
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
diff --git a/trunk/include/linux/spinlock_up.h b/trunk/include/linux/spinlock_up.h
index e2369c167dbd..a26e2fb604e6 100644
--- a/trunk/include/linux/spinlock_up.h
+++ b/trunk/include/linux/spinlock_up.h
@@ -16,10 +16,7 @@
* In the debug case, 1 means unlocked, 0 means locked. (the values
* are inverted, to catch initialization bugs)
*
- * No atomicity anywhere, we are on UP. However, we still need
- * the compiler barriers, because we do not want the compiler to
- * move potentially faulting instructions (notably user accesses)
- * into the locked sequence, resulting in non-atomic execution.
+ * No atomicity anywhere, we are on UP.
*/
#ifdef CONFIG_DEBUG_SPINLOCK
@@ -28,7 +25,6 @@
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
lock->slock = 0;
- barrier();
}
static inline void
@@ -36,7 +32,6 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
local_irq_save(flags);
lock->slock = 0;
- barrier();
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
@@ -44,34 +39,32 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
char oldval = lock->slock;
lock->slock = 0;
- barrier();
return oldval > 0;
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
- barrier();
lock->slock = 1;
}
/*
* Read-write spinlocks. No debug version.
*/
-#define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0)
-#define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0)
-#define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; })
-#define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; })
-#define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0)
-#define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0)
+#define arch_read_lock(lock) do { (void)(lock); } while (0)
+#define arch_write_lock(lock) do { (void)(lock); } while (0)
+#define arch_read_trylock(lock) ({ (void)(lock); 1; })
+#define arch_write_trylock(lock) ({ (void)(lock); 1; })
+#define arch_read_unlock(lock) do { (void)(lock); } while (0)
+#define arch_write_unlock(lock) do { (void)(lock); } while (0)
#else /* DEBUG_SPINLOCK */
#define arch_spin_is_locked(lock) ((void)(lock), 0)
/* for sched.c and kernel_lock.c: */
-# define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0)
-# define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0)
-# define arch_spin_unlock(lock) do { barrier(); (void)(lock); } while (0)
-# define arch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; })
+# define arch_spin_lock(lock) do { (void)(lock); } while (0)
+# define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0)
+# define arch_spin_unlock(lock) do { (void)(lock); } while (0)
+# define arch_spin_trylock(lock) ({ (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */
#define arch_spin_is_contended(lock) (((void)(lock), 0))
diff --git a/trunk/include/net/addrconf.h b/trunk/include/net/addrconf.h
index 84a6440f1f19..40be2a0d8ae1 100644
--- a/trunk/include/net/addrconf.h
+++ b/trunk/include/net/addrconf.h
@@ -199,7 +199,6 @@ extern bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
/* Device notifier */
extern int register_inet6addr_notifier(struct notifier_block *nb);
extern int unregister_inet6addr_notifier(struct notifier_block *nb);
-extern int inet6addr_notifier_call_chain(unsigned long val, void *v);
extern void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex,
struct ipv6_devconf *devconf);
diff --git a/trunk/include/net/iucv/af_iucv.h b/trunk/include/net/iucv/af_iucv.h
index 714cc9a54a4c..cc7c19732389 100644
--- a/trunk/include/net/iucv/af_iucv.h
+++ b/trunk/include/net/iucv/af_iucv.h
@@ -130,14 +130,6 @@ struct iucv_sock {
enum iucv_tx_notify n);
};
-struct iucv_skb_cb {
- u32 class; /* target class of message */
- u32 tag; /* tag associated with message */
- u32 offset; /* offset for skb receival */
-};
-
-#define IUCV_SKB_CB(__skb) ((struct iucv_skb_cb *)&((__skb)->cb[0]))
-
/* iucv socket options (SOL_IUCV) */
#define SO_IPRMDATA_MSG 0x0080 /* send/recv IPRM_DATA msgs */
#define SO_MSGLIMIT 0x1000 /* get/set IUCV MSGLIMIT */
diff --git a/trunk/include/trace/events/block.h b/trunk/include/trace/events/block.h
index 9c1467357b03..9961726523d0 100644
--- a/trunk/include/trace/events/block.h
+++ b/trunk/include/trace/events/block.h
@@ -257,7 +257,6 @@ TRACE_EVENT(block_bio_bounce,
/**
* block_bio_complete - completed all work on the block operation
- * @q: queue holding the block operation
* @bio: block operation completed
* @error: io error value
*
@@ -266,9 +265,9 @@ TRACE_EVENT(block_bio_bounce,
*/
TRACE_EVENT(block_bio_complete,
- TP_PROTO(struct request_queue *q, struct bio *bio, int error),
+ TP_PROTO(struct bio *bio, int error),
- TP_ARGS(q, bio, error),
+ TP_ARGS(bio, error),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -279,7 +278,8 @@ TRACE_EVENT(block_bio_complete,
),
TP_fast_assign(
- __entry->dev = bio->bi_bdev->bd_dev;
+ __entry->dev = bio->bi_bdev ?
+ bio->bi_bdev->bd_dev : 0;
__entry->sector = bio->bi_sector;
__entry->nr_sector = bio->bi_size >> 9;
__entry->error = error;
diff --git a/trunk/include/trace/events/sched.h b/trunk/include/trace/events/sched.h
index e5586caff67a..5a8671e8a67f 100644
--- a/trunk/include/trace/events/sched.h
+++ b/trunk/include/trace/events/sched.h
@@ -147,7 +147,7 @@ TRACE_EVENT(sched_switch,
__print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
{ 16, "Z" }, { 32, "X" }, { 64, "x" },
- { 128, "K" }, { 256, "W" }, { 512, "P" }) : "R",
+ { 128, "W" }) : "R",
__entry->prev_state & TASK_STATE_MAX ? "+" : "",
__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
diff --git a/trunk/include/uapi/linux/fuse.h b/trunk/include/uapi/linux/fuse.h
index 706d035fa748..4c43b4448792 100644
--- a/trunk/include/uapi/linux/fuse.h
+++ b/trunk/include/uapi/linux/fuse.h
@@ -95,10 +95,15 @@
#ifndef _LINUX_FUSE_H
#define _LINUX_FUSE_H
-#ifdef __KERNEL__
+#ifdef __linux__
#include <linux/types.h>
#else
#include <stdint.h>
+#define __u64 uint64_t
+#define __s64 int64_t
+#define __u32 uint32_t
+#define __s32 int32_t
+#define __u16 uint16_t
#endif
/*
@@ -134,42 +139,42 @@
userspace works under 64bit kernels */
struct fuse_attr {
- uint64_t ino;
- uint64_t size;
- uint64_t blocks;
- uint64_t atime;
- uint64_t mtime;
- uint64_t ctime;
- uint32_t atimensec;
- uint32_t mtimensec;
- uint32_t ctimensec;
- uint32_t mode;
- uint32_t nlink;
- uint32_t uid;
- uint32_t gid;
- uint32_t rdev;
- uint32_t blksize;
- uint32_t padding;
+ __u64 ino;
+ __u64 size;
+ __u64 blocks;
+ __u64 atime;
+ __u64 mtime;
+ __u64 ctime;
+ __u32 atimensec;
+ __u32 mtimensec;
+ __u32 ctimensec;
+ __u32 mode;
+ __u32 nlink;
+ __u32 uid;
+ __u32 gid;
+ __u32 rdev;
+ __u32 blksize;
+ __u32 padding;
};
struct fuse_kstatfs {
- uint64_t blocks;
- uint64_t bfree;
- uint64_t bavail;
- uint64_t files;
- uint64_t ffree;
- uint32_t bsize;
- uint32_t namelen;
- uint32_t frsize;
- uint32_t padding;
- uint32_t spare[6];
+ __u64 blocks;
+ __u64 bfree;
+ __u64 bavail;
+ __u64 files;
+ __u64 ffree;
+ __u32 bsize;
+ __u32 namelen;
+ __u32 frsize;
+ __u32 padding;
+ __u32 spare[6];
};
struct fuse_file_lock {
- uint64_t start;
- uint64_t end;
- uint32_t type;
- uint32_t pid; /* tgid */
+ __u64 start;
+ __u64 end;
+ __u32 type;
+ __u32 pid; /* tgid */
};
/**
@@ -359,143 +364,143 @@ enum fuse_notify_code {
#define FUSE_COMPAT_ENTRY_OUT_SIZE 120
struct fuse_entry_out {
- uint64_t nodeid; /* Inode ID */
- uint64_t generation; /* Inode generation: nodeid:gen must
- be unique for the fs's lifetime */
- uint64_t entry_valid; /* Cache timeout for the name */
- uint64_t attr_valid; /* Cache timeout for the attributes */
- uint32_t entry_valid_nsec;
- uint32_t attr_valid_nsec;
+ __u64 nodeid; /* Inode ID */
+ __u64 generation; /* Inode generation: nodeid:gen must
+ be unique for the fs's lifetime */
+ __u64 entry_valid; /* Cache timeout for the name */
+ __u64 attr_valid; /* Cache timeout for the attributes */
+ __u32 entry_valid_nsec;
+ __u32 attr_valid_nsec;
struct fuse_attr attr;
};
struct fuse_forget_in {
- uint64_t nlookup;
+ __u64 nlookup;
};
struct fuse_forget_one {
- uint64_t nodeid;
- uint64_t nlookup;
+ __u64 nodeid;
+ __u64 nlookup;
};
struct fuse_batch_forget_in {
- uint32_t count;
- uint32_t dummy;
+ __u32 count;
+ __u32 dummy;
};
struct fuse_getattr_in {
- uint32_t getattr_flags;
- uint32_t dummy;
- uint64_t fh;
+ __u32 getattr_flags;
+ __u32 dummy;
+ __u64 fh;
};
#define FUSE_COMPAT_ATTR_OUT_SIZE 96
struct fuse_attr_out {
- uint64_t attr_valid; /* Cache timeout for the attributes */
- uint32_t attr_valid_nsec;
- uint32_t dummy;
+ __u64 attr_valid; /* Cache timeout for the attributes */
+ __u32 attr_valid_nsec;
+ __u32 dummy;
struct fuse_attr attr;
};
#define FUSE_COMPAT_MKNOD_IN_SIZE 8
struct fuse_mknod_in {
- uint32_t mode;
- uint32_t rdev;
- uint32_t umask;
- uint32_t padding;
+ __u32 mode;
+ __u32 rdev;
+ __u32 umask;
+ __u32 padding;
};
struct fuse_mkdir_in {
- uint32_t mode;
- uint32_t umask;
+ __u32 mode;
+ __u32 umask;
};
struct fuse_rename_in {
- uint64_t newdir;
+ __u64 newdir;
};
struct fuse_link_in {
- uint64_t oldnodeid;
+ __u64 oldnodeid;
};
struct fuse_setattr_in {
- uint32_t valid;
- uint32_t padding;
- uint64_t fh;
- uint64_t size;
- uint64_t lock_owner;
- uint64_t atime;
- uint64_t mtime;
- uint64_t unused2;
- uint32_t atimensec;
- uint32_t mtimensec;
- uint32_t unused3;
- uint32_t mode;
- uint32_t unused4;
- uint32_t uid;
- uint32_t gid;
- uint32_t unused5;
+ __u32 valid;
+ __u32 padding;
+ __u64 fh;
+ __u64 size;
+ __u64 lock_owner;
+ __u64 atime;
+ __u64 mtime;
+ __u64 unused2;
+ __u32 atimensec;
+ __u32 mtimensec;
+ __u32 unused3;
+ __u32 mode;
+ __u32 unused4;
+ __u32 uid;
+ __u32 gid;
+ __u32 unused5;
};
struct fuse_open_in {
- uint32_t flags;
- uint32_t unused;
+ __u32 flags;
+ __u32 unused;
};
struct fuse_create_in {
- uint32_t flags;
- uint32_t mode;
- uint32_t umask;
- uint32_t padding;
+ __u32 flags;
+ __u32 mode;
+ __u32 umask;
+ __u32 padding;
};
struct fuse_open_out {
- uint64_t fh;
- uint32_t open_flags;
- uint32_t padding;
+ __u64 fh;
+ __u32 open_flags;
+ __u32 padding;
};
struct fuse_release_in {
- uint64_t fh;
- uint32_t flags;
- uint32_t release_flags;
- uint64_t lock_owner;
+ __u64 fh;
+ __u32 flags;
+ __u32 release_flags;
+ __u64 lock_owner;
};
struct fuse_flush_in {
- uint64_t fh;
- uint32_t unused;
- uint32_t padding;
- uint64_t lock_owner;
+ __u64 fh;
+ __u32 unused;
+ __u32 padding;
+ __u64 lock_owner;
};
struct fuse_read_in {
- uint64_t fh;
- uint64_t offset;
- uint32_t size;
- uint32_t read_flags;
- uint64_t lock_owner;
- uint32_t flags;
- uint32_t padding;
+ __u64 fh;
+ __u64 offset;
+ __u32 size;
+ __u32 read_flags;
+ __u64 lock_owner;
+ __u32 flags;
+ __u32 padding;
};
#define FUSE_COMPAT_WRITE_IN_SIZE 24
struct fuse_write_in {
- uint64_t fh;
- uint64_t offset;
- uint32_t size;
- uint32_t write_flags;
- uint64_t lock_owner;
- uint32_t flags;
- uint32_t padding;
+ __u64 fh;
+ __u64 offset;
+ __u32 size;
+ __u32 write_flags;
+ __u64 lock_owner;
+ __u32 flags;
+ __u32 padding;
};
struct fuse_write_out {
- uint32_t size;
- uint32_t padding;
+ __u32 size;
+ __u32 padding;
};
#define FUSE_COMPAT_STATFS_SIZE 48
@@ -505,32 +510,32 @@ struct fuse_statfs_out {
};
struct fuse_fsync_in {
- uint64_t fh;
- uint32_t fsync_flags;
- uint32_t padding;
+ __u64 fh;
+ __u32 fsync_flags;
+ __u32 padding;
};
struct fuse_setxattr_in {
- uint32_t size;
- uint32_t flags;
+ __u32 size;
+ __u32 flags;
};
struct fuse_getxattr_in {
- uint32_t size;
- uint32_t padding;
+ __u32 size;
+ __u32 padding;
};
struct fuse_getxattr_out {
- uint32_t size;
- uint32_t padding;
+ __u32 size;
+ __u32 padding;
};
struct fuse_lk_in {
- uint64_t fh;
- uint64_t owner;
+ __u64 fh;
+ __u64 owner;
struct fuse_file_lock lk;
- uint32_t lk_flags;
- uint32_t padding;
+ __u32 lk_flags;
+ __u32 padding;
};
struct fuse_lk_out {
@@ -538,135 +543,134 @@ struct fuse_lk_out {
};
struct fuse_access_in {
- uint32_t mask;
- uint32_t padding;
+ __u32 mask;
+ __u32 padding;
};
struct fuse_init_in {
- uint32_t major;
- uint32_t minor;
- uint32_t max_readahead;
- uint32_t flags;
+ __u32 major;
+ __u32 minor;
+ __u32 max_readahead;
+ __u32 flags;
};
struct fuse_init_out {
- uint32_t major;
- uint32_t minor;
- uint32_t max_readahead;
- uint32_t flags;
- uint16_t max_background;
- uint16_t congestion_threshold;
- uint32_t max_write;
+ __u32 major;
+ __u32 minor;
+ __u32 max_readahead;
+ __u32 flags;
+ __u16 max_background;
+ __u16 congestion_threshold;
+ __u32 max_write;
};
#define CUSE_INIT_INFO_MAX 4096
struct cuse_init_in {
- uint32_t major;
- uint32_t minor;
- uint32_t unused;
- uint32_t flags;
+ __u32 major;
+ __u32 minor;
+ __u32 unused;
+ __u32 flags;
};
struct cuse_init_out {
- uint32_t major;
- uint32_t minor;
- uint32_t unused;
- uint32_t flags;
- uint32_t max_read;
- uint32_t max_write;
- uint32_t dev_major; /* chardev major */
- uint32_t dev_minor; /* chardev minor */
- uint32_t spare[10];
+ __u32 major;
+ __u32 minor;
+ __u32 unused;
+ __u32 flags;
+ __u32 max_read;
+ __u32 max_write;
+ __u32 dev_major; /* chardev major */
+ __u32 dev_minor; /* chardev minor */
+ __u32 spare[10];
};
struct fuse_interrupt_in {
- uint64_t unique;
+ __u64 unique;
};
struct fuse_bmap_in {
- uint64_t block;
- uint32_t blocksize;
- uint32_t padding;
+ __u64 block;
+ __u32 blocksize;
+ __u32 padding;
};
struct fuse_bmap_out {
- uint64_t block;
+ __u64 block;
};
struct fuse_ioctl_in {
- uint64_t fh;
- uint32_t flags;
- uint32_t cmd;
- uint64_t arg;
- uint32_t in_size;
- uint32_t out_size;
+ __u64 fh;
+ __u32 flags;
+ __u32 cmd;
+ __u64 arg;
+ __u32 in_size;
+ __u32 out_size;
};
struct fuse_ioctl_iovec {
- uint64_t base;
- uint64_t len;
+ __u64 base;
+ __u64 len;
};
struct fuse_ioctl_out {
- int32_t result;
- uint32_t flags;
- uint32_t in_iovs;
- uint32_t out_iovs;
+ __s32 result;
+ __u32 flags;
+ __u32 in_iovs;
+ __u32 out_iovs;
};
struct fuse_poll_in {
- uint64_t fh;
- uint64_t kh;
- uint32_t flags;
- uint32_t events;
+ __u64 fh;
+ __u64 kh;
+ __u32 flags;
+ __u32 events;
};
struct fuse_poll_out {
- uint32_t revents;
- uint32_t padding;
+ __u32 revents;
+ __u32 padding;
};
struct fuse_notify_poll_wakeup_out {
- uint64_t kh;
+ __u64 kh;
};
struct fuse_fallocate_in {
- uint64_t fh;
- uint64_t offset;
- uint64_t length;
- uint32_t mode;
- uint32_t padding;
+ __u64 fh;
+ __u64 offset;
+ __u64 length;
+ __u32 mode;
+ __u32 padding;
};
struct fuse_in_header {
- uint32_t len;
- uint32_t opcode;
- uint64_t unique;
- uint64_t nodeid;
- uint32_t uid;
- uint32_t gid;
- uint32_t pid;
- uint32_t padding;
+ __u32 len;
+ __u32 opcode;
+ __u64 unique;
+ __u64 nodeid;
+ __u32 uid;
+ __u32 gid;
+ __u32 pid;
+ __u32 padding;
};
struct fuse_out_header {
- uint32_t len;
- int32_t error;
- uint64_t unique;
+ __u32 len;
+ __s32 error;
+ __u64 unique;
};
struct fuse_dirent {
- uint64_t ino;
- uint64_t off;
- uint32_t namelen;
- uint32_t type;
+ __u64 ino;
+ __u64 off;
+ __u32 namelen;
+ __u32 type;
char name[];
};
#define FUSE_NAME_OFFSET offsetof(struct fuse_dirent, name)
-#define FUSE_DIRENT_ALIGN(x) \
- (((x) + sizeof(uint64_t) - 1) & ~(sizeof(uint64_t) - 1))
+#define FUSE_DIRENT_ALIGN(x) (((x) + sizeof(__u64) - 1) & ~(sizeof(__u64) - 1))
#define FUSE_DIRENT_SIZE(d) \
FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen)
@@ -681,47 +685,47 @@ struct fuse_direntplus {
FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET_DIRENTPLUS + (d)->dirent.namelen)
struct fuse_notify_inval_inode_out {
- uint64_t ino;
- int64_t off;
- int64_t len;
+ __u64 ino;
+ __s64 off;
+ __s64 len;
};
struct fuse_notify_inval_entry_out {
- uint64_t parent;
- uint32_t namelen;
- uint32_t padding;
+ __u64 parent;
+ __u32 namelen;
+ __u32 padding;
};
struct fuse_notify_delete_out {
- uint64_t parent;
- uint64_t child;
- uint32_t namelen;
- uint32_t padding;
+ __u64 parent;
+ __u64 child;
+ __u32 namelen;
+ __u32 padding;
};
struct fuse_notify_store_out {
- uint64_t nodeid;
- uint64_t offset;
- uint32_t size;
- uint32_t padding;
+ __u64 nodeid;
+ __u64 offset;
+ __u32 size;
+ __u32 padding;
};
struct fuse_notify_retrieve_out {
- uint64_t notify_unique;
- uint64_t nodeid;
- uint64_t offset;
- uint32_t size;
- uint32_t padding;
+ __u64 notify_unique;
+ __u64 nodeid;
+ __u64 offset;
+ __u32 size;
+ __u32 padding;
};
/* Matches the size of fuse_write_in */
struct fuse_notify_retrieve_in {
- uint64_t dummy1;
- uint64_t offset;
- uint32_t size;
- uint32_t dummy2;
- uint64_t dummy3;
- uint64_t dummy4;
+ __u64 dummy1;
+ __u64 offset;
+ __u32 size;
+ __u32 dummy2;
+ __u64 dummy3;
+ __u64 dummy4;
};
#endif /* _LINUX_FUSE_H */
diff --git a/trunk/kernel/capability.c b/trunk/kernel/capability.c
index f6c2ce5701e1..493d97259484 100644
--- a/trunk/kernel/capability.c
+++ b/trunk/kernel/capability.c
@@ -392,30 +392,6 @@ bool ns_capable(struct user_namespace *ns, int cap)
}
EXPORT_SYMBOL(ns_capable);
-/**
- * file_ns_capable - Determine if the file's opener had a capability in effect
- * @file: The file we want to check
- * @ns: The usernamespace we want the capability in
- * @cap: The capability to be tested for
- *
- * Return true if task that opened the file had a capability in effect
- * when the file was opened.
- *
- * This does not set PF_SUPERPRIV because the caller may not
- * actually be privileged.
- */
-bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap)
-{
- if (WARN_ON_ONCE(!cap_valid(cap)))
- return false;
-
- if (security_capable(file->f_cred, ns, cap) == 0)
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(file_ns_capable);
-
/**
* capable - Determine if the current task has a superior capability in effect
* @cap: The capability to be tested for
diff --git a/trunk/kernel/events/core.c b/trunk/kernel/events/core.c
index 7e0962ed7f8a..59412d037eed 100644
--- a/trunk/kernel/events/core.c
+++ b/trunk/kernel/events/core.c
@@ -4737,8 +4737,7 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
} else {
if (arch_vma_name(mmap_event->vma)) {
name = strncpy(tmp, arch_vma_name(mmap_event->vma),
- sizeof(tmp) - 1);
- tmp[sizeof(tmp) - 1] = '\0';
+ sizeof(tmp));
goto got_name;
}
@@ -5987,7 +5986,6 @@ int perf_pmu_register(struct pmu *pmu, char *name, int type)
if (pmu->pmu_cpu_context)
goto got_cpu_context;
- ret = -ENOMEM;
pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
if (!pmu->pmu_cpu_context)
goto free_dev;
diff --git a/trunk/kernel/events/internal.h b/trunk/kernel/events/internal.h
index eb675c4d59df..d56a64c99a8b 100644
--- a/trunk/kernel/events/internal.h
+++ b/trunk/kernel/events/internal.h
@@ -16,7 +16,7 @@ struct ring_buffer {
int page_order; /* allocation order */
#endif
int nr_pages; /* nr of data pages */
- int overwrite; /* can overwrite itself */
+ int writable; /* are we writable */
atomic_t poll; /* POLL_ for wakeups */
diff --git a/trunk/kernel/events/ring_buffer.c b/trunk/kernel/events/ring_buffer.c
index 97fddb09762b..23cb34ff3973 100644
--- a/trunk/kernel/events/ring_buffer.c
+++ b/trunk/kernel/events/ring_buffer.c
@@ -18,24 +18,12 @@
static bool perf_output_space(struct ring_buffer *rb, unsigned long tail,
unsigned long offset, unsigned long head)
{
- unsigned long sz = perf_data_size(rb);
- unsigned long mask = sz - 1;
+ unsigned long mask;
- /*
- * check if user-writable
- * overwrite : over-write its own tail
- * !overwrite: buffer possibly drops events.
- */
- if (rb->overwrite)
+ if (!rb->writable)
return true;
- /*
- * verify that payload is not bigger than buffer
- * otherwise masking logic may fail to detect
- * the "not enough space" condition
- */
- if ((head - offset) > sz)
- return false;
+ mask = perf_data_size(rb) - 1;
offset = (offset - tail) & mask;
head = (head - tail) & mask;
@@ -224,9 +212,7 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
rb->watermark = max_size / 2;
if (flags & RING_BUFFER_WRITABLE)
- rb->overwrite = 0;
- else
- rb->overwrite = 1;
+ rb->writable = 1;
atomic_set(&rb->refcount, 1);
diff --git a/trunk/kernel/hrtimer.c b/trunk/kernel/hrtimer.c
index 14be27feda49..cc47812d3feb 100644
--- a/trunk/kernel/hrtimer.c
+++ b/trunk/kernel/hrtimer.c
@@ -63,7 +63,6 @@
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
- .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
.clock_base =
{
{
@@ -1643,6 +1642,8 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
int i;
+ raw_spin_lock_init(&cpu_base->lock);
+
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
cpu_base->clock_base[i].cpu_base = cpu_base;
timerqueue_init_head(&cpu_base->clock_base[i].active);
diff --git a/trunk/kernel/kprobes.c b/trunk/kernel/kprobes.c
index 3fed7f0cbcdf..e35be53f6613 100644
--- a/trunk/kernel/kprobes.c
+++ b/trunk/kernel/kprobes.c
@@ -794,16 +794,16 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
}
#ifdef CONFIG_SYSCTL
+/* This should be called with kprobe_mutex locked */
static void __kprobes optimize_all_kprobes(void)
{
struct hlist_head *head;
struct kprobe *p;
unsigned int i;
- mutex_lock(&kprobe_mutex);
/* If optimization is already allowed, just return */
if (kprobes_allow_optimization)
- goto out;
+ return;
kprobes_allow_optimization = true;
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
@@ -813,22 +813,18 @@ static void __kprobes optimize_all_kprobes(void)
optimize_kprobe(p);
}
printk(KERN_INFO "Kprobes globally optimized\n");
-out:
- mutex_unlock(&kprobe_mutex);
}
+/* This should be called with kprobe_mutex locked */
static void __kprobes unoptimize_all_kprobes(void)
{
struct hlist_head *head;
struct kprobe *p;
unsigned int i;
- mutex_lock(&kprobe_mutex);
/* If optimization is already prohibited, just return */
- if (!kprobes_allow_optimization) {
- mutex_unlock(&kprobe_mutex);
+ if (!kprobes_allow_optimization)
return;
- }
kprobes_allow_optimization = false;
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
@@ -838,14 +834,11 @@ static void __kprobes unoptimize_all_kprobes(void)
unoptimize_kprobe(p, false);
}
}
- mutex_unlock(&kprobe_mutex);
-
/* Wait for unoptimizing completion */
wait_for_kprobe_optimizer();
printk(KERN_INFO "Kprobes globally unoptimized\n");
}
-static DEFINE_MUTEX(kprobe_sysctl_mutex);
int sysctl_kprobes_optimization;
int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length,
@@ -853,7 +846,7 @@ int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
{
int ret;
- mutex_lock(&kprobe_sysctl_mutex);
+ mutex_lock(&kprobe_mutex);
sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
@@ -861,7 +854,7 @@ int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
optimize_all_kprobes();
else
unoptimize_all_kprobes();
- mutex_unlock(&kprobe_sysctl_mutex);
+ mutex_unlock(&kprobe_mutex);
return ret;
}
diff --git a/trunk/kernel/kthread.c b/trunk/kernel/kthread.c
index 9eb7fed0bbaa..691dc2ef9baf 100644
--- a/trunk/kernel/kthread.c
+++ b/trunk/kernel/kthread.c
@@ -124,12 +124,12 @@ void *kthread_data(struct task_struct *task)
static void __kthread_parkme(struct kthread *self)
{
- __set_current_state(TASK_PARKED);
+ __set_current_state(TASK_INTERRUPTIBLE);
while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
complete(&self->parked);
schedule();
- __set_current_state(TASK_PARKED);
+ __set_current_state(TASK_INTERRUPTIBLE);
}
clear_bit(KTHREAD_IS_PARKED, &self->flags);
__set_current_state(TASK_RUNNING);
@@ -256,13 +256,8 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
}
EXPORT_SYMBOL(kthread_create_on_node);
-static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
+static void __kthread_bind(struct task_struct *p, unsigned int cpu)
{
- /* Must have done schedule() in kthread() before we set_task_cpu */
- if (!wait_task_inactive(p, state)) {
- WARN_ON(1);
- return;
- }
/* It's safe because the task is inactive. */
do_set_cpus_allowed(p, cpumask_of(cpu));
p->flags |= PF_THREAD_BOUND;
@@ -279,7 +274,12 @@ static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
*/
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
- __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
+ /* Must have done schedule() in kthread() before we set_task_cpu */
+ if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
+ WARN_ON(1);
+ return;
+ }
+ __kthread_bind(p, cpu);
}
EXPORT_SYMBOL(kthread_bind);
@@ -324,22 +324,6 @@ static struct kthread *task_get_live_kthread(struct task_struct *k)
return NULL;
}
-static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
-{
- clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
- /*
- * We clear the IS_PARKED bit here as we don't wait
- * until the task has left the park code. So if we'd
- * park before that happens we'd see the IS_PARKED bit
- * which might be about to be cleared.
- */
- if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
- if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
- __kthread_bind(k, kthread->cpu, TASK_PARKED);
- wake_up_state(k, TASK_PARKED);
- }
-}
-
/**
* kthread_unpark - unpark a thread created by kthread_create().
* @k: thread created by kthread_create().
@@ -352,8 +336,20 @@ void kthread_unpark(struct task_struct *k)
{
struct kthread *kthread = task_get_live_kthread(k);
- if (kthread)
- __kthread_unpark(k, kthread);
+ if (kthread) {
+ clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+ /*
+ * We clear the IS_PARKED bit here as we don't wait
+ * until the task has left the park code. So if we'd
+ * park before that happens we'd see the IS_PARKED bit
+ * which might be about to be cleared.
+ */
+ if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
+ if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
+ __kthread_bind(k, kthread->cpu);
+ wake_up_process(k);
+ }
+ }
put_task_struct(k);
}
@@ -411,7 +407,7 @@ int kthread_stop(struct task_struct *k)
trace_sched_kthread_stop(k);
if (kthread) {
set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
- __kthread_unpark(k, kthread);
+ clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
wake_up_process(k);
wait_for_completion(&kthread->exited);
}
diff --git a/trunk/kernel/sched/clock.c b/trunk/kernel/sched/clock.c
index c3ae1446461c..c685e31492df 100644
--- a/trunk/kernel/sched/clock.c
+++ b/trunk/kernel/sched/clock.c
@@ -176,36 +176,10 @@ static u64 sched_clock_remote(struct sched_clock_data *scd)
u64 this_clock, remote_clock;
u64 *ptr, old_val, val;
-#if BITS_PER_LONG != 64
-again:
- /*
- * Careful here: The local and the remote clock values need to
- * be read out atomic as we need to compare the values and
- * then update either the local or the remote side. So the
- * cmpxchg64 below only protects one readout.
- *
- * We must reread via sched_clock_local() in the retry case on
- * 32bit as an NMI could use sched_clock_local() via the
- * tracer and hit between the readout of
- * the low32bit and the high 32bit portion.
- */
- this_clock = sched_clock_local(my_scd);
- /*
- * We must enforce atomic readout on 32bit, otherwise the
- * update on the remote cpu can hit inbetween the readout of
- * the low32bit and the high 32bit portion.
- */
- remote_clock = cmpxchg64(&scd->clock, 0, 0);
-#else
- /*
- * On 64bit the read of [my]scd->clock is atomic versus the
- * update, so we can avoid the above 32bit dance.
- */
sched_clock_local(my_scd);
again:
this_clock = my_scd->clock;
remote_clock = scd->clock;
-#endif
/*
* Use the opportunity that we have both locks
diff --git a/trunk/kernel/sched/core.c b/trunk/kernel/sched/core.c
index 67d04651f44b..7f12624a393c 100644
--- a/trunk/kernel/sched/core.c
+++ b/trunk/kernel/sched/core.c
@@ -1498,10 +1498,8 @@ static void try_to_wake_up_local(struct task_struct *p)
{
struct rq *rq = task_rq(p);
- if (WARN_ON_ONCE(rq != this_rq()) ||
- WARN_ON_ONCE(p == current))
- return;
-
+ BUG_ON(rq != this_rq());
+ BUG_ON(p == current);
lockdep_assert_held(&rq->lock);
if (!raw_spin_trylock(&p->pi_lock)) {
@@ -5001,7 +4999,7 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
}
static int min_load_idx = 0;
-static int max_load_idx = CPU_LOAD_IDX_MAX-1;
+static int max_load_idx = CPU_LOAD_IDX_MAX;
static void
set_table_entry(struct ctl_table *entry,
diff --git a/trunk/kernel/sched/cputime.c b/trunk/kernel/sched/cputime.c
index e93cca92f38b..ed12cbb135f4 100644
--- a/trunk/kernel/sched/cputime.c
+++ b/trunk/kernel/sched/cputime.c
@@ -310,7 +310,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
t = tsk;
do {
- task_cputime(t, &utime, &stime);
+ task_cputime(tsk, &utime, &stime);
times->utime += utime;
times->stime += stime;
times->sum_exec_runtime += task_sched_runtime(t);
diff --git a/trunk/kernel/signal.c b/trunk/kernel/signal.c
index 598dc06be421..dd72567767d9 100644
--- a/trunk/kernel/signal.c
+++ b/trunk/kernel/signal.c
@@ -2948,7 +2948,7 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
- struct siginfo info = {};
+ struct siginfo info;
info.si_signo = sig;
info.si_errno = 0;
diff --git a/trunk/kernel/smpboot.c b/trunk/kernel/smpboot.c
index 02fc5c933673..8eaed9aa9cf0 100644
--- a/trunk/kernel/smpboot.c
+++ b/trunk/kernel/smpboot.c
@@ -185,18 +185,8 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
}
get_task_struct(tsk);
*per_cpu_ptr(ht->store, cpu) = tsk;
- if (ht->create) {
- /*
- * Make sure that the task has actually scheduled out
- * into park position, before calling the create
- * callback. At least the migration thread callback
- * requires that the task is off the runqueue.
- */
- if (!wait_task_inactive(tsk, TASK_PARKED))
- WARN_ON(1);
- else
- ht->create(cpu);
- }
+ if (ht->create)
+ ht->create(cpu);
return 0;
}
diff --git a/trunk/kernel/sys.c b/trunk/kernel/sys.c
index 0da73cf73e60..39c9c4a2949f 100644
--- a/trunk/kernel/sys.c
+++ b/trunk/kernel/sys.c
@@ -324,6 +324,7 @@ void kernel_restart_prepare(char *cmd)
system_state = SYSTEM_RESTART;
usermodehelper_disable();
device_shutdown();
+ syscore_shutdown();
}
/**
@@ -369,7 +370,6 @@ void kernel_restart(char *cmd)
{
kernel_restart_prepare(cmd);
disable_nonboot_cpus();
- syscore_shutdown();
if (!cmd)
printk(KERN_EMERG "Restarting system.\n");
else
@@ -395,7 +395,6 @@ static void kernel_shutdown_prepare(enum system_states state)
void kernel_halt(void)
{
kernel_shutdown_prepare(SYSTEM_HALT);
- disable_nonboot_cpus();
syscore_shutdown();
printk(KERN_EMERG "System halted.\n");
kmsg_dump(KMSG_DUMP_HALT);
diff --git a/trunk/kernel/trace/blktrace.c b/trunk/kernel/trace/blktrace.c
index 5a0f781cd729..9e5b8c272eec 100644
--- a/trunk/kernel/trace/blktrace.c
+++ b/trunk/kernel/trace/blktrace.c
@@ -739,6 +739,12 @@ static void blk_add_trace_rq_complete(void *ignore,
struct request_queue *q,
struct request *rq)
{
+ struct blk_trace *bt = q->blk_trace;
+
+ /* if control ever passes through here, it's a request based driver */
+ if (unlikely(bt && !bt->rq_based))
+ bt->rq_based = true;
+
blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
}
@@ -774,10 +780,24 @@ static void blk_add_trace_bio_bounce(void *ignore,
blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
}
-static void blk_add_trace_bio_complete(void *ignore,
- struct request_queue *q, struct bio *bio,
- int error)
+static void blk_add_trace_bio_complete(void *ignore, struct bio *bio, int error)
{
+ struct request_queue *q;
+ struct blk_trace *bt;
+
+ if (!bio->bi_bdev)
+ return;
+
+ q = bdev_get_queue(bio->bi_bdev);
+ bt = q->blk_trace;
+
+ /*
+ * Request based drivers will generate both rq and bio completions.
+ * Ignore bio ones.
+ */
+ if (likely(!bt) || bt->rq_based)
+ return;
+
blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
}
diff --git a/trunk/kernel/trace/ftrace.c b/trunk/kernel/trace/ftrace.c
index b3fde6d7b7fc..6893d5a2bf08 100644
--- a/trunk/kernel/trace/ftrace.c
+++ b/trunk/kernel/trace/ftrace.c
@@ -66,7 +66,7 @@
static struct ftrace_ops ftrace_list_end __read_mostly = {
.func = ftrace_stub,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
/* ftrace_enabled is a method to turn ftrace on or off */
@@ -694,6 +694,7 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
free_page(tmp);
}
+ free_page((unsigned long)stat->pages);
stat->pages = NULL;
stat->start = NULL;
@@ -1052,19 +1053,6 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
static struct pid * const ftrace_swapper_pid = &init_struct_pid;
-loff_t
-ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
-{
- loff_t ret;
-
- if (file->f_mode & FMODE_READ)
- ret = seq_lseek(file, offset, whence);
- else
- file->f_pos = ret = 1;
-
- return ret;
-}
-
#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
@@ -2625,7 +2613,7 @@ static void ftrace_filter_reset(struct ftrace_hash *hash)
* routine, you can use ftrace_filter_write() for the write
* routine if @flag has FTRACE_ITER_FILTER set, or
* ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
- * ftrace_filter_lseek() should be used as the lseek routine, and
+ * ftrace_regex_lseek() should be used as the lseek routine, and
* release must call ftrace_regex_release().
*/
int
@@ -2709,6 +2697,19 @@ ftrace_notrace_open(struct inode *inode, struct file *file)
inode, file);
}
+loff_t
+ftrace_regex_lseek(struct file *file, loff_t offset, int whence)
+{
+ loff_t ret;
+
+ if (file->f_mode & FMODE_READ)
+ ret = seq_lseek(file, offset, whence);
+ else
+ file->f_pos = ret = 1;
+
+ return ret;
+}
+
static int ftrace_match(char *str, char *regex, int len, int type)
{
int matched = 0;
@@ -3440,14 +3441,14 @@ static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
static int __init set_ftrace_notrace(char *str)
{
- strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
+ strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
return 1;
}
__setup("ftrace_notrace=", set_ftrace_notrace);
static int __init set_ftrace_filter(char *str)
{
- strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
+ strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
return 1;
}
__setup("ftrace_filter=", set_ftrace_filter);
@@ -3570,7 +3571,7 @@ static const struct file_operations ftrace_filter_fops = {
.open = ftrace_filter_open,
.read = seq_read,
.write = ftrace_filter_write,
- .llseek = ftrace_filter_lseek,
+ .llseek = ftrace_regex_lseek,
.release = ftrace_regex_release,
};
@@ -3578,7 +3579,7 @@ static const struct file_operations ftrace_notrace_fops = {
.open = ftrace_notrace_open,
.read = seq_read,
.write = ftrace_notrace_write,
- .llseek = ftrace_filter_lseek,
+ .llseek = ftrace_regex_lseek,
.release = ftrace_regex_release,
};
@@ -3783,8 +3784,8 @@ static const struct file_operations ftrace_graph_fops = {
.open = ftrace_graph_open,
.read = seq_read,
.write = ftrace_graph_write,
- .llseek = ftrace_filter_lseek,
.release = ftrace_graph_release,
+ .llseek = seq_lseek,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -4130,8 +4131,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
preempt_disable_notrace();
trace_recursion_set(TRACE_CONTROL_BIT);
do_for_each_ftrace_op(op, ftrace_control_list) {
- if (!(op->flags & FTRACE_OPS_FL_STUB) &&
- !ftrace_function_local_disabled(op) &&
+ if (!ftrace_function_local_disabled(op) &&
ftrace_ops_test(op, ip))
op->func(ip, parent_ip, op, regs);
} while_for_each_ftrace_op(op);
@@ -4439,7 +4439,7 @@ static const struct file_operations ftrace_pid_fops = {
.open = ftrace_pid_open,
.write = ftrace_pid_write,
.read = seq_read,
- .llseek = ftrace_filter_lseek,
+ .llseek = seq_lseek,
.release = ftrace_pid_release,
};
@@ -4555,8 +4555,12 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
ftrace_startup_sysctl();
/* we are starting ftrace again */
- if (ftrace_ops_list != &ftrace_list_end)
- update_ftrace_function();
+ if (ftrace_ops_list != &ftrace_list_end) {
+ if (ftrace_ops_list->next == &ftrace_list_end)
+ ftrace_trace_function = ftrace_ops_list->func;
+ else
+ ftrace_trace_function = ftrace_ops_list_func;
+ }
} else {
/* stopping ftrace calls (just send to ftrace_stub) */
diff --git a/trunk/kernel/trace/trace.c b/trunk/kernel/trace/trace.c
index 66338c4f7f4b..4f1dade56981 100644
--- a/trunk/kernel/trace/trace.c
+++ b/trunk/kernel/trace/trace.c
@@ -132,7 +132,7 @@ static char *default_bootup_tracer;
static int __init set_cmdline_ftrace(char *str)
{
- strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
+ strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
default_bootup_tracer = bootup_tracer_buf;
/* We are using ftrace early, expand it */
ring_buffer_expanded = 1;
@@ -162,7 +162,7 @@ static char *trace_boot_options __initdata;
static int __init set_trace_boot_options(char *str)
{
- strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
+ strncpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
trace_boot_options = trace_boot_options_buf;
return 0;
}
@@ -744,11 +744,8 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
return;
WARN_ON_ONCE(!irqs_disabled());
- if (!current_trace->allocated_snapshot) {
- /* Only the nop tracer should hit this when disabling */
- WARN_ON_ONCE(current_trace != &nop_trace);
+ if (WARN_ON_ONCE(!current_trace->allocated_snapshot))
return;
- }
arch_spin_lock(&ftrace_max_lock);
diff --git a/trunk/kernel/trace/trace_stack.c b/trunk/kernel/trace/trace_stack.c
index 83a8b5b7bd35..42ca822fc701 100644
--- a/trunk/kernel/trace/trace_stack.c
+++ b/trunk/kernel/trace/trace_stack.c
@@ -322,7 +322,7 @@ static const struct file_operations stack_trace_filter_fops = {
.open = stack_trace_filter_open,
.read = seq_read,
.write = ftrace_filter_write,
- .llseek = ftrace_filter_lseek,
+ .llseek = ftrace_regex_lseek,
.release = ftrace_regex_release,
};
diff --git a/trunk/kernel/user_namespace.c b/trunk/kernel/user_namespace.c
index e134d8f365dd..a54f26f82eb2 100644
--- a/trunk/kernel/user_namespace.c
+++ b/trunk/kernel/user_namespace.c
@@ -25,8 +25,7 @@
static struct kmem_cache *user_ns_cachep __read_mostly;
-static bool new_idmap_permitted(const struct file *file,
- struct user_namespace *ns, int cap_setid,
+static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid,
struct uid_gid_map *map);
static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
@@ -613,10 +612,10 @@ static ssize_t map_write(struct file *file, const char __user *buf,
if (map->nr_extents != 0)
goto out;
- /*
- * Adjusting namespace settings requires capabilities on the target.
+ /* Require the appropriate privilege CAP_SETUID or CAP_SETGID
+ * over the user namespace in order to set the id mapping.
*/
- if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
+ if (cap_valid(cap_setid) && !ns_capable(ns, cap_setid))
goto out;
/* Get a buffer */
@@ -701,7 +700,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
ret = -EPERM;
/* Validate the user is allowed to use user id's mapped to. */
- if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
+ if (!new_idmap_permitted(ns, cap_setid, &new_map))
goto out;
/* Map the lower ids from the parent user namespace to the
@@ -788,8 +787,7 @@ ssize_t proc_projid_map_write(struct file *file, const char __user *buf, size_t
&ns->projid_map, &ns->parent->projid_map);
}
-static bool new_idmap_permitted(const struct file *file,
- struct user_namespace *ns, int cap_setid,
+static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid,
struct uid_gid_map *new_map)
{
/* Allow mapping to your own filesystem ids */
@@ -797,12 +795,12 @@ static bool new_idmap_permitted(const struct file *file,
u32 id = new_map->extent[0].lower_first;
if (cap_setid == CAP_SETUID) {
kuid_t uid = make_kuid(ns->parent, id);
- if (uid_eq(uid, file->f_cred->fsuid))
+ if (uid_eq(uid, current_fsuid()))
return true;
}
else if (cap_setid == CAP_SETGID) {
kgid_t gid = make_kgid(ns->parent, id);
- if (gid_eq(gid, file->f_cred->fsgid))
+ if (gid_eq(gid, current_fsgid()))
return true;
}
}
@@ -813,10 +811,8 @@ static bool new_idmap_permitted(const struct file *file,
/* Allow the specified ids if we have the appropriate capability
* (CAP_SETUID or CAP_SETGID) over the parent user namespace.
- * And the opener of the id file also had the approprpiate capability.
*/
- if (ns_capable(ns->parent, cap_setid) &&
- file_ns_capable(file, ns->parent, cap_setid))
+ if (ns_capable(ns->parent, cap_setid))
return true;
return false;
diff --git a/trunk/lib/kobject.c b/trunk/lib/kobject.c
index a65486613d79..e07ee1fcd6f1 100644
--- a/trunk/lib/kobject.c
+++ b/trunk/lib/kobject.c
@@ -529,13 +529,6 @@ struct kobject *kobject_get(struct kobject *kobj)
return kobj;
}
-static struct kobject *kobject_get_unless_zero(struct kobject *kobj)
-{
- if (!kref_get_unless_zero(&kobj->kref))
- kobj = NULL;
- return kobj;
-}
-
/*
* kobject_cleanup - free kobject resources.
* @kobj: object to cleanup
@@ -758,7 +751,7 @@ struct kobject *kset_find_obj(struct kset *kset, const char *name)
list_for_each_entry(k, &kset->list, entry) {
if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
- ret = kobject_get_unless_zero(k);
+ ret = kobject_get(k);
break;
}
}
diff --git a/trunk/mm/hugetlb.c b/trunk/mm/hugetlb.c
index 1a12f5b9a0ab..ca9a7c6d7e97 100644
--- a/trunk/mm/hugetlb.c
+++ b/trunk/mm/hugetlb.c
@@ -2961,17 +2961,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
break;
}
- /*
- * We need call hugetlb_fault for both hugepages under migration
- * (in which case hugetlb_fault waits for the migration,) and
- * hwpoisoned hugepages (in which case we need to prevent the
- * caller from accessing to them.) In order to do this, we use
- * here is_swap_pte instead of is_hugetlb_entry_migration and
- * is_hugetlb_entry_hwpoisoned. This is because it simply covers
- * both cases, and because we can't follow correct pages
- * directly from any kind of swap entries.
- */
- if (absent || is_swap_pte(huge_ptep_get(pte)) ||
+ if (absent ||
((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
int ret;
diff --git a/trunk/mm/memory.c b/trunk/mm/memory.c
index ba94dec5b259..494526ae024a 100644
--- a/trunk/mm/memory.c
+++ b/trunk/mm/memory.c
@@ -216,7 +216,6 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
tlb->mm = mm;
tlb->fullmm = fullmm;
- tlb->need_flush_all = 0;
tlb->start = -1UL;
tlb->end = 0;
tlb->need_flush = 0;
@@ -2393,53 +2392,6 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
}
EXPORT_SYMBOL(remap_pfn_range);
-/**
- * vm_iomap_memory - remap memory to userspace
- * @vma: user vma to map to
- * @start: start of area
- * @len: size of area
- *
- * This is a simplified io_remap_pfn_range() for common driver use. The
- * driver just needs to give us the physical memory range to be mapped,
- * we'll figure out the rest from the vma information.
- *
- * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
- * whatever write-combining details or similar.
- */
-int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
-{
- unsigned long vm_len, pfn, pages;
-
- /* Check that the physical memory area passed in looks valid */
- if (start + len < start)
- return -EINVAL;
- /*
- * You *really* shouldn't map things that aren't page-aligned,
- * but we've historically allowed it because IO memory might
- * just have smaller alignment.
- */
- len += start & ~PAGE_MASK;
- pfn = start >> PAGE_SHIFT;
- pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
- if (pfn + pages < pfn)
- return -EINVAL;
-
- /* We start the mapping 'vm_pgoff' pages into the area */
- if (vma->vm_pgoff > pages)
- return -EINVAL;
- pfn += vma->vm_pgoff;
- pages -= vma->vm_pgoff;
-
- /* Can we fit all of the mapping? */
- vm_len = vma->vm_end - vma->vm_start;
- if (vm_len >> PAGE_SHIFT > pages)
- return -EINVAL;
-
- /* Ok, let it rip */
- return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
-}
-EXPORT_SYMBOL(vm_iomap_memory);
-
static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, unsigned long end,
pte_fn_t fn, void *data)
diff --git a/trunk/mm/vmscan.c b/trunk/mm/vmscan.c
index 669fba39be1a..88c5fed8b9a4 100644
--- a/trunk/mm/vmscan.c
+++ b/trunk/mm/vmscan.c
@@ -3188,9 +3188,9 @@ int kswapd_run(int nid)
if (IS_ERR(pgdat->kswapd)) {
/* failure at boot is fatal */
BUG_ON(system_state == SYSTEM_BOOTING);
+ pgdat->kswapd = NULL;
pr_err("Failed to start kswapd on node %d\n", nid);
ret = PTR_ERR(pgdat->kswapd);
- pgdat->kswapd = NULL;
}
return ret;
}
diff --git a/trunk/net/802/mrp.c b/trunk/net/802/mrp.c
index e085bcc754f6..a4cc3229952a 100644
--- a/trunk/net/802/mrp.c
+++ b/trunk/net/802/mrp.c
@@ -870,12 +870,8 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
* all pending messages before the applicant is gone.
*/
del_timer_sync(&app->join_timer);
-
- spin_lock(&app->lock);
mrp_mad_event(app, MRP_EVENT_TX);
mrp_pdu_queue(app);
- spin_unlock(&app->lock);
-
mrp_queue_xmit(app);
dev_mc_del(dev, appl->group_address);
diff --git a/trunk/net/atm/common.c b/trunk/net/atm/common.c
index 737bef59ce89..7b491006eaf4 100644
--- a/trunk/net/atm/common.c
+++ b/trunk/net/atm/common.c
@@ -531,8 +531,6 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
struct sk_buff *skb;
int copied, error = -EINVAL;
- msg->msg_namelen = 0;
-
if (sock->state != SS_CONNECTED)
return -ENOTCONN;
diff --git a/trunk/net/ax25/af_ax25.c b/trunk/net/ax25/af_ax25.c
index e277e38f736b..7b11f8bc5071 100644
--- a/trunk/net/ax25/af_ax25.c
+++ b/trunk/net/ax25/af_ax25.c
@@ -1642,7 +1642,6 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
ax25_address src;
const unsigned char *mac = skb_mac_header(skb);
- memset(sax, 0, sizeof(struct full_sockaddr_ax25));
ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
&digi, NULL, NULL);
sax->sax25_family = AF_AX25;
diff --git a/trunk/net/bluetooth/af_bluetooth.c b/trunk/net/bluetooth/af_bluetooth.c
index 0d1b08cc76e1..d3ee69b35a78 100644
--- a/trunk/net/bluetooth/af_bluetooth.c
+++ b/trunk/net/bluetooth/af_bluetooth.c
@@ -230,8 +230,6 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
- msg->msg_namelen = 0;
-
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb) {
if (sk->sk_shutdown & RCV_SHUTDOWN)
@@ -239,6 +237,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
return err;
}
+ msg->msg_namelen = 0;
+
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
diff --git a/trunk/net/bluetooth/rfcomm/sock.c b/trunk/net/bluetooth/rfcomm/sock.c
index 7c9224bcce17..c23bae86263b 100644
--- a/trunk/net/bluetooth/rfcomm/sock.c
+++ b/trunk/net/bluetooth/rfcomm/sock.c
@@ -608,7 +608,6 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
rfcomm_dlc_accept(d);
- msg->msg_namelen = 0;
return 0;
}
diff --git a/trunk/net/bluetooth/sco.c b/trunk/net/bluetooth/sco.c
index fb6192c9812e..fad0302bdb32 100644
--- a/trunk/net/bluetooth/sco.c
+++ b/trunk/net/bluetooth/sco.c
@@ -665,7 +665,6 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
hci_conn_accept(pi->conn->hcon, 0);
sk->sk_state = BT_CONFIG;
- msg->msg_namelen = 0;
release_sock(sk);
return 0;
diff --git a/trunk/net/bridge/br_if.c b/trunk/net/bridge/br_if.c
index 459dab22b3f6..ef1b91431c6b 100644
--- a/trunk/net/bridge/br_if.c
+++ b/trunk/net/bridge/br_if.c
@@ -67,8 +67,7 @@ void br_port_carrier_check(struct net_bridge_port *p)
struct net_device *dev = p->dev;
struct net_bridge *br = p->br;
- if (!(p->flags & BR_ADMIN_COST) &&
- netif_running(dev) && netif_oper_up(dev))
+ if (netif_running(dev) && netif_oper_up(dev))
p->path_cost = port_cost(dev);
if (!netif_running(br->dev))
diff --git a/trunk/net/bridge/br_private.h b/trunk/net/bridge/br_private.h
index d2c043a857b6..3cbf5beb3d4b 100644
--- a/trunk/net/bridge/br_private.h
+++ b/trunk/net/bridge/br_private.h
@@ -156,7 +156,6 @@ struct net_bridge_port
#define BR_BPDU_GUARD 0x00000002
#define BR_ROOT_BLOCK 0x00000004
#define BR_MULTICAST_FAST_LEAVE 0x00000008
-#define BR_ADMIN_COST 0x00000010
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
u32 multicast_startup_queries_sent;
diff --git a/trunk/net/bridge/br_stp_if.c b/trunk/net/bridge/br_stp_if.c
index d45e760141bb..0bdb4ebd362b 100644
--- a/trunk/net/bridge/br_stp_if.c
+++ b/trunk/net/bridge/br_stp_if.c
@@ -288,7 +288,6 @@ int br_stp_set_path_cost(struct net_bridge_port *p, unsigned long path_cost)
path_cost > BR_MAX_PATH_COST)
return -ERANGE;
- p->flags |= BR_ADMIN_COST;
p->path_cost = path_cost;
br_configuration_update(p->br);
br_port_state_selection(p->br);
diff --git a/trunk/net/caif/caif_socket.c b/trunk/net/caif/caif_socket.c
index ff2ff3ce6965..095259f83902 100644
--- a/trunk/net/caif/caif_socket.c
+++ b/trunk/net/caif/caif_socket.c
@@ -286,8 +286,6 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
if (m->msg_flags&MSG_OOB)
goto read_error;
- m->msg_namelen = 0;
-
skb = skb_recv_datagram(sk, flags, 0 , &ret);
if (!skb)
goto read_error;
diff --git a/trunk/net/can/gw.c b/trunk/net/can/gw.c
index 117814a7e73c..2d117dc5ebea 100644
--- a/trunk/net/can/gw.c
+++ b/trunk/net/can/gw.c
@@ -466,7 +466,7 @@ static int cgw_notifier(struct notifier_block *nb,
if (gwj->src.dev == dev || gwj->dst.dev == dev) {
hlist_del(&gwj->list);
cgw_unregister_filter(gwj);
- kmem_cache_free(cgw_cache, gwj);
+ kfree(gwj);
}
}
}
@@ -864,7 +864,7 @@ static void cgw_remove_all_jobs(void)
hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) {
hlist_del(&gwj->list);
cgw_unregister_filter(gwj);
- kmem_cache_free(cgw_cache, gwj);
+ kfree(gwj);
}
}
@@ -920,7 +920,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
hlist_del(&gwj->list);
cgw_unregister_filter(gwj);
- kmem_cache_free(cgw_cache, gwj);
+ kfree(gwj);
err = 0;
break;
}
diff --git a/trunk/net/core/rtnetlink.c b/trunk/net/core/rtnetlink.c
index 23854b51a259..b65441da74ab 100644
--- a/trunk/net/core/rtnetlink.c
+++ b/trunk/net/core/rtnetlink.c
@@ -1072,7 +1072,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
cb->seq = net->dev_base_seq;
- if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
+ if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
ifla_policy) >= 0) {
if (tb[IFLA_EXT_MASK])
@@ -1922,7 +1922,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
u32 ext_filter_mask = 0;
u16 min_ifinfo_dump_size = 0;
- if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
+ if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
ifla_policy) >= 0) {
if (tb[IFLA_EXT_MASK])
ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
diff --git a/trunk/net/ipv4/devinet.c b/trunk/net/ipv4/devinet.c
index c6287cd978c2..96083b7a436b 100644
--- a/trunk/net/ipv4/devinet.c
+++ b/trunk/net/ipv4/devinet.c
@@ -587,16 +587,13 @@ static void check_lifetime(struct work_struct *work)
{
unsigned long now, next, next_sec, next_sched;
struct in_ifaddr *ifa;
- struct hlist_node *n;
int i;
now = jiffies;
next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
+ rcu_read_lock();
for (i = 0; i < IN4_ADDR_HSIZE; i++) {
- bool change_needed = false;
-
- rcu_read_lock();
hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
unsigned long age;
@@ -609,7 +606,16 @@ static void check_lifetime(struct work_struct *work)
if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
age >= ifa->ifa_valid_lft) {
- change_needed = true;
+ struct in_ifaddr **ifap;
+
+ rtnl_lock();
+ for (ifap = &ifa->ifa_dev->ifa_list;
+ *ifap != NULL; ifap = &ifa->ifa_next) {
+ if (*ifap == ifa)
+ inet_del_ifa(ifa->ifa_dev,
+ ifap, 1);
+ }
+ rtnl_unlock();
} else if (ifa->ifa_preferred_lft ==
INFINITY_LIFE_TIME) {
continue;
@@ -619,8 +625,10 @@ static void check_lifetime(struct work_struct *work)
next = ifa->ifa_tstamp +
ifa->ifa_valid_lft * HZ;
- if (!(ifa->ifa_flags & IFA_F_DEPRECATED))
- change_needed = true;
+ if (!(ifa->ifa_flags & IFA_F_DEPRECATED)) {
+ ifa->ifa_flags |= IFA_F_DEPRECATED;
+ rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
+ }
} else if (time_before(ifa->ifa_tstamp +
ifa->ifa_preferred_lft * HZ,
next)) {
@@ -628,42 +636,8 @@ static void check_lifetime(struct work_struct *work)
ifa->ifa_preferred_lft * HZ;
}
}
- rcu_read_unlock();
- if (!change_needed)
- continue;
- rtnl_lock();
- hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) {
- unsigned long age;
-
- if (ifa->ifa_flags & IFA_F_PERMANENT)
- continue;
-
- /* We try to batch several events at once. */
- age = (now - ifa->ifa_tstamp +
- ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
-
- if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
- age >= ifa->ifa_valid_lft) {
- struct in_ifaddr **ifap;
-
- for (ifap = &ifa->ifa_dev->ifa_list;
- *ifap != NULL; ifap = &(*ifap)->ifa_next) {
- if (*ifap == ifa) {
- inet_del_ifa(ifa->ifa_dev,
- ifap, 1);
- break;
- }
- }
- } else if (ifa->ifa_preferred_lft !=
- INFINITY_LIFE_TIME &&
- age >= ifa->ifa_preferred_lft &&
- !(ifa->ifa_flags & IFA_F_DEPRECATED)) {
- ifa->ifa_flags |= IFA_F_DEPRECATED;
- rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
- }
- }
- rtnl_unlock();
}
+ rcu_read_unlock();
next_sec = round_jiffies_up(next);
next_sched = next;
@@ -830,8 +804,6 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
return -EEXIST;
ifa = ifa_existing;
set_ifa_lifetime(ifa, valid_lft, prefered_lft);
- cancel_delayed_work(&check_lifetime_work);
- schedule_delayed_work(&check_lifetime_work, 0);
rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
}
diff --git a/trunk/net/ipv4/esp4.c b/trunk/net/ipv4/esp4.c
index 4cfe34d4cc96..3b4f0cd2e63e 100644
--- a/trunk/net/ipv4/esp4.c
+++ b/trunk/net/ipv4/esp4.c
@@ -139,6 +139,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
/* skb is pure payload to encrypt */
+ err = -ENOMEM;
+
esp = x->data;
aead = esp->aead;
alen = crypto_aead_authsize(aead);
@@ -174,10 +176,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
}
tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
- if (!tmp) {
- err = -ENOMEM;
+ if (!tmp)
goto error;
- }
seqhi = esp_tmp_seqhi(tmp);
iv = esp_tmp_iv(aead, tmp, seqhilen);
diff --git a/trunk/net/ipv4/ip_fragment.c b/trunk/net/ipv4/ip_fragment.c
index 52c273ea05c3..a6445b843ef4 100644
--- a/trunk/net/ipv4/ip_fragment.c
+++ b/trunk/net/ipv4/ip_fragment.c
@@ -248,7 +248,8 @@ static void ip_expire(unsigned long arg)
if (!head->dev)
goto out_rcu_unlock;
- /* skb has no dst, perform route lookup again */
+ /* skb dst is stale, drop it, and perform route lookup again */
+ skb_dst_drop(head);
iph = ip_hdr(head);
err = ip_route_input_noref(head, iph->daddr, iph->saddr,
iph->tos, head->dev);
@@ -522,16 +523,9 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
qp->q.max_size = skb->len + ihl;
if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
- qp->q.meat == qp->q.len) {
- unsigned long orefdst = skb->_skb_refdst;
+ qp->q.meat == qp->q.len)
+ return ip_frag_reasm(qp, prev, dev);
- skb->_skb_refdst = 0UL;
- err = ip_frag_reasm(qp, prev, dev);
- skb->_skb_refdst = orefdst;
- return err;
- }
-
- skb_dst_drop(skb);
inet_frag_lru_move(&qp->q);
return -EINPROGRESS;
diff --git a/trunk/net/ipv4/syncookies.c b/trunk/net/ipv4/syncookies.c
index 397e0f69435f..ef54377fb11c 100644
--- a/trunk/net/ipv4/syncookies.c
+++ b/trunk/net/ipv4/syncookies.c
@@ -349,8 +349,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
* hasn't changed since we received the original syn, but I see
* no easy way to do this.
*/
- flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
- RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
+ flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
+ RT_SCOPE_UNIVERSE, IPPROTO_TCP,
inet_sk_flowi_flags(sk),
(opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
ireq->loc_addr, th->source, th->dest);
diff --git a/trunk/net/ipv4/tcp_output.c b/trunk/net/ipv4/tcp_output.c
index 509912a5ff98..5d0b4387cba6 100644
--- a/trunk/net/ipv4/tcp_output.c
+++ b/trunk/net/ipv4/tcp_output.c
@@ -2388,12 +2388,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
*/
TCP_SKB_CB(skb)->when = tcp_time_stamp;
- /* make sure skb->data is aligned on arches that require it
- * and check if ack-trimming & collapsing extended the headroom
- * beyond what csum_start can cover.
- */
- if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
- skb_headroom(skb) >= 0xFFFF)) {
+ /* make sure skb->data is aligned on arches that require it */
+ if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) {
struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
GFP_ATOMIC);
return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
@@ -2713,7 +2709,6 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
skb_reserve(skb, MAX_TCP_HEADER);
skb_dst_set(skb, dst);
- security_skb_owned_by(skb, sk);
mss = dst_metric_advmss(dst);
if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
diff --git a/trunk/net/ipv6/addrconf.c b/trunk/net/ipv6/addrconf.c
index dae802c0af7c..a459c4f5b769 100644
--- a/trunk/net/ipv6/addrconf.c
+++ b/trunk/net/ipv6/addrconf.c
@@ -168,6 +168,8 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev,
static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
struct net_device *dev);
+static ATOMIC_NOTIFIER_HEAD(inet6addr_chain);
+
static struct ipv6_devconf ipv6_devconf __read_mostly = {
.forwarding = 0,
.hop_limit = IPV6_DEFAULT_HOPLIMIT,
@@ -835,7 +837,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
rcu_read_unlock_bh();
if (likely(err == 0))
- inet6addr_notifier_call_chain(NETDEV_UP, ifa);
+ atomic_notifier_call_chain(&inet6addr_chain, NETDEV_UP, ifa);
else {
kfree(ifa);
ifa = ERR_PTR(err);
@@ -925,7 +927,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
ipv6_ifa_notify(RTM_DELADDR, ifp);
- inet6addr_notifier_call_chain(NETDEV_DOWN, ifp);
+ atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifp);
/*
* Purge or update corresponding prefix
@@ -2986,7 +2988,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
if (state != INET6_IFADDR_STATE_DEAD) {
__ipv6_ifa_notify(RTM_DELADDR, ifa);
- inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
+ atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
}
in6_ifa_put(ifa);
@@ -4867,6 +4869,22 @@ static struct pernet_operations addrconf_ops = {
.exit = addrconf_exit_net,
};
+/*
+ * Device notifier
+ */
+
+int register_inet6addr_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&inet6addr_chain, nb);
+}
+EXPORT_SYMBOL(register_inet6addr_notifier);
+
+int unregister_inet6addr_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&inet6addr_chain, nb);
+}
+EXPORT_SYMBOL(unregister_inet6addr_notifier);
+
static struct rtnl_af_ops inet6_ops = {
.family = AF_INET6,
.fill_link_af = inet6_fill_link_af,
diff --git a/trunk/net/ipv6/addrconf_core.c b/trunk/net/ipv6/addrconf_core.c
index 72104562c864..d051e5f4bf34 100644
--- a/trunk/net/ipv6/addrconf_core.c
+++ b/trunk/net/ipv6/addrconf_core.c
@@ -78,22 +78,3 @@ int __ipv6_addr_type(const struct in6_addr *addr)
}
EXPORT_SYMBOL(__ipv6_addr_type);
-static ATOMIC_NOTIFIER_HEAD(inet6addr_chain);
-
-int register_inet6addr_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_register(&inet6addr_chain, nb);
-}
-EXPORT_SYMBOL(register_inet6addr_notifier);
-
-int unregister_inet6addr_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_unregister(&inet6addr_chain, nb);
-}
-EXPORT_SYMBOL(unregister_inet6addr_notifier);
-
-int inet6addr_notifier_call_chain(unsigned long val, void *v)
-{
- return atomic_notifier_call_chain(&inet6addr_chain, val, v);
-}
-EXPORT_SYMBOL(inet6addr_notifier_call_chain);
diff --git a/trunk/net/ipv6/reassembly.c b/trunk/net/ipv6/reassembly.c
index 0ba10e53a629..196ab9347ad1 100644
--- a/trunk/net/ipv6/reassembly.c
+++ b/trunk/net/ipv6/reassembly.c
@@ -330,17 +330,9 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
}
if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
- fq->q.meat == fq->q.len) {
- int res;
- unsigned long orefdst = skb->_skb_refdst;
-
- skb->_skb_refdst = 0UL;
- res = ip6_frag_reasm(fq, prev, dev);
- skb->_skb_refdst = orefdst;
- return res;
- }
+ fq->q.meat == fq->q.len)
+ return ip6_frag_reasm(fq, prev, dev);
- skb_dst_drop(skb);
inet_frag_lru_move(&fq->q);
return -1;
diff --git a/trunk/net/ipv6/tcp_ipv6.c b/trunk/net/ipv6/tcp_ipv6.c
index 46a5be85be87..f6d629fd6aee 100644
--- a/trunk/net/ipv6/tcp_ipv6.c
+++ b/trunk/net/ipv6/tcp_ipv6.c
@@ -386,7 +386,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (dst)
dst->ops->redirect(dst, sk, skb);
- goto out;
}
if (type == ICMPV6_PKT_TOOBIG) {
diff --git a/trunk/net/irda/af_irda.c b/trunk/net/irda/af_irda.c
index e493b3397ae3..d28e7f014cc6 100644
--- a/trunk/net/irda/af_irda.c
+++ b/trunk/net/irda/af_irda.c
@@ -1386,8 +1386,6 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
IRDA_DEBUG(4, "%s()\n", __func__);
- msg->msg_namelen = 0;
-
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &err);
if (!skb)
diff --git a/trunk/net/iucv/af_iucv.c b/trunk/net/iucv/af_iucv.c
index 206ce6db2c36..a7d11ffe4284 100644
--- a/trunk/net/iucv/af_iucv.c
+++ b/trunk/net/iucv/af_iucv.c
@@ -49,6 +49,12 @@ static const u8 iprm_shutdown[8] =
#define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class))
+/* macros to set/get socket control buffer at correct offset */
+#define CB_TAG(skb) ((skb)->cb) /* iucv message tag */
+#define CB_TAG_LEN (sizeof(((struct iucv_message *) 0)->tag))
+#define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
+#define CB_TRGCLS_LEN (TRGCLS_SIZE)
+
#define __iucv_sock_wait(sk, condition, timeo, ret) \
do { \
DEFINE_WAIT(__wait); \
@@ -1135,7 +1141,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
/* increment and save iucv message tag for msg_completion cbk */
txmsg.tag = iucv->send_tag++;
- IUCV_SKB_CB(skb)->tag = txmsg.tag;
+ memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
if (iucv->transport == AF_IUCV_TRANS_HIPER) {
atomic_inc(&iucv->msg_sent);
@@ -1218,7 +1224,7 @@ static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
return -ENOMEM;
/* copy target class to control buffer of new skb */
- IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class;
+ memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);
/* copy data fragment */
memcpy(nskb->data, skb->data + copied, size);
@@ -1250,7 +1256,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
/* store msg target class in the second 4 bytes of skb ctrl buffer */
/* Note: the first 4 bytes are reserved for msg tag */
- IUCV_SKB_CB(skb)->class = msg->class;
+ memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);
/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
@@ -1286,7 +1292,6 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
}
}
- IUCV_SKB_CB(skb)->offset = 0;
if (sock_queue_rcv_skb(sk, skb))
skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}
@@ -1322,9 +1327,6 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
unsigned int copied, rlen;
struct sk_buff *skb, *rskb, *cskb;
int err = 0;
- u32 offset;
-
- msg->msg_namelen = 0;
if ((sk->sk_state == IUCV_DISCONN) &&
skb_queue_empty(&iucv->backlog_skb_q) &&
@@ -1344,14 +1346,13 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
return err;
}
- offset = IUCV_SKB_CB(skb)->offset;
- rlen = skb->len - offset; /* real length of skb */
+ rlen = skb->len; /* real length of skb */
copied = min_t(unsigned int, rlen, len);
if (!rlen)
sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
cskb = skb;
- if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) {
+ if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
if (!(flags & MSG_PEEK))
skb_queue_head(&sk->sk_receive_queue, skb);
return -EFAULT;
@@ -1369,8 +1370,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
* get the trgcls from the control buffer of the skb due to
* fragmentation of original iucv message. */
err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
- sizeof(IUCV_SKB_CB(skb)->class),
- (void *)&IUCV_SKB_CB(skb)->class);
+ CB_TRGCLS_LEN, CB_TRGCLS(skb));
if (err) {
if (!(flags & MSG_PEEK))
skb_queue_head(&sk->sk_receive_queue, skb);
@@ -1382,8 +1382,9 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
/* SOCK_STREAM: re-queue skb if it contains unreceived data */
if (sk->sk_type == SOCK_STREAM) {
- if (copied < rlen) {
- IUCV_SKB_CB(skb)->offset = offset + copied;
+ skb_pull(skb, copied);
+ if (skb->len) {
+ skb_queue_head(&sk->sk_receive_queue, skb);
goto done;
}
}
@@ -1402,7 +1403,6 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
spin_lock_bh(&iucv->message_q.lock);
rskb = skb_dequeue(&iucv->backlog_skb_q);
while (rskb) {
- IUCV_SKB_CB(rskb)->offset = 0;
if (sock_queue_rcv_skb(sk, rskb)) {
skb_queue_head(&iucv->backlog_skb_q,
rskb);
@@ -1830,7 +1830,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
spin_lock_irqsave(&list->lock, flags);
while (list_skb != (struct sk_buff *)list) {
- if (msg->tag != IUCV_SKB_CB(list_skb)->tag) {
+ if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
this = list_skb;
break;
}
@@ -2091,7 +2091,6 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
- IUCV_SKB_CB(skb)->offset = 0;
spin_lock(&iucv->message_q.lock);
if (skb_queue_empty(&iucv->backlog_skb_q)) {
if (sock_queue_rcv_skb(sk, skb)) {
@@ -2196,7 +2195,8 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
/* fall through and receive zero length data */
case 0:
/* plain data frame */
- IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
+ memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
+ CB_TRGCLS_LEN);
err = afiucv_hs_callback_rx(sk, skb);
break;
default:
diff --git a/trunk/net/l2tp/l2tp_ip6.c b/trunk/net/l2tp/l2tp_ip6.c
index b8a6039314e8..c74f5a91ff6a 100644
--- a/trunk/net/l2tp/l2tp_ip6.c
+++ b/trunk/net/l2tp/l2tp_ip6.c
@@ -690,7 +690,6 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
lsa->l2tp_addr = ipv6_hdr(skb)->saddr;
lsa->l2tp_flowinfo = 0;
lsa->l2tp_scope_id = 0;
- lsa->l2tp_conn_id = 0;
if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
lsa->l2tp_scope_id = IP6CB(skb)->iif;
}
diff --git a/trunk/net/llc/af_llc.c b/trunk/net/llc/af_llc.c
index 48aaa89253e0..88709882c464 100644
--- a/trunk/net/llc/af_llc.c
+++ b/trunk/net/llc/af_llc.c
@@ -720,8 +720,6 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
int target; /* Read at least this many bytes */
long timeo;
- msg->msg_namelen = 0;
-
lock_sock(sk);
copied = -ENOTCONN;
if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN))
diff --git a/trunk/net/netfilter/ipset/ip_set_hash_ipportnet.c b/trunk/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 10a30b4fc7db..f2627226a087 100644
--- a/trunk/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/trunk/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -104,15 +104,6 @@ hash_ipportnet4_data_flags(struct hash_ipportnet4_elem *dst, u32 flags)
dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
}
-static inline void
-hash_ipportnet4_data_reset_flags(struct hash_ipportnet4_elem *dst, u32 *flags)
-{
- if (dst->nomatch) {
- *flags = IPSET_FLAG_NOMATCH;
- dst->nomatch = 0;
- }
-}
-
static inline int
hash_ipportnet4_data_match(const struct hash_ipportnet4_elem *elem)
{
@@ -423,15 +414,6 @@ hash_ipportnet6_data_flags(struct hash_ipportnet6_elem *dst, u32 flags)
dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
}
-static inline void
-hash_ipportnet6_data_reset_flags(struct hash_ipportnet6_elem *dst, u32 *flags)
-{
- if (dst->nomatch) {
- *flags = IPSET_FLAG_NOMATCH;
- dst->nomatch = 0;
- }
-}
-
static inline int
hash_ipportnet6_data_match(const struct hash_ipportnet6_elem *elem)
{
diff --git a/trunk/net/netfilter/ipset/ip_set_hash_net.c b/trunk/net/netfilter/ipset/ip_set_hash_net.c
index d6a59154d710..4b677cf6bf7d 100644
--- a/trunk/net/netfilter/ipset/ip_set_hash_net.c
+++ b/trunk/net/netfilter/ipset/ip_set_hash_net.c
@@ -87,16 +87,7 @@ hash_net4_data_copy(struct hash_net4_elem *dst,
static inline void
hash_net4_data_flags(struct hash_net4_elem *dst, u32 flags)
{
- dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
-}
-
-static inline void
-hash_net4_data_reset_flags(struct hash_net4_elem *dst, u32 *flags)
-{
- if (dst->nomatch) {
- *flags = IPSET_FLAG_NOMATCH;
- dst->nomatch = 0;
- }
+ dst->nomatch = flags & IPSET_FLAG_NOMATCH;
}
static inline int
@@ -317,16 +308,7 @@ hash_net6_data_copy(struct hash_net6_elem *dst,
static inline void
hash_net6_data_flags(struct hash_net6_elem *dst, u32 flags)
{
- dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
-}
-
-static inline void
-hash_net6_data_reset_flags(struct hash_net6_elem *dst, u32 *flags)
-{
- if (dst->nomatch) {
- *flags = IPSET_FLAG_NOMATCH;
- dst->nomatch = 0;
- }
+ dst->nomatch = flags & IPSET_FLAG_NOMATCH;
}
static inline int
diff --git a/trunk/net/netfilter/ipset/ip_set_hash_netiface.c b/trunk/net/netfilter/ipset/ip_set_hash_netiface.c
index f2b0a3c30130..6ba985f1c96f 100644
--- a/trunk/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/trunk/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -198,16 +198,7 @@ hash_netiface4_data_copy(struct hash_netiface4_elem *dst,
static inline void
hash_netiface4_data_flags(struct hash_netiface4_elem *dst, u32 flags)
{
- dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
-}
-
-static inline void
-hash_netiface4_data_reset_flags(struct hash_netiface4_elem *dst, u32 *flags)
-{
- if (dst->nomatch) {
- *flags = IPSET_FLAG_NOMATCH;
- dst->nomatch = 0;
- }
+ dst->nomatch = flags & IPSET_FLAG_NOMATCH;
}
static inline int
@@ -503,7 +494,7 @@ hash_netiface6_data_copy(struct hash_netiface6_elem *dst,
static inline void
hash_netiface6_data_flags(struct hash_netiface6_elem *dst, u32 flags)
{
- dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
+ dst->nomatch = flags & IPSET_FLAG_NOMATCH;
}
static inline int
@@ -512,15 +503,6 @@ hash_netiface6_data_match(const struct hash_netiface6_elem *elem)
return elem->nomatch ? -ENOTEMPTY : 1;
}
-static inline void
-hash_netiface6_data_reset_flags(struct hash_netiface6_elem *dst, u32 *flags)
-{
- if (dst->nomatch) {
- *flags = IPSET_FLAG_NOMATCH;
- dst->nomatch = 0;
- }
-}
-
static inline void
hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem)
{
diff --git a/trunk/net/netfilter/ipset/ip_set_hash_netport.c b/trunk/net/netfilter/ipset/ip_set_hash_netport.c
index 349deb672a2d..af20c0c5ced2 100644
--- a/trunk/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/trunk/net/netfilter/ipset/ip_set_hash_netport.c
@@ -104,15 +104,6 @@ hash_netport4_data_flags(struct hash_netport4_elem *dst, u32 flags)
dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
}
-static inline void
-hash_netport4_data_reset_flags(struct hash_netport4_elem *dst, u32 *flags)
-{
- if (dst->nomatch) {
- *flags = IPSET_FLAG_NOMATCH;
- dst->nomatch = 0;
- }
-}
-
static inline int
hash_netport4_data_match(const struct hash_netport4_elem *elem)
{
@@ -384,15 +375,6 @@ hash_netport6_data_flags(struct hash_netport6_elem *dst, u32 flags)
dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
}
-static inline void
-hash_netport6_data_reset_flags(struct hash_netport6_elem *dst, u32 *flags)
-{
- if (dst->nomatch) {
- *flags = IPSET_FLAG_NOMATCH;
- dst->nomatch = 0;
- }
-}
-
static inline int
hash_netport6_data_match(const struct hash_netport6_elem *elem)
{
diff --git a/trunk/net/netfilter/ipset/ip_set_list_set.c b/trunk/net/netfilter/ipset/ip_set_list_set.c
index 09c744aa8982..8371c2bac2e4 100644
--- a/trunk/net/netfilter/ipset/ip_set_list_set.c
+++ b/trunk/net/netfilter/ipset/ip_set_list_set.c
@@ -174,13 +174,9 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
{
const struct set_elem *e = list_set_elem(map, i);
- if (e->id != IPSET_INVALID_ID) {
- const struct set_elem *x = list_set_elem(map, map->size - 1);
-
- /* Last element replaced or pushed off */
- if (x->id != IPSET_INVALID_ID)
- ip_set_put_byindex(x->id);
- }
+ if (i == map->size - 1 && e->id != IPSET_INVALID_ID)
+ /* Last element replaced: e.g. add new,before,last */
+ ip_set_put_byindex(e->id);
if (with_timeout(map->timeout))
list_elem_tadd(map, i, id, ip_set_timeout_set(timeout));
else
diff --git a/trunk/net/netfilter/nf_conntrack_sip.c b/trunk/net/netfilter/nf_conntrack_sip.c
index e0c4373b4747..0e7d423324c3 100644
--- a/trunk/net/netfilter/nf_conntrack_sip.c
+++ b/trunk/net/netfilter/nf_conntrack_sip.c
@@ -1593,8 +1593,10 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
end += strlen("\r\n\r\n") + clen;
msglen = origlen = end - dptr;
- if (msglen > datalen)
- return NF_ACCEPT;
+ if (msglen > datalen) {
+ nf_ct_helper_log(skb, ct, "incomplete/bad SIP message");
+ return NF_DROP;
+ }
ret = process_sip_msg(skb, ct, protoff, dataoff,
&dptr, &msglen);
diff --git a/trunk/net/netfilter/nf_nat_core.c b/trunk/net/netfilter/nf_nat_core.c
index ad24be070e53..8d5769c6d16e 100644
--- a/trunk/net/netfilter/nf_nat_core.c
+++ b/trunk/net/netfilter/nf_nat_core.c
@@ -467,22 +467,33 @@ EXPORT_SYMBOL_GPL(nf_nat_packet);
struct nf_nat_proto_clean {
u8 l3proto;
u8 l4proto;
+ bool hash;
};
-/* kill conntracks with affected NAT section */
-static int nf_nat_proto_remove(struct nf_conn *i, void *data)
+/* Clear NAT section of all conntracks, in case we're loaded again. */
+static int nf_nat_proto_clean(struct nf_conn *i, void *data)
{
const struct nf_nat_proto_clean *clean = data;
struct nf_conn_nat *nat = nfct_nat(i);
if (!nat)
return 0;
-
+ if (!(i->status & IPS_SRC_NAT_DONE))
+ return 0;
if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
(clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
return 0;
- return i->status & IPS_NAT_MASK ? 1 : 0;
+ if (clean->hash) {
+ spin_lock_bh(&nf_nat_lock);
+ hlist_del_rcu(&nat->bysource);
+ spin_unlock_bh(&nf_nat_lock);
+ } else {
+ memset(nat, 0, sizeof(*nat));
+ i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK |
+ IPS_SEQ_ADJUST);
+ }
+ return 0;
}
static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
@@ -494,8 +505,16 @@ static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
struct net *net;
rtnl_lock();
+ /* Step 1 - remove from bysource hash */
+ clean.hash = true;
for_each_net(net)
- nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);
+ nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
+ synchronize_rcu();
+
+ /* Step 2 - clean NAT section */
+ clean.hash = false;
+ for_each_net(net)
+ nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
rtnl_unlock();
}
@@ -507,9 +526,16 @@ static void nf_nat_l3proto_clean(u8 l3proto)
struct net *net;
rtnl_lock();
+ /* Step 1 - remove from bysource hash */
+ clean.hash = true;
+ for_each_net(net)
+ nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
+ synchronize_rcu();
+ /* Step 2 - clean NAT section */
+ clean.hash = false;
for_each_net(net)
- nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);
+ nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
rtnl_unlock();
}
@@ -747,7 +773,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
{
struct nf_nat_proto_clean clean = {};
- nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean);
+ nf_ct_iterate_cleanup(net, &nf_nat_proto_clean, &clean);
synchronize_rcu();
nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
}
diff --git a/trunk/net/netrom/af_netrom.c b/trunk/net/netrom/af_netrom.c
index 103bd704b5fc..d1fa1d9ffd2e 100644
--- a/trunk/net/netrom/af_netrom.c
+++ b/trunk/net/netrom/af_netrom.c
@@ -1173,7 +1173,6 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
}
if (sax != NULL) {
- memset(sax, 0, sizeof(*sax));
sax->sax25_family = AF_NETROM;
skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
AX25_ADDR_LEN);
diff --git a/trunk/net/nfc/llcp/sock.c b/trunk/net/nfc/llcp/sock.c
index 6c94447ec414..8f025746f337 100644
--- a/trunk/net/nfc/llcp/sock.c
+++ b/trunk/net/nfc/llcp/sock.c
@@ -646,8 +646,6 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
pr_debug("%p %zu\n", sk, len);
- msg->msg_namelen = 0;
-
lock_sock(sk);
if (sk->sk_state == LLCP_CLOSED &&
@@ -693,7 +691,6 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap);
- memset(sockaddr, 0, sizeof(*sockaddr));
sockaddr->sa_family = AF_NFC;
sockaddr->nfc_protocol = NFC_PROTO_NFC_DEP;
sockaddr->dsap = ui_cb->dsap;
diff --git a/trunk/net/rose/af_rose.c b/trunk/net/rose/af_rose.c
index 9c8347451597..cf68e6e4054a 100644
--- a/trunk/net/rose/af_rose.c
+++ b/trunk/net/rose/af_rose.c
@@ -1253,7 +1253,6 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (srose != NULL) {
- memset(srose, 0, msg->msg_namelen);
srose->srose_family = AF_ROSE;
srose->srose_addr = rose->dest_addr;
srose->srose_call = rose->dest_call;
diff --git a/trunk/net/sunrpc/clnt.c b/trunk/net/sunrpc/clnt.c
index d5f35f15af98..dcc446e7fbf6 100644
--- a/trunk/net/sunrpc/clnt.c
+++ b/trunk/net/sunrpc/clnt.c
@@ -304,8 +304,10 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
err = rpciod_up();
if (err)
goto out_no_rpciod;
-
err = -EINVAL;
+ if (!xprt)
+ goto out_no_xprt;
+
if (args->version >= program->nrvers)
goto out_err;
version = program->version[args->version];
@@ -380,9 +382,10 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
out_no_stats:
kfree(clnt);
out_err:
+ xprt_put(xprt);
+out_no_xprt:
rpciod_down();
out_no_rpciod:
- xprt_put(xprt);
return ERR_PTR(err);
}
@@ -509,7 +512,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
new = rpc_new_client(args, xprt);
if (IS_ERR(new)) {
err = PTR_ERR(new);
- goto out_err;
+ goto out_put;
}
atomic_inc(&clnt->cl_count);
@@ -522,6 +525,8 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
new->cl_chatty = clnt->cl_chatty;
return new;
+out_put:
+ xprt_put(xprt);
out_err:
dprintk("RPC: %s: returned error %d\n", __func__, err);
return ERR_PTR(err);
diff --git a/trunk/net/tipc/socket.c b/trunk/net/tipc/socket.c
index 515ce38e4f4c..a9622b6cd916 100644
--- a/trunk/net/tipc/socket.c
+++ b/trunk/net/tipc/socket.c
@@ -790,7 +790,6 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
if (addr) {
addr->family = AF_TIPC;
addr->addrtype = TIPC_ADDR_ID;
- memset(&addr->addr, 0, sizeof(addr->addr));
addr->addr.id.ref = msg_origport(msg);
addr->addr.id.node = msg_orignode(msg);
addr->addr.name.domain = 0; /* could leave uninitialized */
@@ -905,9 +904,6 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
goto exit;
}
- /* will be updated in set_orig_addr() if needed */
- m->msg_namelen = 0;
-
timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
restart:
@@ -1017,9 +1013,6 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
goto exit;
}
- /* will be updated in set_orig_addr() if needed */
- m->msg_namelen = 0;
-
target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
diff --git a/trunk/net/vmw_vsock/af_vsock.c b/trunk/net/vmw_vsock/af_vsock.c
index 7f93e2a42d7a..d8079daf1bde 100644
--- a/trunk/net/vmw_vsock/af_vsock.c
+++ b/trunk/net/vmw_vsock/af_vsock.c
@@ -1670,8 +1670,6 @@ vsock_stream_recvmsg(struct kiocb *kiocb,
vsk = vsock_sk(sk);
err = 0;
- msg->msg_namelen = 0;
-
lock_sock(sk);
if (sk->sk_state != SS_CONNECTED) {
diff --git a/trunk/net/vmw_vsock/vmci_transport.c b/trunk/net/vmw_vsock/vmci_transport.c
index 5e04d3d96285..1f6508e249ae 100644
--- a/trunk/net/vmw_vsock/vmci_transport.c
+++ b/trunk/net/vmw_vsock/vmci_transport.c
@@ -1736,8 +1736,6 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,
if (flags & MSG_OOB || flags & MSG_ERRQUEUE)
return -EOPNOTSUPP;
- msg->msg_namelen = 0;
-
/* Retrieve the head sk_buff from the socket's receive queue. */
err = 0;
skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
@@ -1770,6 +1768,7 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,
if (err)
goto out;
+ msg->msg_namelen = 0;
if (msg->msg_name) {
struct sockaddr_vm *vm_addr;
diff --git a/trunk/net/wireless/sme.c b/trunk/net/wireless/sme.c
index 482c70e70127..09d994d192ff 100644
--- a/trunk/net/wireless/sme.c
+++ b/trunk/net/wireless/sme.c
@@ -224,7 +224,6 @@ void cfg80211_conn_work(struct work_struct *work)
rtnl_lock();
cfg80211_lock_rdev(rdev);
mutex_lock(&rdev->devlist_mtx);
- mutex_lock(&rdev->sched_scan_mtx);
list_for_each_entry(wdev, &rdev->wdev_list, list) {
wdev_lock(wdev);
@@ -249,7 +248,6 @@ void cfg80211_conn_work(struct work_struct *work)
wdev_unlock(wdev);
}
- mutex_unlock(&rdev->sched_scan_mtx);
mutex_unlock(&rdev->devlist_mtx);
cfg80211_unlock_rdev(rdev);
rtnl_unlock();
diff --git a/trunk/scripts/checkpatch.pl b/trunk/scripts/checkpatch.pl
index 4de4bc48493b..b28cc384a5bc 100755
--- a/trunk/scripts/checkpatch.pl
+++ b/trunk/scripts/checkpatch.pl
@@ -3016,7 +3016,6 @@ sub process {
$dstat !~ /^'X'$/ && # character constants
$dstat !~ /$exceptions/ &&
$dstat !~ /^\.$Ident\s*=/ && # .foo =
- $dstat !~ /^(?:\#\s*$Ident|\#\s*$Constant)\s*$/ && # stringification #foo
$dstat !~ /^do\s*$Constant\s*while\s*$Constant;?$/ && # do {...} while (...); // do {...} while (...)
$dstat !~ /^for\s*$Constant$/ && # for (...)
$dstat !~ /^for\s*$Constant\s+(?:$Ident|-?$Constant)$/ && # for (...) bar()
diff --git a/trunk/security/capability.c b/trunk/security/capability.c
index 6783c3e6c88e..579775088967 100644
--- a/trunk/security/capability.c
+++ b/trunk/security/capability.c
@@ -737,11 +737,6 @@ static int cap_tun_dev_open(void *security)
{
return 0;
}
-
-static void cap_skb_owned_by(struct sk_buff *skb, struct sock *sk)
-{
-}
-
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -1076,7 +1071,6 @@ void __init security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, tun_dev_open);
set_to_cap_if_null(ops, tun_dev_attach_queue);
set_to_cap_if_null(ops, tun_dev_attach);
- set_to_cap_if_null(ops, skb_owned_by);
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
set_to_cap_if_null(ops, xfrm_policy_alloc_security);
diff --git a/trunk/security/security.c b/trunk/security/security.c
index 03f248b84e9f..7b88c6aeaed4 100644
--- a/trunk/security/security.c
+++ b/trunk/security/security.c
@@ -1290,11 +1290,6 @@ int security_tun_dev_open(void *security)
}
EXPORT_SYMBOL(security_tun_dev_open);
-void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
-{
- security_ops->skb_owned_by(skb, sk);
-}
-
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
diff --git a/trunk/security/selinux/hooks.c b/trunk/security/selinux/hooks.c
index 7171a957b933..2fa28c88900c 100644
--- a/trunk/security/selinux/hooks.c
+++ b/trunk/security/selinux/hooks.c
@@ -51,7 +51,6 @@
#include
#include
#include /* for local_port_range[] */
-#include
#include /* struct or_callable used in sock_rcv_skb */
#include
#include
@@ -4364,11 +4363,6 @@ static void selinux_inet_conn_established(struct sock *sk, struct sk_buff *skb)
selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid);
}
-static void selinux_skb_owned_by(struct sk_buff *skb, struct sock *sk)
-{
- skb_set_owner_w(skb, sk);
-}
-
static int selinux_secmark_relabel_packet(u32 sid)
{
const struct task_security_struct *__tsec;
@@ -5670,7 +5664,6 @@ static struct security_operations selinux_ops = {
.tun_dev_attach_queue = selinux_tun_dev_attach_queue,
.tun_dev_attach = selinux_tun_dev_attach,
.tun_dev_open = selinux_tun_dev_open,
- .skb_owned_by = selinux_skb_owned_by,
#ifdef CONFIG_SECURITY_NETWORK_XFRM
.xfrm_policy_alloc_security = selinux_xfrm_policy_alloc,
diff --git a/trunk/sound/soc/codecs/wm5102.c b/trunk/sound/soc/codecs/wm5102.c
index 34d0201d6a78..b82bbf584146 100644
--- a/trunk/sound/soc/codecs/wm5102.c
+++ b/trunk/sound/soc/codecs/wm5102.c
@@ -584,7 +584,7 @@ static int wm5102_sysclk_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
- struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
+ struct arizona *arizona = dev_get_drvdata(codec->dev);
struct regmap *regmap = codec->control_data;
const struct reg_default *patch = NULL;
int i, patch_size;
diff --git a/trunk/sound/soc/codecs/wm8903.c b/trunk/sound/soc/codecs/wm8903.c
index f8a31ad0b203..134e41c870b9 100644
--- a/trunk/sound/soc/codecs/wm8903.c
+++ b/trunk/sound/soc/codecs/wm8903.c
@@ -1083,8 +1083,6 @@ static const struct snd_soc_dapm_route wm8903_intercon[] = {
{ "ROP", NULL, "Right Speaker PGA" },
{ "RON", NULL, "Right Speaker PGA" },
- { "Charge Pump", NULL, "CLK_DSP" },
-
{ "Left Headphone Output PGA", NULL, "Charge Pump" },
{ "Right Headphone Output PGA", NULL, "Charge Pump" },
{ "Left Line Output PGA", NULL, "Charge Pump" },
diff --git a/trunk/sound/soc/samsung/i2s.c b/trunk/sound/soc/samsung/i2s.c
index 6bbeb0bf1a73..d7231e336a7c 100644
--- a/trunk/sound/soc/samsung/i2s.c
+++ b/trunk/sound/soc/samsung/i2s.c
@@ -972,7 +972,6 @@ static const struct snd_soc_dai_ops samsung_i2s_dai_ops = {
static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec)
{
struct i2s_dai *i2s;
- int ret;
i2s = devm_kzalloc(&pdev->dev, sizeof(struct i2s_dai), GFP_KERNEL);
if (i2s == NULL)
@@ -997,18 +996,16 @@ static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec)
i2s->i2s_dai_drv.capture.channels_max = 2;
i2s->i2s_dai_drv.capture.rates = SAMSUNG_I2S_RATES;
i2s->i2s_dai_drv.capture.formats = SAMSUNG_I2S_FMTS;
- dev_set_drvdata(&i2s->pdev->dev, i2s);
} else { /* Create a new platform_device for Secondary */
- i2s->pdev = platform_device_alloc("samsung-i2s-sec", -1);
+ i2s->pdev = platform_device_register_resndata(NULL,
+ "samsung-i2s-sec", -1, NULL, 0, NULL, 0);
if (IS_ERR(i2s->pdev))
return NULL;
-
- platform_set_drvdata(i2s->pdev, i2s);
- ret = platform_device_add(i2s->pdev);
- if (ret < 0)
- return NULL;
}
+ /* Pre-assign snd_soc_dai_set_drvdata */
+ dev_set_drvdata(&i2s->pdev->dev, i2s);
+
return i2s;
}
@@ -1110,10 +1107,6 @@ static int samsung_i2s_probe(struct platform_device *pdev)
if (samsung_dai_type == TYPE_SEC) {
sec_dai = dev_get_drvdata(&pdev->dev);
- if (!sec_dai) {
- dev_err(&pdev->dev, "Unable to get drvdata\n");
- return -EFAULT;
- }
snd_soc_register_dai(&sec_dai->pdev->dev,
&sec_dai->i2s_dai_drv);
asoc_dma_platform_register(&pdev->dev);
diff --git a/trunk/sound/soc/soc-compress.c b/trunk/sound/soc/soc-compress.c
index ed0bfb0ddb96..b5b3db71e253 100644
--- a/trunk/sound/soc/soc-compress.c
+++ b/trunk/sound/soc/soc-compress.c
@@ -211,27 +211,19 @@ static int soc_compr_set_params(struct snd_compr_stream *cstream,
if (platform->driver->compr_ops && platform->driver->compr_ops->set_params) {
ret = platform->driver->compr_ops->set_params(cstream, params);
if (ret < 0)
- goto err;
+ goto out;
}
if (rtd->dai_link->compr_ops && rtd->dai_link->compr_ops->set_params) {
ret = rtd->dai_link->compr_ops->set_params(cstream);
if (ret < 0)
- goto err;
+ goto out;
}
snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
SND_SOC_DAPM_STREAM_START);
- /* cancel any delayed stream shutdown that is pending */
- rtd->pop_wait = 0;
- mutex_unlock(&rtd->pcm_mutex);
-
- cancel_delayed_work_sync(&rtd->delayed_work);
-
- return ret;
-
-err:
+out:
mutex_unlock(&rtd->pcm_mutex);
return ret;
}
diff --git a/trunk/sound/soc/soc-core.c b/trunk/sound/soc/soc-core.c
index ff4b45a5d796..507d251916af 100644
--- a/trunk/sound/soc/soc-core.c
+++ b/trunk/sound/soc/soc-core.c
@@ -2963,7 +2963,7 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
val = val << shift;
ret = snd_soc_update_bits_locked(codec, reg, val_mask, val);
- if (ret < 0)
+ if (ret != 0)
return ret;
if (snd_soc_volsw_is_stereo(mc)) {
diff --git a/trunk/sound/soc/tegra/tegra_pcm.c b/trunk/sound/soc/tegra/tegra_pcm.c
index 5e2c55c5b255..c925ab0adeb6 100644
--- a/trunk/sound/soc/tegra/tegra_pcm.c
+++ b/trunk/sound/soc/tegra/tegra_pcm.c
@@ -43,6 +43,8 @@
static const struct snd_pcm_hardware tegra_pcm_hardware = {
.info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_RESUME |
SNDRV_PCM_INFO_INTERLEAVED,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.channels_min = 2,
@@ -125,6 +127,26 @@ static int tegra_pcm_hw_free(struct snd_pcm_substream *substream)
return 0;
}
+static int tegra_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ return snd_dmaengine_pcm_trigger(substream,
+ SNDRV_PCM_TRIGGER_START);
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ return snd_dmaengine_pcm_trigger(substream,
+ SNDRV_PCM_TRIGGER_STOP);
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int tegra_pcm_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *vma)
{
@@ -142,7 +164,7 @@ static struct snd_pcm_ops tegra_pcm_ops = {
.ioctl = snd_pcm_lib_ioctl,
.hw_params = tegra_pcm_hw_params,
.hw_free = tegra_pcm_hw_free,
- .trigger = snd_dmaengine_pcm_trigger,
+ .trigger = tegra_pcm_trigger,
.pointer = snd_dmaengine_pcm_pointer,
.mmap = tegra_pcm_mmap,
};
diff --git a/trunk/sound/usb/mixer_quirks.c b/trunk/sound/usb/mixer_quirks.c
index ebe91440a068..497d2741d119 100644
--- a/trunk/sound/usb/mixer_quirks.c
+++ b/trunk/sound/usb/mixer_quirks.c
@@ -509,7 +509,7 @@ static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol,
else
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0, wIndex,
+ 0, cpu_to_le16(wIndex),
&tmp, sizeof(tmp), 1000);
up_read(&mixer->chip->shutdown_rwsem);
@@ -540,7 +540,7 @@ static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol,
else
ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
- wValue, wIndex,
+ cpu_to_le16(wValue), cpu_to_le16(wIndex),
NULL, 0, 1000);
up_read(&mixer->chip->shutdown_rwsem);
diff --git a/trunk/sound/usb/quirks.c b/trunk/sound/usb/quirks.c
index 9c5ab22358b1..5325a3869bb7 100644
--- a/trunk/sound/usb/quirks.c
+++ b/trunk/sound/usb/quirks.c
@@ -486,7 +486,7 @@ static int snd_usb_nativeinstruments_boot_quirk(struct usb_device *dev)
{
int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
0xaf, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 1, 0, NULL, 0, 1000);
+ cpu_to_le16(1), 0, NULL, 0, 1000);
if (ret < 0)
return ret;
diff --git a/trunk/tools/power/x86/turbostat/turbostat.c b/trunk/tools/power/x86/turbostat/turbostat.c
index 321e066a0753..6f3214ed4444 100644
--- a/trunk/tools/power/x86/turbostat/turbostat.c
+++ b/trunk/tools/power/x86/turbostat/turbostat.c
@@ -1421,7 +1421,6 @@ int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
case 0x3C: /* HSW */
case 0x3F: /* HSW */
case 0x45: /* HSW */
- case 0x46: /* HSW */
return 1;
case 0x2E: /* Nehalem-EX Xeon - Beckton */
case 0x2F: /* Westmere-EX Xeon - Eagleton */
@@ -1516,7 +1515,6 @@ void rapl_probe(unsigned int family, unsigned int model)
case 0x3C: /* HSW */
case 0x3F: /* HSW */
case 0x45: /* HSW */
- case 0x46: /* HSW */
do_rapl = RAPL_PKG | RAPL_CORES | RAPL_GFX;
break;
case 0x2D:
@@ -1756,7 +1754,6 @@ int is_snb(unsigned int family, unsigned int model)
case 0x3C: /* HSW */
case 0x3F: /* HSW */
case 0x45: /* HSW */
- case 0x46: /* HSW */
return 1;
}
return 0;
@@ -2279,7 +2276,7 @@ int main(int argc, char **argv)
cmdline(argc, argv);
if (verbose)
- fprintf(stderr, "turbostat v3.3 March 15, 2013"
+ fprintf(stderr, "turbostat v3.2 February 11, 2013"
" - Len Brown \n");
turbostat_init();