diff --git a/[refs] b/[refs]
index 618e56d38cce..0270ff6116db 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: ea2f2fa143ead1a9bf5ce87a3abe403694fe6d78
+refs/heads/master: fe8a93b95145c66adf196eea4a919dfe0b7c57db
diff --git a/trunk/Documentation/DocBook/device-drivers.tmpl b/trunk/Documentation/DocBook/device-drivers.tmpl
index c36892c072da..7514dbf0a679 100644
--- a/trunk/Documentation/DocBook/device-drivers.tmpl
+++ b/trunk/Documentation/DocBook/device-drivers.tmpl
@@ -227,7 +227,7 @@ X!Isound/sound_firmware.c
16x50 UART Driver
!Edrivers/tty/serial/serial_core.c
-!Edrivers/tty/serial/8250/8250_core.c
+!Edrivers/tty/serial/8250/8250.c
diff --git a/trunk/Documentation/scsi/LICENSE.qla2xxx b/trunk/Documentation/scsi/LICENSE.qla2xxx
index 5020b7b5a244..27a91cf43d6d 100644
--- a/trunk/Documentation/scsi/LICENSE.qla2xxx
+++ b/trunk/Documentation/scsi/LICENSE.qla2xxx
@@ -1,4 +1,4 @@
-Copyright (c) 2003-2013 QLogic Corporation
+Copyright (c) 2003-2012 QLogic Corporation
QLogic Linux FC-FCoE Driver
This program includes a device driver for Linux 3.x.
diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS
index 8bdd7a7ef2f4..376704078d65 100644
--- a/trunk/MAINTAINERS
+++ b/trunk/MAINTAINERS
@@ -4941,12 +4941,6 @@ W: logfs.org
S: Maintained
F: fs/logfs/
-LPC32XX MACHINE SUPPORT
-M: Roland Stigge
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-F: arch/arm/mach-lpc32xx/
-
LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
M: Nagalakshmi Nandigama
M: Sreekanth Reddy
@@ -6631,7 +6625,7 @@ S: Supported
F: fs/reiserfs/
REGISTER MAP ABSTRACTION
-M: Mark Brown
+M: Mark Brown
T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap.git
S: Supported
F: drivers/base/regmap/
@@ -7379,7 +7373,7 @@ F: sound/
SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC)
M: Liam Girdwood
-M: Mark Brown
+M: Mark Brown
T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
W: http://alsa-project.org/main/index.php/ASoC
@@ -7468,7 +7462,7 @@ F: drivers/clk/spear/
SPI SUBSYSTEM
M: Grant Likely
-M: Mark Brown
+M: Mark Brown
L: spi-devel-general@lists.sourceforge.net
Q: http://patchwork.kernel.org/project/spi-devel-general/list/
T: git git://git.secretlab.ca/git/linux-2.6.git
@@ -8713,7 +8707,7 @@ F: drivers/scsi/vmw_pvscsi.h
VOLTAGE AND CURRENT REGULATOR FRAMEWORK
M: Liam Girdwood
-M: Mark Brown
+M: Mark Brown
W: http://opensource.wolfsonmicro.com/node/15
W: http://www.slimlogic.co.uk/?p=48
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lrg/regulator.git
diff --git a/trunk/Makefile b/trunk/Makefile
index ac509b25b701..6db672b15bda 100644
--- a/trunk/Makefile
+++ b/trunk/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 9
SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc6
NAME = Unicycling Gorilla
# *DOCUMENTATION*
@@ -513,8 +513,7 @@ ifeq ($(KBUILD_EXTMOD),)
# Carefully list dependencies so we do not try to build scripts twice
# in parallel
PHONY += scripts
-scripts: scripts_basic include/config/auto.conf include/config/tristate.conf \
- asm-generic
+scripts: scripts_basic include/config/auto.conf include/config/tristate.conf
$(Q)$(MAKE) $(build)=$(@)
# Objects we will link into vmlinux / subdirs we need to visit
diff --git a/trunk/arch/arm/boot/dts/imx28-m28evk.dts b/trunk/arch/arm/boot/dts/imx28-m28evk.dts
index fd36e1cca104..6ce3d17c3a29 100644
--- a/trunk/arch/arm/boot/dts/imx28-m28evk.dts
+++ b/trunk/arch/arm/boot/dts/imx28-m28evk.dts
@@ -152,6 +152,7 @@
i2c0: i2c@80058000 {
pinctrl-names = "default";
pinctrl-0 = <&i2c0_pins_a>;
+ clock-frequency = <400000>;
status = "okay";
sgtl5000: codec@0a {
diff --git a/trunk/arch/arm/boot/dts/imx28-sps1.dts b/trunk/arch/arm/boot/dts/imx28-sps1.dts
index 6c6a5442800a..e6cde8aa7fff 100644
--- a/trunk/arch/arm/boot/dts/imx28-sps1.dts
+++ b/trunk/arch/arm/boot/dts/imx28-sps1.dts
@@ -70,6 +70,7 @@
i2c0: i2c@80058000 {
pinctrl-names = "default";
pinctrl-0 = <&i2c0_pins_a>;
+ clock-frequency = <400000>;
status = "okay";
rtc: rtc@51 {
diff --git a/trunk/arch/arm/boot/dts/imx6qdl.dtsi b/trunk/arch/arm/boot/dts/imx6qdl.dtsi
index 281a223591ff..06ec460b4581 100644
--- a/trunk/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/trunk/arch/arm/boot/dts/imx6qdl.dtsi
@@ -91,7 +91,6 @@
compatible = "arm,cortex-a9-twd-timer";
reg = <0x00a00600 0x20>;
interrupts = <1 13 0xf01>;
- clocks = <&clks 15>;
};
L2: l2-cache@00a02000 {
diff --git a/trunk/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts b/trunk/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
index 3694e94f6e99..93c3afbef9ee 100644
--- a/trunk/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
+++ b/trunk/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
@@ -96,11 +96,11 @@
marvell,function = "gpio";
};
pmx_led_rebuild_brt_ctrl_1: pmx-led-rebuild-brt-ctrl-1 {
- marvell,pins = "mpp46";
+ marvell,pins = "mpp44";
marvell,function = "gpio";
};
pmx_led_rebuild_brt_ctrl_2: pmx-led-rebuild-brt-ctrl-2 {
- marvell,pins = "mpp47";
+ marvell,pins = "mpp45";
marvell,function = "gpio";
};
@@ -157,14 +157,14 @@
gpios = <&gpio0 16 0>;
linux,default-trigger = "default-on";
};
- rebuild_led {
- label = "status:white:rebuild_led";
- gpios = <&gpio1 4 0>;
- };
- health_led {
+ health_led1 {
label = "status:red:health_led";
gpios = <&gpio1 5 0>;
};
+ health_led2 {
+ label = "status:white:health_led";
+ gpios = <&gpio1 4 0>;
+ };
backup_led {
label = "status:blue:backup_led";
gpios = <&gpio0 15 0>;
diff --git a/trunk/arch/arm/include/asm/pgtable-3level.h b/trunk/arch/arm/include/asm/pgtable-3level.h
index 86b8fe398b95..6ef8afd1b64c 100644
--- a/trunk/arch/arm/include/asm/pgtable-3level.h
+++ b/trunk/arch/arm/include/asm/pgtable-3level.h
@@ -111,7 +111,7 @@
#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
-#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
+#define L_PTE_S2_RDWR (_AT(pteval_t, 2) << 6) /* HAP[2:1] */
/*
* Hyp-mode PL2 PTE definitions for LPAE.
diff --git a/trunk/arch/arm/kvm/arm.c b/trunk/arch/arm/kvm/arm.c
index c1fe498983ac..5a936988eb24 100644
--- a/trunk/arch/arm/kvm/arm.c
+++ b/trunk/arch/arm/kvm/arm.c
@@ -201,7 +201,6 @@ int kvm_dev_ioctl_check_extension(long ext)
break;
case KVM_CAP_ARM_SET_DEVICE_ADDR:
r = 1;
- break;
case KVM_CAP_NR_VCPUS:
r = num_online_cpus();
break;
diff --git a/trunk/arch/arm/kvm/coproc.c b/trunk/arch/arm/kvm/coproc.c
index 7bed7556077a..4ea9a982269c 100644
--- a/trunk/arch/arm/kvm/coproc.c
+++ b/trunk/arch/arm/kvm/coproc.c
@@ -79,11 +79,11 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
u32 val;
int cpu;
+ cpu = get_cpu();
+
if (!p->is_write)
return read_from_write_only(vcpu, p);
- cpu = get_cpu();
-
cpumask_setall(&vcpu->arch.require_dcache_flush);
cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
diff --git a/trunk/arch/arm/mach-imx/clk-imx35.c b/trunk/arch/arm/mach-imx/clk-imx35.c
index 2193c834f55c..e13a8fa5e62c 100644
--- a/trunk/arch/arm/mach-imx/clk-imx35.c
+++ b/trunk/arch/arm/mach-imx/clk-imx35.c
@@ -257,7 +257,6 @@ int __init mx35_clocks_init(void)
clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0");
clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");
- clk_register_clkdev(clk[admux_gate], "audmux", NULL);
clk_prepare_enable(clk[spba_gate]);
clk_prepare_enable(clk[gpio1_gate]);
@@ -266,7 +265,6 @@ int __init mx35_clocks_init(void)
clk_prepare_enable(clk[iim_gate]);
clk_prepare_enable(clk[emi_gate]);
clk_prepare_enable(clk[max_gate]);
- clk_prepare_enable(clk[iomuxc_gate]);
/*
* SCC is needed to boot via mmc after a watchdog reset. The clock code
diff --git a/trunk/arch/arm/mach-imx/clk-imx6q.c b/trunk/arch/arm/mach-imx/clk-imx6q.c
index d38e54f5b6d7..2f9ff93a4e61 100644
--- a/trunk/arch/arm/mach-imx/clk-imx6q.c
+++ b/trunk/arch/arm/mach-imx/clk-imx6q.c
@@ -115,7 +115,7 @@ static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m"
static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", };
static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", };
static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
-static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", };
+static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_pfd1_540m", };
static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
@@ -443,6 +443,7 @@ int __init mx6q_clocks_init(void)
clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0");
clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0");
+ clk_register_clkdev(clk[twd], NULL, "smp_twd");
clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL);
clk_register_clkdev(clk[ahb], "ahb", NULL);
clk_register_clkdev(clk[cko1], "cko1", NULL);
diff --git a/trunk/arch/arm/mach-kirkwood/board-iomega_ix2_200.c b/trunk/arch/arm/mach-kirkwood/board-iomega_ix2_200.c
index e5f70415905a..f655b2637b0e 100644
--- a/trunk/arch/arm/mach-kirkwood/board-iomega_ix2_200.c
+++ b/trunk/arch/arm/mach-kirkwood/board-iomega_ix2_200.c
@@ -20,15 +20,10 @@ static struct mv643xx_eth_platform_data iomega_ix2_200_ge00_data = {
.duplex = DUPLEX_FULL,
};
-static struct mv643xx_eth_platform_data iomega_ix2_200_ge01_data = {
- .phy_addr = MV643XX_ETH_PHY_ADDR(11),
-};
-
void __init iomega_ix2_200_init(void)
{
/*
* Basic setup. Needs to be called early.
*/
- kirkwood_ge00_init(&iomega_ix2_200_ge00_data);
- kirkwood_ge01_init(&iomega_ix2_200_ge01_data);
+ kirkwood_ge01_init(&iomega_ix2_200_ge00_data);
}
diff --git a/trunk/arch/arm/mach-mvebu/irq-armada-370-xp.c b/trunk/arch/arm/mach-mvebu/irq-armada-370-xp.c
index d5970f5a1e8d..6a9195e10579 100644
--- a/trunk/arch/arm/mach-mvebu/irq-armada-370-xp.c
+++ b/trunk/arch/arm/mach-mvebu/irq-armada-370-xp.c
@@ -61,6 +61,7 @@ static struct irq_domain *armada_370_xp_mpic_domain;
*/
static void armada_370_xp_irq_mask(struct irq_data *d)
{
+#ifdef CONFIG_SMP
irq_hw_number_t hwirq = irqd_to_hwirq(d);
if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
@@ -69,10 +70,15 @@ static void armada_370_xp_irq_mask(struct irq_data *d)
else
writel(hwirq, per_cpu_int_base +
ARMADA_370_XP_INT_SET_MASK_OFFS);
+#else
+ writel(irqd_to_hwirq(d),
+ per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
+#endif
}
static void armada_370_xp_irq_unmask(struct irq_data *d)
{
+#ifdef CONFIG_SMP
irq_hw_number_t hwirq = irqd_to_hwirq(d);
if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
@@ -81,6 +87,10 @@ static void armada_370_xp_irq_unmask(struct irq_data *d)
else
writel(hwirq, per_cpu_int_base +
ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+#else
+ writel(irqd_to_hwirq(d),
+ per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+#endif
}
#ifdef CONFIG_SMP
@@ -136,11 +146,7 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
unsigned int virq, irq_hw_number_t hw)
{
armada_370_xp_irq_mask(irq_get_irq_data(virq));
- if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
- writel(hw, per_cpu_int_base +
- ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
- else
- writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
+ writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
irq_set_status_flags(virq, IRQ_LEVEL);
if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
diff --git a/trunk/arch/arm/mach-s3c24xx/include/mach/irqs.h b/trunk/arch/arm/mach-s3c24xx/include/mach/irqs.h
index 1e73f5fa8659..b7a9f4d469e8 100644
--- a/trunk/arch/arm/mach-s3c24xx/include/mach/irqs.h
+++ b/trunk/arch/arm/mach-s3c24xx/include/mach/irqs.h
@@ -188,8 +188,10 @@
#if defined(CONFIG_CPU_S3C2416)
#define NR_IRQS (IRQ_S3C2416_I2S1 + 1)
+#elif defined(CONFIG_CPU_S3C2443)
+#define NR_IRQS (IRQ_S3C2443_AC97+1)
#else
-#define NR_IRQS (IRQ_S3C2443_AC97 + 1)
+#define NR_IRQS (IRQ_S3C2440_AC97+1)
#endif
/* compatibility define. */
diff --git a/trunk/arch/arm/mach-s3c24xx/irq.c b/trunk/arch/arm/mach-s3c24xx/irq.c
index d8ba9bee4c7e..cb9f5e011e73 100644
--- a/trunk/arch/arm/mach-s3c24xx/irq.c
+++ b/trunk/arch/arm/mach-s3c24xx/irq.c
@@ -500,7 +500,7 @@ struct s3c_irq_intc *s3c24xx_init_intc(struct device_node *np,
base = (void *)0xfd000000;
intc->reg_mask = base + 0xa4;
- intc->reg_pending = base + 0xa8;
+ intc->reg_pending = base + 0x08;
irq_num = 20;
irq_start = S3C2410_IRQ(32);
irq_offset = 4;
diff --git a/trunk/arch/arm/mm/proc-arm920.S b/trunk/arch/arm/mm/proc-arm920.S
index 2556cf1c2da1..2c3b9421ab5e 100644
--- a/trunk/arch/arm/mm/proc-arm920.S
+++ b/trunk/arch/arm/mm/proc-arm920.S
@@ -387,7 +387,7 @@ ENTRY(cpu_arm920_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl cpu_arm920_suspend_size
.equ cpu_arm920_suspend_size, 4 * 3
-#ifdef CONFIG_ARM_CPU_SUSPEND
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_arm920_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c13, c0, 0 @ PID
diff --git a/trunk/arch/arm/mm/proc-arm926.S b/trunk/arch/arm/mm/proc-arm926.S
index 344c8a548cc0..f1803f7e2972 100644
--- a/trunk/arch/arm/mm/proc-arm926.S
+++ b/trunk/arch/arm/mm/proc-arm926.S
@@ -402,7 +402,7 @@ ENTRY(cpu_arm926_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl cpu_arm926_suspend_size
.equ cpu_arm926_suspend_size, 4 * 3
-#ifdef CONFIG_ARM_CPU_SUSPEND
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_arm926_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c13, c0, 0 @ PID
diff --git a/trunk/arch/arm/mm/proc-mohawk.S b/trunk/arch/arm/mm/proc-mohawk.S
index 0b60dd3d742a..82f9cdc751d6 100644
--- a/trunk/arch/arm/mm/proc-mohawk.S
+++ b/trunk/arch/arm/mm/proc-mohawk.S
@@ -350,7 +350,7 @@ ENTRY(cpu_mohawk_set_pte_ext)
.globl cpu_mohawk_suspend_size
.equ cpu_mohawk_suspend_size, 4 * 6
-#ifdef CONFIG_ARM_CPU_SUSPEND
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_mohawk_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/trunk/arch/arm/mm/proc-sa1100.S b/trunk/arch/arm/mm/proc-sa1100.S
index d92dfd081429..3aa0da11fd84 100644
--- a/trunk/arch/arm/mm/proc-sa1100.S
+++ b/trunk/arch/arm/mm/proc-sa1100.S
@@ -172,7 +172,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
.globl cpu_sa1100_suspend_size
.equ cpu_sa1100_suspend_size, 4 * 3
-#ifdef CONFIG_ARM_CPU_SUSPEND
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_sa1100_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c3, c0, 0 @ domain ID
diff --git a/trunk/arch/arm/mm/proc-v6.S b/trunk/arch/arm/mm/proc-v6.S
index 5c07ee4fe3eb..bcaaa8de9325 100644
--- a/trunk/arch/arm/mm/proc-v6.S
+++ b/trunk/arch/arm/mm/proc-v6.S
@@ -138,7 +138,7 @@ ENTRY(cpu_v6_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
.globl cpu_v6_suspend_size
.equ cpu_v6_suspend_size, 4 * 6
-#ifdef CONFIG_ARM_CPU_SUSPEND
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_v6_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
diff --git a/trunk/arch/arm/mm/proc-xsc3.S b/trunk/arch/arm/mm/proc-xsc3.S
index e8efd83b6f25..eb93d6487f35 100644
--- a/trunk/arch/arm/mm/proc-xsc3.S
+++ b/trunk/arch/arm/mm/proc-xsc3.S
@@ -413,7 +413,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
.globl cpu_xsc3_suspend_size
.equ cpu_xsc3_suspend_size, 4 * 6
-#ifdef CONFIG_ARM_CPU_SUSPEND
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_xsc3_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/trunk/arch/arm/mm/proc-xscale.S b/trunk/arch/arm/mm/proc-xscale.S
index e766f889bfd6..25510361aa18 100644
--- a/trunk/arch/arm/mm/proc-xscale.S
+++ b/trunk/arch/arm/mm/proc-xscale.S
@@ -528,7 +528,7 @@ ENTRY(cpu_xscale_set_pte_ext)
.globl cpu_xscale_suspend_size
.equ cpu_xscale_suspend_size, 4 * 6
-#ifdef CONFIG_ARM_CPU_SUSPEND
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_xscale_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/trunk/arch/m68k/include/asm/gpio.h b/trunk/arch/m68k/include/asm/gpio.h
index 8cc83431805b..4395ffc51fdb 100644
--- a/trunk/arch/m68k/include/asm/gpio.h
+++ b/trunk/arch/m68k/include/asm/gpio.h
@@ -86,24 +86,4 @@ static inline int gpio_cansleep(unsigned gpio)
return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio);
}
-static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
-{
- int err;
-
- err = gpio_request(gpio, label);
- if (err)
- return err;
-
- if (flags & GPIOF_DIR_IN)
- err = gpio_direction_input(gpio);
- else
- err = gpio_direction_output(gpio,
- (flags & GPIOF_INIT_HIGH) ? 1 : 0);
-
- if (err)
- gpio_free(gpio);
-
- return err;
-}
-
#endif
diff --git a/trunk/arch/powerpc/kernel/entry_64.S b/trunk/arch/powerpc/kernel/entry_64.S
index 04d69c4a5ac2..256c5bf0adb7 100644
--- a/trunk/arch/powerpc/kernel/entry_64.S
+++ b/trunk/arch/powerpc/kernel/entry_64.S
@@ -304,7 +304,7 @@ syscall_exit_work:
subi r12,r12,TI_FLAGS
4: /* Anything else left to do? */
- SET_DEFAULT_THREAD_PPR(r3, r10) /* Set thread.ppr = 3 */
+ SET_DEFAULT_THREAD_PPR(r3, r9) /* Set thread.ppr = 3 */
andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
beq .ret_from_except_lite
@@ -657,7 +657,7 @@ resume_kernel:
/* Clear _TIF_EMULATE_STACK_STORE flag */
lis r11,_TIF_EMULATE_STACK_STORE@h
addi r5,r9,TI_FLAGS
-0: ldarx r4,0,r5
+ ldarx r4,0,r5
andc r4,r4,r11
stdcx. r4,0,r5
bne- 0b
diff --git a/trunk/arch/powerpc/kernel/process.c b/trunk/arch/powerpc/kernel/process.c
index 16e77a81ab4f..59dd545fdde1 100644
--- a/trunk/arch/powerpc/kernel/process.c
+++ b/trunk/arch/powerpc/kernel/process.c
@@ -555,12 +555,10 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new)
new->thread.regs->msr |=
(MSR_FP | new->thread.fpexc_mode);
}
-#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) {
do_load_up_transact_altivec(&new->thread);
new->thread.regs->msr |= MSR_VEC;
}
-#endif
/* We may as well turn on VSX too since all the state is restored now */
if (msr & MSR_VSX)
new->thread.regs->msr |= MSR_VSX;
diff --git a/trunk/arch/powerpc/kernel/signal_32.c b/trunk/arch/powerpc/kernel/signal_32.c
index 95068bf569ad..3acb28e245b4 100644
--- a/trunk/arch/powerpc/kernel/signal_32.c
+++ b/trunk/arch/powerpc/kernel/signal_32.c
@@ -866,12 +866,10 @@ static long restore_tm_user_regs(struct pt_regs *regs,
do_load_up_transact_fpu(&current->thread);
regs->msr |= (MSR_FP | current->thread.fpexc_mode);
}
-#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) {
do_load_up_transact_altivec(&current->thread);
regs->msr |= MSR_VEC;
}
-#endif
return 0;
}
diff --git a/trunk/arch/powerpc/kernel/signal_64.c b/trunk/arch/powerpc/kernel/signal_64.c
index c1794286098c..995f8543cb57 100644
--- a/trunk/arch/powerpc/kernel/signal_64.c
+++ b/trunk/arch/powerpc/kernel/signal_64.c
@@ -522,12 +522,10 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
do_load_up_transact_fpu(&current->thread);
regs->msr |= (MSR_FP | current->thread.fpexc_mode);
}
-#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) {
do_load_up_transact_altivec(&current->thread);
regs->msr |= MSR_VEC;
}
-#endif
return err;
}
diff --git a/trunk/arch/powerpc/kernel/tm.S b/trunk/arch/powerpc/kernel/tm.S
index 2da67e7a16d5..84dbace657ce 100644
--- a/trunk/arch/powerpc/kernel/tm.S
+++ b/trunk/arch/powerpc/kernel/tm.S
@@ -309,7 +309,6 @@ _GLOBAL(tm_recheckpoint)
or r5, r6, r5 /* Set MSR.FP+.VSX/.VEC */
mtmsr r5
-#ifdef CONFIG_ALTIVEC
/* FP and VEC registers: These are recheckpointed from thread.fpr[]
* and thread.vr[] respectively. The thread.transact_fpr[] version
* is more modern, and will be loaded subsequently by any FPUnavailable
@@ -324,7 +323,6 @@ _GLOBAL(tm_recheckpoint)
REST_32VRS(0, r5, r3) /* r5 scratch, r3 THREAD ptr */
ld r5, THREAD_VRSAVE(r3)
mtspr SPRN_VRSAVE, r5
-#endif
dont_restore_vec:
andi. r0, r4, MSR_FP
diff --git a/trunk/arch/powerpc/kvm/e500.h b/trunk/arch/powerpc/kvm/e500.h
index 33db48a8ce24..41cefd43655f 100644
--- a/trunk/arch/powerpc/kvm/e500.h
+++ b/trunk/arch/powerpc/kvm/e500.h
@@ -26,20 +26,17 @@
#define E500_PID_NUM 3
#define E500_TLB_NUM 2
-/* entry is mapped somewhere in host TLB */
-#define E500_TLB_VALID (1 << 0)
-/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
-#define E500_TLB_BITMAP (1 << 1)
-/* TLB1 entry is mapped by host TLB0 */
+#define E500_TLB_VALID 1
+#define E500_TLB_BITMAP 2
#define E500_TLB_TLB0 (1 << 2)
struct tlbe_ref {
- pfn_t pfn; /* valid only for TLB0, except briefly */
- unsigned int flags; /* E500_TLB_* */
+ pfn_t pfn;
+ unsigned int flags; /* E500_TLB_* */
};
struct tlbe_priv {
- struct tlbe_ref ref;
+ struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
};
#ifdef CONFIG_KVM_E500V2
@@ -66,6 +63,17 @@ struct kvmppc_vcpu_e500 {
unsigned int gtlb_nv[E500_TLB_NUM];
+ /*
+ * information associated with each host TLB entry --
+ * TLB1 only for now. If/when guest TLB1 entries can be
+ * mapped with host TLB0, this will be used for that too.
+ *
+ * We don't want to use this for guest TLB0 because then we'd
+ * have the overhead of doing the translation again even if
+ * the entry is still in the guest TLB (e.g. we swapped out
+ * and back, and our host TLB entries got evicted).
+ */
+ struct tlbe_ref *tlb_refs[E500_TLB_NUM];
unsigned int host_tlb1_nv;
u32 svr;
diff --git a/trunk/arch/powerpc/kvm/e500_mmu_host.c b/trunk/arch/powerpc/kvm/e500_mmu_host.c
index 1c6a9d729df4..a222edfb9a9b 100644
--- a/trunk/arch/powerpc/kvm/e500_mmu_host.c
+++ b/trunk/arch/powerpc/kvm/e500_mmu_host.c
@@ -193,11 +193,8 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;
/* Don't bother with unmapped entries */
- if (!(ref->flags & E500_TLB_VALID)) {
- WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
- "%s: flags %x\n", __func__, ref->flags);
- WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
- }
+ if (!(ref->flags & E500_TLB_VALID))
+ return;
if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
@@ -251,7 +248,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
pfn_t pfn)
{
ref->pfn = pfn;
- ref->flags |= E500_TLB_VALID;
+ ref->flags = E500_TLB_VALID;
if (tlbe_is_writable(gtlbe))
kvm_set_pfn_dirty(pfn);
@@ -260,7 +257,6 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
if (ref->flags & E500_TLB_VALID) {
- /* FIXME: don't log bogus pfn for TLB1 */
trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
ref->flags = 0;
}
@@ -278,23 +274,36 @@ static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
- int tlbsel;
+ int tlbsel = 0;
int i;
- for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
- for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
- struct tlbe_ref *ref =
- &vcpu_e500->gtlb_priv[tlbsel][i].ref;
- kvmppc_e500_ref_release(ref);
- }
+ for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
+ struct tlbe_ref *ref =
+ &vcpu_e500->gtlb_priv[tlbsel][i].ref;
+ kvmppc_e500_ref_release(ref);
}
}
-void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
+static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
- struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ int stlbsel = 1;
+ int i;
+
kvmppc_e500_tlbil_all(vcpu_e500);
+
+ for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
+ struct tlbe_ref *ref =
+ &vcpu_e500->tlb_refs[stlbsel][i];
+ kvmppc_e500_ref_release(ref);
+ }
+
clear_tlb_privs(vcpu_e500);
+}
+
+void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ clear_tlb_refs(vcpu_e500);
clear_tlb1_bitmap(vcpu_e500);
}
@@ -449,6 +458,8 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
}
+ /* Drop old ref and setup new one. */
+ kvmppc_e500_ref_release(ref);
kvmppc_e500_ref_setup(ref, gtlbe, pfn);
kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -496,15 +507,14 @@ static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
vcpu_e500->host_tlb1_nv = 0;
+ vcpu_e500->tlb_refs[1][sesel] = *ref;
+ vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
+ vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
- unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
+ unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel];
vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
}
-
- vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
- vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
- vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
- WARN_ON(!(ref->flags & E500_TLB_VALID));
+ vcpu_e500->h2g_tlb1_rmap[sesel] = esel;
return sesel;
}
@@ -516,12 +526,13 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
- struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
+ struct tlbe_ref ref;
int sesel;
int r;
+ ref.flags = 0;
r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
- ref);
+ &ref);
if (r)
return r;
@@ -533,7 +544,7 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
}
/* Otherwise map into TLB1 */
- sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
+ sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel);
write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);
return 0;
@@ -554,7 +565,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
case 0:
priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
- /* Triggers after clear_tlb_privs or on initial mapping */
+ /* Triggers after clear_tlb_refs or on initial mapping */
if (!(priv->ref.flags & E500_TLB_VALID)) {
kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
} else {
@@ -654,16 +665,35 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
host_tlb_params[0].entries / host_tlb_params[0].ways;
host_tlb_params[1].sets = 1;
+ vcpu_e500->tlb_refs[0] =
+ kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
+ GFP_KERNEL);
+ if (!vcpu_e500->tlb_refs[0])
+ goto err;
+
+ vcpu_e500->tlb_refs[1] =
+ kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
+ GFP_KERNEL);
+ if (!vcpu_e500->tlb_refs[1])
+ goto err;
+
vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
host_tlb_params[1].entries,
GFP_KERNEL);
if (!vcpu_e500->h2g_tlb1_rmap)
- return -EINVAL;
+ goto err;
return 0;
+
+err:
+ kfree(vcpu_e500->tlb_refs[0]);
+ kfree(vcpu_e500->tlb_refs[1]);
+ return -EINVAL;
}
void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
kfree(vcpu_e500->h2g_tlb1_rmap);
+ kfree(vcpu_e500->tlb_refs[0]);
+ kfree(vcpu_e500->tlb_refs[1]);
}
diff --git a/trunk/arch/powerpc/kvm/e500mc.c b/trunk/arch/powerpc/kvm/e500mc.c
index 2f4baa074b2e..1f89d26e65fb 100644
--- a/trunk/arch/powerpc/kvm/e500mc.c
+++ b/trunk/arch/powerpc/kvm/e500mc.c
@@ -108,8 +108,6 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
}
-static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu);
-
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -138,11 +136,8 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
mtspr(SPRN_GESR, vcpu->arch.shared->esr);
- if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
- __get_cpu_var(last_vcpu_on_cpu) != vcpu) {
+ if (vcpu->arch.oldpir != mfspr(SPRN_PIR))
kvmppc_e500_tlbil_all(vcpu_e500);
- __get_cpu_var(last_vcpu_on_cpu) = vcpu;
- }
kvmppc_load_guest_fp(vcpu);
}
diff --git a/trunk/arch/s390/include/asm/io.h b/trunk/arch/s390/include/asm/io.h
index 379d96e2105e..27cb32185ce1 100644
--- a/trunk/arch/s390/include/asm/io.h
+++ b/trunk/arch/s390/include/asm/io.h
@@ -50,6 +50,10 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
#define ioremap_nocache(addr, size) ioremap(addr, size)
#define ioremap_wc ioremap_nocache
+/* TODO: s390 cannot support io_remap_pfn_range... */
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
{
return (void __iomem *) offset;
diff --git a/trunk/arch/s390/include/asm/pgtable.h b/trunk/arch/s390/include/asm/pgtable.h
index 3cb47cf02530..4a5443118cfb 100644
--- a/trunk/arch/s390/include/asm/pgtable.h
+++ b/trunk/arch/s390/include/asm/pgtable.h
@@ -57,10 +57,6 @@ extern unsigned long zero_page_mask;
(((unsigned long)(vaddr)) &zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
-/* TODO: s390 cannot support io_remap_pfn_range... */
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
- remap_pfn_range(vma, vaddr, pfn, size, prot)
-
#endif /* !__ASSEMBLY__ */
/*
diff --git a/trunk/arch/x86/include/asm/paravirt.h b/trunk/arch/x86/include/asm/paravirt.h
index 7361e47db79f..5edd1742cfd0 100644
--- a/trunk/arch/x86/include/asm/paravirt.h
+++ b/trunk/arch/x86/include/asm/paravirt.h
@@ -703,10 +703,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}
-static inline void arch_flush_lazy_mmu_mode(void)
-{
- PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
-}
+void arch_flush_lazy_mmu_mode(void);
static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
phys_addr_t phys, pgprot_t flags)
diff --git a/trunk/arch/x86/include/asm/paravirt_types.h b/trunk/arch/x86/include/asm/paravirt_types.h
index b3b0ec1dac86..142236ed83af 100644
--- a/trunk/arch/x86/include/asm/paravirt_types.h
+++ b/trunk/arch/x86/include/asm/paravirt_types.h
@@ -91,7 +91,6 @@ struct pv_lazy_ops {
/* Set deferred update mode, used for batching operations. */
void (*enter)(void);
void (*leave)(void);
- void (*flush)(void);
};
struct pv_time_ops {
@@ -680,7 +679,6 @@ void paravirt_end_context_switch(struct task_struct *next);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
-void paravirt_flush_lazy_mmu(void);
void _paravirt_nop(void);
u32 _paravirt_ident_32(u32);
diff --git a/trunk/arch/x86/include/asm/tlb.h b/trunk/arch/x86/include/asm/tlb.h
index c7797307fc2b..4fef20773b8f 100644
--- a/trunk/arch/x86/include/asm/tlb.h
+++ b/trunk/arch/x86/include/asm/tlb.h
@@ -7,7 +7,7 @@
#define tlb_flush(tlb) \
{ \
- if (!tlb->fullmm && !tlb->need_flush_all) \
+ if (tlb->fullmm == 0) \
flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \
else \
flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \
diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c b/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 26830f3af0df..b05a575d56f4 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -314,11 +314,10 @@ int intel_pmu_drain_bts_buffer(void)
if (top <= at)
return 0;
- memset(&regs, 0, sizeof(regs));
-
ds->bts_index = ds->bts_buffer_base;
perf_sample_data_init(&data, 0, event->hw.last_period);
+ regs.ip = 0;
/*
* Prepare a generic sample, i.e. fill in the invariant fields.
diff --git a/trunk/arch/x86/kernel/paravirt.c b/trunk/arch/x86/kernel/paravirt.c
index 8bfb335f74bb..17fff18a1031 100644
--- a/trunk/arch/x86/kernel/paravirt.c
+++ b/trunk/arch/x86/kernel/paravirt.c
@@ -263,18 +263,6 @@ void paravirt_leave_lazy_mmu(void)
leave_lazy(PARAVIRT_LAZY_MMU);
}
-void paravirt_flush_lazy_mmu(void)
-{
- preempt_disable();
-
- if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
- arch_leave_lazy_mmu_mode();
- arch_enter_lazy_mmu_mode();
- }
-
- preempt_enable();
-}
-
void paravirt_start_context_switch(struct task_struct *prev)
{
BUG_ON(preemptible());
@@ -304,6 +292,18 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
return this_cpu_read(paravirt_lazy_mode);
}
+void arch_flush_lazy_mmu_mode(void)
+{
+ preempt_disable();
+
+ if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+ arch_leave_lazy_mmu_mode();
+ arch_enter_lazy_mmu_mode();
+ }
+
+ preempt_enable();
+}
+
struct pv_info pv_info = {
.name = "bare hardware",
.paravirt_enabled = 0,
@@ -475,7 +475,6 @@ struct pv_mmu_ops pv_mmu_ops = {
.lazy_mode = {
.enter = paravirt_nop,
.leave = paravirt_nop,
- .flush = paravirt_nop,
},
.set_fixmap = native_set_fixmap,
diff --git a/trunk/arch/x86/lguest/boot.c b/trunk/arch/x86/lguest/boot.c
index 7114c63f047d..1cbd89ca5569 100644
--- a/trunk/arch/x86/lguest/boot.c
+++ b/trunk/arch/x86/lguest/boot.c
@@ -1334,7 +1334,6 @@ __init void lguest_init(void)
pv_mmu_ops.read_cr3 = lguest_read_cr3;
pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
- pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu;
pv_mmu_ops.pte_update = lguest_pte_update;
pv_mmu_ops.pte_update_defer = lguest_pte_update;
diff --git a/trunk/arch/x86/mm/fault.c b/trunk/arch/x86/mm/fault.c
index 0e883364abb5..2b97525246d4 100644
--- a/trunk/arch/x86/mm/fault.c
+++ b/trunk/arch/x86/mm/fault.c
@@ -378,12 +378,10 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
if (pgd_none(*pgd_ref))
return -1;
- if (pgd_none(*pgd)) {
+ if (pgd_none(*pgd))
set_pgd(pgd, *pgd_ref);
- arch_flush_lazy_mmu_mode();
- } else {
+ else
BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
- }
/*
* Below here mismatches are bugs because these lower tables
diff --git a/trunk/arch/x86/mm/pageattr-test.c b/trunk/arch/x86/mm/pageattr-test.c
index 0e38951e65eb..b0086567271c 100644
--- a/trunk/arch/x86/mm/pageattr-test.c
+++ b/trunk/arch/x86/mm/pageattr-test.c
@@ -68,7 +68,7 @@ static int print_split(struct split_state *s)
s->gpg++;
i += GPS/PAGE_SIZE;
} else if (level == PG_LEVEL_2M) {
- if ((pte_val(*pte) & _PAGE_PRESENT) && !(pte_val(*pte) & _PAGE_PSE)) {
+ if (!(pte_val(*pte) & _PAGE_PSE)) {
printk(KERN_ERR
"%lx level %d but not PSE %Lx\n",
addr, level, (u64)pte_val(*pte));
diff --git a/trunk/arch/x86/mm/pageattr.c b/trunk/arch/x86/mm/pageattr.c
index fb4e73ec24d8..091934e1d0d9 100644
--- a/trunk/arch/x86/mm/pageattr.c
+++ b/trunk/arch/x86/mm/pageattr.c
@@ -467,7 +467,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
* We are safe now. Check whether the new pgprot is the same:
*/
old_pte = *kpte;
- old_prot = req_prot = pte_pgprot(old_pte);
+ old_prot = new_prot = req_prot = pte_pgprot(old_pte);
pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
@@ -478,12 +478,12 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
* a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
* for the ancient hardware that doesn't support it.
*/
- if (pgprot_val(req_prot) & _PAGE_PRESENT)
- pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
+ if (pgprot_val(new_prot) & _PAGE_PRESENT)
+ pgprot_val(new_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
else
- pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
+ pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
- req_prot = canon_pgprot(req_prot);
+ new_prot = canon_pgprot(new_prot);
/*
* old_pte points to the large page base address. So we need
@@ -1413,8 +1413,6 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
* but that can deadlock->flush only current cpu:
*/
__flush_tlb_all();
-
- arch_flush_lazy_mmu_mode();
}
#ifdef CONFIG_HIBERNATION
diff --git a/trunk/arch/x86/mm/pgtable.c b/trunk/arch/x86/mm/pgtable.c
index 17fda6a8b3c2..193350b51f90 100644
--- a/trunk/arch/x86/mm/pgtable.c
+++ b/trunk/arch/x86/mm/pgtable.c
@@ -58,13 +58,6 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
- /*
- * NOTE! For PAE, any changes to the top page-directory-pointer-table
- * entries need a full cr3 reload to flush.
- */
-#ifdef CONFIG_X86_PAE
- tlb->need_flush_all = 1;
-#endif
tlb_remove_page(tlb, virt_to_page(pmd));
}
diff --git a/trunk/arch/x86/xen/mmu.c b/trunk/arch/x86/xen/mmu.c
index e006c18d288a..6afbb2ca9a0a 100644
--- a/trunk/arch/x86/xen/mmu.c
+++ b/trunk/arch/x86/xen/mmu.c
@@ -1748,18 +1748,14 @@ static void *m2v(phys_addr_t maddr)
}
/* Set the page permissions on an identity-mapped pages */
-static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
+static void set_page_prot(void *addr, pgprot_t prot)
{
unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
pte_t pte = pfn_pte(pfn, prot);
- if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
+ if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
BUG();
}
-static void set_page_prot(void *addr, pgprot_t prot)
-{
- return set_page_prot_flags(addr, prot, UVMF_NONE);
-}
#ifdef CONFIG_X86_32
static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
{
@@ -1843,12 +1839,12 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
unsigned long addr)
{
if (*pt_base == PFN_DOWN(__pa(addr))) {
- set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
+ set_page_prot((void *)addr, PAGE_KERNEL);
clear_page((void *)addr);
(*pt_base)++;
}
if (*pt_end == PFN_DOWN(__pa(addr))) {
- set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
+ set_page_prot((void *)addr, PAGE_KERNEL);
clear_page((void *)addr);
(*pt_end)--;
}
@@ -2200,7 +2196,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
.lazy_mode = {
.enter = paravirt_enter_lazy_mmu,
.leave = xen_leave_lazy_mmu,
- .flush = paravirt_flush_lazy_mmu,
},
.set_fixmap = xen_set_fixmap,
diff --git a/trunk/drivers/base/regmap/regmap.c b/trunk/drivers/base/regmap/regmap.c
index 58cfb3232428..d34adef1e63e 100644
--- a/trunk/drivers/base/regmap/regmap.c
+++ b/trunk/drivers/base/regmap/regmap.c
@@ -943,8 +943,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
unsigned int ival;
int val_bytes = map->format.val_bytes;
for (i = 0; i < val_len / val_bytes; i++) {
- memcpy(map->work_buf, val + (i * val_bytes), val_bytes);
- ival = map->format.parse_val(map->work_buf);
+ ival = map->format.parse_val(val + (i * val_bytes));
ret = regcache_write(map, reg + (i * map->reg_stride),
ival);
if (ret) {
diff --git a/trunk/drivers/block/rbd.c b/trunk/drivers/block/rbd.c
index b7b7a88d9f68..f556f8a8b3f9 100644
--- a/trunk/drivers/block/rbd.c
+++ b/trunk/drivers/block/rbd.c
@@ -1742,10 +1742,9 @@ static int rbd_img_request_submit(struct rbd_img_request *img_request)
struct rbd_device *rbd_dev = img_request->rbd_dev;
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_obj_request *obj_request;
- struct rbd_obj_request *next_obj_request;
dout("%s: img %p\n", __func__, img_request);
- for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
+ for_each_obj_request(img_request, obj_request) {
int ret;
obj_request->callback = rbd_img_obj_callback;
diff --git a/trunk/drivers/cpufreq/intel_pstate.c b/trunk/drivers/cpufreq/intel_pstate.c
index 6133ef5cf671..ad72922919ed 100644
--- a/trunk/drivers/cpufreq/intel_pstate.c
+++ b/trunk/drivers/cpufreq/intel_pstate.c
@@ -502,6 +502,7 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
sample_time = cpu->pstate_policy->sample_rate_ms;
delay = msecs_to_jiffies(sample_time);
+ delay -= jiffies % delay;
mod_timer_pinned(&cpu->timer, jiffies + delay);
}
diff --git a/trunk/drivers/dma/omap-dma.c b/trunk/drivers/dma/omap-dma.c
index 08b43bf37158..c4b4fd2acc42 100644
--- a/trunk/drivers/dma/omap-dma.c
+++ b/trunk/drivers/dma/omap-dma.c
@@ -276,20 +276,12 @@ static void omap_dma_issue_pending(struct dma_chan *chan)
spin_lock_irqsave(&c->vc.lock, flags);
if (vchan_issue_pending(&c->vc) && !c->desc) {
- /*
- * c->cyclic is used only by audio and in this case the DMA need
- * to be started without delay.
- */
- if (!c->cyclic) {
- struct omap_dmadev *d = to_omap_dma_dev(chan->device);
- spin_lock(&d->lock);
- if (list_empty(&c->node))
- list_add_tail(&c->node, &d->pending);
- spin_unlock(&d->lock);
- tasklet_schedule(&d->task);
- } else {
- omap_dma_start_desc(c);
- }
+ struct omap_dmadev *d = to_omap_dma_dev(chan->device);
+ spin_lock(&d->lock);
+ if (list_empty(&c->node))
+ list_add_tail(&c->node, &d->pending);
+ spin_unlock(&d->lock);
+ tasklet_schedule(&d->task);
}
spin_unlock_irqrestore(&c->vc.lock, flags);
}
diff --git a/trunk/drivers/dma/pl330.c b/trunk/drivers/dma/pl330.c
index 5dbc5946c4c3..718153122759 100644
--- a/trunk/drivers/dma/pl330.c
+++ b/trunk/drivers/dma/pl330.c
@@ -2882,7 +2882,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
struct dma_pl330_platdata *pdat;
struct dma_pl330_dmac *pdmac;
- struct dma_pl330_chan *pch, *_p;
+ struct dma_pl330_chan *pch;
struct pl330_info *pi;
struct dma_device *pd;
struct resource *res;
@@ -2984,16 +2984,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
ret = dma_async_device_register(pd);
if (ret) {
dev_err(&adev->dev, "unable to register DMAC\n");
- goto probe_err3;
- }
-
- if (adev->dev.of_node) {
- ret = of_dma_controller_register(adev->dev.of_node,
- of_dma_pl330_xlate, pdmac);
- if (ret) {
- dev_err(&adev->dev,
- "unable to register DMA to the generic DT DMA helpers\n");
- }
+ goto probe_err2;
}
dev_info(&adev->dev,
@@ -3004,21 +2995,16 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
pi->pcfg.num_peri, pi->pcfg.num_events);
- return 0;
-probe_err3:
- amba_set_drvdata(adev, NULL);
-
- /* Idle the DMAC */
- list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
- chan.device_node) {
+ ret = of_dma_controller_register(adev->dev.of_node,
+ of_dma_pl330_xlate, pdmac);
+ if (ret) {
+ dev_err(&adev->dev,
+ "unable to register DMA to the generic DT DMA helpers\n");
+ goto probe_err2;
+ }
- /* Remove the channel */
- list_del(&pch->chan.device_node);
+ return 0;
- /* Flush the channel */
- pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
- pl330_free_chan_resources(&pch->chan);
- }
probe_err2:
pl330_del(pi);
probe_err1:
@@ -3037,10 +3023,8 @@ static int pl330_remove(struct amba_device *adev)
if (!pdmac)
return 0;
- if (adev->dev.of_node)
- of_dma_controller_free(adev->dev.of_node);
+ of_dma_controller_free(adev->dev.of_node);
- dma_async_device_unregister(&pdmac->ddma);
amba_set_drvdata(adev, NULL);
/* Idle the DMAC */
diff --git a/trunk/drivers/gpio/gpio-pca953x.c b/trunk/drivers/gpio/gpio-pca953x.c
index 9391cf16e990..24059462c87f 100644
--- a/trunk/drivers/gpio/gpio-pca953x.c
+++ b/trunk/drivers/gpio/gpio-pca953x.c
@@ -575,7 +575,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
chip->gpio_chip.ngpio,
irq_base,
&pca953x_irq_simple_ops,
- chip);
+ NULL);
if (!chip->domain)
return -ENODEV;
diff --git a/trunk/drivers/gpu/drm/drm_fb_helper.c b/trunk/drivers/gpu/drm/drm_fb_helper.c
index 892ff9f95975..59d6b9bf204b 100644
--- a/trunk/drivers/gpu/drm/drm_fb_helper.c
+++ b/trunk/drivers/gpu/drm/drm_fb_helper.c
@@ -1544,10 +1544,10 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
if (!fb_helper->fb)
return 0;
- mutex_lock(&fb_helper->dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
if (!drm_fb_helper_is_bound(fb_helper)) {
fb_helper->delayed_hotplug = true;
- mutex_unlock(&fb_helper->dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return 0;
}
DRM_DEBUG_KMS("\n");
@@ -1558,11 +1558,9 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
max_height);
- mutex_unlock(&fb_helper->dev->mode_config.mutex);
-
- drm_modeset_lock_all(dev);
drm_setup_crtcs(fb_helper);
drm_modeset_unlock_all(dev);
+
drm_fb_helper_set_par(fb_helper->fbdev);
return 0;
diff --git a/trunk/drivers/gpu/drm/udl/udl_connector.c b/trunk/drivers/gpu/drm/udl/udl_connector.c
index b44d548c56f8..fe5cdbcf2636 100644
--- a/trunk/drivers/gpu/drm/udl/udl_connector.c
+++ b/trunk/drivers/gpu/drm/udl/udl_connector.c
@@ -61,10 +61,6 @@ static int udl_get_modes(struct drm_connector *connector)
int ret;
edid = (struct edid *)udl_get_edid(udl);
- if (!edid) {
- drm_mode_connector_update_edid_property(connector, NULL);
- return 0;
- }
/*
* We only read the main block, but if the monitor reports extension
diff --git a/trunk/drivers/input/tablet/wacom_wac.c b/trunk/drivers/input/tablet/wacom_wac.c
index 49fdff02b5fe..1daa97913b7d 100644
--- a/trunk/drivers/input/tablet/wacom_wac.c
+++ b/trunk/drivers/input/tablet/wacom_wac.c
@@ -1912,7 +1912,7 @@ static const struct wacom_features wacom_features_0xBB =
{ "Wacom Intuos4 12x19", WACOM_PKGLEN_INTUOS, 97536, 60960, 2047,
63, INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0xBC =
- { "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40640, 25400, 2047,
+ { "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40840, 25400, 2047,
63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0x26 =
{ "Wacom Intuos5 touch S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047,
@@ -2209,7 +2209,7 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x47) },
{ USB_DEVICE_WACOM(0xF4) },
{ USB_DEVICE_WACOM(0xF8) },
- { USB_DEVICE_DETAILED(0xF6, USB_CLASS_HID, 0, 0) },
+ { USB_DEVICE_WACOM(0xF6) },
{ USB_DEVICE_WACOM(0xFA) },
{ USB_DEVICE_LENOVO(0x6004) },
{ }
diff --git a/trunk/drivers/irqchip/irq-gic.c b/trunk/drivers/irqchip/irq-gic.c
index fc6aebf1e4b2..a32e0d5aa45f 100644
--- a/trunk/drivers/irqchip/irq-gic.c
+++ b/trunk/drivers/irqchip/irq-gic.c
@@ -236,8 +236,7 @@ static int gic_retrigger(struct irq_data *d)
if (gic_arch_extn.irq_retrigger)
return gic_arch_extn.irq_retrigger(d);
- /* the genirq layer expects 0 if we can't retrigger in hardware */
- return 0;
+ return -ENXIO;
}
#ifdef CONFIG_SMP
diff --git a/trunk/drivers/misc/vmw_vmci/Kconfig b/trunk/drivers/misc/vmw_vmci/Kconfig
index ea98f7e9ccd1..39c2ecadb273 100644
--- a/trunk/drivers/misc/vmw_vmci/Kconfig
+++ b/trunk/drivers/misc/vmw_vmci/Kconfig
@@ -4,7 +4,7 @@
config VMWARE_VMCI
tristate "VMware VMCI Driver"
- depends on X86 && PCI && NET
+ depends on X86 && PCI
help
This is VMware's Virtual Machine Communication Interface. It enables
high-speed communication between host and guest in a virtual
diff --git a/trunk/drivers/scsi/ibmvscsi/ibmvscsi.c b/trunk/drivers/scsi/ibmvscsi/ibmvscsi.c
index d0fa4b6c551f..a044f593e8b9 100644
--- a/trunk/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/trunk/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1899,8 +1899,8 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
sdev->allow_restart = 1;
blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
}
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
return 0;
}
diff --git a/trunk/drivers/scsi/ipr.c b/trunk/drivers/scsi/ipr.c
index 2197b57fb225..f328089a1060 100644
--- a/trunk/drivers/scsi/ipr.c
+++ b/trunk/drivers/scsi/ipr.c
@@ -5148,7 +5148,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
ipr_trace;
}
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
if (!ipr_is_naca_model(res))
res->needs_sync_complete = 1;
@@ -9349,10 +9349,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->intr_flag == IPR_USE_MSIX)
- rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
- else
- rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
+ rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
if (rc) {
dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
return rc;
@@ -9374,10 +9371,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->intr_flag == IPR_USE_MSIX)
- free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
- else
- free_irq(pdev->irq, ioa_cfg);
+ free_irq(pdev->irq, ioa_cfg);
LEAVE;
@@ -9728,7 +9722,6 @@ static void __ipr_remove(struct pci_dev *pdev)
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
flush_work(&ioa_cfg->work_q);
- INIT_LIST_HEAD(&ioa_cfg->used_res_q);
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
spin_lock(&ipr_driver_lock);
diff --git a/trunk/drivers/scsi/libsas/sas_expander.c b/trunk/drivers/scsi/libsas/sas_expander.c
index 55cbd0180159..aec2e0da5016 100644
--- a/trunk/drivers/scsi/libsas/sas_expander.c
+++ b/trunk/drivers/scsi/libsas/sas_expander.c
@@ -235,17 +235,6 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
linkrate = phy->linkrate;
memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
- /* Handle vacant phy - rest of dr data is not valid so skip it */
- if (phy->phy_state == PHY_VACANT) {
- memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
- phy->attached_dev_type = NO_DEVICE;
- if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
- phy->phy_id = phy_id;
- goto skip;
- } else
- goto out;
- }
-
phy->attached_dev_type = to_dev_type(dr);
if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))
goto out;
@@ -283,7 +272,6 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
phy->phy->maximum_linkrate = dr->pmax_linkrate;
phy->phy->negotiated_linkrate = phy->linkrate;
- skip:
if (new_phy)
if (sas_phy_add(phy->phy)) {
sas_phy_free(phy->phy);
@@ -400,7 +388,7 @@ int sas_ex_phy_discover(struct domain_device *dev, int single)
if (!disc_req)
return -ENOMEM;
- disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
+ disc_resp = alloc_smp_req(DISCOVER_RESP_SIZE);
if (!disc_resp) {
kfree(disc_req);
return -ENOMEM;
diff --git a/trunk/drivers/scsi/lpfc/lpfc_sli.c b/trunk/drivers/scsi/lpfc/lpfc_sli.c
index d43faf34c1e2..74b67d98e952 100644
--- a/trunk/drivers/scsi/lpfc/lpfc_sli.c
+++ b/trunk/drivers/scsi/lpfc/lpfc_sli.c
@@ -438,12 +438,11 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
struct lpfc_rqe *temp_hrqe;
struct lpfc_rqe *temp_drqe;
struct lpfc_register doorbell;
- int put_index;
+ int put_index = hq->host_index;
/* sanity check on queue memory */
if (unlikely(!hq) || unlikely(!dq))
return -ENOMEM;
- put_index = hq->host_index;
temp_hrqe = hq->qe[hq->host_index].rqe;
temp_drqe = dq->qe[dq->host_index].rqe;
diff --git a/trunk/drivers/scsi/qla2xxx/qla_attr.c b/trunk/drivers/scsi/qla2xxx/qla_attr.c
index b3db9dcc2619..1d82eef4e1eb 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_attr.c
+++ b/trunk/drivers/scsi/qla2xxx/qla_attr.c
@@ -1938,6 +1938,11 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
"Timer for the VP[%d] has stopped\n", vha->vp_idx);
}
+ /* No pending activities shall be there on the vha now */
+ if (ql2xextended_error_logging & ql_dbg_user)
+ msleep(random32()%10); /* Just to see if something falls on
+ * the net we have placed below */
+
BUG_ON(atomic_read(&vha->vref_count));
qla2x00_free_fcports(vha);
diff --git a/trunk/drivers/scsi/qla2xxx/qla_dbg.c b/trunk/drivers/scsi/qla2xxx/qla_dbg.c
index fbc305f1c87c..1626de52e32a 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/trunk/drivers/scsi/qla2xxx/qla_dbg.c
@@ -15,7 +15,6 @@
* | Mailbox commands | 0x115b | 0x111a-0x111b |
* | | | 0x112c-0x112e |
* | | | 0x113a |
- * | | | 0x1155-0x1158 |
* | Device Discovery | 0x2087 | 0x2020-0x2022, |
* | | | 0x2016 |
* | Queue Command and IO tracing | 0x3031 | 0x3006-0x300b |
@@ -402,7 +401,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
void *ring;
} aq, *aqp;
- if (!ha->tgt.atio_ring)
+ if (!ha->tgt.atio_q_length)
return ptr;
num_queues = 1;
diff --git a/trunk/drivers/scsi/qla2xxx/qla_def.h b/trunk/drivers/scsi/qla2xxx/qla_def.h
index 65c5ff75936b..c6509911772b 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_def.h
+++ b/trunk/drivers/scsi/qla2xxx/qla_def.h
@@ -863,6 +863,7 @@ typedef struct {
#define MBX_1 BIT_1
#define MBX_0 BIT_0
+#define RNID_TYPE_SET_VERSION 0x9
#define RNID_TYPE_ASIC_TEMP 0xC
/*
diff --git a/trunk/drivers/scsi/qla2xxx/qla_gbl.h b/trunk/drivers/scsi/qla2xxx/qla_gbl.h
index b310fa97b545..eb3ca21a7f17 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/trunk/drivers/scsi/qla2xxx/qla_gbl.h
@@ -357,6 +357,9 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *, dma_addr_t, uint16_t , uint16_t *,
extern int
qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *);
+extern int
+qla2x00_set_driver_version(scsi_qla_host_t *, char *);
+
extern int
qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
uint16_t, uint16_t, uint16_t, uint16_t);
diff --git a/trunk/drivers/scsi/qla2xxx/qla_init.c b/trunk/drivers/scsi/qla2xxx/qla_init.c
index b59203393cb2..edf4d14a1335 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_init.c
+++ b/trunk/drivers/scsi/qla2xxx/qla_init.c
@@ -619,6 +619,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
qla24xx_read_fcp_prio_cfg(vha);
+ qla2x00_set_driver_version(vha, QLA2XXX_VERSION);
+
return (rval);
}
@@ -1397,7 +1399,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
mq_size += ha->max_rsp_queues *
(rsp->length * sizeof(response_t));
}
- if (ha->tgt.atio_ring)
+ if (ha->tgt.atio_q_length)
mq_size += ha->tgt.atio_q_length * sizeof(request_t);
/* Allocate memory for Fibre Channel Event Buffer. */
if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
diff --git a/trunk/drivers/scsi/qla2xxx/qla_mbx.c b/trunk/drivers/scsi/qla2xxx/qla_mbx.c
index 43345af56431..186dd59ce4fa 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/trunk/drivers/scsi/qla2xxx/qla_mbx.c
@@ -3866,6 +3866,64 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
return rval;
}
+int
+qla2x00_set_driver_version(scsi_qla_host_t *vha, char *version)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int len;
+ uint16_t dwlen;
+ uint8_t *str;
+ dma_addr_t str_dma;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_FWI2_CAPABLE(ha) || IS_QLA82XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1155,
+ "Entered %s.\n", __func__);
+
+ str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
+ if (!str) {
+ ql_log(ql_log_warn, vha, 0x1156,
+ "Failed to allocate driver version param.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ memcpy(str, "\x7\x3\x11\x0", 4);
+ dwlen = str[0];
+ len = dwlen * sizeof(uint32_t) - 4;
+ memset(str + 4, 0, len);
+ if (len > strlen(version))
+ len = strlen(version);
+ memcpy(str + 4, version, len);
+
+ mcp->mb[0] = MBC_SET_RNID_PARAMS;
+ mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
+ mcp->mb[2] = MSW(LSD(str_dma));
+ mcp->mb[3] = LSW(LSD(str_dma));
+ mcp->mb[6] = MSW(MSD(str_dma));
+ mcp->mb[7] = LSW(MSD(str_dma));
+ mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1157,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1158,
+ "Done %s.\n", __func__);
+ }
+
+ dma_pool_free(ha->s_dma_pool, str, str_dma);
+
+ return rval;
+}
+
static int
qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
{
diff --git a/trunk/drivers/scsi/qla2xxx/qla_version.h b/trunk/drivers/scsi/qla2xxx/qla_version.h
index ec54036d1e12..2b6e478d9e33 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_version.h
+++ b/trunk/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.04.00.13-k"
+#define QLA2XXX_VERSION "8.04.00.08-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 4
diff --git a/trunk/drivers/scsi/st.c b/trunk/drivers/scsi/st.c
index 2a32036a9404..86974471af68 100644
--- a/trunk/drivers/scsi/st.c
+++ b/trunk/drivers/scsi/st.c
@@ -4112,10 +4112,6 @@ static int st_probe(struct device *dev)
tpnt->disk = disk;
disk->private_data = &tpnt->driver;
disk->queue = SDp->request_queue;
- /* SCSI tape doesn't register this gendisk via add_disk(). Manually
- * take queue reference that release_disk() expects. */
- if (!blk_get_queue(disk->queue))
- goto out_put_disk;
tpnt->driver = &st_template;
tpnt->device = SDp;
@@ -4189,7 +4185,7 @@ static int st_probe(struct device *dev)
idr_preload_end();
if (error < 0) {
pr_warn("st: idr allocation failed: %d\n", error);
- goto out_put_queue;
+ goto out_put_disk;
}
tpnt->index = error;
sprintf(disk->disk_name, "st%d", tpnt->index);
@@ -4215,8 +4211,6 @@ static int st_probe(struct device *dev)
spin_lock(&st_index_lock);
idr_remove(&st_index_idr, tpnt->index);
spin_unlock(&st_index_lock);
-out_put_queue:
- blk_put_queue(disk->queue);
out_put_disk:
put_disk(disk);
kfree(tpnt);
diff --git a/trunk/drivers/target/target_core_alua.c b/trunk/drivers/target/target_core_alua.c
index cbe48ab41745..ff1c5ee352cb 100644
--- a/trunk/drivers/target/target_core_alua.c
+++ b/trunk/drivers/target/target_core_alua.c
@@ -409,7 +409,6 @@ static inline int core_alua_state_standby(
case REPORT_LUNS:
case RECEIVE_DIAGNOSTIC:
case SEND_DIAGNOSTIC:
- return 0;
case MAINTENANCE_IN:
switch (cdb[1] & 0x1f) {
case MI_REPORT_TARGET_PGS:
@@ -452,7 +451,6 @@ static inline int core_alua_state_unavailable(
switch (cdb[0]) {
case INQUIRY:
case REPORT_LUNS:
- return 0;
case MAINTENANCE_IN:
switch (cdb[1] & 0x1f) {
case MI_REPORT_TARGET_PGS:
@@ -493,7 +491,6 @@ static inline int core_alua_state_transition(
switch (cdb[0]) {
case INQUIRY:
case REPORT_LUNS:
- return 0;
case MAINTENANCE_IN:
switch (cdb[1] & 0x1f) {
case MI_REPORT_TARGET_PGS:
diff --git a/trunk/drivers/tty/mxser.c b/trunk/drivers/tty/mxser.c
index 302909ccf183..484b6a3c9b03 100644
--- a/trunk/drivers/tty/mxser.c
+++ b/trunk/drivers/tty/mxser.c
@@ -2643,9 +2643,9 @@ static int mxser_probe(struct pci_dev *pdev,
mxvar_sdriver, brd->idx + i, &pdev->dev);
if (IS_ERR(tty_dev)) {
retval = PTR_ERR(tty_dev);
- for (; i > 0; i--)
+ for (i--; i >= 0; i--)
tty_unregister_device(mxvar_sdriver,
- brd->idx + i - 1);
+ brd->idx + i);
goto err_relbrd;
}
}
@@ -2751,9 +2751,9 @@ static int __init mxser_module_init(void)
tty_dev = tty_port_register_device(&brd->ports[i].port,
mxvar_sdriver, brd->idx + i, NULL);
if (IS_ERR(tty_dev)) {
- for (; i > 0; i--)
+ for (i--; i >= 0; i--)
tty_unregister_device(mxvar_sdriver,
- brd->idx + i - 1);
+ brd->idx + i);
for (i = 0; i < brd->info->nports; i++)
tty_port_destroy(&brd->ports[i].port);
free_irq(brd->irq, brd);
diff --git a/trunk/drivers/tty/serial/8250/8250_pnp.c b/trunk/drivers/tty/serial/8250/8250_pnp.c
index 35d9ab95c5cb..b3455a970a1d 100644
--- a/trunk/drivers/tty/serial/8250/8250_pnp.c
+++ b/trunk/drivers/tty/serial/8250/8250_pnp.c
@@ -429,6 +429,7 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
{
struct uart_8250_port uart;
int ret, line, flags = dev_id->driver_data;
+ struct resource *res = NULL;
if (flags & UNKNOWN_DEV) {
ret = serial_pnp_guess_board(dev);
@@ -439,11 +440,12 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
memset(&uart, 0, sizeof(uart));
if (pnp_irq_valid(dev, 0))
uart.port.irq = pnp_irq(dev, 0);
- if ((flags & CIR_PORT) && pnp_port_valid(dev, 2)) {
- uart.port.iobase = pnp_port_start(dev, 2);
- uart.port.iotype = UPIO_PORT;
- } else if (pnp_port_valid(dev, 0)) {
- uart.port.iobase = pnp_port_start(dev, 0);
+ if ((flags & CIR_PORT) && pnp_port_valid(dev, 2))
+ res = pnp_get_resource(dev, IORESOURCE_IO, 2);
+ else if (pnp_port_valid(dev, 0))
+ res = pnp_get_resource(dev, IORESOURCE_IO, 0);
+ if (pnp_resource_enabled(res)) {
+ uart.port.iobase = res->start;
uart.port.iotype = UPIO_PORT;
} else if (pnp_mem_valid(dev, 0)) {
uart.port.mapbase = pnp_mem_start(dev, 0);
diff --git a/trunk/drivers/tty/serial/omap-serial.c b/trunk/drivers/tty/serial/omap-serial.c
index 30d4f7a783cd..4dc41408ecb7 100644
--- a/trunk/drivers/tty/serial/omap-serial.c
+++ b/trunk/drivers/tty/serial/omap-serial.c
@@ -886,17 +886,6 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
/* FIFO ENABLE, DMA MODE */
- up->scr |= OMAP_UART_SCR_RX_TRIG_GRANU1_MASK;
- /*
- * NOTE: Setting OMAP_UART_SCR_RX_TRIG_GRANU1_MASK
- * sets Enables the granularity of 1 for TRIGGER RX
- * level. Along with setting RX FIFO trigger level
- * to 1 (as noted below, 16 characters) and TLR[3:0]
- * to zero this will result RX FIFO threshold level
- * to 1 character, instead of 16 as noted in comment
- * below.
- */
-
/* Set receive FIFO threshold to 16 characters and
* transmit FIFO threshold to 16 spaces
*/
diff --git a/trunk/drivers/vhost/tcm_vhost.c b/trunk/drivers/vhost/tcm_vhost.c
index 957a0b98a5d9..2968b4934659 100644
--- a/trunk/drivers/vhost/tcm_vhost.c
+++ b/trunk/drivers/vhost/tcm_vhost.c
@@ -74,8 +74,9 @@ enum {
struct vhost_scsi {
/* Protected by vhost_scsi->dev.mutex */
- struct tcm_vhost_tpg **vs_tpg;
+ struct tcm_vhost_tpg *vs_tpg[VHOST_SCSI_MAX_TARGET];
char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
+ bool vs_endpoint;
struct vhost_dev dev;
struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ];
@@ -578,27 +579,9 @@ static void tcm_vhost_submission_work(struct work_struct *work)
}
}
-static void vhost_scsi_send_bad_target(struct vhost_scsi *vs,
- struct vhost_virtqueue *vq, int head, unsigned out)
-{
- struct virtio_scsi_cmd_resp __user *resp;
- struct virtio_scsi_cmd_resp rsp;
- int ret;
-
- memset(&rsp, 0, sizeof(rsp));
- rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
- resp = vq->iov[out].iov_base;
- ret = __copy_to_user(resp, &rsp, sizeof(rsp));
- if (!ret)
- vhost_add_used_and_signal(&vs->dev, vq, head, 0);
- else
- pr_err("Faulted on virtio_scsi_cmd_resp\n");
-}
-
static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
struct vhost_virtqueue *vq)
{
- struct tcm_vhost_tpg **vs_tpg;
struct virtio_scsi_cmd_req v_req;
struct tcm_vhost_tpg *tv_tpg;
struct tcm_vhost_cmd *tv_cmd;
@@ -607,16 +590,8 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
int head, ret;
u8 target;
- /*
- * We can handle the vq only after the endpoint is setup by calling the
- * VHOST_SCSI_SET_ENDPOINT ioctl.
- *
- * TODO: Check that we are running from vhost_worker which acts
- * as read-side critical section for vhost kind of RCU.
- * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h
- */
- vs_tpg = rcu_dereference_check(vq->private_data, 1);
- if (!vs_tpg)
+ /* Must use ioctl VHOST_SCSI_SET_ENDPOINT */
+ if (unlikely(!vs->vs_endpoint))
return;
mutex_lock(&vq->mutex);
@@ -686,11 +661,23 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
/* Extract the tpgt */
target = v_req.lun[1];
- tv_tpg = ACCESS_ONCE(vs_tpg[target]);
+ tv_tpg = vs->vs_tpg[target];
/* Target does not exist, fail the request */
if (unlikely(!tv_tpg)) {
- vhost_scsi_send_bad_target(vs, vq, head, out);
+ struct virtio_scsi_cmd_resp __user *resp;
+ struct virtio_scsi_cmd_resp rsp;
+
+ memset(&rsp, 0, sizeof(rsp));
+ rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
+ resp = vq->iov[out].iov_base;
+ ret = __copy_to_user(resp, &rsp, sizeof(rsp));
+ if (!ret)
+ vhost_add_used_and_signal(&vs->dev,
+ vq, head, 0);
+ else
+ pr_err("Faulted on virtio_scsi_cmd_resp\n");
+
continue;
}
@@ -703,13 +690,22 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
if (IS_ERR(tv_cmd)) {
vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
PTR_ERR(tv_cmd));
- goto err_cmd;
+ break;
}
pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
": %d\n", tv_cmd, exp_data_len, data_direction);
tv_cmd->tvc_vhost = vs;
tv_cmd->tvc_vq = vq;
+
+ if (unlikely(vq->iov[out].iov_len !=
+ sizeof(struct virtio_scsi_cmd_resp))) {
+ vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
+ " bytes, out: %d, in: %d\n",
+ vq->iov[out].iov_len, out, in);
+ break;
+ }
+
tv_cmd->tvc_resp = vq->iov[out].iov_base;
/*
@@ -729,7 +725,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
scsi_command_size(tv_cmd->tvc_cdb),
TCM_VHOST_MAX_CDB_SIZE);
- goto err_free;
+ break; /* TODO */
}
tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
@@ -742,7 +738,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
data_direction == DMA_TO_DEVICE);
if (unlikely(ret)) {
vq_err(vq, "Failed to map iov to sgl\n");
- goto err_free;
+ break; /* TODO */
}
}
@@ -763,13 +759,6 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
}
mutex_unlock(&vq->mutex);
- return;
-
-err_free:
- vhost_scsi_free_cmd(tv_cmd);
-err_cmd:
- vhost_scsi_send_bad_target(vs, vq, head, out);
- mutex_unlock(&vq->mutex);
}
static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
@@ -791,20 +780,6 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
vhost_scsi_handle_vq(vs, vq);
}
-static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
-{
- vhost_poll_flush(&vs->dev.vqs[index].poll);
-}
-
-static void vhost_scsi_flush(struct vhost_scsi *vs)
-{
- int i;
-
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
- vhost_scsi_flush_vq(vs, i);
- vhost_work_flush(&vs->dev, &vs->vs_completion_work);
-}
-
/*
* Called from vhost_scsi_ioctl() context to walk the list of available
* tcm_vhost_tpg with an active struct tcm_vhost_nexus
@@ -815,10 +790,8 @@ static int vhost_scsi_set_endpoint(
{
struct tcm_vhost_tport *tv_tport;
struct tcm_vhost_tpg *tv_tpg;
- struct tcm_vhost_tpg **vs_tpg;
- struct vhost_virtqueue *vq;
- int index, ret, i, len;
bool match = false;
+ int index, ret;
mutex_lock(&vs->dev.mutex);
/* Verify that ring has been setup correctly. */
@@ -830,15 +803,6 @@ static int vhost_scsi_set_endpoint(
}
}
- len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
- vs_tpg = kzalloc(len, GFP_KERNEL);
- if (!vs_tpg) {
- mutex_unlock(&vs->dev.mutex);
- return -ENOMEM;
- }
- if (vs->vs_tpg)
- memcpy(vs_tpg, vs->vs_tpg, len);
-
mutex_lock(&tcm_vhost_mutex);
list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
mutex_lock(&tv_tpg->tv_tpg_mutex);
@@ -853,15 +817,14 @@ static int vhost_scsi_set_endpoint(
tv_tport = tv_tpg->tport;
if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
- if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
+ if (vs->vs_tpg[tv_tpg->tport_tpgt]) {
mutex_unlock(&tv_tpg->tv_tpg_mutex);
mutex_unlock(&tcm_vhost_mutex);
mutex_unlock(&vs->dev.mutex);
- kfree(vs_tpg);
return -EEXIST;
}
tv_tpg->tv_tpg_vhost_count++;
- vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
+ vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
smp_mb__after_atomic_inc();
match = true;
}
@@ -872,27 +835,12 @@ static int vhost_scsi_set_endpoint(
if (match) {
memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
sizeof(vs->vs_vhost_wwpn));
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
- vq = &vs->vqs[i];
- /* Flushing the vhost_work acts as synchronize_rcu */
- mutex_lock(&vq->mutex);
- rcu_assign_pointer(vq->private_data, vs_tpg);
- vhost_init_used(vq);
- mutex_unlock(&vq->mutex);
- }
+ vs->vs_endpoint = true;
ret = 0;
} else {
ret = -EEXIST;
}
- /*
- * Act as synchronize_rcu to make sure access to
- * old vs->vs_tpg is finished.
- */
- vhost_scsi_flush(vs);
- kfree(vs->vs_tpg);
- vs->vs_tpg = vs_tpg;
-
mutex_unlock(&vs->dev.mutex);
return ret;
}
@@ -903,8 +851,6 @@ static int vhost_scsi_clear_endpoint(
{
struct tcm_vhost_tport *tv_tport;
struct tcm_vhost_tpg *tv_tpg;
- struct vhost_virtqueue *vq;
- bool match = false;
int index, ret, i;
u8 target;
@@ -916,14 +862,9 @@ static int vhost_scsi_clear_endpoint(
goto err_dev;
}
}
-
- if (!vs->vs_tpg) {
- mutex_unlock(&vs->dev.mutex);
- return 0;
- }
-
for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
target = i;
+
tv_tpg = vs->vs_tpg[target];
if (!tv_tpg)
continue;
@@ -945,27 +886,10 @@ static int vhost_scsi_clear_endpoint(
}
tv_tpg->tv_tpg_vhost_count--;
vs->vs_tpg[target] = NULL;
- match = true;
+ vs->vs_endpoint = false;
mutex_unlock(&tv_tpg->tv_tpg_mutex);
}
- if (match) {
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
- vq = &vs->vqs[i];
- /* Flushing the vhost_work acts as synchronize_rcu */
- mutex_lock(&vq->mutex);
- rcu_assign_pointer(vq->private_data, NULL);
- mutex_unlock(&vq->mutex);
- }
- }
- /*
- * Act as synchronize_rcu to make sure access to
- * old vs->vs_tpg is finished.
- */
- vhost_scsi_flush(vs);
- kfree(vs->vs_tpg);
- vs->vs_tpg = NULL;
mutex_unlock(&vs->dev.mutex);
-
return 0;
err_tpg:
@@ -975,24 +899,6 @@ static int vhost_scsi_clear_endpoint(
return ret;
}
-static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
-{
- if (features & ~VHOST_SCSI_FEATURES)
- return -EOPNOTSUPP;
-
- mutex_lock(&vs->dev.mutex);
- if ((features & (1 << VHOST_F_LOG_ALL)) &&
- !vhost_log_access_ok(&vs->dev)) {
- mutex_unlock(&vs->dev.mutex);
- return -EFAULT;
- }
- vs->dev.acked_features = features;
- smp_wmb();
- vhost_scsi_flush(vs);
- mutex_unlock(&vs->dev.mutex);
- return 0;
-}
-
static int vhost_scsi_open(struct inode *inode, struct file *f)
{
struct vhost_scsi *s;
@@ -1033,6 +939,38 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
return 0;
}
+static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
+{
+ vhost_poll_flush(&vs->dev.vqs[index].poll);
+}
+
+static void vhost_scsi_flush(struct vhost_scsi *vs)
+{
+ int i;
+
+ for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
+ vhost_scsi_flush_vq(vs, i);
+ vhost_work_flush(&vs->dev, &vs->vs_completion_work);
+}
+
+static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
+{
+ if (features & ~VHOST_SCSI_FEATURES)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&vs->dev.mutex);
+ if ((features & (1 << VHOST_F_LOG_ALL)) &&
+ !vhost_log_access_ok(&vs->dev)) {
+ mutex_unlock(&vs->dev.mutex);
+ return -EFAULT;
+ }
+ vs->dev.acked_features = features;
+ smp_wmb();
+ vhost_scsi_flush(vs);
+ mutex_unlock(&vs->dev.mutex);
+ return 0;
+}
+
static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
unsigned long arg)
{
diff --git a/trunk/drivers/watchdog/Kconfig b/trunk/drivers/watchdog/Kconfig
index e89fc3133972..9fcc70c11cea 100644
--- a/trunk/drivers/watchdog/Kconfig
+++ b/trunk/drivers/watchdog/Kconfig
@@ -117,7 +117,7 @@ config ARM_SP805_WATCHDOG
config AT91RM9200_WATCHDOG
tristate "AT91RM9200 watchdog"
- depends on ARCH_AT91RM9200
+ depends on ARCH_AT91
help
Watchdog timer embedded into AT91RM9200 chips. This will reboot your
system when the timeout is reached.
diff --git a/trunk/drivers/xen/events.c b/trunk/drivers/xen/events.c
index 2647ad8e1f19..aa85881d17b2 100644
--- a/trunk/drivers/xen/events.c
+++ b/trunk/drivers/xen/events.c
@@ -1316,7 +1316,7 @@ static void __xen_evtchn_do_upcall(void)
{
int start_word_idx, start_bit_idx;
int word_idx, bit_idx;
- int i, irq;
+ int i;
int cpu = get_cpu();
struct shared_info *s = HYPERVISOR_shared_info;
struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
@@ -1324,8 +1324,6 @@ static void __xen_evtchn_do_upcall(void)
do {
xen_ulong_t pending_words;
- xen_ulong_t pending_bits;
- struct irq_desc *desc;
vcpu_info->evtchn_upcall_pending = 0;
@@ -1337,17 +1335,6 @@ static void __xen_evtchn_do_upcall(void)
* selector flag. xchg_xen_ulong must contain an
* appropriate barrier.
*/
- if ((irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER]) != -1) {
- int evtchn = evtchn_from_irq(irq);
- word_idx = evtchn / BITS_PER_LONG;
- pending_bits = evtchn % BITS_PER_LONG;
- if (active_evtchns(cpu, s, word_idx) & (1ULL << pending_bits)) {
- desc = irq_to_desc(irq);
- if (desc)
- generic_handle_irq_desc(irq, desc);
- }
- }
-
pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);
start_word_idx = __this_cpu_read(current_word_idx);
@@ -1356,6 +1343,7 @@ static void __xen_evtchn_do_upcall(void)
word_idx = start_word_idx;
for (i = 0; pending_words != 0; i++) {
+ xen_ulong_t pending_bits;
xen_ulong_t words;
words = MASK_LSBS(pending_words, word_idx);
@@ -1384,7 +1372,8 @@ static void __xen_evtchn_do_upcall(void)
do {
xen_ulong_t bits;
- int port;
+ int port, irq;
+ struct irq_desc *desc;
bits = MASK_LSBS(pending_bits, bit_idx);
diff --git a/trunk/fs/btrfs/tree-log.c b/trunk/fs/btrfs/tree-log.c
index ef96381569a4..451fad96ecd1 100644
--- a/trunk/fs/btrfs/tree-log.c
+++ b/trunk/fs/btrfs/tree-log.c
@@ -317,7 +317,6 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
unsigned long src_ptr;
unsigned long dst_ptr;
int overwrite_root = 0;
- bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
overwrite_root = 1;
@@ -327,9 +326,6 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
/* look for the key in the destination tree */
ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
- if (ret < 0)
- return ret;
-
if (ret == 0) {
char *src_copy;
char *dst_copy;
@@ -371,30 +367,6 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
return 0;
}
- /*
- * We need to load the old nbytes into the inode so when we
- * replay the extents we've logged we get the right nbytes.
- */
- if (inode_item) {
- struct btrfs_inode_item *item;
- u64 nbytes;
-
- item = btrfs_item_ptr(path->nodes[0], path->slots[0],
- struct btrfs_inode_item);
- nbytes = btrfs_inode_nbytes(path->nodes[0], item);
- item = btrfs_item_ptr(eb, slot,
- struct btrfs_inode_item);
- btrfs_set_inode_nbytes(eb, item, nbytes);
- }
- } else if (inode_item) {
- struct btrfs_inode_item *item;
-
- /*
- * New inode, set nbytes to 0 so that the nbytes comes out
- * properly when we replay the extents.
- */
- item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
- btrfs_set_inode_nbytes(eb, item, 0);
}
insert:
btrfs_release_path(path);
@@ -514,7 +486,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
int found_type;
u64 extent_end;
u64 start = key->offset;
- u64 nbytes = 0;
+ u64 saved_nbytes;
struct btrfs_file_extent_item *item;
struct inode *inode = NULL;
unsigned long size;
@@ -524,19 +496,10 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
found_type = btrfs_file_extent_type(eb, item);
if (found_type == BTRFS_FILE_EXTENT_REG ||
- found_type == BTRFS_FILE_EXTENT_PREALLOC) {
- nbytes = btrfs_file_extent_num_bytes(eb, item);
- extent_end = start + nbytes;
-
- /*
- * We don't add to the inodes nbytes if we are prealloc or a
- * hole.
- */
- if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
- nbytes = 0;
- } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
+ found_type == BTRFS_FILE_EXTENT_PREALLOC)
+ extent_end = start + btrfs_file_extent_num_bytes(eb, item);
+ else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
size = btrfs_file_extent_inline_len(eb, item);
- nbytes = btrfs_file_extent_ram_bytes(eb, item);
extent_end = ALIGN(start + size, root->sectorsize);
} else {
ret = 0;
@@ -585,6 +548,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
}
btrfs_release_path(path);
+ saved_nbytes = inode_get_bytes(inode);
/* drop any overlapping extents */
ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
BUG_ON(ret);
@@ -671,7 +635,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
BUG_ON(ret);
}
- inode_add_bytes(inode, nbytes);
+ inode_set_bytes(inode, saved_nbytes);
ret = btrfs_update_inode(trans, root, inode);
out:
if (inode)
diff --git a/trunk/fs/cifs/connect.c b/trunk/fs/cifs/connect.c
index 21b3a291c327..991c63c6bdd0 100644
--- a/trunk/fs/cifs/connect.c
+++ b/trunk/fs/cifs/connect.c
@@ -1575,24 +1575,14 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
}
break;
case Opt_blank_pass:
+ vol->password = NULL;
+ break;
+ case Opt_pass:
/* passwords have to be handled differently
* to allow the character used for deliminator
* to be passed within them
*/
- /*
- * Check if this is a case where the password
- * starts with a delimiter
- */
- tmp_end = strchr(data, '=');
- tmp_end++;
- if (!(tmp_end < end && tmp_end[1] == delim)) {
- /* No it is not. Set the password to NULL */
- vol->password = NULL;
- break;
- }
- /* Yes it is. Drop down to Opt_pass below.*/
- case Opt_pass:
/* Obtain the value string */
value = strchr(data, '=');
value++;
diff --git a/trunk/fs/inode.c b/trunk/fs/inode.c
index a898b3d43ccf..f5f7c06c36fb 100644
--- a/trunk/fs/inode.c
+++ b/trunk/fs/inode.c
@@ -725,7 +725,7 @@ void prune_icache_sb(struct super_block *sb, int nr_to_scan)
* inode to the back of the list so we don't spin on it.
*/
if (!spin_trylock(&inode->i_lock)) {
- list_move(&inode->i_lru, &sb->s_inode_lru);
+ list_move_tail(&inode->i_lru, &sb->s_inode_lru);
continue;
}
diff --git a/trunk/fs/proc/array.c b/trunk/fs/proc/array.c
index cbd0f1b324b9..f7ed9ee46eb9 100644
--- a/trunk/fs/proc/array.c
+++ b/trunk/fs/proc/array.c
@@ -143,7 +143,6 @@ static const char * const task_state_array[] = {
"x (dead)", /* 64 */
"K (wakekill)", /* 128 */
"W (waking)", /* 256 */
- "P (parked)", /* 512 */
};
static inline const char *get_task_state(struct task_struct *tsk)
diff --git a/trunk/include/asm-generic/tlb.h b/trunk/include/asm-generic/tlb.h
index b1b1fa6ffffe..25f01d0bc149 100644
--- a/trunk/include/asm-generic/tlb.h
+++ b/trunk/include/asm-generic/tlb.h
@@ -99,12 +99,7 @@ struct mmu_gather {
unsigned int need_flush : 1, /* Did free PTEs */
fast_mode : 1; /* No batching */
- /* we are in the middle of an operation to clear
- * a full mm and can make some optimizations */
- unsigned int fullmm : 1,
- /* we have performed an operation which
- * requires a complete flush of the tlb */
- need_flush_all : 1;
+ unsigned int fullmm;
struct mmu_gather_batch *active;
struct mmu_gather_batch local;
diff --git a/trunk/include/linux/capability.h b/trunk/include/linux/capability.h
index d9a4f7f40f32..98503b792369 100644
--- a/trunk/include/linux/capability.h
+++ b/trunk/include/linux/capability.h
@@ -35,7 +35,6 @@ struct cpu_vfs_cap_data {
#define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t))
-struct file;
struct inode;
struct dentry;
struct user_namespace;
@@ -212,7 +211,6 @@ extern bool capable(int cap);
extern bool ns_capable(struct user_namespace *ns, int cap);
extern bool nsown_capable(int cap);
extern bool inode_capable(const struct inode *inode, int cap);
-extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
/* audit system wants to get cap info from files as well */
extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
diff --git a/trunk/include/linux/ftrace.h b/trunk/include/linux/ftrace.h
index 52da2a250795..167abf907802 100644
--- a/trunk/include/linux/ftrace.h
+++ b/trunk/include/linux/ftrace.h
@@ -396,6 +396,7 @@ ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
size_t cnt, loff_t *ppos);
+loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int whence);
int ftrace_regex_release(struct inode *inode, struct file *file);
void __init
@@ -568,8 +569,6 @@ static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
#endif /* CONFIG_DYNAMIC_FTRACE */
-loff_t ftrace_filter_lseek(struct file *file, loff_t offset, int whence);
-
/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);
diff --git a/trunk/include/linux/mm.h b/trunk/include/linux/mm.h
index e2091b88d24c..e19ff30ad0a2 100644
--- a/trunk/include/linux/mm.h
+++ b/trunk/include/linux/mm.h
@@ -1611,8 +1611,6 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
-int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
-
struct page *follow_page_mask(struct vm_area_struct *vma,
unsigned long address, unsigned int foll_flags,
diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h
index e692a022527b..d35d2b6ddbfb 100644
--- a/trunk/include/linux/sched.h
+++ b/trunk/include/linux/sched.h
@@ -163,10 +163,9 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
#define TASK_DEAD 64
#define TASK_WAKEKILL 128
#define TASK_WAKING 256
-#define TASK_PARKED 512
-#define TASK_STATE_MAX 1024
+#define TASK_STATE_MAX 512
-#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
+#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
extern char ___assert_task_state[1 - 2*!!(
sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
diff --git a/trunk/include/trace/events/sched.h b/trunk/include/trace/events/sched.h
index e5586caff67a..5a8671e8a67f 100644
--- a/trunk/include/trace/events/sched.h
+++ b/trunk/include/trace/events/sched.h
@@ -147,7 +147,7 @@ TRACE_EVENT(sched_switch,
__print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
{ 16, "Z" }, { 32, "X" }, { 64, "x" },
- { 128, "K" }, { 256, "W" }, { 512, "P" }) : "R",
+ { 128, "W" }) : "R",
__entry->prev_state & TASK_STATE_MAX ? "+" : "",
__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
diff --git a/trunk/kernel/capability.c b/trunk/kernel/capability.c
index f6c2ce5701e1..493d97259484 100644
--- a/trunk/kernel/capability.c
+++ b/trunk/kernel/capability.c
@@ -392,30 +392,6 @@ bool ns_capable(struct user_namespace *ns, int cap)
}
EXPORT_SYMBOL(ns_capable);
-/**
- * file_ns_capable - Determine if the file's opener had a capability in effect
- * @file: The file we want to check
- * @ns: The usernamespace we want the capability in
- * @cap: The capability to be tested for
- *
- * Return true if task that opened the file had a capability in effect
- * when the file was opened.
- *
- * This does not set PF_SUPERPRIV because the caller may not
- * actually be privileged.
- */
-bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap)
-{
- if (WARN_ON_ONCE(!cap_valid(cap)))
- return false;
-
- if (security_capable(file->f_cred, ns, cap) == 0)
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(file_ns_capable);
-
/**
* capable - Determine if the current task has a superior capability in effect
* @cap: The capability to be tested for
diff --git a/trunk/kernel/events/core.c b/trunk/kernel/events/core.c
index 7e0962ed7f8a..59412d037eed 100644
--- a/trunk/kernel/events/core.c
+++ b/trunk/kernel/events/core.c
@@ -4737,8 +4737,7 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
} else {
if (arch_vma_name(mmap_event->vma)) {
name = strncpy(tmp, arch_vma_name(mmap_event->vma),
- sizeof(tmp) - 1);
- tmp[sizeof(tmp) - 1] = '\0';
+ sizeof(tmp));
goto got_name;
}
@@ -5987,7 +5986,6 @@ int perf_pmu_register(struct pmu *pmu, char *name, int type)
if (pmu->pmu_cpu_context)
goto got_cpu_context;
- ret = -ENOMEM;
pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
if (!pmu->pmu_cpu_context)
goto free_dev;
diff --git a/trunk/kernel/events/internal.h b/trunk/kernel/events/internal.h
index eb675c4d59df..d56a64c99a8b 100644
--- a/trunk/kernel/events/internal.h
+++ b/trunk/kernel/events/internal.h
@@ -16,7 +16,7 @@ struct ring_buffer {
int page_order; /* allocation order */
#endif
int nr_pages; /* nr of data pages */
- int overwrite; /* can overwrite itself */
+ int writable; /* are we writable */
atomic_t poll; /* POLL_ for wakeups */
diff --git a/trunk/kernel/events/ring_buffer.c b/trunk/kernel/events/ring_buffer.c
index 97fddb09762b..23cb34ff3973 100644
--- a/trunk/kernel/events/ring_buffer.c
+++ b/trunk/kernel/events/ring_buffer.c
@@ -18,24 +18,12 @@
static bool perf_output_space(struct ring_buffer *rb, unsigned long tail,
unsigned long offset, unsigned long head)
{
- unsigned long sz = perf_data_size(rb);
- unsigned long mask = sz - 1;
+ unsigned long mask;
- /*
- * check if user-writable
- * overwrite : over-write its own tail
- * !overwrite: buffer possibly drops events.
- */
- if (rb->overwrite)
+ if (!rb->writable)
return true;
- /*
- * verify that payload is not bigger than buffer
- * otherwise masking logic may fail to detect
- * the "not enough space" condition
- */
- if ((head - offset) > sz)
- return false;
+ mask = perf_data_size(rb) - 1;
offset = (offset - tail) & mask;
head = (head - tail) & mask;
@@ -224,9 +212,7 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
rb->watermark = max_size / 2;
if (flags & RING_BUFFER_WRITABLE)
- rb->overwrite = 0;
- else
- rb->overwrite = 1;
+ rb->writable = 1;
atomic_set(&rb->refcount, 1);
diff --git a/trunk/kernel/hrtimer.c b/trunk/kernel/hrtimer.c
index 14be27feda49..cc47812d3feb 100644
--- a/trunk/kernel/hrtimer.c
+++ b/trunk/kernel/hrtimer.c
@@ -63,7 +63,6 @@
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
- .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
.clock_base =
{
{
@@ -1643,6 +1642,8 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
int i;
+ raw_spin_lock_init(&cpu_base->lock);
+
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
cpu_base->clock_base[i].cpu_base = cpu_base;
timerqueue_init_head(&cpu_base->clock_base[i].active);
diff --git a/trunk/kernel/kthread.c b/trunk/kernel/kthread.c
index 9eb7fed0bbaa..691dc2ef9baf 100644
--- a/trunk/kernel/kthread.c
+++ b/trunk/kernel/kthread.c
@@ -124,12 +124,12 @@ void *kthread_data(struct task_struct *task)
static void __kthread_parkme(struct kthread *self)
{
- __set_current_state(TASK_PARKED);
+ __set_current_state(TASK_INTERRUPTIBLE);
while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
complete(&self->parked);
schedule();
- __set_current_state(TASK_PARKED);
+ __set_current_state(TASK_INTERRUPTIBLE);
}
clear_bit(KTHREAD_IS_PARKED, &self->flags);
__set_current_state(TASK_RUNNING);
@@ -256,13 +256,8 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
}
EXPORT_SYMBOL(kthread_create_on_node);
-static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
+static void __kthread_bind(struct task_struct *p, unsigned int cpu)
{
- /* Must have done schedule() in kthread() before we set_task_cpu */
- if (!wait_task_inactive(p, state)) {
- WARN_ON(1);
- return;
- }
/* It's safe because the task is inactive. */
do_set_cpus_allowed(p, cpumask_of(cpu));
p->flags |= PF_THREAD_BOUND;
@@ -279,7 +274,12 @@ static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
*/
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
- __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
+ /* Must have done schedule() in kthread() before we set_task_cpu */
+ if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
+ WARN_ON(1);
+ return;
+ }
+ __kthread_bind(p, cpu);
}
EXPORT_SYMBOL(kthread_bind);
@@ -324,22 +324,6 @@ static struct kthread *task_get_live_kthread(struct task_struct *k)
return NULL;
}
-static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
-{
- clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
- /*
- * We clear the IS_PARKED bit here as we don't wait
- * until the task has left the park code. So if we'd
- * park before that happens we'd see the IS_PARKED bit
- * which might be about to be cleared.
- */
- if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
- if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
- __kthread_bind(k, kthread->cpu, TASK_PARKED);
- wake_up_state(k, TASK_PARKED);
- }
-}
-
/**
* kthread_unpark - unpark a thread created by kthread_create().
* @k: thread created by kthread_create().
@@ -352,8 +336,20 @@ void kthread_unpark(struct task_struct *k)
{
struct kthread *kthread = task_get_live_kthread(k);
- if (kthread)
- __kthread_unpark(k, kthread);
+ if (kthread) {
+ clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+ /*
+ * We clear the IS_PARKED bit here as we don't wait
+ * until the task has left the park code. So if we'd
+ * park before that happens we'd see the IS_PARKED bit
+ * which might be about to be cleared.
+ */
+ if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
+ if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
+ __kthread_bind(k, kthread->cpu);
+ wake_up_process(k);
+ }
+ }
put_task_struct(k);
}
@@ -411,7 +407,7 @@ int kthread_stop(struct task_struct *k)
trace_sched_kthread_stop(k);
if (kthread) {
set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
- __kthread_unpark(k, kthread);
+ clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
wake_up_process(k);
wait_for_completion(&kthread->exited);
}
diff --git a/trunk/kernel/sched/clock.c b/trunk/kernel/sched/clock.c
index c3ae1446461c..c685e31492df 100644
--- a/trunk/kernel/sched/clock.c
+++ b/trunk/kernel/sched/clock.c
@@ -176,36 +176,10 @@ static u64 sched_clock_remote(struct sched_clock_data *scd)
u64 this_clock, remote_clock;
u64 *ptr, old_val, val;
-#if BITS_PER_LONG != 64
-again:
- /*
- * Careful here: The local and the remote clock values need to
- * be read out atomic as we need to compare the values and
- * then update either the local or the remote side. So the
- * cmpxchg64 below only protects one readout.
- *
- * We must reread via sched_clock_local() in the retry case on
- * 32bit as an NMI could use sched_clock_local() via the
- * tracer and hit between the readout of
- * the low32bit and the high 32bit portion.
- */
- this_clock = sched_clock_local(my_scd);
- /*
- * We must enforce atomic readout on 32bit, otherwise the
- * update on the remote cpu can hit inbetween the readout of
- * the low32bit and the high 32bit portion.
- */
- remote_clock = cmpxchg64(&scd->clock, 0, 0);
-#else
- /*
- * On 64bit the read of [my]scd->clock is atomic versus the
- * update, so we can avoid the above 32bit dance.
- */
sched_clock_local(my_scd);
again:
this_clock = my_scd->clock;
remote_clock = scd->clock;
-#endif
/*
* Use the opportunity that we have both locks
diff --git a/trunk/kernel/sched/core.c b/trunk/kernel/sched/core.c
index 67d04651f44b..7f12624a393c 100644
--- a/trunk/kernel/sched/core.c
+++ b/trunk/kernel/sched/core.c
@@ -1498,10 +1498,8 @@ static void try_to_wake_up_local(struct task_struct *p)
{
struct rq *rq = task_rq(p);
- if (WARN_ON_ONCE(rq != this_rq()) ||
- WARN_ON_ONCE(p == current))
- return;
-
+ BUG_ON(rq != this_rq());
+ BUG_ON(p == current);
lockdep_assert_held(&rq->lock);
if (!raw_spin_trylock(&p->pi_lock)) {
@@ -5001,7 +4999,7 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
}
static int min_load_idx = 0;
-static int max_load_idx = CPU_LOAD_IDX_MAX-1;
+static int max_load_idx = CPU_LOAD_IDX_MAX;
static void
set_table_entry(struct ctl_table *entry,
diff --git a/trunk/kernel/sched/cputime.c b/trunk/kernel/sched/cputime.c
index e93cca92f38b..ed12cbb135f4 100644
--- a/trunk/kernel/sched/cputime.c
+++ b/trunk/kernel/sched/cputime.c
@@ -310,7 +310,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
t = tsk;
do {
- task_cputime(t, &utime, &stime);
+ task_cputime(tsk, &utime, &stime);
times->utime += utime;
times->stime += stime;
times->sum_exec_runtime += task_sched_runtime(t);
diff --git a/trunk/kernel/smpboot.c b/trunk/kernel/smpboot.c
index 02fc5c933673..8eaed9aa9cf0 100644
--- a/trunk/kernel/smpboot.c
+++ b/trunk/kernel/smpboot.c
@@ -185,18 +185,8 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
}
get_task_struct(tsk);
*per_cpu_ptr(ht->store, cpu) = tsk;
- if (ht->create) {
- /*
- * Make sure that the task has actually scheduled out
- * into park position, before calling the create
- * callback. At least the migration thread callback
- * requires that the task is off the runqueue.
- */
- if (!wait_task_inactive(tsk, TASK_PARKED))
- WARN_ON(1);
- else
- ht->create(cpu);
- }
+ if (ht->create)
+ ht->create(cpu);
return 0;
}
diff --git a/trunk/kernel/sys.c b/trunk/kernel/sys.c
index 0da73cf73e60..39c9c4a2949f 100644
--- a/trunk/kernel/sys.c
+++ b/trunk/kernel/sys.c
@@ -324,6 +324,7 @@ void kernel_restart_prepare(char *cmd)
system_state = SYSTEM_RESTART;
usermodehelper_disable();
device_shutdown();
+ syscore_shutdown();
}
/**
@@ -369,7 +370,6 @@ void kernel_restart(char *cmd)
{
kernel_restart_prepare(cmd);
disable_nonboot_cpus();
- syscore_shutdown();
if (!cmd)
printk(KERN_EMERG "Restarting system.\n");
else
@@ -395,7 +395,6 @@ static void kernel_shutdown_prepare(enum system_states state)
void kernel_halt(void)
{
kernel_shutdown_prepare(SYSTEM_HALT);
- disable_nonboot_cpus();
syscore_shutdown();
printk(KERN_EMERG "System halted.\n");
kmsg_dump(KMSG_DUMP_HALT);
diff --git a/trunk/kernel/trace/ftrace.c b/trunk/kernel/trace/ftrace.c
index b3fde6d7b7fc..7e897106b7e0 100644
--- a/trunk/kernel/trace/ftrace.c
+++ b/trunk/kernel/trace/ftrace.c
@@ -694,6 +694,7 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
free_page(tmp);
}
+ free_page((unsigned long)stat->pages);
stat->pages = NULL;
stat->start = NULL;
@@ -1052,19 +1053,6 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
static struct pid * const ftrace_swapper_pid = &init_struct_pid;
-loff_t
-ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
-{
- loff_t ret;
-
- if (file->f_mode & FMODE_READ)
- ret = seq_lseek(file, offset, whence);
- else
- file->f_pos = ret = 1;
-
- return ret;
-}
-
#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
@@ -2625,7 +2613,7 @@ static void ftrace_filter_reset(struct ftrace_hash *hash)
* routine, you can use ftrace_filter_write() for the write
* routine if @flag has FTRACE_ITER_FILTER set, or
* ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
- * ftrace_filter_lseek() should be used as the lseek routine, and
+ * ftrace_regex_lseek() should be used as the lseek routine, and
* release must call ftrace_regex_release().
*/
int
@@ -2709,6 +2697,19 @@ ftrace_notrace_open(struct inode *inode, struct file *file)
inode, file);
}
+loff_t
+ftrace_regex_lseek(struct file *file, loff_t offset, int whence)
+{
+ loff_t ret;
+
+ if (file->f_mode & FMODE_READ)
+ ret = seq_lseek(file, offset, whence);
+ else
+ file->f_pos = ret = 1;
+
+ return ret;
+}
+
static int ftrace_match(char *str, char *regex, int len, int type)
{
int matched = 0;
@@ -3440,14 +3441,14 @@ static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
static int __init set_ftrace_notrace(char *str)
{
- strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
+ strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
return 1;
}
__setup("ftrace_notrace=", set_ftrace_notrace);
static int __init set_ftrace_filter(char *str)
{
- strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
+ strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
return 1;
}
__setup("ftrace_filter=", set_ftrace_filter);
@@ -3570,7 +3571,7 @@ static const struct file_operations ftrace_filter_fops = {
.open = ftrace_filter_open,
.read = seq_read,
.write = ftrace_filter_write,
- .llseek = ftrace_filter_lseek,
+ .llseek = ftrace_regex_lseek,
.release = ftrace_regex_release,
};
@@ -3578,7 +3579,7 @@ static const struct file_operations ftrace_notrace_fops = {
.open = ftrace_notrace_open,
.read = seq_read,
.write = ftrace_notrace_write,
- .llseek = ftrace_filter_lseek,
+ .llseek = ftrace_regex_lseek,
.release = ftrace_regex_release,
};
@@ -3783,8 +3784,8 @@ static const struct file_operations ftrace_graph_fops = {
.open = ftrace_graph_open,
.read = seq_read,
.write = ftrace_graph_write,
- .llseek = ftrace_filter_lseek,
.release = ftrace_graph_release,
+ .llseek = seq_lseek,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -4439,7 +4440,7 @@ static const struct file_operations ftrace_pid_fops = {
.open = ftrace_pid_open,
.write = ftrace_pid_write,
.read = seq_read,
- .llseek = ftrace_filter_lseek,
+ .llseek = seq_lseek,
.release = ftrace_pid_release,
};
diff --git a/trunk/kernel/trace/trace.c b/trunk/kernel/trace/trace.c
index 66338c4f7f4b..7ba7fc76f9eb 100644
--- a/trunk/kernel/trace/trace.c
+++ b/trunk/kernel/trace/trace.c
@@ -132,7 +132,7 @@ static char *default_bootup_tracer;
static int __init set_cmdline_ftrace(char *str)
{
- strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
+ strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
default_bootup_tracer = bootup_tracer_buf;
/* We are using ftrace early, expand it */
ring_buffer_expanded = 1;
@@ -162,7 +162,7 @@ static char *trace_boot_options __initdata;
static int __init set_trace_boot_options(char *str)
{
- strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
+ strncpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
trace_boot_options = trace_boot_options_buf;
return 0;
}
diff --git a/trunk/kernel/trace/trace_stack.c b/trunk/kernel/trace/trace_stack.c
index 83a8b5b7bd35..42ca822fc701 100644
--- a/trunk/kernel/trace/trace_stack.c
+++ b/trunk/kernel/trace/trace_stack.c
@@ -322,7 +322,7 @@ static const struct file_operations stack_trace_filter_fops = {
.open = stack_trace_filter_open,
.read = seq_read,
.write = ftrace_filter_write,
- .llseek = ftrace_filter_lseek,
+ .llseek = ftrace_regex_lseek,
.release = ftrace_regex_release,
};
diff --git a/trunk/lib/kobject.c b/trunk/lib/kobject.c
index a65486613d79..e07ee1fcd6f1 100644
--- a/trunk/lib/kobject.c
+++ b/trunk/lib/kobject.c
@@ -529,13 +529,6 @@ struct kobject *kobject_get(struct kobject *kobj)
return kobj;
}
-static struct kobject *kobject_get_unless_zero(struct kobject *kobj)
-{
- if (!kref_get_unless_zero(&kobj->kref))
- kobj = NULL;
- return kobj;
-}
-
/*
* kobject_cleanup - free kobject resources.
* @kobj: object to cleanup
@@ -758,7 +751,7 @@ struct kobject *kset_find_obj(struct kset *kset, const char *name)
list_for_each_entry(k, &kset->list, entry) {
if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
- ret = kobject_get_unless_zero(k);
+ ret = kobject_get(k);
break;
}
}
diff --git a/trunk/mm/memory.c b/trunk/mm/memory.c
index ba94dec5b259..494526ae024a 100644
--- a/trunk/mm/memory.c
+++ b/trunk/mm/memory.c
@@ -216,7 +216,6 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
tlb->mm = mm;
tlb->fullmm = fullmm;
- tlb->need_flush_all = 0;
tlb->start = -1UL;
tlb->end = 0;
tlb->need_flush = 0;
@@ -2393,53 +2392,6 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
}
EXPORT_SYMBOL(remap_pfn_range);
-/**
- * vm_iomap_memory - remap memory to userspace
- * @vma: user vma to map to
- * @start: start of area
- * @len: size of area
- *
- * This is a simplified io_remap_pfn_range() for common driver use. The
- * driver just needs to give us the physical memory range to be mapped,
- * we'll figure out the rest from the vma information.
- *
- * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
- * whatever write-combining details or similar.
- */
-int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
-{
- unsigned long vm_len, pfn, pages;
-
- /* Check that the physical memory area passed in looks valid */
- if (start + len < start)
- return -EINVAL;
- /*
- * You *really* shouldn't map things that aren't page-aligned,
- * but we've historically allowed it because IO memory might
- * just have smaller alignment.
- */
- len += start & ~PAGE_MASK;
- pfn = start >> PAGE_SHIFT;
- pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
- if (pfn + pages < pfn)
- return -EINVAL;
-
- /* We start the mapping 'vm_pgoff' pages into the area */
- if (vma->vm_pgoff > pages)
- return -EINVAL;
- pfn += vma->vm_pgoff;
- pages -= vma->vm_pgoff;
-
- /* Can we fit all of the mapping? */
- vm_len = vma->vm_end - vma->vm_start;
- if (vm_len >> PAGE_SHIFT > pages)
- return -EINVAL;
-
- /* Ok, let it rip */
- return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
-}
-EXPORT_SYMBOL(vm_iomap_memory);
-
static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, unsigned long end,
pte_fn_t fn, void *data)
diff --git a/trunk/net/batman-adv/main.c b/trunk/net/batman-adv/main.c
index 0488d70c8c35..fa563e497c48 100644
--- a/trunk/net/batman-adv/main.c
+++ b/trunk/net/batman-adv/main.c
@@ -169,7 +169,7 @@ void batadv_mesh_free(struct net_device *soft_iface)
atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}
-int batadv_is_my_mac(const uint8_t *addr)
+int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)
{
const struct batadv_hard_iface *hard_iface;
@@ -178,6 +178,9 @@ int batadv_is_my_mac(const uint8_t *addr)
if (hard_iface->if_status != BATADV_IF_ACTIVE)
continue;
+ if (hard_iface->soft_iface != bat_priv->soft_iface)
+ continue;
+
if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
rcu_read_unlock();
return 1;
diff --git a/trunk/net/batman-adv/main.h b/trunk/net/batman-adv/main.h
index ced08b936a96..d40910dfc8ea 100644
--- a/trunk/net/batman-adv/main.h
+++ b/trunk/net/batman-adv/main.h
@@ -162,7 +162,7 @@ extern struct workqueue_struct *batadv_event_workqueue;
int batadv_mesh_init(struct net_device *soft_iface);
void batadv_mesh_free(struct net_device *soft_iface);
-int batadv_is_my_mac(const uint8_t *addr);
+int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr);
struct batadv_hard_iface *
batadv_seq_print_text_primary_if_get(struct seq_file *seq);
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
diff --git a/trunk/net/batman-adv/routing.c b/trunk/net/batman-adv/routing.c
index 5ee21cebbbb0..319f2906c71a 100644
--- a/trunk/net/batman-adv/routing.c
+++ b/trunk/net/batman-adv/routing.c
@@ -402,7 +402,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
goto out;
/* not for me */
- if (!batadv_is_my_mac(ethhdr->h_dest))
+ if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
goto out;
icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
@@ -416,7 +416,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
}
/* packet for me */
- if (batadv_is_my_mac(icmp_packet->dst))
+ if (batadv_is_my_mac(bat_priv, icmp_packet->dst))
return batadv_recv_my_icmp_packet(bat_priv, skb, hdr_size);
/* TTL exceeded */
@@ -548,7 +548,8 @@ batadv_find_ifalter_router(struct batadv_orig_node *primary_orig,
return router;
}
-static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size)
+static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, int hdr_size)
{
struct ethhdr *ethhdr;
@@ -567,7 +568,7 @@ static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size)
return -1;
/* not for me */
- if (!batadv_is_my_mac(ethhdr->h_dest))
+ if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
return -1;
return 0;
@@ -582,7 +583,7 @@ int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
char tt_flag;
size_t packet_size;
- if (batadv_check_unicast_packet(skb, hdr_size) < 0)
+ if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
return NET_RX_DROP;
/* I could need to modify it */
@@ -614,7 +615,7 @@ int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
case BATADV_TT_RESPONSE:
batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX);
- if (batadv_is_my_mac(tt_query->dst)) {
+ if (batadv_is_my_mac(bat_priv, tt_query->dst)) {
/* packet needs to be linearized to access the TT
* changes
*/
@@ -657,14 +658,15 @@ int batadv_recv_roam_adv(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
struct batadv_roam_adv_packet *roam_adv_packet;
struct batadv_orig_node *orig_node;
- if (batadv_check_unicast_packet(skb, sizeof(*roam_adv_packet)) < 0)
+ if (batadv_check_unicast_packet(bat_priv, skb,
+ sizeof(*roam_adv_packet)) < 0)
goto out;
batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX);
roam_adv_packet = (struct batadv_roam_adv_packet *)skb->data;
- if (!batadv_is_my_mac(roam_adv_packet->dst))
+ if (!batadv_is_my_mac(bat_priv, roam_adv_packet->dst))
return batadv_route_unicast_packet(skb, recv_if);
/* check if it is a backbone gateway. we don't accept
@@ -967,7 +969,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
* last time) the packet had an updated information or not
*/
curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
- if (!batadv_is_my_mac(unicast_packet->dest)) {
+ if (!batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
orig_node = batadv_orig_hash_find(bat_priv,
unicast_packet->dest);
/* if it is not possible to find the orig_node representing the
@@ -1044,14 +1046,14 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
if (is4addr)
hdr_size = sizeof(*unicast_4addr_packet);
- if (batadv_check_unicast_packet(skb, hdr_size) < 0)
+ if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
return NET_RX_DROP;
if (!batadv_check_unicast_ttvn(bat_priv, skb))
return NET_RX_DROP;
/* packet for me */
- if (batadv_is_my_mac(unicast_packet->dest)) {
+ if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
if (is4addr) {
batadv_dat_inc_counter(bat_priv,
unicast_4addr_packet->subtype);
@@ -1088,7 +1090,7 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
struct sk_buff *new_skb = NULL;
int ret;
- if (batadv_check_unicast_packet(skb, hdr_size) < 0)
+ if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
return NET_RX_DROP;
if (!batadv_check_unicast_ttvn(bat_priv, skb))
@@ -1097,7 +1099,7 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
/* packet for me */
- if (batadv_is_my_mac(unicast_packet->dest)) {
+ if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
if (ret == NET_RX_DROP)
@@ -1151,13 +1153,13 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
goto out;
/* ignore broadcasts sent by myself */
- if (batadv_is_my_mac(ethhdr->h_source))
+ if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
goto out;
bcast_packet = (struct batadv_bcast_packet *)skb->data;
/* ignore broadcasts originated by myself */
- if (batadv_is_my_mac(bcast_packet->orig))
+ if (batadv_is_my_mac(bat_priv, bcast_packet->orig))
goto out;
if (bcast_packet->header.ttl < 2)
@@ -1243,14 +1245,14 @@ int batadv_recv_vis_packet(struct sk_buff *skb,
ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* not for me */
- if (!batadv_is_my_mac(ethhdr->h_dest))
+ if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
return NET_RX_DROP;
/* ignore own packets */
- if (batadv_is_my_mac(vis_packet->vis_orig))
+ if (batadv_is_my_mac(bat_priv, vis_packet->vis_orig))
return NET_RX_DROP;
- if (batadv_is_my_mac(vis_packet->sender_orig))
+ if (batadv_is_my_mac(bat_priv, vis_packet->sender_orig))
return NET_RX_DROP;
switch (vis_packet->vis_type) {
diff --git a/trunk/net/batman-adv/translation-table.c b/trunk/net/batman-adv/translation-table.c
index 98a66a021a60..7abee19567e9 100644
--- a/trunk/net/batman-adv/translation-table.c
+++ b/trunk/net/batman-adv/translation-table.c
@@ -1953,7 +1953,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
bool batadv_send_tt_response(struct batadv_priv *bat_priv,
struct batadv_tt_query_packet *tt_request)
{
- if (batadv_is_my_mac(tt_request->dst)) {
+ if (batadv_is_my_mac(bat_priv, tt_request->dst)) {
/* don't answer backbone gws! */
if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src))
return true;
diff --git a/trunk/net/batman-adv/vis.c b/trunk/net/batman-adv/vis.c
index c053244b97bd..6a1e646be96d 100644
--- a/trunk/net/batman-adv/vis.c
+++ b/trunk/net/batman-adv/vis.c
@@ -477,7 +477,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
/* Are we the target for this VIS packet? */
if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC &&
- batadv_is_my_mac(vis_packet->target_orig))
+ batadv_is_my_mac(bat_priv, vis_packet->target_orig))
are_target = 1;
spin_lock_bh(&bat_priv->vis.hash_lock);
@@ -496,7 +496,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
batadv_send_list_add(bat_priv, info);
/* ... we're not the recipient (and thus need to forward). */
- } else if (!batadv_is_my_mac(packet->target_orig)) {
+ } else if (!batadv_is_my_mac(bat_priv, packet->target_orig)) {
batadv_send_list_add(bat_priv, info);
}
diff --git a/trunk/net/openvswitch/datapath.c b/trunk/net/openvswitch/datapath.c
index 6980c3e6f066..a4b724708a1a 100644
--- a/trunk/net/openvswitch/datapath.c
+++ b/trunk/net/openvswitch/datapath.c
@@ -1593,8 +1593,10 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
return ERR_PTR(-ENOMEM);
retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
- BUG_ON(retval < 0);
-
+ if (retval < 0) {
+ kfree_skb(skb);
+ return ERR_PTR(retval);
+ }
return skb;
}
@@ -1724,32 +1726,24 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type)
err = -EINVAL;
- reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!reply) {
- err = -ENOMEM;
- goto exit_unlock;
- }
-
if (!err && a[OVS_VPORT_ATTR_OPTIONS])
err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
if (err)
- goto exit_free;
-
+ goto exit_unlock;
if (a[OVS_VPORT_ATTR_UPCALL_PID])
vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
- err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
- info->snd_seq, 0, OVS_VPORT_CMD_NEW);
- BUG_ON(err < 0);
+ reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
+ OVS_VPORT_CMD_NEW);
+ if (IS_ERR(reply)) {
+ netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
+ ovs_dp_vport_multicast_group.id, PTR_ERR(reply));
+ goto exit_unlock;
+ }
genl_notify(reply, genl_info_net(info), info->snd_portid,
ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
- rtnl_unlock();
- return 0;
-
-exit_free:
- kfree_skb(reply);
exit_unlock:
rtnl_unlock();
return err;
diff --git a/trunk/net/openvswitch/flow.c b/trunk/net/openvswitch/flow.c
index 67a2b783fe70..fe0e4215c73d 100644
--- a/trunk/net/openvswitch/flow.c
+++ b/trunk/net/openvswitch/flow.c
@@ -795,9 +795,9 @@ void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
- BUG_ON(table->count == 0);
hlist_del_rcu(&flow->hash_node[table->node_ver]);
table->count--;
+ BUG_ON(table->count < 0);
}
/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
diff --git a/trunk/sound/soc/codecs/wm5102.c b/trunk/sound/soc/codecs/wm5102.c
index 34d0201d6a78..b82bbf584146 100644
--- a/trunk/sound/soc/codecs/wm5102.c
+++ b/trunk/sound/soc/codecs/wm5102.c
@@ -584,7 +584,7 @@ static int wm5102_sysclk_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = w->codec;
- struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
+ struct arizona *arizona = dev_get_drvdata(codec->dev);
struct regmap *regmap = codec->control_data;
const struct reg_default *patch = NULL;
int i, patch_size;
diff --git a/trunk/sound/soc/codecs/wm8903.c b/trunk/sound/soc/codecs/wm8903.c
index f8a31ad0b203..134e41c870b9 100644
--- a/trunk/sound/soc/codecs/wm8903.c
+++ b/trunk/sound/soc/codecs/wm8903.c
@@ -1083,8 +1083,6 @@ static const struct snd_soc_dapm_route wm8903_intercon[] = {
{ "ROP", NULL, "Right Speaker PGA" },
{ "RON", NULL, "Right Speaker PGA" },
- { "Charge Pump", NULL, "CLK_DSP" },
-
{ "Left Headphone Output PGA", NULL, "Charge Pump" },
{ "Right Headphone Output PGA", NULL, "Charge Pump" },
{ "Left Line Output PGA", NULL, "Charge Pump" },
diff --git a/trunk/sound/soc/samsung/i2s.c b/trunk/sound/soc/samsung/i2s.c
index 6bbeb0bf1a73..d7231e336a7c 100644
--- a/trunk/sound/soc/samsung/i2s.c
+++ b/trunk/sound/soc/samsung/i2s.c
@@ -972,7 +972,6 @@ static const struct snd_soc_dai_ops samsung_i2s_dai_ops = {
static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec)
{
struct i2s_dai *i2s;
- int ret;
i2s = devm_kzalloc(&pdev->dev, sizeof(struct i2s_dai), GFP_KERNEL);
if (i2s == NULL)
@@ -997,18 +996,16 @@ static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec)
i2s->i2s_dai_drv.capture.channels_max = 2;
i2s->i2s_dai_drv.capture.rates = SAMSUNG_I2S_RATES;
i2s->i2s_dai_drv.capture.formats = SAMSUNG_I2S_FMTS;
- dev_set_drvdata(&i2s->pdev->dev, i2s);
} else { /* Create a new platform_device for Secondary */
- i2s->pdev = platform_device_alloc("samsung-i2s-sec", -1);
+ i2s->pdev = platform_device_register_resndata(NULL,
+ "samsung-i2s-sec", -1, NULL, 0, NULL, 0);
if (IS_ERR(i2s->pdev))
return NULL;
-
- platform_set_drvdata(i2s->pdev, i2s);
- ret = platform_device_add(i2s->pdev);
- if (ret < 0)
- return NULL;
}
+ /* Pre-assign snd_soc_dai_set_drvdata */
+ dev_set_drvdata(&i2s->pdev->dev, i2s);
+
return i2s;
}
@@ -1110,10 +1107,6 @@ static int samsung_i2s_probe(struct platform_device *pdev)
if (samsung_dai_type == TYPE_SEC) {
sec_dai = dev_get_drvdata(&pdev->dev);
- if (!sec_dai) {
- dev_err(&pdev->dev, "Unable to get drvdata\n");
- return -EFAULT;
- }
snd_soc_register_dai(&sec_dai->pdev->dev,
&sec_dai->i2s_dai_drv);
asoc_dma_platform_register(&pdev->dev);
diff --git a/trunk/sound/soc/soc-compress.c b/trunk/sound/soc/soc-compress.c
index ed0bfb0ddb96..b5b3db71e253 100644
--- a/trunk/sound/soc/soc-compress.c
+++ b/trunk/sound/soc/soc-compress.c
@@ -211,27 +211,19 @@ static int soc_compr_set_params(struct snd_compr_stream *cstream,
if (platform->driver->compr_ops && platform->driver->compr_ops->set_params) {
ret = platform->driver->compr_ops->set_params(cstream, params);
if (ret < 0)
- goto err;
+ goto out;
}
if (rtd->dai_link->compr_ops && rtd->dai_link->compr_ops->set_params) {
ret = rtd->dai_link->compr_ops->set_params(cstream);
if (ret < 0)
- goto err;
+ goto out;
}
snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
SND_SOC_DAPM_STREAM_START);
- /* cancel any delayed stream shutdown that is pending */
- rtd->pop_wait = 0;
- mutex_unlock(&rtd->pcm_mutex);
-
- cancel_delayed_work_sync(&rtd->delayed_work);
-
- return ret;
-
-err:
+out:
mutex_unlock(&rtd->pcm_mutex);
return ret;
}
diff --git a/trunk/sound/soc/soc-core.c b/trunk/sound/soc/soc-core.c
index ff4b45a5d796..507d251916af 100644
--- a/trunk/sound/soc/soc-core.c
+++ b/trunk/sound/soc/soc-core.c
@@ -2963,7 +2963,7 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
val = val << shift;
ret = snd_soc_update_bits_locked(codec, reg, val_mask, val);
- if (ret < 0)
+ if (ret != 0)
return ret;
if (snd_soc_volsw_is_stereo(mc)) {
diff --git a/trunk/sound/soc/tegra/tegra_pcm.c b/trunk/sound/soc/tegra/tegra_pcm.c
index 5e2c55c5b255..c925ab0adeb6 100644
--- a/trunk/sound/soc/tegra/tegra_pcm.c
+++ b/trunk/sound/soc/tegra/tegra_pcm.c
@@ -43,6 +43,8 @@
static const struct snd_pcm_hardware tegra_pcm_hardware = {
.info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_RESUME |
SNDRV_PCM_INFO_INTERLEAVED,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.channels_min = 2,
@@ -125,6 +127,26 @@ static int tegra_pcm_hw_free(struct snd_pcm_substream *substream)
return 0;
}
+static int tegra_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ return snd_dmaengine_pcm_trigger(substream,
+ SNDRV_PCM_TRIGGER_START);
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ return snd_dmaengine_pcm_trigger(substream,
+ SNDRV_PCM_TRIGGER_STOP);
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int tegra_pcm_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *vma)
{
@@ -142,7 +164,7 @@ static struct snd_pcm_ops tegra_pcm_ops = {
.ioctl = snd_pcm_lib_ioctl,
.hw_params = tegra_pcm_hw_params,
.hw_free = tegra_pcm_hw_free,
- .trigger = snd_dmaengine_pcm_trigger,
+ .trigger = tegra_pcm_trigger,
.pointer = snd_dmaengine_pcm_pointer,
.mmap = tegra_pcm_mmap,
};
diff --git a/trunk/sound/usb/mixer_quirks.c b/trunk/sound/usb/mixer_quirks.c
index ebe91440a068..497d2741d119 100644
--- a/trunk/sound/usb/mixer_quirks.c
+++ b/trunk/sound/usb/mixer_quirks.c
@@ -509,7 +509,7 @@ static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol,
else
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0, wIndex,
+ 0, cpu_to_le16(wIndex),
&tmp, sizeof(tmp), 1000);
up_read(&mixer->chip->shutdown_rwsem);
@@ -540,7 +540,7 @@ static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol,
else
ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
- wValue, wIndex,
+ cpu_to_le16(wValue), cpu_to_le16(wIndex),
NULL, 0, 1000);
up_read(&mixer->chip->shutdown_rwsem);
diff --git a/trunk/sound/usb/quirks.c b/trunk/sound/usb/quirks.c
index 9c5ab22358b1..5325a3869bb7 100644
--- a/trunk/sound/usb/quirks.c
+++ b/trunk/sound/usb/quirks.c
@@ -486,7 +486,7 @@ static int snd_usb_nativeinstruments_boot_quirk(struct usb_device *dev)
{
int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
0xaf, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 1, 0, NULL, 0, 1000);
+ cpu_to_le16(1), 0, NULL, 0, 1000);
if (ret < 0)
return ret;