diff --git a/[refs] b/[refs]
index 26ddc53bd150..fdd3eb53ea0b 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 42fed7ba44e4e8c1fb27b28ad14490cb1daff3c7
+refs/heads/master: 000a74f41e601bc4e36a760aa42f219a019c5391
diff --git a/trunk/Documentation/DocBook/device-drivers.tmpl b/trunk/Documentation/DocBook/device-drivers.tmpl
index 7514dbf0a679..c36892c072da 100644
--- a/trunk/Documentation/DocBook/device-drivers.tmpl
+++ b/trunk/Documentation/DocBook/device-drivers.tmpl
@@ -227,7 +227,7 @@ X!Isound/sound_firmware.c
16x50 UART Driver
!Edrivers/tty/serial/serial_core.c
-!Edrivers/tty/serial/8250/8250.c
+!Edrivers/tty/serial/8250/8250_core.c
diff --git a/trunk/Documentation/devicetree/bindings/video/via,vt8500-fb.txt b/trunk/Documentation/devicetree/bindings/video/via,vt8500-fb.txt
index c870b6478ec8..2871e218a0fb 100644
--- a/trunk/Documentation/devicetree/bindings/video/via,vt8500-fb.txt
+++ b/trunk/Documentation/devicetree/bindings/video/via,vt8500-fb.txt
@@ -5,58 +5,32 @@ Required properties:
- compatible : "via,vt8500-fb"
- reg : Should contain 1 register range (address and length)
- interrupts : framebuffer controller interrupt
-- display: a phandle pointing to the display node
+- bits-per-pixel : bit depth of framebuffer (16 or 32)
-Required nodes:
-- display: a display node is required to initialize the lcd panel
- This should be in the board dts.
-- default-mode: a videomode within the display with timing parameters
- as specified below.
+Required subnodes:
+- display-timings: see display-timing.txt for information
Example:
- fb@d800e400 {
+ fb@d8050800 {
compatible = "via,vt8500-fb";
reg = <0xd800e400 0x400>;
interrupts = <12>;
- display = <&display>;
- default-mode = <&mode0>;
- };
-
-VIA VT8500 Display
------------------------------------------------------
-Required properties (as per of_videomode_helper):
-
- - hactive, vactive: Display resolution
- - hfront-porch, hback-porch, hsync-len: Horizontal Display timing parameters
- in pixels
- vfront-porch, vback-porch, vsync-len: Vertical display timing parameters in
- lines
- - clock: displayclock in Hz
- - bpp: lcd panel bit-depth.
- <16> for RGB565, <32> for RGB888
-
-Optional properties (as per of_videomode_helper):
- - width-mm, height-mm: Display dimensions in mm
- - hsync-active-high (bool): Hsync pulse is active high
- - vsync-active-high (bool): Vsync pulse is active high
- - interlaced (bool): This is an interlaced mode
- - doublescan (bool): This is a doublescan mode
+ bits-per-pixel = <16>;
-Example:
- display: display@0 {
- modes {
- mode0: mode@0 {
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: 800x480 {
+ clock-frequency = <0>; /* unused but required */
hactive = <800>;
vactive = <480>;
- hback-porch = <88>;
hfront-porch = <40>;
+ hback-porch = <88>;
hsync-len = <0>;
vback-porch = <32>;
vfront-porch = <11>;
vsync-len = <1>;
- clock = <0>; /* unused but required */
- bpp = <16>; /* non-standard but required */
};
};
};
+
diff --git a/trunk/Documentation/devicetree/bindings/video/wm,wm8505-fb.txt b/trunk/Documentation/devicetree/bindings/video/wm,wm8505-fb.txt
index 3d325e1d11ee..0bcadb2840a5 100644
--- a/trunk/Documentation/devicetree/bindings/video/wm,wm8505-fb.txt
+++ b/trunk/Documentation/devicetree/bindings/video/wm,wm8505-fb.txt
@@ -4,20 +4,30 @@ Wondermedia WM8505 Framebuffer
Required properties:
- compatible : "wm,wm8505-fb"
- reg : Should contain 1 register range (address and length)
-- via,display: a phandle pointing to the display node
+- bits-per-pixel : bit depth of framebuffer (16 or 32)
-Required nodes:
-- display: a display node is required to initialize the lcd panel
- This should be in the board dts. See definition in
- Documentation/devicetree/bindings/video/via,vt8500-fb.txt
-- default-mode: a videomode node as specified in
- Documentation/devicetree/bindings/video/via,vt8500-fb.txt
+Required subnodes:
+- display-timings: see display-timing.txt for information
Example:
- fb@d8050800 {
+ fb@d8051700 {
compatible = "wm,wm8505-fb";
- reg = <0xd8050800 0x200>;
- display = <&display>;
- default-mode = <&mode0>;
+ reg = <0xd8051700 0x200>;
+ bits-per-pixel = <16>;
+
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: 800x480 {
+ clock-frequency = <0>; /* unused but required */
+ hactive = <800>;
+ vactive = <480>;
+ hfront-porch = <40>;
+ hback-porch = <88>;
+ hsync-len = <0>;
+ vback-porch = <32>;
+ vfront-porch = <11>;
+ vsync-len = <1>;
+ };
+ };
};
diff --git a/trunk/Documentation/ia64/err_inject.txt b/trunk/Documentation/ia64/err_inject.txt
index 223e4f0582d0..9f651c181429 100644
--- a/trunk/Documentation/ia64/err_inject.txt
+++ b/trunk/Documentation/ia64/err_inject.txt
@@ -882,7 +882,7 @@ int err_inj()
cpu=parameters[i].cpu;
k = cpu%64;
j = cpu/64;
- mask[j]=1<<k;
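The err_inject.txt example above builds the CPU mask one 64-bit word at a time (word j = cpu/64, bit k = cpu%64). The standalone sketch below uses a hypothetical CPU number, not a value from the patch, to show the same indexing; the shift has to be done on a 64-bit type, since a plain int "1 << k" overflows once k reaches 32.

    /* Standalone sketch (hypothetical values, not from the patch) of the
     * mask indexing used by the err_inject.txt example program. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t mask[4] = { 0 };   /* room for 256 CPUs */
        int cpu = 70;               /* hypothetical CPU number */
        int k = cpu % 64;           /* bit within the word */
        int j = cpu / 64;           /* which 64-bit word */

        /* Do the shift on a 64-bit type; "1 << k" as int overflows
         * once k >= 32. */
        mask[j] |= UINT64_C(1) << k;

        printf("cpu %d -> mask[%d] = %#llx\n",
               cpu, j, (unsigned long long)mask[j]);
        return 0;
    }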
diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt
--- a/trunk/Documentation/kernel-parameters.txt
+++ b/trunk/Documentation/kernel-parameters.txt
@@ -788,6 +805,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
edd= [EDD]
Format: {"off" | "on" | "skip[mbr]"}
+ efi_no_storage_paranoia [EFI; X86]
+			Allow the use of more than 50% of your EFI
+			variable storage. Use this parameter only if you
+			are sure that your UEFI firmware performs sane
+			garbage collection and fulfills the spec; otherwise
+			your board may brick.
+
eisa_irq_edge= [PARISC,HW]
See header of drivers/parisc/eisa.c.
diff --git a/trunk/Documentation/s390/s390dbf.txt b/trunk/Documentation/s390/s390dbf.txt
index ae66f9b90a25..fcaf0b4efba2 100644
--- a/trunk/Documentation/s390/s390dbf.txt
+++ b/trunk/Documentation/s390/s390dbf.txt
@@ -143,7 +143,8 @@ Parameter: id: handle for debug log
Return Value: none
-Description: frees memory for a debug log
+Description: frees memory for a debug log and removes all registered debug
+ views.
Must not be called within an interrupt handler
---------------------------------------------------------------------------
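The debug_unregister() entry above pairs with the debug_register()/debug_register_view() calls described earlier in s390dbf.txt. A minimal module sketch of that pairing, loosely following the document's own examples (the name "my_driver" and the sizes are made up, and it only builds as part of an s390 kernel tree):

    #include <linux/module.h>
    #include <asm/debug.h>

    static debug_info_t *my_dbf;

    static int __init my_init(void)
    {
        char data[8] = "hello";

        /* 4 pages per area, 1 area, 16 bytes of payload per entry */
        my_dbf = debug_register("my_driver", 4, 1, 16);
        if (!my_dbf)
            return -ENOMEM;
        debug_register_view(my_dbf, &debug_hex_ascii_view);
        debug_event(my_dbf, 3, data, 5);
        return 0;
    }

    static void __exit my_exit(void)
    {
        /* Frees the log and drops all registered views; never call
         * this from interrupt context. */
        debug_unregister(my_dbf);
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");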
diff --git a/trunk/Documentation/scsi/LICENSE.qla2xxx b/trunk/Documentation/scsi/LICENSE.qla2xxx
index 27a91cf43d6d..5020b7b5a244 100644
--- a/trunk/Documentation/scsi/LICENSE.qla2xxx
+++ b/trunk/Documentation/scsi/LICENSE.qla2xxx
@@ -1,4 +1,4 @@
-Copyright (c) 2003-2012 QLogic Corporation
+Copyright (c) 2003-2013 QLogic Corporation
QLogic Linux FC-FCoE Driver
This program includes a device driver for Linux 3.x.
diff --git a/trunk/Documentation/sound/alsa/ALSA-Configuration.txt b/trunk/Documentation/sound/alsa/ALSA-Configuration.txt
index 4499bd948860..95731a08f257 100644
--- a/trunk/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/trunk/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -890,9 +890,8 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
enable_msi - Enable Message Signaled Interrupt (MSI) (default = off)
power_save - Automatic power-saving timeout (in second, 0 =
disable)
- power_save_controller - Support runtime D3 of HD-audio controller
- (-1 = on for supported chip (default), false = off,
- true = force to on even for unsupported hardware)
+ power_save_controller - Reset HD-audio controller in power-saving mode
+ (default = on)
align_buffer_size - Force rounding of buffer/period sizes to multiples
of 128 bytes. This is more efficient in terms of memory
access but isn't required by the HDA spec and prevents
diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS
index 74e58a4d035b..8bdd7a7ef2f4 100644
--- a/trunk/MAINTAINERS
+++ b/trunk/MAINTAINERS
@@ -4941,6 +4941,12 @@ W: logfs.org
S: Maintained
F: fs/logfs/
+LPC32XX MACHINE SUPPORT
+M: Roland Stigge
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm/mach-lpc32xx/
+
LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
M: Nagalakshmi Nandigama
M: Sreekanth Reddy
@@ -5065,9 +5071,8 @@ S: Maintained
F: drivers/net/ethernet/marvell/sk*
MARVELL LIBERTAS WIRELESS DRIVER
-M: Dan Williams
L: libertas-dev@lists.infradead.org
-S: Maintained
+S: Orphan
F: drivers/net/wireless/libertas/
MARVELL MV643XX ETHERNET DRIVER
@@ -5569,6 +5574,7 @@ F: include/uapi/linux/if_*
F: include/uapi/linux/netdevice.h
NETXEN (1/10) GbE SUPPORT
+M: Manish Chopra
M: Sony Chacko
M: Rajesh Borundia
L: netdev@vger.kernel.org
@@ -6625,7 +6631,7 @@ S: Supported
F: fs/reiserfs/
REGISTER MAP ABSTRACTION
-M: Mark Brown
+M: Mark Brown
T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap.git
S: Supported
F: drivers/base/regmap/
@@ -6951,7 +6957,6 @@ F: drivers/scsi/st*
SCTP PROTOCOL
M: Vlad Yasevich
-M: Sridhar Samudrala
M: Neil Horman
L: linux-sctp@vger.kernel.org
W: http://lksctp.sourceforge.net
@@ -7374,7 +7379,7 @@ F: sound/
SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC)
M: Liam Girdwood
-M: Mark Brown
+M: Mark Brown
T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
W: http://alsa-project.org/main/index.php/ASoC
@@ -7463,7 +7468,7 @@ F: drivers/clk/spear/
SPI SUBSYSTEM
M: Grant Likely
-M: Mark Brown
+M: Mark Brown
L: spi-devel-general@lists.sourceforge.net
Q: http://patchwork.kernel.org/project/spi-devel-general/list/
T: git git://git.secretlab.ca/git/linux-2.6.git
@@ -8708,7 +8713,7 @@ F: drivers/scsi/vmw_pvscsi.h
VOLTAGE AND CURRENT REGULATOR FRAMEWORK
M: Liam Girdwood
-M: Mark Brown
+M: Mark Brown
W: http://opensource.wolfsonmicro.com/node/15
W: http://www.slimlogic.co.uk/?p=48
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lrg/regulator.git
diff --git a/trunk/Makefile b/trunk/Makefile
index 58a165b02af1..8fe69916e726 100644
--- a/trunk/Makefile
+++ b/trunk/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 9
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION =
NAME = Unicycling Gorilla
# *DOCUMENTATION*
@@ -513,7 +513,8 @@ ifeq ($(KBUILD_EXTMOD),)
# Carefully list dependencies so we do not try to build scripts twice
# in parallel
PHONY += scripts
-scripts: scripts_basic include/config/auto.conf include/config/tristate.conf
+scripts: scripts_basic include/config/auto.conf include/config/tristate.conf \
+ asm-generic
$(Q)$(MAKE) $(build)=$(@)
# Objects we will link into vmlinux / subdirs we need to visit
diff --git a/trunk/arch/alpha/Makefile b/trunk/arch/alpha/Makefile
index 4759fe751aa1..2cc3cc519c54 100644
--- a/trunk/arch/alpha/Makefile
+++ b/trunk/arch/alpha/Makefile
@@ -12,7 +12,7 @@ NM := $(NM) -B
LDFLAGS_vmlinux := -static -N #-relax
CHECKFLAGS += -D__alpha__ -m64
-cflags-y := -pipe -mno-fp-regs -ffixed-8 -msmall-data
+cflags-y := -pipe -mno-fp-regs -ffixed-8
cflags-y += $(call cc-option, -fno-jump-tables)
cpuflags-$(CONFIG_ALPHA_EV4) := -mcpu=ev4
diff --git a/trunk/arch/alpha/include/asm/floppy.h b/trunk/arch/alpha/include/asm/floppy.h
index 46cefbd50e73..bae97eb19d26 100644
--- a/trunk/arch/alpha/include/asm/floppy.h
+++ b/trunk/arch/alpha/include/asm/floppy.h
@@ -26,7 +26,7 @@
#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
#define fd_cacheflush(addr,size) /* nothing */
#define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt,\
- IRQF_DISABLED, "floppy", NULL)
+ 0, "floppy", NULL)
#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
#ifdef CONFIG_PCI
diff --git a/trunk/arch/alpha/kernel/irq.c b/trunk/arch/alpha/kernel/irq.c
index 2872accd2215..7b2be251c30f 100644
--- a/trunk/arch/alpha/kernel/irq.c
+++ b/trunk/arch/alpha/kernel/irq.c
@@ -117,13 +117,6 @@ handle_irq(int irq)
return;
}
- /*
- * From here we must proceed with IPL_MAX. Note that we do not
- * explicitly enable interrupts afterwards - some MILO PALcode
- * (namely LX164 one) seems to have severe problems with RTI
- * at IPL 0.
- */
- local_irq_disable();
irq_enter();
generic_handle_irq_desc(irq, desc);
irq_exit();
diff --git a/trunk/arch/alpha/kernel/irq_alpha.c b/trunk/arch/alpha/kernel/irq_alpha.c
index 772ddfdb71a8..f433fc11877a 100644
--- a/trunk/arch/alpha/kernel/irq_alpha.c
+++ b/trunk/arch/alpha/kernel/irq_alpha.c
@@ -45,6 +45,14 @@ do_entInt(unsigned long type, unsigned long vector,
unsigned long la_ptr, struct pt_regs *regs)
{
struct pt_regs *old_regs;
+
+ /*
+ * Disable interrupts during IRQ handling.
+ * Note that there is no matching local_irq_enable() due to
+ * severe problems with RTI at IPL0 and some MILO PALcode
+ * (namely LX164).
+ */
+ local_irq_disable();
switch (type) {
case 0:
#ifdef CONFIG_SMP
@@ -62,7 +70,6 @@ do_entInt(unsigned long type, unsigned long vector,
{
long cpu;
- local_irq_disable();
smp_percpu_timer_interrupt(regs);
cpu = smp_processor_id();
if (cpu != boot_cpuid) {
@@ -222,7 +229,6 @@ process_mcheck_info(unsigned long vector, unsigned long la_ptr,
struct irqaction timer_irqaction = {
.handler = timer_interrupt,
- .flags = IRQF_DISABLED,
.name = "timer",
};
diff --git a/trunk/arch/alpha/kernel/sys_nautilus.c b/trunk/arch/alpha/kernel/sys_nautilus.c
index 4d4c046f708d..1383f8601a93 100644
--- a/trunk/arch/alpha/kernel/sys_nautilus.c
+++ b/trunk/arch/alpha/kernel/sys_nautilus.c
@@ -188,6 +188,10 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr)
extern void free_reserved_mem(void *, void *);
extern void pcibios_claim_one_bus(struct pci_bus *);
+static struct resource irongate_io = {
+ .name = "Irongate PCI IO",
+ .flags = IORESOURCE_IO,
+};
static struct resource irongate_mem = {
.name = "Irongate PCI MEM",
.flags = IORESOURCE_MEM,
@@ -209,6 +213,7 @@ nautilus_init_pci(void)
irongate = pci_get_bus_and_slot(0, 0);
bus->self = irongate;
+ bus->resource[0] = &irongate_io;
bus->resource[1] = &irongate_mem;
pci_bus_size_bridges(bus);
diff --git a/trunk/arch/alpha/kernel/sys_titan.c b/trunk/arch/alpha/kernel/sys_titan.c
index 5cf4a481b8c5..a53cf03f49d5 100644
--- a/trunk/arch/alpha/kernel/sys_titan.c
+++ b/trunk/arch/alpha/kernel/sys_titan.c
@@ -280,15 +280,15 @@ titan_late_init(void)
* all reported to the kernel as machine checks, so the handler
* is a nop so it can be called to count the individual events.
*/
- titan_request_irq(63+16, titan_intr_nop, IRQF_DISABLED,
+ titan_request_irq(63+16, titan_intr_nop, 0,
"CChip Error", NULL);
- titan_request_irq(62+16, titan_intr_nop, IRQF_DISABLED,
+ titan_request_irq(62+16, titan_intr_nop, 0,
"PChip 0 H_Error", NULL);
- titan_request_irq(61+16, titan_intr_nop, IRQF_DISABLED,
+ titan_request_irq(61+16, titan_intr_nop, 0,
"PChip 1 H_Error", NULL);
- titan_request_irq(60+16, titan_intr_nop, IRQF_DISABLED,
+ titan_request_irq(60+16, titan_intr_nop, 0,
"PChip 0 C_Error", NULL);
- titan_request_irq(59+16, titan_intr_nop, IRQF_DISABLED,
+ titan_request_irq(59+16, titan_intr_nop, 0,
"PChip 1 C_Error", NULL);
/*
@@ -348,9 +348,9 @@ privateer_init_pci(void)
* Hook a couple of extra err interrupts that the
* common titan code won't.
*/
- titan_request_irq(53+16, titan_intr_nop, IRQF_DISABLED,
+ titan_request_irq(53+16, titan_intr_nop, 0,
"NMI", NULL);
- titan_request_irq(50+16, titan_intr_nop, IRQF_DISABLED,
+ titan_request_irq(50+16, titan_intr_nop, 0,
"Temperature Warning", NULL);
/*
diff --git a/trunk/arch/arc/include/asm/irqflags.h b/trunk/arch/arc/include/asm/irqflags.h
index ccd84806b62f..eac071668201 100644
--- a/trunk/arch/arc/include/asm/irqflags.h
+++ b/trunk/arch/arc/include/asm/irqflags.h
@@ -39,7 +39,7 @@ static inline long arch_local_irq_save(void)
" flag.nz %0 \n"
: "=r"(temp), "=r"(flags)
: "n"((STATUS_E1_MASK | STATUS_E2_MASK))
- : "cc");
+ : "memory", "cc");
return flags;
}
@@ -53,7 +53,8 @@ static inline void arch_local_irq_restore(unsigned long flags)
__asm__ __volatile__(
" flag %0 \n"
:
- : "r"(flags));
+ : "r"(flags)
+ : "memory");
}
/*
@@ -73,7 +74,8 @@ static inline void arch_local_irq_disable(void)
" and %0, %0, %1 \n"
" flag %0 \n"
: "=&r"(temp)
- : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK)));
+ : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))
+ : "memory");
}
/*
@@ -85,7 +87,9 @@ static inline long arch_local_save_flags(void)
__asm__ __volatile__(
" lr %0, [status32] \n"
- : "=&r"(temp));
+ : "=&r"(temp)
+ :
+ : "memory");
return temp;
}
diff --git a/trunk/arch/arm/Kconfig b/trunk/arch/arm/Kconfig
index 13b739469c51..1cacda426a0e 100644
--- a/trunk/arch/arm/Kconfig
+++ b/trunk/arch/arm/Kconfig
@@ -1183,9 +1183,9 @@ config ARM_NR_BANKS
default 8
config IWMMXT
- bool "Enable iWMMXt support"
+ bool "Enable iWMMXt support" if !CPU_PJ4
depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4
- default y if PXA27x || PXA3xx || ARCH_MMP
+ default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4
help
Enable support for iWMMXt context switching at run time if
running on a CPU that supports it.
@@ -1439,6 +1439,16 @@ config ARM_ERRATA_775420
to deadlock. This workaround puts DSB before executing ISB if
an abort may occur on cache maintenance.
+config ARM_ERRATA_798181
+ bool "ARM errata: TLBI/DSB failure on Cortex-A15"
+ depends on CPU_V7 && SMP
+ help
+ On Cortex-A15 (r0p0..r3p2) the TLBI*IS/DSB operations are not
+ adequately shooting down all use of the old entries. This
+ option enables the Linux kernel workaround for this erratum
+ which sends an IPI to the CPUs that are running the same ASID
+ as the one being invalidated.
+
endmenu
source "arch/arm/common/Kconfig"
diff --git a/trunk/arch/arm/boot/dts/armada-370-mirabox.dts b/trunk/arch/arm/boot/dts/armada-370-mirabox.dts
index dd0c57dd9f30..3234875824dc 100644
--- a/trunk/arch/arm/boot/dts/armada-370-mirabox.dts
+++ b/trunk/arch/arm/boot/dts/armada-370-mirabox.dts
@@ -54,7 +54,7 @@
};
mvsdio@d00d4000 {
- pinctrl-0 = <&sdio_pins2>;
+ pinctrl-0 = <&sdio_pins3>;
pinctrl-names = "default";
status = "okay";
/*
diff --git a/trunk/arch/arm/boot/dts/armada-370.dtsi b/trunk/arch/arm/boot/dts/armada-370.dtsi
index 8188d138020e..a195debb67d3 100644
--- a/trunk/arch/arm/boot/dts/armada-370.dtsi
+++ b/trunk/arch/arm/boot/dts/armada-370.dtsi
@@ -59,6 +59,12 @@
"mpp50", "mpp51", "mpp52";
marvell,function = "sd0";
};
+
+ sdio_pins3: sdio-pins3 {
+ marvell,pins = "mpp48", "mpp49", "mpp50",
+ "mpp51", "mpp52", "mpp53";
+ marvell,function = "sd0";
+ };
};
gpio0: gpio@d0018100 {
diff --git a/trunk/arch/arm/boot/dts/dbx5x0.dtsi b/trunk/arch/arm/boot/dts/dbx5x0.dtsi
index 9de93096601a..aaa63d0a8096 100644
--- a/trunk/arch/arm/boot/dts/dbx5x0.dtsi
+++ b/trunk/arch/arm/boot/dts/dbx5x0.dtsi
@@ -191,8 +191,8 @@
prcmu: prcmu@80157000 {
compatible = "stericsson,db8500-prcmu";
- reg = <0x80157000 0x1000>;
- reg-names = "prcmu";
+ reg = <0x80157000 0x1000>, <0x801b0000 0x8000>, <0x801b8000 0x1000>;
+ reg-names = "prcmu", "prcmu-tcpm", "prcmu-tcdm";
interrupts = <0 47 0x4>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/trunk/arch/arm/boot/dts/imx28-m28evk.dts b/trunk/arch/arm/boot/dts/imx28-m28evk.dts
index 6ce3d17c3a29..fd36e1cca104 100644
--- a/trunk/arch/arm/boot/dts/imx28-m28evk.dts
+++ b/trunk/arch/arm/boot/dts/imx28-m28evk.dts
@@ -152,7 +152,6 @@
i2c0: i2c@80058000 {
pinctrl-names = "default";
pinctrl-0 = <&i2c0_pins_a>;
- clock-frequency = <400000>;
status = "okay";
sgtl5000: codec@0a {
diff --git a/trunk/arch/arm/boot/dts/imx28-sps1.dts b/trunk/arch/arm/boot/dts/imx28-sps1.dts
index e6cde8aa7fff..6c6a5442800a 100644
--- a/trunk/arch/arm/boot/dts/imx28-sps1.dts
+++ b/trunk/arch/arm/boot/dts/imx28-sps1.dts
@@ -70,7 +70,6 @@
i2c0: i2c@80058000 {
pinctrl-names = "default";
pinctrl-0 = <&i2c0_pins_a>;
- clock-frequency = <400000>;
status = "okay";
rtc: rtc@51 {
diff --git a/trunk/arch/arm/boot/dts/imx6qdl.dtsi b/trunk/arch/arm/boot/dts/imx6qdl.dtsi
index 06ec460b4581..281a223591ff 100644
--- a/trunk/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/trunk/arch/arm/boot/dts/imx6qdl.dtsi
@@ -91,6 +91,7 @@
compatible = "arm,cortex-a9-twd-timer";
reg = <0x00a00600 0x20>;
interrupts = <1 13 0xf01>;
+ clocks = <&clks 15>;
};
L2: l2-cache@00a02000 {
diff --git a/trunk/arch/arm/boot/dts/kirkwood-goflexnet.dts b/trunk/arch/arm/boot/dts/kirkwood-goflexnet.dts
index bd83b8fc7c83..c3573be7b92c 100644
--- a/trunk/arch/arm/boot/dts/kirkwood-goflexnet.dts
+++ b/trunk/arch/arm/boot/dts/kirkwood-goflexnet.dts
@@ -77,6 +77,7 @@
};
nand@3000000 {
+ chip-delay = <40>;
status = "okay";
partition@0 {
diff --git a/trunk/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts b/trunk/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
index 93c3afbef9ee..3694e94f6e99 100644
--- a/trunk/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
+++ b/trunk/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
@@ -96,11 +96,11 @@
marvell,function = "gpio";
};
pmx_led_rebuild_brt_ctrl_1: pmx-led-rebuild-brt-ctrl-1 {
- marvell,pins = "mpp44";
+ marvell,pins = "mpp46";
marvell,function = "gpio";
};
pmx_led_rebuild_brt_ctrl_2: pmx-led-rebuild-brt-ctrl-2 {
- marvell,pins = "mpp45";
+ marvell,pins = "mpp47";
marvell,function = "gpio";
};
@@ -157,14 +157,14 @@
gpios = <&gpio0 16 0>;
linux,default-trigger = "default-on";
};
- health_led1 {
+ rebuild_led {
+ label = "status:white:rebuild_led";
+ gpios = <&gpio1 4 0>;
+ };
+ health_led {
label = "status:red:health_led";
gpios = <&gpio1 5 0>;
};
- health_led2 {
- label = "status:white:health_led";
- gpios = <&gpio1 4 0>;
- };
backup_led {
label = "status:blue:backup_led";
gpios = <&gpio0 15 0>;
diff --git a/trunk/arch/arm/boot/dts/orion5x.dtsi b/trunk/arch/arm/boot/dts/orion5x.dtsi
index 8aad00f81ed9..f7bec3b1ba32 100644
--- a/trunk/arch/arm/boot/dts/orion5x.dtsi
+++ b/trunk/arch/arm/boot/dts/orion5x.dtsi
@@ -13,6 +13,9 @@
compatible = "marvell,orion5x";
interrupt-parent = <&intc>;
+ aliases {
+ gpio0 = &gpio0;
+ };
intc: interrupt-controller {
compatible = "marvell,orion-intc", "marvell,intc";
interrupt-controller;
@@ -32,7 +35,9 @@
#gpio-cells = <2>;
gpio-controller;
reg = <0x10100 0x40>;
- ngpio = <32>;
+ ngpios = <32>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
interrupts = <6>, <7>, <8>, <9>;
};
@@ -91,7 +96,7 @@
reg = <0x90000 0x10000>,
<0xf2200000 0x800>;
reg-names = "regs", "sram";
- interrupts = <22>;
+ interrupts = <28>;
status = "okay";
};
};
diff --git a/trunk/arch/arm/boot/dts/vt8500-bv07.dts b/trunk/arch/arm/boot/dts/vt8500-bv07.dts
index 567cf4e8ab84..877b33afa7ed 100644
--- a/trunk/arch/arm/boot/dts/vt8500-bv07.dts
+++ b/trunk/arch/arm/boot/dts/vt8500-bv07.dts
@@ -11,26 +11,22 @@
/ {
model = "Benign BV07 Netbook";
+};
- /*
- * Display node is based on Sascha Hauer's patch on dri-devel.
- * Added a bpp property to calculate the size of the framebuffer
- * until the binding is formalized.
- */
- display: display@0 {
- modes {
- mode0: mode@0 {
- hactive = <800>;
- vactive = <480>;
- hback-porch = <88>;
- hfront-porch = <40>;
- hsync-len = <0>;
- vback-porch = <32>;
- vfront-porch = <11>;
- vsync-len = <1>;
- clock = <0>; /* unused but required */
- bpp = <16>; /* non-standard but required */
- };
+&fb {
+ bits-per-pixel = <16>;
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: 800x480 {
+ clock-frequency = <0>; /* unused but required */
+ hactive = <800>;
+ vactive = <480>;
+ hfront-porch = <40>;
+ hback-porch = <88>;
+ hsync-len = <0>;
+ vback-porch = <32>;
+ vfront-porch = <11>;
+ vsync-len = <1>;
};
};
};
diff --git a/trunk/arch/arm/boot/dts/vt8500.dtsi b/trunk/arch/arm/boot/dts/vt8500.dtsi
index cf31ced46602..68c8dc644383 100644
--- a/trunk/arch/arm/boot/dts/vt8500.dtsi
+++ b/trunk/arch/arm/boot/dts/vt8500.dtsi
@@ -98,12 +98,10 @@
interrupts = <43>;
};
- fb@d800e400 {
+ fb: fb@d8050800 {
compatible = "via,vt8500-fb";
reg = <0xd800e400 0x400>;
interrupts = <12>;
- display = <&display>;
- default-mode = <&mode0>;
};
ge_rops@d8050400 {
diff --git a/trunk/arch/arm/boot/dts/wm8505-ref.dts b/trunk/arch/arm/boot/dts/wm8505-ref.dts
index fd4e248074c6..edd2cec3d37f 100644
--- a/trunk/arch/arm/boot/dts/wm8505-ref.dts
+++ b/trunk/arch/arm/boot/dts/wm8505-ref.dts
@@ -11,26 +11,22 @@
/ {
model = "Wondermedia WM8505 Netbook";
+};
- /*
- * Display node is based on Sascha Hauer's patch on dri-devel.
- * Added a bpp property to calculate the size of the framebuffer
- * until the binding is formalized.
- */
- display: display@0 {
- modes {
- mode0: mode@0 {
- hactive = <800>;
- vactive = <480>;
- hback-porch = <88>;
- hfront-porch = <40>;
- hsync-len = <0>;
- vback-porch = <32>;
- vfront-porch = <11>;
- vsync-len = <1>;
- clock = <0>; /* unused but required */
- bpp = <32>; /* non-standard but required */
- };
+&fb {
+ bits-per-pixel = <32>;
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: 800x480 {
+ clock-frequency = <0>; /* unused but required */
+ hactive = <800>;
+ vactive = <480>;
+ hfront-porch = <40>;
+ hback-porch = <88>;
+ hsync-len = <0>;
+ vback-porch = <32>;
+ vfront-porch = <11>;
+ vsync-len = <1>;
};
};
};
diff --git a/trunk/arch/arm/boot/dts/wm8505.dtsi b/trunk/arch/arm/boot/dts/wm8505.dtsi
index e74a1c0fb9a2..bcf668d31b28 100644
--- a/trunk/arch/arm/boot/dts/wm8505.dtsi
+++ b/trunk/arch/arm/boot/dts/wm8505.dtsi
@@ -128,11 +128,9 @@
interrupts = <0>;
};
- fb@d8050800 {
+ fb: fb@d8050800 {
compatible = "wm,wm8505-fb";
reg = <0xd8050800 0x200>;
- display = <&display>;
- default-mode = <&mode0>;
};
ge_rops@d8050400 {
diff --git a/trunk/arch/arm/boot/dts/wm8650-mid.dts b/trunk/arch/arm/boot/dts/wm8650-mid.dts
index cefd938f842f..61671a0d9ede 100644
--- a/trunk/arch/arm/boot/dts/wm8650-mid.dts
+++ b/trunk/arch/arm/boot/dts/wm8650-mid.dts
@@ -11,26 +11,24 @@
/ {
model = "Wondermedia WM8650-MID Tablet";
+};
+
+&fb {
+ bits-per-pixel = <16>;
- /*
- * Display node is based on Sascha Hauer's patch on dri-devel.
- * Added a bpp property to calculate the size of the framebuffer
- * until the binding is formalized.
- */
- display: display@0 {
- modes {
- mode0: mode@0 {
- hactive = <800>;
- vactive = <480>;
- hback-porch = <88>;
- hfront-porch = <40>;
- hsync-len = <0>;
- vback-porch = <32>;
- vfront-porch = <11>;
- vsync-len = <1>;
- clock = <0>; /* unused but required */
- bpp = <16>; /* non-standard but required */
- };
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: 800x480 {
+ clock-frequency = <0>; /* unused but required */
+ hactive = <800>;
+ vactive = <480>;
+ hfront-porch = <40>;
+ hback-porch = <88>;
+ hsync-len = <0>;
+ vback-porch = <32>;
+ vfront-porch = <11>;
+ vsync-len = <1>;
};
};
};
+
diff --git a/trunk/arch/arm/boot/dts/wm8650.dtsi b/trunk/arch/arm/boot/dts/wm8650.dtsi
index db3c0a12e052..9313407bbc30 100644
--- a/trunk/arch/arm/boot/dts/wm8650.dtsi
+++ b/trunk/arch/arm/boot/dts/wm8650.dtsi
@@ -128,11 +128,9 @@
interrupts = <43>;
};
- fb@d8050800 {
+ fb: fb@d8050800 {
compatible = "wm,wm8505-fb";
reg = <0xd8050800 0x200>;
- display = <&display>;
- default-mode = <&mode0>;
};
ge_rops@d8050400 {
diff --git a/trunk/arch/arm/boot/dts/wm8850-w70v2.dts b/trunk/arch/arm/boot/dts/wm8850-w70v2.dts
index fcc660c89540..32d22532cd6c 100644
--- a/trunk/arch/arm/boot/dts/wm8850-w70v2.dts
+++ b/trunk/arch/arm/boot/dts/wm8850-w70v2.dts
@@ -15,28 +15,6 @@
/ {
model = "Wondermedia WM8850-W70v2 Tablet";
- /*
- * Display node is based on Sascha Hauer's patch on dri-devel.
- * Added a bpp property to calculate the size of the framebuffer
- * until the binding is formalized.
- */
- display: display@0 {
- modes {
- mode0: mode@0 {
- hactive = <800>;
- vactive = <480>;
- hback-porch = <88>;
- hfront-porch = <40>;
- hsync-len = <0>;
- vback-porch = <32>;
- vfront-porch = <11>;
- vsync-len = <1>;
- clock = <0>; /* unused but required */
- bpp = <16>; /* non-standard but required */
- };
- };
- };
-
backlight {
compatible = "pwm-backlight";
pwms = <&pwm 0 50000 1>; /* duty inverted */
@@ -45,3 +23,21 @@
default-brightness-level = <5>;
};
};
+
+&fb {
+ bits-per-pixel = <16>;
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: 800x480 {
+ clock-frequency = <0>; /* unused but required */
+ hactive = <800>;
+ vactive = <480>;
+ hfront-porch = <40>;
+ hback-porch = <88>;
+ hsync-len = <0>;
+ vback-porch = <32>;
+ vfront-porch = <11>;
+ vsync-len = <1>;
+ };
+ };
+};
diff --git a/trunk/arch/arm/boot/dts/wm8850.dtsi b/trunk/arch/arm/boot/dts/wm8850.dtsi
index e8cbfdc87bba..7149cd13e3b9 100644
--- a/trunk/arch/arm/boot/dts/wm8850.dtsi
+++ b/trunk/arch/arm/boot/dts/wm8850.dtsi
@@ -135,11 +135,9 @@
};
};
- fb@d8051700 {
+ fb: fb@d8051700 {
compatible = "wm,wm8505-fb";
reg = <0xd8051700 0x200>;
- display = <&display>;
- default-mode = <&mode0>;
};
ge_rops@d8050400 {
diff --git a/trunk/arch/arm/include/asm/delay.h b/trunk/arch/arm/include/asm/delay.h
index 720799fd3a81..dff714d886d5 100644
--- a/trunk/arch/arm/include/asm/delay.h
+++ b/trunk/arch/arm/include/asm/delay.h
@@ -24,7 +24,7 @@ extern struct arm_delay_ops {
void (*delay)(unsigned long);
void (*const_udelay)(unsigned long);
void (*udelay)(unsigned long);
- bool const_clock;
+ unsigned long ticks_per_jiffy;
} arm_delay_ops;
#define __delay(n) arm_delay_ops.delay(n)
diff --git a/trunk/arch/arm/include/asm/glue-cache.h b/trunk/arch/arm/include/asm/glue-cache.h
index cca9f15704ed..ea289e1435e7 100644
--- a/trunk/arch/arm/include/asm/glue-cache.h
+++ b/trunk/arch/arm/include/asm/glue-cache.h
@@ -19,14 +19,6 @@
#undef _CACHE
#undef MULTI_CACHE
-#if defined(CONFIG_CPU_CACHE_V3)
-# ifdef _CACHE
-# define MULTI_CACHE 1
-# else
-# define _CACHE v3
-# endif
-#endif
-
#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
# define MULTI_CACHE 1
diff --git a/trunk/arch/arm/include/asm/hardware/iop3xx.h b/trunk/arch/arm/include/asm/hardware/iop3xx.h
index 02fe2fbe2477..ed94b1a366ae 100644
--- a/trunk/arch/arm/include/asm/hardware/iop3xx.h
+++ b/trunk/arch/arm/include/asm/hardware/iop3xx.h
@@ -37,7 +37,7 @@ extern int iop3xx_get_init_atu(void);
* IOP3XX processor registers
*/
#define IOP3XX_PERIPHERAL_PHYS_BASE 0xffffe000
-#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfeffe000
+#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfedfe000
#define IOP3XX_PERIPHERAL_SIZE 0x00002000
#define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\
IOP3XX_PERIPHERAL_SIZE - 1)
diff --git a/trunk/arch/arm/include/asm/highmem.h b/trunk/arch/arm/include/asm/highmem.h
index 8c5e828f484d..91b99abe7a95 100644
--- a/trunk/arch/arm/include/asm/highmem.h
+++ b/trunk/arch/arm/include/asm/highmem.h
@@ -41,6 +41,13 @@ extern void kunmap_high(struct page *page);
#endif
#endif
+/*
+ * Needed to be able to broadcast the TLB invalidation for kmap.
+ */
+#ifdef CONFIG_ARM_ERRATA_798181
+#undef ARCH_NEEDS_KMAP_HIGH_GET
+#endif
+
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
extern void *kmap_high_get(struct page *page);
#else
diff --git a/trunk/arch/arm/include/asm/mmu_context.h b/trunk/arch/arm/include/asm/mmu_context.h
index 863a6611323c..a7b85e0d0cc1 100644
--- a/trunk/arch/arm/include/asm/mmu_context.h
+++ b/trunk/arch/arm/include/asm/mmu_context.h
@@ -27,6 +27,8 @@ void __check_vmalloc_seq(struct mm_struct *mm);
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
+DECLARE_PER_CPU(atomic64_t, active_asids);
+
#else /* !CONFIG_CPU_HAS_ASID */
#ifdef CONFIG_MMU
diff --git a/trunk/arch/arm/include/asm/pgtable-3level.h b/trunk/arch/arm/include/asm/pgtable-3level.h
index 6ef8afd1b64c..86b8fe398b95 100644
--- a/trunk/arch/arm/include/asm/pgtable-3level.h
+++ b/trunk/arch/arm/include/asm/pgtable-3level.h
@@ -111,7 +111,7 @@
#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
-#define L_PTE_S2_RDWR (_AT(pteval_t, 2) << 6) /* HAP[2:1] */
+#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
/*
* Hyp-mode PL2 PTE definitions for LPAE.
diff --git a/trunk/arch/arm/include/asm/tlbflush.h b/trunk/arch/arm/include/asm/tlbflush.h
index 4db8c8820f0d..ab865e65a84c 100644
--- a/trunk/arch/arm/include/asm/tlbflush.h
+++ b/trunk/arch/arm/include/asm/tlbflush.h
@@ -14,7 +14,6 @@
#include
-#define TLB_V3_PAGE (1 << 0)
#define TLB_V4_U_PAGE (1 << 1)
#define TLB_V4_D_PAGE (1 << 2)
#define TLB_V4_I_PAGE (1 << 3)
@@ -22,7 +21,6 @@
#define TLB_V6_D_PAGE (1 << 5)
#define TLB_V6_I_PAGE (1 << 6)
-#define TLB_V3_FULL (1 << 8)
#define TLB_V4_U_FULL (1 << 9)
#define TLB_V4_D_FULL (1 << 10)
#define TLB_V4_I_FULL (1 << 11)
@@ -52,7 +50,6 @@
* =============
*
* We have the following to choose from:
- * v3 - ARMv3
* v4 - ARMv4 without write buffer
* v4wb - ARMv4 with write buffer without I TLB flush entry instruction
* v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
@@ -330,7 +327,6 @@ static inline void local_flush_tlb_all(void)
if (tlb_flag(TLB_WB))
dsb();
- tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
@@ -351,9 +347,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
if (tlb_flag(TLB_WB))
dsb();
- if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
+ if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
- tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
@@ -385,9 +380,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
if (tlb_flag(TLB_WB))
dsb();
- if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
+ if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
- tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr);
tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr);
tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr);
@@ -418,7 +412,6 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
if (tlb_flag(TLB_WB))
dsb();
- tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr);
tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
@@ -450,6 +443,21 @@ static inline void local_flush_bp_all(void)
isb();
}
+#ifdef CONFIG_ARM_ERRATA_798181
+static inline void dummy_flush_tlb_a15_erratum(void)
+{
+ /*
+ * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
+ */
+ asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+ dsb();
+}
+#else
+static inline void dummy_flush_tlb_a15_erratum(void)
+{
+}
+#endif
+
/*
* flush_pmd_entry
*
diff --git a/trunk/arch/arm/kernel/entry-common.S b/trunk/arch/arm/kernel/entry-common.S
index 3248cde504ed..fefd7f971437 100644
--- a/trunk/arch/arm/kernel/entry-common.S
+++ b/trunk/arch/arm/kernel/entry-common.S
@@ -276,7 +276,13 @@ ENDPROC(ftrace_graph_caller_old)
*/
.macro mcount_enter
+/*
+ * This pad compensates for the push {lr} at the call site. Note that we are
+ * unable to unwind through a function which does not otherwise save its lr.
+ */
+ UNWIND(.pad #4)
stmdb sp!, {r0-r3, lr}
+ UNWIND(.save {r0-r3, lr})
.endm
.macro mcount_get_lr reg
@@ -289,6 +295,7 @@ ENDPROC(ftrace_graph_caller_old)
.endm
ENTRY(__gnu_mcount_nc)
+UNWIND(.fnstart)
#ifdef CONFIG_DYNAMIC_FTRACE
mov ip, lr
ldmia sp!, {lr}
@@ -296,17 +303,22 @@ ENTRY(__gnu_mcount_nc)
#else
__mcount
#endif
+UNWIND(.fnend)
ENDPROC(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
+UNWIND(.fnstart)
__ftrace_caller
+UNWIND(.fnend)
ENDPROC(ftrace_caller)
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
+UNWIND(.fnstart)
__ftrace_graph_caller
+UNWIND(.fnend)
ENDPROC(ftrace_graph_caller)
#endif
diff --git a/trunk/arch/arm/kernel/head.S b/trunk/arch/arm/kernel/head.S
index e0eb9a1cae77..8bac553fe213 100644
--- a/trunk/arch/arm/kernel/head.S
+++ b/trunk/arch/arm/kernel/head.S
@@ -267,7 +267,7 @@ __create_page_tables:
addne r6, r6, #1 << SECTION_SHIFT
strne r6, [r3]
-#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
+#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
sub r4, r4, #4 @ Fixup page table pointer
@ for 64-bit descriptors
#endif
diff --git a/trunk/arch/arm/kernel/hw_breakpoint.c b/trunk/arch/arm/kernel/hw_breakpoint.c
index 96093b75ab90..1fd749ee4a1b 100644
--- a/trunk/arch/arm/kernel/hw_breakpoint.c
+++ b/trunk/arch/arm/kernel/hw_breakpoint.c
@@ -966,7 +966,7 @@ static void reset_ctrl_regs(void *unused)
}
if (err) {
- pr_warning("CPU %d debug is powered down!\n", cpu);
+ pr_warn_once("CPU %d debug is powered down!\n", cpu);
cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
return;
}
@@ -987,7 +987,7 @@ static void reset_ctrl_regs(void *unused)
isb();
if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
- pr_warning("CPU %d failed to disable vector catch\n", cpu);
+ pr_warn_once("CPU %d failed to disable vector catch\n", cpu);
return;
}
@@ -1007,7 +1007,7 @@ static void reset_ctrl_regs(void *unused)
}
if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
- pr_warning("CPU %d failed to clear debug register pairs\n", cpu);
+ pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu);
return;
}
@@ -1043,7 +1043,7 @@ static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = {
+static struct notifier_block dbg_cpu_pm_nb = {
.notifier_call = dbg_cpu_pm_notify,
};
diff --git a/trunk/arch/arm/kernel/perf_event.c b/trunk/arch/arm/kernel/perf_event.c
index 146157dfe27c..8c3094d0f7b7 100644
--- a/trunk/arch/arm/kernel/perf_event.c
+++ b/trunk/arch/arm/kernel/perf_event.c
@@ -253,7 +253,10 @@ validate_event(struct pmu_hw_events *hw_events,
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct pmu *leader_pmu = event->group_leader->pmu;
- if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
+ if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+ return 1;
+
+ if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
return 1;
return armpmu->get_event_idx(hw_events, event) >= 0;
diff --git a/trunk/arch/arm/kernel/sched_clock.c b/trunk/arch/arm/kernel/sched_clock.c
index bd6f56b9ec21..59d2adb764a9 100644
--- a/trunk/arch/arm/kernel/sched_clock.c
+++ b/trunk/arch/arm/kernel/sched_clock.c
@@ -45,12 +45,12 @@ static u32 notrace jiffy_sched_clock_read(void)
static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
-static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
+static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
return (cyc * mult) >> shift;
}
-static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
+static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
{
u64 epoch_ns;
u32 epoch_cyc;
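The notrace annotations above only keep ftrace out of these helpers; the conversion itself stays ns = (cyc * mult) >> shift. A standalone sketch with made-up mult/shift values for a 24 MHz counter (not taken from the patch) shows the fixed-point arithmetic:

    /* Illustration of ns = (cyc * mult) >> shift with made-up values. */
    #include <stdint.h>
    #include <stdio.h>

    static inline uint64_t cyc_to_ns(uint64_t cyc, uint32_t mult, uint32_t shift)
    {
        return (cyc * mult) >> shift;
    }

    int main(void)
    {
        /* For a 24 MHz counter: ns per tick = 1e9 / 24e6 = 41.666...
         * With shift = 24, mult = round(41.666... * 2^24) = 699050667. */
        uint32_t mult = 699050667, shift = 24;
        uint64_t one_second = 24000000;     /* ticks in one second */

        printf("%llu ticks -> %llu ns\n",
               (unsigned long long)one_second,
               (unsigned long long)cyc_to_ns(one_second, mult, shift));
        return 0;
    }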
diff --git a/trunk/arch/arm/kernel/setup.c b/trunk/arch/arm/kernel/setup.c
index 3f6cbb2e3eda..234e339196c0 100644
--- a/trunk/arch/arm/kernel/setup.c
+++ b/trunk/arch/arm/kernel/setup.c
@@ -56,7 +56,6 @@
#include
#include "atags.h"
-#include "tcm.h"
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
@@ -353,6 +352,23 @@ void __init early_print(const char *str, ...)
printk("%s", buf);
}
+static void __init cpuid_init_hwcaps(void)
+{
+ unsigned int divide_instrs;
+
+ if (cpu_architecture() < CPU_ARCH_ARMv7)
+ return;
+
+ divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;
+
+ switch (divide_instrs) {
+ case 2:
+ elf_hwcap |= HWCAP_IDIVA;
+ case 1:
+ elf_hwcap |= HWCAP_IDIVT;
+ }
+}
+
static void __init feat_v6_fixup(void)
{
int id = read_cpuid_id();
@@ -483,8 +499,11 @@ static void __init setup_processor(void)
snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
list->elf_name, ENDIANNESS);
elf_hwcap = list->elf_hwcap;
+
+ cpuid_init_hwcaps();
+
#ifndef CONFIG_ARM_THUMB
- elf_hwcap &= ~HWCAP_THUMB;
+ elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
feat_v6_fixup();
@@ -524,7 +543,7 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
size -= start & ~PAGE_MASK;
bank->start = PAGE_ALIGN(start);
-#ifndef CONFIG_LPAE
+#ifndef CONFIG_ARM_LPAE
if (bank->start + size < bank->start) {
printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
"32-bit physical address space\n", (long long)start);
@@ -778,8 +797,6 @@ void __init setup_arch(char **cmdline_p)
reserve_crashkernel();
- tcm_init();
-
#ifdef CONFIG_MULTI_IRQ_HANDLER
handle_arch_irq = mdesc->handle_irq;
#endif
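The cpuid_init_hwcaps() helper added above decodes bits [27:24] of ID_ISAR0 and relies on deliberate switch fall-through: a field value of 2 means SDIV/UDIV exist in both ARM and Thumb state, so the ARM-state capability is set and execution falls into the Thumb-state case; a value of 1 means Thumb only. A hypothetical user-space decode of the same field (stand-in flag values, not the real HWCAP bits):

    #include <stdint.h>
    #include <stdio.h>

    #define FLAG_IDIVA 0x1      /* stand-in for the ARM-state divide hwcap */
    #define FLAG_IDIVT 0x2      /* stand-in for the Thumb-state divide hwcap */

    int main(void)
    {
        uint32_t id_isar0 = 0x02101110;     /* made-up register value */
        uint32_t divide = (id_isar0 & 0x0f000000) >> 24;
        unsigned long caps = 0;

        switch (divide) {
        case 2:                 /* SDIV/UDIV in ARM state ... */
            caps |= FLAG_IDIVA;
            /* fall through: ARM-state divide implies Thumb divide */
        case 1:                 /* ... and in Thumb state */
            caps |= FLAG_IDIVT;
        }

        printf("divide field = %u, caps = %#lx\n", (unsigned)divide, caps);
        return 0;
    }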
diff --git a/trunk/arch/arm/kernel/smp.c b/trunk/arch/arm/kernel/smp.c
index 79078edbb9bc..1f2ccccaf009 100644
--- a/trunk/arch/arm/kernel/smp.c
+++ b/trunk/arch/arm/kernel/smp.c
@@ -673,9 +673,6 @@ static int cpufreq_callback(struct notifier_block *nb,
if (freq->flags & CPUFREQ_CONST_LOOPS)
return NOTIFY_OK;
- if (arm_delay_ops.const_clock)
- return NOTIFY_OK;
-
if (!per_cpu(l_p_j_ref, cpu)) {
per_cpu(l_p_j_ref, cpu) =
per_cpu(cpu_data, cpu).loops_per_jiffy;
diff --git a/trunk/arch/arm/kernel/smp_tlb.c b/trunk/arch/arm/kernel/smp_tlb.c
index bd0300531399..e82e1d248772 100644
--- a/trunk/arch/arm/kernel/smp_tlb.c
+++ b/trunk/arch/arm/kernel/smp_tlb.c
@@ -12,6 +12,7 @@
#include
#include
+#include
/**********************************************************************/
@@ -69,12 +70,72 @@ static inline void ipi_flush_bp_all(void *ignored)
local_flush_bp_all();
}
+#ifdef CONFIG_ARM_ERRATA_798181
+static int erratum_a15_798181(void)
+{
+ unsigned int midr = read_cpuid_id();
+
+ /* Cortex-A15 r0p0..r3p2 affected */
+ if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
+ return 0;
+ return 1;
+}
+#else
+static int erratum_a15_798181(void)
+{
+ return 0;
+}
+#endif
+
+static void ipi_flush_tlb_a15_erratum(void *arg)
+{
+ dmb();
+}
+
+static void broadcast_tlb_a15_erratum(void)
+{
+ if (!erratum_a15_798181())
+ return;
+
+ dummy_flush_tlb_a15_erratum();
+ smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum,
+ NULL, 1);
+}
+
+static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
+{
+ int cpu;
+ cpumask_t mask = { CPU_BITS_NONE };
+
+ if (!erratum_a15_798181())
+ return;
+
+ dummy_flush_tlb_a15_erratum();
+ for_each_online_cpu(cpu) {
+ if (cpu == smp_processor_id())
+ continue;
+ /*
+ * We only need to send an IPI if the other CPUs are running
+ * the same ASID as the one being invalidated. There is no
+ * need for locking around the active_asids check since the
+ * switch_mm() function has at least one dmb() (as required by
+ * this workaround) in case a context switch happens on
+ * another CPU after the condition below.
+ */
+ if (atomic64_read(&mm->context.id) ==
+ atomic64_read(&per_cpu(active_asids, cpu)))
+ cpumask_set_cpu(cpu, &mask);
+ }
+ smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
+}
+
void flush_tlb_all(void)
{
if (tlb_ops_need_broadcast())
on_each_cpu(ipi_flush_tlb_all, NULL, 1);
else
local_flush_tlb_all();
+ broadcast_tlb_a15_erratum();
}
void flush_tlb_mm(struct mm_struct *mm)
@@ -83,6 +144,7 @@ void flush_tlb_mm(struct mm_struct *mm)
on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
else
local_flush_tlb_mm(mm);
+ broadcast_tlb_mm_a15_erratum(mm);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
@@ -95,6 +157,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
&ta, 1);
} else
local_flush_tlb_page(vma, uaddr);
+ broadcast_tlb_mm_a15_erratum(vma->vm_mm);
}
void flush_tlb_kernel_page(unsigned long kaddr)
@@ -105,6 +168,7 @@ void flush_tlb_kernel_page(unsigned long kaddr)
on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
} else
local_flush_tlb_kernel_page(kaddr);
+ broadcast_tlb_a15_erratum();
}
void flush_tlb_range(struct vm_area_struct *vma,
@@ -119,6 +183,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
&ta, 1);
} else
local_flush_tlb_range(vma, start, end);
+ broadcast_tlb_mm_a15_erratum(vma->vm_mm);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -130,6 +195,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
} else
local_flush_tlb_kernel_range(start, end);
+ broadcast_tlb_a15_erratum();
}
void flush_bp_all(void)
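The erratum_a15_798181() check above matches Cortex-A15 MIDR values by masking out the variant (bits [23:20]) and revision (bits [3:0]) fields and then bounding the value at r3p2. The following user-space sketch uses hypothetical MIDR values, not ones read from hardware, to show which parts the check accepts:

    #include <stdint.h>
    #include <stdio.h>

    static int affected_by_798181(uint32_t midr)
    {
        /* Cortex-A15: implementer 0x41, part number 0xC0F; variant and
         * revision are masked out, then r0p0..r3p2 are matched. */
        if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
            return 0;
        return 1;
    }

    int main(void)
    {
        uint32_t r2p1 = 0x412fc0f1;  /* Cortex-A15 r2p1: affected */
        uint32_t r4p0 = 0x414fc0f0;  /* Cortex-A15 r4p0: not affected */
        uint32_t a9   = 0x413fc090;  /* Cortex-A9: different part number */

        printf("r2p1: %d, r4p0: %d, A9: %d\n",
               affected_by_798181(r2p1),
               affected_by_798181(r4p0),
               affected_by_798181(a9));
        return 0;
    }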
diff --git a/trunk/arch/arm/kernel/tcm.c b/trunk/arch/arm/kernel/tcm.c
index 30ae6bb4a310..f50f19e5c138 100644
--- a/trunk/arch/arm/kernel/tcm.c
+++ b/trunk/arch/arm/kernel/tcm.c
@@ -17,7 +17,6 @@
#include
#include
#include
-#include "tcm.h"
static struct gen_pool *tcm_pool;
static bool dtcm_present;
diff --git a/trunk/arch/arm/kvm/arm.c b/trunk/arch/arm/kvm/arm.c
index 5a936988eb24..c1fe498983ac 100644
--- a/trunk/arch/arm/kvm/arm.c
+++ b/trunk/arch/arm/kvm/arm.c
@@ -201,6 +201,7 @@ int kvm_dev_ioctl_check_extension(long ext)
break;
case KVM_CAP_ARM_SET_DEVICE_ADDR:
r = 1;
+ break;
case KVM_CAP_NR_VCPUS:
r = num_online_cpus();
break;
diff --git a/trunk/arch/arm/kvm/coproc.c b/trunk/arch/arm/kvm/coproc.c
index 4ea9a982269c..7bed7556077a 100644
--- a/trunk/arch/arm/kvm/coproc.c
+++ b/trunk/arch/arm/kvm/coproc.c
@@ -79,11 +79,11 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
u32 val;
int cpu;
- cpu = get_cpu();
-
if (!p->is_write)
return read_from_write_only(vcpu, p);
+ cpu = get_cpu();
+
cpumask_setall(&vcpu->arch.require_dcache_flush);
cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
diff --git a/trunk/arch/arm/kvm/vgic.c b/trunk/arch/arm/kvm/vgic.c
index c9a17316e9fe..0e4cfe123b38 100644
--- a/trunk/arch/arm/kvm/vgic.c
+++ b/trunk/arch/arm/kvm/vgic.c
@@ -883,8 +883,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
lr, irq, vgic_cpu->vgic_lr[lr]);
BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
-
- goto out;
+ return true;
}
/* Try to use another LR for this interrupt */
@@ -898,7 +897,6 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
vgic_cpu->vgic_irq_lr_map[irq] = lr;
set_bit(lr, vgic_cpu->lr_used);
-out:
if (!vgic_irq_is_edge(vcpu, irq))
vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;
@@ -1018,21 +1016,6 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);
- /*
- * We do not need to take the distributor lock here, since the only
- * action we perform is clearing the irq_active_bit for an EOIed
- * level interrupt. There is a potential race with
- * the queuing of an interrupt in __kvm_vgic_flush_hwstate(), where we
- * check if the interrupt is already active. Two possibilities:
- *
- * - The queuing is occurring on the same vcpu: cannot happen,
- * as we're already in the context of this vcpu, and
- * executing the handler
- * - The interrupt has been migrated to another vcpu, and we
- * ignore this interrupt for this run. Big deal. It is still
- * pending though, and will get considered when this vcpu
- * exits.
- */
if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
/*
* Some level interrupts have been EOIed. Clear their
@@ -1054,6 +1037,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
} else {
vgic_cpu_irq_clear(vcpu, irq);
}
+
+ /*
+ * Despite being EOIed, the LR may not have
+ * been marked as empty.
+ */
+ set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
+ vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
}
}
@@ -1064,9 +1054,8 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
}
/*
- * Sync back the VGIC state after a guest run. We do not really touch
- * the distributor here (the irq_pending_on_cpu bit is safe to set),
- * so there is no need for taking its lock.
+ * Sync back the VGIC state after a guest run. The distributor lock is
+ * needed so we don't get preempted in the middle of the state processing.
*/
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
@@ -1112,10 +1101,14 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
if (!irqchip_in_kernel(vcpu->kvm))
return;
+ spin_lock(&dist->lock);
__kvm_vgic_sync_hwstate(vcpu);
+ spin_unlock(&dist->lock);
}
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
diff --git a/trunk/arch/arm/lib/delay.c b/trunk/arch/arm/lib/delay.c
index 6b93f6a1a3c7..64dbfa57204a 100644
--- a/trunk/arch/arm/lib/delay.c
+++ b/trunk/arch/arm/lib/delay.c
@@ -58,7 +58,7 @@ static void __timer_delay(unsigned long cycles)
static void __timer_const_udelay(unsigned long xloops)
{
unsigned long long loops = xloops;
- loops *= loops_per_jiffy;
+ loops *= arm_delay_ops.ticks_per_jiffy;
__timer_delay(loops >> UDELAY_SHIFT);
}
@@ -73,11 +73,13 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
pr_info("Switching to timer-based delay loop\n");
delay_timer = timer;
lpj_fine = timer->freq / HZ;
- loops_per_jiffy = lpj_fine;
+
+ /* cpufreq may scale loops_per_jiffy, so keep a private copy */
+ arm_delay_ops.ticks_per_jiffy = lpj_fine;
arm_delay_ops.delay = __timer_delay;
arm_delay_ops.const_udelay = __timer_const_udelay;
arm_delay_ops.udelay = __timer_udelay;
- arm_delay_ops.const_clock = true;
+
delay_calibrated = true;
} else {
pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
diff --git a/trunk/arch/arm/mach-cns3xxx/core.c b/trunk/arch/arm/mach-cns3xxx/core.c
index e698f26cc0cb..52e4bb5cf12d 100644
--- a/trunk/arch/arm/mach-cns3xxx/core.c
+++ b/trunk/arch/arm/mach-cns3xxx/core.c
@@ -22,19 +22,9 @@
static struct map_desc cns3xxx_io_desc[] __initdata = {
{
- .virtual = CNS3XXX_TC11MP_TWD_BASE_VIRT,
- .pfn = __phys_to_pfn(CNS3XXX_TC11MP_TWD_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT,
- .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_CPU_BASE),
- .length = SZ_4K,
- .type = MT_DEVICE,
- }, {
- .virtual = CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT,
- .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_DIST_BASE),
- .length = SZ_4K,
+ .virtual = CNS3XXX_TC11MP_SCU_BASE_VIRT,
+ .pfn = __phys_to_pfn(CNS3XXX_TC11MP_SCU_BASE),
+ .length = SZ_8K,
.type = MT_DEVICE,
}, {
.virtual = CNS3XXX_TIMER1_2_3_BASE_VIRT,
diff --git a/trunk/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h b/trunk/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h
index 191c8e57f289..b1021aafa481 100644
--- a/trunk/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h
+++ b/trunk/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h
@@ -94,10 +94,10 @@
#define RTC_INTR_STS_OFFSET 0x34
#define CNS3XXX_MISC_BASE 0x76000000 /* Misc Control */
-#define CNS3XXX_MISC_BASE_VIRT 0xFFF07000 /* Misc Control */
+#define CNS3XXX_MISC_BASE_VIRT 0xFB000000 /* Misc Control */
#define CNS3XXX_PM_BASE 0x77000000 /* Power Management Control */
-#define CNS3XXX_PM_BASE_VIRT 0xFFF08000
+#define CNS3XXX_PM_BASE_VIRT 0xFB001000
#define PM_CLK_GATE_OFFSET 0x00
#define PM_SOFT_RST_OFFSET 0x04
@@ -109,7 +109,7 @@
#define PM_PLL_HM_PD_OFFSET 0x1C
#define CNS3XXX_UART0_BASE 0x78000000 /* UART 0 */
-#define CNS3XXX_UART0_BASE_VIRT 0xFFF09000
+#define CNS3XXX_UART0_BASE_VIRT 0xFB002000
#define CNS3XXX_UART1_BASE 0x78400000 /* UART 1 */
#define CNS3XXX_UART1_BASE_VIRT 0xFFF0A000
@@ -130,7 +130,7 @@
#define CNS3XXX_I2S_BASE_VIRT 0xFFF10000
#define CNS3XXX_TIMER1_2_3_BASE 0x7C800000 /* Timer */
-#define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFFF10800
+#define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFB003000
#define TIMER1_COUNTER_OFFSET 0x00
#define TIMER1_AUTO_RELOAD_OFFSET 0x04
@@ -227,16 +227,16 @@
* Testchip peripheral and fpga gic regions
*/
#define CNS3XXX_TC11MP_SCU_BASE 0x90000000 /* IRQ, Test chip */
-#define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFF000000
+#define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFB004000
#define CNS3XXX_TC11MP_GIC_CPU_BASE 0x90000100 /* Test chip interrupt controller CPU interface */
-#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT 0xFF000100
+#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x100)
#define CNS3XXX_TC11MP_TWD_BASE 0x90000600
-#define CNS3XXX_TC11MP_TWD_BASE_VIRT 0xFF000600
+#define CNS3XXX_TC11MP_TWD_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x600)
#define CNS3XXX_TC11MP_GIC_DIST_BASE 0x90001000 /* Test chip interrupt controller distributor */
-#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT 0xFF001000
+#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x1000)
#define CNS3XXX_TC11MP_L220_BASE 0x92002000 /* L220 registers */
#define CNS3XXX_TC11MP_L220_BASE_VIRT 0xFF002000
diff --git a/trunk/arch/arm/mach-ep93xx/include/mach/uncompress.h b/trunk/arch/arm/mach-ep93xx/include/mach/uncompress.h
index d2afb4dd82ab..b5cc77d2380b 100644
--- a/trunk/arch/arm/mach-ep93xx/include/mach/uncompress.h
+++ b/trunk/arch/arm/mach-ep93xx/include/mach/uncompress.h
@@ -47,9 +47,13 @@ static void __raw_writel(unsigned int value, unsigned int ptr)
static inline void putc(int c)
{
- /* Transmit fifo not full? */
- while (__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF)
- ;
+ int i;
+
+ for (i = 0; i < 10000; i++) {
+ /* Transmit fifo not full? */
+ if (!(__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF))
+ break;
+ }
__raw_writeb(c, PHYS_UART_DATA);
}
diff --git a/trunk/arch/arm/mach-highbank/hotplug.c b/trunk/arch/arm/mach-highbank/hotplug.c
index f30c52843396..890cae23c12a 100644
--- a/trunk/arch/arm/mach-highbank/hotplug.c
+++ b/trunk/arch/arm/mach-highbank/hotplug.c
@@ -28,13 +28,11 @@ extern void secondary_startup(void);
*/
void __ref highbank_cpu_die(unsigned int cpu)
{
- flush_cache_all();
-
highbank_set_cpu_jump(cpu, phys_to_virt(0));
- highbank_set_core_pwr();
- cpu_do_idle();
+ flush_cache_louis();
+ highbank_set_core_pwr();
- /* We should never return from idle */
- panic("highbank: cpu %d unexpectedly exit from shutdown\n", cpu);
+ while (1)
+ cpu_do_idle();
}
diff --git a/trunk/arch/arm/mach-imx/clk-imx35.c b/trunk/arch/arm/mach-imx/clk-imx35.c
index e13a8fa5e62c..2193c834f55c 100644
--- a/trunk/arch/arm/mach-imx/clk-imx35.c
+++ b/trunk/arch/arm/mach-imx/clk-imx35.c
@@ -257,6 +257,7 @@ int __init mx35_clocks_init(void)
clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0");
clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");
+ clk_register_clkdev(clk[admux_gate], "audmux", NULL);
clk_prepare_enable(clk[spba_gate]);
clk_prepare_enable(clk[gpio1_gate]);
@@ -265,6 +266,7 @@ int __init mx35_clocks_init(void)
clk_prepare_enable(clk[iim_gate]);
clk_prepare_enable(clk[emi_gate]);
clk_prepare_enable(clk[max_gate]);
+ clk_prepare_enable(clk[iomuxc_gate]);
/*
* SCC is needed to boot via mmc after a watchdog reset. The clock code
diff --git a/trunk/arch/arm/mach-imx/clk-imx6q.c b/trunk/arch/arm/mach-imx/clk-imx6q.c
index 2f9ff93a4e61..d38e54f5b6d7 100644
--- a/trunk/arch/arm/mach-imx/clk-imx6q.c
+++ b/trunk/arch/arm/mach-imx/clk-imx6q.c
@@ -115,7 +115,7 @@ static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m"
static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", };
static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", };
static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
-static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_pfd1_540m", };
+static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", };
static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
@@ -443,7 +443,6 @@ int __init mx6q_clocks_init(void)
clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0");
clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0");
- clk_register_clkdev(clk[twd], NULL, "smp_twd");
clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL);
clk_register_clkdev(clk[ahb], "ahb", NULL);
clk_register_clkdev(clk[cko1], "cko1", NULL);
diff --git a/trunk/arch/arm/mach-imx/common.h b/trunk/arch/arm/mach-imx/common.h
index 5a800bfcec5b..5bf4a97ab241 100644
--- a/trunk/arch/arm/mach-imx/common.h
+++ b/trunk/arch/arm/mach-imx/common.h
@@ -110,6 +110,8 @@ void tzic_handle_irq(struct pt_regs *);
extern void imx_enable_cpu(int cpu, bool enable);
extern void imx_set_cpu_jump(int cpu, void *jump_addr);
+extern u32 imx_get_cpu_arg(int cpu);
+extern void imx_set_cpu_arg(int cpu, u32 arg);
extern void v7_cpu_resume(void);
extern u32 *pl310_get_save_ptr(void);
#ifdef CONFIG_SMP
diff --git a/trunk/arch/arm/mach-imx/hotplug.c b/trunk/arch/arm/mach-imx/hotplug.c
index 7bc5fe15dda2..361a253e2b63 100644
--- a/trunk/arch/arm/mach-imx/hotplug.c
+++ b/trunk/arch/arm/mach-imx/hotplug.c
@@ -46,11 +46,23 @@ static inline void cpu_enter_lowpower(void)
void imx_cpu_die(unsigned int cpu)
{
cpu_enter_lowpower();
+ /*
+ * We use the cpu jumping argument register to sync with
+	 * imx_cpu_kill(), which runs on cpu0 and waits for this
+	 * register to be set before killing the cpu.
+ */
+ imx_set_cpu_arg(cpu, ~0);
cpu_do_idle();
}
int imx_cpu_kill(unsigned int cpu)
{
+ unsigned long timeout = jiffies + msecs_to_jiffies(50);
+
+ while (imx_get_cpu_arg(cpu) == 0)
+ if (time_after(jiffies, timeout))
+ return 0;
imx_enable_cpu(cpu, false);
+ imx_set_cpu_arg(cpu, 0);
return 1;
}
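imx_cpu_die() and imx_cpu_kill() above hand-shake through the per-cpu jump-argument register: the dying CPU writes a non-zero value just before idling, and cpu0 polls it with a 50 ms timeout before powering the core off and clearing the register again. A rough pthread model of the same ordering (entirely hypothetical; build with -pthread):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_uint cpu_arg;         /* models the SRC jump argument */

    static void *dying_cpu(void *unused)
    {
        (void)unused;
        /* ... low-power setup would go here ... */
        atomic_store(&cpu_arg, ~0u);    /* tell cpu0 we are ready to die */
        for (;;)
            pause();                    /* models the cpu_do_idle() loop */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        int spins = 0;

        pthread_create(&t, NULL, dying_cpu, NULL);

        /* Bounded poll, standing in for the 50 ms jiffies timeout. */
        while (atomic_load(&cpu_arg) == 0) {
            if (++spins > 5000) {
                puts("timed out, not killing the cpu");
                return 1;
            }
            usleep(10);
        }
        puts("cpu signalled readiness; powering it down");
        atomic_store(&cpu_arg, 0);      /* clear the argument again */
        return 0;
    }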
diff --git a/trunk/arch/arm/mach-imx/src.c b/trunk/arch/arm/mach-imx/src.c
index e15f1555c59b..09a742f8c7ab 100644
--- a/trunk/arch/arm/mach-imx/src.c
+++ b/trunk/arch/arm/mach-imx/src.c
@@ -43,6 +43,18 @@ void imx_set_cpu_jump(int cpu, void *jump_addr)
src_base + SRC_GPR1 + cpu * 8);
}
+u32 imx_get_cpu_arg(int cpu)
+{
+ cpu = cpu_logical_map(cpu);
+ return readl_relaxed(src_base + SRC_GPR1 + cpu * 8 + 4);
+}
+
+void imx_set_cpu_arg(int cpu, u32 arg)
+{
+ cpu = cpu_logical_map(cpu);
+ writel_relaxed(arg, src_base + SRC_GPR1 + cpu * 8 + 4);
+}
+
void imx_src_prepare_restart(void)
{
u32 val;
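
The hotplug hunks above pair imx_cpu_die() on the dying CPU with imx_cpu_kill() on CPU0: the dying CPU writes ~0 into the SRC jump-argument register before idling, and CPU0 spins for up to 50 ms waiting for a non-zero value before it cuts power and clears the register again. A minimal user-space analogue of that handshake, with a plain atomic variable standing in for the hardware register and all names hypothetical, looks like this:

    /* Userspace sketch of the die/kill handshake; "sync_reg" stands in for
     * the per-CPU SRC jump-argument register used by the patch above. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static atomic_uint sync_reg;                /* role of SRC_GPR1 + cpu*8 + 4 */

    static void *dying_cpu(void *unused)
    {
            (void)unused;
            atomic_store(&sync_reg, ~0u);       /* imx_cpu_die(): flag readiness */
            for (;;)
                    ;                           /* cpu_do_idle() stand-in */
            return NULL;
    }

    static bool cpu_kill(void)
    {
            struct timespec start, now;         /* imx_cpu_kill() on "cpu0" */

            clock_gettime(CLOCK_MONOTONIC, &start);
            while (atomic_load(&sync_reg) == 0) {
                    clock_gettime(CLOCK_MONOTONIC, &now);
                    if ((now.tv_sec - start.tv_sec) * 1000000000L +
                        (now.tv_nsec - start.tv_nsec) > 50000000L)
                            return false;       /* timed out, refuse to kill */
            }
            /* imx_enable_cpu(cpu, false) would happen here */
            atomic_store(&sync_reg, 0);         /* re-arm for the next unplug */
            return true;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, dying_cpu, NULL);
            printf("kill %s\n", cpu_kill() ? "succeeded" : "timed out");
            return 0;
    }
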
diff --git a/trunk/arch/arm/mach-kirkwood/board-iomega_ix2_200.c b/trunk/arch/arm/mach-kirkwood/board-iomega_ix2_200.c
index f655b2637b0e..e5f70415905a 100644
--- a/trunk/arch/arm/mach-kirkwood/board-iomega_ix2_200.c
+++ b/trunk/arch/arm/mach-kirkwood/board-iomega_ix2_200.c
@@ -20,10 +20,15 @@ static struct mv643xx_eth_platform_data iomega_ix2_200_ge00_data = {
.duplex = DUPLEX_FULL,
};
+static struct mv643xx_eth_platform_data iomega_ix2_200_ge01_data = {
+ .phy_addr = MV643XX_ETH_PHY_ADDR(11),
+};
+
void __init iomega_ix2_200_init(void)
{
/*
* Basic setup. Needs to be called early.
*/
- kirkwood_ge01_init(&iomega_ix2_200_ge00_data);
+ kirkwood_ge00_init(&iomega_ix2_200_ge00_data);
+ kirkwood_ge01_init(&iomega_ix2_200_ge01_data);
}
diff --git a/trunk/arch/arm/mach-kirkwood/guruplug-setup.c b/trunk/arch/arm/mach-kirkwood/guruplug-setup.c
index 1c6e736cbbf8..08dd739aa709 100644
--- a/trunk/arch/arm/mach-kirkwood/guruplug-setup.c
+++ b/trunk/arch/arm/mach-kirkwood/guruplug-setup.c
@@ -53,6 +53,8 @@ static struct mv_sata_platform_data guruplug_sata_data = {
static struct mvsdio_platform_data guruplug_mvsdio_data = {
/* unfortunately the CD signal has not been connected */
+ .gpio_card_detect = -1,
+ .gpio_write_protect = -1,
};
static struct gpio_led guruplug_led_pins[] = {
diff --git a/trunk/arch/arm/mach-kirkwood/openrd-setup.c b/trunk/arch/arm/mach-kirkwood/openrd-setup.c
index 8ddd69fdc937..6a6eb548307d 100644
--- a/trunk/arch/arm/mach-kirkwood/openrd-setup.c
+++ b/trunk/arch/arm/mach-kirkwood/openrd-setup.c
@@ -55,6 +55,7 @@ static struct mv_sata_platform_data openrd_sata_data = {
static struct mvsdio_platform_data openrd_mvsdio_data = {
.gpio_card_detect = 29, /* MPP29 used as SD card detect */
+ .gpio_write_protect = -1,
};
static unsigned int openrd_mpp_config[] __initdata = {
diff --git a/trunk/arch/arm/mach-kirkwood/rd88f6281-setup.c b/trunk/arch/arm/mach-kirkwood/rd88f6281-setup.c
index c7d93b48926b..d24223166e06 100644
--- a/trunk/arch/arm/mach-kirkwood/rd88f6281-setup.c
+++ b/trunk/arch/arm/mach-kirkwood/rd88f6281-setup.c
@@ -69,6 +69,7 @@ static struct mv_sata_platform_data rd88f6281_sata_data = {
static struct mvsdio_platform_data rd88f6281_mvsdio_data = {
.gpio_card_detect = 28,
+ .gpio_write_protect = -1,
};
static unsigned int rd88f6281_mpp_config[] __initdata = {
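
The three Kirkwood board files above now spell out -1 for every mvsdio GPIO line that is not wired up, instead of leaving the field zero-initialised (GPIO 0 is a real pin). The consumer side only touches a pin whose number is non-negative; a small standalone illustration of that convention follows (the kernel's gpio_is_valid() helper additionally checks an upper bound):

    #include <stdio.h>

    struct sd_pdata {
            int gpio_card_detect;
            int gpio_write_protect;
    };

    static int pin_is_valid(int gpio)           /* simplified gpio_is_valid() */
    {
            return gpio >= 0;
    }

    int main(void)
    {
            /* as in openrd-setup.c: card-detect on MPP29, no write-protect line */
            struct sd_pdata pd = { .gpio_card_detect = 29, .gpio_write_protect = -1 };

            printf("card detect:   %s\n",
                   pin_is_valid(pd.gpio_card_detect) ? "use the GPIO" : "poll instead");
            printf("write protect: %s\n",
                   pin_is_valid(pd.gpio_write_protect) ? "use the GPIO" : "assume writable");
            return 0;
    }
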
diff --git a/trunk/arch/arm/mach-msm/timer.c b/trunk/arch/arm/mach-msm/timer.c
index 2969027f02fa..f9fd77e8f1f5 100644
--- a/trunk/arch/arm/mach-msm/timer.c
+++ b/trunk/arch/arm/mach-msm/timer.c
@@ -62,7 +62,10 @@ static int msm_timer_set_next_event(unsigned long cycles,
{
u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
- writel_relaxed(0, event_base + TIMER_CLEAR);
+ ctrl &= ~TIMER_ENABLE_EN;
+ writel_relaxed(ctrl, event_base + TIMER_ENABLE);
+
+ writel_relaxed(ctrl, event_base + TIMER_CLEAR);
writel_relaxed(cycles, event_base + TIMER_MATCH_VAL);
writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE);
return 0;
diff --git a/trunk/arch/arm/mach-mvebu/irq-armada-370-xp.c b/trunk/arch/arm/mach-mvebu/irq-armada-370-xp.c
index 274ff58271de..d5970f5a1e8d 100644
--- a/trunk/arch/arm/mach-mvebu/irq-armada-370-xp.c
+++ b/trunk/arch/arm/mach-mvebu/irq-armada-370-xp.c
@@ -44,6 +44,8 @@
#define ARMADA_370_XP_MAX_PER_CPU_IRQS (28)
+#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ (5)
+
#define ACTIVE_DOORBELLS (8)
static DEFINE_RAW_SPINLOCK(irq_controller_lock);
@@ -59,36 +61,26 @@ static struct irq_domain *armada_370_xp_mpic_domain;
*/
static void armada_370_xp_irq_mask(struct irq_data *d)
{
-#ifdef CONFIG_SMP
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS)
+ if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
writel(hwirq, main_int_base +
ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
else
writel(hwirq, per_cpu_int_base +
ARMADA_370_XP_INT_SET_MASK_OFFS);
-#else
- writel(irqd_to_hwirq(d),
- per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
-#endif
}
static void armada_370_xp_irq_unmask(struct irq_data *d)
{
-#ifdef CONFIG_SMP
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS)
+ if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
writel(hwirq, main_int_base +
ARMADA_370_XP_INT_SET_ENABLE_OFFS);
else
writel(hwirq, per_cpu_int_base +
ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
-#else
- writel(irqd_to_hwirq(d),
- per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
-#endif
}
#ifdef CONFIG_SMP
@@ -144,10 +136,14 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
unsigned int virq, irq_hw_number_t hw)
{
armada_370_xp_irq_mask(irq_get_irq_data(virq));
- writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
+ if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
+ writel(hw, per_cpu_int_base +
+ ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+ else
+ writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
irq_set_status_flags(virq, IRQ_LEVEL);
- if (hw < ARMADA_370_XP_MAX_PER_CPU_IRQS) {
+ if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
irq_set_percpu_devid(virq);
irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
handle_percpu_devid_irq);
diff --git a/trunk/arch/arm/mach-omap1/clock_data.c b/trunk/arch/arm/mach-omap1/clock_data.c
index cb7c6ae2e3fc..6c4f766365a2 100644
--- a/trunk/arch/arm/mach-omap1/clock_data.c
+++ b/trunk/arch/arm/mach-omap1/clock_data.c
@@ -538,15 +538,6 @@ static struct clk usb_hhc_ck16xx = {
};
static struct clk usb_dc_ck = {
- .name = "usb_dc_ck",
- .ops = &clkops_generic,
- /* Direct from ULPD, no parent */
- .rate = 48000000,
- .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
- .enable_bit = USB_REQ_EN_SHIFT,
-};
-
-static struct clk usb_dc_ck7xx = {
.name = "usb_dc_ck",
.ops = &clkops_generic,
/* Direct from ULPD, no parent */
@@ -727,8 +718,7 @@ static struct omap_clk omap_clks[] = {
CLK(NULL, "usb_clko", &usb_clko, CK_16XX | CK_1510 | CK_310),
CLK(NULL, "usb_hhc_ck", &usb_hhc_ck1510, CK_1510 | CK_310),
CLK(NULL, "usb_hhc_ck", &usb_hhc_ck16xx, CK_16XX),
- CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX),
- CLK(NULL, "usb_dc_ck", &usb_dc_ck7xx, CK_7XX),
+ CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX | CK_7XX),
CLK(NULL, "mclk", &mclk_1510, CK_1510 | CK_310),
CLK(NULL, "mclk", &mclk_16xx, CK_16XX),
CLK(NULL, "bclk", &bclk_1510, CK_1510 | CK_310),
diff --git a/trunk/arch/arm/mach-omap2/cclock44xx_data.c b/trunk/arch/arm/mach-omap2/cclock44xx_data.c
index 3d58f335f173..0c6834ae1fc4 100644
--- a/trunk/arch/arm/mach-omap2/cclock44xx_data.c
+++ b/trunk/arch/arm/mach-omap2/cclock44xx_data.c
@@ -52,6 +52,13 @@
*/
#define OMAP4_DPLL_ABE_DEFFREQ 98304000
+/*
+ * OMAP4 USB DPLL default frequency. In OMAP4430 TRM version V, section
+ * "3.6.3.9.5 DPLL_USB Preferred Settings" shows that the preferred
+ * locked frequency for the USB DPLL is 960MHz.
+ */
+#define OMAP4_DPLL_USB_DEFFREQ 960000000
+
/* Root clocks */
DEFINE_CLK_FIXED_RATE(extalt_clkin_ck, CLK_IS_ROOT, 59000000, 0x0);
@@ -1011,6 +1018,10 @@ DEFINE_CLK_OMAP_MUX(hsmmc2_fclk, "l3_init_clkdm", hsmmc1_fclk_sel,
OMAP4430_CM_L3INIT_MMC2_CLKCTRL, OMAP4430_CLKSEL_MASK,
hsmmc1_fclk_parents, func_dmic_abe_gfclk_ops);
+DEFINE_CLK_GATE(ocp2scp_usb_phy_phy_48m, "func_48m_fclk", &func_48m_fclk, 0x0,
+ OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL,
+ OMAP4430_OPTFCLKEN_PHY_48M_SHIFT, 0x0, NULL);
+
DEFINE_CLK_GATE(sha2md5_fck, "l3_div_ck", &l3_div_ck, 0x0,
OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL,
OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
@@ -1538,6 +1549,7 @@ static struct omap_clk omap44xx_clks[] = {
CLK(NULL, "per_mcbsp4_gfclk", &per_mcbsp4_gfclk, CK_443X),
CLK(NULL, "hsmmc1_fclk", &hsmmc1_fclk, CK_443X),
CLK(NULL, "hsmmc2_fclk", &hsmmc2_fclk, CK_443X),
+ CLK(NULL, "ocp2scp_usb_phy_phy_48m", &ocp2scp_usb_phy_phy_48m, CK_443X),
CLK(NULL, "sha2md5_fck", &sha2md5_fck, CK_443X),
CLK(NULL, "slimbus1_fclk_1", &slimbus1_fclk_1, CK_443X),
CLK(NULL, "slimbus1_fclk_0", &slimbus1_fclk_0, CK_443X),
@@ -1705,5 +1717,13 @@ int __init omap4xxx_clk_init(void)
if (rc)
pr_err("%s: failed to configure ABE DPLL!\n", __func__);
+ /*
+ * Lock USB DPLL on OMAP4 devices so that the L3INIT power
+ * domain can transition to retention state when not in use.
+ */
+ rc = clk_set_rate(&dpll_usb_ck, OMAP4_DPLL_USB_DEFFREQ);
+ if (rc)
+ pr_err("%s: failed to configure USB DPLL!\n", __func__);
+
return 0;
}
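
The new init-time block locks the USB DPLL at its preferred 960 MHz rate by calling clk_set_rate() on the clk structure directly. From driver context the same thing is done through the consumer clock API; a hedged sketch follows (the clock name and error handling are assumptions, and it relies on <linux/clk.h> and <linux/err.h>):

    /* Sketch only, not the cclock44xx_data.c code. */
    static int lock_usb_dpll(struct device *dev)
    {
            struct clk *dpll;
            int ret;

            dpll = clk_get(dev, "dpll_usb_ck");     /* name is an assumption */
            if (IS_ERR(dpll))
                    return PTR_ERR(dpll);

            ret = clk_set_rate(dpll, 960000000);    /* OMAP4_DPLL_USB_DEFFREQ */
            if (ret)
                    dev_err(dev, "failed to lock USB DPLL: %d\n", ret);

            clk_put(dpll);
            return ret;
    }
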
diff --git a/trunk/arch/arm/mach-omap2/common.h b/trunk/arch/arm/mach-omap2/common.h
index 40f4a03d728f..d6ba13e1c540 100644
--- a/trunk/arch/arm/mach-omap2/common.h
+++ b/trunk/arch/arm/mach-omap2/common.h
@@ -293,5 +293,8 @@ extern void omap_reserve(void);
struct omap_hwmod;
extern int omap_dss_reset(struct omap_hwmod *);
+/* SoC specific clock initializer */
+extern int (*omap_clk_init)(void);
+
#endif /* __ASSEMBLER__ */
#endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */
diff --git a/trunk/arch/arm/mach-omap2/io.c b/trunk/arch/arm/mach-omap2/io.c
index 2c3fdd65387b..5c445ca1e271 100644
--- a/trunk/arch/arm/mach-omap2/io.c
+++ b/trunk/arch/arm/mach-omap2/io.c
@@ -54,6 +54,12 @@
#include "prm3xxx.h"
#include "prm44xx.h"
+/*
+ * omap_clk_init: points to a function that does the SoC-specific
+ * clock initializations
+ */
+int (*omap_clk_init)(void);
+
/*
* The machine specific code may provide the extra mapping besides the
* default mapping provided here.
@@ -397,7 +403,7 @@ void __init omap2420_init_early(void)
omap242x_clockdomains_init();
omap2420_hwmod_init();
omap_hwmod_init_postsetup();
- omap2420_clk_init();
+ omap_clk_init = omap2420_clk_init;
}
void __init omap2420_init_late(void)
@@ -427,7 +433,7 @@ void __init omap2430_init_early(void)
omap243x_clockdomains_init();
omap2430_hwmod_init();
omap_hwmod_init_postsetup();
- omap2430_clk_init();
+ omap_clk_init = omap2430_clk_init;
}
void __init omap2430_init_late(void)
@@ -462,7 +468,7 @@ void __init omap3_init_early(void)
omap3xxx_clockdomains_init();
omap3xxx_hwmod_init();
omap_hwmod_init_postsetup();
- omap3xxx_clk_init();
+ omap_clk_init = omap3xxx_clk_init;
}
void __init omap3430_init_early(void)
@@ -500,7 +506,7 @@ void __init ti81xx_init_early(void)
omap3xxx_clockdomains_init();
omap3xxx_hwmod_init();
omap_hwmod_init_postsetup();
- omap3xxx_clk_init();
+ omap_clk_init = omap3xxx_clk_init;
}
void __init omap3_init_late(void)
@@ -568,7 +574,7 @@ void __init am33xx_init_early(void)
am33xx_clockdomains_init();
am33xx_hwmod_init();
omap_hwmod_init_postsetup();
- am33xx_clk_init();
+ omap_clk_init = am33xx_clk_init;
}
#endif
@@ -593,7 +599,7 @@ void __init omap4430_init_early(void)
omap44xx_clockdomains_init();
omap44xx_hwmod_init();
omap_hwmod_init_postsetup();
- omap4xxx_clk_init();
+ omap_clk_init = omap4xxx_clk_init;
}
void __init omap4430_init_late(void)
diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod.c b/trunk/arch/arm/mach-omap2/omap_hwmod.c
index c2c798c08c2b..a202a4785104 100644
--- a/trunk/arch/arm/mach-omap2/omap_hwmod.c
+++ b/trunk/arch/arm/mach-omap2/omap_hwmod.c
@@ -1368,7 +1368,9 @@ static void _enable_sysc(struct omap_hwmod *oh)
}
if (sf & SYSC_HAS_MIDLEMODE) {
- if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
+ if (oh->flags & HWMOD_FORCE_MSTANDBY) {
+ idlemode = HWMOD_IDLEMODE_FORCE;
+ } else if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
idlemode = HWMOD_IDLEMODE_NO;
} else {
if (sf & SYSC_HAS_ENAWAKEUP)
@@ -1440,7 +1442,8 @@ static void _idle_sysc(struct omap_hwmod *oh)
}
if (sf & SYSC_HAS_MIDLEMODE) {
- if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
+ if ((oh->flags & HWMOD_SWSUP_MSTANDBY) ||
+ (oh->flags & HWMOD_FORCE_MSTANDBY)) {
idlemode = HWMOD_IDLEMODE_FORCE;
} else {
if (sf & SYSC_HAS_ENAWAKEUP)
diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod.h b/trunk/arch/arm/mach-omap2/omap_hwmod.h
index d43d9b608eda..d5dc935f6060 100644
--- a/trunk/arch/arm/mach-omap2/omap_hwmod.h
+++ b/trunk/arch/arm/mach-omap2/omap_hwmod.h
@@ -427,8 +427,8 @@ struct omap_hwmod_omap4_prcm {
*
* HWMOD_SWSUP_SIDLE: omap_hwmod code should manually bring module in and out
* of idle, rather than relying on module smart-idle
- * HWMOD_SWSUP_MSTDBY: omap_hwmod code should manually bring module in and out
- * of standby, rather than relying on module smart-standby
+ * HWMOD_SWSUP_MSTANDBY: omap_hwmod code should manually bring module in and
+ * out of standby, rather than relying on module smart-standby
* HWMOD_INIT_NO_RESET: don't reset this module at boot - important for
* SDRAM controller, etc. XXX probably belongs outside the main hwmod file
* XXX Should be HWMOD_SETUP_NO_RESET
@@ -459,6 +459,10 @@ struct omap_hwmod_omap4_prcm {
* correctly, or this is being abused to deal with some PM latency
* issues -- but we're currently suffering from a shortage of
* folks who are able to track these issues down properly.
+ * HWMOD_FORCE_MSTANDBY: Always keep MIDLEMODE bits cleared so that device
+ * is kept in force-standby mode. Failing to do so causes PM problems
+ * with musb on OMAP3630 at least. Note that musb has a dedicated register
+ * to control MSTANDBY signal when MIDLEMODE is set to force-standby.
*/
#define HWMOD_SWSUP_SIDLE (1 << 0)
#define HWMOD_SWSUP_MSTANDBY (1 << 1)
@@ -471,6 +475,7 @@ struct omap_hwmod_omap4_prcm {
#define HWMOD_16BIT_REG (1 << 8)
#define HWMOD_EXT_OPT_MAIN_CLK (1 << 9)
#define HWMOD_BLOCK_WFI (1 << 10)
+#define HWMOD_FORCE_MSTANDBY (1 << 11)
/*
* omap_hwmod._int_flags definitions
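
Taken together, the omap_hwmod.c and omap_hwmod.h hunks give HWMOD_FORCE_MSTANDBY priority over HWMOD_SWSUP_MSTANDBY when a module is enabled, and fold both into force-standby when it idles. A standalone sketch of that decision table, using the bit values from the header above (the real code further distinguishes smart-standby from smart-standby-with-wakeup):

    #include <stdio.h>

    #define HWMOD_SWSUP_MSTANDBY  (1 << 1)
    #define HWMOD_FORCE_MSTANDBY  (1 << 11)

    enum idlemode { IDLE_FORCE, IDLE_NO, IDLE_SMART };

    static enum idlemode enable_midlemode(unsigned long flags)
    {
            if (flags & HWMOD_FORCE_MSTANDBY)
                    return IDLE_FORCE;              /* new: stay in force-standby */
            if (flags & HWMOD_SWSUP_MSTANDBY)
                    return IDLE_NO;                 /* driver manages standby itself */
            return IDLE_SMART;
    }

    static enum idlemode idle_midlemode(unsigned long flags)
    {
            if (flags & (HWMOD_SWSUP_MSTANDBY | HWMOD_FORCE_MSTANDBY))
                    return IDLE_FORCE;
            return IDLE_SMART;
    }

    int main(void)
    {
            unsigned long musb_flags = HWMOD_FORCE_MSTANDBY;   /* as set for musb */

            printf("enable: %d, idle: %d\n",
                   enable_midlemode(musb_flags), idle_midlemode(musb_flags));
            return 0;
    }
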
diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index ac7e03ec952f..5112d04e7b79 100644
--- a/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -1707,9 +1707,14 @@ static struct omap_hwmod omap3xxx_usbhsotg_hwmod = {
* Erratum ID: i479 idle_req / idle_ack mechanism potentially
* broken when autoidle is enabled
* workaround is to disable the autoidle bit at module level.
+ *
+ * Enabling the device in any other MIDLEMODE setting but force-idle
+ * causes core_pwrdm not to enter idle states at least on OMAP3630.
+ * Note that musb has OTG_FORCESTDBY register that controls MSTANDBY
+ * signal when MIDLEMODE is set to force-idle.
*/
.flags = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE
- | HWMOD_SWSUP_MSTANDBY,
+ | HWMOD_FORCE_MSTANDBY,
};
/* usb_otg_hs */
diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 0e47d2e1687c..eaba9dc91a0d 100644
--- a/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -2719,7 +2719,17 @@ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {
.name = "ocp2scp_usb_phy",
.class = &omap44xx_ocp2scp_hwmod_class,
.clkdm_name = "l3_init_clkdm",
- .main_clk = "func_48m_fclk",
+ /*
+ * ocp2scp_usb_phy_phy_48m is provided by the OMAP4 PRCM IP
+ * block as an "optional clock," and normally should never be
+ * specified as the main_clk for an OMAP IP block. However it
+ * turns out that this clock is actually the main clock for
+ * the ocp2scp_usb_phy IP block:
+ * http://lists.infradead.org/pipermail/linux-arm-kernel/2012-September/119943.html
+ * So listing ocp2scp_usb_phy_phy_48m as a main_clk here seems
+ * to be the best workaround.
+ */
+ .main_clk = "ocp2scp_usb_phy_phy_48m",
.prcm = {
.omap4 = {
.clkctrl_offs = OMAP4_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL_OFFSET,
diff --git a/trunk/arch/arm/mach-omap2/timer.c b/trunk/arch/arm/mach-omap2/timer.c
index 2bdd4cf17a8f..f62b509ed08d 100644
--- a/trunk/arch/arm/mach-omap2/timer.c
+++ b/trunk/arch/arm/mach-omap2/timer.c
@@ -547,6 +547,8 @@ static inline void __init realtime_counter_init(void)
clksrc_nr, clksrc_src) \
void __init omap##name##_gptimer_timer_init(void) \
{ \
+ if (omap_clk_init) \
+ omap_clk_init(); \
omap_dmtimer_init(); \
omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \
omap2_gptimer_clocksource_init((clksrc_nr), clksrc_src); \
@@ -556,6 +558,8 @@ void __init omap##name##_gptimer_timer_init(void) \
clksrc_nr, clksrc_src) \
void __init omap##name##_sync32k_timer_init(void) \
{ \
+ if (omap_clk_init) \
+ omap_clk_init(); \
omap_dmtimer_init(); \
omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \
/* Enable the use of clocksource="gp_timer" kernel parameter */ \
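
The io.c and timer.c hunks replace the direct SoC clock-init calls with a function pointer: each init_early hook only records which initializer applies, and the timer init invokes it the first time clocks are actually needed. A minimal standalone illustration of that deferred-init pattern (all names hypothetical):

    #include <stdio.h>

    /* set during early init, consumed later by the first user of the clocks */
    static int (*soc_clk_init)(void);

    static int omap4_style_clk_init(void)
    {
            puts("SoC clock tree registered");
            return 0;
    }

    static void init_early(void)
    {
            soc_clk_init = omap4_style_clk_init;    /* just record the choice */
    }

    static void timer_init(void)
    {
            if (soc_clk_init)                       /* run it on first use */
                    soc_clk_init();
            puts("timer ready");
    }

    int main(void)
    {
            init_early();
            timer_init();
            return 0;
    }
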
diff --git a/trunk/arch/arm/mach-s3c24xx/include/mach/irqs.h b/trunk/arch/arm/mach-s3c24xx/include/mach/irqs.h
index b7a9f4d469e8..1e73f5fa8659 100644
--- a/trunk/arch/arm/mach-s3c24xx/include/mach/irqs.h
+++ b/trunk/arch/arm/mach-s3c24xx/include/mach/irqs.h
@@ -188,10 +188,8 @@
#if defined(CONFIG_CPU_S3C2416)
#define NR_IRQS (IRQ_S3C2416_I2S1 + 1)
-#elif defined(CONFIG_CPU_S3C2443)
-#define NR_IRQS (IRQ_S3C2443_AC97+1)
#else
-#define NR_IRQS (IRQ_S3C2440_AC97+1)
+#define NR_IRQS (IRQ_S3C2443_AC97 + 1)
#endif
/* compatibility define. */
diff --git a/trunk/arch/arm/mach-s3c24xx/irq.c b/trunk/arch/arm/mach-s3c24xx/irq.c
index cb9f5e011e73..d8ba9bee4c7e 100644
--- a/trunk/arch/arm/mach-s3c24xx/irq.c
+++ b/trunk/arch/arm/mach-s3c24xx/irq.c
@@ -500,7 +500,7 @@ struct s3c_irq_intc *s3c24xx_init_intc(struct device_node *np,
base = (void *)0xfd000000;
intc->reg_mask = base + 0xa4;
- intc->reg_pending = base + 0x08;
+ intc->reg_pending = base + 0xa8;
irq_num = 20;
irq_start = S3C2410_IRQ(32);
irq_offset = 4;
diff --git a/trunk/arch/arm/mach-ux500/board-mop500-sdi.c b/trunk/arch/arm/mach-ux500/board-mop500-sdi.c
index 051b62c27102..7f2cb6c5e2c1 100644
--- a/trunk/arch/arm/mach-ux500/board-mop500-sdi.c
+++ b/trunk/arch/arm/mach-ux500/board-mop500-sdi.c
@@ -81,7 +81,6 @@ static struct stedma40_chan_cfg mop500_sdi0_dma_cfg_tx = {
#endif
struct mmci_platform_data mop500_sdi0_data = {
- .ios_handler = mop500_sdi0_ios_handler,
.ocr_mask = MMC_VDD_29_30,
.f_max = 50000000,
.capabilities = MMC_CAP_4_BIT_DATA |
diff --git a/trunk/arch/arm/mach-ux500/board-mop500.c b/trunk/arch/arm/mach-ux500/board-mop500.c
index b03457881c4b..87d2d7b38ce9 100644
--- a/trunk/arch/arm/mach-ux500/board-mop500.c
+++ b/trunk/arch/arm/mach-ux500/board-mop500.c
@@ -12,6 +12,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -439,6 +440,15 @@ static void mop500_prox_deactivate(struct device *dev)
regulator_put(prox_regulator);
}
+void mop500_snowball_ethernet_clock_enable(void)
+{
+ struct clk *clk;
+
+ clk = clk_get_sys("fsmc", NULL);
+ if (!IS_ERR(clk))
+ clk_prepare_enable(clk);
+}
+
static struct cryp_platform_data u8500_cryp1_platform_data = {
.mem_to_engine = {
.dir = STEDMA40_MEM_TO_PERIPH,
@@ -683,6 +693,8 @@ static void __init snowball_init_machine(void)
mop500_audio_init(parent);
mop500_uart_init(parent);
+ mop500_snowball_ethernet_clock_enable();
+
/* This board has full regulator constraints */
regulator_has_full_constraints();
}
diff --git a/trunk/arch/arm/mach-ux500/board-mop500.h b/trunk/arch/arm/mach-ux500/board-mop500.h
index eaa605f5d90d..d38951be70df 100644
--- a/trunk/arch/arm/mach-ux500/board-mop500.h
+++ b/trunk/arch/arm/mach-ux500/board-mop500.h
@@ -104,6 +104,7 @@ void __init mop500_pinmaps_init(void);
void __init snowball_pinmaps_init(void);
void __init hrefv60_pinmaps_init(void);
void mop500_audio_init(struct device *parent);
+void mop500_snowball_ethernet_clock_enable(void);
int __init mop500_uib_init(void);
void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info,
diff --git a/trunk/arch/arm/mach-ux500/cpu-db8500.c b/trunk/arch/arm/mach-ux500/cpu-db8500.c
index 19235cf7bbe3..f1a581844372 100644
--- a/trunk/arch/arm/mach-ux500/cpu-db8500.c
+++ b/trunk/arch/arm/mach-ux500/cpu-db8500.c
@@ -312,9 +312,10 @@ static void __init u8500_init_machine(void)
/* Pinmaps must be in place before devices register */
if (of_machine_is_compatible("st-ericsson,mop500"))
mop500_pinmaps_init();
- else if (of_machine_is_compatible("calaosystems,snowball-a9500"))
+ else if (of_machine_is_compatible("calaosystems,snowball-a9500")) {
snowball_pinmaps_init();
- else if (of_machine_is_compatible("st-ericsson,hrefv60+"))
+ mop500_snowball_ethernet_clock_enable();
+ } else if (of_machine_is_compatible("st-ericsson,hrefv60+"))
hrefv60_pinmaps_init();
else if (of_machine_is_compatible("st-ericsson,ccu9540")) {}
/* TODO: Add pinmaps for ccu9540 board. */
diff --git a/trunk/arch/arm/mm/Kconfig b/trunk/arch/arm/mm/Kconfig
index 025d17328730..4045c4931a30 100644
--- a/trunk/arch/arm/mm/Kconfig
+++ b/trunk/arch/arm/mm/Kconfig
@@ -43,7 +43,7 @@ config CPU_ARM740T
depends on !MMU
select CPU_32v4T
select CPU_ABRT_LV4T
- select CPU_CACHE_V3 # although the core is v4t
+ select CPU_CACHE_V4
select CPU_CP15_MPU
select CPU_PABRT_LEGACY
help
@@ -469,9 +469,6 @@ config CPU_PABRT_V7
bool
# The cache model
-config CPU_CACHE_V3
- bool
-
config CPU_CACHE_V4
bool
diff --git a/trunk/arch/arm/mm/Makefile b/trunk/arch/arm/mm/Makefile
index 4e333fa2756f..9e51be96f635 100644
--- a/trunk/arch/arm/mm/Makefile
+++ b/trunk/arch/arm/mm/Makefile
@@ -33,7 +33,6 @@ obj-$(CONFIG_CPU_PABRT_LEGACY) += pabort-legacy.o
obj-$(CONFIG_CPU_PABRT_V6) += pabort-v6.o
obj-$(CONFIG_CPU_PABRT_V7) += pabort-v7.o
-obj-$(CONFIG_CPU_CACHE_V3) += cache-v3.o
obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o
obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o
obj-$(CONFIG_CPU_CACHE_V4WB) += cache-v4wb.o
diff --git a/trunk/arch/arm/mm/cache-feroceon-l2.c b/trunk/arch/arm/mm/cache-feroceon-l2.c
index dd3d59122cc3..48bc3c0a87ce 100644
--- a/trunk/arch/arm/mm/cache-feroceon-l2.c
+++ b/trunk/arch/arm/mm/cache-feroceon-l2.c
@@ -343,6 +343,7 @@ void __init feroceon_l2_init(int __l2_wt_override)
outer_cache.inv_range = feroceon_l2_inv_range;
outer_cache.clean_range = feroceon_l2_clean_range;
outer_cache.flush_range = feroceon_l2_flush_range;
+ outer_cache.inv_all = l2_inv_all;
enable_l2();
diff --git a/trunk/arch/arm/mm/cache-l2x0.c b/trunk/arch/arm/mm/cache-l2x0.c
index c2f37390308a..c465faca51b0 100644
--- a/trunk/arch/arm/mm/cache-l2x0.c
+++ b/trunk/arch/arm/mm/cache-l2x0.c
@@ -299,7 +299,7 @@ static void l2x0_unlock(u32 cache_id)
int lockregs;
int i;
- switch (cache_id) {
+ switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
case L2X0_CACHE_ID_PART_L310:
lockregs = 8;
break;
@@ -333,15 +333,14 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
if (cache_id_part_number_from_dt)
cache_id = cache_id_part_number_from_dt;
else
- cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID)
- & L2X0_CACHE_ID_PART_MASK;
+ cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
aux &= aux_mask;
aux |= aux_val;
/* Determine the number of ways */
- switch (cache_id) {
+ switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
case L2X0_CACHE_ID_PART_L310:
if (aux & (1 << 16))
ways = 16;
@@ -725,7 +724,6 @@ static const struct l2x0_of_data pl310_data = {
.flush_all = l2x0_flush_all,
.inv_all = l2x0_inv_all,
.disable = l2x0_disable,
- .set_debug = pl310_set_debug,
},
};
@@ -814,9 +812,8 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
data->save();
of_init = true;
- l2x0_init(l2x0_base, aux_val, aux_mask);
-
memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
+ l2x0_init(l2x0_base, aux_val, aux_mask);
return 0;
}
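
The l2x0 fix stops pre-masking the CACHE_ID register and instead applies L2X0_CACHE_ID_PART_MASK at each switch, so the revision bits in the low part of the register can no longer defeat the part-number match (which is what previously broke l2x0_unlock()). A standalone sketch, with the macro values as defined in cache-l2x0.h:

    #include <stdint.h>
    #include <stdio.h>

    #define L2X0_CACHE_ID_PART_MASK  (0xf << 6)
    #define L2X0_CACHE_ID_PART_L310  (3 << 6)

    static int lockregs_for(uint32_t cache_id)
    {
            /* mask first: the low bits carry the RTL revision, not the part */
            switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
            case L2X0_CACHE_ID_PART_L310:
                    return 8;
            default:
                    return 1;
            }
    }

    int main(void)
    {
            uint32_t id = L2X0_CACHE_ID_PART_L310 | 0x9;   /* part plus a revision */

            printf("lock register pairs: %d\n", lockregs_for(id));
            return 0;
    }
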
diff --git a/trunk/arch/arm/mm/cache-v3.S b/trunk/arch/arm/mm/cache-v3.S
deleted file mode 100644
index 8a3fadece8d3..000000000000
--- a/trunk/arch/arm/mm/cache-v3.S
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * linux/arch/arm/mm/cache-v3.S
- *
- * Copyright (C) 1997-2002 Russell king
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include
-#include
-#include
-#include "proc-macros.S"
-
-/*
- * flush_icache_all()
- *
- * Unconditionally clean and invalidate the entire icache.
- */
-ENTRY(v3_flush_icache_all)
- mov pc, lr
-ENDPROC(v3_flush_icache_all)
-
-/*
- * flush_user_cache_all()
- *
- * Invalidate all cache entries in a particular address
- * space.
- *
- * - mm - mm_struct describing address space
- */
-ENTRY(v3_flush_user_cache_all)
- /* FALLTHROUGH */
-/*
- * flush_kern_cache_all()
- *
- * Clean and invalidate the entire cache.
- */
-ENTRY(v3_flush_kern_cache_all)
- /* FALLTHROUGH */
-
-/*
- * flush_user_cache_range(start, end, flags)
- *
- * Invalidate a range of cache entries in the specified
- * address space.
- *
- * - start - start address (may not be aligned)
- * - end - end address (exclusive, may not be aligned)
- * - flags - vma_area_struct flags describing address space
- */
-ENTRY(v3_flush_user_cache_range)
- mov ip, #0
- mcreq p15, 0, ip, c7, c0, 0 @ flush ID cache
- mov pc, lr
-
-/*
- * coherent_kern_range(start, end)
- *
- * Ensure coherency between the Icache and the Dcache in the
- * region described by start. If you have non-snooping
- * Harvard caches, you need to implement this function.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v3_coherent_kern_range)
- /* FALLTHROUGH */
-
-/*
- * coherent_user_range(start, end)
- *
- * Ensure coherency between the Icache and the Dcache in the
- * region described by start. If you have non-snooping
- * Harvard caches, you need to implement this function.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v3_coherent_user_range)
- mov r0, #0
- mov pc, lr
-
-/*
- * flush_kern_dcache_area(void *page, size_t size)
- *
- * Ensure no D cache aliasing occurs, either with itself or
- * the I cache
- *
- * - addr - kernel address
- * - size - region size
- */
-ENTRY(v3_flush_kern_dcache_area)
- /* FALLTHROUGH */
-
-/*
- * dma_flush_range(start, end)
- *
- * Clean and invalidate the specified virtual address range.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v3_dma_flush_range)
- mov r0, #0
- mcr p15, 0, r0, c7, c0, 0 @ flush ID cache
- mov pc, lr
-
-/*
- * dma_unmap_area(start, size, dir)
- * - start - kernel virtual start address
- * - size - size of region
- * - dir - DMA direction
- */
-ENTRY(v3_dma_unmap_area)
- teq r2, #DMA_TO_DEVICE
- bne v3_dma_flush_range
- /* FALLTHROUGH */
-
-/*
- * dma_map_area(start, size, dir)
- * - start - kernel virtual start address
- * - size - size of region
- * - dir - DMA direction
- */
-ENTRY(v3_dma_map_area)
- mov pc, lr
-ENDPROC(v3_dma_unmap_area)
-ENDPROC(v3_dma_map_area)
-
- .globl v3_flush_kern_cache_louis
- .equ v3_flush_kern_cache_louis, v3_flush_kern_cache_all
-
- __INITDATA
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions v3
diff --git a/trunk/arch/arm/mm/cache-v4.S b/trunk/arch/arm/mm/cache-v4.S
index 43e5d77be677..a7ba68f59f0c 100644
--- a/trunk/arch/arm/mm/cache-v4.S
+++ b/trunk/arch/arm/mm/cache-v4.S
@@ -58,7 +58,7 @@ ENTRY(v4_flush_kern_cache_all)
ENTRY(v4_flush_user_cache_range)
#ifdef CONFIG_CPU_CP15
mov ip, #0
- mcreq p15, 0, ip, c7, c7, 0 @ flush ID cache
+ mcr p15, 0, ip, c7, c7, 0 @ flush ID cache
mov pc, lr
#else
/* FALLTHROUGH */
diff --git a/trunk/arch/arm/mm/context.c b/trunk/arch/arm/mm/context.c
index a5a4b2bc42ba..2ac37372ef52 100644
--- a/trunk/arch/arm/mm/context.c
+++ b/trunk/arch/arm/mm/context.c
@@ -48,7 +48,7 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
-static DEFINE_PER_CPU(atomic64_t, active_asids);
+DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
@@ -215,6 +215,7 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
local_flush_bp_all();
local_flush_tlb_all();
+ dummy_flush_tlb_a15_erratum();
}
atomic64_set(&per_cpu(active_asids, cpu), asid);
diff --git a/trunk/arch/arm/mm/mmu.c b/trunk/arch/arm/mm/mmu.c
index e95a996ab78f..a84ff763ac39 100644
--- a/trunk/arch/arm/mm/mmu.c
+++ b/trunk/arch/arm/mm/mmu.c
@@ -34,6 +34,7 @@
#include
#include "mm.h"
+#include "tcm.h"
/*
* empty_zero_page is a special page that is used for
@@ -598,39 +599,60 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
} while (pte++, addr += PAGE_SIZE, addr != end);
}
-static void __init alloc_init_section(pud_t *pud, unsigned long addr,
- unsigned long end, phys_addr_t phys,
- const struct mem_type *type)
+static void __init map_init_section(pmd_t *pmd, unsigned long addr,
+ unsigned long end, phys_addr_t phys,
+ const struct mem_type *type)
{
- pmd_t *pmd = pmd_offset(pud, addr);
-
+#ifndef CONFIG_ARM_LPAE
/*
- * Try a section mapping - end, addr and phys must all be aligned
- * to a section boundary. Note that PMDs refer to the individual
- * L1 entries, whereas PGDs refer to a group of L1 entries making
- * up one logical pointer to an L2 table.
+ * In classic MMU format, puds and pmds are folded into
+ * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
+ * group of L1 entries making up one logical pointer to
+ * an L2 table (2MB), whereas PMDs refer to the individual
+ * L1 entries (1MB). Hence increment to get the correct
+ * offset for odd 1MB sections.
+ * (See arch/arm/include/asm/pgtable-2level.h)
*/
- if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
- pmd_t *p = pmd;
-
-#ifndef CONFIG_ARM_LPAE
- if (addr & SECTION_SIZE)
- pmd++;
+ if (addr & SECTION_SIZE)
+ pmd++;
#endif
+ do {
+ *pmd = __pmd(phys | type->prot_sect);
+ phys += SECTION_SIZE;
+ } while (pmd++, addr += SECTION_SIZE, addr != end);
- do {
- *pmd = __pmd(phys | type->prot_sect);
- phys += SECTION_SIZE;
- } while (pmd++, addr += SECTION_SIZE, addr != end);
+ flush_pmd_entry(pmd);
+}
- flush_pmd_entry(p);
- } else {
+static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
+ unsigned long end, phys_addr_t phys,
+ const struct mem_type *type)
+{
+ pmd_t *pmd = pmd_offset(pud, addr);
+ unsigned long next;
+
+ do {
/*
- * No need to loop; pte's aren't interested in the
- * individual L1 entries.
+ * With LPAE, we must loop over to map
+ * all the pmds for the given range.
*/
- alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
- }
+ next = pmd_addr_end(addr, end);
+
+ /*
+ * Try a section mapping - addr, next and phys must all be
+ * aligned to a section boundary.
+ */
+ if (type->prot_sect &&
+ ((addr | next | phys) & ~SECTION_MASK) == 0) {
+ map_init_section(pmd, addr, next, phys, type);
+ } else {
+ alloc_init_pte(pmd, addr, next,
+ __phys_to_pfn(phys), type);
+ }
+
+ phys += next - addr;
+
+ } while (pmd++, addr = next, addr != end);
}
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
@@ -641,7 +663,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
do {
next = pud_addr_end(addr, end);
- alloc_init_section(pud, addr, next, phys, type);
+ alloc_init_pmd(pud, addr, next, phys, type);
phys += next - addr;
} while (pud++, addr = next, addr != end);
}
@@ -1256,6 +1278,7 @@ void __init paging_init(struct machine_desc *mdesc)
dma_contiguous_remap();
devicemaps_init(mdesc);
kmap_init();
+ tcm_init();
top_pmd = pmd_off_k(0xffff0000);
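
The rewritten alloc_init_pmd() walks the range one pmd at a time with pmd_addr_end() and lets each chunk independently choose between a section mapping and a pte-level mapping, which is what LPAE needs. A standalone sketch of that chunked walk (chunk size, addresses and the section test are illustrative stand-ins for the kernel macros):

    #include <stdio.h>

    #define CHUNK         (2UL << 20)               /* 2 MB, like a classic pmd */
    #define SECTION_MASK  (~((1UL << 20) - 1))      /* 1 MB sections */

    static unsigned long chunk_end(unsigned long addr, unsigned long end)
    {
            unsigned long next = (addr + CHUNK) & ~(CHUNK - 1);   /* pmd_addr_end() */

            return next < end ? next : end;
    }

    int main(void)
    {
            unsigned long addr = 0xc0000000UL, end = 0xc0480000UL, phys = 0x80000000UL;

            do {
                    unsigned long next = chunk_end(addr, end);

                    if (((addr | next | phys) & ~SECTION_MASK) == 0)
                            printf("section map %#lx-%#lx -> %#lx\n", addr, next, phys);
                    else
                            printf("pte map     %#lx-%#lx -> %#lx\n", addr, next, phys);

                    phys += next - addr;
                    addr = next;
            } while (addr != end);
            return 0;
    }
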
diff --git a/trunk/arch/arm/mm/proc-arm740.S b/trunk/arch/arm/mm/proc-arm740.S
index dc5de5d53f20..fde2d2a794cf 100644
--- a/trunk/arch/arm/mm/proc-arm740.S
+++ b/trunk/arch/arm/mm/proc-arm740.S
@@ -77,24 +77,27 @@ __arm740_setup:
mcr p15, 0, r0, c6, c0 @ set area 0, default
ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
- ldr r1, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB)
- mov r2, #10 @ 11 is the minimum (4KB)
-1: add r2, r2, #1 @ area size *= 2
- mov r1, r1, lsr #1
+ ldr r3, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB)
+ mov r4, #10 @ 11 is the minimum (4KB)
+1: add r4, r4, #1 @ area size *= 2
+ movs r3, r3, lsr #1
bne 1b @ count not zero r-shift
- orr r0, r0, r2, lsl #1 @ the area register value
+ orr r0, r0, r4, lsl #1 @ the area register value
orr r0, r0, #1 @ set enable bit
mcr p15, 0, r0, c6, c1 @ set area 1, RAM
ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
- ldr r1, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB)
- mov r2, #10 @ 11 is the minimum (4KB)
-1: add r2, r2, #1 @ area size *= 2
- mov r1, r1, lsr #1
+ ldr r3, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB)
+ cmp r3, #0
+ moveq r0, #0
+ beq 2f
+ mov r4, #10 @ 11 is the minimum (4KB)
+1: add r4, r4, #1 @ area size *= 2
+ movs r3, r3, lsr #1
bne 1b @ count not zero r-shift
- orr r0, r0, r2, lsl #1 @ the area register value
+ orr r0, r0, r4, lsl #1 @ the area register value
orr r0, r0, #1 @ set enable bit
- mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH
+2: mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH
mov r0, #0x06
mcr p15, 0, r0, c2, c0 @ Region 1&2 cacheable
@@ -137,13 +140,14 @@ __arm740_proc_info:
.long 0x41807400
.long 0xfffffff0
.long 0
+ .long 0
b __arm740_setup
.long cpu_arch_name
.long cpu_elf_name
- .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT
+ .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_26BIT
.long cpu_arm740_name
.long arm740_processor_functions
.long 0
.long 0
- .long v3_cache_fns @ cache model
+ .long v4_cache_fns @ cache model
.size __arm740_proc_info, . - __arm740_proc_info
diff --git a/trunk/arch/arm/mm/proc-arm920.S b/trunk/arch/arm/mm/proc-arm920.S
index 2c3b9421ab5e..2556cf1c2da1 100644
--- a/trunk/arch/arm/mm/proc-arm920.S
+++ b/trunk/arch/arm/mm/proc-arm920.S
@@ -387,7 +387,7 @@ ENTRY(cpu_arm920_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl cpu_arm920_suspend_size
.equ cpu_arm920_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_arm920_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c13, c0, 0 @ PID
diff --git a/trunk/arch/arm/mm/proc-arm926.S b/trunk/arch/arm/mm/proc-arm926.S
index f1803f7e2972..344c8a548cc0 100644
--- a/trunk/arch/arm/mm/proc-arm926.S
+++ b/trunk/arch/arm/mm/proc-arm926.S
@@ -402,7 +402,7 @@ ENTRY(cpu_arm926_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl cpu_arm926_suspend_size
.equ cpu_arm926_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_arm926_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c13, c0, 0 @ PID
diff --git a/trunk/arch/arm/mm/proc-mohawk.S b/trunk/arch/arm/mm/proc-mohawk.S
index 82f9cdc751d6..0b60dd3d742a 100644
--- a/trunk/arch/arm/mm/proc-mohawk.S
+++ b/trunk/arch/arm/mm/proc-mohawk.S
@@ -350,7 +350,7 @@ ENTRY(cpu_mohawk_set_pte_ext)
.globl cpu_mohawk_suspend_size
.equ cpu_mohawk_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_mohawk_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/trunk/arch/arm/mm/proc-sa1100.S b/trunk/arch/arm/mm/proc-sa1100.S
index 3aa0da11fd84..d92dfd081429 100644
--- a/trunk/arch/arm/mm/proc-sa1100.S
+++ b/trunk/arch/arm/mm/proc-sa1100.S
@@ -172,7 +172,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
.globl cpu_sa1100_suspend_size
.equ cpu_sa1100_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_sa1100_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c3, c0, 0 @ domain ID
diff --git a/trunk/arch/arm/mm/proc-syms.c b/trunk/arch/arm/mm/proc-syms.c
index 3e6210b4d6d4..054b491ff764 100644
--- a/trunk/arch/arm/mm/proc-syms.c
+++ b/trunk/arch/arm/mm/proc-syms.c
@@ -17,7 +17,9 @@
#ifndef MULTI_CPU
EXPORT_SYMBOL(cpu_dcache_clean_area);
+#ifdef CONFIG_MMU
EXPORT_SYMBOL(cpu_set_pte_ext);
+#endif
#else
EXPORT_SYMBOL(processor);
#endif
diff --git a/trunk/arch/arm/mm/proc-v6.S b/trunk/arch/arm/mm/proc-v6.S
index bcaaa8de9325..5c07ee4fe3eb 100644
--- a/trunk/arch/arm/mm/proc-v6.S
+++ b/trunk/arch/arm/mm/proc-v6.S
@@ -138,7 +138,7 @@ ENTRY(cpu_v6_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
.globl cpu_v6_suspend_size
.equ cpu_v6_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_v6_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
diff --git a/trunk/arch/arm/mm/proc-v7.S b/trunk/arch/arm/mm/proc-v7.S
index 3a3c015f8d5c..f584d3f5b37c 100644
--- a/trunk/arch/arm/mm/proc-v7.S
+++ b/trunk/arch/arm/mm/proc-v7.S
@@ -420,7 +420,7 @@ __v7_pj4b_proc_info:
__v7_ca7mp_proc_info:
.long 0x410fc070
.long 0xff0ffff0
- __v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV
+ __v7_proc __v7_ca7mp_setup
.size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
/*
@@ -430,9 +430,24 @@ __v7_ca7mp_proc_info:
__v7_ca15mp_proc_info:
.long 0x410fc0f0
.long 0xff0ffff0
- __v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV
+ __v7_proc __v7_ca15mp_setup
.size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
+ /*
+ * Qualcomm Inc. Krait processors.
+ */
+ .type __krait_proc_info, #object
+__krait_proc_info:
+ .long 0x510f0400 @ Required ID value
+ .long 0xff0ffc00 @ Mask for ID
+ /*
+ * Some Krait processors don't indicate support for SDIV and UDIV
+ * instructions in the ARM instruction set, even though they actually
+ * do support them.
+ */
+ __v7_proc __v7_setup, hwcaps = HWCAP_IDIV
+ .size __krait_proc_info, . - __krait_proc_info
+
/*
* Match any ARMv7 processor core.
*/
diff --git a/trunk/arch/arm/mm/proc-xsc3.S b/trunk/arch/arm/mm/proc-xsc3.S
index eb93d6487f35..e8efd83b6f25 100644
--- a/trunk/arch/arm/mm/proc-xsc3.S
+++ b/trunk/arch/arm/mm/proc-xsc3.S
@@ -413,7 +413,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
.globl cpu_xsc3_suspend_size
.equ cpu_xsc3_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_xsc3_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/trunk/arch/arm/mm/proc-xscale.S b/trunk/arch/arm/mm/proc-xscale.S
index 25510361aa18..e766f889bfd6 100644
--- a/trunk/arch/arm/mm/proc-xscale.S
+++ b/trunk/arch/arm/mm/proc-xscale.S
@@ -528,7 +528,7 @@ ENTRY(cpu_xscale_set_pte_ext)
.globl cpu_xscale_suspend_size
.equ cpu_xscale_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_xscale_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/trunk/arch/arm/kernel/tcm.h b/trunk/arch/arm/mm/tcm.h
similarity index 100%
rename from trunk/arch/arm/kernel/tcm.h
rename to trunk/arch/arm/mm/tcm.h
diff --git a/trunk/arch/arm/plat-samsung/include/plat/fb.h b/trunk/arch/arm/plat-samsung/include/plat/fb.h
index b885322717a1..9ae507270785 100644
--- a/trunk/arch/arm/plat-samsung/include/plat/fb.h
+++ b/trunk/arch/arm/plat-samsung/include/plat/fb.h
@@ -15,55 +15,7 @@
#ifndef __PLAT_S3C_FB_H
#define __PLAT_S3C_FB_H __FILE__
-/* S3C_FB_MAX_WIN
- * Set to the maximum number of windows that any of the supported hardware
- * can use. Since the platform data uses this for an array size, having it
- * set to the maximum of any version of the hardware can do is safe.
- */
-#define S3C_FB_MAX_WIN (5)
-
-/**
- * struct s3c_fb_pd_win - per window setup data
- * @xres : The window X size.
- * @yres : The window Y size.
- * @virtual_x: The virtual X size.
- * @virtual_y: The virtual Y size.
- */
-struct s3c_fb_pd_win {
- unsigned short default_bpp;
- unsigned short max_bpp;
- unsigned short xres;
- unsigned short yres;
- unsigned short virtual_x;
- unsigned short virtual_y;
-};
-
-/**
- * struct s3c_fb_platdata - S3C driver platform specific information
- * @setup_gpio: Setup the external GPIO pins to the right state to transfer
- * the data from the display system to the connected display
- * device.
- * @vidcon0: The base vidcon0 values to control the panel data format.
- * @vidcon1: The base vidcon1 values to control the panel data output.
- * @vtiming: Video timing when connected to a RGB type panel.
- * @win: The setup data for each hardware window, or NULL for unused.
- * @display_mode: The LCD output display mode.
- *
- * The platform data supplies the video driver with all the information
- * it requires to work with the display(s) attached to the machine. It
- * controls the initial mode, the number of display windows (0 is always
- * the base framebuffer) that are initialised etc.
- *
- */
-struct s3c_fb_platdata {
- void (*setup_gpio)(void);
-
- struct s3c_fb_pd_win *win[S3C_FB_MAX_WIN];
- struct fb_videomode *vtiming;
-
- u32 vidcon0;
- u32 vidcon1;
-};
+#include
/**
* s3c_fb_set_platdata() - Setup the FB device with platform data.
diff --git a/trunk/arch/avr32/include/asm/io.h b/trunk/arch/avr32/include/asm/io.h
index cf60d0a9f176..fc6483f83ccc 100644
--- a/trunk/arch/avr32/include/asm/io.h
+++ b/trunk/arch/avr32/include/asm/io.h
@@ -165,6 +165,10 @@ BUILDIO_IOPORT(l, u32)
#define readw_be __raw_readw
#define readl_be __raw_readl
+#define writeb_relaxed writeb
+#define writew_relaxed writew
+#define writel_relaxed writel
+
#define writeb_be __raw_writeb
#define writew_be __raw_writew
#define writel_be __raw_writel
diff --git a/trunk/arch/c6x/include/asm/irqflags.h b/trunk/arch/c6x/include/asm/irqflags.h
index cf78e09e18c3..2c71d5634ec2 100644
--- a/trunk/arch/c6x/include/asm/irqflags.h
+++ b/trunk/arch/c6x/include/asm/irqflags.h
@@ -27,7 +27,7 @@ static inline unsigned long arch_local_save_flags(void)
/* set interrupt enabled status */
static inline void arch_local_irq_restore(unsigned long flags)
{
- asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags));
+ asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags) : "memory");
}
/* unconditionally enable interrupts */
diff --git a/trunk/arch/ia64/Kconfig b/trunk/arch/ia64/Kconfig
index 9a02f71c6b1f..e7e55a00f94f 100644
--- a/trunk/arch/ia64/Kconfig
+++ b/trunk/arch/ia64/Kconfig
@@ -187,7 +187,7 @@ config IA64_DIG
config IA64_DIG_VTD
bool "DIG+Intel+IOMMU"
- select DMAR
+ select INTEL_IOMMU
select PCI_MSI
config IA64_HP_ZX1
diff --git a/trunk/arch/ia64/include/asm/futex.h b/trunk/arch/ia64/include/asm/futex.h
index d2bf1fd5e44f..76acbcd5c060 100644
--- a/trunk/arch/ia64/include/asm/futex.h
+++ b/trunk/arch/ia64/include/asm/futex.h
@@ -106,16 +106,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
{
- register unsigned long r8 __asm ("r8");
+ register unsigned long r8 __asm ("r8") = 0;
unsigned long prev;
__asm__ __volatile__(
" mf;; \n"
- " mov %0=r0 \n"
" mov ar.ccv=%4;; \n"
"[1:] cmpxchg4.acq %1=[%2],%3,ar.ccv \n"
" .xdata4 \"__ex_table\", 1b-., 2f-. \n"
"[2:]"
- : "=r" (r8), "=r" (prev)
+ : "+r" (r8), "=&r" (prev)
: "r" (uaddr), "r" (newval),
"rO" ((long) (unsigned) oldval)
: "memory");
diff --git a/trunk/arch/ia64/include/asm/mca.h b/trunk/arch/ia64/include/asm/mca.h
index 43f96ab18fa0..8c7096168716 100644
--- a/trunk/arch/ia64/include/asm/mca.h
+++ b/trunk/arch/ia64/include/asm/mca.h
@@ -143,6 +143,7 @@ extern unsigned long __per_cpu_mca[NR_CPUS];
extern int cpe_vector;
extern int ia64_cpe_irq;
extern void ia64_mca_init(void);
+extern void ia64_mca_irq_init(void);
extern void ia64_mca_cpu_init(void *);
extern void ia64_os_mca_dispatch(void);
extern void ia64_os_mca_dispatch_end(void);
diff --git a/trunk/arch/ia64/include/asm/numa.h b/trunk/arch/ia64/include/asm/numa.h
index 2e27ef175652..2db0a6c6daa5 100644
--- a/trunk/arch/ia64/include/asm/numa.h
+++ b/trunk/arch/ia64/include/asm/numa.h
@@ -67,14 +67,13 @@ extern int paddr_to_nid(unsigned long paddr);
extern void map_cpu_to_node(int cpu, int nid);
extern void unmap_cpu_from_node(int cpu, int nid);
-
+extern void numa_clear_node(int cpu);
#else /* !CONFIG_NUMA */
#define map_cpu_to_node(cpu, nid) do{}while(0)
#define unmap_cpu_from_node(cpu, nid) do{}while(0)
-
#define paddr_to_nid(addr) 0
-
+#define numa_clear_node(cpu) do { } while (0)
#endif /* CONFIG_NUMA */
#endif /* _ASM_IA64_NUMA_H */
diff --git a/trunk/arch/ia64/kernel/fsys.S b/trunk/arch/ia64/kernel/fsys.S
index c4cd45d97749..abc6dee3799c 100644
--- a/trunk/arch/ia64/kernel/fsys.S
+++ b/trunk/arch/ia64/kernel/fsys.S
@@ -90,53 +90,6 @@ ENTRY(fsys_getpid)
FSYS_RETURN
END(fsys_getpid)
-ENTRY(fsys_getppid)
- .prologue
- .altrp b6
- .body
- add r17=IA64_TASK_GROUP_LEADER_OFFSET,r16
- ;;
- ld8 r17=[r17] // r17 = current->group_leader
- add r9=TI_FLAGS+IA64_TASK_SIZE,r16
- ;;
-
- ld4 r9=[r9]
- add r17=IA64_TASK_REAL_PARENT_OFFSET,r17 // r17 = &current->group_leader->real_parent
- ;;
- and r9=TIF_ALLWORK_MASK,r9
-
-1: ld8 r18=[r17] // r18 = current->group_leader->real_parent
- ;;
- cmp.ne p8,p0=0,r9
- add r8=IA64_TASK_TGID_OFFSET,r18 // r8 = &current->group_leader->real_parent->tgid
- ;;
-
- /*
- * The .acq is needed to ensure that the read of tgid has returned its data before
- * we re-check "real_parent".
- */
- ld4.acq r8=[r8] // r8 = current->group_leader->real_parent->tgid
-#ifdef CONFIG_SMP
- /*
- * Re-read current->group_leader->real_parent.
- */
- ld8 r19=[r17] // r19 = current->group_leader->real_parent
-(p8) br.spnt.many fsys_fallback_syscall
- ;;
- cmp.ne p6,p0=r18,r19 // did real_parent change?
- mov r19=0 // i must not leak kernel bits...
-(p6) br.cond.spnt.few 1b // yes -> redo the read of tgid and the check
- ;;
- mov r17=0 // i must not leak kernel bits...
- mov r18=0 // i must not leak kernel bits...
-#else
- mov r17=0 // i must not leak kernel bits...
- mov r18=0 // i must not leak kernel bits...
- mov r19=0 // i must not leak kernel bits...
-#endif
- FSYS_RETURN
-END(fsys_getppid)
-
ENTRY(fsys_set_tid_address)
.prologue
.altrp b6
@@ -614,7 +567,7 @@ paravirt_fsyscall_table:
data8 0 // chown
data8 0 // lseek // 1040
data8 fsys_getpid // getpid
- data8 fsys_getppid // getppid
+ data8 0 // getppid
data8 0 // mount
data8 0 // umount
data8 0 // setuid // 1045
diff --git a/trunk/arch/ia64/kernel/iosapic.c b/trunk/arch/ia64/kernel/iosapic.c
index ee33c3aaa2fc..19f107be734e 100644
--- a/trunk/arch/ia64/kernel/iosapic.c
+++ b/trunk/arch/ia64/kernel/iosapic.c
@@ -76,7 +76,7 @@
* PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ
*
* Note: The term "IRQ" is loosely used everywhere in Linux kernel to
- * describeinterrupts. Now we use "IRQ" only for Linux IRQ's. ISA IRQ
+ * describe interrupts. Now we use "IRQ" only for Linux IRQ's. ISA IRQ
* (isa_irq) is the only exception in this source code.
*/
@@ -1010,6 +1010,26 @@ iosapic_check_gsi_range (unsigned int gsi_base, unsigned int ver)
return 0;
}
+static int
+iosapic_delete_rte(unsigned int irq, unsigned int gsi)
+{
+ struct iosapic_rte_info *rte, *temp;
+
+ list_for_each_entry_safe(rte, temp, &iosapic_intr_info[irq].rtes,
+ rte_list) {
+ if (rte->iosapic->gsi_base + rte->rte_index == gsi) {
+ if (rte->refcnt)
+ return -EBUSY;
+
+ list_del(&rte->rte_list);
+ kfree(rte);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
int iosapic_init(unsigned long phys_addr, unsigned int gsi_base)
{
int num_rte, err, index;
@@ -1069,7 +1089,7 @@ int iosapic_init(unsigned long phys_addr, unsigned int gsi_base)
int iosapic_remove(unsigned int gsi_base)
{
- int index, err = 0;
+ int i, irq, index, err = 0;
unsigned long flags;
spin_lock_irqsave(&iosapic_lock, flags);
@@ -1087,6 +1107,16 @@ int iosapic_remove(unsigned int gsi_base)
goto out;
}
+ for (i = gsi_base; i < gsi_base + iosapic_lists[index].num_rte; i++) {
+ irq = __gsi_to_irq(i);
+ if (irq < 0)
+ continue;
+
+ err = iosapic_delete_rte(irq, i);
+ if (err)
+ goto out;
+ }
+
iounmap(iosapic_lists[index].addr);
iosapic_free(index);
out:
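
iosapic_delete_rte() uses the _safe list iterator because the loop frees the node it is standing on; the plain iterator would then have to dereference freed memory to find the next element. A hedged kernel-style sketch of the same pattern, assuming <linux/list.h>, <linux/slab.h> and <linux/errno.h>, with hypothetical type and field names:

    struct item {
            struct list_head list;
            unsigned int key;
            int refcnt;
    };

    static int delete_item(struct list_head *head, unsigned int key)
    {
            struct item *it, *tmp;

            list_for_each_entry_safe(it, tmp, head, list) {
                    if (it->key != key)
                            continue;
                    if (it->refcnt)
                            return -EBUSY;          /* still referenced, keep it */
                    list_del(&it->list);
                    kfree(it);                      /* safe: "tmp" already points past it */
                    return 0;
            }
            return -EINVAL;                         /* no matching entry */
    }
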
diff --git a/trunk/arch/ia64/kernel/irq.c b/trunk/arch/ia64/kernel/irq.c
index ad69606613eb..f2c418281130 100644
--- a/trunk/arch/ia64/kernel/irq.c
+++ b/trunk/arch/ia64/kernel/irq.c
@@ -23,6 +23,8 @@
#include
#include
+#include
+
/*
* 'what should we do if we get a hw irq event on an illegal vector'.
* each architecture has to answer this themselves.
@@ -83,6 +85,12 @@ bool is_affinity_mask_valid(const struct cpumask *cpumask)
#endif /* CONFIG_SMP */
+int __init arch_early_irq_init(void)
+{
+ ia64_mca_irq_init();
+ return 0;
+}
+
#ifdef CONFIG_HOTPLUG_CPU
unsigned int vectors_in_migration[NR_IRQS];
diff --git a/trunk/arch/ia64/kernel/mca.c b/trunk/arch/ia64/kernel/mca.c
index 65bf9cd39044..d7396dbb07bb 100644
--- a/trunk/arch/ia64/kernel/mca.c
+++ b/trunk/arch/ia64/kernel/mca.c
@@ -2074,22 +2074,16 @@ ia64_mca_init(void)
printk(KERN_INFO "MCA related initialization done\n");
}
+
/*
- * ia64_mca_late_init
- *
- * Opportunity to setup things that require initialization later
- * than ia64_mca_init. Setup a timer to poll for CPEs if the
- * platform doesn't support an interrupt driven mechanism.
- *
- * Inputs : None
- * Outputs : Status
+ * These pieces cannot be done in ia64_mca_init() because it is called before
+ * early_irq_init() which would wipe out our percpu irq registrations. But we
+ * cannot leave them until ia64_mca_late_init() because by then all the other
+ * processors have been brought online and have set their own CMC vectors to
+ * point at a non-existent action. Called from arch_early_irq_init().
*/
-static int __init
-ia64_mca_late_init(void)
+void __init ia64_mca_irq_init(void)
{
- if (!mca_init)
- return 0;
-
/*
* Configure the CMCI/P vector and handler. Interrupts for CMC are
* per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
@@ -2108,6 +2102,23 @@ ia64_mca_late_init(void)
/* Setup the CPEI/P handler */
register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
#endif
+}
+
+/*
+ * ia64_mca_late_init
+ *
+ * Opportunity to setup things that require initialization later
+ * than ia64_mca_init. Setup a timer to poll for CPEs if the
+ * platform doesn't support an interrupt driven mechanism.
+ *
+ * Inputs : None
+ * Outputs : Status
+ */
+static int __init
+ia64_mca_late_init(void)
+{
+ if (!mca_init)
+ return 0;
register_hotcpu_notifier(&mca_cpu_notifier);
diff --git a/trunk/arch/ia64/kernel/mca_drv.c b/trunk/arch/ia64/kernel/mca_drv.c
index 9392e021c93b..94f8bf777afa 100644
--- a/trunk/arch/ia64/kernel/mca_drv.c
+++ b/trunk/arch/ia64/kernel/mca_drv.c
@@ -349,7 +349,7 @@ init_record_index_pools(void)
/* - 3 - */
slidx_pool.max_idx = (rec_max_size/sect_min_size) * 2 + 1;
- slidx_pool.buffer = (slidx_list_t *)
+ slidx_pool.buffer =
kmalloc(slidx_pool.max_idx * sizeof(slidx_list_t), GFP_KERNEL);
return slidx_pool.buffer ? 0 : -ENOMEM;
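
The mca_drv.c hunk simply drops the cast on kmalloc(): void * converts implicitly in C, so the cast adds nothing and can hide a missing prototype. A hedged sketch of the resulting idiom, assuming the slidx_pool definitions from this file:

    static int init_slidx_pool(unsigned int max_idx)
    {
            /* no cast, and sizeof(*ptr) keeps the size tied to the element type */
            slidx_pool.buffer = kmalloc(max_idx * sizeof(*slidx_pool.buffer),
                                        GFP_KERNEL);
            return slidx_pool.buffer ? 0 : -ENOMEM;
    }
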
diff --git a/trunk/arch/ia64/kernel/palinfo.c b/trunk/arch/ia64/kernel/palinfo.c
index 77597e5ea60a..79521d5499f9 100644
--- a/trunk/arch/ia64/kernel/palinfo.c
+++ b/trunk/arch/ia64/kernel/palinfo.c
@@ -849,17 +849,6 @@ static palinfo_entry_t palinfo_entries[]={
#define NR_PALINFO_ENTRIES (int) ARRAY_SIZE(palinfo_entries)
-/*
- * this array is used to keep track of the proc entries we create. This is
- * required in the module mode when we need to remove all entries. The procfs code
- * does not do recursion of deletion
- *
- * Notes:
- * - +1 accounts for the cpuN directory entry in /proc/pal
- */
-#define NR_PALINFO_PROC_ENTRIES (NR_CPUS*(NR_PALINFO_ENTRIES+1))
-
-static struct proc_dir_entry *palinfo_proc_entries[NR_PALINFO_PROC_ENTRIES];
static struct proc_dir_entry *palinfo_dir;
/*
@@ -971,60 +960,32 @@ palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, voi
static void __cpuinit
create_palinfo_proc_entries(unsigned int cpu)
{
-# define CPUSTR "cpu%d"
-
pal_func_cpu_u_t f;
- struct proc_dir_entry **pdir;
struct proc_dir_entry *cpu_dir;
int j;
- char cpustr[sizeof(CPUSTR)];
-
-
- /*
- * we keep track of created entries in a depth-first order for
- * cleanup purposes. Each entry is stored into palinfo_proc_entries
- */
- sprintf(cpustr,CPUSTR, cpu);
+ char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */
+ sprintf(cpustr, "cpu%d", cpu);
cpu_dir = proc_mkdir(cpustr, palinfo_dir);
+ if (!cpu_dir)
+ return;
f.req_cpu = cpu;
- /*
- * Compute the location to store per cpu entries
- * We dont store the top level entry in this list, but
- * remove it finally after removing all cpu entries.
- */
- pdir = &palinfo_proc_entries[cpu*(NR_PALINFO_ENTRIES+1)];
- *pdir++ = cpu_dir;
for (j=0; j < NR_PALINFO_ENTRIES; j++) {
f.func_id = j;
- *pdir = create_proc_read_entry(
- palinfo_entries[j].name, 0, cpu_dir,
- palinfo_read_entry, (void *)f.value);
- pdir++;
+ create_proc_read_entry(
+ palinfo_entries[j].name, 0, cpu_dir,
+ palinfo_read_entry, (void *)f.value);
}
}
static void
remove_palinfo_proc_entries(unsigned int hcpu)
{
- int j;
- struct proc_dir_entry *cpu_dir, **pdir;
-
- pdir = &palinfo_proc_entries[hcpu*(NR_PALINFO_ENTRIES+1)];
- cpu_dir = *pdir;
- *pdir++=NULL;
- for (j=0; j < (NR_PALINFO_ENTRIES); j++) {
- if ((*pdir)) {
- remove_proc_entry ((*pdir)->name, cpu_dir);
- *pdir ++= NULL;
- }
- }
-
- if (cpu_dir) {
- remove_proc_entry(cpu_dir->name, palinfo_dir);
- }
+ char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */
+ sprintf(cpustr, "cpu%d", hcpu);
+ remove_proc_subtree(cpustr, palinfo_dir);
}
static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
@@ -1058,6 +1019,8 @@ palinfo_init(void)
printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION);
palinfo_dir = proc_mkdir("pal", NULL);
+ if (!palinfo_dir)
+ return -ENOMEM;
/* Create palinfo dirs in /proc for all online cpus */
for_each_online_cpu(i) {
@@ -1073,22 +1036,8 @@ palinfo_init(void)
static void __exit
palinfo_exit(void)
{
- int i = 0;
-
- /* remove all nodes: depth first pass. Could optimize this */
- for_each_online_cpu(i) {
- remove_palinfo_proc_entries(i);
- }
-
- /*
- * Remove the top level entry finally
- */
- remove_proc_entry(palinfo_dir->name, NULL);
-
- /*
- * Unregister from cpu notifier callbacks
- */
unregister_hotcpu_notifier(&palinfo_cpu_notifier);
+ remove_proc_subtree("pal", NULL);
}
module_init(palinfo_init);
diff --git a/trunk/arch/ia64/kvm/vtlb.c b/trunk/arch/ia64/kvm/vtlb.c
index 4332f7ee5203..a7869f8f49a6 100644
--- a/trunk/arch/ia64/kvm/vtlb.c
+++ b/trunk/arch/ia64/kvm/vtlb.c
@@ -256,7 +256,7 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte)
"srlz.d;;"
"ssm psr.i;;"
"srlz.d;;"
- : "=r"(ret) : "r"(iha), "r"(pte):"memory");
+ : "=&r"(ret) : "r"(iha), "r"(pte) : "memory");
return ret;
}
diff --git a/trunk/arch/ia64/mm/ioremap.c b/trunk/arch/ia64/mm/ioremap.c
index 3dccdd8eb275..43964cde6214 100644
--- a/trunk/arch/ia64/mm/ioremap.c
+++ b/trunk/arch/ia64/mm/ioremap.c
@@ -16,7 +16,7 @@
#include
static inline void __iomem *
-__ioremap (unsigned long phys_addr)
+__ioremap_uc(unsigned long phys_addr)
{
return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr);
}
@@ -24,7 +24,11 @@ __ioremap (unsigned long phys_addr)
void __iomem *
early_ioremap (unsigned long phys_addr, unsigned long size)
{
- return __ioremap(phys_addr);
+ u64 attr;
+ attr = kern_mem_attribute(phys_addr, size);
+ if (attr & EFI_MEMORY_WB)
+ return (void __iomem *) phys_to_virt(phys_addr);
+ return __ioremap_uc(phys_addr);
}
void __iomem *
@@ -47,7 +51,7 @@ ioremap (unsigned long phys_addr, unsigned long size)
if (attr & EFI_MEMORY_WB)
return (void __iomem *) phys_to_virt(phys_addr);
else if (attr & EFI_MEMORY_UC)
- return __ioremap(phys_addr);
+ return __ioremap_uc(phys_addr);
/*
* Some chipsets don't support UC access to memory. If
@@ -93,7 +97,7 @@ ioremap (unsigned long phys_addr, unsigned long size)
return (void __iomem *) (offset + (char __iomem *)addr);
}
- return __ioremap(phys_addr);
+ return __ioremap_uc(phys_addr);
}
EXPORT_SYMBOL(ioremap);
@@ -103,7 +107,7 @@ ioremap_nocache (unsigned long phys_addr, unsigned long size)
if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
return NULL;
- return __ioremap(phys_addr);
+ return __ioremap_uc(phys_addr);
}
EXPORT_SYMBOL(ioremap_nocache);
diff --git a/trunk/arch/ia64/mm/numa.c b/trunk/arch/ia64/mm/numa.c
index 3efea7d0a351..def782e31aac 100644
--- a/trunk/arch/ia64/mm/numa.c
+++ b/trunk/arch/ia64/mm/numa.c
@@ -73,6 +73,11 @@ int __meminit __early_pfn_to_nid(unsigned long pfn)
return -1;
}
+void __cpuinit numa_clear_node(int cpu)
+{
+ unmap_cpu_from_node(cpu, NUMA_NO_NODE);
+}
+
#ifdef CONFIG_MEMORY_HOTPLUG
/*
* SRAT information is stored in node_memblk[], then we can use SRAT
diff --git a/trunk/arch/ia64/pci/pci.c b/trunk/arch/ia64/pci/pci.c
index 60532ab27346..de1474ff0bc5 100644
--- a/trunk/arch/ia64/pci/pci.c
+++ b/trunk/arch/ia64/pci/pci.c
@@ -15,6 +15,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -458,6 +459,16 @@ void pcibios_fixup_bus(struct pci_bus *b)
platform_pci_fixup_bus(b);
}
+void pcibios_add_bus(struct pci_bus *bus)
+{
+ acpi_pci_add_bus(bus);
+}
+
+void pcibios_remove_bus(struct pci_bus *bus)
+{
+ acpi_pci_remove_bus(bus);
+}
+
void pcibios_set_master (struct pci_dev *dev)
{
/* No special bus mastering setup handling */
diff --git a/trunk/arch/ia64/sn/kernel/tiocx.c b/trunk/arch/ia64/sn/kernel/tiocx.c
index 14c1711238c0..e35f6485c1fd 100644
--- a/trunk/arch/ia64/sn/kernel/tiocx.c
+++ b/trunk/arch/ia64/sn/kernel/tiocx.c
@@ -490,11 +490,14 @@ static int __init tiocx_init(void)
{
cnodeid_t cnodeid;
int found_tiocx_device = 0;
+ int err;
if (!ia64_platform_is("sn2"))
return 0;
- bus_register(&tiocx_bus_type);
+ err = bus_register(&tiocx_bus_type);
+ if (err)
+ return err;
for (cnodeid = 0; cnodeid < num_cnodes; cnodeid++) {
nasid_t nasid;
diff --git a/trunk/arch/m68k/include/asm/gpio.h b/trunk/arch/m68k/include/asm/gpio.h
index 4395ffc51fdb..8cc83431805b 100644
--- a/trunk/arch/m68k/include/asm/gpio.h
+++ b/trunk/arch/m68k/include/asm/gpio.h
@@ -86,4 +86,24 @@ static inline int gpio_cansleep(unsigned gpio)
return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio);
}
+static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
+{
+ int err;
+
+ err = gpio_request(gpio, label);
+ if (err)
+ return err;
+
+ if (flags & GPIOF_DIR_IN)
+ err = gpio_direction_input(gpio);
+ else
+ err = gpio_direction_output(gpio,
+ (flags & GPIOF_INIT_HIGH) ? 1 : 0);
+
+ if (err)
+ gpio_free(gpio);
+
+ return err;
+}
+
#endif
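A hypothetical caller of the new helper might look like the sketch below; the GPIO numbers and labels are invented and only linux/gpio.h is assumed. gpio_request_one() bundles gpio_request(), the direction setup and the error-path gpio_free() into a single call:

    #include <linux/gpio.h>

    static int example_setup(void)
    {
            int err;

            /* request GPIO 42 as an input */
            err = gpio_request_one(42, GPIOF_DIR_IN, "example-sense");
            if (err)
                    return err;

            /* request GPIO 43 as an output, initially driven high */
            err = gpio_request_one(43, GPIOF_OUT_INIT_HIGH, "example-reset");
            if (err) {
                    gpio_free(42);
                    return err;
            }
            return 0;
    }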
diff --git a/trunk/arch/mips/Kconfig b/trunk/arch/mips/Kconfig
index cd2e21ff562a..51244bf97271 100644
--- a/trunk/arch/mips/Kconfig
+++ b/trunk/arch/mips/Kconfig
@@ -18,7 +18,7 @@ config MIPS
select HAVE_KRETPROBES
select HAVE_DEBUG_KMEMLEAK
select ARCH_BINFMT_ELF_RANDOMIZE_PIE
- select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT
select RTC_LIB if !MACH_LOONGSON
select GENERIC_ATOMIC64 if !64BIT
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
@@ -657,7 +657,7 @@ config SNI_RM
bool "SNI RM200/300/400"
select FW_ARC if CPU_LITTLE_ENDIAN
select FW_ARC32 if CPU_LITTLE_ENDIAN
- select SNIPROM if CPU_BIG_ENDIAN
+ select FW_SNIPROM if CPU_BIG_ENDIAN
select ARCH_MAY_HAVE_PC_FDC
select BOOT_ELF32
select CEVT_R4K
@@ -1144,7 +1144,7 @@ config DEFAULT_SGI_PARTITION
config FW_ARC32
bool
-config SNIPROM
+config FW_SNIPROM
bool
config BOOT_ELF32
@@ -1493,7 +1493,6 @@ config CPU_XLP
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
- select CPU_HAS_LLSC
select WEAK_ORDERING
select WEAK_REORDERING_BEYOND_LLSC
select CPU_HAS_PREFETCH
diff --git a/trunk/arch/mips/bcm63xx/boards/board_bcm963xx.c b/trunk/arch/mips/bcm63xx/boards/board_bcm963xx.c
index ed1949c29508..9aa7d44898ed 100644
--- a/trunk/arch/mips/bcm63xx/boards/board_bcm963xx.c
+++ b/trunk/arch/mips/bcm63xx/boards/board_bcm963xx.c
@@ -745,10 +745,7 @@ void __init board_prom_init(void)
strcpy(cfe_version, "unknown");
printk(KERN_INFO PFX "CFE version: %s\n", cfe_version);
- if (bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET)) {
- printk(KERN_ERR PFX "invalid nvram checksum\n");
- return;
- }
+ bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET);
board_name = bcm63xx_nvram_get_name();
/* find board by name */
diff --git a/trunk/arch/mips/bcm63xx/nvram.c b/trunk/arch/mips/bcm63xx/nvram.c
index 620611680839..a4b8864f9307 100644
--- a/trunk/arch/mips/bcm63xx/nvram.c
+++ b/trunk/arch/mips/bcm63xx/nvram.c
@@ -38,7 +38,7 @@ struct bcm963xx_nvram {
static struct bcm963xx_nvram nvram;
static int mac_addr_used;
-int __init bcm63xx_nvram_init(void *addr)
+void __init bcm63xx_nvram_init(void *addr)
{
unsigned int check_len;
u32 crc, expected_crc;
@@ -60,9 +60,8 @@ int __init bcm63xx_nvram_init(void *addr)
crc = crc32_le(~0, (u8 *)&nvram, check_len);
if (crc != expected_crc)
- return -EINVAL;
-
- return 0;
+ pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n",
+ expected_crc, crc);
}
u8 *bcm63xx_nvram_get_name(void)
diff --git a/trunk/arch/mips/bcm63xx/setup.c b/trunk/arch/mips/bcm63xx/setup.c
index 314231be788c..35e18e98beb9 100644
--- a/trunk/arch/mips/bcm63xx/setup.c
+++ b/trunk/arch/mips/bcm63xx/setup.c
@@ -157,4 +157,4 @@ int __init bcm63xx_register_devices(void)
return board_register_devices();
}
-device_initcall(bcm63xx_register_devices);
+arch_initcall(bcm63xx_register_devices);
diff --git a/trunk/arch/mips/cavium-octeon/setup.c b/trunk/arch/mips/cavium-octeon/setup.c
index c594a3d4f743..b0baa299f899 100644
--- a/trunk/arch/mips/cavium-octeon/setup.c
+++ b/trunk/arch/mips/cavium-octeon/setup.c
@@ -174,7 +174,10 @@ static int octeon_kexec_prepare(struct kimage *image)
static void octeon_generic_shutdown(void)
{
- int cpu, i;
+ int i;
+#ifdef CONFIG_SMP
+ int cpu;
+#endif
struct cvmx_bootmem_desc *bootmem_desc;
void *named_block_array_ptr;
diff --git a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h
index 62d6a3b4d3b7..4e0b6bc1165e 100644
--- a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h
+++ b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h
@@ -9,10 +9,8 @@
*
 * Initializes the local nvram copy from the target address and checks
* its checksum.
- *
- * Returns 0 on success.
*/
-int __init bcm63xx_nvram_init(void *nvram);
+void bcm63xx_nvram_init(void *nvram);
/**
* bcm63xx_nvram_get_name() - returns the board name according to nvram
diff --git a/trunk/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h b/trunk/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
index d9c828419037..193c0912d38e 100644
--- a/trunk/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
+++ b/trunk/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
@@ -28,11 +28,7 @@
/* #define cpu_has_prefetch ? */
#define cpu_has_mcheck 1
/* #define cpu_has_ejtag ? */
-#ifdef CONFIG_CPU_HAS_LLSC
#define cpu_has_llsc 1
-#else
-#define cpu_has_llsc 0
-#endif
/* #define cpu_has_vtag_icache ? */
/* #define cpu_has_dc_aliases ? */
/* #define cpu_has_ic_fills_f_dc ? */
diff --git a/trunk/arch/mips/include/asm/mipsregs.h b/trunk/arch/mips/include/asm/mipsregs.h
index 12b70c25906a..0da44d422f5b 100644
--- a/trunk/arch/mips/include/asm/mipsregs.h
+++ b/trunk/arch/mips/include/asm/mipsregs.h
@@ -1166,7 +1166,10 @@ do { \
unsigned int __dspctl; \
\
__asm__ __volatile__( \
+ " .set push \n" \
+ " .set dsp \n" \
" rddsp %0, %x1 \n" \
+ " .set pop \n" \
: "=r" (__dspctl) \
: "i" (mask)); \
__dspctl; \
@@ -1175,30 +1178,198 @@ do { \
#define wrdsp(val, mask) \
do { \
__asm__ __volatile__( \
+ " .set push \n" \
+ " .set dsp \n" \
" wrdsp %0, %x1 \n" \
+ " .set pop \n" \
: \
: "r" (val), "i" (mask)); \
} while (0)
-#define mflo0() ({ long mflo0; __asm__("mflo %0, $ac0" : "=r" (mflo0)); mflo0;})
-#define mflo1() ({ long mflo1; __asm__("mflo %0, $ac1" : "=r" (mflo1)); mflo1;})
-#define mflo2() ({ long mflo2; __asm__("mflo %0, $ac2" : "=r" (mflo2)); mflo2;})
-#define mflo3() ({ long mflo3; __asm__("mflo %0, $ac3" : "=r" (mflo3)); mflo3;})
-
-#define mfhi0() ({ long mfhi0; __asm__("mfhi %0, $ac0" : "=r" (mfhi0)); mfhi0;})
-#define mfhi1() ({ long mfhi1; __asm__("mfhi %0, $ac1" : "=r" (mfhi1)); mfhi1;})
-#define mfhi2() ({ long mfhi2; __asm__("mfhi %0, $ac2" : "=r" (mfhi2)); mfhi2;})
-#define mfhi3() ({ long mfhi3; __asm__("mfhi %0, $ac3" : "=r" (mfhi3)); mfhi3;})
-
-#define mtlo0(x) __asm__("mtlo %0, $ac0" ::"r" (x))
-#define mtlo1(x) __asm__("mtlo %0, $ac1" ::"r" (x))
-#define mtlo2(x) __asm__("mtlo %0, $ac2" ::"r" (x))
-#define mtlo3(x) __asm__("mtlo %0, $ac3" ::"r" (x))
-
-#define mthi0(x) __asm__("mthi %0, $ac0" ::"r" (x))
-#define mthi1(x) __asm__("mthi %0, $ac1" ::"r" (x))
-#define mthi2(x) __asm__("mthi %0, $ac2" ::"r" (x))
-#define mthi3(x) __asm__("mthi %0, $ac3" ::"r" (x))
+#define mflo0() \
+({ \
+ long mflo0; \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mflo %0, $ac0 \n" \
+ " .set pop \n" \
+ : "=r" (mflo0)); \
+ mflo0; \
+})
+
+#define mflo1() \
+({ \
+ long mflo1; \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mflo %0, $ac1 \n" \
+ " .set pop \n" \
+ : "=r" (mflo1)); \
+ mflo1; \
+})
+
+#define mflo2() \
+({ \
+ long mflo2; \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mflo %0, $ac2 \n" \
+ " .set pop \n" \
+ : "=r" (mflo2)); \
+ mflo2; \
+})
+
+#define mflo3() \
+({ \
+ long mflo3; \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mflo %0, $ac3 \n" \
+ " .set pop \n" \
+ : "=r" (mflo3)); \
+ mflo3; \
+})
+
+#define mfhi0() \
+({ \
+ long mfhi0; \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mfhi %0, $ac0 \n" \
+ " .set pop \n" \
+ : "=r" (mfhi0)); \
+ mfhi0; \
+})
+
+#define mfhi1() \
+({ \
+ long mfhi1; \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mfhi %0, $ac1 \n" \
+ " .set pop \n" \
+ : "=r" (mfhi1)); \
+ mfhi1; \
+})
+
+#define mfhi2() \
+({ \
+ long mfhi2; \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mfhi %0, $ac2 \n" \
+ " .set pop \n" \
+ : "=r" (mfhi2)); \
+ mfhi2; \
+})
+
+#define mfhi3() \
+({ \
+ long mfhi3; \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mfhi %0, $ac3 \n" \
+ " .set pop \n" \
+ : "=r" (mfhi3)); \
+ mfhi3; \
+})
+
+
+#define mtlo0(x) \
+({ \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mtlo %0, $ac0 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+})
+
+#define mtlo1(x) \
+({ \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mtlo %0, $ac1 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+})
+
+#define mtlo2(x) \
+({ \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mtlo %0, $ac2 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+})
+
+#define mtlo3(x) \
+({ \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mtlo %0, $ac3 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+})
+
+#define mthi0(x) \
+({ \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mthi %0, $ac0 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+})
+
+#define mthi1(x) \
+({ \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mthi %0, $ac1 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+})
+
+#define mthi2(x) \
+({ \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mthi %0, $ac2 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+})
+
+#define mthi3(x) \
+({ \
+ __asm__( \
+ " .set push \n" \
+ " .set dsp \n" \
+ " mthi %0, $ac3 \n" \
+ " .set pop \n" \
+ : \
+ : "r" (x)); \
+})
#else
diff --git a/trunk/arch/mips/include/asm/page.h b/trunk/arch/mips/include/asm/page.h
index 99fc547af9d3..eab99e536b5c 100644
--- a/trunk/arch/mips/include/asm/page.h
+++ b/trunk/arch/mips/include/asm/page.h
@@ -31,7 +31,7 @@
#define PAGE_SHIFT 16
#endif
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE - 1))
+#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
diff --git a/trunk/arch/mips/include/asm/signal.h b/trunk/arch/mips/include/asm/signal.h
index 197f6367c201..8efe5a9e2c3e 100644
--- a/trunk/arch/mips/include/asm/signal.h
+++ b/trunk/arch/mips/include/asm/signal.h
@@ -21,6 +21,6 @@
#include
#include
-#define __ARCH_HAS_ODD_SIGACTION
+#define __ARCH_HAS_IRIX_SIGACTION
#endif /* _ASM_SIGNAL_H */
diff --git a/trunk/arch/mips/include/uapi/asm/signal.h b/trunk/arch/mips/include/uapi/asm/signal.h
index d6b18b4d0f3a..addb9f556b71 100644
--- a/trunk/arch/mips/include/uapi/asm/signal.h
+++ b/trunk/arch/mips/include/uapi/asm/signal.h
@@ -72,6 +72,12 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */
*
* SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
* Unix names RESETHAND and NODEFER respectively.
+ *
+ * SA_RESTORER used to be defined as 0x04000000 but only the O32 ABI ever
+ * supported its use and no libc was using it, so the entire sa-restorer
+ * functionality was removed with lmo commit 39bffc12c3580ab for 2.5.48
+ * retaining only the SA_RESTORER definition as a reminder to avoid
+ * accidental reuse of the mask bit.
*/
#define SA_ONSTACK 0x08000000
#define SA_RESETHAND 0x80000000
@@ -84,8 +90,6 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */
#define SA_NOMASK SA_NODEFER
#define SA_ONESHOT SA_RESETHAND
-#define SA_RESTORER 0x04000000 /* Only for o32 */
-
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
diff --git a/trunk/arch/mips/kernel/Makefile b/trunk/arch/mips/kernel/Makefile
index f81d98f6184c..de75fb50562b 100644
--- a/trunk/arch/mips/kernel/Makefile
+++ b/trunk/arch/mips/kernel/Makefile
@@ -100,29 +100,16 @@ obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
#
-# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is safe
-# to enable DSP assembler support here even if the MIPS Release 2 CPU we
-# are targetting does not support DSP because all code-paths making use of
-# it properly check that the running CPU *actually does* support these
-# instructions.
+# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not
+# safe to unconditionally use the assembler -mdsp / -mdspr2 switches
+# here because the compiler may use DSP ASE instructions (such as lwx) in
+# code paths where we cannot check that the CPU we are running on supports it.
+# Proper abstraction using HAVE_AS_DSP and macros is done in
+# arch/mips/include/asm/mipsregs.h.
#
ifeq ($(CONFIG_CPU_MIPSR2), y)
CFLAGS_DSP = -DHAVE_AS_DSP
-#
-# Check if assembler supports DSP ASE
-#
-ifeq ($(call cc-option-yn,-mdsp), y)
-CFLAGS_DSP += -mdsp
-endif
-
-#
-# Check if assembler supports DSP ASE Rev2
-#
-ifeq ($(call cc-option-yn,-mdspr2), y)
-CFLAGS_DSP += -mdspr2
-endif
-
CFLAGS_signal.o = $(CFLAGS_DSP)
CFLAGS_signal32.o = $(CFLAGS_DSP)
CFLAGS_process.o = $(CFLAGS_DSP)
diff --git a/trunk/arch/mips/kernel/cpu-probe.c b/trunk/arch/mips/kernel/cpu-probe.c
index 6bfccc227a95..5fe66a0c3224 100644
--- a/trunk/arch/mips/kernel/cpu-probe.c
+++ b/trunk/arch/mips/kernel/cpu-probe.c
@@ -580,6 +580,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
c->tlbsize = 48;
break;
case PRID_IMP_VR41XX:
+ set_isa(c, MIPS_CPU_ISA_III);
+ c->options = R4K_OPTS;
+ c->tlbsize = 32;
switch (c->processor_id & 0xf0) {
case PRID_REV_VR4111:
c->cputype = CPU_VR4111;
@@ -604,6 +607,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "NEC VR4131";
} else {
c->cputype = CPU_VR4133;
+ c->options |= MIPS_CPU_LLSC;
__cpu_name[cpu] = "NEC VR4133";
}
break;
@@ -613,9 +617,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "NEC Vr41xx";
break;
}
- set_isa(c, MIPS_CPU_ISA_III);
- c->options = R4K_OPTS;
- c->tlbsize = 32;
break;
case PRID_IMP_R4300:
c->cputype = CPU_R4300;
@@ -1226,10 +1227,8 @@ __cpuinit void cpu_probe(void)
if (c->options & MIPS_CPU_FPU) {
c->fpu_id = cpu_get_fpu_id();
- if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
- c->isa_level == MIPS_CPU_ISA_M32R2 ||
- c->isa_level == MIPS_CPU_ISA_M64R1 ||
- c->isa_level == MIPS_CPU_ISA_M64R2) {
+ if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
+ MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
if (c->fpu_id & MIPS_FPIR_3D)
c->ases |= MIPS_ASE_MIPS3D;
}
diff --git a/trunk/arch/mips/kernel/linux32.c b/trunk/arch/mips/kernel/linux32.c
index 8eeee1c860c0..db9655f08892 100644
--- a/trunk/arch/mips/kernel/linux32.c
+++ b/trunk/arch/mips/kernel/linux32.c
@@ -171,7 +171,7 @@ SYSCALL_DEFINE6(32_ipc, u32, call, long, first, long, second, long, third,
err = compat_sys_shmctl(first, second, compat_ptr(ptr));
break;
default:
- err = -EINVAL;
+ err = -ENOSYS;
break;
}
diff --git a/trunk/arch/mips/kernel/mcount.S b/trunk/arch/mips/kernel/mcount.S
index 165867673357..33d067148e61 100644
--- a/trunk/arch/mips/kernel/mcount.S
+++ b/trunk/arch/mips/kernel/mcount.S
@@ -46,10 +46,9 @@
PTR_L a5, PT_R9(sp)
PTR_L a6, PT_R10(sp)
PTR_L a7, PT_R11(sp)
-#else
- PTR_ADDIU sp, PT_SIZE
#endif
-.endm
+ PTR_ADDIU sp, PT_SIZE
+ .endm
.macro RETURN_BACK
jr ra
@@ -68,7 +67,11 @@ NESTED(ftrace_caller, PT_SIZE, ra)
.globl _mcount
_mcount:
b ftrace_stub
- addiu sp,sp,8
+#ifdef CONFIG_32BIT
+ addiu sp,sp,8
+#else
+ nop
+#endif
/* When tracing is activated, it calls ftrace_caller+8 (aka here) */
lw t1, function_trace_stop
diff --git a/trunk/arch/mips/kernel/proc.c b/trunk/arch/mips/kernel/proc.c
index 135c4aadccbe..7a54f74b7818 100644
--- a/trunk/arch/mips/kernel/proc.c
+++ b/trunk/arch/mips/kernel/proc.c
@@ -67,7 +67,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (cpu_has_mips_r) {
seq_printf(m, "isa\t\t\t:");
if (cpu_has_mips_1)
- seq_printf(m, "%s", "mips1");
+ seq_printf(m, "%s", " mips1");
if (cpu_has_mips_2)
seq_printf(m, "%s", " mips2");
if (cpu_has_mips_3)
diff --git a/trunk/arch/mips/kernel/traps.c b/trunk/arch/mips/kernel/traps.c
index a200b5bdbb87..c3abb88170fc 100644
--- a/trunk/arch/mips/kernel/traps.c
+++ b/trunk/arch/mips/kernel/traps.c
@@ -1571,7 +1571,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
#ifdef CONFIG_64BIT
status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
- if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
+ if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
status_set |= ST0_XX;
if (cpu_has_dsp)
status_set |= ST0_MX;
diff --git a/trunk/arch/mips/lib/bitops.c b/trunk/arch/mips/lib/bitops.c
index 81f1dcfdcab8..a64daee740ee 100644
--- a/trunk/arch/mips/lib/bitops.c
+++ b/trunk/arch/mips/lib/bitops.c
@@ -90,12 +90,12 @@ int __mips_test_and_set_bit(unsigned long nr,
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
- unsigned long res;
+ int res;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
- res = (mask & *a);
+ res = (mask & *a) != 0;
*a |= mask;
raw_local_irq_restore(flags);
return res;
@@ -116,12 +116,12 @@ int __mips_test_and_set_bit_lock(unsigned long nr,
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
- unsigned long res;
+ int res;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
- res = (mask & *a);
+ res = (mask & *a) != 0;
*a |= mask;
raw_local_irq_restore(flags);
return res;
@@ -141,12 +141,12 @@ int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
- unsigned long res;
+ int res;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
- res = (mask & *a);
+ res = (mask & *a) != 0;
*a &= ~mask;
raw_local_irq_restore(flags);
return res;
@@ -166,12 +166,12 @@ int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
- unsigned long res;
+ int res;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
- res = (mask & *a);
+ res = (mask & *a) != 0;
*a ^= mask;
raw_local_irq_restore(flags);
return res;
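The common thread in these hunks is the return path: the functions return int, so assigning the raw (mask & *a) word to an unsigned long and returning it truncates to 32 bits, and a set bit at position 32 or above was reported as clear on 64-bit kernels. Normalizing with != 0 avoids that. A stand-alone user-space sketch of the truncation (not kernel code):

    #include <stdio.h>

    static int old_style(unsigned long word, unsigned int bit)
    {
            unsigned long res = word & (1UL << bit);
            return res;                 /* truncated to int on return */
    }

    static int new_style(unsigned long word, unsigned int bit)
    {
            int res = (word & (1UL << bit)) != 0;
            return res;                 /* always 0 or 1 */
    }

    int main(void)
    {
            unsigned long word = 1UL << 40;     /* only bit 40 set */

            printf("old: %d\n", old_style(word, 40));   /* 0 on 64-bit: bit lost */
            printf("new: %d\n", new_style(word, 40));   /* 1 */
            return 0;
    }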
diff --git a/trunk/arch/mips/lib/csum_partial.S b/trunk/arch/mips/lib/csum_partial.S
index 507147aebd41..a6adffbb4e5f 100644
--- a/trunk/arch/mips/lib/csum_partial.S
+++ b/trunk/arch/mips/lib/csum_partial.S
@@ -270,7 +270,7 @@ LEAF(csum_partial)
#endif
/* odd buffer alignment? */
-#ifdef CPU_MIPSR2
+#ifdef CONFIG_CPU_MIPSR2
wsbh v1, sum
movn sum, v1, t7
#else
@@ -670,7 +670,7 @@ EXC( sb t0, NBYTES-2(dst), .Ls_exc)
addu sum, v1
#endif
-#ifdef CPU_MIPSR2
+#ifdef CONFIG_CPU_MIPSR2
wsbh v1, sum
movn sum, v1, odd
#else
diff --git a/trunk/arch/mips/mm/c-r4k.c b/trunk/arch/mips/mm/c-r4k.c
index ecca559b8d7b..2078915eacb9 100644
--- a/trunk/arch/mips/mm/c-r4k.c
+++ b/trunk/arch/mips/mm/c-r4k.c
@@ -1247,10 +1247,8 @@ static void __cpuinit setup_scache(void)
return;
default:
- if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
- c->isa_level == MIPS_CPU_ISA_M32R2 ||
- c->isa_level == MIPS_CPU_ISA_M64R1 ||
- c->isa_level == MIPS_CPU_ISA_M64R2) {
+ if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
+ MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
#ifdef CONFIG_MIPS_CPU_SCACHE
if (mips_sc_init ()) {
scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
diff --git a/trunk/arch/mips/mm/sc-mips.c b/trunk/arch/mips/mm/sc-mips.c
index 93d937b4b1ba..df96da7e939b 100644
--- a/trunk/arch/mips/mm/sc-mips.c
+++ b/trunk/arch/mips/mm/sc-mips.c
@@ -98,10 +98,8 @@ static inline int __init mips_sc_probe(void)
c->scache.flags |= MIPS_CACHE_NOT_PRESENT;
/* Ignore anything but MIPSxx processors */
- if (c->isa_level != MIPS_CPU_ISA_M32R1 &&
- c->isa_level != MIPS_CPU_ISA_M32R2 &&
- c->isa_level != MIPS_CPU_ISA_M64R1 &&
- c->isa_level != MIPS_CPU_ISA_M64R2)
+ if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
+ MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)))
return 0;
/* Does this MIPS32/MIPS64 CPU have a config2 register? */
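This and the matching hunks in cpu-probe.c and c-r4k.c rely on the MIPS_CPU_ISA_* values being single-bit flags rather than an enumeration, so membership in a set of ISA levels becomes one AND against an OR-ed mask instead of a chain of equality tests. A minimal sketch of that check style, with illustrative flag values that are not the kernel's:

    #define ISA_M32R1 (1 << 0)      /* illustrative values only */
    #define ISA_M32R2 (1 << 1)
    #define ISA_M64R1 (1 << 2)
    #define ISA_M64R2 (1 << 3)

    static int is_mips32_or_mips64(unsigned int isa_level)
    {
            return (isa_level & (ISA_M32R1 | ISA_M32R2 |
                                 ISA_M64R1 | ISA_M64R2)) != 0;
    }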
diff --git a/trunk/arch/mips/pci/pci-alchemy.c b/trunk/arch/mips/pci/pci-alchemy.c
index 38a80c83fd67..d1faece21b6a 100644
--- a/trunk/arch/mips/pci/pci-alchemy.c
+++ b/trunk/arch/mips/pci/pci-alchemy.c
@@ -19,7 +19,7 @@
#include
#include
-#ifdef CONFIG_DEBUG_PCI
+#ifdef CONFIG_PCI_DEBUG
#define DBG(x...) printk(KERN_DEBUG x)
#else
#define DBG(x...) do {} while (0)
@@ -162,7 +162,7 @@ static int config_access(unsigned char access_type, struct pci_bus *bus,
if (status & (1 << 29)) {
*data = 0xffffffff;
error = -1;
- DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d",
+ DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d\n",
access_type, bus->number, device);
} else if ((status >> 28) & 0xf) {
DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n",
diff --git a/trunk/arch/mips/pci/pci.c b/trunk/arch/mips/pci/pci.c
index 0872f12f268d..594e60d6a43b 100644
--- a/trunk/arch/mips/pci/pci.c
+++ b/trunk/arch/mips/pci/pci.c
@@ -115,7 +115,6 @@ static void pcibios_scanbus(struct pci_controller *hose)
pci_bus_assign_resources(bus);
pci_enable_bridges(bus);
}
- bus->dev.of_node = hose->of_node;
}
}
@@ -169,6 +168,13 @@ void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node)
}
}
}
+
+struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
+{
+ struct pci_controller *hose = bus->sysdata;
+
+ return of_node_get(hose->of_node);
+}
#endif
static DEFINE_MUTEX(pci_scan_mutex);
diff --git a/trunk/arch/parisc/Makefile b/trunk/arch/parisc/Makefile
index 01d95e2f0581..113e28206503 100644
--- a/trunk/arch/parisc/Makefile
+++ b/trunk/arch/parisc/Makefile
@@ -65,8 +65,10 @@ ifndef CONFIG_FUNCTION_TRACER
endif
# Use long jumps instead of long branches (needed if your linker fails to
-# link a too big vmlinux executable)
-cflags-$(CONFIG_MLONGCALLS) += -mlong-calls
+# link a too big vmlinux executable). Not enabled for building modules.
+ifdef CONFIG_MLONGCALLS
+KBUILD_CFLAGS_KERNEL += -mlong-calls
+endif
# select which processor to optimise for
cflags-$(CONFIG_PA7100) += -march=1.1 -mschedule=7100
diff --git a/trunk/arch/parisc/include/asm/cacheflush.h b/trunk/arch/parisc/include/asm/cacheflush.h
index 79f694f3ad9b..f0e2784e7cca 100644
--- a/trunk/arch/parisc/include/asm/cacheflush.h
+++ b/trunk/arch/parisc/include/asm/cacheflush.h
@@ -140,7 +140,10 @@ static inline void *kmap(struct page *page)
return page_address(page);
}
-#define kunmap(page) kunmap_parisc(page_address(page))
+static inline void kunmap(struct page *page)
+{
+ kunmap_parisc(page_address(page));
+}
static inline void *kmap_atomic(struct page *page)
{
diff --git a/trunk/arch/parisc/include/asm/pgtable.h b/trunk/arch/parisc/include/asm/pgtable.h
index 7df49fad29f9..1e40d7f86be3 100644
--- a/trunk/arch/parisc/include/asm/pgtable.h
+++ b/trunk/arch/parisc/include/asm/pgtable.h
@@ -16,6 +16,8 @@
#include
#include
+extern spinlock_t pa_dbit_lock;
+
/*
* kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
* memory. For the return value to be meaningful, ADDR must be >=
@@ -44,8 +46,11 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
#define set_pte_at(mm, addr, ptep, pteval) \
do { \
+ unsigned long flags; \
+ spin_lock_irqsave(&pa_dbit_lock, flags); \
set_pte(ptep, pteval); \
purge_tlb_entries(mm, addr); \
+ spin_unlock_irqrestore(&pa_dbit_lock, flags); \
} while (0)
#endif /* !__ASSEMBLY__ */
@@ -435,48 +440,46 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
-#ifdef CONFIG_SMP
+ pte_t pte;
+ unsigned long flags;
+
if (!pte_young(*ptep))
return 0;
- return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep));
-#else
- pte_t pte = *ptep;
- if (!pte_young(pte))
+
+ spin_lock_irqsave(&pa_dbit_lock, flags);
+ pte = *ptep;
+ if (!pte_young(pte)) {
+ spin_unlock_irqrestore(&pa_dbit_lock, flags);
return 0;
- set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
+ }
+ set_pte(ptep, pte_mkold(pte));
+ purge_tlb_entries(vma->vm_mm, addr);
+ spin_unlock_irqrestore(&pa_dbit_lock, flags);
return 1;
-#endif
}
-extern spinlock_t pa_dbit_lock;
-
struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t old_pte;
+ unsigned long flags;
- spin_lock(&pa_dbit_lock);
+ spin_lock_irqsave(&pa_dbit_lock, flags);
old_pte = *ptep;
pte_clear(mm,addr,ptep);
- spin_unlock(&pa_dbit_lock);
+ purge_tlb_entries(mm, addr);
+ spin_unlock_irqrestore(&pa_dbit_lock, flags);
return old_pte;
}
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
-#ifdef CONFIG_SMP
- unsigned long new, old;
-
- do {
- old = pte_val(*ptep);
- new = pte_val(pte_wrprotect(__pte (old)));
- } while (cmpxchg((unsigned long *) ptep, old, new) != old);
+ unsigned long flags;
+ spin_lock_irqsave(&pa_dbit_lock, flags);
+ set_pte(ptep, pte_wrprotect(*ptep));
purge_tlb_entries(mm, addr);
-#else
- pte_t old_pte = *ptep;
- set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
-#endif
+ spin_unlock_irqrestore(&pa_dbit_lock, flags);
}
#define pte_same(A,B) (pte_val(A) == pte_val(B))
diff --git a/trunk/arch/parisc/include/asm/uaccess.h b/trunk/arch/parisc/include/asm/uaccess.h
index 4ba2c93770f1..e0a82358517e 100644
--- a/trunk/arch/parisc/include/asm/uaccess.h
+++ b/trunk/arch/parisc/include/asm/uaccess.h
@@ -181,30 +181,24 @@ struct exception_data {
#if !defined(CONFIG_64BIT)
#define __put_kernel_asm64(__val,ptr) do { \
- u64 __val64 = (u64)(__val); \
- u32 hi = (__val64) >> 32; \
- u32 lo = (__val64) & 0xffffffff; \
__asm__ __volatile__ ( \
"\n1:\tstw %2,0(%1)" \
- "\n2:\tstw %3,4(%1)\n\t" \
+ "\n2:\tstw %R2,4(%1)\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
: "=r"(__pu_err) \
- : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \
+ : "r"(ptr), "r"(__val), "0"(__pu_err) \
: "r1"); \
} while (0)
#define __put_user_asm64(__val,ptr) do { \
- u64 __val64 = (u64)(__val); \
- u32 hi = (__val64) >> 32; \
- u32 lo = (__val64) & 0xffffffff; \
__asm__ __volatile__ ( \
"\n1:\tstw %2,0(%%sr3,%1)" \
- "\n2:\tstw %3,4(%%sr3,%1)\n\t" \
+ "\n2:\tstw %R2,4(%%sr3,%1)\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
: "=r"(__pu_err) \
- : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \
+ : "r"(ptr), "r"(__val), "0"(__pu_err) \
: "r1"); \
} while (0)
diff --git a/trunk/arch/parisc/kernel/cache.c b/trunk/arch/parisc/kernel/cache.c
index 4b12890642eb..83ded26cad06 100644
--- a/trunk/arch/parisc/kernel/cache.c
+++ b/trunk/arch/parisc/kernel/cache.c
@@ -421,14 +421,11 @@ void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
/* Note: purge_tlb_entries can be called at startup with
no context. */
- /* Disable preemption while we play with %sr1. */
- preempt_disable();
- mtsp(mm->context, 1);
purge_tlb_start(flags);
+ mtsp(mm->context, 1);
pdtlb(addr);
pitlb(addr);
purge_tlb_end(flags);
- preempt_enable();
}
EXPORT_SYMBOL(purge_tlb_entries);
diff --git a/trunk/arch/parisc/kernel/parisc_ksyms.c b/trunk/arch/parisc/kernel/parisc_ksyms.c
index 6795dc6c995f..568b2c61ea02 100644
--- a/trunk/arch/parisc/kernel/parisc_ksyms.c
+++ b/trunk/arch/parisc/kernel/parisc_ksyms.c
@@ -120,11 +120,13 @@ extern void __ashrdi3(void);
extern void __ashldi3(void);
extern void __lshrdi3(void);
extern void __muldi3(void);
+extern void __ucmpdi2(void);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__ucmpdi2);
asmlinkage void * __canonicalize_funcptr_for_compare(void *);
EXPORT_SYMBOL(__canonicalize_funcptr_for_compare);
diff --git a/trunk/arch/parisc/lib/Makefile b/trunk/arch/parisc/lib/Makefile
index 5f2e6904d14a..5651536ac733 100644
--- a/trunk/arch/parisc/lib/Makefile
+++ b/trunk/arch/parisc/lib/Makefile
@@ -2,6 +2,7 @@
# Makefile for parisc-specific library files
#
-lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o
+lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \
+ ucmpdi2.o
obj-y := iomap.o
diff --git a/trunk/arch/parisc/lib/ucmpdi2.c b/trunk/arch/parisc/lib/ucmpdi2.c
new file mode 100644
index 000000000000..149c016f32c5
--- /dev/null
+++ b/trunk/arch/parisc/lib/ucmpdi2.c
@@ -0,0 +1,25 @@
+#include
+
+union ull_union {
+ unsigned long long ull;
+ struct {
+ unsigned int high;
+ unsigned int low;
+ } ui;
+};
+
+int __ucmpdi2(unsigned long long a, unsigned long long b)
+{
+ union ull_union au = {.ull = a};
+ union ull_union bu = {.ull = b};
+
+ if (au.ui.high < bu.ui.high)
+ return 0;
+ else if (au.ui.high > bu.ui.high)
+ return 2;
+ if (au.ui.low < bu.ui.low)
+ return 0;
+ else if (au.ui.low > bu.ui.low)
+ return 2;
+ return 1;
+}
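The return values follow the libgcc convention for __ucmpdi2: 0 when a < b, 1 when a == b, 2 when a > b, with the operands compared as unsigned 64-bit integers; the high/low union layout above works because parisc is big-endian. A portable user-space reference of the same convention (not the kernel implementation):

    #include <assert.h>

    static int ucmpdi2_ref(unsigned long long a, unsigned long long b)
    {
            if (a < b)
                    return 0;
            if (a > b)
                    return 2;
            return 1;
    }

    int main(void)
    {
            assert(ucmpdi2_ref(1ULL, 2ULL) == 0);            /* a < b  */
            assert(ucmpdi2_ref(5ULL, 5ULL) == 1);            /* a == b */
            assert(ucmpdi2_ref(1ULL << 40, 1ULL) == 2);      /* a > b  */
            return 0;
    }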
diff --git a/trunk/arch/powerpc/kernel/entry_64.S b/trunk/arch/powerpc/kernel/entry_64.S
index 256c5bf0adb7..04d69c4a5ac2 100644
--- a/trunk/arch/powerpc/kernel/entry_64.S
+++ b/trunk/arch/powerpc/kernel/entry_64.S
@@ -304,7 +304,7 @@ syscall_exit_work:
subi r12,r12,TI_FLAGS
4: /* Anything else left to do? */
- SET_DEFAULT_THREAD_PPR(r3, r9) /* Set thread.ppr = 3 */
+ SET_DEFAULT_THREAD_PPR(r3, r10) /* Set thread.ppr = 3 */
andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
beq .ret_from_except_lite
@@ -657,7 +657,7 @@ resume_kernel:
/* Clear _TIF_EMULATE_STACK_STORE flag */
lis r11,_TIF_EMULATE_STACK_STORE@h
addi r5,r9,TI_FLAGS
- ldarx r4,0,r5
+0: ldarx r4,0,r5
andc r4,r4,r11
stdcx. r4,0,r5
bne- 0b
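The added "0:" label gives the trailing bne- 0b a target, so a failed stdcx. re-runs the ldarx and the flag clear is retried until the reservation holds. A rough C analogue of that retry loop using GCC atomic builtins (illustrative only, not the kernel's code):

    static void clear_flag_atomic(unsigned long *flags, unsigned long flag)
    {
            unsigned long old, new;

            do {
                    old = __atomic_load_n(flags, __ATOMIC_RELAXED);
                    new = old & ~flag;
            } while (!__atomic_compare_exchange_n(flags, &old, new, 0,
                                                  __ATOMIC_RELAXED,
                                                  __ATOMIC_RELAXED));
    }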
diff --git a/trunk/arch/powerpc/kernel/process.c b/trunk/arch/powerpc/kernel/process.c
index 59dd545fdde1..16e77a81ab4f 100644
--- a/trunk/arch/powerpc/kernel/process.c
+++ b/trunk/arch/powerpc/kernel/process.c
@@ -555,10 +555,12 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new)
new->thread.regs->msr |=
(MSR_FP | new->thread.fpexc_mode);
}
+#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) {
do_load_up_transact_altivec(&new->thread);
new->thread.regs->msr |= MSR_VEC;
}
+#endif
/* We may as well turn on VSX too since all the state is restored now */
if (msr & MSR_VSX)
new->thread.regs->msr |= MSR_VSX;
diff --git a/trunk/arch/powerpc/kernel/signal_32.c b/trunk/arch/powerpc/kernel/signal_32.c
index 3acb28e245b4..95068bf569ad 100644
--- a/trunk/arch/powerpc/kernel/signal_32.c
+++ b/trunk/arch/powerpc/kernel/signal_32.c
@@ -866,10 +866,12 @@ static long restore_tm_user_regs(struct pt_regs *regs,
do_load_up_transact_fpu(¤t->thread);
regs->msr |= (MSR_FP | current->thread.fpexc_mode);
}
+#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) {
do_load_up_transact_altivec(¤t->thread);
regs->msr |= MSR_VEC;
}
+#endif
return 0;
}
diff --git a/trunk/arch/powerpc/kernel/signal_64.c b/trunk/arch/powerpc/kernel/signal_64.c
index 995f8543cb57..c1794286098c 100644
--- a/trunk/arch/powerpc/kernel/signal_64.c
+++ b/trunk/arch/powerpc/kernel/signal_64.c
@@ -522,10 +522,12 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
do_load_up_transact_fpu(¤t->thread);
regs->msr |= (MSR_FP | current->thread.fpexc_mode);
}
+#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) {
do_load_up_transact_altivec(¤t->thread);
regs->msr |= MSR_VEC;
}
+#endif
return err;
}
diff --git a/trunk/arch/powerpc/kernel/tm.S b/trunk/arch/powerpc/kernel/tm.S
index 84dbace657ce..2da67e7a16d5 100644
--- a/trunk/arch/powerpc/kernel/tm.S
+++ b/trunk/arch/powerpc/kernel/tm.S
@@ -309,6 +309,7 @@ _GLOBAL(tm_recheckpoint)
or r5, r6, r5 /* Set MSR.FP+.VSX/.VEC */
mtmsr r5
+#ifdef CONFIG_ALTIVEC
/* FP and VEC registers: These are recheckpointed from thread.fpr[]
* and thread.vr[] respectively. The thread.transact_fpr[] version
* is more modern, and will be loaded subsequently by any FPUnavailable
@@ -323,6 +324,7 @@ _GLOBAL(tm_recheckpoint)
REST_32VRS(0, r5, r3) /* r5 scratch, r3 THREAD ptr */
ld r5, THREAD_VRSAVE(r3)
mtspr SPRN_VRSAVE, r5
+#endif
dont_restore_vec:
andi. r0, r4, MSR_FP
diff --git a/trunk/arch/powerpc/kvm/e500.h b/trunk/arch/powerpc/kvm/e500.h
index 41cefd43655f..33db48a8ce24 100644
--- a/trunk/arch/powerpc/kvm/e500.h
+++ b/trunk/arch/powerpc/kvm/e500.h
@@ -26,17 +26,20 @@
#define E500_PID_NUM 3
#define E500_TLB_NUM 2
-#define E500_TLB_VALID 1
-#define E500_TLB_BITMAP 2
+/* entry is mapped somewhere in host TLB */
+#define E500_TLB_VALID (1 << 0)
+/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
+#define E500_TLB_BITMAP (1 << 1)
+/* TLB1 entry is mapped by host TLB0 */
#define E500_TLB_TLB0 (1 << 2)
struct tlbe_ref {
- pfn_t pfn;
- unsigned int flags; /* E500_TLB_* */
+ pfn_t pfn; /* valid only for TLB0, except briefly */
+ unsigned int flags; /* E500_TLB_* */
};
struct tlbe_priv {
- struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
+ struct tlbe_ref ref;
};
#ifdef CONFIG_KVM_E500V2
@@ -63,17 +66,6 @@ struct kvmppc_vcpu_e500 {
unsigned int gtlb_nv[E500_TLB_NUM];
- /*
- * information associated with each host TLB entry --
- * TLB1 only for now. If/when guest TLB1 entries can be
- * mapped with host TLB0, this will be used for that too.
- *
- * We don't want to use this for guest TLB0 because then we'd
- * have the overhead of doing the translation again even if
- * the entry is still in the guest TLB (e.g. we swapped out
- * and back, and our host TLB entries got evicted).
- */
- struct tlbe_ref *tlb_refs[E500_TLB_NUM];
unsigned int host_tlb1_nv;
u32 svr;
diff --git a/trunk/arch/powerpc/kvm/e500_mmu_host.c b/trunk/arch/powerpc/kvm/e500_mmu_host.c
index a222edfb9a9b..1c6a9d729df4 100644
--- a/trunk/arch/powerpc/kvm/e500_mmu_host.c
+++ b/trunk/arch/powerpc/kvm/e500_mmu_host.c
@@ -193,8 +193,11 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;
/* Don't bother with unmapped entries */
- if (!(ref->flags & E500_TLB_VALID))
- return;
+ if (!(ref->flags & E500_TLB_VALID)) {
+ WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
+ "%s: flags %x\n", __func__, ref->flags);
+ WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
+ }
if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
@@ -248,7 +251,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
pfn_t pfn)
{
ref->pfn = pfn;
- ref->flags = E500_TLB_VALID;
+ ref->flags |= E500_TLB_VALID;
if (tlbe_is_writable(gtlbe))
kvm_set_pfn_dirty(pfn);
@@ -257,6 +260,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
if (ref->flags & E500_TLB_VALID) {
+ /* FIXME: don't log bogus pfn for TLB1 */
trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
ref->flags = 0;
}
@@ -274,36 +278,23 @@ static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
- int tlbsel = 0;
- int i;
-
- for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
- struct tlbe_ref *ref =
- &vcpu_e500->gtlb_priv[tlbsel][i].ref;
- kvmppc_e500_ref_release(ref);
- }
-}
-
-static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
- int stlbsel = 1;
+ int tlbsel;
int i;
- kvmppc_e500_tlbil_all(vcpu_e500);
-
- for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
- struct tlbe_ref *ref =
- &vcpu_e500->tlb_refs[stlbsel][i];
- kvmppc_e500_ref_release(ref);
+ for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
+ for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
+ struct tlbe_ref *ref =
+ &vcpu_e500->gtlb_priv[tlbsel][i].ref;
+ kvmppc_e500_ref_release(ref);
+ }
}
-
- clear_tlb_privs(vcpu_e500);
}
void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- clear_tlb_refs(vcpu_e500);
+ kvmppc_e500_tlbil_all(vcpu_e500);
+ clear_tlb_privs(vcpu_e500);
clear_tlb1_bitmap(vcpu_e500);
}
@@ -458,8 +449,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
}
- /* Drop old ref and setup new one. */
- kvmppc_e500_ref_release(ref);
kvmppc_e500_ref_setup(ref, gtlbe, pfn);
kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -507,14 +496,15 @@ static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
vcpu_e500->host_tlb1_nv = 0;
- vcpu_e500->tlb_refs[1][sesel] = *ref;
- vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
- vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
- unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel];
+ unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
}
- vcpu_e500->h2g_tlb1_rmap[sesel] = esel;
+
+ vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
+ vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
+ vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
+ WARN_ON(!(ref->flags & E500_TLB_VALID));
return sesel;
}
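Note the encoding introduced here: h2g_tlb1_rmap[] now stores esel + 1 so that the value 0 can mean "this host TLB1 slot is not tracked", and the lookup above subtracts 1 again. A small sketch of that encoding (names are illustrative, not the kernel's):

    static inline void rmap_set(unsigned int *rmap, int sesel, int esel)
    {
            rmap[sesel] = esel + 1;             /* 0 is reserved for "empty" */
    }

    static inline int rmap_get(const unsigned int *rmap, int sesel)
    {
            return rmap[sesel] ? (int)rmap[sesel] - 1 : -1;  /* -1: no mapping */
    }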
@@ -526,13 +516,12 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
- struct tlbe_ref ref;
+ struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
int sesel;
int r;
- ref.flags = 0;
r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
- &ref);
+ ref);
if (r)
return r;
@@ -544,7 +533,7 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
}
/* Otherwise map into TLB1 */
- sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel);
+ sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);
return 0;
@@ -565,7 +554,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
case 0:
priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
- /* Triggers after clear_tlb_refs or on initial mapping */
+ /* Triggers after clear_tlb_privs or on initial mapping */
if (!(priv->ref.flags & E500_TLB_VALID)) {
kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
} else {
@@ -665,35 +654,16 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
host_tlb_params[0].entries / host_tlb_params[0].ways;
host_tlb_params[1].sets = 1;
- vcpu_e500->tlb_refs[0] =
- kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
- GFP_KERNEL);
- if (!vcpu_e500->tlb_refs[0])
- goto err;
-
- vcpu_e500->tlb_refs[1] =
- kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
- GFP_KERNEL);
- if (!vcpu_e500->tlb_refs[1])
- goto err;
-
vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
host_tlb_params[1].entries,
GFP_KERNEL);
if (!vcpu_e500->h2g_tlb1_rmap)
- goto err;
+ return -EINVAL;
return 0;
-
-err:
- kfree(vcpu_e500->tlb_refs[0]);
- kfree(vcpu_e500->tlb_refs[1]);
- return -EINVAL;
}
void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
kfree(vcpu_e500->h2g_tlb1_rmap);
- kfree(vcpu_e500->tlb_refs[0]);
- kfree(vcpu_e500->tlb_refs[1]);
}
diff --git a/trunk/arch/powerpc/kvm/e500mc.c b/trunk/arch/powerpc/kvm/e500mc.c
index 1f89d26e65fb..2f4baa074b2e 100644
--- a/trunk/arch/powerpc/kvm/e500mc.c
+++ b/trunk/arch/powerpc/kvm/e500mc.c
@@ -108,6 +108,8 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
}
+static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu);
+
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -136,8 +138,11 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
mtspr(SPRN_GESR, vcpu->arch.shared->esr);
- if (vcpu->arch.oldpir != mfspr(SPRN_PIR))
+ if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
+ __get_cpu_var(last_vcpu_on_cpu) != vcpu) {
kvmppc_e500_tlbil_all(vcpu_e500);
+ __get_cpu_var(last_vcpu_on_cpu) = vcpu;
+ }
kvmppc_load_guest_fp(vcpu);
}
diff --git a/trunk/arch/powerpc/platforms/pseries/lpar.c b/trunk/arch/powerpc/platforms/pseries/lpar.c
index 0da39fed355a..299731e9036b 100644
--- a/trunk/arch/powerpc/platforms/pseries/lpar.c
+++ b/trunk/arch/powerpc/platforms/pseries/lpar.c
@@ -186,7 +186,13 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
(0x1UL << 4), &dummy1, &dummy2);
if (lpar_rc == H_SUCCESS)
return i;
- BUG_ON(lpar_rc != H_NOT_FOUND);
+
+ /*
+ * The test for adjunct partition is performed before the
+ * ANDCOND test. H_RESOURCE may be returned, so we need to
+ * check for that as well.
+ */
+ BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);
slot_offset++;
slot_offset &= 0x7;
diff --git a/trunk/arch/s390/Kconfig b/trunk/arch/s390/Kconfig
index eb8fb629f00b..bda6ba6f3cf5 100644
--- a/trunk/arch/s390/Kconfig
+++ b/trunk/arch/s390/Kconfig
@@ -375,19 +375,6 @@ config PACK_STACK
Say Y if you are unsure.
-config SMALL_STACK
- def_bool n
- prompt "Use 8kb for kernel stack instead of 16kb"
- depends on PACK_STACK && 64BIT && !LOCKDEP
- help
- If you say Y here and the compiler supports the -mkernel-backchain
- option the kernel will use a smaller kernel stack size. The reduced
- size is 8kb instead of 16kb. This allows to run more threads on a
- system and reduces the pressure on the memory management for higher
- order page allocations.
-
- Say N if you are unsure.
-
config CHECK_STACK
def_bool y
prompt "Detect kernel stack overflow"
diff --git a/trunk/arch/s390/Makefile b/trunk/arch/s390/Makefile
index 7e3ce78d4290..a7d68a467ce8 100644
--- a/trunk/arch/s390/Makefile
+++ b/trunk/arch/s390/Makefile
@@ -55,22 +55,12 @@ cflags-$(CONFIG_FRAME_POINTER) += -fno-optimize-sibling-calls
ifeq ($(call cc-option-yn,-mkernel-backchain),y)
cflags-$(CONFIG_PACK_STACK) += -mkernel-backchain -D__PACK_STACK
aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK
-cflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
-aflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
-ifdef CONFIG_SMALL_STACK
-STACK_SIZE := $(shell echo $$(($(STACK_SIZE)/2)) )
-endif
endif
# new style option for packed stacks
ifeq ($(call cc-option-yn,-mpacked-stack),y)
cflags-$(CONFIG_PACK_STACK) += -mpacked-stack -D__PACK_STACK
aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK
-cflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
-aflags-$(CONFIG_SMALL_STACK) += -D__SMALL_STACK
-ifdef CONFIG_SMALL_STACK
-STACK_SIZE := $(shell echo $$(($(STACK_SIZE)/2)) )
-endif
endif
ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y)
diff --git a/trunk/arch/s390/hypfs/hypfs_dbfs.c b/trunk/arch/s390/hypfs/hypfs_dbfs.c
index 9fd4a40c6752..bb5dd496614f 100644
--- a/trunk/arch/s390/hypfs/hypfs_dbfs.c
+++ b/trunk/arch/s390/hypfs/hypfs_dbfs.c
@@ -105,9 +105,7 @@ void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df)
int hypfs_dbfs_init(void)
{
dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
- if (IS_ERR(dbfs_dir))
- return PTR_ERR(dbfs_dir);
- return 0;
+ return PTR_RET(dbfs_dir);
}
void hypfs_dbfs_exit(void)
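PTR_RET() from include/linux/err.h folds the usual "return the error code if this is an ERR_PTR, otherwise 0" pattern into a single call, which is exactly what the removed branch did. Roughly equivalent open-coded form:

    static inline int ptr_ret_sketch(const void *ptr)
    {
            if (IS_ERR(ptr))
                    return PTR_ERR(ptr);
            return 0;
    }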
diff --git a/trunk/arch/s390/include/asm/bitops.h b/trunk/arch/s390/include/asm/bitops.h
index 15422933c60b..4d8604e311f3 100644
--- a/trunk/arch/s390/include/asm/bitops.h
+++ b/trunk/arch/s390/include/asm/bitops.h
@@ -61,8 +61,6 @@ extern const char _sb_findmap[];
#ifndef CONFIG_64BIT
-#define __BITOPS_ALIGN 3
-#define __BITOPS_WORDSIZE 32
#define __BITOPS_OR "or"
#define __BITOPS_AND "nr"
#define __BITOPS_XOR "xr"
@@ -81,8 +79,6 @@ extern const char _sb_findmap[];
#else /* CONFIG_64BIT */
-#define __BITOPS_ALIGN 7
-#define __BITOPS_WORDSIZE 64
#define __BITOPS_OR "ogr"
#define __BITOPS_AND "ngr"
#define __BITOPS_XOR "xgr"
@@ -101,8 +97,7 @@ extern const char _sb_findmap[];
#endif /* CONFIG_64BIT */
-#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
-#define __BITOPS_BARRIER() asm volatile("" : : : "memory")
+#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#ifdef CONFIG_SMP
/*
@@ -114,9 +109,9 @@ static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
- addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make OR mask */
- mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
+ mask = 1UL << (nr & (BITS_PER_LONG - 1));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
}
@@ -130,9 +125,9 @@ static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
- addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make AND mask */
- mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
+ mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
}
@@ -146,9 +141,9 @@ static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
- addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make XOR mask */
- mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
+ mask = 1UL << (nr & (BITS_PER_LONG - 1));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
}
@@ -163,12 +158,12 @@ test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
- addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make OR/test mask */
- mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
+ mask = 1UL << (nr & (BITS_PER_LONG - 1));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
- __BITOPS_BARRIER();
+ barrier();
return (old & mask) != 0;
}
@@ -182,12 +177,12 @@ test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
- addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make AND/test mask */
- mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
+ mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
- __BITOPS_BARRIER();
+ barrier();
return (old ^ new) != 0;
}
@@ -201,12 +196,12 @@ test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr;
/* calculate address for CS */
- addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
+ addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
/* make XOR/test mask */
- mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
+ mask = 1UL << (nr & (BITS_PER_LONG - 1));
/* Do the atomic update. */
__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
- __BITOPS_BARRIER();
+ barrier();
return (old & mask) != 0;
}
#endif /* CONFIG_SMP */
@@ -218,7 +213,7 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
asm volatile(
" oc %O0(1,%R0),%1"
: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
@@ -229,7 +224,7 @@ __constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
- addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
*(unsigned char *) addr |= 1 << (nr & 7);
}
@@ -246,7 +241,7 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
asm volatile(
" nc %O0(1,%R0),%1"
: "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" );
@@ -257,7 +252,7 @@ __constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
- addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
*(unsigned char *) addr &= ~(1 << (nr & 7));
}
@@ -273,7 +268,7 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
asm volatile(
" xc %O0(1,%R0),%1"
: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
@@ -284,7 +279,7 @@ __constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
{
unsigned long addr;
- addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
*(unsigned char *) addr ^= 1 << (nr & 7);
}
@@ -302,7 +297,7 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr;
unsigned char ch;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
ch = *(unsigned char *) addr;
asm volatile(
" oc %O0(1,%R0),%1"
@@ -321,7 +316,7 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr;
unsigned char ch;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
ch = *(unsigned char *) addr;
asm volatile(
" nc %O0(1,%R0),%1"
@@ -340,7 +335,7 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr;
unsigned char ch;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
ch = *(unsigned char *) addr;
asm volatile(
" xc %O0(1,%R0),%1"
@@ -376,7 +371,7 @@ static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr
unsigned long addr;
unsigned char ch;
- addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
+ addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
ch = *(volatile unsigned char *) addr;
return (ch >> (nr & 7)) & 1;
}
@@ -384,7 +379,7 @@ static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr
static inline int
__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) {
return (((volatile char *) addr)
- [(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7))) != 0;
+ [(nr^(BITS_PER_LONG-8))>>3] & (1<<(nr&7))) != 0;
}
#define test_bit(nr,addr) \
@@ -693,18 +688,18 @@ static inline int find_next_bit_left(const unsigned long *addr,
if (offset >= size)
return size;
- bit = offset & (__BITOPS_WORDSIZE - 1);
+ bit = offset & (BITS_PER_LONG - 1);
offset -= bit;
size -= offset;
- p = addr + offset / __BITOPS_WORDSIZE;
+ p = addr + offset / BITS_PER_LONG;
if (bit) {
set = __flo_word(0, *p & (~0UL << bit));
if (set >= size)
return size + offset;
- if (set < __BITOPS_WORDSIZE)
+ if (set < BITS_PER_LONG)
return set + offset;
- offset += __BITOPS_WORDSIZE;
- size -= __BITOPS_WORDSIZE;
+ offset += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
p++;
}
return offset + find_first_bit_left(p, size);
@@ -736,22 +731,22 @@ static inline int find_next_zero_bit (const unsigned long * addr,
if (offset >= size)
return size;
- bit = offset & (__BITOPS_WORDSIZE - 1);
+ bit = offset & (BITS_PER_LONG - 1);
offset -= bit;
size -= offset;
- p = addr + offset / __BITOPS_WORDSIZE;
+ p = addr + offset / BITS_PER_LONG;
if (bit) {
/*
- * __ffz_word returns __BITOPS_WORDSIZE
+ * __ffz_word returns BITS_PER_LONG
* if no zero bit is present in the word.
*/
set = __ffz_word(bit, *p >> bit);
if (set >= size)
return size + offset;
- if (set < __BITOPS_WORDSIZE)
+ if (set < BITS_PER_LONG)
return set + offset;
- offset += __BITOPS_WORDSIZE;
- size -= __BITOPS_WORDSIZE;
+ offset += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
p++;
}
return offset + find_first_zero_bit(p, size);
@@ -773,22 +768,22 @@ static inline int find_next_bit (const unsigned long * addr,
if (offset >= size)
return size;
- bit = offset & (__BITOPS_WORDSIZE - 1);
+ bit = offset & (BITS_PER_LONG - 1);
offset -= bit;
size -= offset;
- p = addr + offset / __BITOPS_WORDSIZE;
+ p = addr + offset / BITS_PER_LONG;
if (bit) {
/*
- * __ffs_word returns __BITOPS_WORDSIZE
+ * __ffs_word returns BITS_PER_LONG
* if no one bit is present in the word.
*/
set = __ffs_word(0, *p & (~0UL << bit));
if (set >= size)
return size + offset;
- if (set < __BITOPS_WORDSIZE)
+ if (set < BITS_PER_LONG)
return set + offset;
- offset += __BITOPS_WORDSIZE;
- size -= __BITOPS_WORDSIZE;
+ offset += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
p++;
}
return offset + find_first_bit(p, size);
@@ -843,22 +838,22 @@ static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
if (offset >= size)
return size;
- bit = offset & (__BITOPS_WORDSIZE - 1);
+ bit = offset & (BITS_PER_LONG - 1);
offset -= bit;
size -= offset;
- p = addr + offset / __BITOPS_WORDSIZE;
+ p = addr + offset / BITS_PER_LONG;
if (bit) {
/*
- * s390 version of ffz returns __BITOPS_WORDSIZE
+ * s390 version of ffz returns BITS_PER_LONG
* if no zero bit is present in the word.
*/
set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
if (set >= size)
return size + offset;
- if (set < __BITOPS_WORDSIZE)
+ if (set < BITS_PER_LONG)
return set + offset;
- offset += __BITOPS_WORDSIZE;
- size -= __BITOPS_WORDSIZE;
+ offset += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
p++;
}
return offset + find_first_zero_bit_le(p, size);
@@ -885,22 +880,22 @@ static inline int find_next_bit_le(void *vaddr, unsigned long size,
if (offset >= size)
return size;
- bit = offset & (__BITOPS_WORDSIZE - 1);
+ bit = offset & (BITS_PER_LONG - 1);
offset -= bit;
size -= offset;
- p = addr + offset / __BITOPS_WORDSIZE;
+ p = addr + offset / BITS_PER_LONG;
if (bit) {
/*
- * s390 version of ffz returns __BITOPS_WORDSIZE
+ * s390 version of ffz returns BITS_PER_LONG
* if no zero bit is present in the word.
*/
set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
if (set >= size)
return size + offset;
- if (set < __BITOPS_WORDSIZE)
+ if (set < BITS_PER_LONG)
return set + offset;
- offset += __BITOPS_WORDSIZE;
- size -= __BITOPS_WORDSIZE;
+ offset += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
p++;
}
return offset + find_first_bit_le(p, size);
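The BITS_PER_LONG substitutions do not change the arithmetic: (nr ^ (nr & (BITS_PER_LONG - 1))) is nr rounded down to a word boundary, and the >> 3 turns that bit count into a byte offset into the bitmap. A user-space check of that identity, assuming BITS_PER_LONG == 8 * sizeof(long):

    #include <assert.h>

    #define BITS_PER_LONG (8 * (int)sizeof(long))

    int main(void)
    {
            unsigned long nr;

            for (nr = 0; nr < 4096; nr++) {
                    unsigned long a = (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
                    unsigned long b = (nr / BITS_PER_LONG) * sizeof(long);

                    assert(a == b);
            }
            return 0;
    }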
diff --git a/trunk/arch/s390/include/asm/ccwdev.h b/trunk/arch/s390/include/asm/ccwdev.h
index e6061617a50b..f201af8be580 100644
--- a/trunk/arch/s390/include/asm/ccwdev.h
+++ b/trunk/arch/s390/include/asm/ccwdev.h
@@ -220,7 +220,8 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *);
#define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)
extern struct ccw_device *ccw_device_probe_console(void);
-extern int ccw_device_force_console(void);
+extern void ccw_device_wait_idle(struct ccw_device *);
+extern int ccw_device_force_console(struct ccw_device *);
int ccw_device_siosl(struct ccw_device *);
diff --git a/trunk/arch/s390/include/asm/cio.h b/trunk/arch/s390/include/asm/cio.h
index ad2b924167d7..ffb898961c8d 100644
--- a/trunk/arch/s390/include/asm/cio.h
+++ b/trunk/arch/s390/include/asm/cio.h
@@ -296,8 +296,6 @@ static inline int ccw_dev_id_is_equal(struct ccw_dev_id *dev_id1,
return 0;
}
-extern void wait_cons_dev(void);
-
extern void css_schedule_reprobe(void);
extern void reipl_ccw_dev(struct ccw_dev_id *id);
diff --git a/trunk/arch/s390/include/asm/compat.h b/trunk/arch/s390/include/asm/compat.h
index f8c6df6cd1f0..c1e7c646727c 100644
--- a/trunk/arch/s390/include/asm/compat.h
+++ b/trunk/arch/s390/include/asm/compat.h
@@ -70,6 +70,22 @@ typedef u32 compat_ulong_t;
typedef u64 compat_u64;
typedef u32 compat_uptr_t;
+typedef struct {
+ u32 mask;
+ u32 addr;
+} __aligned(8) psw_compat_t;
+
+typedef struct {
+ psw_compat_t psw;
+ u32 gprs[NUM_GPRS];
+ u32 acrs[NUM_ACRS];
+ u32 orig_gpr2;
+} s390_compat_regs;
+
+typedef struct {
+ u32 gprs_high[NUM_GPRS];
+} s390_compat_regs_high;
+
struct compat_timespec {
compat_time_t tv_sec;
s32 tv_nsec;
@@ -124,18 +140,33 @@ struct compat_flock64 {
};
struct compat_statfs {
- s32 f_type;
- s32 f_bsize;
- s32 f_blocks;
- s32 f_bfree;
- s32 f_bavail;
- s32 f_files;
- s32 f_ffree;
+ u32 f_type;
+ u32 f_bsize;
+ u32 f_blocks;
+ u32 f_bfree;
+ u32 f_bavail;
+ u32 f_files;
+ u32 f_ffree;
+ compat_fsid_t f_fsid;
+ u32 f_namelen;
+ u32 f_frsize;
+ u32 f_flags;
+ u32 f_spare[4];
+};
+
+struct compat_statfs64 {
+ u32 f_type;
+ u32 f_bsize;
+ u64 f_blocks;
+ u64 f_bfree;
+ u64 f_bavail;
+ u64 f_files;
+ u64 f_ffree;
compat_fsid_t f_fsid;
- s32 f_namelen;
- s32 f_frsize;
- s32 f_flags;
- s32 f_spare[5];
+ u32 f_namelen;
+ u32 f_frsize;
+ u32 f_flags;
+ u32 f_spare[4];
};
#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
@@ -248,8 +279,6 @@ static inline int is_compat_task(void)
return is_32bit_task();
}
-#endif
-
static inline void __user *arch_compat_alloc_user_space(long len)
{
unsigned long stack;
@@ -260,6 +289,8 @@ static inline void __user *arch_compat_alloc_user_space(long len)
return (void __user *) (stack - len);
}
+#endif
+
struct compat_ipc64_perm {
compat_key_t key;
__compat_uid32_t uid;
diff --git a/trunk/arch/s390/include/asm/elf.h b/trunk/arch/s390/include/asm/elf.h
index 1bfdf24b85a2..78f4f8711d58 100644
--- a/trunk/arch/s390/include/asm/elf.h
+++ b/trunk/arch/s390/include/asm/elf.h
@@ -119,6 +119,8 @@
*/
#include
+#include
+#include
#include
typedef s390_fp_regs elf_fpregset_t;
@@ -180,18 +182,31 @@ extern unsigned long elf_hwcap;
extern char elf_platform[];
#define ELF_PLATFORM (elf_platform)
-#ifdef CONFIG_64BIT
+#ifndef CONFIG_COMPAT
+#define SET_PERSONALITY(ex) \
+do { \
+ set_personality(PER_LINUX | \
+ (current->personality & (~PER_MASK))); \
+ current_thread_info()->sys_call_table = \
+ (unsigned long) &sys_call_table; \
+} while (0)
+#else /* CONFIG_COMPAT */
#define SET_PERSONALITY(ex) \
do { \
if (personality(current->personality) != PER_LINUX32) \
set_personality(PER_LINUX | \
(current->personality & ~PER_MASK)); \
- if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
+ if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \
set_thread_flag(TIF_31BIT); \
- else \
+ current_thread_info()->sys_call_table = \
+ (unsigned long) &sys_call_table_emu; \
+ } else { \
clear_thread_flag(TIF_31BIT); \
+ current_thread_info()->sys_call_table = \
+ (unsigned long) &sys_call_table; \
+ } \
} while (0)
-#endif /* CONFIG_64BIT */
+#endif /* CONFIG_COMPAT */
#define STACK_RND_MASK 0x7ffUL
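The reworked SET_PERSONALITY() above caches the address of the matching system call table in thread_info at exec time. A conceptual C sketch of what that buys the system call path (the real dispatch is the assembly in entry.S/entry64.S further down; dispatch_syscall() and call_handler() are hypothetical names, not part of this patch):

/* Hypothetical C rendering of the dispatch done in entry64.S: the table
 * pointer was already chosen by SET_PERSONALITY(), so the hot path is a
 * single load instead of a TIF_31BIT test and branch per system call. */
static long dispatch_syscall(struct thread_info *ti, unsigned int nr)
{
	const unsigned int *table = (const unsigned int *) ti->sys_call_table;

	return call_handler(table[nr]);		/* call_handler() is made up */
}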
diff --git a/trunk/arch/s390/include/asm/io.h b/trunk/arch/s390/include/asm/io.h
index 27cb32185ce1..379d96e2105e 100644
--- a/trunk/arch/s390/include/asm/io.h
+++ b/trunk/arch/s390/include/asm/io.h
@@ -50,10 +50,6 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
#define ioremap_nocache(addr, size) ioremap(addr, size)
#define ioremap_wc ioremap_nocache
-/* TODO: s390 cannot support io_remap_pfn_range... */
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
- remap_pfn_range(vma, vaddr, pfn, size, prot)
-
static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
{
return (void __iomem *) offset;
diff --git a/trunk/arch/s390/include/asm/pci.h b/trunk/arch/s390/include/asm/pci.h
index 05333b7f0469..6c1801235db9 100644
--- a/trunk/arch/s390/include/asm/pci.h
+++ b/trunk/arch/s390/include/asm/pci.h
@@ -140,6 +140,7 @@ static inline bool zdev_enabled(struct zpci_dev *zdev)
struct zpci_dev *zpci_alloc_device(void);
int zpci_create_device(struct zpci_dev *);
int zpci_enable_device(struct zpci_dev *);
+int zpci_disable_device(struct zpci_dev *);
void zpci_stop_device(struct zpci_dev *);
void zpci_free_device(struct zpci_dev *);
int zpci_scan_device(struct zpci_dev *);
diff --git a/trunk/arch/s390/include/asm/pci_debug.h b/trunk/arch/s390/include/asm/pci_debug.h
index 6bbec4265b6e..1ca5d1047c71 100644
--- a/trunk/arch/s390/include/asm/pci_debug.h
+++ b/trunk/arch/s390/include/asm/pci_debug.h
@@ -7,14 +7,11 @@ extern debug_info_t *pci_debug_msg_id;
extern debug_info_t *pci_debug_err_id;
#ifdef CONFIG_PCI_DEBUG
-#define zpci_dbg(fmt, args...) \
- do { \
- if (pci_debug_msg_id->level >= 2) \
- debug_sprintf_event(pci_debug_msg_id, 2, fmt , ## args);\
- } while (0)
+#define zpci_dbg(imp, fmt, args...) \
+ debug_sprintf_event(pci_debug_msg_id, imp, fmt, ##args)
#else /* !CONFIG_PCI_DEBUG */
-#define zpci_dbg(fmt, args...) do { } while (0)
+#define zpci_dbg(imp, fmt, args...) do { } while (0)
#endif
#define zpci_err(text...) \
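With the importance level now passed straight through to debug_sprintf_event(), each call site picks how verbose its trace event is. A hypothetical call site, assuming the usual s390 debug facility semantics (fid, fh and rc are illustrative variables, not taken from this patch):

	zpci_dbg(3, "add fid:%x fh:%x\n", fid, fh);	/* only recorded if the debug level is >= 3 */
	zpci_dbg(0, "err fid:%x rc:%d\n", fid, rc);	/* recorded at any debug level */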
diff --git a/trunk/arch/s390/include/asm/pci_insn.h b/trunk/arch/s390/include/asm/pci_insn.h
index 1486a98d5dad..e6a2bdd4d705 100644
--- a/trunk/arch/s390/include/asm/pci_insn.h
+++ b/trunk/arch/s390/include/asm/pci_insn.h
@@ -1,10 +1,6 @@
#ifndef _ASM_S390_PCI_INSN_H
#define _ASM_S390_PCI_INSN_H
-#include
-
-#define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */
-
/* Load/Store status codes */
#define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4
#define ZPCI_PCI_ST_FUNC_IN_ERR 8
@@ -82,199 +78,12 @@ struct zpci_fib {
u64 reserved7;
} __packed;
-/* Modify PCI Function Controls */
-static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
-{
- u8 cc;
-
- asm volatile (
- " .insn rxy,0xe300000000d0,%[req],%[fib]\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
- : : "cc");
- *status = req >> 24 & 0xff;
- return cc;
-}
-
-static inline int mpcifc_instr(u64 req, struct zpci_fib *fib)
-{
- u8 cc, status;
-
- do {
- cc = __mpcifc(req, fib, &status);
- if (cc == 2)
- msleep(ZPCI_INSN_BUSY_DELAY);
- } while (cc == 2);
-
- if (cc)
- printk_once(KERN_ERR "%s: error cc: %d status: %d\n",
- __func__, cc, status);
- return (cc) ? -EIO : 0;
-}
-
-/* Refresh PCI Translations */
-static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
-{
- register u64 __addr asm("2") = addr;
- register u64 __range asm("3") = range;
- u8 cc;
-
- asm volatile (
- " .insn rre,0xb9d30000,%[fn],%[addr]\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc), [fn] "+d" (fn)
- : [addr] "d" (__addr), "d" (__range)
- : "cc");
- *status = fn >> 24 & 0xff;
- return cc;
-}
-
-static inline int rpcit_instr(u64 fn, u64 addr, u64 range)
-{
- u8 cc, status;
-
- do {
- cc = __rpcit(fn, addr, range, &status);
- if (cc == 2)
- udelay(ZPCI_INSN_BUSY_DELAY);
- } while (cc == 2);
-
- if (cc)
- printk_once(KERN_ERR "%s: error cc: %d status: %d dma_addr: %Lx size: %Lx\n",
- __func__, cc, status, addr, range);
- return (cc) ? -EIO : 0;
-}
-
-/* Store PCI function controls */
-static inline u8 __stpcifc(u32 handle, u8 space, struct zpci_fib *fib, u8 *status)
-{
- u64 fn = (u64) handle << 32 | space << 16;
- u8 cc;
-
- asm volatile (
- " .insn rxy,0xe300000000d4,%[fn],%[fib]\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc), [fn] "+d" (fn), [fib] "=m" (*fib)
- : : "cc");
- *status = fn >> 24 & 0xff;
- return cc;
-}
-
-/* Set Interruption Controls */
-static inline void sic_instr(u16 ctl, char *unused, u8 isc)
-{
- asm volatile (
- " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
- : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
-}
-
-/* PCI Load */
-static inline u8 __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
-{
- register u64 __req asm("2") = req;
- register u64 __offset asm("3") = offset;
- u64 __data;
- u8 cc;
-
- asm volatile (
- " .insn rre,0xb9d20000,%[data],%[req]\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc), [data] "=d" (__data), [req] "+d" (__req)
- : "d" (__offset)
- : "cc");
- *status = __req >> 24 & 0xff;
- *data = __data;
- return cc;
-}
-
-static inline int pcilg_instr(u64 *data, u64 req, u64 offset)
-{
- u8 cc, status;
-
- do {
- cc = __pcilg(data, req, offset, &status);
- if (cc == 2)
- udelay(ZPCI_INSN_BUSY_DELAY);
- } while (cc == 2);
-
- if (cc) {
- printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
- __func__, cc, status, req, offset);
- /* TODO: on IO errors set data to 0xff...
- * here or in users of pcilg (le conversion)?
- */
- }
- return (cc) ? -EIO : 0;
-}
-
-/* PCI Store */
-static inline u8 __pcistg(u64 data, u64 req, u64 offset, u8 *status)
-{
- register u64 __req asm("2") = req;
- register u64 __offset asm("3") = offset;
- u8 cc;
-
- asm volatile (
- " .insn rre,0xb9d00000,%[data],%[req]\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc), [req] "+d" (__req)
- : "d" (__offset), [data] "d" (data)
- : "cc");
- *status = __req >> 24 & 0xff;
- return cc;
-}
-
-static inline int pcistg_instr(u64 data, u64 req, u64 offset)
-{
- u8 cc, status;
-
- do {
- cc = __pcistg(data, req, offset, &status);
- if (cc == 2)
- udelay(ZPCI_INSN_BUSY_DELAY);
- } while (cc == 2);
-
- if (cc)
- printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
- __func__, cc, status, req, offset);
- return (cc) ? -EIO : 0;
-}
-
-/* PCI Store Block */
-static inline u8 __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
-{
- u8 cc;
-
- asm volatile (
- " .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=d" (cc), [req] "+d" (req)
- : [offset] "d" (offset), [data] "Q" (*data)
- : "cc");
- *status = req >> 24 & 0xff;
- return cc;
-}
-
-static inline int pcistb_instr(const u64 *data, u64 req, u64 offset)
-{
- u8 cc, status;
-
- do {
- cc = __pcistb(data, req, offset, &status);
- if (cc == 2)
- udelay(ZPCI_INSN_BUSY_DELAY);
- } while (cc == 2);
- if (cc)
- printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
- __func__, cc, status, req, offset);
- return (cc) ? -EIO : 0;
-}
+int s390pci_mod_fc(u64 req, struct zpci_fib *fib);
+int s390pci_refresh_trans(u64 fn, u64 addr, u64 range);
+int s390pci_load(u64 *data, u64 req, u64 offset);
+int s390pci_store(u64 data, u64 req, u64 offset);
+int s390pci_store_block(const u64 *data, u64 req, u64 offset);
+void set_irq_ctrl(u16 ctl, char *unused, u8 isc);
#endif
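The inline instruction wrappers and their busy-retry loops move out of line; callers now see only the s390pci_* declarations above. A minimal caller-side sketch (demo_read32() is a hypothetical name; the all-ones-on-error convention mirrors the zpci_read_* accessors in pci_io.h below):

static u32 demo_read32(u64 req, u64 offset)
{
	u64 data;

	if (s390pci_load(&data, req, offset))
		data = -1ULL;		/* on error report all ones, like zpci_read_* */
	return (u32) data;
}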
diff --git a/trunk/arch/s390/include/asm/pci_io.h b/trunk/arch/s390/include/asm/pci_io.h
index 5fd81f31d6c7..83a9caa6ae53 100644
--- a/trunk/arch/s390/include/asm/pci_io.h
+++ b/trunk/arch/s390/include/asm/pci_io.h
@@ -36,7 +36,7 @@ static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \
u64 data; \
int rc; \
\
- rc = pcilg_instr(&data, req, ZPCI_OFFSET(addr)); \
+ rc = s390pci_load(&data, req, ZPCI_OFFSET(addr)); \
if (rc) \
data = -1ULL; \
return (RETTYPE) data; \
@@ -50,7 +50,7 @@ static inline void zpci_write_##VALTYPE(VALTYPE val, \
u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
u64 data = (VALTYPE) val; \
\
- pcistg_instr(data, req, ZPCI_OFFSET(addr)); \
+ s390pci_store(data, req, ZPCI_OFFSET(addr)); \
}
zpci_read(8, u64)
@@ -83,15 +83,18 @@ static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len
val = 0; /* let FW report error */
break;
}
- return pcistg_instr(val, req, offset);
+ return s390pci_store(val, req, offset);
}
static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
{
u64 data;
- u8 cc;
+ int cc;
+
+ cc = s390pci_load(&data, req, offset);
+ if (cc)
+ goto out;
- cc = pcilg_instr(&data, req, offset);
switch (len) {
case 1:
*((u8 *) dst) = (u8) data;
@@ -106,12 +109,13 @@ static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
*((u64 *) dst) = (u64) data;
break;
}
+out:
return cc;
}
static inline int zpci_write_block(u64 req, const u64 *data, u64 offset)
{
- return pcistb_instr(data, req, offset);
+ return s390pci_store_block(data, req, offset);
}
static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
diff --git a/trunk/arch/s390/include/asm/pgtable.h b/trunk/arch/s390/include/asm/pgtable.h
index 4a2930844d43..4a64c0e5428f 100644
--- a/trunk/arch/s390/include/asm/pgtable.h
+++ b/trunk/arch/s390/include/asm/pgtable.h
@@ -57,6 +57,10 @@ extern unsigned long zero_page_mask;
(((unsigned long)(vaddr)) &zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
+/* TODO: s390 cannot support io_remap_pfn_range... */
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
#endif /* !__ASSEMBLY__ */
/*
@@ -344,6 +348,7 @@ extern unsigned long MODULES_END;
#define _REGION3_ENTRY_CO 0x100 /* change-recording override */
/* Bits in the segment table entry */
+#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */
#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
@@ -759,6 +764,8 @@ void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
unsigned long to, unsigned long length);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
+unsigned long __gmap_translate(unsigned long address, struct gmap *);
+unsigned long gmap_translate(unsigned long address, struct gmap *);
unsigned long __gmap_fault(unsigned long address, struct gmap *);
unsigned long gmap_fault(unsigned long address, struct gmap *);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
@@ -1531,7 +1538,8 @@ extern int s390_enable_sie(void);
/*
* No page table caches to initialise
*/
-#define pgtable_cache_init() do { } while (0)
+static inline void pgtable_cache_init(void) { }
+static inline void check_pgt_cache(void) { }
#include
diff --git a/trunk/arch/s390/include/asm/processor.h b/trunk/arch/s390/include/asm/processor.h
index 94e749c90230..6b499870662f 100644
--- a/trunk/arch/s390/include/asm/processor.h
+++ b/trunk/arch/s390/include/asm/processor.h
@@ -161,7 +161,8 @@ extern unsigned long thread_saved_pc(struct task_struct *t);
extern void show_code(struct pt_regs *regs);
extern void print_fn_code(unsigned char *code, unsigned long len);
-extern int insn_to_mnemonic(unsigned char *instruction, char buf[8]);
+extern int insn_to_mnemonic(unsigned char *instruction, char *buf,
+ unsigned int len);
unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *) \
diff --git a/trunk/arch/s390/include/asm/ptrace.h b/trunk/arch/s390/include/asm/ptrace.h
index 3ee5da3bc10c..559512a455da 100644
--- a/trunk/arch/s390/include/asm/ptrace.h
+++ b/trunk/arch/s390/include/asm/ptrace.h
@@ -9,9 +9,7 @@
#include
#ifndef __ASSEMBLY__
-#ifndef __s390x__
-#else /* __s390x__ */
-#endif /* __s390x__ */
+
extern long psw_kernel_bits;
extern long psw_user_bits;
@@ -77,8 +75,6 @@ struct per_struct_kernel {
#define PER_CONTROL_SUSPENSION 0x00400000UL
#define PER_CONTROL_ALTERATION 0x00200000UL
-#ifdef __s390x__
-#endif /* __s390x__ */
/*
* These are defined as per linux/ptrace.h, which see.
*/
diff --git a/trunk/arch/s390/include/asm/syscall.h b/trunk/arch/s390/include/asm/syscall.h
index fe7b99759e12..cd29d2f4e4f3 100644
--- a/trunk/arch/s390/include/asm/syscall.h
+++ b/trunk/arch/s390/include/asm/syscall.h
@@ -23,6 +23,7 @@
* type here is what we want [need] for both 32 bit and 64 bit systems.
*/
extern const unsigned int sys_call_table[];
+extern const unsigned int sys_call_table_emu[];
static inline long syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
diff --git a/trunk/arch/s390/include/asm/thread_info.h b/trunk/arch/s390/include/asm/thread_info.h
index 9e2cfe0349c3..eb5f64d26d06 100644
--- a/trunk/arch/s390/include/asm/thread_info.h
+++ b/trunk/arch/s390/include/asm/thread_info.h
@@ -14,13 +14,8 @@
#define THREAD_ORDER 1
#define ASYNC_ORDER 1
#else /* CONFIG_64BIT */
-#ifndef __SMALL_STACK
#define THREAD_ORDER 2
#define ASYNC_ORDER 2
-#else
-#define THREAD_ORDER 1
-#define ASYNC_ORDER 1
-#endif
#endif /* CONFIG_64BIT */
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
@@ -41,6 +36,7 @@ struct thread_info {
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
+ unsigned long sys_call_table; /* System call table address */
unsigned int cpu; /* current CPU */
int preempt_count; /* 0 => preemptable, <0 => BUG */
struct restart_block restart_block;
diff --git a/trunk/arch/s390/include/uapi/asm/ptrace.h b/trunk/arch/s390/include/uapi/asm/ptrace.h
index a5ca214b34fd..3aa9f1ec5b29 100644
--- a/trunk/arch/s390/include/uapi/asm/ptrace.h
+++ b/trunk/arch/s390/include/uapi/asm/ptrace.h
@@ -215,12 +215,6 @@ typedef struct
unsigned long addr;
} __attribute__ ((aligned(8))) psw_t;
-typedef struct
-{
- __u32 mask;
- __u32 addr;
-} __attribute__ ((aligned(8))) psw_compat_t;
-
#ifndef __s390x__
#define PSW_MASK_PER 0x40000000UL
@@ -295,20 +289,6 @@ typedef struct
unsigned long orig_gpr2;
} s390_regs;
-typedef struct
-{
- psw_compat_t psw;
- __u32 gprs[NUM_GPRS];
- __u32 acrs[NUM_ACRS];
- __u32 orig_gpr2;
-} s390_compat_regs;
-
-typedef struct
-{
- __u32 gprs_high[NUM_GPRS];
-} s390_compat_regs_high;
-
-
/*
* Now for the user space program event recording (trace) definitions.
* The following structures are used only for the ptrace interface, don't
diff --git a/trunk/arch/s390/include/uapi/asm/statfs.h b/trunk/arch/s390/include/uapi/asm/statfs.h
index 5acca0a34c20..a61d538756f2 100644
--- a/trunk/arch/s390/include/uapi/asm/statfs.h
+++ b/trunk/arch/s390/include/uapi/asm/statfs.h
@@ -7,9 +7,6 @@
#ifndef _S390_STATFS_H
#define _S390_STATFS_H
-#ifndef __s390x__
-#include
-#else
/*
* We can't use because in 64-bit mode
* we mix ints of different sizes in our struct statfs.
@@ -21,49 +18,33 @@ typedef __kernel_fsid_t fsid_t;
#endif
struct statfs {
- int f_type;
- int f_bsize;
- long f_blocks;
- long f_bfree;
- long f_bavail;
- long f_files;
- long f_ffree;
+ unsigned int f_type;
+ unsigned int f_bsize;
+ unsigned long f_blocks;
+ unsigned long f_bfree;
+ unsigned long f_bavail;
+ unsigned long f_files;
+ unsigned long f_ffree;
__kernel_fsid_t f_fsid;
- int f_namelen;
- int f_frsize;
- int f_flags;
- int f_spare[4];
+ unsigned int f_namelen;
+ unsigned int f_frsize;
+ unsigned int f_flags;
+ unsigned int f_spare[4];
};
struct statfs64 {
- int f_type;
- int f_bsize;
- long f_blocks;
- long f_bfree;
- long f_bavail;
- long f_files;
- long f_ffree;
+ unsigned int f_type;
+ unsigned int f_bsize;
+ unsigned long f_blocks;
+ unsigned long f_bfree;
+ unsigned long f_bavail;
+ unsigned long f_files;
+ unsigned long f_ffree;
__kernel_fsid_t f_fsid;
- int f_namelen;
- int f_frsize;
- int f_flags;
- int f_spare[4];
+ unsigned int f_namelen;
+ unsigned int f_frsize;
+ unsigned int f_flags;
+ unsigned int f_spare[4];
};
-struct compat_statfs64 {
- __u32 f_type;
- __u32 f_bsize;
- __u64 f_blocks;
- __u64 f_bfree;
- __u64 f_bavail;
- __u64 f_files;
- __u64 f_ffree;
- __kernel_fsid_t f_fsid;
- __u32 f_namelen;
- __u32 f_frsize;
- __u32 f_flags;
- __u32 f_spare[4];
-};
-
-#endif /* __s390x__ */
#endif
diff --git a/trunk/arch/s390/kernel/Makefile b/trunk/arch/s390/kernel/Makefile
index 2ac311ef5c9b..1386fcaf4ef6 100644
--- a/trunk/arch/s390/kernel/Makefile
+++ b/trunk/arch/s390/kernel/Makefile
@@ -13,6 +13,14 @@ endif
#
CFLAGS_smp.o := -Wno-nonnull
+#
+# Disable tailcall optimizations for stack / callchain walking functions
+# since this might generate broken code when accessing register 15 and
+# passing its content to other functions.
+#
+CFLAGS_stacktrace.o += -fno-optimize-sibling-calls
+CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
+
#
# Pass UTS_MACHINE for user_regset definition
#
@@ -20,10 +28,11 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
-obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \
- processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \
- debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \
- sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
+obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o
+obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
+obj-y += debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o
+obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
+obj-y += dumpstack.o
obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
diff --git a/trunk/arch/s390/kernel/asm-offsets.c b/trunk/arch/s390/kernel/asm-offsets.c
index fface87056eb..7a82f9f70100 100644
--- a/trunk/arch/s390/kernel/asm-offsets.c
+++ b/trunk/arch/s390/kernel/asm-offsets.c
@@ -35,6 +35,7 @@ int main(void)
DEFINE(__TI_task, offsetof(struct thread_info, task));
DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));
DEFINE(__TI_flags, offsetof(struct thread_info, flags));
+ DEFINE(__TI_sysc_table, offsetof(struct thread_info, sys_call_table));
DEFINE(__TI_cpu, offsetof(struct thread_info, cpu));
DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count));
DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer));
diff --git a/trunk/arch/s390/kernel/compat_signal.c b/trunk/arch/s390/kernel/compat_signal.c
index 6de049fbe62d..c439ac9ced09 100644
--- a/trunk/arch/s390/kernel/compat_signal.c
+++ b/trunk/arch/s390/kernel/compat_signal.c
@@ -362,6 +362,7 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
/* set extra registers only for synchronous signals */
regs->gprs[4] = regs->int_code & 127;
regs->gprs[5] = regs->int_parm_long;
+ regs->gprs[6] = task_thread_info(current)->last_break;
}
/* Place signal number on stack to allow backtrace from handler. */
@@ -421,6 +422,7 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->gprs[2] = map_signal(sig);
regs->gprs[3] = (__force __u64) &frame->info;
regs->gprs[4] = (__force __u64) &frame->uc;
+ regs->gprs[5] = task_thread_info(current)->last_break;
return 0;
give_sigsegv:
diff --git a/trunk/arch/s390/kernel/dis.c b/trunk/arch/s390/kernel/dis.c
index 3ad5e9540160..7f4a4a8c847c 100644
--- a/trunk/arch/s390/kernel/dis.c
+++ b/trunk/arch/s390/kernel/dis.c
@@ -1696,14 +1696,15 @@ static struct insn *find_insn(unsigned char *code)
* insn_to_mnemonic - decode an s390 instruction
* @instruction: instruction to decode
* @buf: buffer to fill with mnemonic
+ * @len: length of buffer
*
* Decode the instruction at @instruction and store the corresponding
- * mnemonic into @buf.
+ * mnemonic into @buf of length @len.
* @buf is left unchanged if the instruction could not be decoded.
* Returns:
* %0 on success, %-ENOENT if the instruction was not found.
*/
-int insn_to_mnemonic(unsigned char *instruction, char buf[8])
+int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len)
{
struct insn *insn;
@@ -1711,10 +1712,10 @@ int insn_to_mnemonic(unsigned char *instruction, char buf[8])
if (!insn)
return -ENOENT;
if (insn->name[0] == '\0')
- snprintf(buf, 8, "%s",
+ snprintf(buf, len, "%s",
long_insn_name[(int) insn->name[1]]);
else
- snprintf(buf, 8, "%.5s", insn->name);
+ snprintf(buf, len, "%.5s", insn->name);
return 0;
}
EXPORT_SYMBOL_GPL(insn_to_mnemonic);
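Since the caller now supplies the buffer length, decoding is no longer tied to an 8-byte buffer. A small usage sketch (print_mnemonic() is a hypothetical helper; the "unknown" fallback mirrors the kvm trace event further down):

static void print_mnemonic(unsigned char *code)
{
	char mnemonic[16];	/* buffer size is now the caller's choice */

	if (insn_to_mnemonic(code, mnemonic, sizeof(mnemonic)))
		strcpy(mnemonic, "unknown");	/* -ENOENT: instruction not in the table */
	printk("%s\n", mnemonic);
}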
diff --git a/trunk/arch/s390/kernel/dumpstack.c b/trunk/arch/s390/kernel/dumpstack.c
new file mode 100644
index 000000000000..03dce39d01ee
--- /dev/null
+++ b/trunk/arch/s390/kernel/dumpstack.c
@@ -0,0 +1,236 @@
+/*
+ * Stack dumping functions
+ *
+ * Copyright IBM Corp. 1999, 2013
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifndef CONFIG_64BIT
+#define LONG "%08lx "
+#define FOURLONG "%08lx %08lx %08lx %08lx\n"
+static int kstack_depth_to_print = 12;
+#else /* CONFIG_64BIT */
+#define LONG "%016lx "
+#define FOURLONG "%016lx %016lx %016lx %016lx\n"
+static int kstack_depth_to_print = 20;
+#endif /* CONFIG_64BIT */
+
+/*
+ * For show_trace we have three different stacks to consider:
+ * - the panic stack which is used if the kernel stack has overflowed
+ * - the asynchronous interrupt stack (cpu related)
+ * - the synchronous kernel stack (process related)
+ * The stack trace can start at any of the three stacks and can potentially
+ * touch all of them. The order is: panic stack, async stack, sync stack.
+ */
+static unsigned long
+__show_trace(unsigned long sp, unsigned long low, unsigned long high)
+{
+ struct stack_frame *sf;
+ struct pt_regs *regs;
+
+ while (1) {
+ sp = sp & PSW_ADDR_INSN;
+ if (sp < low || sp > high - sizeof(*sf))
+ return sp;
+ sf = (struct stack_frame *) sp;
+ printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
+ print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
+ /* Follow the backchain. */
+ while (1) {
+ low = sp;
+ sp = sf->back_chain & PSW_ADDR_INSN;
+ if (!sp)
+ break;
+ if (sp <= low || sp > high - sizeof(*sf))
+ return sp;
+ sf = (struct stack_frame *) sp;
+ printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
+ print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
+ }
+ /* Zero backchain detected, check for interrupt frame. */
+ sp = (unsigned long) (sf + 1);
+ if (sp <= low || sp > high - sizeof(*regs))
+ return sp;
+ regs = (struct pt_regs *) sp;
+ printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
+ print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
+ low = sp;
+ sp = regs->gprs[15];
+ }
+}
+
+static void show_trace(struct task_struct *task, unsigned long *stack)
+{
+ register unsigned long __r15 asm ("15");
+ unsigned long sp;
+
+ sp = (unsigned long) stack;
+ if (!sp)
+ sp = task ? task->thread.ksp : __r15;
+ printk("Call Trace:\n");
+#ifdef CONFIG_CHECK_STACK
+ sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
+ S390_lowcore.panic_stack);
+#endif
+ sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
+ S390_lowcore.async_stack);
+ if (task)
+ __show_trace(sp, (unsigned long) task_stack_page(task),
+ (unsigned long) task_stack_page(task) + THREAD_SIZE);
+ else
+ __show_trace(sp, S390_lowcore.thread_info,
+ S390_lowcore.thread_info + THREAD_SIZE);
+ if (!task)
+ task = current;
+ debug_show_held_locks(task);
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp)
+{
+ register unsigned long *__r15 asm ("15");
+ unsigned long *stack;
+ int i;
+
+ if (!sp)
+ stack = task ? (unsigned long *) task->thread.ksp : __r15;
+ else
+ stack = sp;
+
+ for (i = 0; i < kstack_depth_to_print; i++) {
+ if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
+ break;
+ if ((i * sizeof(long) % 32) == 0)
+ printk("%s ", i == 0 ? "" : "\n");
+ printk(LONG, *stack++);
+ }
+ printk("\n");
+ show_trace(task, sp);
+}
+
+static void show_last_breaking_event(struct pt_regs *regs)
+{
+#ifdef CONFIG_64BIT
+ printk("Last Breaking-Event-Address:\n");
+ printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
+ print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
+#endif
+}
+
+/*
+ * The architecture-independent dump_stack generator
+ */
+void dump_stack(void)
+{
+ printk("CPU: %d %s %s %.*s\n",
+ task_thread_info(current)->cpu, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+ printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
+ current->comm, current->pid, current,
+ (void *) current->thread.ksp);
+ show_stack(NULL, NULL);
+}
+EXPORT_SYMBOL(dump_stack);
+
+static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
+{
+ return (regs->psw.mask & bits) / ((~bits + 1) & bits);
+}
+
+void show_registers(struct pt_regs *regs)
+{
+ char *mode;
+
+ mode = user_mode(regs) ? "User" : "Krnl";
+ printk("%s PSW : %p %p",
+ mode, (void *) regs->psw.mask,
+ (void *) regs->psw.addr);
+ print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
+ printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
+ "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
+ mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
+ mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
+ mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
+ mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
+ mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
+#ifdef CONFIG_64BIT
+ printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
+#endif
+ printk("\n%s GPRS: " FOURLONG, mode,
+ regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
+ printk(" " FOURLONG,
+ regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
+ printk(" " FOURLONG,
+ regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
+ printk(" " FOURLONG,
+ regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
+ show_code(regs);
+}
+
+void show_regs(struct pt_regs *regs)
+{
+ printk("CPU: %d %s %s %.*s\n",
+ task_thread_info(current)->cpu, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+ printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
+ current->comm, current->pid, current,
+ (void *) current->thread.ksp);
+ show_registers(regs);
+ /* Show stack backtrace if pt_regs is from kernel mode */
+ if (!user_mode(regs))
+ show_trace(NULL, (unsigned long *) regs->gprs[15]);
+ show_last_breaking_event(regs);
+}
+
+static DEFINE_SPINLOCK(die_lock);
+
+void die(struct pt_regs *regs, const char *str)
+{
+ static int die_counter;
+
+ oops_enter();
+ lgr_info_log();
+ debug_stop_all();
+ console_verbose();
+ spin_lock_irq(&die_lock);
+ bust_spinlocks(1);
+ printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
+#ifdef CONFIG_PREEMPT
+ printk("PREEMPT ");
+#endif
+#ifdef CONFIG_SMP
+ printk("SMP ");
+#endif
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ printk("DEBUG_PAGEALLOC");
+#endif
+ printk("\n");
+ notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
+ print_modules();
+ show_regs(regs);
+ bust_spinlocks(0);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+ spin_unlock_irq(&die_lock);
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception: panic_on_oops");
+ oops_exit();
+ do_exit(SIGSEGV);
+}
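The mask_bits() helper above works because (~bits + 1) & bits isolates the lowest set bit of the field mask, so the division is effectively a right shift that moves the selected PSW field down to bit 0. A self-contained check with an illustrative mask value (not one of the real PSW_MASK_* constants):

#include <assert.h>

int main(void)
{
	unsigned long mask = 0x2a003000UL;	/* hypothetical psw.mask contents */
	unsigned long bits = 0x3000UL;		/* made-up two-bit field at bits 12-13 */

	/* (~bits + 1) & bits is the lowest set bit of the field mask ... */
	assert(((~bits + 1) & bits) == 0x1000UL);
	/* ... so dividing by it extracts the field: (0x3000 >> 12) == 3 */
	assert((mask & bits) / ((~bits + 1) & bits) == 3);
	return 0;
}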
diff --git a/trunk/arch/s390/kernel/entry.S b/trunk/arch/s390/kernel/entry.S
index 94feff7d6132..4d5e6f8a7978 100644
--- a/trunk/arch/s390/kernel/entry.S
+++ b/trunk/arch/s390/kernel/entry.S
@@ -45,6 +45,7 @@ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT
+STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
#define BASED(name) name-system_call(%r13)
@@ -97,10 +98,10 @@ STACK_SIZE = 1 << STACK_SHIFT
sra %r14,\shift
jnz 1f
CHECK_STACK 1<<\shift,\savearea
+ ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
1: l %r15,\stack # load target stack
-2: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- la %r11,STACK_FRAME_OVERHEAD(%r15)
+2: la %r11,STACK_FRAME_OVERHEAD(%r15)
.endm
.macro ADD64 high,low,timer
@@ -150,7 +151,7 @@ ENTRY(__switch_to)
l %r4,__THREAD_info(%r2) # get thread_info of prev
l %r5,__THREAD_info(%r3) # get thread_info of next
lr %r15,%r5
- ahi %r15,STACK_SIZE # end of kernel stack of next
+ ahi %r15,STACK_INIT # end of kernel stack of next
st %r3,__LC_CURRENT # store task struct of next
st %r5,__LC_THREAD_INFO # store thread info of next
st %r15,__LC_KERNEL_STACK # store end of kernel stack
@@ -178,7 +179,6 @@ sysc_stm:
l %r13,__LC_SVC_NEW_PSW+4
sysc_per:
l %r15,__LC_KERNEL_STACK
- ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
sysc_vtime:
UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
@@ -188,6 +188,7 @@ sysc_vtime:
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
sysc_do_svc:
oi __TI_flags+3(%r12),_TIF_SYSCALL
+ l %r10,__TI_sysc_table(%r12) # 31 bit system call table
lh %r8,__PT_INT_CODE+2(%r11)
sla %r8,2 # shift and test for svc0
jnz sysc_nr_ok
@@ -198,7 +199,6 @@ sysc_do_svc:
lr %r8,%r1
sla %r8,2
sysc_nr_ok:
- l %r10,BASED(.Lsys_call_table) # 31 bit system call table
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
st %r2,__PT_ORIG_GPR2(%r11)
st %r7,STACK_FRAME_OVERHEAD(%r15)
@@ -359,11 +359,11 @@ ENTRY(pgm_check_handler)
tm __LC_PGM_ILC+3,0x80 # check for per exception
jnz pgm_svcper # -> single stepped svc
0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
+ ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
1: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
l %r15,__LC_KERNEL_STACK
-2: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- la %r11,STACK_FRAME_OVERHEAD(%r15)
+2: la %r11,STACK_FRAME_OVERHEAD(%r15)
stm %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
stm %r8,%r9,__PT_PSW(%r11)
@@ -485,7 +485,6 @@ io_work:
#
io_work_user:
l %r1,__LC_KERNEL_STACK
- ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
@@ -646,7 +645,6 @@ mcck_skip:
tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno mcck_return
l %r1,__LC_KERNEL_STACK # switch to kernel stack
- ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r15)
@@ -674,6 +672,7 @@ mcck_panic:
sra %r14,PAGE_SHIFT
jz 0f
l %r15,__LC_PANIC_STACK
+ j mcck_skip
0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j mcck_skip
@@ -714,12 +713,10 @@ ENTRY(restart_int_handler)
*/
stack_overflow:
l %r15,__LC_PANIC_STACK # change to panic stack
- ahi %r15,-__PT_SIZE # create pt_regs
- stm %r0,%r7,__PT_R0(%r15)
- stm %r8,%r9,__PT_PSW(%r15)
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
+ stm %r0,%r7,__PT_R0(%r11)
+ stm %r8,%r9,__PT_PSW(%r11)
mvc __PT_R8(32,%r11),0(%r14)
- lr %r15,%r11
- ahi %r15,-STACK_FRAME_OVERHEAD
l %r1,BASED(1f)
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
lr %r2,%r11 # pass pointer to pt_regs
@@ -799,15 +796,14 @@ cleanup_system_call:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
# set up saved register 11
l %r15,__LC_KERNEL_STACK
- ahi %r15,-__PT_SIZE
- st %r15,12(%r11) # r11 pt_regs pointer
+ la %r9,STACK_FRAME_OVERHEAD(%r15)
+ st %r9,12(%r11) # r11 pt_regs pointer
# fill pt_regs
- mvc __PT_R8(32,%r15),__LC_SAVE_AREA_SYNC
- stm %r0,%r7,__PT_R0(%r15)
- mvc __PT_PSW(8,%r15),__LC_SVC_OLD_PSW
- mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC
+ mvc __PT_R8(32,%r9),__LC_SAVE_AREA_SYNC
+ stm %r0,%r7,__PT_R0(%r9)
+ mvc __PT_PSW(8,%r9),__LC_SVC_OLD_PSW
+ mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
# setup saved register 15
- ahi %r15,-STACK_FRAME_OVERHEAD
st %r15,28(%r11) # r15 stack pointer
# set new psw address and exit
l %r9,BASED(cleanup_table+4) # sysc_do_svc + 0x80000000
@@ -910,7 +906,6 @@ cleanup_idle_wait:
.Ltrace_enter: .long do_syscall_trace_enter
.Ltrace_exit: .long do_syscall_trace_exit
.Lschedule_tail: .long schedule_tail
-.Lsys_call_table: .long sys_call_table
.Lsysc_per: .long sysc_per + 0x80000000
#ifdef CONFIG_TRACE_IRQFLAGS
.Lhardirqs_on: .long trace_hardirqs_on_caller
diff --git a/trunk/arch/s390/kernel/entry.h b/trunk/arch/s390/kernel/entry.h
index c3a736a3ed44..aa0ab02e9595 100644
--- a/trunk/arch/s390/kernel/entry.h
+++ b/trunk/arch/s390/kernel/entry.h
@@ -7,6 +7,7 @@
#include
extern void *restart_stack;
+extern unsigned long suspend_zero_pages;
void system_call(void);
void pgm_check_handler(void);
diff --git a/trunk/arch/s390/kernel/entry64.S b/trunk/arch/s390/kernel/entry64.S
index 2e6d60c55f90..4c17eece707e 100644
--- a/trunk/arch/s390/kernel/entry64.S
+++ b/trunk/arch/s390/kernel/entry64.S
@@ -39,6 +39,7 @@ __PT_R15 = __PT_GPRS + 120
STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT
+STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING | _TIF_PER_TRAP )
@@ -124,10 +125,10 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
srag %r14,%r14,\shift
jnz 1f
CHECK_STACK 1<<\shift,\savearea
+ aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
1: lg %r15,\stack # load target stack
-2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- la %r11,STACK_FRAME_OVERHEAD(%r15)
+2: la %r11,STACK_FRAME_OVERHEAD(%r15)
.endm
.macro UPDATE_VTIME scratch,enter_timer
@@ -177,7 +178,7 @@ ENTRY(__switch_to)
lg %r4,__THREAD_info(%r2) # get thread_info of prev
lg %r5,__THREAD_info(%r3) # get thread_info of next
lgr %r15,%r5
- aghi %r15,STACK_SIZE # end of kernel stack of next
+ aghi %r15,STACK_INIT # end of kernel stack of next
stg %r3,__LC_CURRENT # store task struct of next
stg %r5,__LC_THREAD_INFO # store thread info of next
stg %r15,__LC_KERNEL_STACK # store end of kernel stack
@@ -203,10 +204,8 @@ sysc_stmg:
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
- larl %r13,system_call
sysc_per:
lg %r15,__LC_KERNEL_STACK
- aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
sysc_vtime:
UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER
@@ -217,6 +216,7 @@ sysc_vtime:
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
sysc_do_svc:
oi __TI_flags+7(%r12),_TIF_SYSCALL
+ lg %r10,__TI_sysc_table(%r12) # address of system call table
llgh %r8,__PT_INT_CODE+2(%r11)
slag %r8,%r8,2 # shift and test for svc 0
jnz sysc_nr_ok
@@ -227,13 +227,6 @@ sysc_do_svc:
sth %r1,__PT_INT_CODE+2(%r11)
slag %r8,%r1,2
sysc_nr_ok:
- larl %r10,sys_call_table # 64 bit system call table
-#ifdef CONFIG_COMPAT
- tm __TI_flags+5(%r12),(_TIF_31BIT>>16)
- jno sysc_noemu
- larl %r10,sys_call_table_emu # 31 bit system call table
-sysc_noemu:
-#endif
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stg %r2,__PT_ORIG_GPR2(%r11)
stg %r7,STACK_FRAME_OVERHEAD(%r15)
@@ -389,6 +382,7 @@ ENTRY(pgm_check_handler)
tm __LC_PGM_ILC+3,0x80 # check for per exception
jnz pgm_svcper # -> single stepped svc
0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
+ aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER
LAST_BREAK %r14
@@ -398,8 +392,7 @@ ENTRY(pgm_check_handler)
tm __LC_PGM_ILC+2,0x02 # check for transaction abort
jz 2f
mvc __THREAD_trap_tdb(256,%r14),0(%r13)
-2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- la %r11,STACK_FRAME_OVERHEAD(%r15)
+2: la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
stmg %r8,%r9,__PT_PSW(%r11)
@@ -526,7 +519,6 @@ io_work:
#
io_work_user:
lg %r1,__LC_KERNEL_STACK
- aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
@@ -688,7 +680,6 @@ mcck_skip:
tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno mcck_return
lg %r1,__LC_KERNEL_STACK # switch to kernel stack
- aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
@@ -755,14 +746,12 @@ ENTRY(restart_int_handler)
* Setup a pt_regs so that show_trace can provide a good call trace.
*/
stack_overflow:
- lg %r11,__LC_PANIC_STACK # change to panic stack
- aghi %r11,-__PT_SIZE # create pt_regs
+ lg %r15,__LC_PANIC_STACK # change to panic stack
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_R8(64,%r11),0(%r14)
stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
- lgr %r15,%r11
- aghi %r15,-STACK_FRAME_OVERHEAD
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
jg kernel_stack_overflow
@@ -846,15 +835,14 @@ cleanup_system_call:
mvc __TI_last_break(8,%r12),16(%r11)
0: # set up saved register r11
lg %r15,__LC_KERNEL_STACK
- aghi %r15,-__PT_SIZE
- stg %r15,24(%r11) # r11 pt_regs pointer
+ la %r9,STACK_FRAME_OVERHEAD(%r15)
+ stg %r9,24(%r11) # r11 pt_regs pointer
# fill pt_regs
- mvc __PT_R8(64,%r15),__LC_SAVE_AREA_SYNC
- stmg %r0,%r7,__PT_R0(%r15)
- mvc __PT_PSW(16,%r15),__LC_SVC_OLD_PSW
- mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC
+ mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
+ stmg %r0,%r7,__PT_R0(%r9)
+ mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW
+ mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
# setup saved register r15
- aghi %r15,-STACK_FRAME_OVERHEAD
stg %r15,56(%r11) # r15 stack pointer
# set new psw address and exit
larl %r9,sysc_do_svc
@@ -1011,6 +999,7 @@ sys_call_table:
#ifdef CONFIG_COMPAT
#define SYSCALL(esa,esame,emu) .long emu
+ .globl sys_call_table_emu
sys_call_table_emu:
#include "syscalls.S"
#undef SYSCALL
diff --git a/trunk/arch/s390/kernel/machine_kexec.c b/trunk/arch/s390/kernel/machine_kexec.c
index b3de27700016..ac2178161ec3 100644
--- a/trunk/arch/s390/kernel/machine_kexec.c
+++ b/trunk/arch/s390/kernel/machine_kexec.c
@@ -13,6 +13,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -67,6 +68,35 @@ void setup_regs(void)
memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
}
+/*
+ * PM notifier callback for kdump
+ */
+static int machine_kdump_pm_cb(struct notifier_block *nb, unsigned long action,
+ void *ptr)
+{
+ switch (action) {
+ case PM_SUSPEND_PREPARE:
+ case PM_HIBERNATION_PREPARE:
+ if (crashk_res.start)
+ crash_map_reserved_pages();
+ break;
+ case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+ if (crashk_res.start)
+ crash_unmap_reserved_pages();
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+ return NOTIFY_OK;
+}
+
+static int __init machine_kdump_pm_init(void)
+{
+ pm_notifier(machine_kdump_pm_cb, 0);
+ return 0;
+}
+arch_initcall(machine_kdump_pm_init);
#endif
/*
diff --git a/trunk/arch/s390/kernel/setup.c b/trunk/arch/s390/kernel/setup.c
index 29268859d8ee..0f419c5765c8 100644
--- a/trunk/arch/s390/kernel/setup.c
+++ b/trunk/arch/s390/kernel/setup.c
@@ -377,11 +377,14 @@ static void __init setup_lowcore(void)
PSW_MASK_DAT | PSW_MASK_MCHECK;
lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
lc->clock_comparator = -1ULL;
- lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
+ lc->kernel_stack = ((unsigned long) &init_thread_union)
+ + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->async_stack = (unsigned long)
- __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
+ __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0)
+ + ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->panic_stack = (unsigned long)
- __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
+ __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0)
+ + PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->current_task = (unsigned long) init_thread_union.thread_info.task;
lc->thread_info = (unsigned long) &init_thread_union;
lc->machine_flags = S390_lowcore.machine_flags;
diff --git a/trunk/arch/s390/kernel/smp.c b/trunk/arch/s390/kernel/smp.c
index 549c9d173c0f..8bde89eafd88 100644
--- a/trunk/arch/s390/kernel/smp.c
+++ b/trunk/arch/s390/kernel/smp.c
@@ -181,8 +181,10 @@ static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
lc = pcpu->lowcore;
memcpy(lc, &S390_lowcore, 512);
memset((char *) lc + 512, 0, sizeof(*lc) - 512);
- lc->async_stack = pcpu->async_stack + ASYNC_SIZE;
- lc->panic_stack = pcpu->panic_stack + PAGE_SIZE;
+ lc->async_stack = pcpu->async_stack + ASYNC_SIZE
+ - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
+ lc->panic_stack = pcpu->panic_stack + PAGE_SIZE
+ - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->cpu_nr = cpu;
#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE) {
@@ -253,7 +255,8 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
struct _lowcore *lc = pcpu->lowcore;
struct thread_info *ti = task_thread_info(tsk);
- lc->kernel_stack = (unsigned long) task_stack_page(tsk) + THREAD_SIZE;
+ lc->kernel_stack = (unsigned long) task_stack_page(tsk)
+ + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->thread_info = (unsigned long) task_thread_info(tsk);
lc->current_task = (unsigned long) tsk;
lc->user_timer = ti->user_timer;
@@ -810,8 +813,10 @@ void __init smp_prepare_boot_cpu(void)
pcpu->state = CPU_STATE_CONFIGURED;
pcpu->address = boot_cpu_address;
pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
- pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE;
- pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE;
+ pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE
+ + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
+ pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE
+ + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
S390_lowcore.percpu_offset = __per_cpu_offset[0];
smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
set_cpu_present(0, true);
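setup_lowcore() and the SMP low-core setup now store stack pointers that already leave room for a register save area and a struct pt_regs at the top of each stack. A symbolic sketch of the convention (names only, not literal kernel code):

/* The per-cpu stack pointers are now initialised as
 *
 *	initial_sp = stack_base + stack_size
 *		     - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
 *
 * which matches STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
 * in entry.S/entry64.S, so the entry paths above could drop their per-entry
 * "ahi/aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)" adjustment.
 */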
diff --git a/trunk/arch/s390/kernel/suspend.c b/trunk/arch/s390/kernel/suspend.c
index aa1494d0e380..c479d2f9605b 100644
--- a/trunk/arch/s390/kernel/suspend.c
+++ b/trunk/arch/s390/kernel/suspend.c
@@ -41,6 +41,7 @@ struct page_key_data {
static struct page_key_data *page_key_data;
static struct page_key_data *page_key_rp, *page_key_wp;
static unsigned long page_key_rx, page_key_wx;
+unsigned long suspend_zero_pages;
/*
* For each page in the hibernation image one additional byte is
@@ -149,6 +150,36 @@ int pfn_is_nosave(unsigned long pfn)
return 0;
}
+/*
+ * PM notifier callback for suspend
+ */
+static int suspend_pm_cb(struct notifier_block *nb, unsigned long action,
+ void *ptr)
+{
+ switch (action) {
+ case PM_SUSPEND_PREPARE:
+ case PM_HIBERNATION_PREPARE:
+ suspend_zero_pages = __get_free_pages(GFP_KERNEL, LC_ORDER);
+ if (!suspend_zero_pages)
+ return NOTIFY_BAD;
+ break;
+ case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+ free_pages(suspend_zero_pages, LC_ORDER);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+ return NOTIFY_OK;
+}
+
+static int __init suspend_pm_init(void)
+{
+ pm_notifier(suspend_pm_cb, 0);
+ return 0;
+}
+arch_initcall(suspend_pm_init);
+
void save_processor_state(void)
{
/* swsusp_arch_suspend() actually saves all cpu register contents.
diff --git a/trunk/arch/s390/kernel/swsusp_asm64.S b/trunk/arch/s390/kernel/swsusp_asm64.S
index d4ca4e0617b5..c487be4cfc81 100644
--- a/trunk/arch/s390/kernel/swsusp_asm64.S
+++ b/trunk/arch/s390/kernel/swsusp_asm64.S
@@ -36,8 +36,8 @@ ENTRY(swsusp_arch_suspend)
/* Store prefix register on stack */
stpx __SF_EMPTY(%r15)
- /* Save prefix register contents for lowcore */
- llgf %r4,__SF_EMPTY(%r15)
+ /* Save prefix register contents for lowcore copy */
+ llgf %r10,__SF_EMPTY(%r15)
/* Get pointer to save area */
lghi %r1,0x1000
@@ -91,7 +91,18 @@ ENTRY(swsusp_arch_suspend)
xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
spx __SF_EMPTY(%r15)
+ /* Save absolute zero pages */
+ larl %r2,suspend_zero_pages
+ lg %r2,0(%r2)
+ lghi %r4,0
+ lghi %r3,2*PAGE_SIZE
+ lghi %r5,2*PAGE_SIZE
+1: mvcle %r2,%r4,0
+ jo 1b
+
+ /* Copy lowcore to absolute zero lowcore */
lghi %r2,0
+ lgr %r4,%r10
lghi %r3,2*PAGE_SIZE
lghi %r5,2*PAGE_SIZE
1: mvcle %r2,%r4,0
@@ -248,8 +259,20 @@ restore_registers:
/* Load old stack */
lg %r15,0x2f8(%r13)
+ /* Save prefix register */
+ mvc __SF_EMPTY(4,%r15),0x318(%r13)
+
+ /* Restore absolute zero pages */
+ lghi %r2,0
+ larl %r4,suspend_zero_pages
+ lg %r4,0(%r4)
+ lghi %r3,2*PAGE_SIZE
+ lghi %r5,2*PAGE_SIZE
+1: mvcle %r2,%r4,0
+ jo 1b
+
/* Restore prefix register */
- spx 0x318(%r13)
+ spx __SF_EMPTY(%r15)
/* Activate DAT */
stosm __SF_EMPTY(%r15),0x04
diff --git a/trunk/arch/s390/kernel/traps.c b/trunk/arch/s390/kernel/traps.c
index 13dd63fba367..c5762324d9ee 100644
--- a/trunk/arch/s390/kernel/traps.c
+++ b/trunk/arch/s390/kernel/traps.c
@@ -12,49 +12,16 @@
* 'Traps.c' handles hardware traps and faults after we have saved some
* state in 'asm.s'.
*/
-#include
-#include
-#include
-#include
+#include
+#include
+#include
#include
-#include
+#include
#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
#include "entry.h"
int show_unhandled_signals = 1;
-#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
-
-#ifndef CONFIG_64BIT
-#define LONG "%08lx "
-#define FOURLONG "%08lx %08lx %08lx %08lx\n"
-static int kstack_depth_to_print = 12;
-#else /* CONFIG_64BIT */
-#define LONG "%016lx "
-#define FOURLONG "%016lx %016lx %016lx %016lx\n"
-static int kstack_depth_to_print = 20;
-#endif /* CONFIG_64BIT */
-
static inline void __user *get_trap_ip(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
@@ -72,215 +39,6 @@ static inline void __user *get_trap_ip(struct pt_regs *regs)
#endif
}
-/*
- * For show_trace we have tree different stack to consider:
- * - the panic stack which is used if the kernel stack has overflown
- * - the asynchronous interrupt stack (cpu related)
- * - the synchronous kernel stack (process related)
- * The stack trace can start at any of the three stack and can potentially
- * touch all of them. The order is: panic stack, async stack, sync stack.
- */
-static unsigned long
-__show_trace(unsigned long sp, unsigned long low, unsigned long high)
-{
- struct stack_frame *sf;
- struct pt_regs *regs;
-
- while (1) {
- sp = sp & PSW_ADDR_INSN;
- if (sp < low || sp > high - sizeof(*sf))
- return sp;
- sf = (struct stack_frame *) sp;
- printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
- print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
- /* Follow the backchain. */
- while (1) {
- low = sp;
- sp = sf->back_chain & PSW_ADDR_INSN;
- if (!sp)
- break;
- if (sp <= low || sp > high - sizeof(*sf))
- return sp;
- sf = (struct stack_frame *) sp;
- printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
- print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
- }
- /* Zero backchain detected, check for interrupt frame. */
- sp = (unsigned long) (sf + 1);
- if (sp <= low || sp > high - sizeof(*regs))
- return sp;
- regs = (struct pt_regs *) sp;
- printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
- print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
- low = sp;
- sp = regs->gprs[15];
- }
-}
-
-static void show_trace(struct task_struct *task, unsigned long *stack)
-{
- register unsigned long __r15 asm ("15");
- unsigned long sp;
-
- sp = (unsigned long) stack;
- if (!sp)
- sp = task ? task->thread.ksp : __r15;
- printk("Call Trace:\n");
-#ifdef CONFIG_CHECK_STACK
- sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
- S390_lowcore.panic_stack);
-#endif
- sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
- S390_lowcore.async_stack);
- if (task)
- __show_trace(sp, (unsigned long) task_stack_page(task),
- (unsigned long) task_stack_page(task) + THREAD_SIZE);
- else
- __show_trace(sp, S390_lowcore.thread_info,
- S390_lowcore.thread_info + THREAD_SIZE);
- if (!task)
- task = current;
- debug_show_held_locks(task);
-}
-
-void show_stack(struct task_struct *task, unsigned long *sp)
-{
- register unsigned long * __r15 asm ("15");
- unsigned long *stack;
- int i;
-
- if (!sp)
- stack = task ? (unsigned long *) task->thread.ksp : __r15;
- else
- stack = sp;
-
- for (i = 0; i < kstack_depth_to_print; i++) {
- if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
- break;
- if ((i * sizeof(long) % 32) == 0)
- printk("%s ", i == 0 ? "" : "\n");
- printk(LONG, *stack++);
- }
- printk("\n");
- show_trace(task, sp);
-}
-
-static void show_last_breaking_event(struct pt_regs *regs)
-{
-#ifdef CONFIG_64BIT
- printk("Last Breaking-Event-Address:\n");
- printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
- print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
-#endif
-}
-
-/*
- * The architecture-independent dump_stack generator
- */
-void dump_stack(void)
-{
- printk("CPU: %d %s %s %.*s\n",
- task_thread_info(current)->cpu, print_tainted(),
- init_utsname()->release,
- (int)strcspn(init_utsname()->version, " "),
- init_utsname()->version);
- printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
- current->comm, current->pid, current,
- (void *) current->thread.ksp);
- show_stack(NULL, NULL);
-}
-EXPORT_SYMBOL(dump_stack);
-
-static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
-{
- return (regs->psw.mask & bits) / ((~bits + 1) & bits);
-}
-
-void show_registers(struct pt_regs *regs)
-{
- char *mode;
-
- mode = user_mode(regs) ? "User" : "Krnl";
- printk("%s PSW : %p %p",
- mode, (void *) regs->psw.mask,
- (void *) regs->psw.addr);
- print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
- printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
- "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
- mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
- mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
- mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
- mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
- mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
-#ifdef CONFIG_64BIT
- printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
-#endif
- printk("\n%s GPRS: " FOURLONG, mode,
- regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
- printk(" " FOURLONG,
- regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
- printk(" " FOURLONG,
- regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
- printk(" " FOURLONG,
- regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
-
- show_code(regs);
-}
-
-void show_regs(struct pt_regs *regs)
-{
- printk("CPU: %d %s %s %.*s\n",
- task_thread_info(current)->cpu, print_tainted(),
- init_utsname()->release,
- (int)strcspn(init_utsname()->version, " "),
- init_utsname()->version);
- printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
- current->comm, current->pid, current,
- (void *) current->thread.ksp);
- show_registers(regs);
- /* Show stack backtrace if pt_regs is from kernel mode */
- if (!user_mode(regs))
- show_trace(NULL, (unsigned long *) regs->gprs[15]);
- show_last_breaking_event(regs);
-}
-
-static DEFINE_SPINLOCK(die_lock);
-
-void die(struct pt_regs *regs, const char *str)
-{
- static int die_counter;
-
- oops_enter();
- lgr_info_log();
- debug_stop_all();
- console_verbose();
- spin_lock_irq(&die_lock);
- bust_spinlocks(1);
- printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
-#ifdef CONFIG_PREEMPT
- printk("PREEMPT ");
-#endif
-#ifdef CONFIG_SMP
- printk("SMP ");
-#endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
- printk("DEBUG_PAGEALLOC");
-#endif
- printk("\n");
- notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
- print_modules();
- show_regs(regs);
- bust_spinlocks(0);
- add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
- spin_unlock_irq(&die_lock);
- if (in_interrupt())
- panic("Fatal exception in interrupt");
- if (panic_on_oops)
- panic("Fatal exception: panic_on_oops");
- oops_exit();
- do_exit(SIGSEGV);
-}
-
static inline void report_user_fault(struct pt_regs *regs, int signr)
{
if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
diff --git a/trunk/arch/s390/kvm/trace.h b/trunk/arch/s390/kvm/trace.h
index 2b29e62351d3..53252d2d4720 100644
--- a/trunk/arch/s390/kvm/trace.h
+++ b/trunk/arch/s390/kvm/trace.h
@@ -117,7 +117,7 @@ TRACE_EVENT(kvm_s390_intercept_instruction,
__entry->instruction,
insn_to_mnemonic((unsigned char *)
&__entry->instruction,
- __entry->insn) ?
+ __entry->insn, sizeof(__entry->insn)) ?
"unknown" : __entry->insn)
);
diff --git a/trunk/arch/s390/lib/uaccess_pt.c b/trunk/arch/s390/lib/uaccess_pt.c
index dff631d34b45..466fb3383960 100644
--- a/trunk/arch/s390/lib/uaccess_pt.c
+++ b/trunk/arch/s390/lib/uaccess_pt.c
@@ -77,42 +77,69 @@ static size_t copy_in_kernel(size_t count, void __user *to,
 * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the address
* contains the (negative) exception code.
*/
-static __always_inline unsigned long follow_table(struct mm_struct *mm,
- unsigned long addr, int write)
+#ifdef CONFIG_64BIT
+static unsigned long follow_table(struct mm_struct *mm,
+ unsigned long address, int write)
{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *ptep;
+ unsigned long *table = (unsigned long *)__pa(mm->pgd);
+
+ switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
+ case _ASCE_TYPE_REGION1:
+ table = table + ((address >> 53) & 0x7ff);
+ if (unlikely(*table & _REGION_ENTRY_INV))
+ return -0x39UL;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ case _ASCE_TYPE_REGION2:
+ table = table + ((address >> 42) & 0x7ff);
+ if (unlikely(*table & _REGION_ENTRY_INV))
+ return -0x3aUL;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ case _ASCE_TYPE_REGION3:
+ table = table + ((address >> 31) & 0x7ff);
+ if (unlikely(*table & _REGION_ENTRY_INV))
+ return -0x3bUL;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ case _ASCE_TYPE_SEGMENT:
+ table = table + ((address >> 20) & 0x7ff);
+ if (unlikely(*table & _SEGMENT_ENTRY_INV))
+ return -0x10UL;
+ if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
+ if (write && (*table & _SEGMENT_ENTRY_RO))
+ return -0x04UL;
+ return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
+ (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
+ }
+ table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
+ }
+ table = table + ((address >> 12) & 0xff);
+ if (unlikely(*table & _PAGE_INVALID))
+ return -0x11UL;
+ if (write && (*table & _PAGE_RO))
+ return -0x04UL;
+ return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
+}
- pgd = pgd_offset(mm, addr);
- if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
- return -0x3aUL;
+#else /* CONFIG_64BIT */
- pud = pud_offset(pgd, addr);
- if (pud_none(*pud) || unlikely(pud_bad(*pud)))
- return -0x3bUL;
+static unsigned long follow_table(struct mm_struct *mm,
+ unsigned long address, int write)
+{
+ unsigned long *table = (unsigned long *)__pa(mm->pgd);
- pmd = pmd_offset(pud, addr);
- if (pmd_none(*pmd))
+ table = table + ((address >> 20) & 0x7ff);
+ if (unlikely(*table & _SEGMENT_ENTRY_INV))
return -0x10UL;
- if (pmd_large(*pmd)) {
- if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO))
- return -0x04UL;
- return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
- }
- if (unlikely(pmd_bad(*pmd)))
- return -0x10UL;
-
- ptep = pte_offset_map(pmd, addr);
- if (!pte_present(*ptep))
+ table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
+ table = table + ((address >> 12) & 0xff);
+ if (unlikely(*table & _PAGE_INVALID))
return -0x11UL;
- if (write && (!pte_write(*ptep) || !pte_dirty(*ptep)))
+ if (write && (*table & _PAGE_RO))
return -0x04UL;
-
- return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK);
+ return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
}
+#endif /* CONFIG_64BIT */
+
static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
size_t n, int write_user)
{
@@ -197,7 +224,7 @@ size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
static size_t clear_user_pt(size_t n, void __user *to)
{
- void *zpage = &empty_zero_page;
+ void *zpage = (void *) empty_zero_page;
long done, size, ret;
done = 0;
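The rewritten 64-bit follow_table() indexes every translation level directly with a shift-and-mask pair. A hypothetical helper that only shows that index arithmetic (the constants are copied from the function above; show_table_indices() itself is made up):

static void show_table_indices(unsigned long address)
{
	unsigned long rfx = (address >> 53) & 0x7ff;	/* region-first index */
	unsigned long rsx = (address >> 42) & 0x7ff;	/* region-second index */
	unsigned long rtx = (address >> 31) & 0x7ff;	/* region-third index */
	unsigned long sx  = (address >> 20) & 0x7ff;	/* segment index */
	unsigned long px  = (address >> 12) & 0xff;	/* page index */

	pr_info("%016lx -> %lx/%lx/%lx/%lx/%lx\n",
		address, rfx, rsx, rtx, sx, px);
}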
diff --git a/trunk/arch/s390/mm/cmm.c b/trunk/arch/s390/mm/cmm.c
index 479e94282910..9d84a1feefef 100644
--- a/trunk/arch/s390/mm/cmm.c
+++ b/trunk/arch/s390/mm/cmm.c
@@ -458,12 +458,10 @@ static int __init cmm_init(void)
if (rc)
goto out_pm;
cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
- rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0;
- if (rc)
- goto out_kthread;
- return 0;
+ if (!IS_ERR(cmm_thread_ptr))
+ return 0;
-out_kthread:
+ rc = PTR_ERR(cmm_thread_ptr);
unregister_pm_notifier(&cmm_power_notifier);
out_pm:
unregister_oom_notifier(&cmm_oom_nb);
diff --git a/trunk/arch/s390/mm/fault.c b/trunk/arch/s390/mm/fault.c
index 2fb9e63b8fc4..047c3e4c59a2 100644
--- a/trunk/arch/s390/mm/fault.c
+++ b/trunk/arch/s390/mm/fault.c
@@ -395,8 +395,13 @@ void __kprobes do_protection_exception(struct pt_regs *regs)
int fault;
trans_exc_code = regs->int_parm_long;
- /* Protection exception is suppressing, decrement psw address. */
- regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
+ /*
+ * Protection exceptions are suppressing, decrement psw address.
+ * The exception to this rule are aborted transactions, for these
+ * the PSW already points to the correct location.
+ */
+ if (!(regs->int_code & 0x200))
+ regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
/*
* Check for low-address protection. This needs to be treated
* as a special case because the translation exception code
diff --git a/trunk/arch/s390/mm/init.c b/trunk/arch/s390/mm/init.c
index 49ce6bb2c641..9f9c315b4c07 100644
--- a/trunk/arch/s390/mm/init.c
+++ b/trunk/arch/s390/mm/init.c
@@ -63,10 +63,18 @@ static unsigned long __init setup_zero_pages(void)
break;
case 0x2097: /* z10 */
case 0x2098: /* z10 */
- default:
+ case 0x2817: /* z196 */
+ case 0x2818: /* z196 */
order = 2;
break;
+ case 0x2827: /* zEC12 */
+ default:
+ order = 5;
+ break;
}
+ /* Limit number of empty zero pages for small memory sizes */
+ if (order > 2 && totalram_pages <= 16384)
+ order = 2;
empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!empty_zero_page)
diff --git a/trunk/arch/s390/mm/pageattr.c b/trunk/arch/s390/mm/pageattr.c
index d21040ed5e59..80adfbf75065 100644
--- a/trunk/arch/s390/mm/pageattr.c
+++ b/trunk/arch/s390/mm/pageattr.c
@@ -9,31 +9,25 @@
#include
#include
+static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
+{
+ asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
+ : [addr] "+a" (addr) : [skey] "d" (skey));
+ return addr;
+}
+
void storage_key_init_range(unsigned long start, unsigned long end)
{
- unsigned long boundary, function, size;
+ unsigned long boundary, size;
while (start < end) {
- if (MACHINE_HAS_EDAT2) {
- /* set storage keys for a 2GB frame */
- function = 0x22000 | PAGE_DEFAULT_KEY;
- size = 1UL << 31;
- boundary = (start + size) & ~(size - 1);
- if (boundary <= end) {
- do {
- start = pfmf(function, start);
- } while (start < boundary);
- continue;
- }
- }
if (MACHINE_HAS_EDAT1) {
/* set storage keys for a 1MB frame */
- function = 0x21000 | PAGE_DEFAULT_KEY;
size = 1UL << 20;
boundary = (start + size) & ~(size - 1);
if (boundary <= end) {
do {
- start = pfmf(function, start);
+ start = sske_frame(start, PAGE_DEFAULT_KEY);
} while (start < boundary);
continue;
}
diff --git a/trunk/arch/s390/mm/pgtable.c b/trunk/arch/s390/mm/pgtable.c
index ae44d2a34313..bd954e96f51c 100644
--- a/trunk/arch/s390/mm/pgtable.c
+++ b/trunk/arch/s390/mm/pgtable.c
@@ -379,75 +379,183 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
-/*
- * this function is assumed to be called with mmap_sem held
- */
-unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
+static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
{
- unsigned long *table, vmaddr, segment;
- struct mm_struct *mm;
- struct gmap_pgtable *mp;
- struct gmap_rmap *rmap;
- struct vm_area_struct *vma;
- struct page *page;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
+ unsigned long *table;
- current->thread.gmap_addr = address;
- mm = gmap->mm;
- /* Walk the gmap address space page table */
table = gmap->table + ((address >> 53) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV))
- return -EFAULT;
+ return ERR_PTR(-EFAULT);
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 42) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV))
- return -EFAULT;
+ return ERR_PTR(-EFAULT);
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 31) & 0x7ff);
if (unlikely(*table & _REGION_ENTRY_INV))
- return -EFAULT;
+ return ERR_PTR(-EFAULT);
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 20) & 0x7ff);
+ return table;
+}
+
+/**
+ * __gmap_translate - translate a guest address to a user space address
+ * @address: guest address
+ * @gmap: pointer to guest mapping meta data structure
+ *
+ * Returns user space address which corresponds to the guest address or
+ * -EFAULT if no such mapping exists.
+ * This function does not establish potentially missing page table entries.
+ * The mmap_sem of the mm that belongs to the address space must be held
+ * when this function gets called.
+ */
+unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
+{
+ unsigned long *segment_ptr, vmaddr, segment;
+ struct gmap_pgtable *mp;
+ struct page *page;
+ current->thread.gmap_addr = address;
+ segment_ptr = gmap_table_walk(address, gmap);
+ if (IS_ERR(segment_ptr))
+ return PTR_ERR(segment_ptr);
/* Convert the gmap address to an mm address. */
- segment = *table;
- if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
+ segment = *segment_ptr;
+ if (!(segment & _SEGMENT_ENTRY_INV)) {
page = pfn_to_page(segment >> PAGE_SHIFT);
mp = (struct gmap_pgtable *) page->index;
return mp->vmaddr | (address & ~PMD_MASK);
} else if (segment & _SEGMENT_ENTRY_RO) {
vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
- vma = find_vma(mm, vmaddr);
- if (!vma || vma->vm_start > vmaddr)
- return -EFAULT;
-
- /* Walk the parent mm page table */
- pgd = pgd_offset(mm, vmaddr);
- pud = pud_alloc(mm, pgd, vmaddr);
- if (!pud)
- return -ENOMEM;
- pmd = pmd_alloc(mm, pud, vmaddr);
- if (!pmd)
- return -ENOMEM;
- if (!pmd_present(*pmd) &&
- __pte_alloc(mm, vma, pmd, vmaddr))
- return -ENOMEM;
- /* pmd now points to a valid segment table entry. */
- rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
- if (!rmap)
- return -ENOMEM;
- /* Link gmap segment table entry location to page table. */
- page = pmd_page(*pmd);
- mp = (struct gmap_pgtable *) page->index;
- rmap->entry = table;
- spin_lock(&mm->page_table_lock);
+ return vmaddr | (address & ~PMD_MASK);
+ }
+ return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(__gmap_translate);
+
+/**
+ * gmap_translate - translate a guest address to a user space address
+ * @address: guest address
+ * @gmap: pointer to guest mapping meta data structure
+ *
+ * Returns user space address which corresponds to the guest address or
+ * -EFAULT if no such mapping exists.
+ * This function does not establish potentially missing page table entries.
+ */
+unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
+{
+ unsigned long rc;
+
+ down_read(&gmap->mm->mmap_sem);
+ rc = __gmap_translate(address, gmap);
+ up_read(&gmap->mm->mmap_sem);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_translate);
+
+static int gmap_connect_pgtable(unsigned long segment,
+ unsigned long *segment_ptr,
+ struct gmap *gmap)
+{
+ unsigned long vmaddr;
+ struct vm_area_struct *vma;
+ struct gmap_pgtable *mp;
+ struct gmap_rmap *rmap;
+ struct mm_struct *mm;
+ struct page *page;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ mm = gmap->mm;
+ vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
+ vma = find_vma(mm, vmaddr);
+ if (!vma || vma->vm_start > vmaddr)
+ return -EFAULT;
+ /* Walk the parent mm page table */
+ pgd = pgd_offset(mm, vmaddr);
+ pud = pud_alloc(mm, pgd, vmaddr);
+ if (!pud)
+ return -ENOMEM;
+ pmd = pmd_alloc(mm, pud, vmaddr);
+ if (!pmd)
+ return -ENOMEM;
+ if (!pmd_present(*pmd) &&
+ __pte_alloc(mm, vma, pmd, vmaddr))
+ return -ENOMEM;
+ /* pmd now points to a valid segment table entry. */
+ rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
+ if (!rmap)
+ return -ENOMEM;
+ /* Link gmap segment table entry location to page table. */
+ page = pmd_page(*pmd);
+ mp = (struct gmap_pgtable *) page->index;
+ rmap->entry = segment_ptr;
+ spin_lock(&mm->page_table_lock);
+ if (*segment_ptr == segment) {
list_add(&rmap->list, &mp->mapper);
- spin_unlock(&mm->page_table_lock);
/* Set gmap segment table entry to page table. */
- *table = pmd_val(*pmd) & PAGE_MASK;
- return vmaddr | (address & ~PMD_MASK);
+ *segment_ptr = pmd_val(*pmd) & PAGE_MASK;
+ rmap = NULL;
+ }
+ spin_unlock(&mm->page_table_lock);
+ kfree(rmap);
+ return 0;
+}
+
+static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
+{
+ struct gmap_rmap *rmap, *next;
+ struct gmap_pgtable *mp;
+ struct page *page;
+ int flush;
+
+ flush = 0;
+ spin_lock(&mm->page_table_lock);
+ page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
+ *rmap->entry =
+ _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
+ list_del(&rmap->list);
+ kfree(rmap);
+ flush = 1;
+ }
+ spin_unlock(&mm->page_table_lock);
+ if (flush)
+ __tlb_flush_global();
+}
+
+/*
+ * this function is assumed to be called with mmap_sem held
+ */
+unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
+{
+ unsigned long *segment_ptr, segment;
+ struct gmap_pgtable *mp;
+ struct page *page;
+ int rc;
+
+ current->thread.gmap_addr = address;
+ segment_ptr = gmap_table_walk(address, gmap);
+ if (IS_ERR(segment_ptr))
+ return -EFAULT;
+ /* Convert the gmap address to an mm address. */
+ while (1) {
+ segment = *segment_ptr;
+ if (!(segment & _SEGMENT_ENTRY_INV)) {
+ /* Page table is present */
+ page = pfn_to_page(segment >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ return mp->vmaddr | (address & ~PMD_MASK);
+ }
+ if (!(segment & _SEGMENT_ENTRY_RO))
+ /* Nothing mapped in the gmap address space. */
+ break;
+ rc = gmap_connect_pgtable(segment, segment_ptr, gmap);
+ if (rc)
+ return rc;
}
return -EFAULT;
}
@@ -511,29 +619,6 @@ void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
}
EXPORT_SYMBOL_GPL(gmap_discard);
-void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
-{
- struct gmap_rmap *rmap, *next;
- struct gmap_pgtable *mp;
- struct page *page;
- int flush;
-
- flush = 0;
- spin_lock(&mm->page_table_lock);
- page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
- mp = (struct gmap_pgtable *) page->index;
- list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
- *rmap->entry =
- _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
- list_del(&rmap->list);
- kfree(rmap);
- flush = 1;
- }
- spin_unlock(&mm->page_table_lock);
- if (flush)
- __tlb_flush_global();
-}
-
static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
unsigned long vmaddr)
{
@@ -586,8 +671,8 @@ static inline void page_table_free_pgste(unsigned long *table)
{
}
-static inline void gmap_unmap_notifier(struct mm_struct *mm,
- unsigned long *table)
+static inline void gmap_disconnect_pgtable(struct mm_struct *mm,
+ unsigned long *table)
{
}
@@ -653,7 +738,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
unsigned int bit, mask;
if (mm_has_pgste(mm)) {
- gmap_unmap_notifier(mm, table);
+ gmap_disconnect_pgtable(mm, table);
return page_table_free_pgste(table);
}
/* Free 1K/2K page table fragment of a 4K page */
@@ -696,7 +781,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
mm = tlb->mm;
if (mm_has_pgste(mm)) {
- gmap_unmap_notifier(mm, table);
+ gmap_disconnect_pgtable(mm, table);
table = (unsigned long *) (__pa(table) | FRAG_MASK);
tlb_remove_table(tlb, table);
return;
diff --git a/trunk/arch/s390/net/bpf_jit_comp.c b/trunk/arch/s390/net/bpf_jit_comp.c
index 0972e91cced2..82f165f8078c 100644
--- a/trunk/arch/s390/net/bpf_jit_comp.c
+++ b/trunk/arch/s390/net/bpf_jit_comp.c
@@ -747,10 +747,9 @@ void bpf_jit_compile(struct sk_filter *fp)
if (!bpf_jit_enable)
return;
- addrs = kmalloc(fp->len * sizeof(*addrs), GFP_KERNEL);
+ addrs = kcalloc(fp->len, sizeof(*addrs), GFP_KERNEL);
if (addrs == NULL)
return;
- memset(addrs, 0, fp->len * sizeof(*addrs));
memset(&jit, 0, sizeof(cjit));
memset(&cjit, 0, sizeof(cjit));
diff --git a/trunk/arch/s390/pci/Makefile b/trunk/arch/s390/pci/Makefile
index f0f426a113ce..086a2e37935d 100644
--- a/trunk/arch/s390/pci/Makefile
+++ b/trunk/arch/s390/pci/Makefile
@@ -2,5 +2,5 @@
# Makefile for the s390 PCI subsystem.
#
-obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o \
- pci_sysfs.o pci_event.o pci_debug.o
+obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o pci_sysfs.o \
+ pci_event.o pci_debug.o pci_insn.o
diff --git a/trunk/arch/s390/pci/pci.c b/trunk/arch/s390/pci/pci.c
index 27b4c17855b9..e6f15b5d8b7d 100644
--- a/trunk/arch/s390/pci/pci.c
+++ b/trunk/arch/s390/pci/pci.c
@@ -99,9 +99,6 @@ static int __read_mostly aisb_max;
static struct kmem_cache *zdev_irq_cache;
static struct kmem_cache *zdev_fmb_cache;
-debug_info_t *pci_debug_msg_id;
-debug_info_t *pci_debug_err_id;
-
static inline int irq_to_msi_nr(unsigned int irq)
{
return irq & ZPCI_MSI_MASK;
@@ -179,7 +176,7 @@ static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
fib->aisb = (u64) bucket->aisb + aisb / 8;
fib->aisbo = aisb & ZPCI_MSI_MASK;
- rc = mpcifc_instr(req, fib);
+ rc = s390pci_mod_fc(req, fib);
pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);
free_page((unsigned long) fib);
@@ -209,7 +206,7 @@ static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args
fib->iota = args->iota;
fib->fmb_addr = args->fmb_addr;
- rc = mpcifc_instr(req, fib);
+ rc = s390pci_mod_fc(req, fib);
free_page((unsigned long) fib);
return rc;
}
@@ -249,10 +246,9 @@ int zpci_fmb_enable_device(struct zpci_dev *zdev)
if (zdev->fmb)
return -EINVAL;
- zdev->fmb = kmem_cache_alloc(zdev_fmb_cache, GFP_KERNEL);
+ zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
if (!zdev->fmb)
return -ENOMEM;
- memset(zdev->fmb, 0, sizeof(*zdev->fmb));
WARN_ON((u64) zdev->fmb & 0xf);
args.fmb_addr = virt_to_phys(zdev->fmb);
@@ -284,12 +280,12 @@ static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
u64 data;
int rc;
- rc = pcilg_instr(&data, req, offset);
- data = data << ((8 - len) * 8);
- data = le64_to_cpu(data);
- if (!rc)
+ rc = s390pci_load(&data, req, offset);
+ if (!rc) {
+ data = data << ((8 - len) * 8);
+ data = le64_to_cpu(data);
*val = (u32) data;
- else
+ } else
*val = 0xffffffff;
return rc;
}
@@ -302,7 +298,7 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
data = cpu_to_le64(data);
data = data >> ((8 - len) * 8);
- rc = pcistg_instr(data, req, offset);
+ rc = s390pci_store(data, req, offset);
return rc;
}
@@ -409,20 +405,28 @@ static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *val)
{
struct zpci_dev *zdev = get_zdev_by_bus(bus);
+ int ret;
if (!zdev || devfn != ZPCI_DEVFN)
- return 0;
- return zpci_cfg_load(zdev, where, val, size);
+ ret = -ENODEV;
+ else
+ ret = zpci_cfg_load(zdev, where, val, size);
+
+ return ret;
}
static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 val)
{
struct zpci_dev *zdev = get_zdev_by_bus(bus);
+ int ret;
if (!zdev || devfn != ZPCI_DEVFN)
- return 0;
- return zpci_cfg_store(zdev, where, val, size);
+ ret = -ENODEV;
+ else
+ ret = zpci_cfg_store(zdev, where, val, size);
+
+ return ret;
}
static struct pci_ops pci_root_ops = {
@@ -474,7 +478,7 @@ static void zpci_irq_handler(void *dont, void *need)
}
/* enable interrupts again */
- sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
+ set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
/* check again to not lose initiative */
rmb();
@@ -596,19 +600,6 @@ static void zpci_map_resources(struct zpci_dev *zdev)
}
};
-static void zpci_unmap_resources(struct pci_dev *pdev)
-{
- resource_size_t len;
- int i;
-
- for (i = 0; i < PCI_BAR_COUNT; i++) {
- len = pci_resource_len(pdev, i);
- if (!len)
- continue;
- pci_iounmap(pdev, (void *) pdev->resource[i].start);
- }
-};
-
struct zpci_dev *zpci_alloc_device(void)
{
struct zpci_dev *zdev;
@@ -636,32 +627,6 @@ void zpci_free_device(struct zpci_dev *zdev)
kfree(zdev);
}
-/* Called on removal of pci_dev, leaves zpci and bus device */
-static void zpci_remove_device(struct pci_dev *pdev)
-{
- struct zpci_dev *zdev = get_zdev(pdev);
-
- dev_info(&pdev->dev, "Removing device %u\n", zdev->domain);
- zdev->state = ZPCI_FN_STATE_CONFIGURED;
- zpci_dma_exit_device(zdev);
- zpci_fmb_disable_device(zdev);
- zpci_sysfs_remove_device(&pdev->dev);
- zpci_unmap_resources(pdev);
- list_del(&zdev->entry); /* can be called from init */
- zdev->pdev = NULL;
-}
-
-static void zpci_scan_devices(void)
-{
- struct zpci_dev *zdev;
-
- mutex_lock(&zpci_list_lock);
- list_for_each_entry(zdev, &zpci_list, entry)
- if (zdev->state == ZPCI_FN_STATE_CONFIGURED)
- zpci_scan_device(zdev);
- mutex_unlock(&zpci_list_lock);
-}
-
/*
* Too late for any s390 specific setup, since interrupts must be set up
* already which requires DMA setup too and the pci scan will access the
@@ -688,12 +653,6 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask)
return 0;
}
-void pcibios_disable_device(struct pci_dev *pdev)
-{
- zpci_remove_device(pdev);
- pdev->sysdata = NULL;
-}
-
int pcibios_add_platform_entries(struct pci_dev *pdev)
{
return zpci_sysfs_add_device(&pdev->dev);
@@ -789,7 +748,7 @@ static int __init zpci_irq_init(void)
spin_lock_init(&bucket->lock);
/* set summary to 1 to be called every time for the ISC */
*zpci_irq_si = 1;
- sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
+ set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
return 0;
out_ai:
@@ -872,7 +831,19 @@ static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
spin_unlock(&zpci_iomap_lock);
}
-static int zpci_create_device_bus(struct zpci_dev *zdev)
+int pcibios_add_device(struct pci_dev *pdev)
+{
+ struct zpci_dev *zdev = get_zdev(pdev);
+
+ zdev->pdev = pdev;
+ zpci_debug_init_device(zdev);
+ zpci_fmb_enable_device(zdev);
+ zpci_map_resources(zdev);
+
+ return 0;
+}
+
+static int zpci_scan_bus(struct zpci_dev *zdev)
{
struct resource *res;
LIST_HEAD(resources);
@@ -909,8 +880,8 @@ static int zpci_create_device_bus(struct zpci_dev *zdev)
pci_add_resource(&resources, res);
}
- zdev->bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
- zdev, &resources);
+ zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
+ zdev, &resources);
if (!zdev->bus)
return -EIO;
@@ -959,6 +930,13 @@ int zpci_enable_device(struct zpci_dev *zdev)
}
EXPORT_SYMBOL_GPL(zpci_enable_device);
+int zpci_disable_device(struct zpci_dev *zdev)
+{
+ zpci_dma_exit_device(zdev);
+ return clp_disable_fh(zdev);
+}
+EXPORT_SYMBOL_GPL(zpci_disable_device);
+
int zpci_create_device(struct zpci_dev *zdev)
{
int rc;
@@ -967,9 +945,16 @@ int zpci_create_device(struct zpci_dev *zdev)
if (rc)
goto out;
- rc = zpci_create_device_bus(zdev);
+ if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
+ rc = zpci_enable_device(zdev);
+ if (rc)
+ goto out_free;
+
+ zdev->state = ZPCI_FN_STATE_ONLINE;
+ }
+ rc = zpci_scan_bus(zdev);
if (rc)
- goto out_bus;
+ goto out_disable;
mutex_lock(&zpci_list_lock);
list_add_tail(&zdev->entry, &zpci_list);
@@ -977,21 +962,12 @@ int zpci_create_device(struct zpci_dev *zdev)
hotplug_ops->create_slot(zdev);
mutex_unlock(&zpci_list_lock);
- if (zdev->state == ZPCI_FN_STATE_STANDBY)
- return 0;
-
- rc = zpci_enable_device(zdev);
- if (rc)
- goto out_start;
return 0;
-out_start:
- mutex_lock(&zpci_list_lock);
- list_del(&zdev->entry);
- if (hotplug_ops)
- hotplug_ops->remove_slot(zdev);
- mutex_unlock(&zpci_list_lock);
-out_bus:
+out_disable:
+ if (zdev->state == ZPCI_FN_STATE_ONLINE)
+ zpci_disable_device(zdev);
+out_free:
zpci_free_domain(zdev);
out:
return rc;
@@ -1016,15 +992,9 @@ int zpci_scan_device(struct zpci_dev *zdev)
goto out;
}
- zpci_debug_init_device(zdev);
- zpci_fmb_enable_device(zdev);
- zpci_map_resources(zdev);
pci_bus_add_devices(zdev->bus);
- /* now that pdev was added to the bus mark it as used */
- zdev->state = ZPCI_FN_STATE_ONLINE;
return 0;
-
out:
zpci_dma_exit_device(zdev);
clp_disable_fh(zdev);
@@ -1087,13 +1057,13 @@ void zpci_deregister_hp_ops(void)
}
EXPORT_SYMBOL_GPL(zpci_deregister_hp_ops);
-unsigned int s390_pci_probe = 1;
+unsigned int s390_pci_probe;
EXPORT_SYMBOL_GPL(s390_pci_probe);
char * __init pcibios_setup(char *str)
{
- if (!strcmp(str, "off")) {
- s390_pci_probe = 0;
+ if (!strcmp(str, "on")) {
+ s390_pci_probe = 1;
return NULL;
}
return str;
@@ -1138,7 +1108,6 @@ static int __init pci_base_init(void)
if (rc)
goto out_find;
- zpci_scan_devices();
return 0;
out_find:
diff --git a/trunk/arch/s390/pci/pci_clp.c b/trunk/arch/s390/pci/pci_clp.c
index f339fe2feb15..bd34359d1546 100644
--- a/trunk/arch/s390/pci/pci_clp.c
+++ b/trunk/arch/s390/pci/pci_clp.c
@@ -13,6 +13,7 @@
#include
#include
#include
+#include
#include
/*
@@ -144,6 +145,7 @@ int clp_add_pci_device(u32 fid, u32 fh, int configured)
struct zpci_dev *zdev;
int rc;
+ zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
zdev = zpci_alloc_device();
if (IS_ERR(zdev))
return PTR_ERR(zdev);
@@ -204,8 +206,8 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
*fh = rrb->response.fh;
else {
- pr_err("Set PCI FN failed with response: %x cc: %d\n",
- rrb->response.hdr.rsp, rc);
+ zpci_dbg(0, "SPF fh:%x, cc:%d, resp:%x\n", *fh, rc,
+ rrb->response.hdr.rsp);
rc = -EIO;
}
clp_free_block(rrb);
@@ -221,6 +223,8 @@ int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
if (!rc)
/* Success -> store enabled handle in zdev */
zdev->fh = fh;
+
+ zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
return rc;
}
@@ -237,9 +241,8 @@ int clp_disable_fh(struct zpci_dev *zdev)
if (!rc)
/* Success -> store disabled handle in zdev */
zdev->fh = fh;
- else
- dev_err(&zdev->pdev->dev,
- "Failed to disable fn handle: 0x%x\n", fh);
+
+ zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
return rc;
}
diff --git a/trunk/arch/s390/pci/pci_debug.c b/trunk/arch/s390/pci/pci_debug.c
index a5d07bc2a547..771b82359af4 100644
--- a/trunk/arch/s390/pci/pci_debug.c
+++ b/trunk/arch/s390/pci/pci_debug.c
@@ -11,12 +11,17 @@
#include
#include
#include
+#include
#include
#include
#include
static struct dentry *debugfs_root;
+debug_info_t *pci_debug_msg_id;
+EXPORT_SYMBOL_GPL(pci_debug_msg_id);
+debug_info_t *pci_debug_err_id;
+EXPORT_SYMBOL_GPL(pci_debug_err_id);
static char *pci_perf_names[] = {
/* hardware counters */
@@ -168,7 +173,6 @@ int __init zpci_debug_init(void)
return -EINVAL;
debug_register_view(pci_debug_msg_id, &debug_sprintf_view);
debug_set_level(pci_debug_msg_id, 3);
- zpci_dbg("Debug view initialized\n");
/* error log */
pci_debug_err_id = debug_register("pci_error", 2, 1, 16);
@@ -176,7 +180,6 @@ int __init zpci_debug_init(void)
return -EINVAL;
debug_register_view(pci_debug_err_id, &debug_hex_ascii_view);
debug_set_level(pci_debug_err_id, 6);
- zpci_err("Debug view initialized\n");
debugfs_root = debugfs_create_dir("pci", NULL);
return 0;
diff --git a/trunk/arch/s390/pci/pci_dma.c b/trunk/arch/s390/pci/pci_dma.c
index a547419907c3..f8e69d5bc0a9 100644
--- a/trunk/arch/s390/pci/pci_dma.c
+++ b/trunk/arch/s390/pci/pci_dma.c
@@ -169,8 +169,9 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
* needs to be redone!
*/
goto no_refresh;
- rc = rpcit_instr((u64) zdev->fh << 32, start_dma_addr,
- nr_pages * PAGE_SIZE);
+
+ rc = s390pci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
+ nr_pages * PAGE_SIZE);
no_refresh:
spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
@@ -268,8 +269,6 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
int flags = ZPCI_PTE_VALID;
dma_addr_t dma_addr;
- WARN_ON_ONCE(offset > PAGE_SIZE);
-
/* This rounds up number of pages based on size and offset */
nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
@@ -291,7 +290,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages);
- return dma_addr + offset;
+ return dma_addr + (offset & ~PAGE_MASK);
}
out_free:
diff --git a/trunk/arch/s390/pci/pci_insn.c b/trunk/arch/s390/pci/pci_insn.c
new file mode 100644
index 000000000000..22eeb9d7ffeb
--- /dev/null
+++ b/trunk/arch/s390/pci/pci_insn.c
@@ -0,0 +1,202 @@
+/*
+ * s390 specific pci instructions
+ *
+ * Copyright IBM Corp. 2013
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+#define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */
+
+/* Modify PCI Function Controls */
+static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
+{
+ u8 cc;
+
+ asm volatile (
+ " .insn rxy,0xe300000000d0,%[req],%[fib]\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
+ : : "cc");
+ *status = req >> 24 & 0xff;
+ return cc;
+}
+
+int s390pci_mod_fc(u64 req, struct zpci_fib *fib)
+{
+ u8 cc, status;
+
+ do {
+ cc = __mpcifc(req, fib, &status);
+ if (cc == 2)
+ msleep(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d\n",
+ __func__, cc, status);
+ return (cc) ? -EIO : 0;
+}
+
+/* Refresh PCI Translations */
+static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
+{
+ register u64 __addr asm("2") = addr;
+ register u64 __range asm("3") = range;
+ u8 cc;
+
+ asm volatile (
+ " .insn rre,0xb9d30000,%[fn],%[addr]\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=d" (cc), [fn] "+d" (fn)
+ : [addr] "d" (__addr), "d" (__range)
+ : "cc");
+ *status = fn >> 24 & 0xff;
+ return cc;
+}
+
+int s390pci_refresh_trans(u64 fn, u64 addr, u64 range)
+{
+ u8 cc, status;
+
+ do {
+ cc = __rpcit(fn, addr, range, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d dma_addr: %Lx size: %Lx\n",
+ __func__, cc, status, addr, range);
+ return (cc) ? -EIO : 0;
+}
+
+/* Set Interruption Controls */
+void set_irq_ctrl(u16 ctl, char *unused, u8 isc)
+{
+ asm volatile (
+ " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
+ : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
+}
+
+/* PCI Load */
+static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
+{
+ register u64 __req asm("2") = req;
+ register u64 __offset asm("3") = offset;
+ int cc = -ENXIO;
+ u64 __data;
+
+ asm volatile (
+ " .insn rre,0xb9d20000,%[data],%[req]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : [cc] "+d" (cc), [data] "=d" (__data), [req] "+d" (__req)
+ : "d" (__offset)
+ : "cc");
+ *status = __req >> 24 & 0xff;
+ if (!cc)
+ *data = __data;
+
+ return cc;
+}
+
+int s390pci_load(u64 *data, u64 req, u64 offset)
+{
+ u8 status;
+ int cc;
+
+ do {
+ cc = __pcilg(data, req, offset, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
+ __func__, cc, status, req, offset);
+ return (cc > 0) ? -EIO : cc;
+}
+EXPORT_SYMBOL_GPL(s390pci_load);
+
+/* PCI Store */
+static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
+{
+ register u64 __req asm("2") = req;
+ register u64 __offset asm("3") = offset;
+ int cc = -ENXIO;
+
+ asm volatile (
+ " .insn rre,0xb9d00000,%[data],%[req]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : [cc] "+d" (cc), [req] "+d" (__req)
+ : "d" (__offset), [data] "d" (data)
+ : "cc");
+ *status = __req >> 24 & 0xff;
+ return cc;
+}
+
+int s390pci_store(u64 data, u64 req, u64 offset)
+{
+ u8 status;
+ int cc;
+
+ do {
+ cc = __pcistg(data, req, offset, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
+ __func__, cc, status, req, offset);
+ return (cc > 0) ? -EIO : cc;
+}
+EXPORT_SYMBOL_GPL(s390pci_store);
+
+/* PCI Store Block */
+static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
+{
+ int cc = -ENXIO;
+
+ asm volatile (
+ " .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : [cc] "+d" (cc), [req] "+d" (req)
+ : [offset] "d" (offset), [data] "Q" (*data)
+ : "cc");
+ *status = req >> 24 & 0xff;
+ return cc;
+}
+
+int s390pci_store_block(const u64 *data, u64 req, u64 offset)
+{
+ u8 status;
+ int cc;
+
+ do {
+ cc = __pcistb(data, req, offset, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
+ __func__, cc, status, req, offset);
+ return (cc > 0) ? -EIO : cc;
+}
+EXPORT_SYMBOL_GPL(s390pci_store_block);
diff --git a/trunk/arch/s390/pci/pci_msi.c b/trunk/arch/s390/pci/pci_msi.c
index 0297931335e1..b097aed05a9b 100644
--- a/trunk/arch/s390/pci/pci_msi.c
+++ b/trunk/arch/s390/pci/pci_msi.c
@@ -18,8 +18,9 @@
/* mapping of irq numbers to msi_desc */
static struct hlist_head *msi_hash;
-static unsigned int msihash_shift = 6;
-#define msi_hashfn(nr) hash_long(nr, msihash_shift)
+static const unsigned int msi_hash_bits = 8;
+#define MSI_HASH_BUCKETS (1U << msi_hash_bits)
+#define msi_hashfn(nr) hash_long(nr, msi_hash_bits)
static DEFINE_SPINLOCK(msi_map_lock);
@@ -74,6 +75,7 @@ int zpci_setup_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi,
map->irq = nr;
map->msi = msi;
zdev->msi_map[nr & ZPCI_MSI_MASK] = map;
+ INIT_HLIST_NODE(&map->msi_chain);
pr_debug("%s hashing irq: %u to bucket nr: %llu\n",
__func__, nr, msi_hashfn(nr));
@@ -125,11 +127,11 @@ int __init zpci_msihash_init(void)
{
unsigned int i;
- msi_hash = kmalloc(256 * sizeof(*msi_hash), GFP_KERNEL);
+ msi_hash = kmalloc(MSI_HASH_BUCKETS * sizeof(*msi_hash), GFP_KERNEL);
if (!msi_hash)
return -ENOMEM;
- for (i = 0; i < (1U << msihash_shift); i++)
+ for (i = 0; i < MSI_HASH_BUCKETS; i++)
INIT_HLIST_HEAD(&msi_hash[i]);
return 0;
}
diff --git a/trunk/arch/sparc/include/asm/Kbuild b/trunk/arch/sparc/include/asm/Kbuild
index e26d430ce2fd..ff18e3cfb6b1 100644
--- a/trunk/arch/sparc/include/asm/Kbuild
+++ b/trunk/arch/sparc/include/asm/Kbuild
@@ -2,11 +2,16 @@
generic-y += clkdev.h
+generic-y += cputime.h
generic-y += div64.h
+generic-y += emergency-restart.h
generic-y += exec.h
generic-y += local64.h
+generic-y += mutex.h
generic-y += irq_regs.h
generic-y += local.h
generic-y += module.h
+generic-y += serial.h
generic-y += trace_clock.h
+generic-y += types.h
generic-y += word-at-a-time.h
diff --git a/trunk/arch/sparc/include/asm/cputime.h b/trunk/arch/sparc/include/asm/cputime.h
deleted file mode 100644
index 1a642b81e019..000000000000
--- a/trunk/arch/sparc/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __SPARC_CPUTIME_H
-#define __SPARC_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __SPARC_CPUTIME_H */
diff --git a/trunk/arch/sparc/include/asm/emergency-restart.h b/trunk/arch/sparc/include/asm/emergency-restart.h
deleted file mode 100644
index 108d8c48e42e..000000000000
--- a/trunk/arch/sparc/include/asm/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/trunk/arch/sparc/include/asm/mutex.h b/trunk/arch/sparc/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/trunk/arch/sparc/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/trunk/arch/sparc/include/asm/pgtable_64.h b/trunk/arch/sparc/include/asm/pgtable_64.h
index 08fcce90316b..7619f2f792af 100644
--- a/trunk/arch/sparc/include/asm/pgtable_64.h
+++ b/trunk/arch/sparc/include/asm/pgtable_64.h
@@ -915,6 +915,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
+#include <asm/tlbflush.h>
#include <asm-generic/pgtable.h>
/* We provide our own get_unmapped_area to cope with VA holes and
diff --git a/trunk/arch/sparc/include/asm/serial.h b/trunk/arch/sparc/include/asm/serial.h
deleted file mode 100644
index f90d61c28059..000000000000
--- a/trunk/arch/sparc/include/asm/serial.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __SPARC_SERIAL_H
-#define __SPARC_SERIAL_H
-
-#define BASE_BAUD ( 1843200 / 16 )
-
-#endif /* __SPARC_SERIAL_H */
diff --git a/trunk/arch/sparc/include/asm/smp_32.h b/trunk/arch/sparc/include/asm/smp_32.h
index b73da3c5f10a..3c8917f054de 100644
--- a/trunk/arch/sparc/include/asm/smp_32.h
+++ b/trunk/arch/sparc/include/asm/smp_32.h
@@ -36,7 +36,6 @@ typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long);
void cpu_panic(void);
-extern void smp4m_irq_rotate(int cpu);
/*
* General functions that each host system must provide.
@@ -46,7 +45,6 @@ void sun4m_init_smp(void);
void sun4d_init_smp(void);
void smp_callin(void);
-void smp_boot_cpus(void);
void smp_store_cpu_info(int);
void smp_resched_interrupt(void);
@@ -107,9 +105,6 @@ extern int hard_smp_processor_id(void);
#define raw_smp_processor_id() (current_thread_info()->cpu)
-#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
-#define prof_counter(__cpu) cpu_data(__cpu).counter
-
void smp_setup_cpu_possible_map(void);
#endif /* !(__ASSEMBLY__) */
diff --git a/trunk/arch/sparc/include/asm/switch_to_64.h b/trunk/arch/sparc/include/asm/switch_to_64.h
index cad36f56fa03..c7de3323819c 100644
--- a/trunk/arch/sparc/include/asm/switch_to_64.h
+++ b/trunk/arch/sparc/include/asm/switch_to_64.h
@@ -18,8 +18,7 @@ do { \
* and 2 stores in this critical code path. -DaveM
*/
#define switch_to(prev, next, last) \
-do { flush_tlb_pending(); \
- save_and_clear_fpu(); \
+do { save_and_clear_fpu(); \
/* If you are tempted to conditionalize the following */ \
/* so that ASI is only written if it changes, think again. */ \
__asm__ __volatile__("wr %%g0, %0, %%asi" \
diff --git a/trunk/arch/sparc/include/asm/tlbflush_64.h b/trunk/arch/sparc/include/asm/tlbflush_64.h
index 2ef463494153..f0d6a9700f4c 100644
--- a/trunk/arch/sparc/include/asm/tlbflush_64.h
+++ b/trunk/arch/sparc/include/asm/tlbflush_64.h
@@ -11,24 +11,40 @@
struct tlb_batch {
struct mm_struct *mm;
unsigned long tlb_nr;
+ unsigned long active;
unsigned long vaddrs[TLB_BATCH_NR];
};
extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
extern void flush_tsb_user(struct tlb_batch *tb);
+extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
/* TLB flush operations. */
-extern void flush_tlb_pending(void);
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+}
+
+#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
-#define flush_tlb_range(vma,start,end) \
- do { (void)(start); flush_tlb_pending(); } while (0)
-#define flush_tlb_page(vma,addr) flush_tlb_pending()
-#define flush_tlb_mm(mm) flush_tlb_pending()
+extern void flush_tlb_pending(void);
+extern void arch_enter_lazy_mmu_mode(void);
+extern void arch_leave_lazy_mmu_mode(void);
+#define arch_flush_lazy_mmu_mode() do {} while (0)
/* Local cpu only. */
extern void __flush_tlb_all(void);
-
+extern void __flush_tlb_page(unsigned long context, unsigned long vaddr);
extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
#ifndef CONFIG_SMP
@@ -38,15 +54,24 @@ do { flush_tsb_kernel_range(start,end); \
__flush_tlb_kernel_range(start,end); \
} while (0)
+static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
+{
+ __flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
+}
+
#else /* CONFIG_SMP */
extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
#define flush_tlb_kernel_range(start, end) \
do { flush_tsb_kernel_range(start,end); \
smp_flush_tlb_kernel_range(start, end); \
} while (0)
+#define global_flush_tlb_page(mm, vaddr) \
+ smp_flush_tlb_page(mm, vaddr)
+
#endif /* ! CONFIG_SMP */
#endif /* _SPARC64_TLBFLUSH_H */
diff --git a/trunk/arch/sparc/include/uapi/asm/Kbuild b/trunk/arch/sparc/include/uapi/asm/Kbuild
index ce175aff71b7..b5843ee09fb5 100644
--- a/trunk/arch/sparc/include/uapi/asm/Kbuild
+++ b/trunk/arch/sparc/include/uapi/asm/Kbuild
@@ -44,7 +44,6 @@ header-y += swab.h
header-y += termbits.h
header-y += termios.h
header-y += traps.h
-header-y += types.h
header-y += uctx.h
header-y += unistd.h
header-y += utrap.h
diff --git a/trunk/arch/sparc/include/uapi/asm/types.h b/trunk/arch/sparc/include/uapi/asm/types.h
deleted file mode 100644
index 383d156cde9c..000000000000
--- a/trunk/arch/sparc/include/uapi/asm/types.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef _SPARC_TYPES_H
-#define _SPARC_TYPES_H
-/*
- * This file is never included by application software unless
- * explicitly requested (e.g., via linux/types.h) in which case the
- * application is Linux specific so (user-) name space pollution is
- * not a major issue. However, for interoperability, libraries still
- * need to be careful to avoid a name clashes.
- */
-
-#if defined(__sparc__)
-
-#include <asm-generic/int-ll64.h>
-
-#endif /* defined(__sparc__) */
-
-#endif /* defined(_SPARC_TYPES_H) */
diff --git a/trunk/arch/sparc/kernel/smp_64.c b/trunk/arch/sparc/kernel/smp_64.c
index 537eb66abd06..ca64d2a86ec0 100644
--- a/trunk/arch/sparc/kernel/smp_64.c
+++ b/trunk/arch/sparc/kernel/smp_64.c
@@ -849,7 +849,7 @@ void smp_tsb_sync(struct mm_struct *mm)
}
extern unsigned long xcall_flush_tlb_mm;
-extern unsigned long xcall_flush_tlb_pending;
+extern unsigned long xcall_flush_tlb_page;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_fetch_glob_regs;
extern unsigned long xcall_fetch_glob_pmu;
@@ -1074,23 +1074,56 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
put_cpu();
}
+struct tlb_pending_info {
+ unsigned long ctx;
+ unsigned long nr;
+ unsigned long *vaddrs;
+};
+
+static void tlb_pending_func(void *info)
+{
+ struct tlb_pending_info *t = info;
+
+ __flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
+}
+
void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
u32 ctx = CTX_HWBITS(mm->context);
+ struct tlb_pending_info info;
int cpu = get_cpu();
+ info.ctx = ctx;
+ info.nr = nr;
+ info.vaddrs = vaddrs;
+
if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
else
- smp_cross_call_masked(&xcall_flush_tlb_pending,
- ctx, nr, (unsigned long) vaddrs,
- mm_cpumask(mm));
+ smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
+ &info, 1);
__flush_tlb_pending(ctx, nr, vaddrs);
put_cpu();
}
+void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
+{
+ unsigned long context = CTX_HWBITS(mm->context);
+ int cpu = get_cpu();
+
+ if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
+ cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
+ else
+ smp_cross_call_masked(&xcall_flush_tlb_page,
+ context, vaddr, 0,
+ mm_cpumask(mm));
+ __flush_tlb_page(context, vaddr);
+
+ put_cpu();
+}
+
void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
start &= PAGE_MASK;
diff --git a/trunk/arch/sparc/lib/bitext.c b/trunk/arch/sparc/lib/bitext.c
index 48d00e72ce15..8ec4e9c0251a 100644
--- a/trunk/arch/sparc/lib/bitext.c
+++ b/trunk/arch/sparc/lib/bitext.c
@@ -119,11 +119,7 @@ void bit_map_clear(struct bit_map *t, int offset, int len)
void bit_map_init(struct bit_map *t, unsigned long *map, int size)
{
-
- if ((size & 07) != 0)
- BUG();
- memset(map, 0, size>>3);
-
+ bitmap_zero(map, size);
memset(t, 0, sizeof *t);
spin_lock_init(&t->lock);
t->map = map;
diff --git a/trunk/arch/sparc/mm/iommu.c b/trunk/arch/sparc/mm/iommu.c
index 0f4f7191fbba..28f96f27c768 100644
--- a/trunk/arch/sparc/mm/iommu.c
+++ b/trunk/arch/sparc/mm/iommu.c
@@ -34,7 +34,7 @@
#define IOMMU_RNGE IOMMU_RNGE_256MB
#define IOMMU_START 0xF0000000
#define IOMMU_WINSIZE (256*1024*1024U)
-#define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 265KB */
+#define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 256KB */
#define IOMMU_ORDER 6 /* 4096 * (1<<6) */
/* srmmu.c */
diff --git a/trunk/arch/sparc/mm/srmmu.c b/trunk/arch/sparc/mm/srmmu.c
index c38bb72e3e80..036c2797dece 100644
--- a/trunk/arch/sparc/mm/srmmu.c
+++ b/trunk/arch/sparc/mm/srmmu.c
@@ -280,7 +280,9 @@ static void __init srmmu_nocache_init(void)
SRMMU_NOCACHE_ALIGN_MAX, 0UL);
memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
- srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
+ srmmu_nocache_bitmap =
+ __alloc_bootmem(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
+ SMP_CACHE_BYTES, 0UL);
bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
diff --git a/trunk/arch/sparc/mm/tlb.c b/trunk/arch/sparc/mm/tlb.c
index ba6ae7ffdc2c..83d89bcb44af 100644
--- a/trunk/arch/sparc/mm/tlb.c
+++ b/trunk/arch/sparc/mm/tlb.c
@@ -24,11 +24,17 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
void flush_tlb_pending(void)
{
struct tlb_batch *tb = &get_cpu_var(tlb_batch);
+ struct mm_struct *mm = tb->mm;
- if (tb->tlb_nr) {
- flush_tsb_user(tb);
+ if (!tb->tlb_nr)
+ goto out;
- if (CTX_VALID(tb->mm->context)) {
+ flush_tsb_user(tb);
+
+ if (CTX_VALID(mm->context)) {
+ if (tb->tlb_nr == 1) {
+ global_flush_tlb_page(mm, tb->vaddrs[0]);
+ } else {
#ifdef CONFIG_SMP
smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
&tb->vaddrs[0]);
@@ -37,12 +43,30 @@ void flush_tlb_pending(void)
tb->tlb_nr, &tb->vaddrs[0]);
#endif
}
- tb->tlb_nr = 0;
}
+ tb->tlb_nr = 0;
+
+out:
put_cpu_var(tlb_batch);
}
+void arch_enter_lazy_mmu_mode(void)
+{
+ struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
+
+ tb->active = 1;
+}
+
+void arch_leave_lazy_mmu_mode(void)
+{
+ struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
+
+ if (tb->tlb_nr)
+ flush_tlb_pending();
+ tb->active = 0;
+}
+
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
bool exec)
{
@@ -60,6 +84,12 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
nr = 0;
}
+ if (!tb->active) {
+ global_flush_tlb_page(mm, vaddr);
+ flush_tsb_user_page(mm, vaddr);
+ goto out;
+ }
+
if (nr == 0)
tb->mm = mm;
@@ -68,6 +98,7 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
if (nr >= TLB_BATCH_NR)
flush_tlb_pending();
+out:
put_cpu_var(tlb_batch);
}
diff --git a/trunk/arch/sparc/mm/tsb.c b/trunk/arch/sparc/mm/tsb.c
index 428982b9becf..2cc3bce5ee91 100644
--- a/trunk/arch/sparc/mm/tsb.c
+++ b/trunk/arch/sparc/mm/tsb.c
@@ -7,11 +7,10 @@
#include
#include
#include
-#include
-#include
-#include
#include
+#include
#include
+#include
#include
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
@@ -46,23 +45,27 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
}
}
-static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
- unsigned long tsb, unsigned long nentries)
+static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
+ unsigned long hash_shift,
+ unsigned long nentries)
{
- unsigned long i;
+ unsigned long tag, ent, hash;
- for (i = 0; i < tb->tlb_nr; i++) {
- unsigned long v = tb->vaddrs[i];
- unsigned long tag, ent, hash;
+ v &= ~0x1UL;
+ hash = tsb_hash(v, hash_shift, nentries);
+ ent = tsb + (hash * sizeof(struct tsb));
+ tag = (v >> 22UL);
- v &= ~0x1UL;
+ tsb_flush(ent, tag);
+}
- hash = tsb_hash(v, hash_shift, nentries);
- ent = tsb + (hash * sizeof(struct tsb));
- tag = (v >> 22UL);
+static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
+ unsigned long tsb, unsigned long nentries)
+{
+ unsigned long i;
- tsb_flush(ent, tag);
- }
+ for (i = 0; i < tb->tlb_nr; i++)
+ __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
}
void flush_tsb_user(struct tlb_batch *tb)
@@ -90,6 +93,30 @@ void flush_tsb_user(struct tlb_batch *tb)
spin_unlock_irqrestore(&mm->context.lock, flags);
}
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
+{
+ unsigned long nentries, base, flags;
+
+ spin_lock_irqsave(&mm->context.lock, flags);
+
+ base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+ nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+ if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+ base = __pa(base);
+ __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
+
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+ if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+ base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
+ nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
+ if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+ base = __pa(base);
+ __flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
+ }
+#endif
+ spin_unlock_irqrestore(&mm->context.lock, flags);
+}
+
#define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K
#define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K
diff --git a/trunk/arch/sparc/mm/ultra.S b/trunk/arch/sparc/mm/ultra.S
index f8e13d421fcb..432aa0cb1b38 100644
--- a/trunk/arch/sparc/mm/ultra.S
+++ b/trunk/arch/sparc/mm/ultra.S
@@ -52,6 +52,33 @@ __flush_tlb_mm: /* 18 insns */
nop
nop
+ .align 32
+ .globl __flush_tlb_page
+__flush_tlb_page: /* 22 insns */
+ /* %o0 = context, %o1 = vaddr */
+ rdpr %pstate, %g7
+ andn %g7, PSTATE_IE, %g2
+ wrpr %g2, %pstate
+ mov SECONDARY_CONTEXT, %o4
+ ldxa [%o4] ASI_DMMU, %g2
+ stxa %o0, [%o4] ASI_DMMU
+ andcc %o1, 1, %g0
+ andn %o1, 1, %o3
+ be,pn %icc, 1f
+ or %o3, 0x10, %o3
+ stxa %g0, [%o3] ASI_IMMU_DEMAP
+1: stxa %g0, [%o3] ASI_DMMU_DEMAP
+ membar #Sync
+ stxa %g2, [%o4] ASI_DMMU
+ sethi %hi(KERNBASE), %o4
+ flush %o4
+ retl
+ wrpr %g7, 0x0, %pstate
+ nop
+ nop
+ nop
+ nop
+
.align 32
.globl __flush_tlb_pending
__flush_tlb_pending: /* 26 insns */
@@ -203,6 +230,31 @@ __cheetah_flush_tlb_mm: /* 19 insns */
retl
wrpr %g7, 0x0, %pstate
+__cheetah_flush_tlb_page: /* 22 insns */
+ /* %o0 = context, %o1 = vaddr */
+ rdpr %pstate, %g7
+ andn %g7, PSTATE_IE, %g2
+ wrpr %g2, 0x0, %pstate
+ wrpr %g0, 1, %tl
+ mov PRIMARY_CONTEXT, %o4
+ ldxa [%o4] ASI_DMMU, %g2
+ srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3
+ sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3
+ or %o0, %o3, %o0 /* Preserve nucleus page size fields */
+ stxa %o0, [%o4] ASI_DMMU
+ andcc %o1, 1, %g0
+ be,pn %icc, 1f
+ andn %o1, 1, %o3
+ stxa %g0, [%o3] ASI_IMMU_DEMAP
+1: stxa %g0, [%o3] ASI_DMMU_DEMAP
+ membar #Sync
+ stxa %g2, [%o4] ASI_DMMU
+ sethi %hi(KERNBASE), %o4
+ flush %o4
+ wrpr %g0, 0, %tl
+ retl
+ wrpr %g7, 0x0, %pstate
+
__cheetah_flush_tlb_pending: /* 27 insns */
/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
rdpr %pstate, %g7
@@ -269,6 +321,20 @@ __hypervisor_flush_tlb_mm: /* 10 insns */
retl
nop
+__hypervisor_flush_tlb_page: /* 11 insns */
+ /* %o0 = context, %o1 = vaddr */
+ mov %o0, %g2
+ mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */
+ mov %g2, %o1 /* ARG1: mmu context */
+ mov HV_MMU_ALL, %o2 /* ARG2: flags */
+ srlx %o0, PAGE_SHIFT, %o0
+ sllx %o0, PAGE_SHIFT, %o0
+ ta HV_MMU_UNMAP_ADDR_TRAP
+ brnz,pn %o0, __hypervisor_tlb_tl0_error
+ mov HV_MMU_UNMAP_ADDR_TRAP, %o1
+ retl
+ nop
+
__hypervisor_flush_tlb_pending: /* 16 insns */
/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
sllx %o1, 3, %g1
@@ -339,6 +405,13 @@ cheetah_patch_cachetlbops:
call tlb_patch_one
mov 19, %o2
+ sethi %hi(__flush_tlb_page), %o0
+ or %o0, %lo(__flush_tlb_page), %o0
+ sethi %hi(__cheetah_flush_tlb_page), %o1
+ or %o1, %lo(__cheetah_flush_tlb_page), %o1
+ call tlb_patch_one
+ mov 22, %o2
+
sethi %hi(__flush_tlb_pending), %o0
or %o0, %lo(__flush_tlb_pending), %o0
sethi %hi(__cheetah_flush_tlb_pending), %o1
@@ -397,10 +470,9 @@ xcall_flush_tlb_mm: /* 21 insns */
nop
nop
- .globl xcall_flush_tlb_pending
-xcall_flush_tlb_pending: /* 21 insns */
- /* %g5=context, %g1=nr, %g7=vaddrs[] */
- sllx %g1, 3, %g1
+ .globl xcall_flush_tlb_page
+xcall_flush_tlb_page: /* 17 insns */
+ /* %g5=context, %g1=vaddr */
mov PRIMARY_CONTEXT, %g4
ldxa [%g4] ASI_DMMU, %g2
srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4
@@ -408,20 +480,16 @@ xcall_flush_tlb_pending: /* 21 insns */
or %g5, %g4, %g5
mov PRIMARY_CONTEXT, %g4
stxa %g5, [%g4] ASI_DMMU
-1: sub %g1, (1 << 3), %g1
- ldx [%g7 + %g1], %g5
- andcc %g5, 0x1, %g0
+ andcc %g1, 0x1, %g0
be,pn %icc, 2f
-
- andn %g5, 0x1, %g5
+ andn %g1, 0x1, %g5
stxa %g0, [%g5] ASI_IMMU_DEMAP
2: stxa %g0, [%g5] ASI_DMMU_DEMAP
membar #Sync
- brnz,pt %g1, 1b
- nop
stxa %g2, [%g4] ASI_DMMU
retry
nop
+ nop
.globl xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range: /* 25 insns */
@@ -656,15 +724,13 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
membar #Sync
retry
- .globl __hypervisor_xcall_flush_tlb_pending
-__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
- /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
- sllx %g1, 3, %g1
+ .globl __hypervisor_xcall_flush_tlb_page
+__hypervisor_xcall_flush_tlb_page: /* 17 insns */
+ /* %g5=ctx, %g1=vaddr */
mov %o0, %g2
mov %o1, %g3
mov %o2, %g4
-1: sub %g1, (1 << 3), %g1
- ldx [%g7 + %g1], %o0 /* ARG0: virtual address */
+ mov %g1, %o0 /* ARG0: virtual address */
mov %g5, %o1 /* ARG1: mmu context */
mov HV_MMU_ALL, %o2 /* ARG2: flags */
srlx %o0, PAGE_SHIFT, %o0
@@ -673,8 +739,6 @@ __hypervisor_xcall_flush_tlb_pending: /* 21 insns */
mov HV_MMU_UNMAP_ADDR_TRAP, %g6
brnz,a,pn %o0, __hypervisor_tlb_xcall_error
mov %o0, %g5
- brnz,pt %g1, 1b
- nop
mov %g2, %o0
mov %g3, %o1
mov %g4, %o2
@@ -757,6 +821,13 @@ hypervisor_patch_cachetlbops:
call tlb_patch_one
mov 10, %o2
+ sethi %hi(__flush_tlb_page), %o0
+ or %o0, %lo(__flush_tlb_page), %o0
+ sethi %hi(__hypervisor_flush_tlb_page), %o1
+ or %o1, %lo(__hypervisor_flush_tlb_page), %o1
+ call tlb_patch_one
+ mov 11, %o2
+
sethi %hi(__flush_tlb_pending), %o0
or %o0, %lo(__flush_tlb_pending), %o0
sethi %hi(__hypervisor_flush_tlb_pending), %o1
@@ -788,12 +859,12 @@ hypervisor_patch_cachetlbops:
call tlb_patch_one
mov 21, %o2
- sethi %hi(xcall_flush_tlb_pending), %o0
- or %o0, %lo(xcall_flush_tlb_pending), %o0
- sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1
- or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
+ sethi %hi(xcall_flush_tlb_page), %o0
+ or %o0, %lo(xcall_flush_tlb_page), %o0
+ sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1
+ or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
call tlb_patch_one
- mov 21, %o2
+ mov 17, %o2
sethi %hi(xcall_flush_tlb_kernel_range), %o0
or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
diff --git a/trunk/arch/tile/include/asm/irqflags.h b/trunk/arch/tile/include/asm/irqflags.h
index 241c0bb60b12..c96f9bbb760d 100644
--- a/trunk/arch/tile/include/asm/irqflags.h
+++ b/trunk/arch/tile/include/asm/irqflags.h
@@ -40,7 +40,15 @@
#include <arch/interrupts.h>
#include <arch/chip.h>
-/* Set and clear kernel interrupt masks. */
+/*
+ * Set and clear kernel interrupt masks.
+ *
+ * NOTE: __insn_mtspr() is a compiler builtin marked as a memory
+ * clobber. We rely on it being equivalent to a compiler barrier in
+ * this code since arch_local_irq_save() and friends must act as
+ * compiler barriers. This compiler semantic is baked into enough
+ * places that the compiler will maintain it going forward.
+ */
#if CHIP_HAS_SPLIT_INTR_MASK()
#if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32
# error Fix assumptions about which word various interrupts are in
diff --git a/trunk/arch/tile/kernel/setup.c b/trunk/arch/tile/kernel/setup.c
index d1e15f7b59c6..7a5aa1a7864e 100644
--- a/trunk/arch/tile/kernel/setup.c
+++ b/trunk/arch/tile/kernel/setup.c
@@ -1004,15 +1004,8 @@ void __cpuinit setup_cpu(int boot)
#ifdef CONFIG_BLK_DEV_INITRD
-/*
- * Note that the kernel can potentially support other compression
- * techniques than gz, though we don't do so by default. If we ever
- * decide to do so we can either look for other filename extensions,
- * or just allow a file with this name to be compressed with an
- * arbitrary compressor (somewhat counterintuitively).
- */
static int __initdata set_initramfs_file;
-static char __initdata initramfs_file[128] = "initramfs.cpio.gz";
+static char __initdata initramfs_file[128] = "initramfs";
static int __init setup_initramfs_file(char *str)
{
@@ -1026,9 +1019,9 @@ static int __init setup_initramfs_file(char *str)
early_param("initramfs_file", setup_initramfs_file);
/*
- * We look for an "initramfs.cpio.gz" file in the hvfs.
- * If there is one, we allocate some memory for it and it will be
- * unpacked to the initramfs.
+ * We look for a file called "initramfs" in the hvfs. If there is one, we
+ * allocate some memory for it and it will be unpacked to the initramfs.
+ * If it's compressed, the initrd code will uncompress it first.
*/
static void __init load_hv_initrd(void)
{
@@ -1038,10 +1031,16 @@ static void __init load_hv_initrd(void)
fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
if (fd == HV_ENOENT) {
- if (set_initramfs_file)
+ if (set_initramfs_file) {
pr_warning("No such hvfs initramfs file '%s'\n",
initramfs_file);
- return;
+ return;
+ } else {
+ /* Try old backwards-compatible name. */
+ fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz");
+ if (fd == HV_ENOENT)
+ return;
+ }
}
BUG_ON(fd < 0);
stat = hv_fs_fstat(fd);
diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig
index 70c0f3da0476..15b5cef4aa38 100644
--- a/trunk/arch/x86/Kconfig
+++ b/trunk/arch/x86/Kconfig
@@ -1549,6 +1549,7 @@ config X86_SMAP
config EFI
bool "EFI runtime service support"
depends on ACPI
+ select UCS2_STRING
---help---
This enables the kernel to use EFI runtime services that are
available (such as the EFI variable services).
diff --git a/trunk/arch/x86/boot/compressed/Makefile b/trunk/arch/x86/boot/compressed/Makefile
index 8a84501acb1b..5ef205c5f37b 100644
--- a/trunk/arch/x86/boot/compressed/Makefile
+++ b/trunk/arch/x86/boot/compressed/Makefile
@@ -4,7 +4,7 @@
# create a compressed vmlinux image from the original vmlinux
#
-targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o
+targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo
KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
@@ -29,7 +29,6 @@ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \
$(obj)/piggy.o
$(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
-$(obj)/efi_stub_$(BITS).o: KBUILD_CLFAGS += -fshort-wchar -mno-red-zone
ifeq ($(CONFIG_EFI_STUB), y)
VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o
@@ -43,7 +42,7 @@ OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
-targets += vmlinux.bin.all vmlinux.relocs
+targets += $(patsubst $(obj)/%,%,$(VMLINUX_OBJS)) vmlinux.bin.all vmlinux.relocs
CMD_RELOCS = arch/x86/tools/relocs
quiet_cmd_relocs = RELOCS $@
diff --git a/trunk/arch/x86/boot/compressed/eboot.c b/trunk/arch/x86/boot/compressed/eboot.c
index c205035a6b96..35ee62fccf98 100644
--- a/trunk/arch/x86/boot/compressed/eboot.c
+++ b/trunk/arch/x86/boot/compressed/eboot.c
@@ -251,6 +251,51 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size)
*size = len;
}
+static efi_status_t setup_efi_vars(struct boot_params *params)
+{
+ struct setup_data *data;
+ struct efi_var_bootdata *efidata;
+ u64 store_size, remaining_size, var_size;
+ efi_status_t status;
+
+ if (sys_table->runtime->hdr.revision < EFI_2_00_SYSTEM_TABLE_REVISION)
+ return EFI_UNSUPPORTED;
+
+ data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
+
+ while (data && data->next)
+ data = (struct setup_data *)(unsigned long)data->next;
+
+ status = efi_call_phys4((void *)sys_table->runtime->query_variable_info,
+ EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS, &store_size,
+ &remaining_size, &var_size);
+
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_call_phys3(sys_table->boottime->allocate_pool,
+ EFI_LOADER_DATA, sizeof(*efidata), &efidata);
+
+ if (status != EFI_SUCCESS)
+ return status;
+
+ efidata->data.type = SETUP_EFI_VARS;
+ efidata->data.len = sizeof(struct efi_var_bootdata) -
+ sizeof(struct setup_data);
+ efidata->data.next = 0;
+ efidata->store_size = store_size;
+ efidata->remaining_size = remaining_size;
+ efidata->max_var_size = var_size;
+
+ if (data)
+ data->next = (unsigned long)efidata;
+ else
+ params->hdr.setup_data = (unsigned long)efidata;
+
+}
+
static efi_status_t setup_efi_pci(struct boot_params *params)
{
efi_pci_io_protocol *pci;
@@ -1157,6 +1202,8 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table,
setup_graphics(boot_params);
+ setup_efi_vars(boot_params);
+
setup_efi_pci(boot_params);
status = efi_call_phys3(sys_table->boottime->allocate_pool,
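Editorial note on the hunk above: setup_efi_vars() publishes the QueryVariableInfo() numbers by walking the boot_params setup_data chain to its tail and appending a SETUP_EFI_VARS node. Below is a minimal user-space sketch of that append pattern only, assuming ordinary pointers in place of the physical addresses the real setup_data.next field holds and ignoring the EFI pool allocation; it is not kernel code.

#include <stdint.h>
#include <stdio.h>

struct setup_data_sketch {
	struct setup_data_sketch *next;   /* real code: u64 physical address */
	uint32_t type;
	uint32_t len;
};

static void setup_data_append(struct setup_data_sketch **head,
			      struct setup_data_sketch *node)
{
	struct setup_data_sketch *cur = *head;

	node->next = NULL;
	if (!cur) {                 /* empty chain: node becomes the head,    */
		*head = node;       /* mirroring params->hdr.setup_data = ... */
		return;
	}
	while (cur->next)           /* otherwise walk to the tail ...         */
		cur = cur->next;
	cur->next = node;           /* ... and hook the new node on           */
}

int main(void)
{
	struct setup_data_sketch *head = NULL, a = {0}, b = {0};

	setup_data_append(&head, &a);
	setup_data_append(&head, &b);
	printf("%s\n", head == &a && a.next == &b ? "chained" : "broken");
	return 0;
}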
diff --git a/trunk/arch/x86/include/asm/efi.h b/trunk/arch/x86/include/asm/efi.h
index 60c89f30c727..2fb5d5884e23 100644
--- a/trunk/arch/x86/include/asm/efi.h
+++ b/trunk/arch/x86/include/asm/efi.h
@@ -102,6 +102,13 @@ extern void efi_call_phys_epilog(void);
extern void efi_unmap_memmap(void);
extern void efi_memory_uc(u64 addr, unsigned long size);
+struct efi_var_bootdata {
+ struct setup_data data;
+ u64 store_size;
+ u64 remaining_size;
+ u64 max_var_size;
+};
+
#ifdef CONFIG_EFI
static inline bool efi_is_native(void)
diff --git a/trunk/arch/x86/include/asm/paravirt.h b/trunk/arch/x86/include/asm/paravirt.h
index 5edd1742cfd0..7361e47db79f 100644
--- a/trunk/arch/x86/include/asm/paravirt.h
+++ b/trunk/arch/x86/include/asm/paravirt.h
@@ -703,7 +703,10 @@ static inline void arch_leave_lazy_mmu_mode(void)
PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}
-void arch_flush_lazy_mmu_mode(void);
+static inline void arch_flush_lazy_mmu_mode(void)
+{
+ PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
+}
static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
phys_addr_t phys, pgprot_t flags)
diff --git a/trunk/arch/x86/include/asm/paravirt_types.h b/trunk/arch/x86/include/asm/paravirt_types.h
index 142236ed83af..b3b0ec1dac86 100644
--- a/trunk/arch/x86/include/asm/paravirt_types.h
+++ b/trunk/arch/x86/include/asm/paravirt_types.h
@@ -91,6 +91,7 @@ struct pv_lazy_ops {
/* Set deferred update mode, used for batching operations. */
void (*enter)(void);
void (*leave)(void);
+ void (*flush)(void);
};
struct pv_time_ops {
@@ -679,6 +680,7 @@ void paravirt_end_context_switch(struct task_struct *next);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
+void paravirt_flush_lazy_mmu(void);
void _paravirt_nop(void);
u32 _paravirt_ident_32(u32);
diff --git a/trunk/arch/x86/include/asm/syscall.h b/trunk/arch/x86/include/asm/syscall.h
index 1ace47b62592..2e188d68397c 100644
--- a/trunk/arch/x86/include/asm/syscall.h
+++ b/trunk/arch/x86/include/asm/syscall.h
@@ -29,13 +29,13 @@ extern const unsigned long sys_call_table[];
*/
static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
{
- return regs->orig_ax & __SYSCALL_MASK;
+ return regs->orig_ax;
}
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
- regs->ax = regs->orig_ax & __SYSCALL_MASK;
+ regs->ax = regs->orig_ax;
}
static inline long syscall_get_error(struct task_struct *task,
diff --git a/trunk/arch/x86/include/asm/tlb.h b/trunk/arch/x86/include/asm/tlb.h
index 4fef20773b8f..c7797307fc2b 100644
--- a/trunk/arch/x86/include/asm/tlb.h
+++ b/trunk/arch/x86/include/asm/tlb.h
@@ -7,7 +7,7 @@
#define tlb_flush(tlb) \
{ \
- if (tlb->fullmm == 0) \
+ if (!tlb->fullmm && !tlb->need_flush_all) \
flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \
else \
flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \
diff --git a/trunk/arch/x86/include/uapi/asm/bootparam.h b/trunk/arch/x86/include/uapi/asm/bootparam.h
index c15ddaf90710..08744242b8d2 100644
--- a/trunk/arch/x86/include/uapi/asm/bootparam.h
+++ b/trunk/arch/x86/include/uapi/asm/bootparam.h
@@ -6,6 +6,7 @@
#define SETUP_E820_EXT 1
#define SETUP_DTB 2
#define SETUP_PCI 3
+#define SETUP_EFI_VARS 4
/* ram_size flags */
#define RAMDISK_IMAGE_START_MASK 0x07FF
diff --git a/trunk/arch/x86/kernel/cpu/mshyperv.c b/trunk/arch/x86/kernel/cpu/mshyperv.c
index a7d26d83fb70..8f4be53ea04b 100644
--- a/trunk/arch/x86/kernel/cpu/mshyperv.c
+++ b/trunk/arch/x86/kernel/cpu/mshyperv.c
@@ -35,13 +35,6 @@ static bool __init ms_hyperv_platform(void)
if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
return false;
- /*
- * Xen emulates Hyper-V to support enlightened Windows.
- * Check to see first if we are on a Xen Hypervisor.
- */
- if (xen_cpuid_base())
- return false;
-
cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS,
&eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]);
@@ -82,12 +75,6 @@ static void __init ms_hyperv_init_platform(void)
if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
-#if IS_ENABLED(CONFIG_HYPERV)
- /*
- * Setup the IDT for hypervisor callback.
- */
- alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
-#endif
}
const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
@@ -103,6 +90,11 @@ static irq_handler_t vmbus_isr;
void hv_register_vmbus_handler(int irq, irq_handler_t handler)
{
+ /*
+ * Setup the IDT for hypervisor callback.
+ */
+ alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
+
vmbus_irq = irq;
vmbus_isr = handler;
}
diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel.c b/trunk/arch/x86/kernel/cpu/perf_event_intel.c
index dab7580c47ae..cc45deb791b0 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/trunk/arch/x86/kernel/cpu/perf_event_intel.c
@@ -153,8 +153,14 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly =
};
static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
- INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
- INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
+ INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
+ INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
+ EVENT_EXTRA_END
+};
+
+static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
+ INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
+ INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
EVENT_EXTRA_END
};
@@ -2097,7 +2103,10 @@ __init int intel_pmu_init(void)
x86_pmu.event_constraints = intel_snb_event_constraints;
x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
- x86_pmu.extra_regs = intel_snb_extra_regs;
+ if (boot_cpu_data.x86_model == 45)
+ x86_pmu.extra_regs = intel_snbep_extra_regs;
+ else
+ x86_pmu.extra_regs = intel_snb_extra_regs;
/* all extra regs are per-cpu when HT is on */
x86_pmu.er_flags |= ERF_HAS_RSP_1;
x86_pmu.er_flags |= ERF_NO_HT_SHARING;
@@ -2123,7 +2132,10 @@ __init int intel_pmu_init(void)
x86_pmu.event_constraints = intel_ivb_event_constraints;
x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
- x86_pmu.extra_regs = intel_snb_extra_regs;
+ if (boot_cpu_data.x86_model == 62)
+ x86_pmu.extra_regs = intel_snbep_extra_regs;
+ else
+ x86_pmu.extra_regs = intel_snb_extra_regs;
/* all extra regs are per-cpu when HT is on */
x86_pmu.er_flags |= ERF_HAS_RSP_1;
x86_pmu.er_flags |= ERF_NO_HT_SHARING;
diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c b/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c
index b05a575d56f4..26830f3af0df 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -314,10 +314,11 @@ int intel_pmu_drain_bts_buffer(void)
if (top <= at)
return 0;
+ memset(&regs, 0, sizeof(regs));
+
ds->bts_index = ds->bts_buffer_base;
perf_sample_data_init(&data, 0, event->hw.last_period);
- regs.ip = 0;
/*
* Prepare a generic sample, i.e. fill in the invariant fields.
diff --git a/trunk/arch/x86/kernel/microcode_core_early.c b/trunk/arch/x86/kernel/microcode_core_early.c
index 577db8417d15..833d51d6ee06 100644
--- a/trunk/arch/x86/kernel/microcode_core_early.c
+++ b/trunk/arch/x86/kernel/microcode_core_early.c
@@ -45,9 +45,6 @@ static int __cpuinit x86_vendor(void)
u32 eax = 0x00000000;
u32 ebx, ecx = 0, edx;
- if (!have_cpuid_p())
- return X86_VENDOR_UNKNOWN;
-
native_cpuid(&eax, &ebx, &ecx, &edx);
if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
@@ -59,18 +56,45 @@ static int __cpuinit x86_vendor(void)
return X86_VENDOR_UNKNOWN;
}
+static int __cpuinit x86_family(void)
+{
+ u32 eax = 0x00000001;
+ u32 ebx, ecx = 0, edx;
+ int x86;
+
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+
+ x86 = (eax >> 8) & 0xf;
+ if (x86 == 15)
+ x86 += (eax >> 20) & 0xff;
+
+ return x86;
+}
+
void __init load_ucode_bsp(void)
{
- int vendor = x86_vendor();
+ int vendor, x86;
+
+ if (!have_cpuid_p())
+ return;
- if (vendor == X86_VENDOR_INTEL)
+ vendor = x86_vendor();
+ x86 = x86_family();
+
+ if (vendor == X86_VENDOR_INTEL && x86 >= 6)
load_ucode_intel_bsp();
}
void __cpuinit load_ucode_ap(void)
{
- int vendor = x86_vendor();
+ int vendor, x86;
+
+ if (!have_cpuid_p())
+ return;
+
+ vendor = x86_vendor();
+ x86 = x86_family();
- if (vendor == X86_VENDOR_INTEL)
+ if (vendor == X86_VENDOR_INTEL && x86 >= 6)
load_ucode_intel_ap();
}
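Editorial note: the x86_family() helper added above follows the usual CPUID leaf-1 convention: the base family is EAX bits 11:8, and only when that field reads 0xF is the extended family (bits 27:20) added on top. A standalone restatement with two illustrative signatures; the example values are mine, not from the patch.

#include <stdio.h>

/* Same derivation as x86_family() in the hunk above. */
static int family_from_cpuid_eax(unsigned int eax)
{
	int x86 = (eax >> 8) & 0xf;

	if (x86 == 15)
		x86 += (eax >> 20) & 0xff;
	return x86;
}

int main(void)
{
	/* 0x000306A9: an Intel family-6 signature (IvyBridge-style). */
	printf("%d\n", family_from_cpuid_eax(0x000306A9));   /* prints 6  */
	/* 0x00100F43: an AMD family-10h-style signature, 0xF + 0x1 = 16. */
	printf("%d\n", family_from_cpuid_eax(0x00100F43));    /* prints 16 */
	return 0;
}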
diff --git a/trunk/arch/x86/kernel/paravirt.c b/trunk/arch/x86/kernel/paravirt.c
index 17fff18a1031..8bfb335f74bb 100644
--- a/trunk/arch/x86/kernel/paravirt.c
+++ b/trunk/arch/x86/kernel/paravirt.c
@@ -263,6 +263,18 @@ void paravirt_leave_lazy_mmu(void)
leave_lazy(PARAVIRT_LAZY_MMU);
}
+void paravirt_flush_lazy_mmu(void)
+{
+ preempt_disable();
+
+ if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+ arch_leave_lazy_mmu_mode();
+ arch_enter_lazy_mmu_mode();
+ }
+
+ preempt_enable();
+}
+
void paravirt_start_context_switch(struct task_struct *prev)
{
BUG_ON(preemptible());
@@ -292,18 +304,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
return this_cpu_read(paravirt_lazy_mode);
}
-void arch_flush_lazy_mmu_mode(void)
-{
- preempt_disable();
-
- if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
- arch_leave_lazy_mmu_mode();
- arch_enter_lazy_mmu_mode();
- }
-
- preempt_enable();
-}
-
struct pv_info pv_info = {
.name = "bare hardware",
.paravirt_enabled = 0,
@@ -475,6 +475,7 @@ struct pv_mmu_ops pv_mmu_ops = {
.lazy_mode = {
.enter = paravirt_nop,
.leave = paravirt_nop,
+ .flush = paravirt_nop,
},
.set_fixmap = native_set_fixmap,
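Editorial note: after this refactor arch_flush_lazy_mmu_mode() is just another pv-op; bare hardware gets paravirt_nop, while Xen and lguest (later in this patch) install paravirt_flush_lazy_mmu(), which keeps the old behaviour of leaving and immediately re-entering lazy MMU mode so batched page-table updates are forced out. A minimal sketch of that leave/enter idiom, using stand-in types rather than the real pv_mmu_ops.

#include <stdio.h>

enum lazy_mode { LAZY_NONE, LAZY_MMU };

struct lazy_ops {
	void (*enter)(void);
	void (*leave)(void);
};

static void noisy_enter(void) { puts("enter: start batching updates"); }
static void noisy_leave(void) { puts("leave: issue batched updates"); }

/* Mirrors the shape of paravirt_flush_lazy_mmu(): a no-op unless we are
 * currently inside lazy MMU mode. */
static void flush_lazy_mmu(enum lazy_mode mode, const struct lazy_ops *ops)
{
	if (mode == LAZY_MMU) {
		ops->leave();   /* push out whatever has been queued so far */
		ops->enter();   /* and re-open the batching window          */
	}
}

int main(void)
{
	const struct lazy_ops ops = { noisy_enter, noisy_leave };

	flush_lazy_mmu(LAZY_MMU, &ops);
	return 0;
}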
diff --git a/trunk/arch/x86/kernel/setup.c b/trunk/arch/x86/kernel/setup.c
index 90d8cc930f5e..fae9134a2de9 100644
--- a/trunk/arch/x86/kernel/setup.c
+++ b/trunk/arch/x86/kernel/setup.c
@@ -507,11 +507,14 @@ static void __init memblock_x86_reserve_range_setup_data(void)
/*
* Keep the crash kernel below this limit. On 32 bits earlier kernels
* would limit the kernel to the low 512 MiB due to mapping restrictions.
+ * On 64bit, old kexec-tools need it to be under 896MiB.
*/
#ifdef CONFIG_X86_32
-# define CRASH_KERNEL_ADDR_MAX (512 << 20)
+# define CRASH_KERNEL_ADDR_LOW_MAX (512 << 20)
+# define CRASH_KERNEL_ADDR_HIGH_MAX (512 << 20)
#else
-# define CRASH_KERNEL_ADDR_MAX MAXMEM
+# define CRASH_KERNEL_ADDR_LOW_MAX (896UL<<20)
+# define CRASH_KERNEL_ADDR_HIGH_MAX MAXMEM
#endif
static void __init reserve_crashkernel_low(void)
@@ -521,19 +524,35 @@ static void __init reserve_crashkernel_low(void)
unsigned long long low_base = 0, low_size = 0;
unsigned long total_low_mem;
unsigned long long base;
+ bool auto_set = false;
int ret;
total_low_mem = memblock_mem_size(1UL<<(32-PAGE_SHIFT));
+ /* crashkernel=Y,low */
ret = parse_crashkernel_low(boot_command_line, total_low_mem,
&low_size, &base);
- if (ret != 0 || low_size <= 0)
- return;
+ if (ret != 0) {
+ /*
+ * two parts from lib/swiotlb.c:
+ * swiotlb size: user specified with swiotlb= or default.
+ * swiotlb overflow buffer: now hardcoded to 32k.
+ * We round it to 8M for other buffers that
+ * may need to stay low too.
+ */
+ low_size = swiotlb_size_or_default() + (8UL<<20);
+ auto_set = true;
+ } else {
+ /* passed with crashkernel=0,low ? */
+ if (!low_size)
+ return;
+ }
low_base = memblock_find_in_range(low_size, (1ULL<<32),
low_size, alignment);
if (!low_base) {
- pr_info("crashkernel low reservation failed - No suitable area found.\n");
+ if (!auto_set)
+ pr_info("crashkernel low reservation failed - No suitable area found.\n");
return;
}
@@ -554,14 +573,22 @@ static void __init reserve_crashkernel(void)
const unsigned long long alignment = 16<<20; /* 16M */
unsigned long long total_mem;
unsigned long long crash_size, crash_base;
+ bool high = false;
int ret;
total_mem = memblock_phys_mem_size();
+ /* crashkernel=XM */
ret = parse_crashkernel(boot_command_line, total_mem,
&crash_size, &crash_base);
- if (ret != 0 || crash_size <= 0)
- return;
+ if (ret != 0 || crash_size <= 0) {
+ /* crashkernel=X,high */
+ ret = parse_crashkernel_high(boot_command_line, total_mem,
+ &crash_size, &crash_base);
+ if (ret != 0 || crash_size <= 0)
+ return;
+ high = true;
+ }
/* 0 means: find the address automatically */
if (crash_base <= 0) {
@@ -569,7 +596,9 @@ static void __init reserve_crashkernel(void)
* kexec wants the bzImage to be below CRASH_KERNEL_ADDR_MAX
*/
crash_base = memblock_find_in_range(alignment,
- CRASH_KERNEL_ADDR_MAX, crash_size, alignment);
+ high ? CRASH_KERNEL_ADDR_HIGH_MAX :
+ CRASH_KERNEL_ADDR_LOW_MAX,
+ crash_size, alignment);
if (!crash_base) {
pr_info("crashkernel reservation failed - No suitable area found.\n");
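Editorial note: taken together, the hunks above mean plain crashkernel=X keeps the reservation under 896 MiB so old kexec-tools can still load the crash image, crashkernel=X,high searches all the way up to MAXMEM, and reserve_crashkernel_low() sizes an automatic low reservation (roughly the swiotlb footprint plus 8 MiB) when no explicit crashkernel=Y,low is given. A small sketch of the cap selection only; the MAXMEM stand-in and the printed sizes are illustrative, not kernel constants beyond the two limits quoted in the hunk.

#include <stdbool.h>
#include <stdio.h>

#define CRASH_KERNEL_ADDR_LOW_MAX  (896ULL << 20)  /* old kexec-tools limit */
#define CRASH_KERNEL_ADDR_HIGH_MAX (~0ULL)         /* stand-in for MAXMEM   */

/* crashkernel=X      -> high == false -> search below 896 MiB
 * crashkernel=X,high -> high == true  -> search below MAXMEM    */
static unsigned long long crash_addr_max(bool high)
{
	return high ? CRASH_KERNEL_ADDR_HIGH_MAX : CRASH_KERNEL_ADDR_LOW_MAX;
}

int main(void)
{
	printf("crashkernel=256M      cap: %llu MiB\n",
	       crash_addr_max(false) >> 20);
	printf("crashkernel=256M,high cap: %s\n",
	       crash_addr_max(true) == CRASH_KERNEL_ADDR_HIGH_MAX ? "MAXMEM" : "?");
	return 0;
}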
diff --git a/trunk/arch/x86/kvm/lapic.c b/trunk/arch/x86/kvm/lapic.c
index 02b51dd4e4ad..f77df1c5de6e 100644
--- a/trunk/arch/x86/kvm/lapic.c
+++ b/trunk/arch/x86/kvm/lapic.c
@@ -1857,7 +1857,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
if (!pv_eoi_enabled(vcpu))
return 0;
return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
- addr);
+ addr, sizeof(u8));
}
void kvm_lapic_init(void)
diff --git a/trunk/arch/x86/kvm/x86.c b/trunk/arch/x86/kvm/x86.c
index f19ac0aca60d..e1721324c271 100644
--- a/trunk/arch/x86/kvm/x86.c
+++ b/trunk/arch/x86/kvm/x86.c
@@ -1823,7 +1823,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
return 0;
}
- if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
+ sizeof(u32)))
return 1;
vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
@@ -1952,12 +1953,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
gpa_offset = data & ~(PAGE_MASK | 1);
- /* Check that the address is 32-byte aligned. */
- if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
- break;
-
if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
- &vcpu->arch.pv_time, data & ~1ULL))
+ &vcpu->arch.pv_time, data & ~1ULL,
+ sizeof(struct pvclock_vcpu_time_info)))
vcpu->arch.pv_time_enabled = false;
else
vcpu->arch.pv_time_enabled = true;
@@ -1977,7 +1975,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
- data & KVM_STEAL_VALID_BITS))
+ data & KVM_STEAL_VALID_BITS,
+ sizeof(struct kvm_steal_time)))
return 1;
vcpu->arch.st.msr_val = data;
diff --git a/trunk/arch/x86/lguest/boot.c b/trunk/arch/x86/lguest/boot.c
index 1cbd89ca5569..7114c63f047d 100644
--- a/trunk/arch/x86/lguest/boot.c
+++ b/trunk/arch/x86/lguest/boot.c
@@ -1334,6 +1334,7 @@ __init void lguest_init(void)
pv_mmu_ops.read_cr3 = lguest_read_cr3;
pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
+ pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu;
pv_mmu_ops.pte_update = lguest_pte_update;
pv_mmu_ops.pte_update_defer = lguest_pte_update;
diff --git a/trunk/arch/x86/mm/fault.c b/trunk/arch/x86/mm/fault.c
index 2b97525246d4..0e883364abb5 100644
--- a/trunk/arch/x86/mm/fault.c
+++ b/trunk/arch/x86/mm/fault.c
@@ -378,10 +378,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
if (pgd_none(*pgd_ref))
return -1;
- if (pgd_none(*pgd))
+ if (pgd_none(*pgd)) {
set_pgd(pgd, *pgd_ref);
- else
+ arch_flush_lazy_mmu_mode();
+ } else {
BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+ }
/*
* Below here mismatches are bugs because these lower tables
diff --git a/trunk/arch/x86/mm/pageattr-test.c b/trunk/arch/x86/mm/pageattr-test.c
index b0086567271c..0e38951e65eb 100644
--- a/trunk/arch/x86/mm/pageattr-test.c
+++ b/trunk/arch/x86/mm/pageattr-test.c
@@ -68,7 +68,7 @@ static int print_split(struct split_state *s)
s->gpg++;
i += GPS/PAGE_SIZE;
} else if (level == PG_LEVEL_2M) {
- if (!(pte_val(*pte) & _PAGE_PSE)) {
+ if ((pte_val(*pte) & _PAGE_PRESENT) && !(pte_val(*pte) & _PAGE_PSE)) {
printk(KERN_ERR
"%lx level %d but not PSE %Lx\n",
addr, level, (u64)pte_val(*pte));
diff --git a/trunk/arch/x86/mm/pageattr.c b/trunk/arch/x86/mm/pageattr.c
index 091934e1d0d9..fb4e73ec24d8 100644
--- a/trunk/arch/x86/mm/pageattr.c
+++ b/trunk/arch/x86/mm/pageattr.c
@@ -467,7 +467,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
* We are safe now. Check whether the new pgprot is the same:
*/
old_pte = *kpte;
- old_prot = new_prot = req_prot = pte_pgprot(old_pte);
+ old_prot = req_prot = pte_pgprot(old_pte);
pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
@@ -478,12 +478,12 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
* a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
* for the ancient hardware that doesn't support it.
*/
- if (pgprot_val(new_prot) & _PAGE_PRESENT)
- pgprot_val(new_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
+ if (pgprot_val(req_prot) & _PAGE_PRESENT)
+ pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
else
- pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
+ pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
- new_prot = canon_pgprot(new_prot);
+ req_prot = canon_pgprot(req_prot);
/*
* old_pte points to the large page base address. So we need
@@ -1413,6 +1413,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
* but that can deadlock->flush only current cpu:
*/
__flush_tlb_all();
+
+ arch_flush_lazy_mmu_mode();
}
#ifdef CONFIG_HIBERNATION
diff --git a/trunk/arch/x86/mm/pgtable.c b/trunk/arch/x86/mm/pgtable.c
index 193350b51f90..17fda6a8b3c2 100644
--- a/trunk/arch/x86/mm/pgtable.c
+++ b/trunk/arch/x86/mm/pgtable.c
@@ -58,6 +58,13 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
+ /*
+ * NOTE! For PAE, any changes to the top page-directory-pointer-table
+ * entries need a full cr3 reload to flush.
+ */
+#ifdef CONFIG_X86_PAE
+ tlb->need_flush_all = 1;
+#endif
tlb_remove_page(tlb, virt_to_page(pmd));
}
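Editorial note: this pgtable.c hunk works in tandem with the asm/tlb.h change earlier in the patch: freeing a PAE pmd marks the gather with need_flush_all, and tlb_flush() then skips the ranged flush and takes the full flush_tlb_mm_range(..., TLB_FLUSH_ALL, ...) path, which reloads cr3. The decision itself, restated as a tiny predicate over a stand-in struct.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the two mmu_gather bits the updated tlb_flush() looks at. */
struct mmu_gather_sketch {
	bool fullmm;          /* whole address space being torn down    */
	bool need_flush_all;  /* set by ___pmd_free_tlb() on PAE above  */
};

static bool use_ranged_flush(const struct mmu_gather_sketch *tlb)
{
	return !tlb->fullmm && !tlb->need_flush_all;
}

int main(void)
{
	struct mmu_gather_sketch partial = { false, false };
	struct mmu_gather_sketch pae_pmd_freed = { false, true };

	printf("partial unmap: %s flush\n",
	       use_ranged_flush(&partial) ? "ranged" : "full");
	printf("PAE pmd freed: %s flush\n",
	       use_ranged_flush(&pae_pmd_freed) ? "ranged" : "full");
	return 0;
}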
diff --git a/trunk/arch/x86/pci/common.c b/trunk/arch/x86/pci/common.c
index 901177d75ff5..305c68b8d538 100644
--- a/trunk/arch/x86/pci/common.c
+++ b/trunk/arch/x86/pci/common.c
@@ -6,6 +6,7 @@
#include
#include
+#include
#include
#include
#include
@@ -170,6 +171,16 @@ void pcibios_fixup_bus(struct pci_bus *b)
pcibios_fixup_device_resources(dev);
}
+void pcibios_add_bus(struct pci_bus *bus)
+{
+ acpi_pci_add_bus(bus);
+}
+
+void pcibios_remove_bus(struct pci_bus *bus)
+{
+ acpi_pci_remove_bus(bus);
+}
+
/*
* Only use DMI information to set this if nothing was passed
* on the kernel command line (which was parsed earlier).
diff --git a/trunk/arch/x86/pci/xen.c b/trunk/arch/x86/pci/xen.c
index 94e76620460f..4a9be6ddf054 100644
--- a/trunk/arch/x86/pci/xen.c
+++ b/trunk/arch/x86/pci/xen.c
@@ -177,7 +177,7 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
goto error;
i = 0;
list_for_each_entry(msidesc, &dev->msi_list, list) {
- irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0,
+ irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i],
(type == PCI_CAP_ID_MSIX) ?
"pcifront-msi-x" :
"pcifront-msi",
@@ -244,7 +244,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
dev_dbg(&dev->dev,
"xen: msi already bound to pirq=%d\n", pirq);
}
- irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0,
+ irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
(type == PCI_CAP_ID_MSIX) ?
"msi-x" : "msi",
DOMID_SELF);
@@ -326,7 +326,7 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
}
ret = xen_bind_pirq_msi_to_irq(dev, msidesc,
- map_irq.pirq, map_irq.index,
+ map_irq.pirq,
(type == PCI_CAP_ID_MSIX) ?
"msi-x" : "msi",
domid);
diff --git a/trunk/arch/x86/platform/efi/efi.c b/trunk/arch/x86/platform/efi/efi.c
index 5f2ecaf3f9d8..e4a86a677ce1 100644
--- a/trunk/arch/x86/platform/efi/efi.c
+++ b/trunk/arch/x86/platform/efi/efi.c
@@ -41,6 +41,7 @@
#include
#include
#include
+#include
#include
#include
@@ -51,6 +52,13 @@
#define EFI_DEBUG 1
+/*
+ * There's some additional metadata associated with each
+ * variable. Intel's reference implementation is 60 bytes - bump that
+ * to account for potential alignment constraints
+ */
+#define VAR_METADATA_SIZE 64
+
struct efi __read_mostly efi = {
.mps = EFI_INVALID_TABLE_ADDR,
.acpi = EFI_INVALID_TABLE_ADDR,
@@ -69,6 +77,13 @@ struct efi_memory_map memmap;
static struct efi efi_phys __initdata;
static efi_system_table_t efi_systab __initdata;
+static u64 efi_var_store_size;
+static u64 efi_var_remaining_size;
+static u64 efi_var_max_var_size;
+static u64 boot_used_size;
+static u64 boot_var_size;
+static u64 active_size;
+
unsigned long x86_efi_facility;
/*
@@ -98,6 +113,15 @@ static int __init setup_add_efi_memmap(char *arg)
}
early_param("add_efi_memmap", setup_add_efi_memmap);
+static bool efi_no_storage_paranoia;
+
+static int __init setup_storage_paranoia(char *arg)
+{
+ efi_no_storage_paranoia = true;
+ return 0;
+}
+early_param("efi_no_storage_paranoia", setup_storage_paranoia);
+
static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
@@ -162,8 +186,53 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
efi_char16_t *name,
efi_guid_t *vendor)
{
- return efi_call_virt3(get_next_variable,
- name_size, name, vendor);
+ efi_status_t status;
+ static bool finished = false;
+ static u64 var_size;
+
+ status = efi_call_virt3(get_next_variable,
+ name_size, name, vendor);
+
+ if (status == EFI_NOT_FOUND) {
+ finished = true;
+ if (var_size < boot_used_size) {
+ boot_var_size = boot_used_size - var_size;
+ active_size += boot_var_size;
+ } else {
+ printk(KERN_WARNING FW_BUG "efi: Inconsistent initial sizes\n");
+ }
+ }
+
+ if (boot_used_size && !finished) {
+ unsigned long size;
+ u32 attr;
+ efi_status_t s;
+ void *tmp;
+
+ s = virt_efi_get_variable(name, vendor, &attr, &size, NULL);
+
+ if (s != EFI_BUFFER_TOO_SMALL || !size)
+ return status;
+
+ tmp = kmalloc(size, GFP_ATOMIC);
+
+ if (!tmp)
+ return status;
+
+ s = virt_efi_get_variable(name, vendor, &attr, &size, tmp);
+
+ if (s == EFI_SUCCESS && (attr & EFI_VARIABLE_NON_VOLATILE)) {
+ var_size += size;
+ var_size += ucs2_strsize(name, 1024);
+ active_size += size;
+ active_size += VAR_METADATA_SIZE;
+ active_size += ucs2_strsize(name, 1024);
+ }
+
+ kfree(tmp);
+ }
+
+ return status;
}
static efi_status_t virt_efi_set_variable(efi_char16_t *name,
@@ -172,9 +241,34 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
unsigned long data_size,
void *data)
{
- return efi_call_virt5(set_variable,
- name, vendor, attr,
- data_size, data);
+ efi_status_t status;
+ u32 orig_attr = 0;
+ unsigned long orig_size = 0;
+
+ status = virt_efi_get_variable(name, vendor, &orig_attr, &orig_size,
+ NULL);
+
+ if (status != EFI_BUFFER_TOO_SMALL)
+ orig_size = 0;
+
+ status = efi_call_virt5(set_variable,
+ name, vendor, attr,
+ data_size, data);
+
+ if (status == EFI_SUCCESS) {
+ if (orig_size) {
+ active_size -= orig_size;
+ active_size -= ucs2_strsize(name, 1024);
+ active_size -= VAR_METADATA_SIZE;
+ }
+ if (data_size) {
+ active_size += data_size;
+ active_size += ucs2_strsize(name, 1024);
+ active_size += VAR_METADATA_SIZE;
+ }
+ }
+
+ return status;
}
static efi_status_t virt_efi_query_variable_info(u32 attr,
@@ -682,6 +776,9 @@ void __init efi_init(void)
char vendor[100] = "unknown";
int i = 0;
void *tmp;
+ struct setup_data *data;
+ struct efi_var_bootdata *efi_var_data;
+ u64 pa_data;
#ifdef CONFIG_X86_32
if (boot_params.efi_info.efi_systab_hi ||
@@ -699,6 +796,22 @@ void __init efi_init(void)
if (efi_systab_init(efi_phys.systab))
return;
+ pa_data = boot_params.hdr.setup_data;
+ while (pa_data) {
+ data = early_ioremap(pa_data, sizeof(*efi_var_data));
+ if (data->type == SETUP_EFI_VARS) {
+ efi_var_data = (struct efi_var_bootdata *)data;
+
+ efi_var_store_size = efi_var_data->store_size;
+ efi_var_remaining_size = efi_var_data->remaining_size;
+ efi_var_max_var_size = efi_var_data->max_var_size;
+ }
+ pa_data = data->next;
+ early_iounmap(data, sizeof(*efi_var_data));
+ }
+
+ boot_used_size = efi_var_store_size - efi_var_remaining_size;
+
set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);
/*
@@ -999,3 +1112,48 @@ u64 efi_mem_attributes(unsigned long phys_addr)
}
return 0;
}
+
+/*
+ * Some firmware has serious problems when using more than 50% of the EFI
+ * variable store, i.e. it triggers bugs that can brick machines. Ensure that
+ * we never use more than this safe limit.
+ *
+ * Return EFI_SUCCESS if it is safe to write 'size' bytes to the variable
+ * store.
+ */
+efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
+{
+ efi_status_t status;
+ u64 storage_size, remaining_size, max_size;
+
+ status = efi.query_variable_info(attributes, &storage_size,
+ &remaining_size, &max_size);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ if (!max_size && remaining_size > size)
+ printk_once(KERN_ERR FW_BUG "Broken EFI implementation"
+ " is returning MaxVariableSize=0\n");
+ /*
+ * Some firmware implementations refuse to boot if there's insufficient
+ * space in the variable store. We account for that by refusing the
+ * write if permitting it would reduce the available space to under
+ * 50%. However, some firmware won't reclaim variable space until
+ * after the used (not merely the actively used) space drops below
+ * a threshold. We can approximate that case with the value calculated
+ * above. If both the firmware and our calculations indicate that the
+ * available space would drop below 50%, refuse the write.
+ */
+
+ if (!storage_size || size > remaining_size ||
+ (max_size && size > max_size))
+ return EFI_OUT_OF_RESOURCES;
+
+ if (!efi_no_storage_paranoia &&
+ ((active_size + size + VAR_METADATA_SIZE > storage_size / 2) &&
+ (remaining_size - size < storage_size / 2)))
+ return EFI_OUT_OF_RESOURCES;
+
+ return EFI_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(efi_query_variable_store);
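Editorial note on the bookkeeping above: every NON_VOLATILE variable seen during enumeration contributes its data size, its UCS-2 name size, and VAR_METADATA_SIZE to active_size, and efi_query_variable_store() then refuses a write that would, by both the firmware's remaining_size and our active_size estimate, push usage past half of the store. A standalone restatement with made-up numbers; the 64-byte constant mirrors VAR_METADATA_SIZE, and the max_size check and efi_no_storage_paranoia override are left out for brevity.

#include <stdbool.h>
#include <stdio.h>

#define VAR_METADATA_SIZE 64ULL   /* same fudge factor as in the hunk */

static bool write_allowed(unsigned long long storage_size,
			  unsigned long long remaining_size,
			  unsigned long long active_size,
			  unsigned long long size)
{
	if (!storage_size || size > remaining_size)
		return false;
	/* Refuse only when BOTH our estimate and the firmware's free-space
	 * figure say the write would leave less than half the store free. */
	if (active_size + size + VAR_METADATA_SIZE > storage_size / 2 &&
	    remaining_size - size < storage_size / 2)
		return false;
	return true;
}

int main(void)
{
	/* 64 KiB store, 20 KiB reported free, ~30 KiB tracked as active:
	 * a 4 KiB write fails both halves of the test and is refused. */
	printf("%s\n", write_allowed(65536, 20480, 30720, 4096)
		       ? "allowed" : "refused");
	return 0;
}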
diff --git a/trunk/arch/x86/xen/enlighten.c b/trunk/arch/x86/xen/enlighten.c
index c8e1c7b95c3b..ddbd54a9b845 100644
--- a/trunk/arch/x86/xen/enlighten.c
+++ b/trunk/arch/x86/xen/enlighten.c
@@ -31,6 +31,7 @@
#include
#include
#include
+#include
#include
#include
@@ -1306,6 +1307,55 @@ static const struct machine_ops xen_machine_ops __initconst = {
.emergency_restart = xen_emergency_restart,
};
+static void __init xen_boot_params_init_edd(void)
+{
+#if IS_ENABLED(CONFIG_EDD)
+ struct xen_platform_op op;
+ struct edd_info *edd_info;
+ u32 *mbr_signature;
+ unsigned nr;
+ int ret;
+
+ edd_info = boot_params.eddbuf;
+ mbr_signature = boot_params.edd_mbr_sig_buffer;
+
+ op.cmd = XENPF_firmware_info;
+
+ op.u.firmware_info.type = XEN_FW_DISK_INFO;
+ for (nr = 0; nr < EDDMAXNR; nr++) {
+ struct edd_info *info = edd_info + nr;
+
+ op.u.firmware_info.index = nr;
+ info->params.length = sizeof(info->params);
+ set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
+ &info->params);
+ ret = HYPERVISOR_dom0_op(&op);
+ if (ret)
+ break;
+
+#define C(x) info->x = op.u.firmware_info.u.disk_info.x
+ C(device);
+ C(version);
+ C(interface_support);
+ C(legacy_max_cylinder);
+ C(legacy_max_head);
+ C(legacy_sectors_per_track);
+#undef C
+ }
+ boot_params.eddbuf_entries = nr;
+
+ op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
+ for (nr = 0; nr < EDD_MBR_SIG_MAX; nr++) {
+ op.u.firmware_info.index = nr;
+ ret = HYPERVISOR_dom0_op(&op);
+ if (ret)
+ break;
+ mbr_signature[nr] = op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
+ }
+ boot_params.edd_mbr_sig_buf_entries = nr;
+#endif
+}
+
/*
* Set up the GDT and segment registers for -fstack-protector. Until
* we do this, we have to be careful not to call any stack-protected
@@ -1508,6 +1558,8 @@ asmlinkage void __init xen_start_kernel(void)
/* Avoid searching for BIOS MP tables */
x86_init.mpparse.find_smp_config = x86_init_noop;
x86_init.mpparse.get_smp_config = x86_init_uint_noop;
+
+ xen_boot_params_init_edd();
}
#ifdef CONFIG_PCI
/* PCI BIOS service won't work from a PV guest. */
@@ -1589,8 +1641,11 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
switch (action) {
case CPU_UP_PREPARE:
xen_vcpu_setup(cpu);
- if (xen_have_vector_callback)
+ if (xen_have_vector_callback) {
xen_init_lock_cpu(cpu);
+ if (xen_feature(XENFEAT_hvm_safe_pvclock))
+ xen_setup_timer(cpu);
+ }
break;
default:
break;
diff --git a/trunk/arch/x86/xen/mmu.c b/trunk/arch/x86/xen/mmu.c
index 6afbb2ca9a0a..e006c18d288a 100644
--- a/trunk/arch/x86/xen/mmu.c
+++ b/trunk/arch/x86/xen/mmu.c
@@ -1748,14 +1748,18 @@ static void *m2v(phys_addr_t maddr)
}
/* Set the page permissions on an identity-mapped pages */
-static void set_page_prot(void *addr, pgprot_t prot)
+static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
{
unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
pte_t pte = pfn_pte(pfn, prot);
- if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
+ if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
BUG();
}
+static void set_page_prot(void *addr, pgprot_t prot)
+{
+ return set_page_prot_flags(addr, prot, UVMF_NONE);
+}
#ifdef CONFIG_X86_32
static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
{
@@ -1839,12 +1843,12 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
unsigned long addr)
{
if (*pt_base == PFN_DOWN(__pa(addr))) {
- set_page_prot((void *)addr, PAGE_KERNEL);
+ set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
clear_page((void *)addr);
(*pt_base)++;
}
if (*pt_end == PFN_DOWN(__pa(addr))) {
- set_page_prot((void *)addr, PAGE_KERNEL);
+ set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
clear_page((void *)addr);
(*pt_end)--;
}
@@ -2196,6 +2200,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
.lazy_mode = {
.enter = paravirt_enter_lazy_mmu,
.leave = xen_leave_lazy_mmu,
+ .flush = paravirt_flush_lazy_mmu,
},
.set_fixmap = xen_set_fixmap,
diff --git a/trunk/arch/x86/xen/smp.c b/trunk/arch/x86/xen/smp.c
index 09ea61d2e02f..0d466d7c7175 100644
--- a/trunk/arch/x86/xen/smp.c
+++ b/trunk/arch/x86/xen/smp.c
@@ -144,6 +144,13 @@ static int xen_smp_intr_init(unsigned int cpu)
goto fail;
per_cpu(xen_callfuncsingle_irq, cpu) = rc;
+ /*
+ * The IRQ worker on PVHVM goes through the native path and uses the
+ * IPI mechanism.
+ */
+ if (xen_hvm_domain())
+ return 0;
+
callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
cpu,
@@ -167,6 +174,9 @@ static int xen_smp_intr_init(unsigned int cpu)
if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
NULL);
+ if (xen_hvm_domain())
+ return rc;
+
if (per_cpu(xen_irq_work, cpu) >= 0)
unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
@@ -418,7 +428,7 @@ static int xen_cpu_disable(void)
static void xen_cpu_die(unsigned int cpu)
{
- while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
+ while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ/10);
}
@@ -426,7 +436,8 @@ static void xen_cpu_die(unsigned int cpu)
unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
+ if (!xen_hvm_domain())
+ unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
xen_uninit_lock_cpu(cpu);
xen_teardown_timer(cpu);
}
@@ -657,11 +668,7 @@ static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
static void xen_hvm_cpu_die(unsigned int cpu)
{
- unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
+ xen_cpu_die(cpu);
native_cpu_die(cpu);
}
diff --git a/trunk/arch/x86/xen/spinlock.c b/trunk/arch/x86/xen/spinlock.c
index f7a080ef0354..8b54603ce816 100644
--- a/trunk/arch/x86/xen/spinlock.c
+++ b/trunk/arch/x86/xen/spinlock.c
@@ -364,6 +364,16 @@ void __cpuinit xen_init_lock_cpu(int cpu)
int irq;
const char *name;
+ WARN(per_cpu(lock_kicker_irq, cpu) > 0, "spinlock on CPU%d exists on IRQ%d!\n",
+ cpu, per_cpu(lock_kicker_irq, cpu));
+
+ /*
+ * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
+ * (xen: disable PV spinlocks on HVM)
+ */
+ if (xen_hvm_domain())
+ return;
+
name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
cpu,
@@ -382,11 +392,26 @@ void __cpuinit xen_init_lock_cpu(int cpu)
void xen_uninit_lock_cpu(int cpu)
{
+ /*
+ * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
+ * (xen: disable PV spinlocks on HVM)
+ */
+ if (xen_hvm_domain())
+ return;
+
unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
+ per_cpu(lock_kicker_irq, cpu) = -1;
}
void __init xen_init_spinlocks(void)
{
+ /*
+ * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
+ * (xen: disable PV spinlocks on HVM)
+ */
+ if (xen_hvm_domain())
+ return;
+
BUILD_BUG_ON(sizeof(struct xen_spinlock) > sizeof(arch_spinlock_t));
pv_lock_ops.spin_is_locked = xen_spin_is_locked;
diff --git a/trunk/arch/x86/xen/time.c b/trunk/arch/x86/xen/time.c
index 0296a9522501..3d88bfdf9e1c 100644
--- a/trunk/arch/x86/xen/time.c
+++ b/trunk/arch/x86/xen/time.c
@@ -377,7 +377,7 @@ static const struct clock_event_device xen_vcpuop_clockevent = {
static const struct clock_event_device *xen_clockevent =
&xen_timerop_clockevent;
-static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events);
+static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events) = { .irq = -1 };
static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
{
@@ -401,6 +401,9 @@ void xen_setup_timer(int cpu)
struct clock_event_device *evt;
int irq;
+ evt = &per_cpu(xen_clock_events, cpu);
+ WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
+
printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);
name = kasprintf(GFP_KERNEL, "timer%d", cpu);
@@ -413,7 +416,6 @@ void xen_setup_timer(int cpu)
IRQF_FORCE_RESUME,
name, NULL);
- evt = &per_cpu(xen_clock_events, cpu);
memcpy(evt, xen_clockevent, sizeof(*evt));
evt->cpumask = cpumask_of(cpu);
@@ -426,6 +428,7 @@ void xen_teardown_timer(int cpu)
BUG_ON(cpu == 0);
evt = &per_cpu(xen_clock_events, cpu);
unbind_from_irqhandler(evt->irq, NULL);
+ evt->irq = -1;
}
void xen_setup_cpu_clockevents(void)
@@ -497,7 +500,11 @@ static void xen_hvm_setup_cpu_clockevents(void)
{
int cpu = smp_processor_id();
xen_setup_runstate_info(cpu);
- xen_setup_timer(cpu);
+ /*
+ * xen_setup_timer(cpu) - snprintf is bad in atomic context. Hence
+ * doing it in xen_hvm_cpu_notify (which gets called by smp_init during
+ * early bootup and also during CPU hotplug events).
+ */
xen_setup_cpu_clockevents();
}
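Editorial note: the xen/time.c changes are all one pattern: seed the per-CPU clock event's irq field with -1 so a second xen_setup_timer() on the same CPU trips the WARN, and have teardown restore the sentinel so a later hotplug re-setup is legitimate. Sketched here with a plain array in place of the per-CPU variable; NR_CPUS_SKETCH and the helper names are illustrative only.

#include <stdio.h>

#define NR_CPUS_SKETCH 4

/* -1 is the "no IRQ bound yet" sentinel, as in the per-CPU initializer above. */
static int timer_irq[NR_CPUS_SKETCH] = { -1, -1, -1, -1 };

static void setup_timer_sketch(int cpu, int irq)
{
	if (timer_irq[cpu] >= 0)
		fprintf(stderr, "IRQ%d for CPU%d is already allocated\n",
			timer_irq[cpu], cpu);
	timer_irq[cpu] = irq;
}

static void teardown_timer_sketch(int cpu)
{
	timer_irq[cpu] = -1;    /* back to the sentinel, re-setup is fine now */
}

int main(void)
{
	setup_timer_sketch(1, 42);
	setup_timer_sketch(1, 43);   /* triggers the "already allocated" warning */
	teardown_timer_sketch(1);
	setup_timer_sketch(1, 44);   /* silent: the sentinel was restored */
	return 0;
}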
diff --git a/trunk/block/blk-core.c b/trunk/block/blk-core.c
index 074b758efc42..7c288358a745 100644
--- a/trunk/block/blk-core.c
+++ b/trunk/block/blk-core.c
@@ -39,6 +39,7 @@
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
DEFINE_IDA(blk_queue_ida);
diff --git a/trunk/block/blk-sysfs.c b/trunk/block/blk-sysfs.c
index 6206a934eb8c..5efc5a647183 100644
--- a/trunk/block/blk-sysfs.c
+++ b/trunk/block/blk-sysfs.c
@@ -229,6 +229,8 @@ queue_store_##name(struct request_queue *q, const char *page, size_t count) \
unsigned long val; \
ssize_t ret; \
ret = queue_var_store(&val, page, count); \
+ if (ret < 0) \
+ return ret; \
if (neg) \
val = !val; \
\
diff --git a/trunk/block/partition-generic.c b/trunk/block/partition-generic.c
index ae95ee6a58aa..789cdea05893 100644
--- a/trunk/block/partition-generic.c
+++ b/trunk/block/partition-generic.c
@@ -257,7 +257,6 @@ void delete_partition(struct gendisk *disk, int partno)
hd_struct_put(part);
}
-EXPORT_SYMBOL(delete_partition);
static ssize_t whole_disk_show(struct device *dev,
struct device_attribute *attr, char *buf)
diff --git a/trunk/crypto/algif_hash.c b/trunk/crypto/algif_hash.c
index ef5356cd280a..0262210cad38 100644
--- a/trunk/crypto/algif_hash.c
+++ b/trunk/crypto/algif_hash.c
@@ -161,6 +161,8 @@ static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
else if (len < ds)
msg->msg_flags |= MSG_TRUNC;
+ msg->msg_namelen = 0;
+
lock_sock(sk);
if (ctx->more) {
ctx->more = 0;
diff --git a/trunk/crypto/algif_skcipher.c b/trunk/crypto/algif_skcipher.c
index 6a6dfc062d2a..a1c4f0a55583 100644
--- a/trunk/crypto/algif_skcipher.c
+++ b/trunk/crypto/algif_skcipher.c
@@ -432,6 +432,7 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
long copied = 0;
lock_sock(sk);
+ msg->msg_namelen = 0;
for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
iovlen--, iov++) {
unsigned long seglen = iov->iov_len;
diff --git a/trunk/crypto/gcm.c b/trunk/crypto/gcm.c
index 137ad1ec5438..13ccbda34ff9 100644
--- a/trunk/crypto/gcm.c
+++ b/trunk/crypto/gcm.c
@@ -44,6 +44,7 @@ struct crypto_rfc4543_ctx {
struct crypto_rfc4543_req_ctx {
u8 auth_tag[16];
+ u8 assocbuf[32];
struct scatterlist cipher[1];
struct scatterlist payload[2];
struct scatterlist assoc[2];
@@ -1133,9 +1134,19 @@ static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2);
assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);
- sg_init_table(assoc, 2);
- sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
- req->assoc->offset);
+ if (req->assoc->length == req->assoclen) {
+ sg_init_table(assoc, 2);
+ sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
+ req->assoc->offset);
+ } else {
+ BUG_ON(req->assoclen > sizeof(rctx->assocbuf));
+
+ scatterwalk_map_and_copy(rctx->assocbuf, req->assoc, 0,
+ req->assoclen, 0);
+
+ sg_init_table(assoc, 2);
+ sg_set_buf(assoc, rctx->assocbuf, req->assoclen);
+ }
scatterwalk_crypto_chain(assoc, payload, 0, 2);
aead_request_set_tfm(subreq, ctx->child);
diff --git a/trunk/drivers/acpi/Kconfig b/trunk/drivers/acpi/Kconfig
index 92ed9692c47e..4bf68c8d4797 100644
--- a/trunk/drivers/acpi/Kconfig
+++ b/trunk/drivers/acpi/Kconfig
@@ -396,7 +396,7 @@ config ACPI_CUSTOM_METHOD
config ACPI_BGRT
bool "Boottime Graphics Resource Table support"
- depends on EFI
+ depends on EFI && X86
help
This driver adds support for exposing the ACPI Boottime Graphics
Resource Table, which allows the operating system to obtain
diff --git a/trunk/drivers/acpi/acpi_i2c.c b/trunk/drivers/acpi/acpi_i2c.c
index 82045e3f5cac..a82c7626aa9b 100644
--- a/trunk/drivers/acpi/acpi_i2c.c
+++ b/trunk/drivers/acpi/acpi_i2c.c
@@ -90,7 +90,7 @@ void acpi_i2c_register_devices(struct i2c_adapter *adapter)
acpi_handle handle;
acpi_status status;
- handle = ACPI_HANDLE(&adapter->dev);
+ handle = ACPI_HANDLE(adapter->dev.parent);
if (!handle)
return;
diff --git a/trunk/drivers/acpi/pci_root.c b/trunk/drivers/acpi/pci_root.c
index 5ff173066127..ac8688b89705 100644
--- a/trunk/drivers/acpi/pci_root.c
+++ b/trunk/drivers/acpi/pci_root.c
@@ -65,44 +65,12 @@ static struct acpi_scan_handler pci_root_handler = {
.detach = acpi_pci_root_remove,
};
-/* Lock to protect both acpi_pci_roots and acpi_pci_drivers lists */
+/* Lock to protect the acpi_pci_roots list */
static DEFINE_MUTEX(acpi_pci_root_lock);
static LIST_HEAD(acpi_pci_roots);
-static LIST_HEAD(acpi_pci_drivers);
static DEFINE_MUTEX(osc_lock);
-int acpi_pci_register_driver(struct acpi_pci_driver *driver)
-{
- int n = 0;
- struct acpi_pci_root *root;
-
- mutex_lock(&acpi_pci_root_lock);
- list_add_tail(&driver->node, &acpi_pci_drivers);
- if (driver->add)
- list_for_each_entry(root, &acpi_pci_roots, node) {
- driver->add(root);
- n++;
- }
- mutex_unlock(&acpi_pci_root_lock);
-
- return n;
-}
-EXPORT_SYMBOL(acpi_pci_register_driver);
-
-void acpi_pci_unregister_driver(struct acpi_pci_driver *driver)
-{
- struct acpi_pci_root *root;
-
- mutex_lock(&acpi_pci_root_lock);
- list_del(&driver->node);
- if (driver->remove)
- list_for_each_entry(root, &acpi_pci_roots, node)
- driver->remove(root);
- mutex_unlock(&acpi_pci_root_lock);
-}
-EXPORT_SYMBOL(acpi_pci_unregister_driver);
-
/**
* acpi_is_root_bridge - determine whether an ACPI CA node is a PCI root bridge
* @handle - the ACPI CA node in question.
@@ -413,9 +381,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
acpi_status status;
int result;
struct acpi_pci_root *root;
- struct acpi_pci_driver *driver;
u32 flags, base_flags;
- bool is_osc_granted = false;
root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
if (!root)
@@ -476,6 +442,30 @@ static int acpi_pci_root_add(struct acpi_device *device,
flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
acpi_pci_osc_support(root, flags);
+ /*
+ * TBD: Need PCI interface for enumeration/configuration of roots.
+ */
+
+ mutex_lock(&acpi_pci_root_lock);
+ list_add_tail(&root->node, &acpi_pci_roots);
+ mutex_unlock(&acpi_pci_root_lock);
+
+ /*
+ * Scan the Root Bridge
+ * --------------------
+ * Must do this prior to any attempt to bind the root device, as the
+ * PCI namespace does not get created until this call is made (and
+ * thus the root bridge's pci_dev does not exist).
+ */
+ root->bus = pci_acpi_scan_root(root);
+ if (!root->bus) {
+ printk(KERN_ERR PREFIX
+ "Bus %04x:%02x not present in PCI namespace\n",
+ root->segment, (unsigned int)root->secondary.start);
+ result = -ENODEV;
+ goto out_del_root;
+ }
+
/* Indicate support for various _OSC capabilities. */
if (pci_ext_cfg_avail())
flags |= OSC_EXT_PCI_CONFIG_SUPPORT;
@@ -494,6 +484,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
flags = base_flags;
}
}
+
if (!pcie_ports_disabled
&& (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) {
flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL
@@ -514,54 +505,28 @@ static int acpi_pci_root_add(struct acpi_device *device,
status = acpi_pci_osc_control_set(device->handle, &flags,
OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
if (ACPI_SUCCESS(status)) {
- is_osc_granted = true;
dev_info(&device->dev,
"ACPI _OSC control (0x%02x) granted\n", flags);
+ if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
+ /*
+ * We have ASPM control, but the FADT indicates
+ * that it's unsupported. Clear it.
+ */
+ pcie_clear_aspm(root->bus);
+ }
} else {
- is_osc_granted = false;
dev_info(&device->dev,
"ACPI _OSC request failed (%s), "
"returned control mask: 0x%02x\n",
acpi_format_exception(status), flags);
+ pr_info("ACPI _OSC control for PCIe not granted, "
+ "disabling ASPM\n");
+ pcie_no_aspm();
}
} else {
dev_info(&device->dev,
- "Unable to request _OSC control "
- "(_OSC support mask: 0x%02x)\n", flags);
- }
-
- /*
- * TBD: Need PCI interface for enumeration/configuration of roots.
- */
-
- mutex_lock(&acpi_pci_root_lock);
- list_add_tail(&root->node, &acpi_pci_roots);
- mutex_unlock(&acpi_pci_root_lock);
-
- /*
- * Scan the Root Bridge
- * --------------------
- * Must do this prior to any attempt to bind the root device, as the
- * PCI namespace does not get created until this call is made (and
- * thus the root bridge's pci_dev does not exist).
- */
- root->bus = pci_acpi_scan_root(root);
- if (!root->bus) {
- printk(KERN_ERR PREFIX
- "Bus %04x:%02x not present in PCI namespace\n",
- root->segment, (unsigned int)root->secondary.start);
- result = -ENODEV;
- goto out_del_root;
- }
-
- /* ASPM setting */
- if (is_osc_granted) {
- if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM)
- pcie_clear_aspm(root->bus);
- } else {
- pr_info("ACPI _OSC control for PCIe not granted, "
- "disabling ASPM\n");
- pcie_no_aspm();
+ "Unable to request _OSC control "
+ "(_OSC support mask: 0x%02x)\n", flags);
}
pci_acpi_add_bus_pm_notifier(device, root->bus);
@@ -573,12 +538,6 @@ static int acpi_pci_root_add(struct acpi_device *device,
pci_assign_unassigned_bus_resources(root->bus);
}
- mutex_lock(&acpi_pci_root_lock);
- list_for_each_entry(driver, &acpi_pci_drivers, node)
- if (driver->add)
- driver->add(root);
- mutex_unlock(&acpi_pci_root_lock);
-
/* need to after hot-added ioapic is registered */
if (system_state != SYSTEM_BOOTING)
pci_enable_bridges(root->bus);
@@ -599,16 +558,9 @@ static int acpi_pci_root_add(struct acpi_device *device,
static void acpi_pci_root_remove(struct acpi_device *device)
{
struct acpi_pci_root *root = acpi_driver_data(device);
- struct acpi_pci_driver *driver;
pci_stop_root_bus(root->bus);
- mutex_lock(&acpi_pci_root_lock);
- list_for_each_entry_reverse(driver, &acpi_pci_drivers, node)
- if (driver->remove)
- driver->remove(root);
- mutex_unlock(&acpi_pci_root_lock);
-
device_set_run_wake(root->bus->bridge, false);
pci_acpi_remove_bus_pm_notifier(device);
diff --git a/trunk/drivers/acpi/pci_slot.c b/trunk/drivers/acpi/pci_slot.c
index cd1434eb1de8..033d1179bdb5 100644
--- a/trunk/drivers/acpi/pci_slot.c
+++ b/trunk/drivers/acpi/pci_slot.c
@@ -9,6 +9,9 @@
* Copyright (C) 2007-2008 Hewlett-Packard Development Company, L.P.
* Alex Chiang
*
+ * Copyright (C) 2013 Huawei Tech. Co., Ltd.
+ * Jiang Liu
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
@@ -28,10 +31,9 @@
#include
#include
#include
+#include
#include
#include
-#include
-#include
#include
static bool debug;
@@ -61,20 +63,12 @@ ACPI_MODULE_NAME("pci_slot");
#define SLOT_NAME_SIZE 21 /* Inspired by #define in acpiphp.h */
struct acpi_pci_slot {
- acpi_handle root_handle; /* handle of the root bridge */
struct pci_slot *pci_slot; /* corresponding pci_slot */
struct list_head list; /* node in the list of slots */
};
-static int acpi_pci_slot_add(struct acpi_pci_root *root);
-static void acpi_pci_slot_remove(struct acpi_pci_root *root);
-
static LIST_HEAD(slot_list);
static DEFINE_MUTEX(slot_list_lock);
-static struct acpi_pci_driver acpi_pci_slot_driver = {
- .add = acpi_pci_slot_add,
- .remove = acpi_pci_slot_remove,
-};
static int
check_slot(acpi_handle handle, unsigned long long *sun)
@@ -113,21 +107,8 @@ check_slot(acpi_handle handle, unsigned long long *sun)
return device;
}
-struct callback_args {
- acpi_walk_callback user_function; /* only for walk_p2p_bridge */
- struct pci_bus *pci_bus;
- acpi_handle root_handle;
-};
-
/*
- * register_slot
- *
- * Called once for each SxFy object in the namespace. Don't worry about
- * calling pci_create_slot multiple times for the same pci_bus:device,
- * since each subsequent call simply bumps the refcount on the pci_slot.
- *
- * The number of calls to pci_destroy_slot from unregister_slot is
- * symmetrical.
+ * Check whether handle has an associated slot and create PCI slot if it has.
*/
static acpi_status
register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
@@ -137,13 +118,22 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
char name[SLOT_NAME_SIZE];
struct acpi_pci_slot *slot;
struct pci_slot *pci_slot;
- struct callback_args *parent_context = context;
- struct pci_bus *pci_bus = parent_context->pci_bus;
+ struct pci_bus *pci_bus = context;
device = check_slot(handle, &sun);
if (device < 0)
return AE_OK;
+ /*
+ * There may be multiple PCI functions associated with the same slot.
+ * Check whether PCI slot has already been created for this PCI device.
+ */
+ list_for_each_entry(slot, &slot_list, list) {
+ pci_slot = slot->pci_slot;
+ if (pci_slot->bus == pci_bus && pci_slot->number == device)
+ return AE_OK;
+ }
+
slot = kmalloc(sizeof(*slot), GFP_KERNEL);
if (!slot) {
err("%s: cannot allocate memory\n", __func__);
@@ -158,12 +148,8 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
}
- slot->root_handle = parent_context->root_handle;
slot->pci_slot = pci_slot;
- INIT_LIST_HEAD(&slot->list);
- mutex_lock(&slot_list_lock);
list_add(&slot->list, &slot_list);
- mutex_unlock(&slot_list_lock);
get_device(&pci_bus->dev);
@@ -173,131 +159,24 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
}
-/*
- * walk_p2p_bridge - discover and walk p2p bridges
- * @handle: points to an acpi_pci_root
- * @context: p2p_bridge_context pointer
- *
- * Note that when we call ourselves recursively, we pass a different
- * value of pci_bus in the child_context.
- */
-static acpi_status
-walk_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
- int device, function;
- unsigned long long adr;
- acpi_status status;
- acpi_handle dummy_handle;
- acpi_walk_callback user_function;
-
- struct pci_dev *dev;
- struct pci_bus *pci_bus;
- struct callback_args child_context;
- struct callback_args *parent_context = context;
-
- pci_bus = parent_context->pci_bus;
- user_function = parent_context->user_function;
-
- status = acpi_get_handle(handle, "_ADR", &dummy_handle);
- if (ACPI_FAILURE(status))
- return AE_OK;
-
- status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
- if (ACPI_FAILURE(status))
- return AE_OK;
-
- device = (adr >> 16) & 0xffff;
- function = adr & 0xffff;
-
- dev = pci_get_slot(pci_bus, PCI_DEVFN(device, function));
- if (!dev || !dev->subordinate)
- goto out;
-
- child_context.pci_bus = dev->subordinate;
- child_context.user_function = user_function;
- child_context.root_handle = parent_context->root_handle;
-
- dbg("p2p bridge walk, pci_bus = %x\n", dev->subordinate->number);
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
- user_function, NULL, &child_context, NULL);
- if (ACPI_FAILURE(status))
- goto out;
-
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
- walk_p2p_bridge, NULL, &child_context, NULL);
-out:
- pci_dev_put(dev);
- return AE_OK;
-}
-
-/*
- * walk_root_bridge - generic root bridge walker
- * @root: poiner of an acpi_pci_root
- * @user_function: user callback for slot objects
- *
- * Call user_function for all objects underneath this root bridge.
- * Walk p2p bridges underneath us and call user_function on those too.
- */
-static int
-walk_root_bridge(struct acpi_pci_root *root, acpi_walk_callback user_function)
-{
- acpi_status status;
- acpi_handle handle = root->device->handle;
- struct pci_bus *pci_bus = root->bus;
- struct callback_args context;
-
- context.pci_bus = pci_bus;
- context.user_function = user_function;
- context.root_handle = handle;
-
- dbg("root bridge walk, pci_bus = %x\n", pci_bus->number);
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
- user_function, NULL, &context, NULL);
- if (ACPI_FAILURE(status))
- return status;
-
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
- walk_p2p_bridge, NULL, &context, NULL);
- if (ACPI_FAILURE(status))
- err("%s: walk_p2p_bridge failure - %d\n", __func__, status);
-
- return status;
-}
-
-/*
- * acpi_pci_slot_add
- * @handle: points to an acpi_pci_root
- */
-static int
-acpi_pci_slot_add(struct acpi_pci_root *root)
+void acpi_pci_slot_enumerate(struct pci_bus *bus, acpi_handle handle)
{
- acpi_status status;
-
- status = walk_root_bridge(root, register_slot);
- if (ACPI_FAILURE(status))
- err("%s: register_slot failure - %d\n", __func__, status);
-
- return status;
+ mutex_lock(&slot_list_lock);
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ register_slot, NULL, bus, NULL);
+ mutex_unlock(&slot_list_lock);
}
-/*
- * acpi_pci_slot_remove
- * @handle: points to an acpi_pci_root
- */
-static void
-acpi_pci_slot_remove(struct acpi_pci_root *root)
+void acpi_pci_slot_remove(struct pci_bus *bus)
{
struct acpi_pci_slot *slot, *tmp;
- struct pci_bus *pbus;
- acpi_handle handle = root->device->handle;
mutex_lock(&slot_list_lock);
list_for_each_entry_safe(slot, tmp, &slot_list, list) {
- if (slot->root_handle == handle) {
+ if (slot->pci_slot->bus == bus) {
list_del(&slot->list);
- pbus = slot->pci_slot->bus;
pci_destroy_slot(slot->pci_slot);
- put_device(&pbus->dev);
+ put_device(&bus->dev);
kfree(slot);
}
}
@@ -332,5 +211,4 @@ static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = {
void __init acpi_pci_slot_init(void)
{
dmi_check_system(acpi_pci_slot_dmi_table);
- acpi_pci_register_driver(&acpi_pci_slot_driver);
}
diff --git a/trunk/drivers/acpi/processor_idle.c b/trunk/drivers/acpi/processor_idle.c
index fc95308e9a11..ee255c60bdac 100644
--- a/trunk/drivers/acpi/processor_idle.c
+++ b/trunk/drivers/acpi/processor_idle.c
@@ -66,7 +66,8 @@ module_param(latency_factor, uint, 0644);
static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
-static struct acpi_processor_cx *acpi_cstate[CPUIDLE_STATE_MAX];
+static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX],
+ acpi_cstate);
static int disabled_by_idle_boot_param(void)
{
@@ -722,7 +723,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor *pr;
- struct acpi_processor_cx *cx = acpi_cstate[index];
+ struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
pr = __this_cpu_read(processors);
@@ -745,7 +746,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
*/
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
- struct acpi_processor_cx *cx = acpi_cstate[index];
+ struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
ACPI_FLUSH_CPU_CACHE();
@@ -775,7 +776,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor *pr;
- struct acpi_processor_cx *cx = acpi_cstate[index];
+ struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
pr = __this_cpu_read(processors);
@@ -833,7 +834,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor *pr;
- struct acpi_processor_cx *cx = acpi_cstate[index];
+ struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
pr = __this_cpu_read(processors);
@@ -960,7 +961,7 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
continue;
#endif
- acpi_cstate[count] = cx;
+ per_cpu(acpi_cstate[count], dev->cpu) = cx;
count++;
if (count == CPUIDLE_STATE_MAX)
diff --git a/trunk/drivers/acpi/scan.c b/trunk/drivers/acpi/scan.c
index 5e7e991717d7..f54d1985e594 100644
--- a/trunk/drivers/acpi/scan.c
+++ b/trunk/drivers/acpi/scan.c
@@ -1790,7 +1790,6 @@ int __init acpi_scan_init(void)
acpi_platform_init();
acpi_csrt_init();
acpi_container_init();
- acpi_pci_slot_init();
mutex_lock(&acpi_scan_lock);
/*
diff --git a/trunk/drivers/ata/ahci.c b/trunk/drivers/ata/ahci.c
index 6a67b07de494..251e57d38942 100644
--- a/trunk/drivers/ata/ahci.c
+++ b/trunk/drivers/ata/ahci.c
@@ -415,17 +415,17 @@ static const struct pci_device_id ahci_pci_tbl[] = {
/* Marvell */
{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
- { PCI_DEVICE(0x1b4b, 0x9123),
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9123),
.class = PCI_CLASS_STORAGE_SATA_AHCI,
.class_mask = 0xffffff,
.driver_data = board_ahci_yes_fbs }, /* 88se9128 */
- { PCI_DEVICE(0x1b4b, 0x9125),
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125),
.driver_data = board_ahci_yes_fbs }, /* 88se9125 */
- { PCI_DEVICE(0x1b4b, 0x917a),
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 */
- { PCI_DEVICE(0x1b4b, 0x9192),
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
- { PCI_DEVICE(0x1b4b, 0x91a3),
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
.driver_data = board_ahci_yes_fbs },
/* Promise */
diff --git a/trunk/drivers/ata/ata_piix.c b/trunk/drivers/ata/ata_piix.c
index ffdd32d22602..2f48123d74c4 100644
--- a/trunk/drivers/ata/ata_piix.c
+++ b/trunk/drivers/ata/ata_piix.c
@@ -150,6 +150,7 @@ enum piix_controller_ids {
tolapai_sata,
piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */
ich8_sata_snb,
+ ich8_2port_sata_snb,
};
struct piix_map_db {
@@ -304,7 +305,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
/* SATA Controller IDE (Lynx Point) */
{ 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (Lynx Point) */
- { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
/* SATA Controller IDE (Lynx Point) */
{ 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (Lynx Point-LP) */
@@ -439,6 +440,7 @@ static const struct piix_map_db *piix_map_db_table[] = {
[ich8m_apple_sata] = &ich8m_apple_map_db,
[tolapai_sata] = &tolapai_map_db,
[ich8_sata_snb] = &ich8_map_db,
+ [ich8_2port_sata_snb] = &ich8_2port_map_db,
};
static struct pci_bits piix_enable_bits[] = {
@@ -1242,6 +1244,16 @@ static struct ata_port_info piix_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &piix_sata_ops,
},
+
+ [ich8_2port_sata_snb] =
+ {
+ .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR
+ | PIIX_FLAG_PIO16,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
};
#define AHCI_PCI_BAR 5
diff --git a/trunk/drivers/ata/libata-core.c b/trunk/drivers/ata/libata-core.c
index 497adea1f0d6..63c743baf920 100644
--- a/trunk/drivers/ata/libata-core.c
+++ b/trunk/drivers/ata/libata-core.c
@@ -2329,7 +2329,7 @@ int ata_dev_configure(struct ata_device *dev)
* from SATA Settings page of Identify Device Data Log.
*/
if (ata_id_has_devslp(dev->id)) {
- u8 sata_setting[ATA_SECT_SIZE];
+ u8 *sata_setting = ap->sector_buf;
int i, j;
dev->flags |= ATA_DFLAG_DEVSLP;
@@ -2439,6 +2439,9 @@ int ata_dev_configure(struct ata_device *dev)
dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
dev->max_sectors);
+ if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
+ dev->max_sectors = ATA_MAX_SECTORS_LBA48;
+
if (ap->ops->dev_config)
ap->ops->dev_config(dev);
@@ -4100,6 +4103,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* Weird ATAPI devices */
{ "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
{ "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
+ { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
/* Devices we expect to fail diagnostics */
diff --git a/trunk/drivers/ata/libata-scsi.c b/trunk/drivers/ata/libata-scsi.c
index 318b41358187..ff44787e5a45 100644
--- a/trunk/drivers/ata/libata-scsi.c
+++ b/trunk/drivers/ata/libata-scsi.c
@@ -532,8 +532,8 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
struct scsi_sense_hdr sshdr;
scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
&sshdr);
- if (sshdr.sense_key == 0 &&
- sshdr.asc == 0 && sshdr.ascq == 0)
+ if (sshdr.sense_key == RECOVERED_ERROR &&
+ sshdr.asc == 0 && sshdr.ascq == 0x1d)
cmd_result &= ~SAM_STAT_CHECK_CONDITION;
}
@@ -618,8 +618,8 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
struct scsi_sense_hdr sshdr;
scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
&sshdr);
- if (sshdr.sense_key == 0 &&
- sshdr.asc == 0 && sshdr.ascq == 0)
+ if (sshdr.sense_key == RECOVERED_ERROR &&
+ sshdr.asc == 0 && sshdr.ascq == 0x1d)
cmd_result &= ~SAM_STAT_CHECK_CONDITION;
}
diff --git a/trunk/drivers/base/power/qos.c b/trunk/drivers/base/power/qos.c
index 5f74587ef258..71671c42ef45 100644
--- a/trunk/drivers/base/power/qos.c
+++ b/trunk/drivers/base/power/qos.c
@@ -46,6 +46,7 @@
#include "power.h"
static DEFINE_MUTEX(dev_pm_qos_mtx);
+static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
@@ -216,12 +217,17 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
struct pm_qos_constraints *c;
struct pm_qos_flags *f;
- mutex_lock(&dev_pm_qos_mtx);
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
/*
* If the device's PM QoS resume latency limit or PM QoS flags have been
* exposed to user space, they have to be hidden at this point.
*/
+ pm_qos_sysfs_remove_latency(dev);
+ pm_qos_sysfs_remove_flags(dev);
+
+ mutex_lock(&dev_pm_qos_mtx);
+
__dev_pm_qos_hide_latency_limit(dev);
__dev_pm_qos_hide_flags(dev);
@@ -254,6 +260,8 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
out:
mutex_unlock(&dev_pm_qos_mtx);
+
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
/**
@@ -558,6 +566,14 @@ static void __dev_pm_qos_drop_user_request(struct device *dev,
kfree(req);
}
+static void dev_pm_qos_drop_user_request(struct device *dev,
+ enum dev_pm_qos_req_type type)
+{
+ mutex_lock(&dev_pm_qos_mtx);
+ __dev_pm_qos_drop_user_request(dev, type);
+ mutex_unlock(&dev_pm_qos_mtx);
+}
+
/**
* dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
* @dev: Device whose PM QoS latency limit is to be exposed to user space.
@@ -581,6 +597,8 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
return ret;
}
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+
mutex_lock(&dev_pm_qos_mtx);
if (IS_ERR_OR_NULL(dev->power.qos))
@@ -591,26 +609,27 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
if (ret < 0) {
__dev_pm_qos_remove_request(req);
kfree(req);
+ mutex_unlock(&dev_pm_qos_mtx);
goto out;
}
-
dev->power.qos->latency_req = req;
+
+ mutex_unlock(&dev_pm_qos_mtx);
+
ret = pm_qos_sysfs_add_latency(dev);
if (ret)
- __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+ dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
out:
- mutex_unlock(&dev_pm_qos_mtx);
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
- if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) {
- pm_qos_sysfs_remove_latency(dev);
+ if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req)
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
- }
}
/**
@@ -619,9 +638,15 @@ static void __dev_pm_qos_hide_latency_limit(struct device *dev)
*/
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+ pm_qos_sysfs_remove_latency(dev);
+
mutex_lock(&dev_pm_qos_mtx);
__dev_pm_qos_hide_latency_limit(dev);
mutex_unlock(&dev_pm_qos_mtx);
+
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
@@ -649,6 +674,8 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
}
pm_runtime_get_sync(dev);
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+
mutex_lock(&dev_pm_qos_mtx);
if (IS_ERR_OR_NULL(dev->power.qos))
@@ -659,16 +686,19 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
if (ret < 0) {
__dev_pm_qos_remove_request(req);
kfree(req);
+ mutex_unlock(&dev_pm_qos_mtx);
goto out;
}
-
dev->power.qos->flags_req = req;
+
+ mutex_unlock(&dev_pm_qos_mtx);
+
ret = pm_qos_sysfs_add_flags(dev);
if (ret)
- __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+ dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
out:
- mutex_unlock(&dev_pm_qos_mtx);
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
pm_runtime_put(dev);
return ret;
}
@@ -676,10 +706,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
static void __dev_pm_qos_hide_flags(struct device *dev)
{
- if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) {
- pm_qos_sysfs_remove_flags(dev);
+ if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
- }
}
/**
@@ -689,9 +717,15 @@ static void __dev_pm_qos_hide_flags(struct device *dev)
void dev_pm_qos_hide_flags(struct device *dev)
{
pm_runtime_get_sync(dev);
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+ pm_qos_sysfs_remove_flags(dev);
+
mutex_lock(&dev_pm_qos_mtx);
__dev_pm_qos_hide_flags(dev);
mutex_unlock(&dev_pm_qos_mtx);
+
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
diff --git a/trunk/drivers/base/regmap/regcache-rbtree.c b/trunk/drivers/base/regmap/regcache-rbtree.c
index e6732cf7c06e..79f4fca9877a 100644
--- a/trunk/drivers/base/regmap/regcache-rbtree.c
+++ b/trunk/drivers/base/regmap/regcache-rbtree.c
@@ -398,7 +398,7 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
base = 0;
if (max < rbnode->base_reg + rbnode->blklen)
- end = rbnode->base_reg + rbnode->blklen - max;
+ end = max - rbnode->base_reg + 1;
else
end = rbnode->blklen;
diff --git a/trunk/drivers/base/regmap/regmap.c b/trunk/drivers/base/regmap/regmap.c
index 3d2367501fd0..58cfb3232428 100644
--- a/trunk/drivers/base/regmap/regmap.c
+++ b/trunk/drivers/base/regmap/regmap.c
@@ -710,12 +710,12 @@ struct regmap *regmap_init(struct device *dev,
}
}
+ regmap_debugfs_init(map, config->name);
+
ret = regcache_init(map, config);
if (ret != 0)
goto err_range;
- regmap_debugfs_init(map, config->name);
-
/* Add a devres resource for dev_get_regmap() */
m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
if (!m) {
@@ -1036,6 +1036,8 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
kfree(async->work_buf);
kfree(async);
}
+
+ return ret;
}
trace_regmap_hw_write_start(map->dev, reg,
diff --git a/trunk/drivers/block/aoe/aoecmd.c b/trunk/drivers/block/aoe/aoecmd.c
index 25ef5c014fca..92b6d7c51e39 100644
--- a/trunk/drivers/block/aoe/aoecmd.c
+++ b/trunk/drivers/block/aoe/aoecmd.c
@@ -51,8 +51,9 @@ new_skb(ulong len)
{
struct sk_buff *skb;
- skb = alloc_skb(len, GFP_ATOMIC);
+ skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC);
if (skb) {
+ skb_reserve(skb, MAX_HEADER);
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb->protocol = __constant_htons(ETH_P_AOE);
diff --git a/trunk/drivers/block/loop.c b/trunk/drivers/block/loop.c
index fe5f6403417f..dfe758382eaf 100644
--- a/trunk/drivers/block/loop.c
+++ b/trunk/drivers/block/loop.c
@@ -922,6 +922,11 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
lo->lo_flags |= LO_FLAGS_PARTSCAN;
if (lo->lo_flags & LO_FLAGS_PARTSCAN)
ioctl_by_bdev(bdev, BLKRRPART, 0);
+
+ /* Grab the block_device to prevent its destruction after we
+ * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev).
+ */
+ bdgrab(bdev);
return 0;
out_clr:
@@ -1031,8 +1036,10 @@ static int loop_clr_fd(struct loop_device *lo)
memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
- if (bdev)
+ if (bdev) {
+ bdput(bdev);
invalidate_bdev(bdev);
+ }
set_capacity(lo->lo_disk, 0);
loop_sysfs_exit(lo);
if (bdev) {
@@ -1044,29 +1051,12 @@ static int loop_clr_fd(struct loop_device *lo)
lo->lo_state = Lo_unbound;
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
+ if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
+ ioctl_by_bdev(bdev, BLKRRPART, 0);
lo->lo_flags = 0;
if (!part_shift)
lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
mutex_unlock(&lo->lo_ctl_mutex);
-
- /*
- * Remove all partitions, since BLKRRPART won't remove user
- * added partitions when max_part=0
- */
- if (bdev) {
- struct disk_part_iter piter;
- struct hd_struct *part;
-
- mutex_lock_nested(&bdev->bd_mutex, 1);
- invalidate_partition(bdev->bd_disk, 0);
- disk_part_iter_init(&piter, bdev->bd_disk,
- DISK_PITER_INCL_EMPTY);
- while ((part = disk_part_iter_next(&piter)))
- delete_partition(bdev->bd_disk, part->partno);
- disk_part_iter_exit(&piter);
- mutex_unlock(&bdev->bd_mutex);
- }
-
/*
* Need not hold lo_ctl_mutex to fput backing file.
* Calling fput holding lo_ctl_mutex triggers a circular
diff --git a/trunk/drivers/block/mtip32xx/mtip32xx.c b/trunk/drivers/block/mtip32xx/mtip32xx.c
index 92250af84e7d..32c678028e53 100644
--- a/trunk/drivers/block/mtip32xx/mtip32xx.c
+++ b/trunk/drivers/block/mtip32xx/mtip32xx.c
@@ -81,12 +81,17 @@
/* Device instance number, incremented each time a device is probed. */
static int instance;
+struct list_head online_list;
+struct list_head removing_list;
+spinlock_t dev_lock;
+
/*
* Global variable used to hold the major block device number
* allocated in mtip_init().
*/
static int mtip_major;
static struct dentry *dfs_parent;
+static struct dentry *dfs_device_status;
static u32 cpu_use[NR_CPUS];
@@ -243,40 +248,31 @@ static inline void release_slot(struct mtip_port *port, int tag)
/*
* Reset the HBA (without sleeping)
*
- * Just like hba_reset, except does not call sleep, so can be
- * run from interrupt/tasklet context.
- *
* @dd Pointer to the driver data structure.
*
* return value
* 0 The reset was successful.
* -1 The HBA Reset bit did not clear.
*/
-static int hba_reset_nosleep(struct driver_data *dd)
+static int mtip_hba_reset(struct driver_data *dd)
{
unsigned long timeout;
- /* Chip quirk: quiesce any chip function */
- mdelay(10);
-
/* Set the reset bit */
writel(HOST_RESET, dd->mmio + HOST_CTL);
/* Flush */
readl(dd->mmio + HOST_CTL);
- /*
- * Wait 10ms then spin for up to 1 second
- * waiting for reset acknowledgement
- */
- timeout = jiffies + msecs_to_jiffies(1000);
- mdelay(10);
- while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
- && time_before(jiffies, timeout))
- mdelay(1);
+ /* Spin for up to 2 seconds, waiting for reset acknowledgement */
+ timeout = jiffies + msecs_to_jiffies(2000);
+ do {
+ mdelay(10);
+ if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
+ return -1;
- if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
- return -1;
+ } while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
+ && time_before(jiffies, timeout));
if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
return -1;
@@ -481,7 +477,7 @@ static void mtip_restart_port(struct mtip_port *port)
dev_warn(&port->dd->pdev->dev,
"PxCMD.CR not clear, escalating reset\n");
- if (hba_reset_nosleep(port->dd))
+ if (mtip_hba_reset(port->dd))
dev_err(&port->dd->pdev->dev,
"HBA reset escalation failed.\n");
@@ -527,6 +523,26 @@ static void mtip_restart_port(struct mtip_port *port)
}
+static int mtip_device_reset(struct driver_data *dd)
+{
+ int rv = 0;
+
+ if (mtip_check_surprise_removal(dd->pdev))
+ return 0;
+
+ if (mtip_hba_reset(dd) < 0)
+ rv = -EFAULT;
+
+ mdelay(1);
+ mtip_init_port(dd->port);
+ mtip_start_port(dd->port);
+
+ /* Enable interrupts on the HBA. */
+ writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
+ dd->mmio + HOST_CTL);
+ return rv;
+}
+
/*
* Helper function for tag logging
*/
@@ -632,7 +648,7 @@ static void mtip_timeout_function(unsigned long int data)
if (cmdto_cnt) {
print_tags(port->dd, "timed out", tagaccum, cmdto_cnt);
if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
- mtip_restart_port(port);
+ mtip_device_reset(port->dd);
wake_up_interruptible(&port->svc_wait);
}
clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
@@ -1283,11 +1299,11 @@ static int mtip_exec_internal_command(struct mtip_port *port,
int rv = 0, ready2go = 1;
struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL];
unsigned long to;
+ struct driver_data *dd = port->dd;
/* Make sure the buffer is 8 byte aligned. This is asic specific. */
if (buffer & 0x00000007) {
- dev_err(&port->dd->pdev->dev,
- "SG buffer is not 8 byte aligned\n");
+ dev_err(&dd->pdev->dev, "SG buffer is not 8 byte aligned\n");
return -EFAULT;
}
@@ -1300,23 +1316,21 @@ static int mtip_exec_internal_command(struct mtip_port *port,
mdelay(100);
} while (time_before(jiffies, to));
if (!ready2go) {
- dev_warn(&port->dd->pdev->dev,
+ dev_warn(&dd->pdev->dev,
"Internal cmd active. new cmd [%02X]\n", fis->command);
return -EBUSY;
}
set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
port->ic_pause_timer = 0;
- if (fis->command == ATA_CMD_SEC_ERASE_UNIT)
- clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
- else if (fis->command == ATA_CMD_DOWNLOAD_MICRO)
- clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
+ clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
+ clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
if (atomic == GFP_KERNEL) {
if (fis->command != ATA_CMD_STANDBYNOW1) {
/* wait for io to complete if non atomic */
if (mtip_quiesce_io(port, 5000) < 0) {
- dev_warn(&port->dd->pdev->dev,
+ dev_warn(&dd->pdev->dev,
"Failed to quiesce IO\n");
release_slot(port, MTIP_TAG_INTERNAL);
clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
@@ -1361,58 +1375,84 @@ static int mtip_exec_internal_command(struct mtip_port *port,
/* Issue the command to the hardware */
mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
- /* Poll if atomic, wait_for_completion otherwise */
if (atomic == GFP_KERNEL) {
/* Wait for the command to complete or timeout. */
- if (wait_for_completion_timeout(
+ if (wait_for_completion_interruptible_timeout(
&wait,
- msecs_to_jiffies(timeout)) == 0) {
- dev_err(&port->dd->pdev->dev,
- "Internal command did not complete [%d] "
- "within timeout of %lu ms\n",
- atomic, timeout);
- if (mtip_check_surprise_removal(port->dd->pdev) ||
+ msecs_to_jiffies(timeout)) <= 0) {
+ if (rv == -ERESTARTSYS) { /* interrupted */
+ dev_err(&dd->pdev->dev,
+ "Internal command [%02X] was interrupted after %lu ms\n",
+ fis->command, timeout);
+ rv = -EINTR;
+ goto exec_ic_exit;
+ } else if (rv == 0) /* timeout */
+ dev_err(&dd->pdev->dev,
+ "Internal command did not complete [%02X] within timeout of %lu ms\n",
+ fis->command, timeout);
+ else
+ dev_err(&dd->pdev->dev,
+ "Internal command [%02X] wait returned code [%d] after %lu ms - unhandled\n",
+ fis->command, rv, timeout);
+
+ if (mtip_check_surprise_removal(dd->pdev) ||
test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
- &port->dd->dd_flag)) {
+ &dd->dd_flag)) {
+ dev_err(&dd->pdev->dev,
+ "Internal command [%02X] wait returned due to SR\n",
+ fis->command);
rv = -ENXIO;
goto exec_ic_exit;
}
+ mtip_device_reset(dd); /* recover from timeout issue */
rv = -EAGAIN;
+ goto exec_ic_exit;
}
} else {
+ u32 hba_stat, port_stat;
+
/* Spin for checking if command still outstanding */
timeout = jiffies + msecs_to_jiffies(timeout);
while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL])
& (1 << MTIP_TAG_INTERNAL))
&& time_before(jiffies, timeout)) {
- if (mtip_check_surprise_removal(port->dd->pdev)) {
+ if (mtip_check_surprise_removal(dd->pdev)) {
rv = -ENXIO;
goto exec_ic_exit;
}
if ((fis->command != ATA_CMD_STANDBYNOW1) &&
test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
- &port->dd->dd_flag)) {
+ &dd->dd_flag)) {
rv = -ENXIO;
goto exec_ic_exit;
}
- if (readl(port->mmio + PORT_IRQ_STAT) & PORT_IRQ_ERR) {
- atomic_inc(&int_cmd->active); /* error */
- break;
+ port_stat = readl(port->mmio + PORT_IRQ_STAT);
+ if (!port_stat)
+ continue;
+
+ if (port_stat & PORT_IRQ_ERR) {
+ dev_err(&dd->pdev->dev,
+ "Internal command [%02X] failed\n",
+ fis->command);
+ mtip_device_reset(dd);
+ rv = -EIO;
+ goto exec_ic_exit;
+ } else {
+ writel(port_stat, port->mmio + PORT_IRQ_STAT);
+ hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
+ if (hba_stat)
+ writel(hba_stat,
+ dd->mmio + HOST_IRQ_STAT);
}
+ break;
}
}
- if (atomic_read(&int_cmd->active) > 1) {
- dev_err(&port->dd->pdev->dev,
- "Internal command [%02X] failed\n", fis->command);
- rv = -EIO;
- }
if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
& (1 << MTIP_TAG_INTERNAL)) {
rv = -ENXIO;
- if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
- &port->dd->dd_flag)) {
- mtip_restart_port(port);
+ if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
+ mtip_device_reset(dd);
rv = -EAGAIN;
}
}
@@ -1724,7 +1764,8 @@ static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
* -EINVAL Invalid parameters passed in, trim not supported
* -EIO Error submitting trim request to hw
*/
-static int mtip_send_trim(struct driver_data *dd, unsigned int lba, unsigned int len)
+static int mtip_send_trim(struct driver_data *dd, unsigned int lba,
+ unsigned int len)
{
int i, rv = 0;
u64 tlba, tlen, sect_left;
@@ -1810,45 +1851,6 @@ static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors)
return (bool) !!port->identify_valid;
}
-/*
- * Reset the HBA.
- *
- * Resets the HBA by setting the HBA Reset bit in the Global
- * HBA Control register. After setting the HBA Reset bit the
- * function waits for 1 second before reading the HBA Reset
- * bit to make sure it has cleared. If HBA Reset is not clear
- * an error is returned. Cannot be used in non-blockable
- * context.
- *
- * @dd Pointer to the driver data structure.
- *
- * return value
- * 0 The reset was successful.
- * -1 The HBA Reset bit did not clear.
- */
-static int mtip_hba_reset(struct driver_data *dd)
-{
- mtip_deinit_port(dd->port);
-
- /* Set the reset bit */
- writel(HOST_RESET, dd->mmio + HOST_CTL);
-
- /* Flush */
- readl(dd->mmio + HOST_CTL);
-
- /* Wait for reset to clear */
- ssleep(1);
-
- /* Check the bit has cleared */
- if (readl(dd->mmio + HOST_CTL) & HOST_RESET) {
- dev_err(&dd->pdev->dev,
- "Reset bit did not clear.\n");
- return -1;
- }
-
- return 0;
-}
-
/*
* Display the identify command data.
*
@@ -2710,6 +2712,100 @@ static ssize_t mtip_hw_show_status(struct device *dev,
static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
+/* debugfs entries */
+
+static ssize_t show_device_status(struct device_driver *drv, char *buf)
+{
+ int size = 0;
+ struct driver_data *dd, *tmp;
+ unsigned long flags;
+ char id_buf[42];
+ u16 status = 0;
+
+ spin_lock_irqsave(&dev_lock, flags);
+ size += sprintf(&buf[size], "Devices Present:\n");
+ list_for_each_entry_safe(dd, tmp, &online_list, online_list) {
+ if (dd->pdev) {
+ if (dd->port &&
+ dd->port->identify &&
+ dd->port->identify_valid) {
+ strlcpy(id_buf,
+ (char *) (dd->port->identify + 10), 21);
+ status = *(dd->port->identify + 141);
+ } else {
+ memset(id_buf, 0, 42);
+ status = 0;
+ }
+
+ if (dd->port &&
+ test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
+ size += sprintf(&buf[size],
+ " device %s %s (ftl rebuild %d %%)\n",
+ dev_name(&dd->pdev->dev),
+ id_buf,
+ status);
+ } else {
+ size += sprintf(&buf[size],
+ " device %s %s\n",
+ dev_name(&dd->pdev->dev),
+ id_buf);
+ }
+ }
+ }
+
+ size += sprintf(&buf[size], "Devices Being Removed:\n");
+ list_for_each_entry_safe(dd, tmp, &removing_list, remove_list) {
+ if (dd->pdev) {
+ if (dd->port &&
+ dd->port->identify &&
+ dd->port->identify_valid) {
+ strlcpy(id_buf,
+ (char *) (dd->port->identify+10), 21);
+ status = *(dd->port->identify + 141);
+ } else {
+ memset(id_buf, 0, 42);
+ status = 0;
+ }
+
+ if (dd->port &&
+ test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
+ size += sprintf(&buf[size],
+ " device %s %s (ftl rebuild %d %%)\n",
+ dev_name(&dd->pdev->dev),
+ id_buf,
+ status);
+ } else {
+ size += sprintf(&buf[size],
+ " device %s %s\n",
+ dev_name(&dd->pdev->dev),
+ id_buf);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&dev_lock, flags);
+
+ return size;
+}
+
+static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf,
+ size_t len, loff_t *offset)
+{
+ int size = *offset;
+ char buf[MTIP_DFS_MAX_BUF_SIZE];
+
+ if (!len || *offset)
+ return 0;
+
+ size += show_device_status(NULL, buf);
+
+ *offset = size <= len ? size : len;
+ size = copy_to_user(ubuf, buf, *offset);
+ if (size)
+ return -EFAULT;
+
+ return *offset;
+}
+
static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
size_t len, loff_t *offset)
{
@@ -2804,6 +2900,13 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
return *offset;
}
+static const struct file_operations mtip_device_status_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = mtip_hw_read_device_status,
+ .llseek = no_llseek,
+};
+
static const struct file_operations mtip_regs_fops = {
.owner = THIS_MODULE,
.open = simple_open,
@@ -4161,6 +4264,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
const struct cpumask *node_mask;
int cpu, i = 0, j = 0;
int my_node = NUMA_NO_NODE;
+ unsigned long flags;
/* Allocate memory for this devices private data. */
my_node = pcibus_to_node(pdev->bus);
@@ -4218,6 +4322,9 @@ static int mtip_pci_probe(struct pci_dev *pdev,
dd->pdev = pdev;
dd->numa_node = my_node;
+ INIT_LIST_HEAD(&dd->online_list);
+ INIT_LIST_HEAD(&dd->remove_list);
+
memset(dd->workq_name, 0, 32);
snprintf(dd->workq_name, 31, "mtipq%d", dd->instance);
@@ -4305,6 +4412,14 @@ static int mtip_pci_probe(struct pci_dev *pdev,
instance++;
if (rv != MTIP_FTL_REBUILD_MAGIC)
set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
+ else
+ rv = 0; /* device in rebuild state, return 0 from probe */
+
+ /* Add to online list even if in ftl rebuild */
+ spin_lock_irqsave(&dev_lock, flags);
+ list_add(&dd->online_list, &online_list);
+ spin_unlock_irqrestore(&dev_lock, flags);
+
goto done;
block_initialize_err:
@@ -4338,9 +4453,15 @@ static void mtip_pci_remove(struct pci_dev *pdev)
{
struct driver_data *dd = pci_get_drvdata(pdev);
int counter = 0;
+ unsigned long flags;
set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
+ spin_lock_irqsave(&dev_lock, flags);
+ list_del_init(&dd->online_list);
+ list_add(&dd->remove_list, &removing_list);
+ spin_unlock_irqrestore(&dev_lock, flags);
+
if (mtip_check_surprise_removal(pdev)) {
while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) {
counter++;
@@ -4366,6 +4487,10 @@ static void mtip_pci_remove(struct pci_dev *pdev)
pci_disable_msi(pdev);
+ spin_lock_irqsave(&dev_lock, flags);
+ list_del_init(&dd->remove_list);
+ spin_unlock_irqrestore(&dev_lock, flags);
+
kfree(dd);
pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
}
@@ -4513,6 +4638,11 @@ static int __init mtip_init(void)
pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n");
+ spin_lock_init(&dev_lock);
+
+ INIT_LIST_HEAD(&online_list);
+ INIT_LIST_HEAD(&removing_list);
+
/* Allocate a major block device number to use with this driver. */
error = register_blkdev(0, MTIP_DRV_NAME);
if (error <= 0) {
@@ -4522,11 +4652,18 @@ static int __init mtip_init(void)
}
mtip_major = error;
- if (!dfs_parent) {
- dfs_parent = debugfs_create_dir("rssd", NULL);
- if (IS_ERR_OR_NULL(dfs_parent)) {
- pr_warn("Error creating debugfs parent\n");
- dfs_parent = NULL;
+ dfs_parent = debugfs_create_dir("rssd", NULL);
+ if (IS_ERR_OR_NULL(dfs_parent)) {
+ pr_warn("Error creating debugfs parent\n");
+ dfs_parent = NULL;
+ }
+ if (dfs_parent) {
+ dfs_device_status = debugfs_create_file("device_status",
+ S_IRUGO, dfs_parent, NULL,
+ &mtip_device_status_fops);
+ if (IS_ERR_OR_NULL(dfs_device_status)) {
+ pr_err("Error creating device_status node\n");
+ dfs_device_status = NULL;
}
}
diff --git a/trunk/drivers/block/mtip32xx/mtip32xx.h b/trunk/drivers/block/mtip32xx/mtip32xx.h
index 3bffff5f670c..8e8334c9dd0f 100644
--- a/trunk/drivers/block/mtip32xx/mtip32xx.h
+++ b/trunk/drivers/block/mtip32xx/mtip32xx.h
@@ -129,9 +129,9 @@ enum {
MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */
MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */
MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcde */
- MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) | \
- (1 << MTIP_PF_EH_ACTIVE_BIT) | \
- (1 << MTIP_PF_SE_ACTIVE_BIT) | \
+ MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) |
+ (1 << MTIP_PF_EH_ACTIVE_BIT) |
+ (1 << MTIP_PF_SE_ACTIVE_BIT) |
(1 << MTIP_PF_DM_ACTIVE_BIT)),
MTIP_PF_SVC_THD_ACTIVE_BIT = 4,
@@ -144,9 +144,9 @@ enum {
MTIP_DDF_REMOVE_PENDING_BIT = 1,
MTIP_DDF_OVER_TEMP_BIT = 2,
MTIP_DDF_WRITE_PROTECT_BIT = 3,
- MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \
- (1 << MTIP_DDF_SEC_LOCK_BIT) | \
- (1 << MTIP_DDF_OVER_TEMP_BIT) | \
+ MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
+ (1 << MTIP_DDF_SEC_LOCK_BIT) |
+ (1 << MTIP_DDF_OVER_TEMP_BIT) |
(1 << MTIP_DDF_WRITE_PROTECT_BIT)),
MTIP_DDF_CLEANUP_BIT = 5,
@@ -180,7 +180,7 @@ struct mtip_work {
#define MTIP_TRIM_TIMEOUT_MS 240000
#define MTIP_MAX_TRIM_ENTRIES 8
-#define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8
+#define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8
struct mtip_trim_entry {
u32 lba; /* starting lba of region */
@@ -501,6 +501,10 @@ struct driver_data {
atomic_t irq_workers_active;
int isr_binding;
+
+ struct list_head online_list; /* linkage for online list */
+
+ struct list_head remove_list; /* linkage for removing list */
};
#endif
diff --git a/trunk/drivers/block/rbd.c b/trunk/drivers/block/rbd.c
index f556f8a8b3f9..b7b7a88d9f68 100644
--- a/trunk/drivers/block/rbd.c
+++ b/trunk/drivers/block/rbd.c
@@ -1742,9 +1742,10 @@ static int rbd_img_request_submit(struct rbd_img_request *img_request)
struct rbd_device *rbd_dev = img_request->rbd_dev;
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_obj_request *obj_request;
+ struct rbd_obj_request *next_obj_request;
dout("%s: img %p\n", __func__, img_request);
- for_each_obj_request(img_request, obj_request) {
+ for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
int ret;
obj_request->callback = rbd_img_obj_callback;
diff --git a/trunk/drivers/char/hpet.c b/trunk/drivers/char/hpet.c
index e3f9a99b8522..d784650d14f0 100644
--- a/trunk/drivers/char/hpet.c
+++ b/trunk/drivers/char/hpet.c
@@ -373,26 +373,14 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
struct hpet_dev *devp;
unsigned long addr;
- if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff)
- return -EINVAL;
-
devp = file->private_data;
addr = devp->hd_hpets->hp_hpet_phys;
if (addr & (PAGE_SIZE - 1))
return -ENOSYS;
- vma->vm_flags |= VM_IO;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
- PAGE_SIZE, vma->vm_page_prot)) {
- printk(KERN_ERR "%s: io_remap_pfn_range failed\n",
- __func__);
- return -EAGAIN;
- }
-
- return 0;
+ return vm_iomap_memory(vma, addr, PAGE_SIZE);
#else
return -ENOSYS;
#endif
diff --git a/trunk/drivers/char/hw_random/core.c b/trunk/drivers/char/hw_random/core.c
index 69ae5972713c..a0f7724852eb 100644
--- a/trunk/drivers/char/hw_random/core.c
+++ b/trunk/drivers/char/hw_random/core.c
@@ -380,6 +380,15 @@ void hwrng_unregister(struct hwrng *rng)
}
EXPORT_SYMBOL_GPL(hwrng_unregister);
+static void __exit hwrng_exit(void)
+{
+ mutex_lock(&rng_mutex);
+ BUG_ON(current_rng);
+ kfree(rng_buffer);
+ mutex_unlock(&rng_mutex);
+}
+
+module_exit(hwrng_exit);
MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");
diff --git a/trunk/drivers/char/virtio_console.c b/trunk/drivers/char/virtio_console.c
index e905d5f53051..ce5f3fc25d6d 100644
--- a/trunk/drivers/char/virtio_console.c
+++ b/trunk/drivers/char/virtio_console.c
@@ -149,7 +149,8 @@ struct ports_device {
spinlock_t ports_lock;
/* To protect the vq operations for the control channel */
- spinlock_t cvq_lock;
+ spinlock_t c_ivq_lock;
+ spinlock_t c_ovq_lock;
/* The current config space is stored here */
struct virtio_console_config config;
@@ -569,11 +570,14 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
vq = portdev->c_ovq;
sg_init_one(sg, &cpkt, sizeof(cpkt));
+
+ spin_lock(&portdev->c_ovq_lock);
if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) {
virtqueue_kick(vq);
while (!virtqueue_get_buf(vq, &len))
cpu_relax();
}
+ spin_unlock(&portdev->c_ovq_lock);
return 0;
}
@@ -1436,7 +1440,7 @@ static int add_port(struct ports_device *portdev, u32 id)
* rproc_serial does not want the console port, only
* the generic port implementation.
*/
- port->host_connected = port->guest_connected = true;
+ port->host_connected = true;
else if (!use_multiport(port->portdev)) {
/*
* If we're not using multiport support,
@@ -1709,23 +1713,23 @@ static void control_work_handler(struct work_struct *work)
portdev = container_of(work, struct ports_device, control_work);
vq = portdev->c_ivq;
- spin_lock(&portdev->cvq_lock);
+ spin_lock(&portdev->c_ivq_lock);
while ((buf = virtqueue_get_buf(vq, &len))) {
- spin_unlock(&portdev->cvq_lock);
+ spin_unlock(&portdev->c_ivq_lock);
buf->len = len;
buf->offset = 0;
handle_control_message(portdev, buf);
- spin_lock(&portdev->cvq_lock);
+ spin_lock(&portdev->c_ivq_lock);
if (add_inbuf(portdev->c_ivq, buf) < 0) {
dev_warn(&portdev->vdev->dev,
"Error adding buffer to queue\n");
free_buf(buf, false);
}
}
- spin_unlock(&portdev->cvq_lock);
+ spin_unlock(&portdev->c_ivq_lock);
}
static void out_intr(struct virtqueue *vq)
@@ -1752,13 +1756,23 @@ static void in_intr(struct virtqueue *vq)
port->inbuf = get_inbuf(port);
/*
- * Don't queue up data when port is closed. This condition
+ * Normally the port should not accept data when the port is
+ * closed. For generic serial ports, the host won't (shouldn't)
+ * send data till the guest is connected. But this condition
* can be reached when a console port is not yet connected (no
- * tty is spawned) and the host sends out data to console
- * ports. For generic serial ports, the host won't
- * (shouldn't) send data till the guest is connected.
+ * tty is spawned) and the other side sends out data over the
+ * vring, or when a remote device starts sending data before
+ * the ports are opened.
+ *
+ * A generic serial port will discard data if not connected,
+ * while console ports and rproc-serial ports accept data at
+ * any time. rproc-serial is initiated with guest_connected to
+ * false because port_fops_open expects this. Console ports are
+ * hooked up with an HVC console and are initialized with
+ * guest_connected to true.
*/
- if (!port->guest_connected)
+
+ if (!port->guest_connected && !is_rproc_serial(port->portdev->vdev))
discard_port_data(port);
spin_unlock_irqrestore(&port->inbuf_lock, flags);
@@ -1986,10 +2000,12 @@ static int virtcons_probe(struct virtio_device *vdev)
if (multiport) {
unsigned int nr_added_bufs;
- spin_lock_init(&portdev->cvq_lock);
+ spin_lock_init(&portdev->c_ivq_lock);
+ spin_lock_init(&portdev->c_ovq_lock);
INIT_WORK(&portdev->control_work, &control_work_handler);
- nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock);
+ nr_added_bufs = fill_queue(portdev->c_ivq,
+ &portdev->c_ivq_lock);
if (!nr_added_bufs) {
dev_err(&vdev->dev,
"Error allocating buffers for control queue\n");
@@ -2140,7 +2156,7 @@ static int virtcons_restore(struct virtio_device *vdev)
return ret;
if (use_multiport(portdev))
- fill_queue(portdev->c_ivq, &portdev->cvq_lock);
+ fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
list_for_each_entry(port, &portdev->ports, list) {
port->in_vq = portdev->in_vqs[port->id];
diff --git a/trunk/drivers/clk/tegra/clk-tegra20.c b/trunk/drivers/clk/tegra/clk-tegra20.c
index 1e2de7305362..f873dcefe0de 100644
--- a/trunk/drivers/clk/tegra/clk-tegra20.c
+++ b/trunk/drivers/clk/tegra/clk-tegra20.c
@@ -703,7 +703,7 @@ static void tegra20_pll_init(void)
clks[pll_a_out0] = clk;
/* PLLE */
- clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, NULL,
+ clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, pmc_base,
0, 100000000, &pll_e_params,
0, pll_e_freq_table, NULL);
clk_register_clkdev(clk, "pll_e", NULL);
diff --git a/trunk/drivers/cpufreq/cpufreq-cpu0.c b/trunk/drivers/cpufreq/cpufreq-cpu0.c
index 4e5b7fb8927c..37d23a0f8c56 100644
--- a/trunk/drivers/cpufreq/cpufreq-cpu0.c
+++ b/trunk/drivers/cpufreq/cpufreq-cpu0.c
@@ -178,10 +178,16 @@ static struct cpufreq_driver cpu0_cpufreq_driver = {
static int cpu0_cpufreq_probe(struct platform_device *pdev)
{
- struct device_node *np;
+ struct device_node *np, *parent;
int ret;
- for_each_child_of_node(of_find_node_by_path("/cpus"), np) {
+ parent = of_find_node_by_path("/cpus");
+ if (!parent) {
+ pr_err("failed to find OF /cpus\n");
+ return -ENOENT;
+ }
+
+ for_each_child_of_node(parent, np) {
if (of_get_property(np, "operating-points", NULL))
break;
}
diff --git a/trunk/drivers/cpufreq/cpufreq_governor.h b/trunk/drivers/cpufreq/cpufreq_governor.h
index 46bde01eee62..cc4bd2f6838a 100644
--- a/trunk/drivers/cpufreq/cpufreq_governor.h
+++ b/trunk/drivers/cpufreq/cpufreq_governor.h
@@ -14,8 +14,8 @@
* published by the Free Software Foundation.
*/
-#ifndef _CPUFREQ_GOVERNER_H
-#define _CPUFREQ_GOVERNER_H
+#ifndef _CPUFREQ_GOVERNOR_H
+#define _CPUFREQ_GOVERNOR_H
#include
#include
@@ -175,4 +175,4 @@ bool need_load_eval(struct cpu_dbs_common_info *cdbs,
unsigned int sampling_rate);
int cpufreq_governor_dbs(struct dbs_data *dbs_data,
struct cpufreq_policy *policy, unsigned int event);
-#endif /* _CPUFREQ_GOVERNER_H */
+#endif /* _CPUFREQ_GOVERNOR_H */
diff --git a/trunk/drivers/cpufreq/intel_pstate.c b/trunk/drivers/cpufreq/intel_pstate.c
index ad72922919ed..6133ef5cf671 100644
--- a/trunk/drivers/cpufreq/intel_pstate.c
+++ b/trunk/drivers/cpufreq/intel_pstate.c
@@ -502,7 +502,6 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
sample_time = cpu->pstate_policy->sample_rate_ms;
delay = msecs_to_jiffies(sample_time);
- delay -= jiffies % delay;
mod_timer_pinned(&cpu->timer, jiffies + delay);
}
diff --git a/trunk/drivers/crypto/ux500/cryp/cryp_core.c b/trunk/drivers/crypto/ux500/cryp/cryp_core.c
index 8bc5fef07e7a..22c9063e0120 100644
--- a/trunk/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/trunk/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1750,7 +1750,7 @@ static struct platform_driver cryp_driver = {
.shutdown = ux500_cryp_shutdown,
.driver = {
.owner = THIS_MODULE,
- .name = "cryp1"
+ .name = "cryp1",
.pm = &ux500_cryp_pm,
}
};
diff --git a/trunk/drivers/dma/Kconfig b/trunk/drivers/dma/Kconfig
index 80b69971cf28..aeaea32bcfda 100644
--- a/trunk/drivers/dma/Kconfig
+++ b/trunk/drivers/dma/Kconfig
@@ -83,6 +83,7 @@ config INTEL_IOP_ADMA
config DW_DMAC
tristate "Synopsys DesignWare AHB DMA support"
+ depends on GENERIC_HARDIRQS
select DMA_ENGINE
default y if CPU_AT32AP7000
help
diff --git a/trunk/drivers/dma/at_hdmac.c b/trunk/drivers/dma/at_hdmac.c
index 6e13f262139a..88cfc61329d2 100644
--- a/trunk/drivers/dma/at_hdmac.c
+++ b/trunk/drivers/dma/at_hdmac.c
@@ -310,8 +310,6 @@ static void atc_complete_all(struct at_dma_chan *atchan)
dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
- BUG_ON(atc_chan_is_enabled(atchan));
-
/*
* Submit queued descriptors ASAP, i.e. before we go through
* the completed ones.
@@ -368,6 +366,9 @@ static void atc_advance_work(struct at_dma_chan *atchan)
{
dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
+ if (atc_chan_is_enabled(atchan))
+ return;
+
if (list_empty(&atchan->active_list) ||
list_is_singular(&atchan->active_list)) {
atc_complete_all(atchan);
@@ -1078,9 +1079,7 @@ static void atc_issue_pending(struct dma_chan *chan)
return;
spin_lock_irqsave(&atchan->lock, flags);
- if (!atc_chan_is_enabled(atchan)) {
- atc_advance_work(atchan);
- }
+ atc_advance_work(atchan);
spin_unlock_irqrestore(&atchan->lock, flags);
}
diff --git a/trunk/drivers/dma/omap-dma.c b/trunk/drivers/dma/omap-dma.c
index c4b4fd2acc42..08b43bf37158 100644
--- a/trunk/drivers/dma/omap-dma.c
+++ b/trunk/drivers/dma/omap-dma.c
@@ -276,12 +276,20 @@ static void omap_dma_issue_pending(struct dma_chan *chan)
spin_lock_irqsave(&c->vc.lock, flags);
if (vchan_issue_pending(&c->vc) && !c->desc) {
- struct omap_dmadev *d = to_omap_dma_dev(chan->device);
- spin_lock(&d->lock);
- if (list_empty(&c->node))
- list_add_tail(&c->node, &d->pending);
- spin_unlock(&d->lock);
- tasklet_schedule(&d->task);
+ /*
+ * c->cyclic is used only by audio and in this case the DMA needs
+ * to be started without delay.
+ */
+ if (!c->cyclic) {
+ struct omap_dmadev *d = to_omap_dma_dev(chan->device);
+ spin_lock(&d->lock);
+ if (list_empty(&c->node))
+ list_add_tail(&c->node, &d->pending);
+ spin_unlock(&d->lock);
+ tasklet_schedule(&d->task);
+ } else {
+ omap_dma_start_desc(c);
+ }
}
spin_unlock_irqrestore(&c->vc.lock, flags);
}
diff --git a/trunk/drivers/dma/pl330.c b/trunk/drivers/dma/pl330.c
index 718153122759..5dbc5946c4c3 100644
--- a/trunk/drivers/dma/pl330.c
+++ b/trunk/drivers/dma/pl330.c
@@ -2882,7 +2882,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
struct dma_pl330_platdata *pdat;
struct dma_pl330_dmac *pdmac;
- struct dma_pl330_chan *pch;
+ struct dma_pl330_chan *pch, *_p;
struct pl330_info *pi;
struct dma_device *pd;
struct resource *res;
@@ -2984,7 +2984,16 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
ret = dma_async_device_register(pd);
if (ret) {
dev_err(&adev->dev, "unable to register DMAC\n");
- goto probe_err2;
+ goto probe_err3;
+ }
+
+ if (adev->dev.of_node) {
+ ret = of_dma_controller_register(adev->dev.of_node,
+ of_dma_pl330_xlate, pdmac);
+ if (ret) {
+ dev_err(&adev->dev,
+ "unable to register DMA to the generic DT DMA helpers\n");
+ }
}
dev_info(&adev->dev,
@@ -2995,16 +3004,21 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
pi->pcfg.num_peri, pi->pcfg.num_events);
- ret = of_dma_controller_register(adev->dev.of_node,
- of_dma_pl330_xlate, pdmac);
- if (ret) {
- dev_err(&adev->dev,
- "unable to register DMA to the generic DT DMA helpers\n");
- goto probe_err2;
- }
-
return 0;
+probe_err3:
+ amba_set_drvdata(adev, NULL);
+ /* Idle the DMAC */
+ list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
+ chan.device_node) {
+
+ /* Remove the channel */
+ list_del(&pch->chan.device_node);
+
+ /* Flush the channel */
+ pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
+ pl330_free_chan_resources(&pch->chan);
+ }
probe_err2:
pl330_del(pi);
probe_err1:
@@ -3023,8 +3037,10 @@ static int pl330_remove(struct amba_device *adev)
if (!pdmac)
return 0;
- of_dma_controller_free(adev->dev.of_node);
+ if (adev->dev.of_node)
+ of_dma_controller_free(adev->dev.of_node);
+ dma_async_device_unregister(&pdmac->ddma);
amba_set_drvdata(adev, NULL);
/* Idle the DMAC */
diff --git a/trunk/drivers/eisa/eisa-bus.c b/trunk/drivers/eisa/eisa-bus.c
index 806c77bfd434..272a3ec35957 100644
--- a/trunk/drivers/eisa/eisa-bus.c
+++ b/trunk/drivers/eisa/eisa-bus.c
@@ -275,19 +275,18 @@ static int __init eisa_request_resources(struct eisa_root_device *root,
}
if (slot) {
- edev->res[i].name = NULL;
edev->res[i].start = SLOT_ADDRESS(root, slot)
+ (i * 0x400);
edev->res[i].end = edev->res[i].start + 0xff;
edev->res[i].flags = IORESOURCE_IO;
} else {
- edev->res[i].name = NULL;
edev->res[i].start = SLOT_ADDRESS(root, slot)
+ EISA_VENDOR_ID_OFFSET;
edev->res[i].end = edev->res[i].start + 3;
- edev->res[i].flags = IORESOURCE_BUSY;
+ edev->res[i].flags = IORESOURCE_IO | IORESOURCE_BUSY;
}
+ dev_printk(KERN_DEBUG, &edev->dev, "%pR\n", &edev->res[i]);
if (request_resource(root->res, &edev->res[i]))
goto failed;
}
@@ -314,41 +313,40 @@ static int __init eisa_probe(struct eisa_root_device *root)
{
int i, c;
struct eisa_device *edev;
+ char *enabled_str;
- printk(KERN_INFO "EISA: Probing bus %d at %s\n",
- root->bus_nr, dev_name(root->dev));
+ dev_info(root->dev, "Probing EISA bus %d\n", root->bus_nr);
/* First try to get hold of slot 0. If there is no device
* here, simply fail, unless root->force_probe is set. */
edev = kzalloc(sizeof(*edev), GFP_KERNEL);
if (!edev) {
- printk(KERN_ERR "EISA: Couldn't allocate mainboard slot\n");
+ dev_err(root->dev, "EISA: Couldn't allocate mainboard slot\n");
return -ENOMEM;
}
- if (eisa_request_resources(root, edev, 0)) {
- printk(KERN_WARNING \
- "EISA: Cannot allocate resource for mainboard\n");
+ if (eisa_init_device(root, edev, 0)) {
kfree(edev);
if (!root->force_probe)
- return -EBUSY;
+ return -ENODEV;
goto force_probe;
}
- if (eisa_init_device(root, edev, 0)) {
- eisa_release_resources(edev);
+ if (eisa_request_resources(root, edev, 0)) {
+ dev_warn(root->dev,
+ "EISA: Cannot allocate resource for mainboard\n");
kfree(edev);
if (!root->force_probe)
- return -ENODEV;
+ return -EBUSY;
goto force_probe;
}
- printk(KERN_INFO "EISA: Mainboard %s detected.\n", edev->id.sig);
+ dev_info(&edev->dev, "EISA: Mainboard %s detected\n", edev->id.sig);
if (eisa_register_device(edev)) {
- printk(KERN_ERR "EISA: Failed to register %s\n",
- edev->id.sig);
+ dev_err(&edev->dev, "EISA: Failed to register %s\n",
+ edev->id.sig);
eisa_release_resources(edev);
kfree(edev);
}
@@ -358,55 +356,47 @@ static int __init eisa_probe(struct eisa_root_device *root)
for (c = 0, i = 1; i <= root->slots; i++) {
edev = kzalloc(sizeof(*edev), GFP_KERNEL);
if (!edev) {
- printk(KERN_ERR "EISA: Out of memory for slot %d\n", i);
+ dev_err(root->dev, "EISA: Out of memory for slot %d\n",
+ i);
continue;
}
- if (eisa_request_resources(root, edev, i)) {
- printk(KERN_WARNING \
- "Cannot allocate resource for EISA slot %d\n",
- i);
+ if (eisa_init_device(root, edev, i)) {
kfree(edev);
continue;
}
- if (eisa_init_device(root, edev, i)) {
- eisa_release_resources(edev);
+ if (eisa_request_resources(root, edev, i)) {
+ dev_warn(root->dev,
+ "Cannot allocate resource for EISA slot %d\n",
+ i);
kfree(edev);
continue;
}
-
- printk(KERN_INFO "EISA: slot %d : %s detected",
- i, edev->id.sig);
-
- switch (edev->state) {
- case EISA_CONFIG_ENABLED | EISA_CONFIG_FORCED:
- printk(" (forced enabled)");
- break;
-
- case EISA_CONFIG_FORCED:
- printk(" (forced disabled)");
- break;
-
- case 0:
- printk(" (disabled)");
- break;
- }
-
- printk (".\n");
+
+ if (edev->state == (EISA_CONFIG_ENABLED | EISA_CONFIG_FORCED))
+ enabled_str = " (forced enabled)";
+ else if (edev->state == EISA_CONFIG_FORCED)
+ enabled_str = " (forced disabled)";
+ else if (edev->state == 0)
+ enabled_str = " (disabled)";
+ else
+ enabled_str = "";
+
+ dev_info(&edev->dev, "EISA: slot %d: %s detected%s\n", i,
+ edev->id.sig, enabled_str);
c++;
if (eisa_register_device(edev)) {
- printk(KERN_ERR "EISA: Failed to register %s\n",
- edev->id.sig);
+ dev_err(&edev->dev, "EISA: Failed to register %s\n",
+ edev->id.sig);
eisa_release_resources(edev);
kfree(edev);
}
}
- printk(KERN_INFO "EISA: Detected %d card%s.\n", c, c == 1 ? "" : "s");
-
+ dev_info(root->dev, "EISA: Detected %d card%s\n", c, c == 1 ? "" : "s");
return 0;
}
diff --git a/trunk/drivers/eisa/pci_eisa.c b/trunk/drivers/eisa/pci_eisa.c
index cdae207028a7..a333bf3517de 100644
--- a/trunk/drivers/eisa/pci_eisa.c
+++ b/trunk/drivers/eisa/pci_eisa.c
@@ -19,48 +19,72 @@
/* There is only *one* pci_eisa device per machine, right ? */
static struct eisa_root_device pci_eisa_root;
-static int __init pci_eisa_init(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int __init pci_eisa_init(struct pci_dev *pdev)
{
- int rc;
+ int rc, i;
+ struct resource *res, *bus_res = NULL;
if ((rc = pci_enable_device (pdev))) {
- printk (KERN_ERR "pci_eisa : Could not enable device %s\n",
- pci_name(pdev));
+ dev_err(&pdev->dev, "Could not enable device\n");
return rc;
}
+ /*
+ * The Intel 82375 PCI-EISA bridge is a subtractive-decode PCI
+ * device, so the resources available on EISA are the same as those
+ * available on the 82375 bus. This works the same as a PCI-PCI
+ * bridge in subtractive-decode mode (see pci_read_bridge_bases()).
+ * We assume other PCI-EISA bridges are similar.
+ *
+ * eisa_root_register() can only deal with a single io port resource,
+ * so we use the first valid io port resource.
+ */
+ pci_bus_for_each_resource(pdev->bus, res, i)
+ if (res && (res->flags & IORESOURCE_IO)) {
+ bus_res = res;
+ break;
+ }
+
+ if (!bus_res) {
+ dev_err(&pdev->dev, "No resources available\n");
+ return -1;
+ }
+
pci_eisa_root.dev = &pdev->dev;
- pci_eisa_root.res = pdev->bus->resource[0];
- pci_eisa_root.bus_base_addr = pdev->bus->resource[0]->start;
+ pci_eisa_root.res = bus_res;
+ pci_eisa_root.bus_base_addr = bus_res->start;
pci_eisa_root.slots = EISA_MAX_SLOTS;
pci_eisa_root.dma_mask = pdev->dma_mask;
dev_set_drvdata(pci_eisa_root.dev, &pci_eisa_root);
if (eisa_root_register (&pci_eisa_root)) {
- printk (KERN_ERR "pci_eisa : Could not register EISA root\n");
+ dev_err(&pdev->dev, "Could not register EISA root\n");
return -1;
}
return 0;
}
-static struct pci_device_id pci_eisa_pci_tbl[] = {
- { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_BRIDGE_EISA << 8, 0xffff00, 0 },
- { 0, }
-};
+/*
+ * We have to call pci_eisa_init_early() before pnpacpi_init()/isapnp_init().
+ * Otherwise pnp resource will get enabled early and could prevent eisa
+ * from being initialized.
+ * Also need to make sure pci_eisa_init_early() is called after
+ * x86/pci_subsys_init().
+ * So need to use subsys_initcall_sync with it.
+ */
+static int __init pci_eisa_init_early(void)
+{
+ struct pci_dev *dev = NULL;
+ int ret;
-static struct pci_driver __refdata pci_eisa_driver = {
- .name = "pci_eisa",
- .id_table = pci_eisa_pci_tbl,
- .probe = pci_eisa_init,
-};
+ for_each_pci_dev(dev)
+ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_EISA) {
+ ret = pci_eisa_init(dev);
+ if (ret)
+ return ret;
+ }
-static int __init pci_eisa_init_module (void)
-{
- return pci_register_driver (&pci_eisa_driver);
+ return 0;
}
-
-device_initcall(pci_eisa_init_module);
-MODULE_DEVICE_TABLE(pci, pci_eisa_pci_tbl);
+subsys_initcall_sync(pci_eisa_init_early);
diff --git a/trunk/drivers/firmware/Kconfig b/trunk/drivers/firmware/Kconfig
index 42c759a4d047..3e532002e4d1 100644
--- a/trunk/drivers/firmware/Kconfig
+++ b/trunk/drivers/firmware/Kconfig
@@ -39,6 +39,7 @@ config FIRMWARE_MEMMAP
config EFI_VARS
tristate "EFI Variable Support via sysfs"
depends on EFI
+ select UCS2_STRING
default n
help
If you say Y here, you are able to get EFI (Extensible Firmware
diff --git a/trunk/drivers/firmware/efivars.c b/trunk/drivers/firmware/efivars.c
index 7acafb80fd4c..f4baa11d3644 100644
--- a/trunk/drivers/firmware/efivars.c
+++ b/trunk/drivers/firmware/efivars.c
@@ -80,6 +80,7 @@
#include
#include
#include
+#include <linux/ucs2_string.h>
#include
#include
@@ -172,51 +173,6 @@ static void efivar_update_sysfs_entries(struct work_struct *);
static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries);
static bool efivar_wq_enabled = true;
-/* Return the number of unicode characters in data */
-static unsigned long
-utf16_strnlen(efi_char16_t *s, size_t maxlength)
-{
- unsigned long length = 0;
-
- while (*s++ != 0 && length < maxlength)
- length++;
- return length;
-}
-
-static inline unsigned long
-utf16_strlen(efi_char16_t *s)
-{
- return utf16_strnlen(s, ~0UL);
-}
-
-/*
- * Return the number of bytes is the length of this string
- * Note: this is NOT the same as the number of unicode characters
- */
-static inline unsigned long
-utf16_strsize(efi_char16_t *data, unsigned long maxlength)
-{
- return utf16_strnlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t);
-}
-
-static inline int
-utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len)
-{
- while (1) {
- if (len == 0)
- return 0;
- if (*a < *b)
- return -1;
- if (*a > *b)
- return 1;
- if (*a == 0) /* implies *b == 0 */
- return 0;
- a++;
- b++;
- len--;
- }
-}
-
static bool
validate_device_path(struct efi_variable *var, int match, u8 *buffer,
unsigned long len)
@@ -268,7 +224,7 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer,
u16 filepathlength;
int i, desclength = 0, namelen;
- namelen = utf16_strnlen(var->VariableName, sizeof(var->VariableName));
+ namelen = ucs2_strnlen(var->VariableName, sizeof(var->VariableName));
/* Either "Boot" or "Driver" followed by four digits of hex */
for (i = match; i < match+4; i++) {
@@ -291,7 +247,7 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer,
* There's no stored length for the description, so it has to be
* found by hand
*/
- desclength = utf16_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
+ desclength = ucs2_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
/* Each boot entry must have a descriptor */
if (!desclength)
@@ -436,24 +392,12 @@ static efi_status_t
check_var_size_locked(struct efivars *efivars, u32 attributes,
unsigned long size)
{
- u64 storage_size, remaining_size, max_size;
- efi_status_t status;
const struct efivar_operations *fops = efivars->ops;
- if (!efivars->ops->query_variable_info)
+ if (!efivars->ops->query_variable_store)
return EFI_UNSUPPORTED;
- status = fops->query_variable_info(attributes, &storage_size,
- &remaining_size, &max_size);
-
- if (status != EFI_SUCCESS)
- return status;
-
- if (!storage_size || size > remaining_size || size > max_size ||
- (remaining_size - size) < (storage_size / 2))
- return EFI_OUT_OF_RESOURCES;
-
- return status;
+ return fops->query_variable_store(attributes, size);
}
@@ -593,7 +537,7 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
spin_lock_irq(&efivars->lock);
status = check_var_size_locked(efivars, new_var->Attributes,
- new_var->DataSize + utf16_strsize(new_var->VariableName, 1024));
+ new_var->DataSize + ucs2_strsize(new_var->VariableName, 1024));
if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED)
status = efivars->ops->set_variable(new_var->VariableName,
@@ -771,7 +715,7 @@ static ssize_t efivarfs_file_write(struct file *file,
* QueryVariableInfo() isn't supported by the firmware.
*/
- varsize = datasize + utf16_strsize(var->var.VariableName, 1024);
+ varsize = datasize + ucs2_strsize(var->var.VariableName, 1024);
status = check_var_size(efivars, attributes, varsize);
if (status != EFI_SUCCESS) {
@@ -1223,7 +1167,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
inode = NULL;
- len = utf16_strlen(entry->var.VariableName);
+ len = ucs2_strlen(entry->var.VariableName);
/* name, plus '-', plus GUID, plus NUL*/
name = kmalloc(len + 1 + GUID_LEN + 1, GFP_ATOMIC);
@@ -1481,8 +1425,8 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
if (efi_guidcmp(entry->var.VendorGuid, vendor))
continue;
- if (utf16_strncmp(entry->var.VariableName, efi_name,
- utf16_strlen(efi_name))) {
+ if (ucs2_strncmp(entry->var.VariableName, efi_name,
+ ucs2_strlen(efi_name))) {
/*
* Check if an old format,
* which doesn't support holding
@@ -1494,8 +1438,8 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
for (i = 0; i < DUMP_NAME_LEN; i++)
efi_name_old[i] = name_old[i];
- if (utf16_strncmp(entry->var.VariableName, efi_name_old,
- utf16_strlen(efi_name_old)))
+ if (ucs2_strncmp(entry->var.VariableName, efi_name_old,
+ ucs2_strlen(efi_name_old)))
continue;
}
@@ -1573,8 +1517,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
* Does this variable already exist?
*/
list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
- strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024);
- strsize2 = utf16_strsize(new_var->VariableName, 1024);
+ strsize1 = ucs2_strsize(search_efivar->var.VariableName, 1024);
+ strsize2 = ucs2_strsize(new_var->VariableName, 1024);
if (strsize1 == strsize2 &&
!memcmp(&(search_efivar->var.VariableName),
new_var->VariableName, strsize1) &&
@@ -1590,7 +1534,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
}
status = check_var_size_locked(efivars, new_var->Attributes,
- new_var->DataSize + utf16_strsize(new_var->VariableName, 1024));
+ new_var->DataSize + ucs2_strsize(new_var->VariableName, 1024));
if (status && status != EFI_UNSUPPORTED) {
spin_unlock_irq(&efivars->lock);
@@ -1614,7 +1558,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
/* Create the entry in sysfs. Locking is not required here */
status = efivar_create_sysfs_entry(efivars,
- utf16_strsize(new_var->VariableName,
+ ucs2_strsize(new_var->VariableName,
1024),
new_var->VariableName,
&new_var->VendorGuid);
@@ -1644,8 +1588,8 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
* Does this variable already exist?
*/
list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
- strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024);
- strsize2 = utf16_strsize(del_var->VariableName, 1024);
+ strsize1 = ucs2_strsize(search_efivar->var.VariableName, 1024);
+ strsize2 = ucs2_strsize(del_var->VariableName, 1024);
if (strsize1 == strsize2 &&
!memcmp(&(search_efivar->var.VariableName),
del_var->VariableName, strsize1) &&
@@ -1684,16 +1628,17 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
return count;
}
-static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor)
+static bool variable_is_present(struct efivars *efivars,
+ efi_char16_t *variable_name,
+ efi_guid_t *vendor)
{
struct efivar_entry *entry, *n;
- struct efivars *efivars = &__efivars;
unsigned long strsize1, strsize2;
bool found = false;
- strsize1 = utf16_strsize(variable_name, 1024);
+ strsize1 = ucs2_strsize(variable_name, 1024);
list_for_each_entry_safe(entry, n, &efivars->list, list) {
- strsize2 = utf16_strsize(entry->var.VariableName, 1024);
+ strsize2 = ucs2_strsize(entry->var.VariableName, 1024);
if (strsize1 == strsize2 &&
!memcmp(variable_name, &(entry->var.VariableName),
strsize2) &&
@@ -1759,8 +1704,8 @@ static void efivar_update_sysfs_entries(struct work_struct *work)
if (status != EFI_SUCCESS) {
break;
} else {
- if (!variable_is_present(variable_name,
- &vendor)) {
+ if (!variable_is_present(efivars,
+ variable_name, &vendor)) {
found = true;
break;
}
@@ -2064,7 +2009,8 @@ int register_efivars(struct efivars *efivars,
* we'll ever see a different variable name,
* and may end up looping here forever.
*/
- if (variable_is_present(variable_name, &vendor_guid)) {
+ if (variable_is_present(efivars, variable_name,
+ &vendor_guid)) {
dup_variable_bug(variable_name, &vendor_guid,
variable_name_size);
status = EFI_NOT_FOUND;
@@ -2131,7 +2077,7 @@ efivars_init(void)
ops.get_variable = efi.get_variable;
ops.set_variable = efi.set_variable;
ops.get_next_variable = efi.get_next_variable;
- ops.query_variable_info = efi.query_variable_info;
+ ops.query_variable_store = efi_query_variable_store;
error = register_efivars(&__efivars, &ops, efi_kobj);
if (error)
diff --git a/trunk/drivers/gpio/gpio-ich.c b/trunk/drivers/gpio/gpio-ich.c
index f9dbd503fc40..de3c317bd3e2 100644
--- a/trunk/drivers/gpio/gpio-ich.c
+++ b/trunk/drivers/gpio/gpio-ich.c
@@ -214,7 +214,7 @@ static int ichx_gpio_request(struct gpio_chip *chip, unsigned nr)
* If it can't be trusted, assume that the pin can be used as a GPIO.
*/
if (ichx_priv.desc->use_sel_ignore[nr / 32] & (1 << (nr & 0x1f)))
- return 1;
+ return 0;
return ichx_read_bit(GPIO_USE_SEL, nr) ? 0 : -ENODEV;
}
diff --git a/trunk/drivers/gpio/gpio-pca953x.c b/trunk/drivers/gpio/gpio-pca953x.c
index 24059462c87f..9391cf16e990 100644
--- a/trunk/drivers/gpio/gpio-pca953x.c
+++ b/trunk/drivers/gpio/gpio-pca953x.c
@@ -575,7 +575,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
chip->gpio_chip.ngpio,
irq_base,
&pca953x_irq_simple_ops,
- NULL);
+ chip);
if (!chip->domain)
return -ENODEV;
diff --git a/trunk/drivers/gpio/gpio-pxa.c b/trunk/drivers/gpio/gpio-pxa.c
index 9cc108d2b770..8325f580c0f1 100644
--- a/trunk/drivers/gpio/gpio-pxa.c
+++ b/trunk/drivers/gpio/gpio-pxa.c
@@ -642,7 +642,12 @@ static struct platform_driver pxa_gpio_driver = {
.of_match_table = of_match_ptr(pxa_gpio_dt_ids),
},
};
-module_platform_driver(pxa_gpio_driver);
+
+static int __init pxa_gpio_init(void)
+{
+ return platform_driver_register(&pxa_gpio_driver);
+}
+postcore_initcall(pxa_gpio_init);
#ifdef CONFIG_PM
static int pxa_gpio_suspend(void)
diff --git a/trunk/drivers/gpio/gpio-stmpe.c b/trunk/drivers/gpio/gpio-stmpe.c
index 770476a9da87..3ce5bc38ac31 100644
--- a/trunk/drivers/gpio/gpio-stmpe.c
+++ b/trunk/drivers/gpio/gpio-stmpe.c
@@ -307,11 +307,15 @@ static const struct irq_domain_ops stmpe_gpio_irq_simple_ops = {
.xlate = irq_domain_xlate_twocell,
};
-static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio)
+static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio,
+ struct device_node *np)
{
- int base = stmpe_gpio->irq_base;
+ int base = 0;
- stmpe_gpio->domain = irq_domain_add_simple(NULL,
+ if (!np)
+ base = stmpe_gpio->irq_base;
+
+ stmpe_gpio->domain = irq_domain_add_simple(np,
stmpe_gpio->chip.ngpio, base,
&stmpe_gpio_irq_simple_ops, stmpe_gpio);
if (!stmpe_gpio->domain) {
@@ -346,6 +350,9 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
stmpe_gpio->chip = template_chip;
stmpe_gpio->chip.ngpio = stmpe->num_gpios;
stmpe_gpio->chip.dev = &pdev->dev;
+#ifdef CONFIG_OF
+ stmpe_gpio->chip.of_node = np;
+#endif
stmpe_gpio->chip.base = pdata ? pdata->gpio_base : -1;
if (pdata)
@@ -366,7 +373,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
goto out_free;
if (irq >= 0) {
- ret = stmpe_gpio_irq_init(stmpe_gpio);
+ ret = stmpe_gpio_irq_init(stmpe_gpio, np);
if (ret)
goto out_disable;
diff --git a/trunk/drivers/gpu/drm/drm_crtc.c b/trunk/drivers/gpu/drm/drm_crtc.c
index 792c3e3795ca..dd64a06dc5b4 100644
--- a/trunk/drivers/gpu/drm/drm_crtc.c
+++ b/trunk/drivers/gpu/drm/drm_crtc.c
@@ -2326,7 +2326,6 @@ int drm_mode_addfb(struct drm_device *dev,
fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("could not create framebuffer\n");
- drm_modeset_unlock_all(dev);
return PTR_ERR(fb);
}
@@ -2506,7 +2505,6 @@ int drm_mode_addfb2(struct drm_device *dev,
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("could not create framebuffer\n");
- drm_modeset_unlock_all(dev);
return PTR_ERR(fb);
}
diff --git a/trunk/drivers/gpu/drm/drm_fb_helper.c b/trunk/drivers/gpu/drm/drm_fb_helper.c
index 59d6b9bf204b..892ff9f95975 100644
--- a/trunk/drivers/gpu/drm/drm_fb_helper.c
+++ b/trunk/drivers/gpu/drm/drm_fb_helper.c
@@ -1544,10 +1544,10 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
if (!fb_helper->fb)
return 0;
- drm_modeset_lock_all(dev);
+ mutex_lock(&fb_helper->dev->mode_config.mutex);
if (!drm_fb_helper_is_bound(fb_helper)) {
fb_helper->delayed_hotplug = true;
- drm_modeset_unlock_all(dev);
+ mutex_unlock(&fb_helper->dev->mode_config.mutex);
return 0;
}
DRM_DEBUG_KMS("\n");
@@ -1558,9 +1558,11 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
max_height);
+ mutex_unlock(&fb_helper->dev->mode_config.mutex);
+
+ drm_modeset_lock_all(dev);
drm_setup_crtcs(fb_helper);
drm_modeset_unlock_all(dev);
-
drm_fb_helper_set_par(fb_helper->fbdev);
return 0;
diff --git a/trunk/drivers/gpu/drm/drm_fops.c b/trunk/drivers/gpu/drm/drm_fops.c
index 13fdcd10a605..429e07d0b0f1 100644
--- a/trunk/drivers/gpu/drm/drm_fops.c
+++ b/trunk/drivers/gpu/drm/drm_fops.c
@@ -123,6 +123,7 @@ int drm_open(struct inode *inode, struct file *filp)
int retcode = 0;
int need_setup = 0;
struct address_space *old_mapping;
+ struct address_space *old_imapping;
minor = idr_find(&drm_minors_idr, minor_id);
if (!minor)
@@ -137,6 +138,7 @@ int drm_open(struct inode *inode, struct file *filp)
if (!dev->open_count++)
need_setup = 1;
mutex_lock(&dev->struct_mutex);
+ old_imapping = inode->i_mapping;
old_mapping = dev->dev_mapping;
if (old_mapping == NULL)
dev->dev_mapping = &inode->i_data;
@@ -159,8 +161,8 @@ int drm_open(struct inode *inode, struct file *filp)
err_undo:
mutex_lock(&dev->struct_mutex);
- filp->f_mapping = old_mapping;
- inode->i_mapping = old_mapping;
+ filp->f_mapping = old_imapping;
+ inode->i_mapping = old_imapping;
iput(container_of(dev->dev_mapping, struct inode, i_data));
dev->dev_mapping = old_mapping;
mutex_unlock(&dev->struct_mutex);
diff --git a/trunk/drivers/gpu/drm/drm_modes.c b/trunk/drivers/gpu/drm/drm_modes.c
index 04fa6f1808d1..f83f0719922e 100644
--- a/trunk/drivers/gpu/drm/drm_modes.c
+++ b/trunk/drivers/gpu/drm/drm_modes.c
@@ -506,7 +506,7 @@ drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
}
EXPORT_SYMBOL(drm_gtf_mode);
-#if IS_ENABLED(CONFIG_VIDEOMODE)
+#ifdef CONFIG_VIDEOMODE_HELPERS
int drm_display_mode_from_videomode(const struct videomode *vm,
struct drm_display_mode *dmode)
{
@@ -523,26 +523,25 @@ int drm_display_mode_from_videomode(const struct videomode *vm,
dmode->clock = vm->pixelclock / 1000;
dmode->flags = 0;
- if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH)
+ if (vm->flags & DISPLAY_FLAGS_HSYNC_HIGH)
dmode->flags |= DRM_MODE_FLAG_PHSYNC;
- else if (vm->dmt_flags & VESA_DMT_HSYNC_LOW)
+ else if (vm->flags & DISPLAY_FLAGS_HSYNC_LOW)
dmode->flags |= DRM_MODE_FLAG_NHSYNC;
- if (vm->dmt_flags & VESA_DMT_VSYNC_HIGH)
+ if (vm->flags & DISPLAY_FLAGS_VSYNC_HIGH)
dmode->flags |= DRM_MODE_FLAG_PVSYNC;
- else if (vm->dmt_flags & VESA_DMT_VSYNC_LOW)
+ else if (vm->flags & DISPLAY_FLAGS_VSYNC_LOW)
dmode->flags |= DRM_MODE_FLAG_NVSYNC;
- if (vm->data_flags & DISPLAY_FLAGS_INTERLACED)
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED)
dmode->flags |= DRM_MODE_FLAG_INTERLACE;
- if (vm->data_flags & DISPLAY_FLAGS_DOUBLESCAN)
+ if (vm->flags & DISPLAY_FLAGS_DOUBLESCAN)
dmode->flags |= DRM_MODE_FLAG_DBLSCAN;
drm_mode_set_name(dmode);
return 0;
}
EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode);
-#endif
-#if IS_ENABLED(CONFIG_OF_VIDEOMODE)
+#ifdef CONFIG_OF
/**
* of_get_drm_display_mode - get a drm_display_mode from devicetree
* @np: device_node with the timing specification
@@ -572,7 +571,8 @@ int of_get_drm_display_mode(struct device_node *np,
return 0;
}
EXPORT_SYMBOL_GPL(of_get_drm_display_mode);
-#endif
+#endif /* CONFIG_OF */
+#endif /* CONFIG_VIDEOMODE_HELPERS */
/**
* drm_mode_set_name - set the name on a mode
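
For context on the conversion these hunks touch: struct videomode carries active size, porches and sync length, while a drm_display_mode stores running totals (sync start, sync end, total). A small standalone sketch of that arithmetic for the horizontal timings, using hypothetical panel numbers; the vertical side follows the same pattern:

    #include <stdio.h>

    struct toy_videomode {
        unsigned int hactive, hfront_porch, hsync_len, hback_porch;
    };

    int main(void)
    {
        /* Hypothetical 800-pixel-wide panel timings. */
        struct toy_videomode vm = {
            .hactive = 800, .hfront_porch = 40,
            .hsync_len = 48, .hback_porch = 88,
        };
        unsigned int hdisplay    = vm.hactive;
        unsigned int hsync_start = hdisplay + vm.hfront_porch;
        unsigned int hsync_end   = hsync_start + vm.hsync_len;
        unsigned int htotal      = hsync_end + vm.hback_porch;

        printf("hdisplay=%u hsync_start=%u hsync_end=%u htotal=%u\n",
               hdisplay, hsync_start, hsync_end, htotal);
        return 0;
    }
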
diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3b11ab0fbc96..9a48e1a2d417 100644
--- a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -57,7 +57,7 @@ eb_create(struct drm_i915_gem_execbuffer2 *args)
if (eb == NULL) {
int size = args->buffer_count;
int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
- BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
+ BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
while (count > 2*size)
count >>= 1;
eb = kzalloc(count*sizeof(struct hlist_head) +
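
BUILD_BUG_ON_NOT_POWER_OF_2() states the intent of the previous open-coded check directly: the hash-bucket count derived from PAGE_SIZE must be a power of two. A hedged userspace analogue using C11 _Static_assert (the kernel macro is implemented differently; hlist_head_stub and the fixed TOY_PAGE_SIZE below are stand-ins):

    #include <stdio.h>

    #define TOY_PAGE_SIZE 4096

    struct hlist_head_stub { void *first; };

    /* A power of two has exactly one bit set: n != 0 && (n & (n - 1)) == 0. */
    #define STATIC_ASSERT_POWER_OF_2(n) \
        _Static_assert((n) != 0 && (((n) & ((n) - 1)) == 0), \
                       #n " is not a power of 2")

    STATIC_ASSERT_POWER_OF_2(TOY_PAGE_SIZE / sizeof(struct hlist_head_stub));

    int main(void)
    {
        printf("buckets per page: %zu\n",
               TOY_PAGE_SIZE / sizeof(struct hlist_head_stub));
        return 0;
    }
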
diff --git a/trunk/drivers/gpu/drm/i915/intel_crt.c b/trunk/drivers/gpu/drm/i915/intel_crt.c
index 32a3693905ec..1ce45a0a2d3e 100644
--- a/trunk/drivers/gpu/drm/i915/intel_crt.c
+++ b/trunk/drivers/gpu/drm/i915/intel_crt.c
@@ -45,6 +45,9 @@
struct intel_crt {
struct intel_encoder base;
+ /* DPMS state is stored in the connector, which we need in the
+ * encoder's enable/disable callbacks */
+ struct intel_connector *connector;
bool force_hotplug_required;
u32 adpa_reg;
};
@@ -81,29 +84,6 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
return true;
}
-static void intel_disable_crt(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
- struct intel_crt *crt = intel_encoder_to_crt(encoder);
- u32 temp;
-
- temp = I915_READ(crt->adpa_reg);
- temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
- temp &= ~ADPA_DAC_ENABLE;
- I915_WRITE(crt->adpa_reg, temp);
-}
-
-static void intel_enable_crt(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
- struct intel_crt *crt = intel_encoder_to_crt(encoder);
- u32 temp;
-
- temp = I915_READ(crt->adpa_reg);
- temp |= ADPA_DAC_ENABLE;
- I915_WRITE(crt->adpa_reg, temp);
-}
-
/* Note: The caller is required to filter out dpms modes not supported by the
* platform. */
static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
@@ -135,6 +115,19 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
I915_WRITE(crt->adpa_reg, temp);
}
+static void intel_disable_crt(struct intel_encoder *encoder)
+{
+ intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void intel_enable_crt(struct intel_encoder *encoder)
+{
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+
+ intel_crt_set_dpms(encoder, crt->connector->base.dpms);
+}
+
+
static void intel_crt_dpms(struct drm_connector *connector, int mode)
{
struct drm_device *dev = connector->dev;
@@ -746,6 +739,7 @@ void intel_crt_init(struct drm_device *dev)
}
connector = &intel_connector->base;
+ crt->connector = intel_connector;
drm_connector_init(dev, &intel_connector->base,
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
diff --git a/trunk/drivers/gpu/drm/i915/intel_dp.c b/trunk/drivers/gpu/drm/i915/intel_dp.c
index d7d4afe01341..8fc93f90a7cd 100644
--- a/trunk/drivers/gpu/drm/i915/intel_dp.c
+++ b/trunk/drivers/gpu/drm/i915/intel_dp.c
@@ -2559,12 +2559,15 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
i2c_del_adapter(&intel_dp->adapter);
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
+ mutex_lock(&dev->mode_config.mutex);
ironlake_panel_vdd_off_sync(intel_dp);
+ mutex_unlock(&dev->mode_config.mutex);
}
kfree(intel_dig_port);
}
diff --git a/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c b/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c
index fe22bb780e1d..78d8e919509f 100644
--- a/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -751,8 +751,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
int i;
unsigned char misc = 0;
unsigned char ext_vga[6];
- unsigned char ext_vga_index24;
- unsigned char dac_index90 = 0;
u8 bppshift;
static unsigned char dacvalue[] = {
@@ -803,7 +801,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
option2 = 0x0000b000;
break;
case G200_ER:
- dac_index90 = 0;
break;
}
@@ -852,10 +849,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
WREG_DAC(i, dacvalue[i]);
}
- if (mdev->type == G200_ER) {
- WREG_DAC(0x90, dac_index90);
- }
-
+ if (mdev->type == G200_ER)
+ WREG_DAC(0x90, 0);
if (option)
pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option);
@@ -952,8 +947,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
if (mdev->type == G200_WB)
ext_vga[1] |= 0x88;
- ext_vga_index24 = 0x05;
-
/* Set pixel clocks */
misc = 0x2d;
WREG8(MGA_MISC_OUT, misc);
@@ -965,7 +958,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
}
if (mdev->type == G200_ER)
- WREG_ECRT(24, ext_vga_index24);
+ WREG_ECRT(0x24, 0x5);
if (mdev->type == G200_EV) {
WREG_ECRT(6, 0);
diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index e816f06637a7..0e2c1a4f1659 100644
--- a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -248,6 +248,22 @@ nouveau_bios_shadow_pci(struct nouveau_bios *bios)
}
}
+static void
+nouveau_bios_shadow_platform(struct nouveau_bios *bios)
+{
+ struct pci_dev *pdev = nv_device(bios)->pdev;
+ size_t size;
+
+ void __iomem *rom = pci_platform_rom(pdev, &size);
+ if (rom && size) {
+ bios->data = kmalloc(size, GFP_KERNEL);
+ if (bios->data) {
+ memcpy_fromio(bios->data, rom, size);
+ bios->size = size;
+ }
+ }
+}
+
static int
nouveau_bios_score(struct nouveau_bios *bios, const bool writeable)
{
@@ -288,6 +304,7 @@ nouveau_bios_shadow(struct nouveau_bios *bios)
{ "PROM", nouveau_bios_shadow_prom, false, 0, 0, NULL },
{ "ACPI", nouveau_bios_shadow_acpi, true, 0, 0, NULL },
{ "PCIROM", nouveau_bios_shadow_pci, true, 0, 0, NULL },
+ { "PLATFORM", nouveau_bios_shadow_platform, true, 0, 0, NULL },
{}
};
struct methods *mthd, *best;
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_abi16.c b/trunk/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 3b6dc883e150..5eb3e0da7c6e 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -391,7 +391,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_device *device = nv_device(drm->device);
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
- struct nouveau_abi16_chan *chan, *temp;
+ struct nouveau_abi16_chan *chan = NULL, *temp;
struct nouveau_abi16_ntfy *ntfy;
struct nouveau_object *object;
struct nv_dma_class args = {};
@@ -404,10 +404,11 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
if (unlikely(nv_device(abi16->device)->card_type >= NV_C0))
return nouveau_abi16_put(abi16, -EINVAL);
- list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
- if (chan->chan->handle == (NVDRM_CHAN | info->channel))
+ list_for_each_entry(temp, &abi16->channels, head) {
+ if (temp->chan->handle == (NVDRM_CHAN | info->channel)) {
+ chan = temp;
break;
- chan = NULL;
+ }
}
if (!chan)
@@ -459,17 +460,18 @@ nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_gpuobj_free *fini = data;
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
- struct nouveau_abi16_chan *chan, *temp;
+ struct nouveau_abi16_chan *chan = NULL, *temp;
struct nouveau_abi16_ntfy *ntfy;
int ret;
if (unlikely(!abi16))
return -ENOMEM;
- list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
- if (chan->chan->handle == (NVDRM_CHAN | fini->channel))
+ list_for_each_entry(temp, &abi16->channels, head) {
+ if (temp->chan->handle == (NVDRM_CHAN | fini->channel)) {
+ chan = temp;
break;
- chan = NULL;
+ }
}
if (!chan)
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_drm.c b/trunk/drivers/gpu/drm/nouveau/nouveau_drm.c
index d1099365bfc1..c95decf543e9 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -71,12 +71,26 @@ module_param_named(modeset, nouveau_modeset, int, 0400);
static struct drm_driver driver;
+static int
+nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head)
+{
+ struct nouveau_drm *drm =
+ container_of(event, struct nouveau_drm, vblank[head]);
+ drm_handle_vblank(drm->dev, head);
+ return NVKM_EVENT_KEEP;
+}
+
static int
nouveau_drm_vblank_enable(struct drm_device *dev, int head)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_disp *pdisp = nouveau_disp(drm->device);
- nouveau_event_get(pdisp->vblank, head, &drm->vblank);
+
+ if (WARN_ON_ONCE(head >= ARRAY_SIZE(drm->vblank)))
+ return -EIO;
+ WARN_ON_ONCE(drm->vblank[head].func);
+ drm->vblank[head].func = nouveau_drm_vblank_handler;
+ nouveau_event_get(pdisp->vblank, head, &drm->vblank[head]);
return 0;
}
@@ -85,16 +99,11 @@ nouveau_drm_vblank_disable(struct drm_device *dev, int head)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_disp *pdisp = nouveau_disp(drm->device);
- nouveau_event_put(pdisp->vblank, head, &drm->vblank);
-}
-
-static int
-nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head)
-{
- struct nouveau_drm *drm =
- container_of(event, struct nouveau_drm, vblank);
- drm_handle_vblank(drm->dev, head);
- return NVKM_EVENT_KEEP;
+ if (drm->vblank[head].func)
+ nouveau_event_put(pdisp->vblank, head, &drm->vblank[head]);
+ else
+ WARN_ON_ONCE(1);
+ drm->vblank[head].func = NULL;
}
static u64
@@ -292,7 +301,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = drm;
drm->dev = dev;
- drm->vblank.func = nouveau_drm_vblank_handler;
INIT_LIST_HEAD(&drm->clients);
spin_lock_init(&drm->tile.lock);
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_drm.h b/trunk/drivers/gpu/drm/nouveau/nouveau_drm.h
index b25df374c901..9c39bafbef2c 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -113,7 +113,7 @@ struct nouveau_drm {
struct nvbios vbios;
struct nouveau_display *display;
struct backlight_device *backlight;
- struct nouveau_eventh vblank;
+ struct nouveau_eventh vblank[4];
/* power management */
struct nouveau_pm *pm;
diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_display.c b/trunk/drivers/gpu/drm/nouveau/nv50_display.c
index 7f0e6c3f37d1..1ddc03e51bf4 100644
--- a/trunk/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/trunk/drivers/gpu/drm/nouveau/nv50_display.c
@@ -479,7 +479,7 @@ nv50_display_flip_wait(void *data)
{
struct nv50_display_flip *flip = data;
if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) ==
- flip->chan->data);
+ flip->chan->data)
return true;
usleep_range(1, 2);
return false;
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_bios.c b/trunk/drivers/gpu/drm/radeon/radeon_bios.c
index b8015913d382..fa3c56fba294 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_bios.c
@@ -99,6 +99,29 @@ static bool radeon_read_bios(struct radeon_device *rdev)
return true;
}
+static bool radeon_read_platform_bios(struct radeon_device *rdev)
+{
+ uint8_t __iomem *bios;
+ size_t size;
+
+ rdev->bios = NULL;
+
+ bios = pci_platform_rom(rdev->pdev, &size);
+ if (!bios) {
+ return false;
+ }
+
+ if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+ return false;
+ }
+ rdev->bios = kmemdup(bios, size, GFP_KERNEL);
+ if (rdev->bios == NULL) {
+ return false;
+ }
+
+ return true;
+}
+
#ifdef CONFIG_ACPI
/* ATRM is used to get the BIOS on the discrete cards in
* dual-gpu systems.
@@ -620,6 +643,9 @@ bool radeon_get_bios(struct radeon_device *rdev)
if (r == false) {
r = radeon_read_disabled_bios(rdev);
}
+ if (r == false) {
+ r = radeon_read_platform_bios(rdev);
+ }
if (r == false || rdev->bios == NULL) {
DRM_ERROR("Unable to locate a BIOS ROM\n");
rdev->bios = NULL;
diff --git a/trunk/drivers/gpu/drm/tilcdc/Kconfig b/trunk/drivers/gpu/drm/tilcdc/Kconfig
index d24d04013476..e461e9972455 100644
--- a/trunk/drivers/gpu/drm/tilcdc/Kconfig
+++ b/trunk/drivers/gpu/drm/tilcdc/Kconfig
@@ -4,8 +4,7 @@ config DRM_TILCDC
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
- select OF_VIDEOMODE
- select OF_DISPLAY_TIMING
+ select VIDEOMODE_HELPERS
select BACKLIGHT_CLASS_DEVICE
help
Choose this option if you have a TI SoC with LCDC display
diff --git a/trunk/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/trunk/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 580b74e2022b..90ee49786372 100644
--- a/trunk/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/trunk/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -173,7 +173,7 @@ static int panel_connector_get_modes(struct drm_connector *connector)
struct drm_display_mode *mode = drm_mode_create(dev);
struct videomode vm;
- if (videomode_from_timing(timings, &vm, i))
+ if (videomode_from_timings(timings, &vm, i))
break;
drm_display_mode_from_videomode(&vm, mode);
diff --git a/trunk/drivers/gpu/drm/udl/udl_connector.c b/trunk/drivers/gpu/drm/udl/udl_connector.c
index fe5cdbcf2636..b44d548c56f8 100644
--- a/trunk/drivers/gpu/drm/udl/udl_connector.c
+++ b/trunk/drivers/gpu/drm/udl/udl_connector.c
@@ -61,6 +61,10 @@ static int udl_get_modes(struct drm_connector *connector)
int ret;
edid = (struct edid *)udl_get_edid(udl);
+ if (!edid) {
+ drm_mode_connector_update_edid_property(connector, NULL);
+ return 0;
+ }
/*
* We only read the main block, but if the monitor reports extension
diff --git a/trunk/drivers/hid/hid-core.c b/trunk/drivers/hid/hid-core.c
index 512b01c04ea7..aa341d135867 100644
--- a/trunk/drivers/hid/hid-core.c
+++ b/trunk/drivers/hid/hid-core.c
@@ -2077,7 +2077,6 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MASTERKIT, USB_DEVICE_ID_MASTERKIT_MA901RADIO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
@@ -2244,6 +2243,18 @@ bool hid_ignore(struct hid_device *hdev)
hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST))
return true;
break;
+ case USB_VENDOR_ID_ATMEL_V_USB:
+ /* Masterkit MA901 usb radio is based on the Atmel tiny85 chip and
+ * has the same USB ID as many Atmel V-USB devices. This
+ * usb radio is handled by the radio-ma901.c driver, so we want to
+ * ignore the hid. Check the name, bus and product, and ignore
+ * it if we have an MA901 usb radio.
+ */
+ if (hdev->product == USB_DEVICE_ID_ATMEL_V_USB &&
+ hdev->bus == BUS_USB &&
+ strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0)
+ return true;
+ break;
}
if (hdev->type == HID_TYPE_USBMOUSE &&
diff --git a/trunk/drivers/hid/hid-ids.h b/trunk/drivers/hid/hid-ids.h
index c4388776f4e4..5309fd5eb0eb 100644
--- a/trunk/drivers/hid/hid-ids.h
+++ b/trunk/drivers/hid/hid-ids.h
@@ -158,6 +158,8 @@
#define USB_VENDOR_ID_ATMEL 0x03eb
#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
#define USB_DEVICE_ID_ATMEL_MXT_DIGITIZER 0x2118
+#define USB_VENDOR_ID_ATMEL_V_USB 0x16c0
+#define USB_DEVICE_ID_ATMEL_V_USB 0x05df
#define USB_VENDOR_ID_AUREAL 0x0755
#define USB_DEVICE_ID_AUREAL_W01RN 0x2626
@@ -557,9 +559,6 @@
#define USB_VENDOR_ID_MADCATZ 0x0738
#define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
-#define USB_VENDOR_ID_MASTERKIT 0x16c0
-#define USB_DEVICE_ID_MASTERKIT_MA901RADIO 0x05df
-
#define USB_VENDOR_ID_MCC 0x09db
#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
#define USB_DEVICE_ID_MCC_PMD1208LS 0x007a
diff --git a/trunk/drivers/hid/hid-magicmouse.c b/trunk/drivers/hid/hid-magicmouse.c
index f7f113ba083e..a8ce44296cfd 100644
--- a/trunk/drivers/hid/hid-magicmouse.c
+++ b/trunk/drivers/hid/hid-magicmouse.c
@@ -462,6 +462,21 @@ static int magicmouse_input_mapping(struct hid_device *hdev,
return 0;
}
+static void magicmouse_input_configured(struct hid_device *hdev,
+ struct hid_input *hi)
+
+{
+ struct magicmouse_sc *msc = hid_get_drvdata(hdev);
+
+ int ret = magicmouse_setup_input(msc->input, hdev);
+ if (ret) {
+ hid_err(hdev, "magicmouse setup input failed (%d)\n", ret);
+ /* clean msc->input to notify probe() of the failure */
+ msc->input = NULL;
+ }
+}
+
+
static int magicmouse_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
@@ -493,15 +508,10 @@ static int magicmouse_probe(struct hid_device *hdev,
goto err_free;
}
- /* We do this after hid-input is done parsing reports so that
- * hid-input uses the most natural button and axis IDs.
- */
- if (msc->input) {
- ret = magicmouse_setup_input(msc->input, hdev);
- if (ret) {
- hid_err(hdev, "magicmouse setup input failed (%d)\n", ret);
- goto err_stop_hw;
- }
+ if (!msc->input) {
+ hid_err(hdev, "magicmouse input not registered\n");
+ ret = -ENOMEM;
+ goto err_stop_hw;
}
if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
@@ -568,6 +578,7 @@ static struct hid_driver magicmouse_driver = {
.remove = magicmouse_remove,
.raw_event = magicmouse_raw_event,
.input_mapping = magicmouse_input_mapping,
+ .input_configured = magicmouse_input_configured,
};
module_hid_driver(magicmouse_driver);
diff --git a/trunk/drivers/hwspinlock/hwspinlock_core.c b/trunk/drivers/hwspinlock/hwspinlock_core.c
index db713c0dfba4..461a0d739d75 100644
--- a/trunk/drivers/hwspinlock/hwspinlock_core.c
+++ b/trunk/drivers/hwspinlock/hwspinlock_core.c
@@ -416,6 +416,8 @@ static int __hwspin_lock_request(struct hwspinlock *hwlock)
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "%s: can't power on device\n", __func__);
+ pm_runtime_put_noidle(dev);
+ module_put(dev->driver->owner);
return ret;
}
diff --git a/trunk/drivers/i2c/busses/i2c-designware-platdrv.c b/trunk/drivers/i2c/busses/i2c-designware-platdrv.c
index 0ceb6e1b0f65..e3085c487ace 100644
--- a/trunk/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/trunk/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -182,7 +182,6 @@ static int dw_i2c_probe(struct platform_device *pdev)
adap->algo = &i2c_dw_algo;
adap->dev.parent = &pdev->dev;
adap->dev.of_node = pdev->dev.of_node;
- ACPI_HANDLE_SET(&adap->dev, ACPI_HANDLE(&pdev->dev));
r = i2c_add_numbered_adapter(adap);
if (r) {
diff --git a/trunk/drivers/idle/intel_idle.c b/trunk/drivers/idle/intel_idle.c
index 5d6675013864..1a38dd7dfe4e 100644
--- a/trunk/drivers/idle/intel_idle.c
+++ b/trunk/drivers/idle/intel_idle.c
@@ -465,6 +465,7 @@ static const struct x86_cpu_id intel_idle_ids[] = {
ICPU(0x3c, idle_cpu_hsw),
ICPU(0x3f, idle_cpu_hsw),
ICPU(0x45, idle_cpu_hsw),
+ ICPU(0x46, idle_cpu_hsw),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
diff --git a/trunk/drivers/infiniband/hw/qib/qib_sd7220.c b/trunk/drivers/infiniband/hw/qib/qib_sd7220.c
index 08a6c6d39e56..911205d3d5a0 100644
--- a/trunk/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/trunk/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -44,7 +44,7 @@
#include "qib.h"
#include "qib_7220.h"
-#define SD7220_FW_NAME "intel/sd7220.fw"
+#define SD7220_FW_NAME "qlogic/sd7220.fw"
MODULE_FIRMWARE(SD7220_FW_NAME);
/*
diff --git a/trunk/drivers/input/tablet/wacom_wac.c b/trunk/drivers/input/tablet/wacom_wac.c
index 1daa97913b7d..0bfd8cf25200 100644
--- a/trunk/drivers/input/tablet/wacom_wac.c
+++ b/trunk/drivers/input/tablet/wacom_wac.c
@@ -359,7 +359,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
case 0x802: /* Intuos4 General Pen */
case 0x804: /* Intuos4 Marker Pen */
case 0x40802: /* Intuos4 Classic Pen */
- case 0x18803: /* DTH2242 Grip Pen */
+ case 0x18802: /* DTH2242 Grip Pen */
case 0x022:
wacom->tool[idx] = BTN_TOOL_PEN;
break;
@@ -1912,7 +1912,7 @@ static const struct wacom_features wacom_features_0xBB =
{ "Wacom Intuos4 12x19", WACOM_PKGLEN_INTUOS, 97536, 60960, 2047,
63, INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0xBC =
- { "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40840, 25400, 2047,
+ { "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40640, 25400, 2047,
63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0x26 =
{ "Wacom Intuos5 touch S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047,
@@ -2144,7 +2144,7 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x44) },
{ USB_DEVICE_WACOM(0x45) },
{ USB_DEVICE_WACOM(0x59) },
- { USB_DEVICE_WACOM(0x5D) },
+ { USB_DEVICE_DETAILED(0x5D, USB_CLASS_HID, 0, 0) },
{ USB_DEVICE_WACOM(0xB0) },
{ USB_DEVICE_WACOM(0xB1) },
{ USB_DEVICE_WACOM(0xB2) },
@@ -2209,7 +2209,7 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x47) },
{ USB_DEVICE_WACOM(0xF4) },
{ USB_DEVICE_WACOM(0xF8) },
- { USB_DEVICE_WACOM(0xF6) },
+ { USB_DEVICE_DETAILED(0xF6, USB_CLASS_HID, 0, 0) },
{ USB_DEVICE_WACOM(0xFA) },
{ USB_DEVICE_LENOVO(0x6004) },
{ }
diff --git a/trunk/drivers/iommu/amd_iommu.c b/trunk/drivers/iommu/amd_iommu.c
index b287ca33833d..830183737b0f 100644
--- a/trunk/drivers/iommu/amd_iommu.c
+++ b/trunk/drivers/iommu/amd_iommu.c
@@ -173,7 +173,7 @@ static inline u16 get_device_id(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- return calc_devid(pdev->bus->number, pdev->devfn);
+ return PCI_DEVID(pdev->bus->number, pdev->devfn);
}
static struct iommu_dev_data *get_dev_data(struct device *dev)
@@ -649,26 +649,26 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
case EVENT_TYPE_ILL_DEV:
printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
"address=0x%016llx flags=0x%04x]\n",
- PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
address, flags);
dump_dte_entry(devid);
break;
case EVENT_TYPE_IO_FAULT:
printk("IO_PAGE_FAULT device=%02x:%02x.%x "
"domain=0x%04x address=0x%016llx flags=0x%04x]\n",
- PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
domid, address, flags);
break;
case EVENT_TYPE_DEV_TAB_ERR:
printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
"address=0x%016llx flags=0x%04x]\n",
- PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
address, flags);
break;
case EVENT_TYPE_PAGE_TAB_ERR:
printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
"domain=0x%04x address=0x%016llx flags=0x%04x]\n",
- PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
domid, address, flags);
break;
case EVENT_TYPE_ILL_CMD:
@@ -682,13 +682,13 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
case EVENT_TYPE_IOTLB_INV_TO:
printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
"address=0x%016llx]\n",
- PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
address);
break;
case EVENT_TYPE_INV_DEV_REQ:
printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
"address=0x%016llx flags=0x%04x]\n",
- PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
address, flags);
break;
default:
diff --git a/trunk/drivers/iommu/amd_iommu_init.c b/trunk/drivers/iommu/amd_iommu_init.c
index e3c2d74b7684..2f46881256a2 100644
--- a/trunk/drivers/iommu/amd_iommu_init.c
+++ b/trunk/drivers/iommu/amd_iommu_init.c
@@ -406,7 +406,7 @@ static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
u32 cap;
cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
- update_last_devid(calc_devid(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));
+ update_last_devid(PCI_DEVID(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));
return 0;
}
@@ -423,7 +423,7 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
p += sizeof(*h);
end += h->length;
- find_last_devid_on_pci(PCI_BUS(h->devid),
+ find_last_devid_on_pci(PCI_BUS_NUM(h->devid),
PCI_SLOT(h->devid),
PCI_FUNC(h->devid),
h->cap_ptr);
@@ -784,10 +784,10 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
DUMP_printk(" DEV_ALL\t\t\t first devid: %02x:%02x.%x"
" last device %02x:%02x.%x flags: %02x\n",
- PCI_BUS(iommu->first_device),
+ PCI_BUS_NUM(iommu->first_device),
PCI_SLOT(iommu->first_device),
PCI_FUNC(iommu->first_device),
- PCI_BUS(iommu->last_device),
+ PCI_BUS_NUM(iommu->last_device),
PCI_SLOT(iommu->last_device),
PCI_FUNC(iommu->last_device),
e->flags);
@@ -801,7 +801,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x "
"flags: %02x\n",
- PCI_BUS(e->devid),
+ PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags);
@@ -813,7 +813,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
DUMP_printk(" DEV_SELECT_RANGE_START\t "
"devid: %02x:%02x.%x flags: %02x\n",
- PCI_BUS(e->devid),
+ PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags);
@@ -827,11 +827,11 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
"flags: %02x devid_to: %02x:%02x.%x\n",
- PCI_BUS(e->devid),
+ PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags,
- PCI_BUS(e->ext >> 8),
+ PCI_BUS_NUM(e->ext >> 8),
PCI_SLOT(e->ext >> 8),
PCI_FUNC(e->ext >> 8));
@@ -846,11 +846,11 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
DUMP_printk(" DEV_ALIAS_RANGE\t\t "
"devid: %02x:%02x.%x flags: %02x "
"devid_to: %02x:%02x.%x\n",
- PCI_BUS(e->devid),
+ PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags,
- PCI_BUS(e->ext >> 8),
+ PCI_BUS_NUM(e->ext >> 8),
PCI_SLOT(e->ext >> 8),
PCI_FUNC(e->ext >> 8));
@@ -864,7 +864,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
"flags: %02x ext: %08x\n",
- PCI_BUS(e->devid),
+ PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags, e->ext);
@@ -877,7 +877,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
"%02x:%02x.%x flags: %02x ext: %08x\n",
- PCI_BUS(e->devid),
+ PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid),
e->flags, e->ext);
@@ -890,7 +890,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
case IVHD_DEV_RANGE_END:
DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
- PCI_BUS(e->devid),
+ PCI_BUS_NUM(e->devid),
PCI_SLOT(e->devid),
PCI_FUNC(e->devid));
@@ -924,7 +924,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
var, (int)handle,
- PCI_BUS(devid),
+ PCI_BUS_NUM(devid),
PCI_SLOT(devid),
PCI_FUNC(devid));
@@ -1086,7 +1086,7 @@ static int __init init_iommu_all(struct acpi_table_header *table)
DUMP_printk("device: %02x:%02x.%01x cap: %04x "
"seg: %d flags: %01x info %04x\n",
- PCI_BUS(h->devid), PCI_SLOT(h->devid),
+ PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
PCI_FUNC(h->devid), h->cap_ptr,
h->pci_seg, h->flags, h->info);
DUMP_printk(" mmio-addr: %016llx\n",
@@ -1116,7 +1116,7 @@ static int iommu_init_pci(struct amd_iommu *iommu)
int cap_ptr = iommu->cap_ptr;
u32 range, misc, low, high;
- iommu->dev = pci_get_bus_and_slot(PCI_BUS(iommu->devid),
+ iommu->dev = pci_get_bus_and_slot(PCI_BUS_NUM(iommu->devid),
iommu->devid & 0xff);
if (!iommu->dev)
return -ENODEV;
@@ -1128,9 +1128,9 @@ static int iommu_init_pci(struct amd_iommu *iommu)
pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
&misc);
- iommu->first_device = calc_devid(MMIO_GET_BUS(range),
+ iommu->first_device = PCI_DEVID(MMIO_GET_BUS(range),
MMIO_GET_FD(range));
- iommu->last_device = calc_devid(MMIO_GET_BUS(range),
+ iommu->last_device = PCI_DEVID(MMIO_GET_BUS(range),
MMIO_GET_LD(range));
if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
@@ -1388,8 +1388,8 @@ static int __init init_unity_map_range(struct ivmd_header *m)
DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
" range_start: %016llx range_end: %016llx flags: %x\n", s,
- PCI_BUS(e->devid_start), PCI_SLOT(e->devid_start),
- PCI_FUNC(e->devid_start), PCI_BUS(e->devid_end),
+ PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
+ PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
e->address_start, e->address_end, m->flags);
diff --git a/trunk/drivers/iommu/amd_iommu_types.h b/trunk/drivers/iommu/amd_iommu_types.h
index e38ab438bb34..ec36cf63e0ca 100644
--- a/trunk/drivers/iommu/amd_iommu_types.h
+++ b/trunk/drivers/iommu/amd_iommu_types.h
@@ -24,6 +24,7 @@
#include
#include
#include
+#include <linux/pci.h>
/*
* Maximum number of IOMMUs supported
@@ -315,9 +316,6 @@
#define MAX_DOMAIN_ID 65536
-/* FIXME: move this macro to <linux/pci.h> */
-#define PCI_BUS(x) (((x) >> 8) & 0xff)
-
/* Protection domain flags */
#define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */
#define PD_DEFAULT_MASK (1UL << 1) /* domain is a default dma_ops
@@ -703,13 +701,6 @@ extern int amd_iommu_max_glx_val;
*/
extern void iommu_flush_all_caches(struct amd_iommu *iommu);
-/* takes bus and device/function and returns the device id
- * FIXME: should that be in generic PCI code? */
-static inline u16 calc_devid(u8 bus, u8 devfn)
-{
- return (((u16)bus) << 8) | devfn;
-}
-
static inline int get_ioapic_devid(int id)
{
struct devid_map *entry;
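
The IOMMU hunks drop the driver-local calc_devid()/PCI_BUS() helpers in favour of the generic PCI_DEVID(), PCI_BUS_NUM(), PCI_SLOT() and PCI_FUNC() macros. A standalone sketch of the underlying packing, assuming the standard layout (bus in bits 15:8 of the device id, slot in bits 7:3 of devfn, function in bits 2:0); the SKETCH_* names are illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    /* devid = (bus << 8) | devfn, devfn = (slot << 3) | func. */
    #define SKETCH_DEVID(bus, devfn)  ((uint16_t)((((uint16_t)(bus)) << 8) | (devfn)))
    #define SKETCH_BUS_NUM(devid)     (((devid) >> 8) & 0xff)
    #define SKETCH_SLOT(devfn)        (((devfn) >> 3) & 0x1f)
    #define SKETCH_FUNC(devfn)        ((devfn) & 0x07)

    int main(void)
    {
        uint8_t bus = 0x02;
        uint8_t devfn = (0x1f << 3) | 0x3;          /* slot 0x1f, function 3 */
        uint16_t devid = SKETCH_DEVID(bus, devfn);

        /* Prints "02:1f.3 -> devid 0x02fb". */
        printf("%02x:%02x.%x -> devid 0x%04x\n",
               SKETCH_BUS_NUM(devid), SKETCH_SLOT(devid), SKETCH_FUNC(devid),
               devid);
        return 0;
    }
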
diff --git a/trunk/drivers/irqchip/irq-gic.c b/trunk/drivers/irqchip/irq-gic.c
index a32e0d5aa45f..fc6aebf1e4b2 100644
--- a/trunk/drivers/irqchip/irq-gic.c
+++ b/trunk/drivers/irqchip/irq-gic.c
@@ -236,7 +236,8 @@ static int gic_retrigger(struct irq_data *d)
if (gic_arch_extn.irq_retrigger)
return gic_arch_extn.irq_retrigger(d);
- return -ENXIO;
+ /* the genirq layer expects 0 if we can't retrigger in hardware */
+ return 0;
}
#ifdef CONFIG_SMP
diff --git a/trunk/drivers/md/dm-cache-target.c b/trunk/drivers/md/dm-cache-target.c
index 66120bd46d15..10744091e6ca 100644
--- a/trunk/drivers/md/dm-cache-target.c
+++ b/trunk/drivers/md/dm-cache-target.c
@@ -6,6 +6,7 @@
#include "dm.h"
#include "dm-bio-prison.h"
+#include "dm-bio-record.h"
#include "dm-cache-metadata.h"
#include
@@ -201,10 +202,15 @@ struct per_bio_data {
unsigned req_nr:2;
struct dm_deferred_entry *all_io_entry;
- /* writethrough fields */
+ /*
+ * Writethrough fields. These MUST remain at the end of this
+ * structure and the 'cache' member must be the first of them, as
+ * it is used to determine the offset of the writethrough fields.
+ */
struct cache *cache;
dm_cblock_t cblock;
bio_end_io_t *saved_bi_end_io;
+ struct dm_bio_details bio_details;
};
struct dm_cache_migration {
@@ -513,16 +519,28 @@ static void save_stats(struct cache *cache)
/*----------------------------------------------------------------
* Per bio data
*--------------------------------------------------------------*/
-static struct per_bio_data *get_per_bio_data(struct bio *bio)
+
+/*
+ * If using writeback, leave out struct per_bio_data's writethrough fields.
+ */
+#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
+#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))
+
+static size_t get_per_bio_data_size(struct cache *cache)
+{
+ return cache->features.write_through ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
+}
+
+static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
{
- struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+ struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
BUG_ON(!pb);
return pb;
}
-static struct per_bio_data *init_per_bio_data(struct bio *bio)
+static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
{
- struct per_bio_data *pb = get_per_bio_data(bio);
+ struct per_bio_data *pb = get_per_bio_data(bio, data_size);
pb->tick = false;
pb->req_nr = dm_bio_get_target_bio_nr(bio);
@@ -556,7 +574,8 @@ static void remap_to_cache(struct cache *cache, struct bio *bio,
static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
{
unsigned long flags;
- struct per_bio_data *pb = get_per_bio_data(bio);
+ size_t pb_data_size = get_per_bio_data_size(cache);
+ struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
spin_lock_irqsave(&cache->lock, flags);
if (cache->need_tick_bio &&
@@ -635,7 +654,7 @@ static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
static void writethrough_endio(struct bio *bio, int err)
{
- struct per_bio_data *pb = get_per_bio_data(bio);
+ struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
bio->bi_end_io = pb->saved_bi_end_io;
if (err) {
@@ -643,6 +662,7 @@ static void writethrough_endio(struct bio *bio, int err)
return;
}
+ dm_bio_restore(&pb->bio_details, bio);
remap_to_cache(pb->cache, bio, pb->cblock);
/*
@@ -662,11 +682,12 @@ static void writethrough_endio(struct bio *bio, int err)
static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
dm_oblock_t oblock, dm_cblock_t cblock)
{
- struct per_bio_data *pb = get_per_bio_data(bio);
+ struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
pb->cache = cache;
pb->cblock = cblock;
pb->saved_bi_end_io = bio->bi_end_io;
+ dm_bio_record(&pb->bio_details, bio);
bio->bi_end_io = writethrough_endio;
remap_to_origin_clear_discard(pb->cache, bio, oblock);
@@ -1035,7 +1056,8 @@ static void defer_bio(struct cache *cache, struct bio *bio)
static void process_flush_bio(struct cache *cache, struct bio *bio)
{
- struct per_bio_data *pb = get_per_bio_data(bio);
+ size_t pb_data_size = get_per_bio_data_size(cache);
+ struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
BUG_ON(bio->bi_size);
if (!pb->req_nr)
@@ -1107,7 +1129,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
dm_oblock_t block = get_bio_block(cache, bio);
struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
struct policy_result lookup_result;
- struct per_bio_data *pb = get_per_bio_data(bio);
+ size_t pb_data_size = get_per_bio_data_size(cache);
+ struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
bool discarded_block = is_discarded_oblock(cache, block);
bool can_migrate = discarded_block || spare_migration_bandwidth(cache);
@@ -1881,7 +1904,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
cache->ti = ca->ti;
ti->private = cache;
- ti->per_bio_data_size = sizeof(struct per_bio_data);
ti->num_flush_bios = 2;
ti->flush_supported = true;
@@ -1890,6 +1912,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
ti->discard_zeroes_data_unsupported = true;
memcpy(&cache->features, &ca->features, sizeof(cache->features));
+ ti->per_bio_data_size = get_per_bio_data_size(cache);
cache->callbacks.congested_fn = cache_is_congested;
dm_table_add_target_callbacks(ti->table, &cache->callbacks);
@@ -2092,6 +2115,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
int r;
dm_oblock_t block = get_bio_block(cache, bio);
+ size_t pb_data_size = get_per_bio_data_size(cache);
bool can_migrate = false;
bool discarded_block;
struct dm_bio_prison_cell *cell;
@@ -2108,7 +2132,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
- pb = init_per_bio_data(bio);
+ pb = init_per_bio_data(bio, pb_data_size);
if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
defer_bio(cache, bio);
@@ -2193,7 +2217,8 @@ static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
{
struct cache *cache = ti->private;
unsigned long flags;
- struct per_bio_data *pb = get_per_bio_data(bio);
+ size_t pb_data_size = get_per_bio_data_size(cache);
+ struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
if (pb->tick) {
policy_tick(cache->policy);
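
The per-bio data changes above rely on field layout: the writethrough-only members sit at the tail of struct per_bio_data, so a writeback-mode table can register a smaller per-bio data size using offsetof() up to the first writethrough field. A standalone sketch of that sizing trick with a toy structure (the field names are stand-ins for the real ones):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct toy_per_bio_data {
        bool tick;
        unsigned int req_nr;
        void *all_io_entry;
        /* Writethrough-only fields; 'wt_cache' must stay first of them. */
        void *wt_cache;
        unsigned long wt_cblock;
    };

    /* Writeback mode only needs the fields before 'wt_cache'. */
    #define TOY_PB_SIZE_WB  (offsetof(struct toy_per_bio_data, wt_cache))
    #define TOY_PB_SIZE_WT  (sizeof(struct toy_per_bio_data))

    static size_t toy_per_bio_data_size(bool write_through)
    {
        return write_through ? TOY_PB_SIZE_WT : TOY_PB_SIZE_WB;
    }

    int main(void)
    {
        printf("writeback: %zu bytes, writethrough: %zu bytes\n",
               toy_per_bio_data_size(false), toy_per_bio_data_size(true));
        return 0;
    }
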
diff --git a/trunk/drivers/md/dm.c b/trunk/drivers/md/dm.c
index 7e469260fe5e..9a0bdad9ad8f 100644
--- a/trunk/drivers/md/dm.c
+++ b/trunk/drivers/md/dm.c
@@ -611,6 +611,7 @@ static void dec_pending(struct dm_io *io, int error)
queue_io(md, bio);
} else {
/* done with normal IO or empty flush */
+ trace_block_bio_complete(md->queue, bio, io_error);
bio_endio(bio, io_error);
}
}
diff --git a/trunk/drivers/md/raid5.c b/trunk/drivers/md/raid5.c
index 24909eb13fec..f4e87bfc7567 100644
--- a/trunk/drivers/md/raid5.c
+++ b/trunk/drivers/md/raid5.c
@@ -184,6 +184,8 @@ static void return_io(struct bio *return_bi)
return_bi = bi->bi_next;
bi->bi_next = NULL;
bi->bi_size = 0;
+ trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
+ bi, 0);
bio_endio(bi, 0);
bi = return_bi;
}
@@ -3914,6 +3916,8 @@ static void raid5_align_endio(struct bio *bi, int error)
rdev_dec_pending(rdev, conf->mddev);
if (!error && uptodate) {
+ trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
+ raid_bi, 0);
bio_endio(raid_bi, 0);
if (atomic_dec_and_test(&conf->active_aligned_reads))
wake_up(&conf->wait_for_stripe);
@@ -4382,6 +4386,8 @@ static void make_request(struct mddev *mddev, struct bio * bi)
if ( rw == WRITE )
md_write_end(mddev);
+ trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
+ bi, 0);
bio_endio(bi, 0);
}
}
@@ -4758,8 +4764,11 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
handled++;
}
remaining = raid5_dec_bi_active_stripes(raid_bio);
- if (remaining == 0)
+ if (remaining == 0) {
+ trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
+ raid_bio, 0);
bio_endio(raid_bio, 0);
+ }
if (atomic_dec_and_test(&conf->active_aligned_reads))
wake_up(&conf->wait_for_stripe);
return handled;
diff --git a/trunk/drivers/media/dvb-frontends/mb86a20s.c b/trunk/drivers/media/dvb-frontends/mb86a20s.c
index f19cd7367040..4faaf8053f26 100644
--- a/trunk/drivers/media/dvb-frontends/mb86a20s.c
+++ b/trunk/drivers/media/dvb-frontends/mb86a20s.c
@@ -610,7 +610,7 @@ static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer,
__func__, 'A' + layer, segment * isdbt_rate[m][f][i]/1000,
rate, rate);
- state->estimated_rate[i] = rate;
+ state->estimated_rate[layer] = rate;
}
diff --git a/trunk/drivers/media/pci/cx25821/cx25821-video.c b/trunk/drivers/media/pci/cx25821/cx25821-video.c
index d4de021dc844..31ce7698acb9 100644
--- a/trunk/drivers/media/pci/cx25821/cx25821-video.c
+++ b/trunk/drivers/media/pci/cx25821/cx25821-video.c
@@ -461,7 +461,7 @@ int cx25821_video_register(struct cx25821_dev *dev)
spin_lock_init(&dev->slock);
- for (i = 0; i < MAX_VID_CHANNEL_NUM - 1; ++i) {
+ for (i = 0; i < VID_CHANNEL_NUM; ++i) {
cx25821_init_controls(dev, i);
cx25821_risc_stopper(dev->pci, &dev->channels[i].vidq.stopper,
diff --git a/trunk/drivers/media/platform/Kconfig b/trunk/drivers/media/platform/Kconfig
index 05d7b6333461..a0639e779973 100644
--- a/trunk/drivers/media/platform/Kconfig
+++ b/trunk/drivers/media/platform/Kconfig
@@ -204,7 +204,7 @@ config VIDEO_SAMSUNG_EXYNOS_GSC
config VIDEO_SH_VEU
tristate "SuperH VEU mem2mem video processing driver"
- depends on VIDEO_DEV && VIDEO_V4L2
+ depends on VIDEO_DEV && VIDEO_V4L2 && GENERIC_HARDIRQS
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
help
diff --git a/trunk/drivers/media/radio/radio-ma901.c b/trunk/drivers/media/radio/radio-ma901.c
index c61f590029ad..348dafc0318a 100644
--- a/trunk/drivers/media/radio/radio-ma901.c
+++ b/trunk/drivers/media/radio/radio-ma901.c
@@ -347,9 +347,20 @@ static void usb_ma901radio_release(struct v4l2_device *v4l2_dev)
static int usb_ma901radio_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
+ struct usb_device *dev = interface_to_usbdev(intf);
struct ma901radio_device *radio;
int retval = 0;
+ /* Masterkit MA901 usb radio has the same USB ID as many other
+ * Atmel V-USB devices. Let's make additional checks to be sure
+ * that this is our device.
+ */
+
+ if (dev->product && dev->manufacturer &&
+ (strncmp(dev->product, "MA901", 5) != 0
+ || strncmp(dev->manufacturer, "www.masterkit.ru", 16) != 0))
+ return -ENODEV;
+
radio = kzalloc(sizeof(struct ma901radio_device), GFP_KERNEL);
if (!radio) {
dev_err(&intf->dev, "kzalloc for ma901radio_device failed\n");
diff --git a/trunk/drivers/misc/vmw_vmci/Kconfig b/trunk/drivers/misc/vmw_vmci/Kconfig
index 39c2ecadb273..ea98f7e9ccd1 100644
--- a/trunk/drivers/misc/vmw_vmci/Kconfig
+++ b/trunk/drivers/misc/vmw_vmci/Kconfig
@@ -4,7 +4,7 @@
config VMWARE_VMCI
tristate "VMware VMCI Driver"
- depends on X86 && PCI
+ depends on X86 && PCI && NET
help
This is VMware's Virtual Machine Communication Interface. It enables
high-speed communication between host and guest in a virtual
diff --git a/trunk/drivers/mtd/mtdchar.c b/trunk/drivers/mtd/mtdchar.c
index 92ab30ab00dc..dc571ebc1aa0 100644
--- a/trunk/drivers/mtd/mtdchar.c
+++ b/trunk/drivers/mtd/mtdchar.c
@@ -1123,33 +1123,6 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file,
}
#endif
-static inline unsigned long get_vm_size(struct vm_area_struct *vma)
-{
- return vma->vm_end - vma->vm_start;
-}
-
-static inline resource_size_t get_vm_offset(struct vm_area_struct *vma)
-{
- return (resource_size_t) vma->vm_pgoff << PAGE_SHIFT;
-}
-
-/*
- * Set a new vm offset.
- *
- * Verify that the incoming offset really works as a page offset,
- * and that the offset and size fit in a resource_size_t.
- */
-static inline int set_vm_offset(struct vm_area_struct *vma, resource_size_t off)
-{
- pgoff_t pgoff = off >> PAGE_SHIFT;
- if (off != (resource_size_t) pgoff << PAGE_SHIFT)
- return -EINVAL;
- if (off + get_vm_size(vma) - 1 < off)
- return -EINVAL;
- vma->vm_pgoff = pgoff;
- return 0;
-}
-
/*
* set up a mapping for shared memory segments
*/
@@ -1159,45 +1132,17 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
struct map_info *map = mtd->priv;
- resource_size_t start, off;
- unsigned long len, vma_len;
/* This is broken because it assumes the MTD device is map-based
and that mtd->priv is a valid struct map_info. It should be
replaced with something that uses the mtd_get_unmapped_area()
operation properly. */
if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
- off = get_vm_offset(vma);
- start = map->phys;
- len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
- start &= PAGE_MASK;
- vma_len = get_vm_size(vma);
-
- /* Overflow in off+len? */
- if (vma_len + off < off)
- return -EINVAL;
- /* Does it fit in the mapping? */
- if (vma_len + off > len)
- return -EINVAL;
-
- off += start;
- /* Did that overflow? */
- if (off < start)
- return -EINVAL;
- if (set_vm_offset(vma, off) < 0)
- return -EINVAL;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
-
#ifdef pgprot_noncached
- if (file->f_flags & O_DSYNC || off >= __pa(high_memory))
+ if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
- if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot))
- return -EAGAIN;
-
- return 0;
+ return vm_iomap_memory(vma, map->phys, map->size);
}
return -ENOSYS;
#else
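
The mtdchar hunk collapses the hand-rolled offset/length validation plus io_remap_pfn_range() into a single vm_iomap_memory() call. As a rough illustration of the kind of validation such a helper has to perform before mapping a physical window into a VMA, here is a hedged userspace sketch (the toy_* names and simplified vma fields are assumptions, not the kernel structures):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TOY_PAGE_SHIFT 12
    #define TOY_PAGE_SIZE  ((uint64_t)1 << TOY_PAGE_SHIFT)
    #define TOY_PAGE_MASK  (~(TOY_PAGE_SIZE - 1))

    struct toy_vma {
        uint64_t vm_start, vm_end;   /* user virtual addresses */
        uint64_t vm_pgoff;           /* offset into the window, in pages */
    };

    /* Does the requested mapping fit inside [phys, phys + size)? */
    static bool toy_iomap_fits(const struct toy_vma *vma,
                               uint64_t phys, uint64_t size)
    {
        uint64_t vma_len = vma->vm_end - vma->vm_start;
        uint64_t off = vma->vm_pgoff << TOY_PAGE_SHIFT;
        /* Page-align the window length, accounting for a phys offset
         * within the first page. */
        uint64_t len = (size + (phys & ~TOY_PAGE_MASK) + TOY_PAGE_SIZE - 1)
                        & TOY_PAGE_MASK;

        if (off + vma_len < off)     /* offset + length overflowed */
            return false;
        if (off + vma_len > len)     /* runs past the end of the window */
            return false;
        return true;
    }

    int main(void)
    {
        struct toy_vma vma = {
            .vm_start = 0x10000000, .vm_end = 0x10004000, .vm_pgoff = 0,
        };

        printf("fits: %d\n", toy_iomap_fits(&vma, 0xfe000000, 0x4000));
        return 0;
    }
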
diff --git a/trunk/drivers/net/bonding/bond_main.c b/trunk/drivers/net/bonding/bond_main.c
index 6bbd90e1123c..dbbea0eec134 100644
--- a/trunk/drivers/net/bonding/bond_main.c
+++ b/trunk/drivers/net/bonding/bond_main.c
@@ -846,8 +846,10 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(old_active->dev, -1);
+ netif_addr_lock_bh(bond->dev);
netdev_for_each_mc_addr(ha, bond->dev)
dev_mc_del(old_active->dev, ha->addr);
+ netif_addr_unlock_bh(bond->dev);
}
if (new_active) {
@@ -858,8 +860,10 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(new_active->dev, 1);
+ netif_addr_lock_bh(bond->dev);
netdev_for_each_mc_addr(ha, bond->dev)
dev_mc_add(new_active->dev, ha->addr);
+ netif_addr_unlock_bh(bond->dev);
}
}
@@ -1901,11 +1905,29 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_destroy_slave_symlinks(bond_dev, slave_dev);
err_detach:
+ if (!USES_PRIMARY(bond->params.mode)) {
+ netif_addr_lock_bh(bond_dev);
+ bond_mc_list_flush(bond_dev, slave_dev);
+ netif_addr_unlock_bh(bond_dev);
+ }
+ bond_del_vlans_from_slave(bond, slave_dev);
write_lock_bh(&bond->lock);
bond_detach_slave(bond, new_slave);
+ if (bond->primary_slave == new_slave)
+ bond->primary_slave = NULL;
write_unlock_bh(&bond->lock);
+ if (bond->curr_active_slave == new_slave) {
+ read_lock(&bond->lock);
+ write_lock_bh(&bond->curr_slave_lock);
+ bond_change_active_slave(bond, NULL);
+ bond_select_active_slave(bond);
+ write_unlock_bh(&bond->curr_slave_lock);
+ read_unlock(&bond->lock);
+ }
+ slave_disable_netpoll(new_slave);
err_close:
+ slave_dev->priv_flags &= ~IFF_BONDING;
dev_close(slave_dev);
err_unset_master:
@@ -1976,12 +1998,11 @@ static int __bond_release_one(struct net_device *bond_dev,
return -EINVAL;
}
+ write_unlock_bh(&bond->lock);
/* unregister rx_handler early so bond_handle_frame wouldn't be called
* for this slave anymore.
*/
netdev_rx_handler_unregister(slave_dev);
- write_unlock_bh(&bond->lock);
- synchronize_net();
write_lock_bh(&bond->lock);
if (!all && !bond->params.fail_over_mac) {
@@ -3169,11 +3190,20 @@ static int bond_slave_netdev_event(unsigned long event,
struct net_device *slave_dev)
{
struct slave *slave = bond_slave_get_rtnl(slave_dev);
- struct bonding *bond = slave->bond;
- struct net_device *bond_dev = slave->bond->dev;
+ struct bonding *bond;
+ struct net_device *bond_dev;
u32 old_speed;
u8 old_duplex;
+ /* A netdev event can be generated while enslaving a device
+ * before netdev_rx_handler_register is called, in which case
+ * slave will be NULL.
+ */
+ if (!slave)
+ return NOTIFY_DONE;
+ bond_dev = slave->bond->dev;
+ bond = slave->bond;
+
switch (event) {
case NETDEV_UNREGISTER:
if (bond->setup_by_slave)
@@ -3287,20 +3317,22 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
*/
static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
{
- struct ethhdr *data = (struct ethhdr *)skb->data;
- struct iphdr *iph;
- struct ipv6hdr *ipv6h;
+ const struct ethhdr *data;
+ const struct iphdr *iph;
+ const struct ipv6hdr *ipv6h;
u32 v6hash;
- __be32 *s, *d;
+ const __be32 *s, *d;
if (skb->protocol == htons(ETH_P_IP) &&
- skb_network_header_len(skb) >= sizeof(*iph)) {
+ pskb_network_may_pull(skb, sizeof(*iph))) {
iph = ip_hdr(skb);
+ data = (struct ethhdr *)skb->data;
return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
(data->h_dest[5] ^ data->h_source[5])) % count;
} else if (skb->protocol == htons(ETH_P_IPV6) &&
- skb_network_header_len(skb) >= sizeof(*ipv6h)) {
+ pskb_network_may_pull(skb, sizeof(*ipv6h))) {
ipv6h = ipv6_hdr(skb);
+ data = (struct ethhdr *)skb->data;
s = &ipv6h->saddr.s6_addr32[0];
d = &ipv6h->daddr.s6_addr32[0];
v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
@@ -3319,33 +3351,36 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
{
u32 layer4_xor = 0;
- struct iphdr *iph;
- struct ipv6hdr *ipv6h;
- __be32 *s, *d;
- __be16 *layer4hdr;
+ const struct iphdr *iph;
+ const struct ipv6hdr *ipv6h;
+ const __be32 *s, *d;
+ const __be16 *l4 = NULL;
+ __be16 _l4[2];
+ int noff = skb_network_offset(skb);
+ int poff;
if (skb->protocol == htons(ETH_P_IP) &&
- skb_network_header_len(skb) >= sizeof(*iph)) {
+ pskb_may_pull(skb, noff + sizeof(*iph))) {
iph = ip_hdr(skb);
- if (!ip_is_fragment(iph) &&
- (iph->protocol == IPPROTO_TCP ||
- iph->protocol == IPPROTO_UDP) &&
- (skb_headlen(skb) - skb_network_offset(skb) >=
- iph->ihl * sizeof(u32) + sizeof(*layer4hdr) * 2)) {
- layer4hdr = (__be16 *)((u32 *)iph + iph->ihl);
- layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
+ poff = proto_ports_offset(iph->protocol);
+
+ if (!ip_is_fragment(iph) && poff >= 0) {
+ l4 = skb_header_pointer(skb, noff + (iph->ihl << 2) + poff,
+ sizeof(_l4), &_l4);
+ if (l4)
+ layer4_xor = ntohs(l4[0] ^ l4[1]);
}
return (layer4_xor ^
((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count;
} else if (skb->protocol == htons(ETH_P_IPV6) &&
- skb_network_header_len(skb) >= sizeof(*ipv6h)) {
+ pskb_may_pull(skb, noff + sizeof(*ipv6h))) {
ipv6h = ipv6_hdr(skb);
- if ((ipv6h->nexthdr == IPPROTO_TCP ||
- ipv6h->nexthdr == IPPROTO_UDP) &&
- (skb_headlen(skb) - skb_network_offset(skb) >=
- sizeof(*ipv6h) + sizeof(*layer4hdr) * 2)) {
- layer4hdr = (__be16 *)(ipv6h + 1);
- layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
+ poff = proto_ports_offset(ipv6h->nexthdr);
+ if (poff >= 0) {
+ l4 = skb_header_pointer(skb, noff + sizeof(*ipv6h) + poff,
+ sizeof(_l4), &_l4);
+ if (l4)
+ layer4_xor = ntohs(l4[0] ^ l4[1]);
}
s = &ipv6h->saddr.s6_addr32[0];
d = &ipv6h->daddr.s6_addr32[0];
@@ -4847,9 +4882,18 @@ static int __net_init bond_net_init(struct net *net)
static void __net_exit bond_net_exit(struct net *net)
{
struct bond_net *bn = net_generic(net, bond_net_id);
+ struct bonding *bond, *tmp_bond;
+ LIST_HEAD(list);
bond_destroy_sysfs(bn);
bond_destroy_proc_dir(bn);
+
+ /* Kill off any bonds created after unregistering bond rtnl ops */
+ rtnl_lock();
+ list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
+ unregister_netdevice_queue(bond->dev, &list);
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
}
static struct pernet_operations bond_net_ops = {
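
The reworked layer3+4 hash fetches the L4 ports with skb_header_pointer() instead of assuming they sit in the linear area, but the folding itself is unchanged: XOR the two ports, XOR the low 16 bits of the address XOR, and reduce modulo the slave count. A small userspace sketch of that computation for IPv4 with hypothetical values (the real code operates on an sk_buff):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Fold (saddr, daddr, sport, dport) into a slave index, mirroring the
     * layer3+4 xmit hash policy. All inputs are in network byte order. */
    static unsigned int l34_hash_sketch(uint32_t saddr, uint32_t daddr,
                                        uint16_t sport, uint16_t dport,
                                        unsigned int count)
    {
        uint32_t layer4_xor = ntohs(sport ^ dport);
        uint32_t layer3_xor = ntohl(saddr ^ daddr) & 0xffff;

        return (layer4_xor ^ layer3_xor) % count;
    }

    int main(void)
    {
        uint32_t saddr = htonl(0xc0a80001);  /* 192.168.0.1 */
        uint32_t daddr = htonl(0xc0a80002);  /* 192.168.0.2 */
        uint16_t sport = htons(44321);
        uint16_t dport = htons(80);

        printf("slave index: %u\n",
               l34_hash_sketch(saddr, daddr, sport, dport, 4));
        return 0;
    }
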
diff --git a/trunk/drivers/net/bonding/bond_sysfs.c b/trunk/drivers/net/bonding/bond_sysfs.c
index db103e03ba05..ea7a388f4843 100644
--- a/trunk/drivers/net/bonding/bond_sysfs.c
+++ b/trunk/drivers/net/bonding/bond_sysfs.c
@@ -527,7 +527,7 @@ static ssize_t bonding_store_arp_interval(struct device *d,
goto out;
}
if (new_value < 0) {
- pr_err("%s: Invalid arp_interval value %d not in range 1-%d; rejected.\n",
+ pr_err("%s: Invalid arp_interval value %d not in range 0-%d; rejected.\n",
bond->dev->name, new_value, INT_MAX);
ret = -EINVAL;
goto out;
@@ -542,14 +542,15 @@ static ssize_t bonding_store_arp_interval(struct device *d,
pr_info("%s: Setting ARP monitoring interval to %d.\n",
bond->dev->name, new_value);
bond->params.arp_interval = new_value;
- if (bond->params.miimon) {
- pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
- bond->dev->name, bond->dev->name);
- bond->params.miimon = 0;
- }
- if (!bond->params.arp_targets[0]) {
- pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
- bond->dev->name);
+ if (new_value) {
+ if (bond->params.miimon) {
+ pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
+ bond->dev->name, bond->dev->name);
+ bond->params.miimon = 0;
+ }
+ if (!bond->params.arp_targets[0])
+ pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
+ bond->dev->name);
}
if (bond->dev->flags & IFF_UP) {
/* If the interface is up, we may need to fire off
@@ -557,10 +558,13 @@ static ssize_t bonding_store_arp_interval(struct device *d,
* timer will get fired off when the open function
* is called.
*/
- cancel_delayed_work_sync(&bond->mii_work);
- queue_delayed_work(bond->wq, &bond->arp_work, 0);
+ if (!new_value) {
+ cancel_delayed_work_sync(&bond->arp_work);
+ } else {
+ cancel_delayed_work_sync(&bond->mii_work);
+ queue_delayed_work(bond->wq, &bond->arp_work, 0);
+ }
}
-
out:
rtnl_unlock();
return ret;
@@ -702,7 +706,7 @@ static ssize_t bonding_store_downdelay(struct device *d,
}
if (new_value < 0) {
pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
- bond->dev->name, new_value, 1, INT_MAX);
+ bond->dev->name, new_value, 0, INT_MAX);
ret = -EINVAL;
goto out;
} else {
@@ -757,8 +761,8 @@ static ssize_t bonding_store_updelay(struct device *d,
goto out;
}
if (new_value < 0) {
- pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
- bond->dev->name, new_value, 1, INT_MAX);
+ pr_err("%s: Invalid up delay value %d not in range %d-%d; rejected.\n",
+ bond->dev->name, new_value, 0, INT_MAX);
ret = -EINVAL;
goto out;
} else {
@@ -968,37 +972,37 @@ static ssize_t bonding_store_miimon(struct device *d,
}
if (new_value < 0) {
pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n",
- bond->dev->name, new_value, 1, INT_MAX);
+ bond->dev->name, new_value, 0, INT_MAX);
ret = -EINVAL;
goto out;
- } else {
- pr_info("%s: Setting MII monitoring interval to %d.\n",
- bond->dev->name, new_value);
- bond->params.miimon = new_value;
- if (bond->params.updelay)
- pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
- bond->dev->name,
- bond->params.updelay * bond->params.miimon);
- if (bond->params.downdelay)
- pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
- bond->dev->name,
- bond->params.downdelay * bond->params.miimon);
- if (bond->params.arp_interval) {
- pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
- bond->dev->name);
- bond->params.arp_interval = 0;
- if (bond->params.arp_validate) {
- bond->params.arp_validate =
- BOND_ARP_VALIDATE_NONE;
- }
- }
-
- if (bond->dev->flags & IFF_UP) {
- /* If the interface is up, we may need to fire off
- * the MII timer. If the interface is down, the
- * timer will get fired off when the open function
- * is called.
- */
+ }
+ pr_info("%s: Setting MII monitoring interval to %d.\n",
+ bond->dev->name, new_value);
+ bond->params.miimon = new_value;
+ if (bond->params.updelay)
+ pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
+ bond->dev->name,
+ bond->params.updelay * bond->params.miimon);
+ if (bond->params.downdelay)
+ pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
+ bond->dev->name,
+ bond->params.downdelay * bond->params.miimon);
+ if (new_value && bond->params.arp_interval) {
+ pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
+ bond->dev->name);
+ bond->params.arp_interval = 0;
+ if (bond->params.arp_validate)
+ bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
+ }
+ if (bond->dev->flags & IFF_UP) {
+ /* If the interface is up, we may need to fire off
+ * the MII timer. If the interface is down, the
+ * timer will get fired off when the open function
+ * is called.
+ */
+ if (!new_value) {
+ cancel_delayed_work_sync(&bond->mii_work);
+ } else {
cancel_delayed_work_sync(&bond->arp_work);
queue_delayed_work(bond->wq, &bond->mii_work, 0);
}
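The sysfs changes above share one pattern: writing 0 now disables the corresponding monitor instead of being rejected, and the matching delayed work is cancelled or (re)armed only while the interface is up. A hedged sketch of that shape for the ARP case (the helper name is a placeholder; fields match the hunks):

static void example_set_arp_interval(struct bonding *bond, int new_value)
{
	bond->params.arp_interval = new_value;   /* 0 means "monitor disabled" */

	if (bond->dev->flags & IFF_UP) {
		if (!new_value)
			cancel_delayed_work_sync(&bond->arp_work);
		else
			queue_delayed_work(bond->wq, &bond->arp_work, 0);
	}
	/* if the device is down, bond_open() arms the work later */
}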
diff --git a/trunk/drivers/net/can/mcp251x.c b/trunk/drivers/net/can/mcp251x.c
index f32b9fc6a983..9aa0c64c33c8 100644
--- a/trunk/drivers/net/can/mcp251x.c
+++ b/trunk/drivers/net/can/mcp251x.c
@@ -929,6 +929,7 @@ static int mcp251x_open(struct net_device *net)
struct mcp251x_priv *priv = netdev_priv(net);
struct spi_device *spi = priv->spi;
struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+ unsigned long flags;
int ret;
ret = open_candev(net);
@@ -945,9 +946,14 @@ static int mcp251x_open(struct net_device *net)
priv->tx_skb = NULL;
priv->tx_len = 0;
+ flags = IRQF_ONESHOT;
+ if (pdata->irq_flags)
+ flags |= pdata->irq_flags;
+ else
+ flags |= IRQF_TRIGGER_FALLING;
+
ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
- pdata->irq_flags ? pdata->irq_flags : IRQF_TRIGGER_FALLING,
- DEVICE_NAME, priv);
+ flags, DEVICE_NAME, priv);
if (ret) {
dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
if (pdata->transceiver_enable)
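The mcp251x change matters because the driver passes a NULL primary handler to request_threaded_irq(); in that configuration the kernel requires IRQF_ONESHOT so the interrupt line stays masked until the threaded handler has finished. A minimal sketch of the same request pattern (names mirror the hunk above):

	unsigned long flags = IRQF_ONESHOT;
	int ret;

	/* honour a board-specific trigger if one was supplied, else default */
	if (pdata->irq_flags)
		flags |= pdata->irq_flags;
	else
		flags |= IRQF_TRIGGER_FALLING;

	ret = request_threaded_irq(spi->irq, NULL /* no hard handler */,
				   mcp251x_can_ist, flags, DEVICE_NAME, priv);
	if (ret)
		dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);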
diff --git a/trunk/drivers/net/can/sja1000/Kconfig b/trunk/drivers/net/can/sja1000/Kconfig
index b39ca5b3ea7f..ff2ba86cd4a4 100644
--- a/trunk/drivers/net/can/sja1000/Kconfig
+++ b/trunk/drivers/net/can/sja1000/Kconfig
@@ -46,6 +46,7 @@ config CAN_EMS_PCI
config CAN_PEAK_PCMCIA
tristate "PEAK PCAN-PC Card"
depends on PCMCIA
+ depends on HAS_IOPORT
---help---
This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels)
from PEAK-System (http://www.peak-system.com). To compile this
diff --git a/trunk/drivers/net/can/sja1000/plx_pci.c b/trunk/drivers/net/can/sja1000/plx_pci.c
index a042cdc260dc..3c18d7d000ed 100644
--- a/trunk/drivers/net/can/sja1000/plx_pci.c
+++ b/trunk/drivers/net/can/sja1000/plx_pci.c
@@ -348,7 +348,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
*/
if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) ==
REG_CR_BASICCAN_INITIAL &&
- (priv->read_reg(priv, REG_SR) == REG_SR_BASICCAN_INITIAL) &&
+ (priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_BASICCAN_INITIAL) &&
(priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL))
flag = 1;
@@ -360,7 +360,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
* See states on p. 23 of the Datasheet.
*/
if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL &&
- priv->read_reg(priv, REG_SR) == REG_SR_PELICAN_INITIAL &&
+ priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_PELICAN_INITIAL &&
priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL)
return flag;
diff --git a/trunk/drivers/net/can/sja1000/sja1000.c b/trunk/drivers/net/can/sja1000/sja1000.c
index daf4013a8fc7..e4df307eaa90 100644
--- a/trunk/drivers/net/can/sja1000/sja1000.c
+++ b/trunk/drivers/net/can/sja1000/sja1000.c
@@ -92,7 +92,7 @@ static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val)
*/
spin_lock_irqsave(&priv->cmdreg_lock, flags);
priv->write_reg(priv, REG_CMR, val);
- priv->read_reg(priv, REG_SR);
+ priv->read_reg(priv, SJA1000_REG_SR);
spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
}
@@ -502,7 +502,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) {
n++;
- status = priv->read_reg(priv, REG_SR);
+ status = priv->read_reg(priv, SJA1000_REG_SR);
/* check for absent controller due to hw unplug */
if (status == 0xFF && sja1000_is_absent(priv))
return IRQ_NONE;
@@ -530,7 +530,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
/* receive interrupt */
while (status & SR_RBS) {
sja1000_rx(dev);
- status = priv->read_reg(priv, REG_SR);
+ status = priv->read_reg(priv, SJA1000_REG_SR);
/* check for absent controller */
if (status == 0xFF && sja1000_is_absent(priv))
return IRQ_NONE;
diff --git a/trunk/drivers/net/can/sja1000/sja1000.h b/trunk/drivers/net/can/sja1000/sja1000.h
index afa99847a510..aa48e053da27 100644
--- a/trunk/drivers/net/can/sja1000/sja1000.h
+++ b/trunk/drivers/net/can/sja1000/sja1000.h
@@ -56,7 +56,7 @@
/* SJA1000 registers - manual section 6.4 (Pelican Mode) */
#define REG_MOD 0x00
#define REG_CMR 0x01
-#define REG_SR 0x02
+#define SJA1000_REG_SR 0x02
#define REG_IR 0x03
#define REG_IER 0x04
#define REG_ALC 0x0B
diff --git a/trunk/drivers/net/can/sja1000/sja1000_of_platform.c b/trunk/drivers/net/can/sja1000/sja1000_of_platform.c
index 6433b81256cd..8e0c4a001939 100644
--- a/trunk/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/trunk/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -96,8 +96,8 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
struct net_device *dev;
struct sja1000_priv *priv;
struct resource res;
- const u32 *prop;
- int err, irq, res_size, prop_size;
+ u32 prop;
+ int err, irq, res_size;
void __iomem *base;
err = of_address_to_resource(np, 0, &res);
@@ -138,27 +138,27 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
priv->read_reg = sja1000_ofp_read_reg;
priv->write_reg = sja1000_ofp_write_reg;
- prop = of_get_property(np, "nxp,external-clock-frequency", &prop_size);
- if (prop && (prop_size == sizeof(u32)))
- priv->can.clock.freq = *prop / 2;
+ err = of_property_read_u32(np, "nxp,external-clock-frequency", &prop);
+ if (!err)
+ priv->can.clock.freq = prop / 2;
else
priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */
- prop = of_get_property(np, "nxp,tx-output-mode", &prop_size);
- if (prop && (prop_size == sizeof(u32)))
- priv->ocr |= *prop & OCR_MODE_MASK;
+ err = of_property_read_u32(np, "nxp,tx-output-mode", &prop);
+ if (!err)
+ priv->ocr |= prop & OCR_MODE_MASK;
else
priv->ocr |= OCR_MODE_NORMAL; /* default */
- prop = of_get_property(np, "nxp,tx-output-config", &prop_size);
- if (prop && (prop_size == sizeof(u32)))
- priv->ocr |= (*prop << OCR_TX_SHIFT) & OCR_TX_MASK;
+ err = of_property_read_u32(np, "nxp,tx-output-config", &prop);
+ if (!err)
+ priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK;
else
priv->ocr |= OCR_TX0_PULLDOWN; /* default */
- prop = of_get_property(np, "nxp,clock-out-frequency", &prop_size);
- if (prop && (prop_size == sizeof(u32)) && *prop) {
- u32 divider = priv->can.clock.freq * 2 / *prop;
+ err = of_property_read_u32(np, "nxp,clock-out-frequency", &prop);
+ if (!err && prop) {
+ u32 divider = priv->can.clock.freq * 2 / prop;
if (divider > 1)
priv->cdr |= divider / 2 - 1;
@@ -168,8 +168,7 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
priv->cdr |= CDR_CLK_OFF; /* default */
}
- prop = of_get_property(np, "nxp,no-comparator-bypass", NULL);
- if (!prop)
+ if (!of_property_read_bool(np, "nxp,no-comparator-bypass"))
priv->cdr |= CDR_CBP; /* default */
priv->irq_flags = IRQF_SHARED;
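The sja1000_of_platform conversion replaces open-coded of_get_property()/size checks with the typed OF helpers; the recurring shape is "read the property, fall back to a default on error". A small sketch of that idiom, using a property name and defaults taken from the hunk:

	u32 prop;
	int err;

	err = of_property_read_u32(np, "nxp,external-clock-frequency", &prop);
	if (!err)
		priv->can.clock.freq = prop / 2;		/* property present and valid */
	else
		priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK;	/* default */

	/* boolean properties only test for presence */
	if (!of_property_read_bool(np, "nxp,no-comparator-bypass"))
		priv->cdr |= CDR_CBP;				/* default */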
diff --git a/trunk/drivers/net/ethernet/8390/ax88796.c b/trunk/drivers/net/ethernet/8390/ax88796.c
index cab306a9888e..e1d26433d619 100644
--- a/trunk/drivers/net/ethernet/8390/ax88796.c
+++ b/trunk/drivers/net/ethernet/8390/ax88796.c
@@ -828,7 +828,7 @@ static int ax_probe(struct platform_device *pdev)
struct ei_device *ei_local;
struct ax_device *ax;
struct resource *irq, *mem, *mem2;
- resource_size_t mem_size, mem2_size = 0;
+ unsigned long mem_size, mem2_size = 0;
int ret = 0;
dev = ax__alloc_ei_netdev(sizeof(struct ax_device));
diff --git a/trunk/drivers/net/ethernet/atheros/atl1e/atl1e.h b/trunk/drivers/net/ethernet/atheros/atl1e/atl1e.h
index 829b5ad71d0d..b5fd934585e9 100644
--- a/trunk/drivers/net/ethernet/atheros/atl1e/atl1e.h
+++ b/trunk/drivers/net/ethernet/atheros/atl1e/atl1e.h
@@ -186,7 +186,7 @@ struct atl1e_tpd_desc {
/* how about 0x2000 */
#define MAX_TX_BUF_LEN 0x2000
#define MAX_TX_BUF_SHIFT 13
-/*#define MAX_TX_BUF_LEN 0x3000 */
+#define MAX_TSO_SEG_SIZE 0x3c00
/* rrs word 1 bit 0:31 */
#define RRS_RX_CSUM_MASK 0xFFFF
@@ -438,7 +438,6 @@ struct atl1e_adapter {
struct atl1e_hw hw;
struct atl1e_hw_stats hw_stats;
- bool have_msi;
u32 wol;
u16 link_speed;
u16 link_duplex;
diff --git a/trunk/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/trunk/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 92f4734f860d..ac25f05ff68f 100644
--- a/trunk/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/trunk/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1849,34 +1849,19 @@ static void atl1e_free_irq(struct atl1e_adapter *adapter)
struct net_device *netdev = adapter->netdev;
free_irq(adapter->pdev->irq, netdev);
-
- if (adapter->have_msi)
- pci_disable_msi(adapter->pdev);
}
static int atl1e_request_irq(struct atl1e_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct net_device *netdev = adapter->netdev;
- int flags = 0;
int err = 0;
- adapter->have_msi = true;
- err = pci_enable_msi(pdev);
- if (err) {
- netdev_dbg(netdev,
- "Unable to allocate MSI interrupt Error: %d\n", err);
- adapter->have_msi = false;
- }
-
- if (!adapter->have_msi)
- flags |= IRQF_SHARED;
- err = request_irq(pdev->irq, atl1e_intr, flags, netdev->name, netdev);
+ err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, netdev->name,
+ netdev);
if (err) {
netdev_dbg(adapter->netdev,
"Unable to allocate interrupt Error: %d\n", err);
- if (adapter->have_msi)
- pci_disable_msi(pdev);
return err;
}
netdev_dbg(netdev, "atl1e_request_irq OK\n");
@@ -2344,6 +2329,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->reset_task, atl1e_reset_task);
INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
+ netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE);
err = register_netdev(netdev);
if (err) {
netdev_err(netdev, "register netdevice failed\n");
diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4046f97378c2..57619dd4a92b 100644
--- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2614,6 +2614,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
}
}
+ /* initialize FW coalescing state machines in RAM */
+ bnx2x_update_coalesce(bp);
+
/* setup the leading queue */
rc = bnx2x_setup_leading(bp);
if (rc) {
@@ -4580,11 +4583,11 @@ static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
u32 addr = BAR_CSTRORM_INTMEM +
CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
- u16 flags = REG_RD16(bp, addr);
+ u8 flags = REG_RD8(bp, addr);
/* clear and set */
flags &= ~HC_INDEX_DATA_HC_ENABLED;
flags |= enable_flag;
- REG_WR16(bp, addr, flags);
+ REG_WR8(bp, addr, flags);
DP(NETIF_MSG_IFUP,
"port %x fw_sb_id %d sb_index %d disable %d\n",
port, fw_sb_id, sb_index, disable);
diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 77ebae0ac64a..0283f343b0d1 100644
--- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -13437,13 +13437,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
{
struct bnx2x *bp = params->bp;
u16 base_page, next_page, not_kr2_device, lane;
- int sigdet = bnx2x_warpcore_get_sigdet(phy, params);
-
- if (!sigdet) {
- if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE))
- bnx2x_kr2_recovery(params, vars, phy);
- return;
- }
+ int sigdet;
/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
* since some switches tend to reinit the AN process and clear the
@@ -13454,6 +13448,16 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
vars->check_kr2_recovery_cnt--;
return;
}
+
+ sigdet = bnx2x_warpcore_get_sigdet(phy, params);
+ if (!sigdet) {
+ if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+ bnx2x_kr2_recovery(params, vars, phy);
+ DP(NETIF_MSG_LINK, "No sigdet\n");
+ }
+ return;
+ }
+
lane = bnx2x_get_warpcore_lane(phy, params);
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, lane);
diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e81a747ea8ce..c50696b396f1 100644
--- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -4947,7 +4947,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
q);
}
- if (!NO_FCOE(bp)) {
+ if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
fp = &bp->fp[FCOE_IDX(bp)];
queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
@@ -9878,6 +9878,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
}
}
+ if (!CHIP_IS_E1x(bp))
+ /* block FW from writing to host */
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
+
/* wait until BRB is empty */
tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
while (timer_count) {
@@ -13354,6 +13358,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
RCU_INIT_POINTER(bp->cnic_ops, NULL);
mutex_unlock(&bp->cnic_mutex);
synchronize_rcu();
+ bp->cnic_enabled = false;
kfree(bp->cnic_kwq);
bp->cnic_kwq = NULL;
diff --git a/trunk/drivers/net/ethernet/broadcom/tg3.c b/trunk/drivers/net/ethernet/broadcom/tg3.c
index 67d2663b3974..17a972734ba7 100644
--- a/trunk/drivers/net/ethernet/broadcom/tg3.c
+++ b/trunk/drivers/net/ethernet/broadcom/tg3.c
@@ -14604,8 +14604,11 @@ static void tg3_read_vpd(struct tg3 *tp)
if (j + len > block_end)
goto partno;
- memcpy(tp->fw_ver, &vpd_data[j], len);
- strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
+ if (len >= sizeof(tp->fw_ver))
+ len = sizeof(tp->fw_ver) - 1;
+ memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
+ snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
+ &vpd_data[j]);
}
partno:
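The tg3 VPD change avoids writing past tp->fw_ver by clamping the copy length and letting snprintf() terminate the string. The same "%.*s" precision trick can be shown in a few lines of plain C; the buffer size and strings here are purely illustrative:

#include <stdio.h>

int main(void)
{
	char fw_ver[16];                   /* illustrative size, not tg3's */
	const char vpd[] = "5719-v1.24-longer-than-needed";
	int len = 10;                      /* length reported by the VPD field */

	/* "%.*s" copies at most 'len' bytes and snprintf() bounds the total */
	snprintf(fw_ver, sizeof(fw_ver), "%.*s bc ", len, vpd);
	printf("'%s'\n", fw_ver);          /* prints '5719-v1.24 bc ' */
	return 0;
}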
diff --git a/trunk/drivers/net/ethernet/calxeda/xgmac.c b/trunk/drivers/net/ethernet/calxeda/xgmac.c
index a170065b5973..b0ebc9f6d55e 100644
--- a/trunk/drivers/net/ethernet/calxeda/xgmac.c
+++ b/trunk/drivers/net/ethernet/calxeda/xgmac.c
@@ -163,6 +163,7 @@
#define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
/* XGMAC_INT_STAT reg */
+#define XGMAC_INT_STAT_PMTIM 0x00800000 /* PMT Interrupt Mask */
#define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */
#define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */
@@ -960,6 +961,9 @@ static int xgmac_hw_init(struct net_device *dev)
writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+ /* Mask power mgt interrupt */
+ writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);
+
/* XGMAC requires AXI bus init. This is a 'magic number' for now */
writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);
@@ -1141,6 +1145,9 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
struct sk_buff *skb;
int frame_len;
+ if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ))
+ break;
+
entry = priv->rx_tail;
p = priv->dma_rx + entry;
if (desc_get_owner(p))
@@ -1825,7 +1832,7 @@ static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
unsigned int pmt = 0;
if (mode & WAKE_MAGIC)
- pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT;
+ pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN;
if (mode & WAKE_UCAST)
pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;
diff --git a/trunk/drivers/net/ethernet/davicom/dm9000.c b/trunk/drivers/net/ethernet/davicom/dm9000.c
index 8cdf02503d13..9eada8e86078 100644
--- a/trunk/drivers/net/ethernet/davicom/dm9000.c
+++ b/trunk/drivers/net/ethernet/davicom/dm9000.c
@@ -257,6 +257,107 @@ static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
tmp = readl(reg);
}
+/*
+ * Sleep, either by using msleep() or if we are suspending, then
+ * use mdelay() to sleep.
+ */
+static void dm9000_msleep(board_info_t *db, unsigned int ms)
+{
+ if (db->in_suspend)
+ mdelay(ms);
+ else
+ msleep(ms);
+}
+
+/* Read a word from phyxcer */
+static int
+dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
+{
+ board_info_t *db = netdev_priv(dev);
+ unsigned long flags;
+ unsigned int reg_save;
+ int ret;
+
+ mutex_lock(&db->addr_lock);
+
+ spin_lock_irqsave(&db->lock, flags);
+
+ /* Save previous register address */
+ reg_save = readb(db->io_addr);
+
+ /* Fill the phyxcer register into REG_0C */
+ iow(db, DM9000_EPAR, DM9000_PHY | reg);
+
+ /* Issue phyxcer read command */
+ iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);
+
+ writeb(reg_save, db->io_addr);
+ spin_unlock_irqrestore(&db->lock, flags);
+
+ dm9000_msleep(db, 1); /* Wait read complete */
+
+ spin_lock_irqsave(&db->lock, flags);
+ reg_save = readb(db->io_addr);
+
+ iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
+
+ /* The read data keeps on REG_0D & REG_0E */
+ ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
+
+ /* restore the previous address */
+ writeb(reg_save, db->io_addr);
+ spin_unlock_irqrestore(&db->lock, flags);
+
+ mutex_unlock(&db->addr_lock);
+
+ dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
+ return ret;
+}
+
+/* Write a word to phyxcer */
+static void
+dm9000_phy_write(struct net_device *dev,
+ int phyaddr_unused, int reg, int value)
+{
+ board_info_t *db = netdev_priv(dev);
+ unsigned long flags;
+ unsigned long reg_save;
+
+ dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
+ mutex_lock(&db->addr_lock);
+
+ spin_lock_irqsave(&db->lock, flags);
+
+ /* Save previous register address */
+ reg_save = readb(db->io_addr);
+
+ /* Fill the phyxcer register into REG_0C */
+ iow(db, DM9000_EPAR, DM9000_PHY | reg);
+
+ /* Fill the written data into REG_0D & REG_0E */
+ iow(db, DM9000_EPDRL, value);
+ iow(db, DM9000_EPDRH, value >> 8);
+
+ /* Issue phyxcer write command */
+ iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);
+
+ writeb(reg_save, db->io_addr);
+ spin_unlock_irqrestore(&db->lock, flags);
+
+ dm9000_msleep(db, 1); /* Wait write complete */
+
+ spin_lock_irqsave(&db->lock, flags);
+ reg_save = readb(db->io_addr);
+
+ iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
+
+ /* restore the previous address */
+ writeb(reg_save, db->io_addr);
+
+ spin_unlock_irqrestore(&db->lock, flags);
+ mutex_unlock(&db->addr_lock);
+}
+
/* dm9000_set_io
*
* select the specified set of io routines to use with the
@@ -795,6 +896,9 @@ dm9000_init_dm9000(struct net_device *dev)
iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
+ dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
+ dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */
+
ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
/* if wol is needed, then always set NCR_WAKEEN otherwise we end
@@ -1201,109 +1305,6 @@ dm9000_open(struct net_device *dev)
return 0;
}
-/*
- * Sleep, either by using msleep() or if we are suspending, then
- * use mdelay() to sleep.
- */
-static void dm9000_msleep(board_info_t *db, unsigned int ms)
-{
- if (db->in_suspend)
- mdelay(ms);
- else
- msleep(ms);
-}
-
-/*
- * Read a word from phyxcer
- */
-static int
-dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
-{
- board_info_t *db = netdev_priv(dev);
- unsigned long flags;
- unsigned int reg_save;
- int ret;
-
- mutex_lock(&db->addr_lock);
-
- spin_lock_irqsave(&db->lock,flags);
-
- /* Save previous register address */
- reg_save = readb(db->io_addr);
-
- /* Fill the phyxcer register into REG_0C */
- iow(db, DM9000_EPAR, DM9000_PHY | reg);
-
- iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); /* Issue phyxcer read command */
-
- writeb(reg_save, db->io_addr);
- spin_unlock_irqrestore(&db->lock,flags);
-
- dm9000_msleep(db, 1); /* Wait read complete */
-
- spin_lock_irqsave(&db->lock,flags);
- reg_save = readb(db->io_addr);
-
- iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
-
- /* The read data keeps on REG_0D & REG_0E */
- ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
-
- /* restore the previous address */
- writeb(reg_save, db->io_addr);
- spin_unlock_irqrestore(&db->lock,flags);
-
- mutex_unlock(&db->addr_lock);
-
- dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
- return ret;
-}
-
-/*
- * Write a word to phyxcer
- */
-static void
-dm9000_phy_write(struct net_device *dev,
- int phyaddr_unused, int reg, int value)
-{
- board_info_t *db = netdev_priv(dev);
- unsigned long flags;
- unsigned long reg_save;
-
- dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
- mutex_lock(&db->addr_lock);
-
- spin_lock_irqsave(&db->lock,flags);
-
- /* Save previous register address */
- reg_save = readb(db->io_addr);
-
- /* Fill the phyxcer register into REG_0C */
- iow(db, DM9000_EPAR, DM9000_PHY | reg);
-
- /* Fill the written data into REG_0D & REG_0E */
- iow(db, DM9000_EPDRL, value);
- iow(db, DM9000_EPDRH, value >> 8);
-
- iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); /* Issue phyxcer write command */
-
- writeb(reg_save, db->io_addr);
- spin_unlock_irqrestore(&db->lock, flags);
-
- dm9000_msleep(db, 1); /* Wait write complete */
-
- spin_lock_irqsave(&db->lock,flags);
- reg_save = readb(db->io_addr);
-
- iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
-
- /* restore the previous address */
- writeb(reg_save, db->io_addr);
-
- spin_unlock_irqrestore(&db->lock, flags);
- mutex_unlock(&db->addr_lock);
-}
-
static void
dm9000_shutdown(struct net_device *dev)
{
@@ -1502,7 +1503,12 @@ dm9000_probe(struct platform_device *pdev)
db->flags |= DM9000_PLATF_SIMPLE_PHY;
#endif
- dm9000_reset(db);

- dm9000_reset(db);
+ /* Fix a bug in dm9000_probe: instead of calling dm9000_reset(db),
+ * set the 'NCR_MAC_LBK' bit, which is needed to keep the DM9000
+ * FIFO stable during the probe stage.
+ */
+
+ iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST);
/* try multiple times, DM9000 sometimes gets the read wrong */
for (i = 0; i < 8; i++) {
diff --git a/trunk/drivers/net/ethernet/davicom/dm9000.h b/trunk/drivers/net/ethernet/davicom/dm9000.h
index 55688bd1a3ef..9ce058adabab 100644
--- a/trunk/drivers/net/ethernet/davicom/dm9000.h
+++ b/trunk/drivers/net/ethernet/davicom/dm9000.h
@@ -69,7 +69,9 @@
#define NCR_WAKEEN (1<<6)
#define NCR_FCOL (1<<4)
#define NCR_FDX (1<<3)
-#define NCR_LBK (3<<1)
+
+#define NCR_RESERVED (3<<1)
+#define NCR_MAC_LBK (1<<1)
#define NCR_RST (1<<0)
#define NSR_SPEED (1<<7)
@@ -167,5 +169,12 @@
#define ISR_LNKCHNG (1<<5)
#define ISR_UNDERRUN (1<<4)
+/* Davicom MII registers.
+ */
+
+#define MII_DM_DSPCR 0x1b /* DSP Control Register */
+
+#define DSPCR_INIT_PARAM 0xE100 /* DSP init parameter */
+
#endif /* _DM9000X_H_ */
diff --git a/trunk/drivers/net/ethernet/emulex/benet/be_main.c b/trunk/drivers/net/ethernet/emulex/benet/be_main.c
index 08e54f3d288b..2886c9b63f90 100644
--- a/trunk/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/trunk/drivers/net/ethernet/emulex/benet/be_main.c
@@ -759,8 +759,9 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
if (vlan_tx_tag_present(skb)) {
vlan_tag = be_get_tx_vlan_tag(adapter, skb);
- __vlan_put_tag(skb, vlan_tag);
- skb->vlan_tci = 0;
+ skb = __vlan_put_tag(skb, vlan_tag);
+ if (skb)
+ skb->vlan_tci = 0;
}
return skb;
diff --git a/trunk/drivers/net/ethernet/freescale/fec.c b/trunk/drivers/net/ethernet/freescale/fec.c
index 911d0253dbb2..73195f643c9c 100644
--- a/trunk/drivers/net/ethernet/freescale/fec.c
+++ b/trunk/drivers/net/ethernet/freescale/fec.c
@@ -345,6 +345,53 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}
+/* Init RX & TX buffer descriptors
+ */
+static void fec_enet_bd_init(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ struct bufdesc *bdp;
+ unsigned int i;
+
+ /* Initialize the receive buffer descriptors. */
+ bdp = fep->rx_bd_base;
+ for (i = 0; i < RX_RING_SIZE; i++) {
+
+ /* Initialize the BD for every fragment in the page. */
+ if (bdp->cbd_bufaddr)
+ bdp->cbd_sc = BD_ENET_RX_EMPTY;
+ else
+ bdp->cbd_sc = 0;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ }
+
+ /* Set the last buffer to wrap */
+ bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+ bdp->cbd_sc |= BD_SC_WRAP;
+
+ fep->cur_rx = fep->rx_bd_base;
+
+ /* ...and the same for transmit */
+ bdp = fep->tx_bd_base;
+ fep->cur_tx = bdp;
+ for (i = 0; i < TX_RING_SIZE; i++) {
+
+ /* Initialize the BD for every fragment in the page. */
+ bdp->cbd_sc = 0;
+ if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) {
+ dev_kfree_skb_any(fep->tx_skbuff[i]);
+ fep->tx_skbuff[i] = NULL;
+ }
+ bdp->cbd_bufaddr = 0;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ }
+
+ /* Set the last buffer to wrap */
+ bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+ bdp->cbd_sc |= BD_SC_WRAP;
+ fep->dirty_tx = bdp;
+}
+
/* This function is called to start or restart the FEC during a link
* change. This only happens when switching between half and full
* duplex.
@@ -388,6 +435,8 @@ fec_restart(struct net_device *ndev, int duplex)
/* Set maximum receive buffer size. */
writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
+ fec_enet_bd_init(ndev);
+
/* Set receive and transmit descriptor base. */
writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
if (fep->bufdesc_ex)
@@ -397,7 +446,6 @@ fec_restart(struct net_device *ndev, int duplex)
writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
- fep->cur_rx = fep->rx_bd_base;
for (i = 0; i <= TX_RING_MOD_MASK; i++) {
if (fep->tx_skbuff[i]) {
@@ -954,6 +1002,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
} else {
if (fep->link) {
fec_stop(ndev);
+ fep->link = phy_dev->link;
status_change = 1;
}
}
@@ -1597,8 +1646,6 @@ static int fec_enet_init(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct bufdesc *cbd_base;
- struct bufdesc *bdp;
- unsigned int i;
/* Allocate memory for buffer descriptors. */
cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
@@ -1608,6 +1655,7 @@ static int fec_enet_init(struct net_device *ndev)
return -ENOMEM;
}
+ memset(cbd_base, 0, PAGE_SIZE);
spin_lock_init(&fep->hw_lock);
fep->netdev = ndev;
@@ -1631,35 +1679,6 @@ static int fec_enet_init(struct net_device *ndev)
writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);
- /* Initialize the receive buffer descriptors. */
- bdp = fep->rx_bd_base;
- for (i = 0; i < RX_RING_SIZE; i++) {
-
- /* Initialize the BD for every fragment in the page. */
- bdp->cbd_sc = 0;
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
- }
-
- /* Set the last buffer to wrap */
- bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
- bdp->cbd_sc |= BD_SC_WRAP;
-
- /* ...and the same for transmit */
- bdp = fep->tx_bd_base;
- fep->cur_tx = bdp;
- for (i = 0; i < TX_RING_SIZE; i++) {
-
- /* Initialize the BD for every fragment in the page. */
- bdp->cbd_sc = 0;
- bdp->cbd_bufaddr = 0;
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
- }
-
- /* Set the last buffer to wrap */
- bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
- bdp->cbd_sc |= BD_SC_WRAP;
- fep->dirty_tx = bdp;
-
fec_restart(ndev, 0);
return 0;
diff --git a/trunk/drivers/net/ethernet/intel/e100.c b/trunk/drivers/net/ethernet/intel/e100.c
index ec800b093e7e..d2bea3f07c73 100644
--- a/trunk/drivers/net/ethernet/intel/e100.c
+++ b/trunk/drivers/net/ethernet/intel/e100.c
@@ -870,7 +870,7 @@ static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
}
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
- void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
+ int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
struct cb *cb;
unsigned long flags;
@@ -888,10 +888,13 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
nic->cbs_avail--;
cb->skb = skb;
+ err = cb_prepare(nic, cb, skb);
+ if (err)
+ goto err_unlock;
+
if (unlikely(!nic->cbs_avail))
err = -ENOSPC;
- cb_prepare(nic, cb, skb);
/* Order is important otherwise we'll be in a race with h/w:
* set S-bit in current first, then clear S-bit in previous. */
@@ -1091,7 +1094,7 @@ static void e100_get_defaults(struct nic *nic)
nic->mii.mdio_write = mdio_write;
}
-static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
struct config *config = &cb->u.config;
u8 *c = (u8 *)config;
@@ -1181,6 +1184,7 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
"[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
+ return 0;
}
/*************************************************************************
@@ -1331,7 +1335,7 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
return fw;
}
-static void e100_setup_ucode(struct nic *nic, struct cb *cb,
+static int e100_setup_ucode(struct nic *nic, struct cb *cb,
struct sk_buff *skb)
{
const struct firmware *fw = (void *)skb;
@@ -1358,6 +1362,7 @@ static void e100_setup_ucode(struct nic *nic, struct cb *cb,
cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);
cb->command = cpu_to_le16(cb_ucode | cb_el);
+ return 0;
}
static inline int e100_load_ucode_wait(struct nic *nic)
@@ -1400,18 +1405,20 @@ static inline int e100_load_ucode_wait(struct nic *nic)
return err;
}
-static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
+static int e100_setup_iaaddr(struct nic *nic, struct cb *cb,
struct sk_buff *skb)
{
cb->command = cpu_to_le16(cb_iaaddr);
memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
+ return 0;
}
-static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
cb->command = cpu_to_le16(cb_dump);
cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
offsetof(struct mem, dump_buf));
+ return 0;
}
static int e100_phy_check_without_mii(struct nic *nic)
@@ -1581,7 +1588,7 @@ static int e100_hw_init(struct nic *nic)
return 0;
}
-static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
struct net_device *netdev = nic->netdev;
struct netdev_hw_addr *ha;
@@ -1596,6 +1603,7 @@ static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
ETH_ALEN);
}
+ return 0;
}
static void e100_set_multicast_list(struct net_device *netdev)
@@ -1756,11 +1764,18 @@ static void e100_watchdog(unsigned long data)
round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}
-static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
+static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
struct sk_buff *skb)
{
+ dma_addr_t dma_addr;
cb->command = nic->tx_command;
+ dma_addr = pci_map_single(nic->pdev,
+ skb->data, skb->len, PCI_DMA_TODEVICE);
+ /* If we can't map the skb, have the upper layer try later */
+ if (pci_dma_mapping_error(nic->pdev, dma_addr))
+ return -ENOMEM;
+
/*
* Use the last 4 bytes of the SKB payload packet as the CRC, used for
* testing, ie sending frames with bad CRC.
@@ -1777,11 +1792,10 @@ static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
cb->u.tcb.tcb_byte_count = 0;
cb->u.tcb.threshold = nic->tx_threshold;
cb->u.tcb.tbd_count = 1;
- cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
- skb->data, skb->len, PCI_DMA_TODEVICE));
- /* check for mapping failure? */
+ cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
skb_tx_timestamp(skb);
+ return 0;
}
static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
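The e100 rework lets each cb_prepare callback return an error, so a DMA mapping failure can be reported before the descriptor is handed to hardware. A hedged sketch of the TX-prepare shape (mirroring the hunk, not the full driver; the function name is a placeholder):

static int xmit_prepare_sketch(struct nic *nic, struct cb *cb,
			       struct sk_buff *skb)
{
	dma_addr_t dma_addr;

	dma_addr = pci_map_single(nic->pdev, skb->data, skb->len,
				  PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(nic->pdev, dma_addr))
		return -ENOMEM;        /* upper layer will retry the skb later */

	cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
	return 0;
}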
diff --git a/trunk/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/trunk/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 43462d596a4e..ffd287196bf8 100644
--- a/trunk/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/trunk/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1053,6 +1053,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
txdr->buffer_info[i].dma =
dma_map_single(&pdev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) {
+ ret_val = 4;
+ goto err_nomem;
+ }
tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
tx_desc->lower.data = cpu_to_le32(skb->len);
tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
@@ -1069,7 +1073,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer),
GFP_KERNEL);
if (!rxdr->buffer_info) {
- ret_val = 4;
+ ret_val = 5;
goto err_nomem;
}
@@ -1077,7 +1081,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
GFP_KERNEL);
if (!rxdr->desc) {
- ret_val = 5;
+ ret_val = 6;
goto err_nomem;
}
memset(rxdr->desc, 0, rxdr->size);
@@ -1101,7 +1105,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
if (!skb) {
- ret_val = 6;
+ ret_val = 7;
goto err_nomem;
}
skb_reserve(skb, NET_IP_ALIGN);
@@ -1110,6 +1114,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rxdr->buffer_info[i].dma =
dma_map_single(&pdev->dev, skb->data,
E1000_RXBUFFER_2048, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) {
+ ret_val = 8;
+ goto err_nomem;
+ }
rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
memset(skb->data, 0x00, skb->len);
}
diff --git a/trunk/drivers/net/ethernet/intel/e1000e/netdev.c b/trunk/drivers/net/ethernet/intel/e1000e/netdev.c
index 948b86ffa4f0..7e615e2bf7e6 100644
--- a/trunk/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/trunk/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -848,11 +848,16 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
}
}
- if (!buffer_info->dma)
+ if (!buffer_info->dma) {
buffer_info->dma = dma_map_page(&pdev->dev,
buffer_info->page, 0,
PAGE_SIZE,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+ }
rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
diff --git a/trunk/drivers/net/ethernet/intel/igb/igb.h b/trunk/drivers/net/ethernet/intel/igb/igb.h
index 25151401c2ab..ab577a763a20 100644
--- a/trunk/drivers/net/ethernet/intel/igb/igb.h
+++ b/trunk/drivers/net/ethernet/intel/igb/igb.h
@@ -284,18 +284,10 @@ struct igb_q_vector {
enum e1000_ring_flags_t {
IGB_RING_FLAG_RX_SCTP_CSUM,
IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
- IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
IGB_RING_FLAG_TX_CTX_IDX,
IGB_RING_FLAG_TX_DETECT_HANG
};
-#define ring_uses_build_skb(ring) \
- test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
-#define set_ring_build_skb_enabled(ring) \
- set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
-#define clear_ring_build_skb_enabled(ring) \
- clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
-
#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
#define IGB_RX_DESC(R, i) \
diff --git a/trunk/drivers/net/ethernet/intel/igb/igb_main.c b/trunk/drivers/net/ethernet/intel/igb/igb_main.c
index 8496adfc6a68..64f75291e3a5 100644
--- a/trunk/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/trunk/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3350,20 +3350,6 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
wr32(E1000_RXDCTL(reg_idx), rxdctl);
}
-static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
- struct igb_ring *rx_ring)
-{
-#define IGB_MAX_BUILD_SKB_SIZE \
- (SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \
- (NET_SKB_PAD + NET_IP_ALIGN + IGB_TS_HDR_LEN))
-
- /* set build_skb flag */
- if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE)
- set_ring_build_skb_enabled(rx_ring);
- else
- clear_ring_build_skb_enabled(rx_ring);
-}
-
/**
* igb_configure_rx - Configure receive Unit after Reset
* @adapter: board private structure
@@ -3383,11 +3369,8 @@ static void igb_configure_rx(struct igb_adapter *adapter)
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring */
- for (i = 0; i < adapter->num_rx_queues; i++) {
- struct igb_ring *rx_ring = adapter->rx_ring[i];
- igb_set_rx_buffer_len(adapter, rx_ring);
- igb_configure_rx_ring(adapter, rx_ring);
- }
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
}
/**
@@ -6203,78 +6186,6 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
return igb_can_reuse_rx_page(rx_buffer, page, truesize);
}
-static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc)
-{
- struct igb_rx_buffer *rx_buffer;
- struct sk_buff *skb;
- struct page *page;
- void *page_addr;
- unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = IGB_RX_BUFSZ;
-#else
- unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
- SKB_DATA_ALIGN(NET_SKB_PAD +
- NET_IP_ALIGN +
- size);
-#endif
-
- /* If we spanned a buffer we have a huge mess so test for it */
- BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
-
- rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
- page = rx_buffer->page;
- prefetchw(page);
-
- page_addr = page_address(page) + rx_buffer->page_offset;
-
- /* prefetch first cache line of first page */
- prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN);
-#if L1_CACHE_BYTES < 128
- prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN);
-#endif
-
- /* build an skb to around the page buffer */
- skb = build_skb(page_addr, truesize);
- if (unlikely(!skb)) {
- rx_ring->rx_stats.alloc_failed++;
- return NULL;
- }
-
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- IGB_RX_BUFSZ,
- DMA_FROM_DEVICE);
-
- /* update pointers within the skb to store the data */
- skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
- __skb_put(skb, size);
-
- /* pull timestamp out of packet data */
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
- igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
- __skb_pull(skb, IGB_TS_HDR_LEN);
- }
-
- if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) {
- /* hand second half of page back to the ring */
- igb_reuse_rx_page(rx_ring, rx_buffer);
- } else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buffer->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
- }
-
- /* clear contents of buffer_info */
- rx_buffer->dma = 0;
- rx_buffer->page = NULL;
-
- return skb;
-}
-
static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -6690,10 +6601,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
rmb();
/* retrieve a buffer from the ring */
- if (ring_uses_build_skb(rx_ring))
- skb = igb_build_rx_buffer(rx_ring, rx_desc);
- else
- skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
+ skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
/* exit if we failed to retrieve a buffer */
if (!skb)
@@ -6780,14 +6688,6 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
return true;
}
-static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
-{
- if (ring_uses_build_skb(rx_ring))
- return NET_SKB_PAD + NET_IP_ALIGN;
- else
- return 0;
-}
-
/**
* igb_alloc_rx_buffers - Replace used receive buffers; packet split
* @adapter: address of board private structure
@@ -6814,9 +6714,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma +
- bi->page_offset +
- igb_rx_offset(rx_ring));
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
rx_desc++;
bi++;
diff --git a/trunk/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/trunk/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index ea4808373435..b5f94abe3cff 100644
--- a/trunk/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/trunk/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -2159,6 +2159,10 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
skb->data,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
rx_desc = IXGB_RX_DESC(*rx_ring, i);
rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
@@ -2168,7 +2172,8 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
rx_desc->status = 0;
- if (++i == rx_ring->count) i = 0;
+ if (++i == rx_ring->count)
+ i = 0;
buffer_info = &rx_ring->buffer_info[i];
}
diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index db5611ae407e..79f4a26ea6cc 100644
--- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7922,12 +7922,19 @@ static int __init ixgbe_init_module(void)
ixgbe_dbg_init();
#endif /* CONFIG_DEBUG_FS */
+ ret = pci_register_driver(&ixgbe_driver);
+ if (ret) {
+#ifdef CONFIG_DEBUG_FS
+ ixgbe_dbg_exit();
+#endif /* CONFIG_DEBUG_FS */
+ return ret;
+ }
+
#ifdef CONFIG_IXGBE_DCA
dca_register_notify(&dca_notifier);
#endif
- ret = pci_register_driver(&ixgbe_driver);
- return ret;
+ return 0;
}
module_init(ixgbe_init_module);
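The ixgbe_init_module() reordering follows the usual "unwind on failure" rule for module init: anything set up before pci_register_driver() must be torn down if registration fails, otherwise the debugfs directory would leak. A compact sketch of that ordering; all example_* names below are placeholders, not real ixgbe symbols:

static int __init example_init_module(void)
{
	int ret;

	example_dbg_init();                  /* early setup (e.g. debugfs) */

	ret = pci_register_driver(&example_driver);
	if (ret) {
		example_dbg_exit();          /* undo the early setup */
		return ret;
	}

	example_notifier_register();         /* only after registration succeeds */
	return 0;
}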
diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index d44b4d21268c..97e33669c0b9 100644
--- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -1049,6 +1049,12 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
return -EINVAL;
if (vlan || qos) {
+ if (adapter->vfinfo[vf].pf_vlan)
+ err = ixgbe_set_vf_vlan(adapter, false,
+ adapter->vfinfo[vf].pf_vlan,
+ vf);
+ if (err)
+ goto out;
err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
if (err)
goto out;
diff --git a/trunk/drivers/net/ethernet/marvell/Kconfig b/trunk/drivers/net/ethernet/marvell/Kconfig
index edfba9370922..434e33c527df 100644
--- a/trunk/drivers/net/ethernet/marvell/Kconfig
+++ b/trunk/drivers/net/ethernet/marvell/Kconfig
@@ -33,6 +33,7 @@ config MV643XX_ETH
config MVMDIO
tristate "Marvell MDIO interface support"
+ select PHYLIB
---help---
This driver supports the MDIO interface found in the network
interface units of the Marvell EBU SoCs (Kirkwood, Orion5x,
@@ -45,7 +46,6 @@ config MVMDIO
config MVNETA
tristate "Marvell Armada 370/XP network interface support"
depends on MACH_ARMADA_370_XP
- select PHYLIB
select MVMDIO
---help---
This driver supports the network interface units in the
diff --git a/trunk/drivers/net/ethernet/marvell/mvneta.c b/trunk/drivers/net/ethernet/marvell/mvneta.c
index cd345b8969bc..a47a097c21e1 100644
--- a/trunk/drivers/net/ethernet/marvell/mvneta.c
+++ b/trunk/drivers/net/ethernet/marvell/mvneta.c
@@ -374,7 +374,6 @@ static int rxq_number = 8;
static int txq_number = 8;
static int rxq_def;
-static int txq_def;
#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"
@@ -1475,7 +1474,8 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
{
struct mvneta_port *pp = netdev_priv(dev);
- struct mvneta_tx_queue *txq = &pp->txqs[txq_def];
+ u16 txq_id = skb_get_queue_mapping(skb);
+ struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
struct mvneta_tx_desc *tx_desc;
struct netdev_queue *nq;
int frags = 0;
@@ -1485,7 +1485,7 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
goto out;
frags = skb_shinfo(skb)->nr_frags + 1;
- nq = netdev_get_tx_queue(dev, txq_def);
+ nq = netdev_get_tx_queue(dev, txq_id);
/* Get a descriptor for the first part of the packet */
tx_desc = mvneta_txq_next_desc_get(txq);
@@ -2689,7 +2689,7 @@ static int mvneta_probe(struct platform_device *pdev)
return -EINVAL;
}
- dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8);
+ dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
if (!dev)
return -ENOMEM;
@@ -2771,16 +2771,17 @@ static int mvneta_probe(struct platform_device *pdev)
netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);
+ dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
err = register_netdev(dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to register\n");
goto err_deinit;
}
- dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
- dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM;
- dev->priv_flags |= IFF_UNICAST_FLT;
-
netdev_info(dev, "mac: %pM\n", dev->dev_addr);
platform_set_drvdata(pdev, pp->dev);
@@ -2843,4 +2844,3 @@ module_param(rxq_number, int, S_IRUGO);
module_param(txq_number, int, S_IRUGO);
module_param(rxq_def, int, S_IRUGO);
-module_param(txq_def, int, S_IRUGO);
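The mvneta change drops the fixed txq_def queue: the device is now allocated with alloc_etherdev_mqs() for txq_number/rxq_number queues, and each packet is sent on the queue the networking stack already selected. A short sketch of the per-packet selection (names follow the hunk):

	u16 txq_id = skb_get_queue_mapping(skb);        /* queue chosen by the stack */
	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
	struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

	/* the same txq_id must be used for descriptors and for flow control,
	 * otherwise one queue could be stopped while another fills up
	 */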
diff --git a/trunk/drivers/net/ethernet/marvell/sky2.c b/trunk/drivers/net/ethernet/marvell/sky2.c
index fc07ca35721b..6a0e671fcecd 100644
--- a/trunk/drivers/net/ethernet/marvell/sky2.c
+++ b/trunk/drivers/net/ethernet/marvell/sky2.c
@@ -1067,7 +1067,7 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
- tp = space - 2048/8;
+ tp = space - 8192/8;
sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
} else {
diff --git a/trunk/drivers/net/ethernet/marvell/sky2.h b/trunk/drivers/net/ethernet/marvell/sky2.h
index 615ac63ea860..ec6dcd80152b 100644
--- a/trunk/drivers/net/ethernet/marvell/sky2.h
+++ b/trunk/drivers/net/ethernet/marvell/sky2.h
@@ -2074,7 +2074,7 @@ enum {
GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */
GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */
-#define GMAC_DEF_MSK GM_IS_TX_FF_UR
+#define GMAC_DEF_MSK (GM_IS_TX_FF_UR | GM_IS_RX_FF_OR)
};
/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */
diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/trunk/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index f278b10ef714..30d78f806dc3 100644
--- a/trunk/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/trunk/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -411,8 +411,8 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
- unsigned int i;
- for (i = ETH_ALEN - 1; i; --i) {
+ int i;
+ for (i = ETH_ALEN - 1; i >= 0; --i) {
dst_mac[i] = src_mac & 0xff;
src_mac >>= 8;
}
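The mlx4_en_u64_to_mac fix is worth spelling out: with the old unsigned counter the loop terminated at i == 1, so byte 0 of the MAC was never written (and an "i >= 0" test on an unsigned type would never become false). A standalone illustration in plain C:

#include <stdio.h>
#include <stdint.h>

#define ETH_ALEN 6

static void u64_to_mac(unsigned char dst_mac[ETH_ALEN], uint64_t src_mac)
{
	int i;                                  /* must be signed for i >= 0 */

	for (i = ETH_ALEN - 1; i >= 0; --i) {   /* fills bytes 5 down to 0 */
		dst_mac[i] = src_mac & 0xff;
		src_mac >>= 8;
	}
}

int main(void)
{
	unsigned char mac[ETH_ALEN];

	u64_to_mac(mac, 0x001122334455ULL);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}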
diff --git a/trunk/drivers/net/ethernet/micrel/ks8851.c b/trunk/drivers/net/ethernet/micrel/ks8851.c
index 33bcb63d56a2..8fb481252e2c 100644
--- a/trunk/drivers/net/ethernet/micrel/ks8851.c
+++ b/trunk/drivers/net/ethernet/micrel/ks8851.c
@@ -528,7 +528,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
for (; rxfc != 0; rxfc--) {
rxh = ks8851_rdreg32(ks, KS_RXFHSR);
rxstat = rxh & 0xffff;
- rxlen = rxh >> 16;
+ rxlen = (rxh >> 16) & 0xfff;
netif_dbg(ks, rx_status, ks->netdev,
"rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen);
diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index cd5ae8813cb3..edd63f1230f3 100644
--- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -1500,6 +1500,12 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
}
} while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
+ /* Make sure carrier is off and queue is stopped during loopback */
+ if (netif_running(netdev)) {
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+
ret = qlcnic_do_lb_test(adapter, mode);
qlcnic_83xx_clear_lb_mode(adapter, mode);
@@ -2780,6 +2786,7 @@ static u64 *qlcnic_83xx_fill_stats(struct qlcnic_adapter *adapter,
void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
{
struct qlcnic_cmd_args cmd;
+ struct net_device *netdev = adapter->netdev;
int ret = 0;
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS);
@@ -2789,7 +2796,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
QLC_83XX_STAT_TX, &ret);
if (ret) {
- dev_info(&adapter->pdev->dev, "Error getting MAC stats\n");
+ netdev_err(netdev, "Error getting Tx stats\n");
goto out;
}
/* Get MAC stats */
@@ -2799,8 +2806,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
QLC_83XX_STAT_MAC, &ret);
if (ret) {
- dev_info(&adapter->pdev->dev,
- "Error getting Rx stats\n");
+ netdev_err(netdev, "Error getting MAC stats\n");
goto out;
}
/* Get Rx stats */
@@ -2810,8 +2816,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
QLC_83XX_STAT_RX, &ret);
if (ret)
- dev_info(&adapter->pdev->dev,
- "Error getting Tx stats\n");
+ netdev_err(netdev, "Error getting Rx stats\n");
out:
qlcnic_free_mbx_args(&cmd);
}
diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 0e630061bff3..5fa847fe388a 100644
--- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -358,8 +358,7 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
}
opcode = TX_ETHER_PKT;
- if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
- skb_shinfo(skb)->gso_size > 0) {
+ if (skb_is_gso(skb)) {
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
first_desc->total_hdr_length = hdr_len;
diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 987fb6f8adc3..5ef328af61d0 100644
--- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -200,10 +200,10 @@ static ssize_t qlcnic_store_beacon(struct device *dev,
}
err = qlcnic_config_led(adapter, b_state, b_rate);
- if (!err)
+ if (!err) {
err = len;
- else
ahw->beacon_state = b_state;
+ }
if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
diff --git a/trunk/drivers/net/ethernet/qlogic/qlge/qlge.h b/trunk/drivers/net/ethernet/qlogic/qlge/qlge.h
index a131d7b5d2fe..7e8d68263963 100644
--- a/trunk/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/trunk/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,7 +18,7 @@
*/
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "v1.00.00.31"
+#define DRV_VERSION "v1.00.00.32"
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
diff --git a/trunk/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/trunk/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 6f316ab23257..0780e039b271 100644
--- a/trunk/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/trunk/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -379,13 +379,13 @@ static int ql_get_settings(struct net_device *ndev,
ecmd->supported = SUPPORTED_10000baseT_Full;
ecmd->advertising = ADVERTISED_10000baseT_Full;
- ecmd->autoneg = AUTONEG_ENABLE;
ecmd->transceiver = XCVR_EXTERNAL;
if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
STS_LINK_TYPE_10GBASET) {
ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
ecmd->port = PORT_TP;
+ ecmd->autoneg = AUTONEG_ENABLE;
} else {
ecmd->supported |= SUPPORTED_FIBRE;
ecmd->advertising |= ADVERTISED_FIBRE;
diff --git a/trunk/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/trunk/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index b13ab544a7eb..8033555e53c2 100644
--- a/trunk/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/trunk/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1434,11 +1434,13 @@ static int ql_map_send(struct ql_adapter *qdev,
}
/* Categorizing receive firmware frame errors */
-static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err)
+static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
+ struct rx_ring *rx_ring)
{
struct nic_stats *stats = &qdev->nic_stats;
stats->rx_err_count++;
+ rx_ring->rx_errors++;
switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
@@ -1474,6 +1476,12 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
struct napi_struct *napi = &rx_ring->napi;
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+ put_page(lbq_desc->p.pg_chunk.page);
+ return;
+ }
napi->dev = qdev->ndev;
skb = napi_get_frags(napi);
@@ -1529,6 +1537,12 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
addr = lbq_desc->p.pg_chunk.va;
prefetch(addr);
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+ goto err_out;
+ }
+
/* The max framesize filter on this chip is set higher than
* MTU since FCoE uses 2k frames.
*/
@@ -1614,6 +1628,13 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
memcpy(skb_put(new_skb, length), skb->data, length);
skb = new_skb;
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
/* loopback self test for ethtool */
if (test_bit(QL_SELFTEST, &qdev->flags)) {
ql_check_lb_frame(qdev, skb);
@@ -1919,6 +1940,13 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
return;
}
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
/* The max framesize filter on this chip is set higher than
* MTU since FCoE uses 2k frames.
*/
@@ -2000,12 +2028,6 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- ql_categorize_rx_err(qdev, ib_mac_rsp->flags2);
- return (unsigned long)length;
- }
-
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
/* The data and headers are split into
* separate buffers.
diff --git a/trunk/drivers/net/ethernet/realtek/r8169.c b/trunk/drivers/net/ethernet/realtek/r8169.c
index 28fb50a1e9c3..4ecbe64a758d 100644
--- a/trunk/drivers/net/ethernet/realtek/r8169.c
+++ b/trunk/drivers/net/ethernet/realtek/r8169.c
@@ -3818,6 +3818,30 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp)
}
}
+static void rtl_speed_down(struct rtl8169_private *tp)
+{
+ u32 adv;
+ int lpa;
+
+ rtl_writephy(tp, 0x1f, 0x0000);
+ lpa = rtl_readphy(tp, MII_LPA);
+
+ if (lpa & (LPA_10HALF | LPA_10FULL))
+ adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full;
+ else if (lpa & (LPA_100HALF | LPA_100FULL))
+ adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
+ else
+ adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
+ (tp->mii.supports_gmii ?
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full : 0);
+
+ rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
+ adv);
+}
+
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
@@ -3848,9 +3872,7 @@ static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
return false;
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, MII_BMCR, 0x0000);
-
+ rtl_speed_down(tp);
rtl_wol_suspend_quirk(tp);
return true;
diff --git a/trunk/drivers/net/ethernet/renesas/sh_eth.c b/trunk/drivers/net/ethernet/renesas/sh_eth.c
index bf5e3cf97c4d..6ed333fe5c04 100644
--- a/trunk/drivers/net/ethernet/renesas/sh_eth.c
+++ b/trunk/drivers/net/ethernet/renesas/sh_eth.c
@@ -1216,10 +1216,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
if (felic_stat & ECSR_LCHNG) {
/* Link Changed */
if (mdp->cd->no_psr || mdp->no_ether_link) {
- if (mdp->link == PHY_DOWN)
- link_stat = 0;
- else
- link_stat = PHY_ST_LINK;
+ goto ignore_link;
} else {
link_stat = (sh_eth_read(ndev, PSR));
if (mdp->ether_link_active_low)
@@ -1242,6 +1239,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
}
}
+ignore_link:
if (intr_status & EESR_TWB) {
/* Write back end. Unused write back interrupt */
if (intr_status & EESR_TABT) /* Transmit Abort int */
@@ -1326,12 +1324,18 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_cpu_data *cd = mdp->cd;
irqreturn_t ret = IRQ_NONE;
- u32 intr_status = 0;
+ unsigned long intr_status;
spin_lock(&mdp->lock);
- /* Get interrpt stat */
+ /* Get interrupt status */
intr_status = sh_eth_read(ndev, EESR);
+ /* Mask it with the interrupt mask, forcing ECI interrupt to be always
+ * enabled since it's the one that comes thru regardless of the mask,
+ * and we need to fully handle it in sh_eth_error() in order to quench
+ * it as it doesn't get cleared by just writing 1 to the ECI bit...
+ */
+ intr_status &= sh_eth_read(ndev, EESIPR) | DMAC_M_ECI;
/* Clear interrupt */
if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
@@ -1373,7 +1377,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
struct phy_device *phydev = mdp->phydev;
int new_state = 0;
- if (phydev->link != PHY_DOWN) {
+ if (phydev->link) {
if (phydev->duplex != mdp->duplex) {
new_state = 1;
mdp->duplex = phydev->duplex;
@@ -1387,17 +1391,21 @@ static void sh_eth_adjust_link(struct net_device *ndev)
if (mdp->cd->set_rate)
mdp->cd->set_rate(ndev);
}
- if (mdp->link == PHY_DOWN) {
+ if (!mdp->link) {
sh_eth_write(ndev,
(sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
new_state = 1;
mdp->link = phydev->link;
+ if (mdp->cd->no_psr || mdp->no_ether_link)
+ sh_eth_rcv_snd_enable(ndev);
}
} else if (mdp->link) {
new_state = 1;
- mdp->link = PHY_DOWN;
+ mdp->link = 0;
mdp->speed = 0;
mdp->duplex = -1;
+ if (mdp->cd->no_psr || mdp->no_ether_link)
+ sh_eth_rcv_snd_disable(ndev);
}
if (new_state && netif_msg_link(mdp))
@@ -1414,7 +1422,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
mdp->mii_bus->id , mdp->phy_id);
- mdp->link = PHY_DOWN;
+ mdp->link = 0;
mdp->speed = 0;
mdp->duplex = -1;
diff --git a/trunk/drivers/net/ethernet/renesas/sh_eth.h b/trunk/drivers/net/ethernet/renesas/sh_eth.h
index e6655678458e..828be4515008 100644
--- a/trunk/drivers/net/ethernet/renesas/sh_eth.h
+++ b/trunk/drivers/net/ethernet/renesas/sh_eth.h
@@ -723,7 +723,7 @@ struct sh_eth_private {
u32 phy_id; /* PHY ID */
struct mii_bus *mii_bus; /* MDIO bus control */
struct phy_device *phydev; /* PHY device control */
- enum phy_state link;
+ int link;
phy_interface_t phy_interface;
int msg_enable;
int speed;
diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/trunk/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 0c74a702d461..50617c5a0bdb 100644
--- a/trunk/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/trunk/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -149,6 +149,7 @@ void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
{
writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_IPC_INTR_MASK);
}
/* This reads the MAC core counters (if actually supported).
diff --git a/trunk/drivers/net/ethernet/ti/cpsw.c b/trunk/drivers/net/ethernet/ti/cpsw.c
index df32a090d08e..4781d3d8e182 100644
--- a/trunk/drivers/net/ethernet/ti/cpsw.c
+++ b/trunk/drivers/net/ethernet/ti/cpsw.c
@@ -436,7 +436,7 @@ void cpsw_tx_handler(void *token, int len, int status)
* queue is stopped then start the queue as we have free desc for tx
*/
if (unlikely(netif_queue_stopped(ndev)))
- netif_start_queue(ndev);
+ netif_wake_queue(ndev);
cpts_tx_timestamp(priv->cpts, skb);
priv->stats.tx_packets++;
priv->stats.tx_bytes += len;
@@ -1380,7 +1380,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
if (data->dual_emac) {
- if (of_property_read_u32(node, "dual_emac_res_vlan",
+ if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
&prop)) {
pr_err("Missing dual_emac_res_vlan in DT.\n");
slave_data->dual_emac_res_vlan = i+1;
diff --git a/trunk/drivers/net/ethernet/ti/davinci_emac.c b/trunk/drivers/net/ethernet/ti/davinci_emac.c
index ae1b77aa199f..72300bc9e378 100644
--- a/trunk/drivers/net/ethernet/ti/davinci_emac.c
+++ b/trunk/drivers/net/ethernet/ti/davinci_emac.c
@@ -1053,7 +1053,7 @@ static void emac_tx_handler(void *token, int len, int status)
* queue is stopped then start the queue as we have free desc for tx
*/
if (unlikely(netif_queue_stopped(ndev)))
- netif_start_queue(ndev);
+ netif_wake_queue(ndev);
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += len;
dev_kfree_skb_any(skb);
diff --git a/trunk/drivers/net/hyperv/netvsc.c b/trunk/drivers/net/hyperv/netvsc.c
index 1cd77483da50..f5f0f09e4cc5 100644
--- a/trunk/drivers/net/hyperv/netvsc.c
+++ b/trunk/drivers/net/hyperv/netvsc.c
@@ -470,8 +470,10 @@ static void netvsc_send_completion(struct hv_device *device,
packet->trans_id;
/* Notify the layer above us */
- nvsc_packet->completion.send.send_completion(
- nvsc_packet->completion.send.send_completion_ctx);
+ if (nvsc_packet)
+ nvsc_packet->completion.send.send_completion(
+ nvsc_packet->completion.send.
+ send_completion_ctx);
num_outstanding_sends =
atomic_dec_return(&net_device->num_outstanding_sends);
@@ -498,6 +500,7 @@ int netvsc_send(struct hv_device *device,
int ret = 0;
struct nvsp_message sendMessage;
struct net_device *ndev;
+ u64 req_id;
net_device = get_outbound_net_device(device);
if (!net_device)
@@ -518,20 +521,24 @@ int netvsc_send(struct hv_device *device,
0xFFFFFFFF;
sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
+ if (packet->completion.send.send_completion)
+ req_id = (u64)packet;
+ else
+ req_id = 0;
+
if (packet->page_buf_cnt) {
ret = vmbus_sendpacket_pagebuffer(device->channel,
packet->page_buf,
packet->page_buf_cnt,
&sendMessage,
sizeof(struct nvsp_message),
- (unsigned long)packet);
+ req_id);
} else {
ret = vmbus_sendpacket(device->channel, &sendMessage,
sizeof(struct nvsp_message),
- (unsigned long)packet,
+ req_id,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
-
}
if (ret == 0) {
diff --git a/trunk/drivers/net/hyperv/netvsc_drv.c b/trunk/drivers/net/hyperv/netvsc_drv.c
index 5f85205cd12b..8341b62e5521 100644
--- a/trunk/drivers/net/hyperv/netvsc_drv.c
+++ b/trunk/drivers/net/hyperv/netvsc_drv.c
@@ -241,13 +241,11 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
if (status == 1) {
netif_carrier_on(net);
- netif_wake_queue(net);
ndev_ctx = netdev_priv(net);
schedule_delayed_work(&ndev_ctx->dwork, 0);
schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
} else {
netif_carrier_off(net);
- netif_tx_disable(net);
}
}
diff --git a/trunk/drivers/net/hyperv/rndis_filter.c b/trunk/drivers/net/hyperv/rndis_filter.c
index 2b657d4d63a8..0775f0aefd1e 100644
--- a/trunk/drivers/net/hyperv/rndis_filter.c
+++ b/trunk/drivers/net/hyperv/rndis_filter.c
@@ -61,9 +61,6 @@ struct rndis_request {
static void rndis_filter_send_completion(void *ctx);
-static void rndis_filter_send_request_completion(void *ctx);
-
-
static struct rndis_device *get_rndis_device(void)
{
@@ -241,10 +238,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
packet->page_buf[0].len;
}
- packet->completion.send.send_completion_ctx = req;/* packet; */
- packet->completion.send.send_completion =
- rndis_filter_send_request_completion;
- packet->completion.send.send_completion_tid = (unsigned long)dev;
+ packet->completion.send.send_completion = NULL;
ret = netvsc_send(dev->net_dev->dev, packet);
return ret;
@@ -999,9 +993,3 @@ static void rndis_filter_send_completion(void *ctx)
/* Pass it back to the original handler */
filter_pkt->completion(filter_pkt->completion_ctx);
}
-
-
-static void rndis_filter_send_request_completion(void *ctx)
-{
- /* Noop */
-}
diff --git a/trunk/drivers/net/tun.c b/trunk/drivers/net/tun.c
index b7c457adc0dc..729ed533bb33 100644
--- a/trunk/drivers/net/tun.c
+++ b/trunk/drivers/net/tun.c
@@ -1594,7 +1594,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (tun->flags & TUN_TAP_MQ &&
(tun->numqueues + tun->numdisabled > 1))
- return err;
+ return -EBUSY;
}
else {
char *name;
diff --git a/trunk/drivers/net/usb/cdc_mbim.c b/trunk/drivers/net/usb/cdc_mbim.c
index 16c842997291..6bd91676d2cb 100644
--- a/trunk/drivers/net/usb/cdc_mbim.c
+++ b/trunk/drivers/net/usb/cdc_mbim.c
@@ -134,7 +134,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
goto error;
if (skb) {
- if (skb->len <= sizeof(ETH_HLEN))
+ if (skb->len <= ETH_HLEN)
goto error;
/* mapping VLANs to MBIM sessions:
diff --git a/trunk/drivers/net/usb/qmi_wwan.c b/trunk/drivers/net/usb/qmi_wwan.c
index 968d5d50751d..2a3579f67910 100644
--- a/trunk/drivers/net/usb/qmi_wwan.c
+++ b/trunk/drivers/net/usb/qmi_wwan.c
@@ -13,6 +13,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -52,6 +53,96 @@ struct qmi_wwan_state {
struct usb_interface *data;
};
+/* default ethernet address used by the modem */
+static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3};
+
+/* Make up an ethernet header if the packet doesn't have one.
+ *
+ * A firmware bug common among several devices causes them to send raw
+ * IP packets under some circumstances. There is no way for the
+ * driver/host to know when this will happen. And even when the bug
+ * hits, some packets will still arrive with an intact header.
+ *
+ * The supported devices are only capable of sending IPv4, IPv6 and
+ * ARP packets on a point-to-point link. Any packet with an ethernet
+ * header will have either our address or a broadcast/multicast
+ * address as destination. ARP packets will always have a header.
+ *
+ * This means that this function will reliably add the appropriate
+ * header iff necessary, provided our hardware address does not start
+ * with 4 or 6.
+ *
+ * Another common firmware bug results in all packets being addressed
+ * to 00:a0:c6:00:00:00 despite the host address being different.
+ * This function will also fixup such packets.
+ */
+static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ __be16 proto;
+
+ /* usbnet rx_complete guarantees that skb->len is at least
+ * hard_header_len, so we can inspect the dest address without
+ * checking skb->len
+ */
+ switch (skb->data[0] & 0xf0) {
+ case 0x40:
+ proto = htons(ETH_P_IP);
+ break;
+ case 0x60:
+ proto = htons(ETH_P_IPV6);
+ break;
+ case 0x00:
+ if (is_multicast_ether_addr(skb->data))
+ return 1;
+ /* possibly bogus destination - rewrite just in case */
+ skb_reset_mac_header(skb);
+ goto fix_dest;
+ default:
+ /* pass along other packets without modifications */
+ return 1;
+ }
+ if (skb_headroom(skb) < ETH_HLEN)
+ return 0;
+ skb_push(skb, ETH_HLEN);
+ skb_reset_mac_header(skb);
+ eth_hdr(skb)->h_proto = proto;
+ memset(eth_hdr(skb)->h_source, 0, ETH_ALEN);
+fix_dest:
+ memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
+ return 1;
+}
+
+/* very simplistic detection of IPv4 or IPv6 headers */
+static bool possibly_iphdr(const char *data)
+{
+ return (data[0] & 0xd0) == 0x40;
+}
+
+/* disallow addresses which may be confused with IP headers */
+static int qmi_wwan_mac_addr(struct net_device *dev, void *p)
+{
+ int ret;
+ struct sockaddr *addr = p;
+
+ ret = eth_prepare_mac_addr_change(dev, p);
+ if (ret < 0)
+ return ret;
+ if (possibly_iphdr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+ eth_commit_mac_addr_change(dev, p);
+ return 0;
+}
+
+static const struct net_device_ops qmi_wwan_netdev_ops = {
+ .ndo_open = usbnet_open,
+ .ndo_stop = usbnet_stop,
+ .ndo_start_xmit = usbnet_start_xmit,
+ .ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_change_mtu = usbnet_change_mtu,
+ .ndo_set_mac_address = qmi_wwan_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
/* using a counter to merge subdriver requests with our own into a combined state */
static int qmi_wwan_manage_power(struct usbnet *dev, int on)
{
@@ -229,6 +320,18 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
usb_driver_release_interface(driver, info->data);
}
+ /* Never use the same address on both ends of the link, even
+ * if the buggy firmware told us to.
+ */
+ if (!compare_ether_addr(dev->net->dev_addr, default_modem_addr))
+ eth_hw_addr_random(dev->net);
+
+ /* make MAC addr easily distinguishable from an IP header */
+ if (possibly_iphdr(dev->net->dev_addr)) {
+ dev->net->dev_addr[0] |= 0x02; /* set local assignment bit */
+ dev->net->dev_addr[0] &= 0xbf; /* clear "IP" bit */
+ }
+ dev->net->netdev_ops = &qmi_wwan_netdev_ops;
err:
return status;
}
@@ -307,6 +410,7 @@ static const struct driver_info qmi_wwan_info = {
.bind = qmi_wwan_bind,
.unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
+ .rx_fixup = qmi_wwan_rx_fixup,
};
#define HUAWEI_VENDOR_ID 0x12D1
diff --git a/trunk/drivers/net/usb/smsc75xx.c b/trunk/drivers/net/usb/smsc75xx.c
index 9abe51710f22..1a15ec14c386 100644
--- a/trunk/drivers/net/usb/smsc75xx.c
+++ b/trunk/drivers/net/usb/smsc75xx.c
@@ -914,8 +914,12 @@ static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size)
static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)
{
struct usbnet *dev = netdev_priv(netdev);
+ int ret;
+
+ if (new_mtu > MAX_SINGLE_PACKET_SIZE)
+ return -EINVAL;
- int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu);
+ ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
if (ret < 0) {
netdev_warn(dev->net, "Failed to set mac rx frame length\n");
return ret;
@@ -1324,7 +1328,7 @@ static int smsc75xx_reset(struct usbnet *dev)
netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x\n", buf);
- ret = smsc75xx_set_rx_max_frame_length(dev, 1514);
+ ret = smsc75xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
if (ret < 0) {
netdev_warn(dev->net, "Failed to set max rx frame length\n");
return ret;
@@ -2134,8 +2138,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT))
dev->net->stats.rx_frame_errors++;
} else {
- /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */
- if (unlikely(size > (ETH_FRAME_LEN + 12))) {
+ /* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */
+ if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) {
netif_dbg(dev, rx_err, dev->net,
"size err rx_cmd_a=0x%08x\n",
rx_cmd_a);
diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/trunk/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
index 28fd99203f64..bdee2ed67219 100644
--- a/trunk/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+++ b/trunk/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
@@ -519,7 +519,7 @@ static const u32 ar9580_1p0_mac_core[][2] = {
{0x00008258, 0x00000000},
{0x0000825c, 0x40000000},
{0x00008260, 0x00080922},
- {0x00008264, 0x9bc00010},
+ {0x00008264, 0x9d400010},
{0x00008268, 0xffffffff},
{0x0000826c, 0x0000ffff},
{0x00008270, 0x00000000},
diff --git a/trunk/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c b/trunk/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
index 467b60014b7b..73fe8d6db566 100644
--- a/trunk/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
+++ b/trunk/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
@@ -143,14 +143,14 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
u32 sz, i;
struct channel_detector *cd;
- cd = kmalloc(sizeof(*cd), GFP_KERNEL);
+ cd = kmalloc(sizeof(*cd), GFP_ATOMIC);
if (cd == NULL)
goto fail;
INIT_LIST_HEAD(&cd->head);
cd->freq = freq;
sz = sizeof(cd->detectors) * dpd->num_radar_types;
- cd->detectors = kzalloc(sz, GFP_KERNEL);
+ cd->detectors = kzalloc(sz, GFP_ATOMIC);
if (cd->detectors == NULL)
goto fail;
diff --git a/trunk/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c b/trunk/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
index 91b8dceeadb1..5e48c5515b8c 100644
--- a/trunk/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
+++ b/trunk/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
@@ -218,7 +218,7 @@ static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts)
{
struct pulse_elem *p = pool_get_pulse_elem();
if (p == NULL) {
- p = kmalloc(sizeof(*p), GFP_KERNEL);
+ p = kmalloc(sizeof(*p), GFP_ATOMIC);
if (p == NULL) {
DFS_POOL_STAT_INC(pulse_alloc_error);
return false;
@@ -299,7 +299,7 @@ static bool pseq_handler_create_sequences(struct pri_detector *pde,
ps.deadline_ts = ps.first_ts + ps.dur;
new_ps = pool_get_pseq_elem();
if (new_ps == NULL) {
- new_ps = kmalloc(sizeof(*new_ps), GFP_KERNEL);
+ new_ps = kmalloc(sizeof(*new_ps), GFP_ATOMIC);
if (new_ps == NULL) {
DFS_POOL_STAT_INC(pseq_alloc_error);
return false;
diff --git a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 716058b67557..a47f5e05fc04 100644
--- a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -796,7 +796,7 @@ static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv)
* required version.
*/
if (priv->fw_version_major != MAJOR_VERSION_REQ ||
- priv->fw_version_minor != MINOR_VERSION_REQ) {
+ priv->fw_version_minor < MINOR_VERSION_REQ) {
dev_err(priv->dev, "ath9k_htc: Please upgrade to FW version %d.%d\n",
MAJOR_VERSION_REQ, MINOR_VERSION_REQ);
return -EINVAL;
diff --git a/trunk/drivers/net/wireless/ath/ath9k/link.c b/trunk/drivers/net/wireless/ath/ath9k/link.c
index 39c84ecf6a42..7fdac6c7b3ea 100644
--- a/trunk/drivers/net/wireless/ath/ath9k/link.c
+++ b/trunk/drivers/net/wireless/ath/ath9k/link.c
@@ -170,7 +170,8 @@ void ath_rx_poll(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
- ieee80211_queue_work(sc->hw, &sc->hw_check_work);
+ if (!test_bit(SC_OP_INVALID, &sc->sc_flags))
+ ieee80211_queue_work(sc->hw, &sc->hw_check_work);
}
/*
diff --git a/trunk/drivers/net/wireless/ath/ath9k/main.c b/trunk/drivers/net/wireless/ath/ath9k/main.c
index 6e66f9c6782b..988372d218a4 100644
--- a/trunk/drivers/net/wireless/ath/ath9k/main.c
+++ b/trunk/drivers/net/wireless/ath/ath9k/main.c
@@ -280,6 +280,10 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
if (r) {
ath_err(common,
"Unable to reset channel, reset status %d\n", r);
+
+ ath9k_hw_enable_interrupts(ah);
+ ath9k_queue_reset(sc, RESET_TYPE_BB_HANG);
+
goto out;
}
diff --git a/trunk/drivers/net/wireless/b43/dma.c b/trunk/drivers/net/wireless/b43/dma.c
index 38bc5a7997ff..122146943bf2 100644
--- a/trunk/drivers/net/wireless/b43/dma.c
+++ b/trunk/drivers/net/wireless/b43/dma.c
@@ -1487,8 +1487,12 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
const struct b43_dma_ops *ops;
struct b43_dmaring *ring;
struct b43_dmadesc_meta *meta;
+ static const struct b43_txstatus fake; /* filled with 0 */
+ const struct b43_txstatus *txstat;
int slot, firstused;
bool frame_succeed;
+ int skip;
+ static u8 err_out1, err_out2;
ring = parse_cookie(dev, status->cookie, &slot);
if (unlikely(!ring))
@@ -1501,13 +1505,36 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
firstused = ring->current_slot - ring->used_slots + 1;
if (firstused < 0)
firstused = ring->nr_slots + firstused;
+
+ skip = 0;
if (unlikely(slot != firstused)) {
/* This possibly is a firmware bug and will result in
- * malfunction, memory leaks and/or stall of DMA functionality. */
- b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
- "Expected %d, but got %d\n",
- ring->index, firstused, slot);
- return;
+ * malfunction, memory leaks and/or stall of DMA functionality.
+ */
+ if (slot == next_slot(ring, next_slot(ring, firstused))) {
+ /* If a single header/data pair was missed, skip over
+ * the first two slots in an attempt to recover.
+ */
+ slot = firstused;
+ skip = 2;
+ if (!err_out1) {
+ /* Report the error once. */
+ b43dbg(dev->wl,
+ "Skip on DMA ring %d slot %d.\n",
+ ring->index, slot);
+ err_out1 = 1;
+ }
+ } else {
+ /* More than a single header/data pair were missed.
+ * Report this error once.
+ */
+ if (!err_out2)
+ b43dbg(dev->wl,
+ "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
+ ring->index, firstused, slot);
+ err_out2 = 1;
+ return;
+ }
}
ops = ring->ops;
@@ -1522,11 +1549,13 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
slot, firstused, ring->index);
break;
}
+
if (meta->skb) {
struct b43_private_tx_info *priv_info =
- b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
+ b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
- unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
+ unmap_descbuffer(ring, meta->dmaaddr,
+ meta->skb->len, 1);
kfree(priv_info->bouncebuffer);
priv_info->bouncebuffer = NULL;
} else {
@@ -1538,8 +1567,9 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
struct ieee80211_tx_info *info;
if (unlikely(!meta->skb)) {
- /* This is a scatter-gather fragment of a frame, so
- * the skb pointer must not be NULL. */
+ /* This is a scatter-gather fragment of a frame,
+ * so the skb pointer must not be NULL.
+ */
b43dbg(dev->wl, "TX status unexpected NULL skb "
"at slot %d (first=%d) on ring %d\n",
slot, firstused, ring->index);
@@ -1550,9 +1580,18 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
/*
* Call back to inform the ieee80211 subsystem about
- * the status of the transmission.
+ * the status of the transmission. When skipping over
+ * a missed TX status report, use a status structure
+ * filled with zeros to indicate that the frame was not
+ * sent (frame_count 0) and not acknowledged
*/
- frame_succeed = b43_fill_txstatus_report(dev, info, status);
+ if (unlikely(skip))
+ txstat = &fake;
+ else
+ txstat = status;
+
+ frame_succeed = b43_fill_txstatus_report(dev, info,
+ txstat);
#ifdef CONFIG_B43_DEBUG
if (frame_succeed)
ring->nr_succeed_tx_packets++;
@@ -1580,12 +1619,14 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
/* Everything unmapped and free'd. So it's not used anymore. */
ring->used_slots--;
- if (meta->is_last_fragment) {
+ if (meta->is_last_fragment && !skip) {
/* This is the last scatter-gather
* fragment of the frame. We are done. */
break;
}
slot = next_slot(ring, slot);
+ if (skip > 0)
+ --skip;
}
if (ring->stopped) {
B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
diff --git a/trunk/drivers/net/wireless/b43/phy_n.c b/trunk/drivers/net/wireless/b43/phy_n.c
index 3c35382ee6c2..b70f220bc4b3 100644
--- a/trunk/drivers/net/wireless/b43/phy_n.c
+++ b/trunk/drivers/net/wireless/b43/phy_n.c
@@ -1564,7 +1564,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
u16 clip_off[2] = { 0xFFFF, 0xFFFF };
u8 vcm_final = 0;
- s8 offset[4];
+ s32 offset[4];
s32 results[8][4] = { };
s32 results_min[4] = { };
s32 poll_results[4] = { };
@@ -1615,7 +1615,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
}
for (i = 0; i < 4; i += 2) {
s32 curr;
- s32 mind = 40;
+ s32 mind = 0x100000;
s32 minpoll = 249;
u8 minvcm = 0;
if (2 * core != i)
@@ -1732,7 +1732,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
u8 regs_save_radio[2];
u16 regs_save_phy[2];
- s8 offset[4];
+ s32 offset[4];
u8 core;
u8 rail;
@@ -1799,7 +1799,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
}
for (i = 0; i < 4; i++) {
- s32 mind = 40;
+ s32 mind = 0x100000;
u8 minvcm = 0;
s32 minpoll = 249;
s32 curr;
@@ -5165,7 +5165,8 @@ static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid)
#endif
#ifdef CONFIG_B43_SSB
case B43_BUS_SSB:
- /* FIXME */
+ ssb_pmu_spuravoid_pllupdate(&dev->dev->sdev->bus->chipco,
+ avoid);
break;
#endif
}
diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 4469321c0eb3..35fc68be158d 100644
--- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -3317,15 +3317,15 @@ static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
goto err;
}
- /* External image takes precedence if specified */
if (brcmf_sdbrcm_download_code_file(bus)) {
brcmf_err("dongle image file download failed\n");
goto err;
}
- /* External nvram takes precedence if specified */
- if (brcmf_sdbrcm_download_nvram(bus))
+ if (brcmf_sdbrcm_download_nvram(bus)) {
brcmf_err("dongle nvram file download failed\n");
+ goto err;
+ }
/* Take arm out of reset */
if (brcmf_sdbrcm_download_state(bus, false)) {
diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 2af9c0f0798d..78da3eff75e8 100644
--- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -1891,8 +1891,10 @@ static s32
brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
u8 key_idx, const u8 *mac_addr, struct key_params *params)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_wsec_key key;
s32 err = 0;
+ u8 keybuf[8];
memset(&key, 0, sizeof(key));
key.index = (u32) key_idx;
@@ -1916,8 +1918,9 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
brcmf_dbg(CONN, "Setting the key index %d\n", key.index);
memcpy(key.data, params->key, key.len);
- if (params->cipher == WLAN_CIPHER_SUITE_TKIP) {
- u8 keybuf[8];
+ if ((ifp->vif->mode != WL_MODE_AP) &&
+ (params->cipher == WLAN_CIPHER_SUITE_TKIP)) {
+ brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
memcpy(keybuf, &key.data[24], sizeof(keybuf));
memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
memcpy(&key.data[16], keybuf, sizeof(keybuf));
@@ -2013,7 +2016,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
break;
case WLAN_CIPHER_SUITE_TKIP:
if (ifp->vif->mode != WL_MODE_AP) {
- brcmf_dbg(CONN, "Swapping key\n");
+ brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
memcpy(keybuf, &key.data[24], sizeof(keybuf));
memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
memcpy(&key.data[16], keybuf, sizeof(keybuf));
@@ -2118,8 +2121,7 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
err = -EAGAIN;
goto done;
}
- switch (wsec & ~SES_OW_ENABLED) {
- case WEP_ENABLED:
+ if (wsec & WEP_ENABLED) {
sec = &profile->sec;
if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
params.cipher = WLAN_CIPHER_SUITE_WEP40;
@@ -2128,16 +2130,13 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
params.cipher = WLAN_CIPHER_SUITE_WEP104;
brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");
}
- break;
- case TKIP_ENABLED:
+ } else if (wsec & TKIP_ENABLED) {
params.cipher = WLAN_CIPHER_SUITE_TKIP;
brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n");
- break;
- case AES_ENABLED:
+ } else if (wsec & AES_ENABLED) {
params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n");
- break;
- default:
+ } else {
brcmf_err("Invalid algo (0x%x)\n", wsec);
err = -EINVAL;
goto done;
@@ -3824,8 +3823,9 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
- s32 err = -EPERM;
+ s32 err;
struct brcmf_fil_bss_enable_le bss_enable;
+ struct brcmf_join_params join_params;
brcmf_dbg(TRACE, "Enter\n");
@@ -3833,16 +3833,21 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
/* Due to most likely deauths outstanding we sleep */
/* first to make sure they get processed by fw. */
msleep(400);
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0);
- if (err < 0) {
- brcmf_err("setting AP mode failed %d\n", err);
- goto exit;
- }
+
+ memset(&join_params, 0, sizeof(join_params));
+ err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
+ &join_params, sizeof(join_params));
+ if (err < 0)
+ brcmf_err("SET SSID error (%d)\n", err);
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0);
- if (err < 0) {
+ if (err < 0)
brcmf_err("BRCMF_C_UP error %d\n", err);
- goto exit;
- }
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0);
+ if (err < 0)
+ brcmf_err("setting AP mode failed %d\n", err);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 0);
+ if (err < 0)
+ brcmf_err("setting INFRA mode failed %d\n", err);
} else {
bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx);
bss_enable.enable = cpu_to_le32(0);
@@ -3855,7 +3860,6 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
-exit:
return err;
}
@@ -4122,10 +4126,6 @@ static const struct ieee80211_iface_limit brcmf_iface_limits[] = {
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_AP)
},
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
- },
{
.max = 1,
.types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -4183,8 +4183,7 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_P2P_DEVICE);
+ BIT(NL80211_IFTYPE_P2P_GO);
wiphy->iface_combinations = brcmf_iface_combos;
wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos);
wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index c6451c61407a..e2340b231aa1 100644
--- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -274,6 +274,130 @@ static void brcms_set_basic_rate(struct brcm_rateset *rs, u16 rate, bool is_br)
}
}
+/**
+ * This function frees the WL per-device resources.
+ *
+ * This function frees resources owned by the WL device pointed to
+ * by the wl parameter.
+ *
+ * precondition: can both be called locked and unlocked
+ *
+ */
+static void brcms_free(struct brcms_info *wl)
+{
+ struct brcms_timer *t, *next;
+
+ /* free ucode data */
+ if (wl->fw.fw_cnt)
+ brcms_ucode_data_free(&wl->ucode);
+ if (wl->irq)
+ free_irq(wl->irq, wl);
+
+ /* kill dpc */
+ tasklet_kill(&wl->tasklet);
+
+ if (wl->pub) {
+ brcms_debugfs_detach(wl->pub);
+ brcms_c_module_unregister(wl->pub, "linux", wl);
+ }
+
+ /* free common resources */
+ if (wl->wlc) {
+ brcms_c_detach(wl->wlc);
+ wl->wlc = NULL;
+ wl->pub = NULL;
+ }
+
+ /* virtual interface deletion is deferred so we cannot spinwait */
+
+ /* wait for all pending callbacks to complete */
+ while (atomic_read(&wl->callbacks) > 0)
+ schedule();
+
+ /* free timers */
+ for (t = wl->timers; t; t = next) {
+ next = t->next;
+#ifdef DEBUG
+ kfree(t->name);
+#endif
+ kfree(t);
+ }
+}
+
+/*
+* called from both kernel as from this kernel module (error flow on attach)
+* precondition: perimeter lock is not acquired.
+*/
+static void brcms_remove(struct bcma_device *pdev)
+{
+ struct ieee80211_hw *hw = bcma_get_drvdata(pdev);
+ struct brcms_info *wl = hw->priv;
+
+ if (wl->wlc) {
+ wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false);
+ wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
+ ieee80211_unregister_hw(hw);
+ }
+
+ brcms_free(wl);
+
+ bcma_set_drvdata(pdev, NULL);
+ ieee80211_free_hw(hw);
+}
+
+/*
+ * Precondition: Since this function is called in brcms_pci_probe() context,
+ * no locking is required.
+ */
+static void brcms_release_fw(struct brcms_info *wl)
+{
+ int i;
+ for (i = 0; i < MAX_FW_IMAGES; i++) {
+ release_firmware(wl->fw.fw_bin[i]);
+ release_firmware(wl->fw.fw_hdr[i]);
+ }
+}
+
+/*
+ * Precondition: Since this function is called in brcms_pci_probe() context,
+ * no locking is required.
+ */
+static int brcms_request_fw(struct brcms_info *wl, struct bcma_device *pdev)
+{
+ int status;
+ struct device *device = &pdev->dev;
+ char fw_name[100];
+ int i;
+
+ memset(&wl->fw, 0, sizeof(struct brcms_firmware));
+ for (i = 0; i < MAX_FW_IMAGES; i++) {
+ if (brcms_firmwares[i] == NULL)
+ break;
+ sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i],
+ UCODE_LOADER_API_VER);
+ status = request_firmware(&wl->fw.fw_bin[i], fw_name, device);
+ if (status) {
+ wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
+ KBUILD_MODNAME, fw_name);
+ return status;
+ }
+ sprintf(fw_name, "%s_hdr-%d.fw", brcms_firmwares[i],
+ UCODE_LOADER_API_VER);
+ status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device);
+ if (status) {
+ wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
+ KBUILD_MODNAME, fw_name);
+ return status;
+ }
+ wl->fw.hdr_num_entries[i] =
+ wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr));
+ }
+ wl->fw.fw_cnt = i;
+ status = brcms_ucode_data_init(wl, &wl->ucode);
+ brcms_release_fw(wl);
+ return status;
+}
+
static void brcms_ops_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
@@ -306,6 +430,14 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
if (!blocked)
wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
+ if (!wl->ucode.bcm43xx_bomminor) {
+ err = brcms_request_fw(wl, wl->wlc->hw->d11core);
+ if (err) {
+ brcms_remove(wl->wlc->hw->d11core);
+ return -ENOENT;
+ }
+ }
+
spin_lock_bh(&wl->lock);
/* avoid acknowledging frames before a non-monitor device is added */
wl->mute_tx = true;
@@ -793,128 +925,6 @@ void brcms_dpc(unsigned long data)
wake_up(&wl->tx_flush_wq);
}
-/*
- * Precondition: Since this function is called in brcms_pci_probe() context,
- * no locking is required.
- */
-static int brcms_request_fw(struct brcms_info *wl, struct bcma_device *pdev)
-{
- int status;
- struct device *device = &pdev->dev;
- char fw_name[100];
- int i;
-
- memset(&wl->fw, 0, sizeof(struct brcms_firmware));
- for (i = 0; i < MAX_FW_IMAGES; i++) {
- if (brcms_firmwares[i] == NULL)
- break;
- sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i],
- UCODE_LOADER_API_VER);
- status = request_firmware(&wl->fw.fw_bin[i], fw_name, device);
- if (status) {
- wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
- KBUILD_MODNAME, fw_name);
- return status;
- }
- sprintf(fw_name, "%s_hdr-%d.fw", brcms_firmwares[i],
- UCODE_LOADER_API_VER);
- status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device);
- if (status) {
- wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
- KBUILD_MODNAME, fw_name);
- return status;
- }
- wl->fw.hdr_num_entries[i] =
- wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr));
- }
- wl->fw.fw_cnt = i;
- return brcms_ucode_data_init(wl, &wl->ucode);
-}
-
-/*
- * Precondition: Since this function is called in brcms_pci_probe() context,
- * no locking is required.
- */
-static void brcms_release_fw(struct brcms_info *wl)
-{
- int i;
- for (i = 0; i < MAX_FW_IMAGES; i++) {
- release_firmware(wl->fw.fw_bin[i]);
- release_firmware(wl->fw.fw_hdr[i]);
- }
-}
-
-/**
- * This function frees the WL per-device resources.
- *
- * This function frees resources owned by the WL device pointed to
- * by the wl parameter.
- *
- * precondition: can both be called locked and unlocked
- *
- */
-static void brcms_free(struct brcms_info *wl)
-{
- struct brcms_timer *t, *next;
-
- /* free ucode data */
- if (wl->fw.fw_cnt)
- brcms_ucode_data_free(&wl->ucode);
- if (wl->irq)
- free_irq(wl->irq, wl);
-
- /* kill dpc */
- tasklet_kill(&wl->tasklet);
-
- if (wl->pub) {
- brcms_debugfs_detach(wl->pub);
- brcms_c_module_unregister(wl->pub, "linux", wl);
- }
-
- /* free common resources */
- if (wl->wlc) {
- brcms_c_detach(wl->wlc);
- wl->wlc = NULL;
- wl->pub = NULL;
- }
-
- /* virtual interface deletion is deferred so we cannot spinwait */
-
- /* wait for all pending callbacks to complete */
- while (atomic_read(&wl->callbacks) > 0)
- schedule();
-
- /* free timers */
- for (t = wl->timers; t; t = next) {
- next = t->next;
-#ifdef DEBUG
- kfree(t->name);
-#endif
- kfree(t);
- }
-}
-
-/*
-* called from both kernel as from this kernel module (error flow on attach)
-* precondition: perimeter lock is not acquired.
-*/
-static void brcms_remove(struct bcma_device *pdev)
-{
- struct ieee80211_hw *hw = bcma_get_drvdata(pdev);
- struct brcms_info *wl = hw->priv;
-
- if (wl->wlc) {
- wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false);
- wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
- ieee80211_unregister_hw(hw);
- }
-
- brcms_free(wl);
-
- bcma_set_drvdata(pdev, NULL);
- ieee80211_free_hw(hw);
-}
-
static irqreturn_t brcms_isr(int irq, void *dev_id)
{
struct brcms_info *wl;
@@ -1047,18 +1057,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
spin_lock_init(&wl->lock);
spin_lock_init(&wl->isr_lock);
- /* prepare ucode */
- if (brcms_request_fw(wl, pdev) < 0) {
- wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in "
- "%s\n", KBUILD_MODNAME, "/lib/firmware/brcm");
- brcms_release_fw(wl);
- brcms_remove(pdev);
- return NULL;
- }
-
/* common load-time initialization */
wl->wlc = brcms_c_attach((void *)wl, pdev, unit, false, &err);
- brcms_release_fw(wl);
if (!wl->wlc) {
wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n",
KBUILD_MODNAME, err);
diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
index 21a824232478..18d37645e2cd 100644
--- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -1137,9 +1137,8 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
gain0_15 = ((biq1 & 0xf) << 12) |
((tia & 0xf) << 8) |
((lna2 & 0x3) << 6) |
- ((lna2 & 0x3) << 4) |
- ((lna1 & 0x3) << 2) |
- ((lna1 & 0x3) << 0);
+ ((lna2 &
+ 0x3) << 4) | ((lna1 & 0x3) << 2) | ((lna1 & 0x3) << 0);
mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0);
mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0);
@@ -1157,8 +1156,6 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
}
mod_phy_reg(pi, 0x44d, (0x1 << 0), (!trsw) << 0);
- mod_phy_reg(pi, 0x4b1, (0x3 << 11), lna1 << 11);
- mod_phy_reg(pi, 0x4e6, (0x3 << 3), lna1 << 3);
}
@@ -1331,43 +1328,6 @@ static u32 wlc_lcnphy_measure_digital_power(struct brcms_phy *pi, u16 nsamples)
return (iq_est.i_pwr + iq_est.q_pwr) / nsamples;
}
-static bool wlc_lcnphy_rx_iq_cal_gain(struct brcms_phy *pi, u16 biq1_gain,
- u16 tia_gain, u16 lna2_gain)
-{
- u32 i_thresh_l, q_thresh_l;
- u32 i_thresh_h, q_thresh_h;
- struct lcnphy_iq_est iq_est_h, iq_est_l;
-
- wlc_lcnphy_set_rx_gain_by_distribution(pi, 0, 0, 0, biq1_gain, tia_gain,
- lna2_gain, 0);
-
- wlc_lcnphy_rx_gain_override_enable(pi, true);
- wlc_lcnphy_start_tx_tone(pi, 2000, (40 >> 1), 0);
- udelay(500);
- write_radio_reg(pi, RADIO_2064_REG112, 0);
- if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_l))
- return false;
-
- wlc_lcnphy_start_tx_tone(pi, 2000, 40, 0);
- udelay(500);
- write_radio_reg(pi, RADIO_2064_REG112, 0);
- if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_h))
- return false;
-
- i_thresh_l = (iq_est_l.i_pwr << 1);
- i_thresh_h = (iq_est_l.i_pwr << 2) + iq_est_l.i_pwr;
-
- q_thresh_l = (iq_est_l.q_pwr << 1);
- q_thresh_h = (iq_est_l.q_pwr << 2) + iq_est_l.q_pwr;
- if ((iq_est_h.i_pwr > i_thresh_l) &&
- (iq_est_h.i_pwr < i_thresh_h) &&
- (iq_est_h.q_pwr > q_thresh_l) &&
- (iq_est_h.q_pwr < q_thresh_h))
- return true;
-
- return false;
-}
-
static bool
wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
const struct lcnphy_rx_iqcomp *iqcomp,
@@ -1382,8 +1342,8 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old,
rfoverride3_old, rfoverride3val_old, rfoverride4_old,
rfoverride4val_old, afectrlovr_old, afectrlovrval_old;
- int tia_gain, lna2_gain, biq1_gain;
- bool set_gain;
+ int tia_gain;
+ u32 received_power, rx_pwr_threshold;
u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl;
u16 values_to_save[11];
s16 *ptr;
@@ -1408,134 +1368,126 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
goto cal_done;
}
- WARN_ON(module != 1);
- tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
- wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
-
- for (i = 0; i < 11; i++)
- values_to_save[i] =
- read_radio_reg(pi, rxiq_cal_rf_reg[i]);
- Core1TxControl_old = read_phy_reg(pi, 0x631);
-
- or_phy_reg(pi, 0x631, 0x0015);
-
- RFOverride0_old = read_phy_reg(pi, 0x44c);
- RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
- rfoverride2_old = read_phy_reg(pi, 0x4b0);
- rfoverride2val_old = read_phy_reg(pi, 0x4b1);
- rfoverride3_old = read_phy_reg(pi, 0x4f9);
- rfoverride3val_old = read_phy_reg(pi, 0x4fa);
- rfoverride4_old = read_phy_reg(pi, 0x938);
- rfoverride4val_old = read_phy_reg(pi, 0x939);
- afectrlovr_old = read_phy_reg(pi, 0x43b);
- afectrlovrval_old = read_phy_reg(pi, 0x43c);
- old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
- old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
-
- tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
- if (tx_gain_override_old) {
- wlc_lcnphy_get_tx_gain(pi, &old_gains);
- tx_gain_index_old = pi_lcn->lcnphy_current_index;
- }
-
- wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
+ if (module == 1) {
- mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
- mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
+ tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
+ wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
- mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
- mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
+ for (i = 0; i < 11; i++)
+ values_to_save[i] =
+ read_radio_reg(pi, rxiq_cal_rf_reg[i]);
+ Core1TxControl_old = read_phy_reg(pi, 0x631);
+
+ or_phy_reg(pi, 0x631, 0x0015);
+
+ RFOverride0_old = read_phy_reg(pi, 0x44c);
+ RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
+ rfoverride2_old = read_phy_reg(pi, 0x4b0);
+ rfoverride2val_old = read_phy_reg(pi, 0x4b1);
+ rfoverride3_old = read_phy_reg(pi, 0x4f9);
+ rfoverride3val_old = read_phy_reg(pi, 0x4fa);
+ rfoverride4_old = read_phy_reg(pi, 0x938);
+ rfoverride4val_old = read_phy_reg(pi, 0x939);
+ afectrlovr_old = read_phy_reg(pi, 0x43b);
+ afectrlovrval_old = read_phy_reg(pi, 0x43c);
+ old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
+ old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
+
+ tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
+ if (tx_gain_override_old) {
+ wlc_lcnphy_get_tx_gain(pi, &old_gains);
+ tx_gain_index_old = pi_lcn->lcnphy_current_index;
+ }
- write_radio_reg(pi, RADIO_2064_REG116, 0x06);
- write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
- write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
- write_radio_reg(pi, RADIO_2064_REG098, 0x03);
- write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
- mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
- write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
- write_radio_reg(pi, RADIO_2064_REG114, 0x01);
- write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
- write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
-
- mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
- mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
- mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
- mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
- mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
- mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
- mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
- mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
- mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
- mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
+ wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
- mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
- mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
+ mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
+ mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
- write_phy_reg(pi, 0x6da, 0xffff);
- or_phy_reg(pi, 0x6db, 0x3);
+ mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
+ mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
- wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
- set_gain = false;
-
- lna2_gain = 3;
- while ((lna2_gain >= 0) && !set_gain) {
- tia_gain = 4;
-
- while ((tia_gain >= 0) && !set_gain) {
- biq1_gain = 6;
-
- while ((biq1_gain >= 0) && !set_gain) {
- set_gain = wlc_lcnphy_rx_iq_cal_gain(pi,
- (u16)
- biq1_gain,
- (u16)
- tia_gain,
- (u16)
- lna2_gain);
- biq1_gain -= 1;
- }
+ write_radio_reg(pi, RADIO_2064_REG116, 0x06);
+ write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
+ write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
+ write_radio_reg(pi, RADIO_2064_REG098, 0x03);
+ write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
+ mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
+ write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
+ write_radio_reg(pi, RADIO_2064_REG114, 0x01);
+ write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
+ write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
+
+ mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
+ mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
+ mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
+ mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
+ mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
+ mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
+ mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
+ mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
+ mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
+ mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
+
+ mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
+ mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
+
+ wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0);
+ write_phy_reg(pi, 0x6da, 0xffff);
+ or_phy_reg(pi, 0x6db, 0x3);
+ wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
+ wlc_lcnphy_rx_gain_override_enable(pi, true);
+
+ tia_gain = 8;
+ rx_pwr_threshold = 950;
+ while (tia_gain > 0) {
tia_gain -= 1;
+ wlc_lcnphy_set_rx_gain_by_distribution(pi,
+ 0, 0, 2, 2,
+ (u16)
+ tia_gain, 1, 0);
+ udelay(500);
+
+ received_power =
+ wlc_lcnphy_measure_digital_power(pi, 2000);
+ if (received_power < rx_pwr_threshold)
+ break;
}
- lna2_gain -= 1;
- }
+ result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff);
- if (set_gain)
- result = wlc_lcnphy_calc_rx_iq_comp(pi, 1024);
- else
- result = false;
+ wlc_lcnphy_stop_tx_tone(pi);
- wlc_lcnphy_stop_tx_tone(pi);
+ write_phy_reg(pi, 0x631, Core1TxControl_old);
- write_phy_reg(pi, 0x631, Core1TxControl_old);
-
- write_phy_reg(pi, 0x44c, RFOverrideVal0_old);
- write_phy_reg(pi, 0x44d, RFOverrideVal0_old);
- write_phy_reg(pi, 0x4b0, rfoverride2_old);
- write_phy_reg(pi, 0x4b1, rfoverride2val_old);
- write_phy_reg(pi, 0x4f9, rfoverride3_old);
- write_phy_reg(pi, 0x4fa, rfoverride3val_old);
- write_phy_reg(pi, 0x938, rfoverride4_old);
- write_phy_reg(pi, 0x939, rfoverride4val_old);
- write_phy_reg(pi, 0x43b, afectrlovr_old);
- write_phy_reg(pi, 0x43c, afectrlovrval_old);
- write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
- write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
+ write_phy_reg(pi, 0x44c, RFOverrideVal0_old);
+ write_phy_reg(pi, 0x44d, RFOverrideVal0_old);
+ write_phy_reg(pi, 0x4b0, rfoverride2_old);
+ write_phy_reg(pi, 0x4b1, rfoverride2val_old);
+ write_phy_reg(pi, 0x4f9, rfoverride3_old);
+ write_phy_reg(pi, 0x4fa, rfoverride3val_old);
+ write_phy_reg(pi, 0x938, rfoverride4_old);
+ write_phy_reg(pi, 0x939, rfoverride4val_old);
+ write_phy_reg(pi, 0x43b, afectrlovr_old);
+ write_phy_reg(pi, 0x43c, afectrlovrval_old);
+ write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
+ write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
- wlc_lcnphy_clear_trsw_override(pi);
+ wlc_lcnphy_clear_trsw_override(pi);
- mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);
+ mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);
- for (i = 0; i < 11; i++)
- write_radio_reg(pi, rxiq_cal_rf_reg[i],
- values_to_save[i]);
+ for (i = 0; i < 11; i++)
+ write_radio_reg(pi, rxiq_cal_rf_reg[i],
+ values_to_save[i]);
- if (tx_gain_override_old)
- wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
- else
- wlc_lcnphy_disable_tx_gain_override(pi);
+ if (tx_gain_override_old)
+ wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
+ else
+ wlc_lcnphy_disable_tx_gain_override(pi);
- wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
- wlc_lcnphy_rx_gain_override_enable(pi, false);
+ wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
+ wlc_lcnphy_rx_gain_override_enable(pi, false);
+ }
cal_done:
kfree(ptr);
@@ -1829,17 +1781,6 @@ wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel)
write_radio_reg(pi, RADIO_2064_REG038, 3);
write_radio_reg(pi, RADIO_2064_REG091, 7);
}
-
- if (!(pi->sh->boardflags & BFL_FEM)) {
- u8 reg038[14] = {0xd, 0xe, 0xd, 0xd, 0xd, 0xc,
- 0xa, 0xb, 0xb, 0x3, 0x3, 0x2, 0x0, 0x0};
-
- write_radio_reg(pi, RADIO_2064_REG02A, 0xf);
- write_radio_reg(pi, RADIO_2064_REG091, 0x3);
- write_radio_reg(pi, RADIO_2064_REG038, 0x3);
-
- write_radio_reg(pi, RADIO_2064_REG038, reg038[channel - 1]);
- }
}
static int
@@ -2034,16 +1975,6 @@ wlc_lcnphy_set_tssi_mux(struct brcms_phy *pi, enum lcnphy_tssi_mode pos)
} else {
mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1);
mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
- mod_radio_reg(pi, RADIO_2064_REG028, 0x1, 0x0);
- mod_radio_reg(pi, RADIO_2064_REG11A, 0x4, 1<<2);
- mod_radio_reg(pi, RADIO_2064_REG036, 0x10, 0x0);
- mod_radio_reg(pi, RADIO_2064_REG11A, 0x10, 1<<4);
- mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
- mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x77);
- mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0xe<<1);
- mod_radio_reg(pi, RADIO_2064_REG112, 0x80, 1<<7);
- mod_radio_reg(pi, RADIO_2064_REG005, 0x7, 1<<1);
- mod_radio_reg(pi, RADIO_2064_REG029, 0xf0, 0<<4);
}
} else {
mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2);
@@ -2130,14 +2061,12 @@ static void wlc_lcnphy_pwrctrl_rssiparams(struct brcms_phy *pi)
(auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12));
mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5));
- mod_radio_reg(pi, RADIO_2064_REG07C, (1 << 0), (1 << 0));
}
static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
{
struct phytbl_info tab;
u32 rfseq, ind;
- u8 tssi_sel;
tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
tab.tbl_width = 32;
@@ -2159,13 +2088,7 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4);
- if (pi->sh->boardflags & BFL_FEM) {
- tssi_sel = 0x1;
- wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT);
- } else {
- tssi_sel = 0xe;
- wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_POST_PA);
- }
+ wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT);
mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14);
mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15);
@@ -2201,10 +2124,9 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0);
if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
- mod_radio_reg(pi, RADIO_2064_REG028, 0xf, tssi_sel);
+ mod_radio_reg(pi, RADIO_2064_REG028, 0xf, 0xe);
mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4);
} else {
- mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, tssi_sel << 1);
mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1);
mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3);
}
@@ -2251,10 +2173,6 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8);
- mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x0);
- mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
- mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
-
wlc_lcnphy_pwrctrl_rssiparams(pi);
}
@@ -2873,8 +2791,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
read_radio_reg(pi, RADIO_2064_REG007) & 1;
u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10;
u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4;
- u8 SAVE_bbmult = wlc_lcnphy_get_bbmult(pi);
-
idleTssi = read_phy_reg(pi, 0x4ab);
suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
MCTL_EN_MAC));
@@ -2892,12 +2808,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4);
mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2);
wlc_lcnphy_tssi_setup(pi);
-
- mod_phy_reg(pi, 0x4d7, (0x1 << 0), (1 << 0));
- mod_phy_reg(pi, 0x4d7, (0x1 << 6), (1 << 6));
-
- wlc_lcnphy_set_bbmult(pi, 0x0);
-
wlc_phy_do_dummy_tx(pi, true, OFF);
idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0))
>> 0);
@@ -2919,7 +2829,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12);
- wlc_lcnphy_set_bbmult(pi, SAVE_bbmult);
wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old);
wlc_lcnphy_set_tx_gain(pi, &old_gains);
wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl);
@@ -3133,11 +3042,6 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
wlc_lcnphy_write_table(pi, &tab);
tab.tbl_offset++;
}
- mod_phy_reg(pi, 0x4d0, (0x1 << 0), (0) << 0);
- mod_phy_reg(pi, 0x4d3, (0xff << 0), (0) << 0);
- mod_phy_reg(pi, 0x4d3, (0xff << 8), (0) << 8);
- mod_phy_reg(pi, 0x4d0, (0x1 << 4), (0) << 4);
- mod_phy_reg(pi, 0x4d0, (0x1 << 2), (0) << 2);
mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7);
@@ -3939,6 +3843,7 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
target_gains.pad_gain = 21;
target_gains.dac_gain = 0;
wlc_lcnphy_set_tx_gain(pi, &target_gains);
+ wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) {
@@ -3949,7 +3854,6 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
lcnphy_recal ? LCNPHY_CAL_RECAL :
LCNPHY_CAL_FULL), false);
} else {
- wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
wlc_lcnphy_tx_iqlo_soft_cal_full(pi);
}
@@ -4374,22 +4278,17 @@ wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi,
if (CHSPEC_IS5G(pi->radio_chanspec))
pa_gain = 0x70;
else
- pa_gain = 0x60;
+ pa_gain = 0x70;
if (pi->sh->boardflags & BFL_FEM)
pa_gain = 0x10;
-
tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
tab.tbl_width = 32;
tab.tbl_len = 1;
tab.tbl_ptr = &val;
for (j = 0; j < 128; j++) {
- if (pi->sh->boardflags & BFL_FEM)
- gm_gain = gain_table[j].gm;
- else
- gm_gain = 15;
-
+ gm_gain = gain_table[j].gm;
val = (((u32) pa_gain << 24) |
(gain_table[j].pad << 16) |
(gain_table[j].pga << 8) | gm_gain);
@@ -4600,10 +4499,7 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
write_phy_reg(pi, 0x4ea, 0x4688);
- if (pi->sh->boardflags & BFL_FEM)
- mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
- else
- mod_phy_reg(pi, 0x4eb, (0x7 << 0), 3 << 0);
+ mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6);
@@ -4614,13 +4510,6 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
wlc_lcnphy_rcal(pi);
wlc_lcnphy_rc_cal(pi);
-
- if (!(pi->sh->boardflags & BFL_FEM)) {
- write_radio_reg(pi, RADIO_2064_REG032, 0x6f);
- write_radio_reg(pi, RADIO_2064_REG033, 0x19);
- write_radio_reg(pi, RADIO_2064_REG039, 0xe);
- }
-
}
static void wlc_lcnphy_radio_init(struct brcms_phy *pi)
@@ -4650,20 +4539,22 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
wlc_lcnphy_write_table(pi, &tab);
}
- if (!(pi->sh->boardflags & BFL_FEM)) {
- tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
- tab.tbl_width = 16;
- tab.tbl_ptr = &val;
- tab.tbl_len = 1;
+ tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
+ tab.tbl_width = 16;
+ tab.tbl_ptr = &val;
+ tab.tbl_len = 1;
- val = 150;
- tab.tbl_offset = 0;
- wlc_lcnphy_write_table(pi, &tab);
+ val = 114;
+ tab.tbl_offset = 0;
+ wlc_lcnphy_write_table(pi, &tab);
- val = 220;
- tab.tbl_offset = 1;
- wlc_lcnphy_write_table(pi, &tab);
- }
+ val = 130;
+ tab.tbl_offset = 1;
+ wlc_lcnphy_write_table(pi, &tab);
+
+ val = 6;
+ tab.tbl_offset = 8;
+ wlc_lcnphy_write_table(pi, &tab);
if (CHSPEC_IS2G(pi->radio_chanspec)) {
if (pi->sh->boardflags & BFL_FEM)
@@ -5055,7 +4946,6 @@ void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec)
wlc_lcnphy_load_tx_iir_filter(pi, true, 3);
mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3);
- wlc_lcnphy_tssi_setup(pi);
}
void wlc_phy_detach_lcnphy(struct brcms_phy *pi)
@@ -5094,7 +4984,8 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
if (!wlc_phy_txpwr_srom_read_lcnphy(pi))
return false;
- if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
+ if ((pi->sh->boardflags & BFL_FEM) &&
+ (LCNREV_IS(pi->pubpi.phy_rev, 1))) {
if (pi_lcn->lcnphy_tempsense_option == 3) {
pi->hwpwrctrl = true;
pi->hwpwrctrl_capable = true;
diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
index b7e95acc2084..622c01ca72c5 100644
--- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
+++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
@@ -1992,70 +1992,70 @@ static const u16 dot11lcn_sw_ctrl_tbl_4313_epa_rev0[] = {
};
static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = {
- 0x0009,
0x000a,
- 0x0005,
- 0x0006,
0x0009,
- 0x000a,
- 0x0005,
0x0006,
- 0x0009,
- 0x000a,
0x0005,
- 0x0006,
- 0x0009,
0x000a,
- 0x0005,
- 0x0006,
0x0009,
- 0x000a,
- 0x0005,
0x0006,
- 0x0009,
- 0x000a,
0x0005,
- 0x0006,
- 0x0009,
0x000a,
- 0x0005,
- 0x0006,
0x0009,
- 0x000a,
- 0x0005,
0x0006,
- 0x0009,
- 0x000a,
0x0005,
- 0x0006,
- 0x0009,
0x000a,
- 0x0005,
- 0x0006,
0x0009,
- 0x000a,
- 0x0005,
0x0006,
- 0x0009,
- 0x000a,
0x0005,
- 0x0006,
+ 0x000a,
0x0009,
+ 0x0006,
+ 0x0005,
0x000a,
+ 0x0009,
+ 0x0006,
0x0005,
+ 0x000a,
+ 0x0009,
0x0006,
+ 0x0005,
+ 0x000a,
0x0009,
+ 0x0006,
+ 0x0005,
0x000a,
+ 0x0009,
+ 0x0006,
0x0005,
+ 0x000a,
+ 0x0009,
0x0006,
+ 0x0005,
+ 0x000a,
0x0009,
+ 0x0006,
+ 0x0005,
0x000a,
+ 0x0009,
+ 0x0006,
0x0005,
+ 0x000a,
+ 0x0009,
0x0006,
+ 0x0005,
+ 0x000a,
0x0009,
+ 0x0006,
+ 0x0005,
0x000a,
+ 0x0009,
+ 0x0006,
0x0005,
+ 0x000a,
+ 0x0009,
0x0006,
+ 0x0005,
};
static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = {
diff --git a/trunk/drivers/net/wireless/iwlegacy/4965-rs.c b/trunk/drivers/net/wireless/iwlegacy/4965-rs.c
index e8324b5e5bfe..6c7493c2d698 100644
--- a/trunk/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/trunk/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -2152,7 +2152,7 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
int rate_idx;
int i;
u32 rate;
- u8 use_green = il4965_rs_use_green(il, sta);
+ u8 use_green;
u8 active_tbl = 0;
u8 valid_tx_ant;
struct il_station_priv *sta_priv;
@@ -2160,6 +2160,7 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
if (!sta || !lq_sta)
return;
+ use_green = il4965_rs_use_green(il, sta);
sta_priv = (void *)sta->drv_priv;
i = lq_sta->last_txrate_idx;
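
The il4965_rs_initialize_lq() hunk above is an order-of-operations fix: il4965_rs_use_green() dereferences sta, so its call has to move below the `!sta || !lq_sta` guard. A minimal standalone sketch of the same rule, with hypothetical names rather than the driver's types:

#include <stddef.h>
#include <stdio.h>

struct sta { int ht_green; };

/* Hypothetical helper standing in for il4965_rs_use_green(): reads *sta. */
static int use_green(const struct sta *sta) { return sta->ht_green; }

static void init_lq(const struct sta *sta)
{
	int green;

	/* Wrong: calling use_green(sta) before this guard would dereference NULL. */
	if (!sta)
		return;

	/* Right: only read through the pointer once the guard has passed. */
	green = use_green(sta);
	printf("green mode: %d\n", green);
}

int main(void)
{
	struct sta s = { .ht_green = 1 };

	init_lq(NULL);	/* safely rejected */
	init_lq(&s);	/* prints "green mode: 1" */
	return 0;
}
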
diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/lib.c b/trunk/drivers/net/wireless/iwlwifi/dvm/lib.c
index 86ea5f4c3939..44ca0e57f9f7 100644
--- a/trunk/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/trunk/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -1261,6 +1261,15 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
return -EIO;
}
+ /*
+ * This can happen upon FW ASSERT: we clear the STATUS_FW_ERROR flag
+ * in iwl_down but cancel the workers only later.
+ */
+ if (!priv->ucode_loaded) {
+ IWL_ERR(priv, "Fw not loaded - dropping CMD: %x\n", cmd->id);
+ return -EIO;
+ }
+
/*
* Synchronous commands from this op-mode must hold
* the mutex, this ensures we don't try to send two
diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/rxon.c b/trunk/drivers/net/wireless/iwlwifi/dvm/rxon.c
index 23be948cf162..a82b6b39d4ff 100644
--- a/trunk/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/trunk/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1419,6 +1419,14 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
mutex_lock(&priv->mutex);
+ if (changes & BSS_CHANGED_IDLE && bss_conf->idle) {
+ /*
+ * If we go idle, then clearly no "passive-no-rx"
+ * workaround is needed any more, this is a reset.
+ */
+ iwlagn_lift_passive_no_rx(priv);
+ }
+
if (unlikely(!iwl_is_ready(priv))) {
IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
mutex_unlock(&priv->mutex);
@@ -1450,16 +1458,6 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
priv->timestamp = bss_conf->sync_tsf;
ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
} else {
- /*
- * If we disassociate while there are pending
- * frames, just wake up the queues and let the
- * frames "escape" ... This shouldn't really
- * be happening to start with, but we should
- * not get stuck in this case either since it
- * can happen if userspace gets confused.
- */
- iwlagn_lift_passive_no_rx(priv);
-
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
if (ctx->ctxid == IWL_RXON_CTX_BSS)
diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/tx.c b/trunk/drivers/net/wireless/iwlwifi/dvm/tx.c
index 6aec2df3bb27..d1a670d7b10c 100644
--- a/trunk/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/trunk/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -1192,7 +1192,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
memset(&info->status, 0, sizeof(info->status));
if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
- iwl_is_associated_ctx(ctx) && ctx->vif &&
+ ctx->vif &&
ctx->vif->type == NL80211_IFTYPE_STATION) {
/* block and stop all queues */
priv->passive_no_rx = true;
diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/ucode.c b/trunk/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 736fe9bb140e..1a4ac9236a44 100644
--- a/trunk/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/trunk/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -367,6 +367,8 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
return -EIO;
}
+ priv->ucode_loaded = true;
+
if (ucode_type != IWL_UCODE_WOWLAN) {
/* delay a bit to give rfkill time to run */
msleep(5);
@@ -380,8 +382,6 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
return ret;
}
- priv->ucode_loaded = true;
-
return 0;
}
diff --git a/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c b/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c
index 17bedc50e753..12c4f31ca8fb 100644
--- a/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -475,6 +475,10 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
/* If platform's RF_KILL switch is NOT set to KILL */
hw_rfkill = iwl_is_rfkill_set(trans);
+ if (hw_rfkill)
+ set_bit(STATUS_RFKILL, &trans_pcie->status);
+ else
+ clear_bit(STATUS_RFKILL, &trans_pcie->status);
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
if (hw_rfkill && !run_in_rfkill)
return -ERFKILL;
@@ -641,6 +645,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill;
int err;
@@ -656,6 +661,10 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
iwl_enable_rfkill_int(trans);
hw_rfkill = iwl_is_rfkill_set(trans);
+ if (hw_rfkill)
+ set_bit(STATUS_RFKILL, &trans_pcie->status);
+ else
+ clear_bit(STATUS_RFKILL, &trans_pcie->status);
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
return 0;
@@ -694,6 +703,10 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
* op_mode.
*/
hw_rfkill = iwl_is_rfkill_set(trans);
+ if (hw_rfkill)
+ set_bit(STATUS_RFKILL, &trans_pcie->status);
+ else
+ clear_bit(STATUS_RFKILL, &trans_pcie->status);
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
}
}
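
The trans.c hunks repeat one idiom three times: read the hardware rfkill switch, mirror it into the STATUS_RFKILL bit of the transport status word, then notify the op-mode. A self-contained model of that mirroring step (the bit index and struct below are stand-ins, not the driver's definitions):

#include <stdbool.h>
#include <stdio.h>

#define STATUS_RFKILL 0	/* illustrative bit index */

struct trans_pcie { unsigned long status; };

static void set_bit_(int nr, unsigned long *w)   { *w |=  (1UL << nr); }
static void clear_bit_(int nr, unsigned long *w) { *w &= ~(1UL << nr); }

/* Mirror the hardware rfkill switch into the status word, then report it. */
static void report_rfkill(struct trans_pcie *tp, bool hw_rfkill)
{
	if (hw_rfkill)
		set_bit_(STATUS_RFKILL, &tp->status);
	else
		clear_bit_(STATUS_RFKILL, &tp->status);
	printf("rfkill %s, status=0x%lx\n", hw_rfkill ? "on" : "off", tp->status);
}

int main(void)
{
	struct trans_pcie tp = { 0 };

	report_rfkill(&tp, true);	/* start_fw / start_hw path */
	report_rfkill(&tp, false);	/* switch released */
	return 0;
}
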
diff --git a/trunk/drivers/net/wireless/iwlwifi/pcie/tx.c b/trunk/drivers/net/wireless/iwlwifi/pcie/tx.c
index 8595c16f74de..cb5c6792e3a8 100644
--- a/trunk/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/trunk/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1264,7 +1264,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
int copy = 0;
- if (!cmd->len)
+ if (!cmd->len[i])
continue;
/* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
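
The tx.c change is a single subscript, but it flips the meaning of the test: cmd->len is an array, so `!cmd->len` checks the array's address (never NULL) instead of the i-th fragment length. A small standalone illustration, assuming a two-entry length array:

#include <stdio.h>

#define MAX_TBS 2

struct host_cmd { unsigned short len[MAX_TBS]; };

int main(void)
{
	struct host_cmd cmd = { .len = { 16, 0 } };
	int i;

	for (i = 0; i < MAX_TBS; i++) {
		/* Buggy form: !cmd.len tests the array's address, which is never
		 * NULL (compilers typically warn), so empty fragments are not
		 * skipped. */
		if (!cmd.len)
			printf("never printed\n");

		/* Fixed form: test the i-th length itself. */
		if (!cmd.len[i]) {
			printf("tb %d: empty, skipped\n", i);
			continue;
		}
		printf("tb %d: copy %u bytes\n", i, cmd.len[i]);
	}
	return 0;
}
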
diff --git a/trunk/drivers/net/wireless/mwifiex/cfg80211.c b/trunk/drivers/net/wireless/mwifiex/cfg80211.c
index a44023a7bd57..8aaf56ade4d9 100644
--- a/trunk/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/trunk/drivers/net/wireless/mwifiex/cfg80211.c
@@ -1892,7 +1892,8 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
}
}
- for (i = 0; i < request->n_channels; i++) {
+ for (i = 0; i < min_t(u32, request->n_channels,
+ MWIFIEX_USER_SCAN_CHAN_MAX); i++) {
chan = request->channels[i];
priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value;
priv->user_scan_cfg->chan_list[i].radio_type = chan->band;
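
The cfg80211.c hunk bounds the copy loop by the destination array rather than by the caller-supplied n_channels. A self-contained sketch of the same clamp; the limit value here is illustrative, not the driver's actual MWIFIEX_USER_SCAN_CHAN_MAX:

#include <stdio.h>

#define MWIFIEX_USER_SCAN_CHAN_MAX 4	/* illustrative, not the driver's value */

#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	unsigned int requested = 10;		/* what the scan request asks for */
	unsigned int chan_list[MWIFIEX_USER_SCAN_CHAN_MAX];
	unsigned int i, n;

	/* Copy at most as many channels as the destination can hold. */
	n = min_t(unsigned int, requested, MWIFIEX_USER_SCAN_CHAN_MAX);
	for (i = 0; i < n; i++)
		chan_list[i] = 36 + 4 * i;	/* stand-in channel numbers */

	printf("copied %u of %u requested channels, first=%u\n",
	       n, requested, n ? chan_list[0] : 0);
	return 0;
}
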
diff --git a/trunk/drivers/net/wireless/mwifiex/pcie.c b/trunk/drivers/net/wireless/mwifiex/pcie.c
index 5c395e2e6a2b..feb204613397 100644
--- a/trunk/drivers/net/wireless/mwifiex/pcie.c
+++ b/trunk/drivers/net/wireless/mwifiex/pcie.c
@@ -1508,6 +1508,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
}
memcpy(adapter->upld_buf, skb->data,
min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len));
+ skb_push(skb, INTF_HEADER_LEN);
if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
PCI_DMA_FROMDEVICE))
return -1;
diff --git a/trunk/drivers/net/wireless/mwifiex/scan.c b/trunk/drivers/net/wireless/mwifiex/scan.c
index d215b4d3c51b..e7f6deaf715e 100644
--- a/trunk/drivers/net/wireless/mwifiex/scan.c
+++ b/trunk/drivers/net/wireless/mwifiex/scan.c
@@ -1393,8 +1393,10 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
queue_work(adapter->workqueue, &adapter->main_work);
/* Perform internal scan synchronously */
- if (!priv->scan_request)
+ if (!priv->scan_request) {
+ dev_dbg(adapter->dev, "wait internal scan\n");
mwifiex_wait_queue_complete(adapter, cmd_node);
+ }
} else {
spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
flags);
@@ -1793,7 +1795,12 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
/* Need to indicate IOCTL complete */
if (adapter->curr_cmd->wait_q_enabled) {
adapter->cmd_wait_q.status = 0;
- mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+ if (!priv->scan_request) {
+ dev_dbg(adapter->dev,
+ "complete internal scan\n");
+ mwifiex_complete_cmd(adapter,
+ adapter->curr_cmd);
+ }
}
if (priv->report_scan_result)
priv->report_scan_result = false;
diff --git a/trunk/drivers/net/wireless/rt2x00/Kconfig b/trunk/drivers/net/wireless/rt2x00/Kconfig
index 2bf4efa33186..76cd47eb901e 100644
--- a/trunk/drivers/net/wireless/rt2x00/Kconfig
+++ b/trunk/drivers/net/wireless/rt2x00/Kconfig
@@ -20,6 +20,7 @@ if RT2X00
config RT2400PCI
tristate "Ralink rt2400 (PCI/PCMCIA) support"
depends on PCI
+ select RT2X00_LIB_MMIO
select RT2X00_LIB_PCI
select EEPROM_93CX6
---help---
@@ -31,6 +32,7 @@ config RT2400PCI
config RT2500PCI
tristate "Ralink rt2500 (PCI/PCMCIA) support"
depends on PCI
+ select RT2X00_LIB_MMIO
select RT2X00_LIB_PCI
select EEPROM_93CX6
---help---
@@ -43,6 +45,7 @@ config RT61PCI
tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support"
depends on PCI
select RT2X00_LIB_PCI
+ select RT2X00_LIB_MMIO
select RT2X00_LIB_FIRMWARE
select RT2X00_LIB_CRYPTO
select CRC_ITU_T
@@ -57,6 +60,7 @@ config RT2800PCI
tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support"
depends on PCI || SOC_RT288X || SOC_RT305X
select RT2800_LIB
+ select RT2X00_LIB_MMIO
select RT2X00_LIB_PCI if PCI
select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X
select RT2X00_LIB_FIRMWARE
@@ -185,6 +189,9 @@ endif
config RT2800_LIB
tristate
+config RT2X00_LIB_MMIO
+ tristate
+
config RT2X00_LIB_PCI
tristate
select RT2X00_LIB
diff --git a/trunk/drivers/net/wireless/rt2x00/Makefile b/trunk/drivers/net/wireless/rt2x00/Makefile
index 349d5b8284a4..f069d8bc5b67 100644
--- a/trunk/drivers/net/wireless/rt2x00/Makefile
+++ b/trunk/drivers/net/wireless/rt2x00/Makefile
@@ -9,6 +9,7 @@ rt2x00lib-$(CONFIG_RT2X00_LIB_FIRMWARE) += rt2x00firmware.o
rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS) += rt2x00leds.o
obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o
+obj-$(CONFIG_RT2X00_LIB_MMIO) += rt2x00mmio.o
obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o
obj-$(CONFIG_RT2X00_LIB_SOC) += rt2x00soc.o
obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o
diff --git a/trunk/drivers/net/wireless/rt2x00/rt2400pci.c b/trunk/drivers/net/wireless/rt2x00/rt2400pci.c
index 221beaaa83f1..dcfb54e0c516 100644
--- a/trunk/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/trunk/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -34,6 +34,7 @@
#include
#include "rt2x00.h"
+#include "rt2x00mmio.h"
#include "rt2x00pci.h"
#include "rt2400pci.h"
diff --git a/trunk/drivers/net/wireless/rt2x00/rt2500pci.c b/trunk/drivers/net/wireless/rt2x00/rt2500pci.c
index 39edc59e8d03..e1d2dc9ed28a 100644
--- a/trunk/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/trunk/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -34,6 +34,7 @@
#include
#include "rt2x00.h"
+#include "rt2x00mmio.h"
#include "rt2x00pci.h"
#include "rt2500pci.h"
diff --git a/trunk/drivers/net/wireless/rt2x00/rt2800pci.c b/trunk/drivers/net/wireless/rt2x00/rt2800pci.c
index ded73da4de0b..ba5a05625aaa 100644
--- a/trunk/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/trunk/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -41,6 +41,7 @@
#include
#include "rt2x00.h"
+#include "rt2x00mmio.h"
#include "rt2x00pci.h"
#include "rt2x00soc.h"
#include "rt2800lib.h"
diff --git a/trunk/drivers/net/wireless/rt2x00/rt2x00mmio.c b/trunk/drivers/net/wireless/rt2x00/rt2x00mmio.c
new file mode 100644
index 000000000000..d84a680ba0c9
--- /dev/null
+++ b/trunk/drivers/net/wireless/rt2x00/rt2x00mmio.c
@@ -0,0 +1,216 @@
+/*
+ Copyright (C) 2004 - 2009 Ivo van Doorn
+
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the
+ Free Software Foundation, Inc.,
+ 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ Module: rt2x00mmio
+ Abstract: rt2x00 generic mmio device routines.
+ */
+
+#include
+#include
+#include
+#include
+
+#include "rt2x00.h"
+#include "rt2x00mmio.h"
+
+/*
+ * Register access.
+ */
+int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset,
+ const struct rt2x00_field32 field,
+ u32 *reg)
+{
+ unsigned int i;
+
+ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
+ return 0;
+
+ for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ rt2x00pci_register_read(rt2x00dev, offset, reg);
+ if (!rt2x00_get_field32(*reg, field))
+ return 1;
+ udelay(REGISTER_BUSY_DELAY);
+ }
+
+ printk_once(KERN_ERR "%s() Indirect register access failed: "
+ "offset=0x%.08x, value=0x%.08x\n", __func__, offset, *reg);
+ *reg = ~0;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);
+
+bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue = rt2x00dev->rx;
+ struct queue_entry *entry;
+ struct queue_entry_priv_pci *entry_priv;
+ struct skb_frame_desc *skbdesc;
+ int max_rx = 16;
+
+ while (--max_rx) {
+ entry = rt2x00queue_get_entry(queue, Q_INDEX);
+ entry_priv = entry->priv_data;
+
+ if (rt2x00dev->ops->lib->get_entry_state(entry))
+ break;
+
+ /*
+ * Fill in desc fields of the skb descriptor
+ */
+ skbdesc = get_skb_frame_desc(entry->skb);
+ skbdesc->desc = entry_priv->desc;
+ skbdesc->desc_len = entry->queue->desc_size;
+
+ /*
+ * DMA is already done, notify rt2x00lib that
+ * it finished successfully.
+ */
+ rt2x00lib_dmastart(entry);
+ rt2x00lib_dmadone(entry);
+
+ /*
+ * Send the frame to rt2x00lib for further processing.
+ */
+ rt2x00lib_rxdone(entry, GFP_ATOMIC);
+ }
+
+ return !max_rx;
+}
+EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
+
+void rt2x00pci_flush_queue(struct data_queue *queue, bool drop)
+{
+ unsigned int i;
+
+ for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
+ msleep(10);
+}
+EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue);
+
+/*
+ * Device initialization handlers.
+ */
+static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
+ struct data_queue *queue)
+{
+ struct queue_entry_priv_pci *entry_priv;
+ void *addr;
+ dma_addr_t dma;
+ unsigned int i;
+
+ /*
+ * Allocate DMA memory for descriptor and buffer.
+ */
+ addr = dma_alloc_coherent(rt2x00dev->dev,
+ queue->limit * queue->desc_size,
+ &dma, GFP_KERNEL);
+ if (!addr)
+ return -ENOMEM;
+
+ memset(addr, 0, queue->limit * queue->desc_size);
+
+ /*
+ * Initialize all queue entries to contain valid addresses.
+ */
+ for (i = 0; i < queue->limit; i++) {
+ entry_priv = queue->entries[i].priv_data;
+ entry_priv->desc = addr + i * queue->desc_size;
+ entry_priv->desc_dma = dma + i * queue->desc_size;
+ }
+
+ return 0;
+}
+
+static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
+ struct data_queue *queue)
+{
+ struct queue_entry_priv_pci *entry_priv =
+ queue->entries[0].priv_data;
+
+ if (entry_priv->desc)
+ dma_free_coherent(rt2x00dev->dev,
+ queue->limit * queue->desc_size,
+ entry_priv->desc, entry_priv->desc_dma);
+ entry_priv->desc = NULL;
+}
+
+int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+ int status;
+
+ /*
+ * Allocate DMA
+ */
+ queue_for_each(rt2x00dev, queue) {
+ status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
+ if (status)
+ goto exit;
+ }
+
+ /*
+ * Register interrupt handler.
+ */
+ status = request_irq(rt2x00dev->irq,
+ rt2x00dev->ops->lib->irq_handler,
+ IRQF_SHARED, rt2x00dev->name, rt2x00dev);
+ if (status) {
+ ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
+ rt2x00dev->irq, status);
+ goto exit;
+ }
+
+ return 0;
+
+exit:
+ queue_for_each(rt2x00dev, queue)
+ rt2x00pci_free_queue_dma(rt2x00dev, queue);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(rt2x00pci_initialize);
+
+void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+
+ /*
+ * Free irq line.
+ */
+ free_irq(rt2x00dev->irq, rt2x00dev);
+
+ /*
+ * Free DMA
+ */
+ queue_for_each(rt2x00dev, queue)
+ rt2x00pci_free_queue_dma(rt2x00dev, queue);
+}
+EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);
+
+/*
+ * rt2x00mmio module information.
+ */
+MODULE_AUTHOR(DRV_PROJECT);
+MODULE_VERSION(DRV_VERSION);
+MODULE_DESCRIPTION("rt2x00 mmio library");
+MODULE_LICENSE("GPL");
diff --git a/trunk/drivers/net/wireless/rt2x00/rt2x00mmio.h b/trunk/drivers/net/wireless/rt2x00/rt2x00mmio.h
new file mode 100644
index 000000000000..4ecaf60175bf
--- /dev/null
+++ b/trunk/drivers/net/wireless/rt2x00/rt2x00mmio.h
@@ -0,0 +1,119 @@
+/*
+ Copyright (C) 2004 - 2009 Ivo van Doorn
+
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the
+ Free Software Foundation, Inc.,
+ 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ Module: rt2x00mmio
+ Abstract: Data structures for the rt2x00mmio module.
+ */
+
+#ifndef RT2X00MMIO_H
+#define RT2X00MMIO_H
+
+#include
+
+/*
+ * Register access.
+ */
+static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset,
+ u32 *value)
+{
+ *value = readl(rt2x00dev->csr.base + offset);
+}
+
+static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset,
+ void *value, const u32 length)
+{
+ memcpy_fromio(value, rt2x00dev->csr.base + offset, length);
+}
+
+static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset,
+ u32 value)
+{
+ writel(value, rt2x00dev->csr.base + offset);
+}
+
+static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset,
+ const void *value,
+ const u32 length)
+{
+ __iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2);
+}
+
+/**
+ * rt2x00pci_regbusy_read - Read from register with busy check
+ * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
+ * @offset: Register offset
+ * @field: Field to check if register is busy
+ * @reg: Pointer to where register contents should be stored
+ *
+ * This function will read the given register, and checks if the
+ * register is busy. If it is, it will sleep for a couple of
+ * microseconds before reading the register again. If the register
+ * is not read after a certain timeout, this function will return
+ * FALSE.
+ */
+int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset,
+ const struct rt2x00_field32 field,
+ u32 *reg);
+
+/**
+ * struct queue_entry_priv_pci: Per entry PCI specific information
+ *
+ * @desc: Pointer to device descriptor
+ * @desc_dma: DMA pointer to &desc.
+ * @data: Pointer to device's entry memory.
+ * @data_dma: DMA pointer to &data.
+ */
+struct queue_entry_priv_pci {
+ __le32 *desc;
+ dma_addr_t desc_dma;
+};
+
+/**
+ * rt2x00pci_rxdone - Handle RX done events
+ * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
+ *
+ * Returns true if there are still rx frames pending and false if all
+ * pending rx frames were processed.
+ */
+bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev);
+
+/**
+ * rt2x00pci_flush_queue - Flush data queue
+ * @queue: Data queue to stop
+ * @drop: True to drop all pending frames.
+ *
+ * This will wait for a maximum of 100ms, waiting for the queues
+ * to become empty.
+ */
+void rt2x00pci_flush_queue(struct data_queue *queue, bool drop);
+
+/*
+ * Device initialization handlers.
+ */
+int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev);
+void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev);
+
+#endif /* RT2X00MMIO_H */
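
As the kernel-doc above describes, rt2x00pci_regbusy_read() polls a register until its busy field clears and reports failure if it never does. A standalone model of that poll loop, with a fake register read standing in for the MMIO access:

#include <stdint.h>
#include <stdio.h>

#define REGISTER_BUSY_COUNT 100		/* illustrative bound */
#define BUSY_BIT (1u << 31)		/* hypothetical "busy" field */

/* Stand-in for the MMIO read: pretend the device goes idle on the third poll. */
static uint32_t read_csr(int *polls)
{
	return ++(*polls) < 3 ? (BUSY_BIT | 0x5a) : 0x5a;
}

/* Same contract as rt2x00pci_regbusy_read(): returns 1 and the register
 * value once the busy field clears, 0 (and ~0 in *reg) if it never does. */
static int regbusy_read(uint32_t *reg)
{
	int polls = 0;
	unsigned int i;

	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		*reg = read_csr(&polls);
		if (!(*reg & BUSY_BIT))
			return 1;
		/* the real code udelay()s between polls */
	}
	*reg = ~0u;
	return 0;
}

int main(void)
{
	uint32_t reg;

	if (regbusy_read(&reg))
		printf("register settled at 0x%x\n", (unsigned)reg);
	else
		printf("indirect register access failed\n");
	return 0;
}
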
diff --git a/trunk/drivers/net/wireless/rt2x00/rt2x00pci.c b/trunk/drivers/net/wireless/rt2x00/rt2x00pci.c
index a0c8caef3b0a..e87865e33113 100644
--- a/trunk/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/trunk/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -32,182 +32,6 @@
#include "rt2x00.h"
#include "rt2x00pci.h"
-/*
- * Register access.
- */
-int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset,
- const struct rt2x00_field32 field,
- u32 *reg)
-{
- unsigned int i;
-
- if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
- return 0;
-
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2x00pci_register_read(rt2x00dev, offset, reg);
- if (!rt2x00_get_field32(*reg, field))
- return 1;
- udelay(REGISTER_BUSY_DELAY);
- }
-
- ERROR(rt2x00dev, "Indirect register access failed: "
- "offset=0x%.08x, value=0x%.08x\n", offset, *reg);
- *reg = ~0;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);
-
-bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
-{
- struct data_queue *queue = rt2x00dev->rx;
- struct queue_entry *entry;
- struct queue_entry_priv_pci *entry_priv;
- struct skb_frame_desc *skbdesc;
- int max_rx = 16;
-
- while (--max_rx) {
- entry = rt2x00queue_get_entry(queue, Q_INDEX);
- entry_priv = entry->priv_data;
-
- if (rt2x00dev->ops->lib->get_entry_state(entry))
- break;
-
- /*
- * Fill in desc fields of the skb descriptor
- */
- skbdesc = get_skb_frame_desc(entry->skb);
- skbdesc->desc = entry_priv->desc;
- skbdesc->desc_len = entry->queue->desc_size;
-
- /*
- * DMA is already done, notify rt2x00lib that
- * it finished successfully.
- */
- rt2x00lib_dmastart(entry);
- rt2x00lib_dmadone(entry);
-
- /*
- * Send the frame to rt2x00lib for further processing.
- */
- rt2x00lib_rxdone(entry, GFP_ATOMIC);
- }
-
- return !max_rx;
-}
-EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
-
-void rt2x00pci_flush_queue(struct data_queue *queue, bool drop)
-{
- unsigned int i;
-
- for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
- msleep(10);
-}
-EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue);
-
-/*
- * Device initialization handlers.
- */
-static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
- struct data_queue *queue)
-{
- struct queue_entry_priv_pci *entry_priv;
- void *addr;
- dma_addr_t dma;
- unsigned int i;
-
- /*
- * Allocate DMA memory for descriptor and buffer.
- */
- addr = dma_alloc_coherent(rt2x00dev->dev,
- queue->limit * queue->desc_size,
- &dma, GFP_KERNEL);
- if (!addr)
- return -ENOMEM;
-
- memset(addr, 0, queue->limit * queue->desc_size);
-
- /*
- * Initialize all queue entries to contain valid addresses.
- */
- for (i = 0; i < queue->limit; i++) {
- entry_priv = queue->entries[i].priv_data;
- entry_priv->desc = addr + i * queue->desc_size;
- entry_priv->desc_dma = dma + i * queue->desc_size;
- }
-
- return 0;
-}
-
-static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
- struct data_queue *queue)
-{
- struct queue_entry_priv_pci *entry_priv =
- queue->entries[0].priv_data;
-
- if (entry_priv->desc)
- dma_free_coherent(rt2x00dev->dev,
- queue->limit * queue->desc_size,
- entry_priv->desc, entry_priv->desc_dma);
- entry_priv->desc = NULL;
-}
-
-int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
-{
- struct data_queue *queue;
- int status;
-
- /*
- * Allocate DMA
- */
- queue_for_each(rt2x00dev, queue) {
- status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
- if (status)
- goto exit;
- }
-
- /*
- * Register interrupt handler.
- */
- status = request_irq(rt2x00dev->irq,
- rt2x00dev->ops->lib->irq_handler,
- IRQF_SHARED, rt2x00dev->name, rt2x00dev);
- if (status) {
- ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
- rt2x00dev->irq, status);
- goto exit;
- }
-
- return 0;
-
-exit:
- queue_for_each(rt2x00dev, queue)
- rt2x00pci_free_queue_dma(rt2x00dev, queue);
-
- return status;
-}
-EXPORT_SYMBOL_GPL(rt2x00pci_initialize);
-
-void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
-{
- struct data_queue *queue;
-
- /*
- * Free irq line.
- */
- free_irq(rt2x00dev->irq, rt2x00dev);
-
- /*
- * Free DMA
- */
- queue_for_each(rt2x00dev, queue)
- rt2x00pci_free_queue_dma(rt2x00dev, queue);
-}
-EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);
-
/*
* PCI driver handlers.
*/
diff --git a/trunk/drivers/net/wireless/rt2x00/rt2x00pci.h b/trunk/drivers/net/wireless/rt2x00/rt2x00pci.h
index e2c99f2b9a14..60d90b20f8b9 100644
--- a/trunk/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/trunk/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -35,94 +35,6 @@
*/
#define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops)
-/*
- * Register access.
- */
-static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset,
- u32 *value)
-{
- *value = readl(rt2x00dev->csr.base + offset);
-}
-
-static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset,
- void *value, const u32 length)
-{
- memcpy_fromio(value, rt2x00dev->csr.base + offset, length);
-}
-
-static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset,
- u32 value)
-{
- writel(value, rt2x00dev->csr.base + offset);
-}
-
-static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset,
- const void *value,
- const u32 length)
-{
- __iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2);
-}
-
-/**
- * rt2x00pci_regbusy_read - Read from register with busy check
- * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
- * @offset: Register offset
- * @field: Field to check if register is busy
- * @reg: Pointer to where register contents should be stored
- *
- * This function will read the given register, and checks if the
- * register is busy. If it is, it will sleep for a couple of
- * microseconds before reading the register again. If the register
- * is not read after a certain timeout, this function will return
- * FALSE.
- */
-int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset,
- const struct rt2x00_field32 field,
- u32 *reg);
-
-/**
- * struct queue_entry_priv_pci: Per entry PCI specific information
- *
- * @desc: Pointer to device descriptor
- * @desc_dma: DMA pointer to &desc.
- * @data: Pointer to device's entry memory.
- * @data_dma: DMA pointer to &data.
- */
-struct queue_entry_priv_pci {
- __le32 *desc;
- dma_addr_t desc_dma;
-};
-
-/**
- * rt2x00pci_rxdone - Handle RX done events
- * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
- *
- * Returns true if there are still rx frames pending and false if all
- * pending rx frames were processed.
- */
-bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev);
-
-/**
- * rt2x00pci_flush_queue - Flush data queue
- * @queue: Data queue to stop
- * @drop: True to drop all pending frames.
- *
- * This will wait for a maximum of 100ms, waiting for the queues
- * to become empty.
- */
-void rt2x00pci_flush_queue(struct data_queue *queue, bool drop);
-
-/*
- * Device initialization handlers.
- */
-int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev);
-void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev);
-
/*
* PCI driver handlers.
*/
diff --git a/trunk/drivers/net/wireless/rt2x00/rt61pci.c b/trunk/drivers/net/wireless/rt2x00/rt61pci.c
index f95792cfcf89..9e3c8ff53e3f 100644
--- a/trunk/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/trunk/drivers/net/wireless/rt2x00/rt61pci.c
@@ -35,6 +35,7 @@
#include
#include "rt2x00.h"
+#include "rt2x00mmio.h"
#include "rt2x00pci.h"
#include "rt61pci.h"
diff --git a/trunk/drivers/nfc/microread/mei.c b/trunk/drivers/nfc/microread/mei.c
index eef38cfd812e..ca33ae193935 100644
--- a/trunk/drivers/nfc/microread/mei.c
+++ b/trunk/drivers/nfc/microread/mei.c
@@ -22,7 +22,7 @@
#include
#include
#include
-#include
+#include
#include
#include
@@ -32,9 +32,6 @@
#define MICROREAD_DRIVER_NAME "microread"
-#define MICROREAD_UUID UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, 0x94, \
- 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c)
-
struct mei_nfc_hdr {
u8 cmd;
u8 status;
@@ -48,7 +45,7 @@ struct mei_nfc_hdr {
#define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD)
struct microread_mei_phy {
- struct mei_device *mei_device;
+ struct mei_cl_device *device;
struct nfc_hci_dev *hdev;
int powered;
@@ -105,14 +102,14 @@ static int microread_mei_write(void *phy_id, struct sk_buff *skb)
MEI_DUMP_SKB_OUT("mei frame sent", skb);
- r = mei_send(phy->device, skb->data, skb->len);
+ r = mei_cl_send(phy->device, skb->data, skb->len);
if (r > 0)
r = 0;
return r;
}
-static void microread_event_cb(struct mei_device *device, u32 events,
+static void microread_event_cb(struct mei_cl_device *device, u32 events,
void *context)
{
struct microread_mei_phy *phy = context;
@@ -120,7 +117,7 @@ static void microread_event_cb(struct mei_device *device, u32 events,
if (phy->hard_fault != 0)
return;
- if (events & BIT(MEI_EVENT_RX)) {
+ if (events & BIT(MEI_CL_EVENT_RX)) {
struct sk_buff *skb;
int reply_size;
@@ -128,7 +125,7 @@ static void microread_event_cb(struct mei_device *device, u32 events,
if (!skb)
return;
- reply_size = mei_recv(device, skb->data, MEI_NFC_MAX_READ);
+ reply_size = mei_cl_recv(device, skb->data, MEI_NFC_MAX_READ);
if (reply_size < MEI_NFC_HEADER_SIZE) {
kfree(skb);
return;
@@ -149,8 +146,8 @@ static struct nfc_phy_ops mei_phy_ops = {
.disable = microread_mei_disable,
};
-static int microread_mei_probe(struct mei_device *device,
- const struct mei_id *id)
+static int microread_mei_probe(struct mei_cl_device *device,
+ const struct mei_cl_device_id *id)
{
struct microread_mei_phy *phy;
int r;
@@ -164,9 +161,9 @@ static int microread_mei_probe(struct mei_device *device,
}
phy->device = device;
- mei_set_clientdata(device, phy);
+ mei_cl_set_drvdata(device, phy);
- r = mei_register_event_cb(device, microread_event_cb, phy);
+ r = mei_cl_register_event_cb(device, microread_event_cb, phy);
if (r) {
pr_err(MICROREAD_DRIVER_NAME ": event cb registration failed\n");
goto err_out;
@@ -186,9 +183,9 @@ static int microread_mei_probe(struct mei_device *device,
return r;
}
-static int microread_mei_remove(struct mei_device *device)
+static int microread_mei_remove(struct mei_cl_device *device)
{
- struct microread_mei_phy *phy = mei_get_clientdata(device);
+ struct microread_mei_phy *phy = mei_cl_get_drvdata(device);
pr_info("Removing microread\n");
@@ -202,16 +199,15 @@ static int microread_mei_remove(struct mei_device *device)
return 0;
}
-static struct mei_id microread_mei_tbl[] = {
- { MICROREAD_DRIVER_NAME, MICROREAD_UUID },
+static struct mei_cl_device_id microread_mei_tbl[] = {
+ { MICROREAD_DRIVER_NAME },
/* required last entry */
{ }
};
-
MODULE_DEVICE_TABLE(mei, microread_mei_tbl);
-static struct mei_driver microread_driver = {
+static struct mei_cl_driver microread_driver = {
.id_table = microread_mei_tbl,
.name = MICROREAD_DRIVER_NAME,
@@ -225,7 +221,7 @@ static int microread_mei_init(void)
pr_debug(DRIVER_DESC ": %s\n", __func__);
- r = mei_driver_register(&microread_driver);
+ r = mei_cl_driver_register(&microread_driver);
if (r) {
pr_err(MICROREAD_DRIVER_NAME ": driver registration failed\n");
return r;
@@ -236,7 +232,7 @@ static int microread_mei_init(void)
static void microread_mei_exit(void)
{
- mei_driver_unregister(&microread_driver);
+ mei_cl_driver_unregister(&microread_driver);
}
module_init(microread_mei_init);
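
Most of the mei.c conversion is mechanical renaming, but the probe/remove pair shows the underlying pattern: stash the per-device phy with mei_cl_set_drvdata() at probe time and fetch it back with mei_cl_get_drvdata() in every later callback. A toy, self-contained version of that pairing (plain structs, not the MEI client bus API):

#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-ins for the bus objects; not the MEI client bus types. */
struct cl_device { void *drvdata; };

static void set_drvdata(struct cl_device *dev, void *data) { dev->drvdata = data; }
static void *get_drvdata(struct cl_device *dev) { return dev->drvdata; }

struct phy { int powered; };

/* probe(): allocate the per-device state and attach it to the device. */
static int probe(struct cl_device *dev)
{
	struct phy *phy = calloc(1, sizeof(*phy));

	if (!phy)
		return -1;
	set_drvdata(dev, phy);
	return 0;
}

/* remove(): fetch the same state back and release it. */
static void remove_dev(struct cl_device *dev)
{
	struct phy *phy = get_drvdata(dev);

	free(phy);
	set_drvdata(dev, NULL);
}

int main(void)
{
	struct cl_device dev = { 0 };

	if (!probe(&dev))
		printf("probe stored drvdata %p\n", get_drvdata(&dev));
	remove_dev(&dev);
	return 0;
}
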
diff --git a/trunk/drivers/pci/bus.c b/trunk/drivers/pci/bus.c
index 8647dc6f52d0..748f8f3e9ff5 100644
--- a/trunk/drivers/pci/bus.c
+++ b/trunk/drivers/pci/bus.c
@@ -202,20 +202,16 @@ void pci_bus_add_devices(const struct pci_bus *bus)
if (dev->is_added)
continue;
retval = pci_bus_add_device(dev);
+ if (retval)
+ dev_err(&dev->dev, "Error adding device (%d)\n",
+ retval);
}
list_for_each_entry(dev, &bus->devices, bus_list) {
BUG_ON(!dev->is_added);
-
child = dev->subordinate;
-
- if (!child)
- continue;
- pci_bus_add_devices(child);
-
- if (child->is_added)
- continue;
- child->is_added = 1;
+ if (child)
+ pci_bus_add_devices(child);
}
}
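
After this change pci_bus_add_devices() logs per-device failures and descends into every subordinate bus unconditionally, instead of toggling is_added on the child. A standalone sketch of that two-pass walk over a toy bus tree (names are illustrative):

#include <stdio.h>

struct bus;

struct dev {
	const char *name;
	struct bus *subordinate;	/* non-NULL if this device is a bridge */
	struct dev *next;
};

struct bus { struct dev *devices; };

static int add_device(struct dev *dev)
{
	printf("added %s\n", dev->name);
	return 0;	/* a non-zero return is logged, not treated as fatal */
}

/* First add every device on this bus, then recurse into any child buses. */
static void bus_add_devices(struct bus *bus)
{
	struct dev *dev;

	for (dev = bus->devices; dev; dev = dev->next)
		if (add_device(dev))
			fprintf(stderr, "error adding %s\n", dev->name);

	for (dev = bus->devices; dev; dev = dev->next)
		if (dev->subordinate)
			bus_add_devices(dev->subordinate);
}

int main(void)
{
	struct dev leaf = { "01:00.0", NULL, NULL };
	struct bus child = { &leaf };
	struct dev bridge = { "00:1c.0", &child, NULL };
	struct bus root = { &bridge };

	bus_add_devices(&root);
	return 0;
}
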
diff --git a/trunk/drivers/pci/hotplug/Kconfig b/trunk/drivers/pci/hotplug/Kconfig
index 13e9e63a7266..9fcb87f353d4 100644
--- a/trunk/drivers/pci/hotplug/Kconfig
+++ b/trunk/drivers/pci/hotplug/Kconfig
@@ -52,15 +52,12 @@ config HOTPLUG_PCI_IBM
When in doubt, say N.
config HOTPLUG_PCI_ACPI
- tristate "ACPI PCI Hotplug driver"
- depends on (!ACPI_DOCK && ACPI) || (ACPI_DOCK)
+ bool "ACPI PCI Hotplug driver"
+ depends on HOTPLUG_PCI=y && ((!ACPI_DOCK && ACPI) || (ACPI_DOCK))
help
Say Y here if you have a system that supports PCI Hotplug using
ACPI.
- To compile this driver as a module, choose M here: the
- module will be called acpiphp.
-
When in doubt, say N.
config HOTPLUG_PCI_ACPI_IBM
diff --git a/trunk/drivers/pci/hotplug/acpiphp.h b/trunk/drivers/pci/hotplug/acpiphp.h
index b70ac00a117e..6fdd49c6f0b9 100644
--- a/trunk/drivers/pci/hotplug/acpiphp.h
+++ b/trunk/drivers/pci/hotplug/acpiphp.h
@@ -73,8 +73,9 @@ static inline const char *slot_name(struct slot *slot)
*/
struct acpiphp_bridge {
struct list_head list;
+ struct list_head slots;
+ struct kref ref;
acpi_handle handle;
- struct acpiphp_slot *slots;
/* Ejectable PCI-to-PCI bridge (PCI bridge and PCI function) */
struct acpiphp_func *func;
@@ -97,7 +98,7 @@ struct acpiphp_bridge {
* PCI slot information for each *physical* PCI slot
*/
struct acpiphp_slot {
- struct acpiphp_slot *next;
+ struct list_head node;
struct acpiphp_bridge *bridge; /* parent */
struct list_head funcs; /* one slot may have different
objects (i.e. for each function) */
@@ -119,7 +120,6 @@ struct acpiphp_slot {
*/
struct acpiphp_func {
struct acpiphp_slot *slot; /* parent */
- struct acpiphp_bridge *bridge; /* Ejectable PCI-to-PCI bridge */
struct list_head sibling;
struct notifier_block nb;
@@ -146,10 +146,6 @@ struct acpiphp_attention_info
#define ACPI_PCI_HOST_HID "PNP0A03"
/* ACPI _STA method value (ignore bit 4; battery present) */
-#define ACPI_STA_PRESENT (0x00000001)
-#define ACPI_STA_ENABLED (0x00000002)
-#define ACPI_STA_SHOW_IN_UI (0x00000004)
-#define ACPI_STA_FUNCTIONING (0x00000008)
#define ACPI_STA_ALL (0x0000000f)
/* bridge flags */
@@ -174,25 +170,24 @@ struct acpiphp_attention_info
/* function prototypes */
/* acpiphp_core.c */
-extern int acpiphp_register_attention(struct acpiphp_attention_info*info);
-extern int acpiphp_unregister_attention(struct acpiphp_attention_info *info);
-extern int acpiphp_register_hotplug_slot(struct acpiphp_slot *slot);
-extern void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *slot);
+int acpiphp_register_attention(struct acpiphp_attention_info*info);
+int acpiphp_unregister_attention(struct acpiphp_attention_info *info);
+int acpiphp_register_hotplug_slot(struct acpiphp_slot *slot);
+void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *slot);
/* acpiphp_glue.c */
-extern int acpiphp_glue_init (void);
-extern void acpiphp_glue_exit (void);
typedef int (*acpiphp_callback)(struct acpiphp_slot *slot, void *data);
-extern int acpiphp_enable_slot (struct acpiphp_slot *slot);
-extern int acpiphp_disable_slot (struct acpiphp_slot *slot);
-extern int acpiphp_eject_slot (struct acpiphp_slot *slot);
-extern u8 acpiphp_get_power_status (struct acpiphp_slot *slot);
-extern u8 acpiphp_get_attention_status (struct acpiphp_slot *slot);
-extern u8 acpiphp_get_latch_status (struct acpiphp_slot *slot);
-extern u8 acpiphp_get_adapter_status (struct acpiphp_slot *slot);
+int acpiphp_enable_slot(struct acpiphp_slot *slot);
+int acpiphp_disable_slot(struct acpiphp_slot *slot);
+int acpiphp_eject_slot(struct acpiphp_slot *slot);
+u8 acpiphp_get_power_status(struct acpiphp_slot *slot);
+u8 acpiphp_get_attention_status(struct acpiphp_slot *slot);
+u8 acpiphp_get_latch_status(struct acpiphp_slot *slot);
+u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot);
/* variables */
extern bool acpiphp_debug;
+extern bool acpiphp_disabled;
#endif /* _ACPIPHP_H */
diff --git a/trunk/drivers/pci/hotplug/acpiphp_core.c b/trunk/drivers/pci/hotplug/acpiphp_core.c
index c2fd3095701f..ca8127950fcd 100644
--- a/trunk/drivers/pci/hotplug/acpiphp_core.c
+++ b/trunk/drivers/pci/hotplug/acpiphp_core.c
@@ -37,6 +37,7 @@
#include
#include
+#include
#include
#include
#include
@@ -48,6 +49,7 @@
#define SLOT_NAME_SIZE 21 /* {_SUN} */
bool acpiphp_debug;
+bool acpiphp_disabled;
/* local variables */
static struct acpiphp_attention_info *attention_info;
@@ -60,7 +62,9 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
+MODULE_PARM_DESC(disable, "disable acpiphp driver");
module_param_named(debug, acpiphp_debug, bool, 0644);
+module_param_named(disable, acpiphp_disabled, bool, 0444);
/* export the attention callback registration methods */
EXPORT_SYMBOL_GPL(acpiphp_register_attention);
@@ -351,27 +355,9 @@ void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
}
-static int __init acpiphp_init(void)
+void __init acpiphp_init(void)
{
- info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
-
- if (acpi_pci_disabled)
- return 0;
-
- /* read all the ACPI info from the system */
- /* initialize internal data structure etc. */
- return acpiphp_glue_init();
-}
-
-
-static void __exit acpiphp_exit(void)
-{
- if (acpi_pci_disabled)
- return;
-
- /* deallocate internal data structures etc. */
- acpiphp_glue_exit();
+ info(DRIVER_DESC " version: " DRIVER_VERSION "%s\n",
+ acpiphp_disabled ? ", disabled by user; please report a bug"
+ : "");
}
-
-module_init(acpiphp_init);
-module_exit(acpiphp_exit);
diff --git a/trunk/drivers/pci/hotplug/acpiphp_glue.c b/trunk/drivers/pci/hotplug/acpiphp_glue.c
index 270fdbadc19c..96fed19c6d90 100644
--- a/trunk/drivers/pci/hotplug/acpiphp_glue.c
+++ b/trunk/drivers/pci/hotplug/acpiphp_glue.c
@@ -54,6 +54,7 @@
#include "acpiphp.h"
static LIST_HEAD(bridge_list);
+static DEFINE_MUTEX(bridge_mutex);
#define MY_NAME "acpiphp_glue"
@@ -61,6 +62,7 @@ static void handle_hotplug_event_bridge (acpi_handle, u32, void *);
static void acpiphp_sanitize_bus(struct pci_bus *bus);
static void acpiphp_set_hpp_values(struct pci_bus *bus);
static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context);
+static void free_bridge(struct kref *kref);
/* callback routine to check for the existence of a pci dock device */
static acpi_status
@@ -76,6 +78,39 @@ is_pci_dock_device(acpi_handle handle, u32 lvl, void *context, void **rv)
}
}
+static inline void get_bridge(struct acpiphp_bridge *bridge)
+{
+ kref_get(&bridge->ref);
+}
+
+static inline void put_bridge(struct acpiphp_bridge *bridge)
+{
+ kref_put(&bridge->ref, free_bridge);
+}
+
+static void free_bridge(struct kref *kref)
+{
+ struct acpiphp_bridge *bridge;
+ struct acpiphp_slot *slot, *next;
+ struct acpiphp_func *func, *tmp;
+
+ bridge = container_of(kref, struct acpiphp_bridge, ref);
+
+ list_for_each_entry_safe(slot, next, &bridge->slots, node) {
+ list_for_each_entry_safe(func, tmp, &slot->funcs, sibling) {
+ kfree(func);
+ }
+ kfree(slot);
+ }
+
+ /* Release reference acquired by acpiphp_bridge_handle_to_function() */
+ if ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func)
+ put_bridge(bridge->func->slot->bridge);
+ put_device(&bridge->pci_bus->dev);
+ pci_dev_put(bridge->pci_dev);
+ kfree(bridge);
+}
+
/*
* the _DCK method can do funny things... and sometimes not
* hah-hah funny.
@@ -154,9 +189,10 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
acpi_handle tmp;
acpi_status status = AE_OK;
unsigned long long adr, sun;
- int device, function, retval;
+ int device, function, retval, found = 0;
struct pci_bus *pbus = bridge->pci_bus;
struct pci_dev *pdev;
+ u32 val;
if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
return AE_OK;
@@ -170,7 +206,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
device = (adr >> 16) & 0xffff;
function = adr & 0xffff;
- pdev = pbus->self;
+ pdev = bridge->pci_dev;
if (pdev && device_is_managed_by_native_pciehp(pdev))
return AE_OK;
@@ -178,7 +214,6 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
if (!newfunc)
return AE_NO_MEMORY;
- INIT_LIST_HEAD(&newfunc->sibling);
newfunc->handle = handle;
newfunc->function = function;
@@ -207,14 +242,15 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
}
/* search for objects that share the same slot */
- for (slot = bridge->slots; slot; slot = slot->next)
+ list_for_each_entry(slot, &bridge->slots, node)
if (slot->device == device) {
if (slot->sun != sun)
warn("sibling found, but _SUN doesn't match!\n");
+ found = 1;
break;
}
- if (!slot) {
+ if (!found) {
slot = kzalloc(sizeof(struct acpiphp_slot), GFP_KERNEL);
if (!slot) {
kfree(newfunc);
@@ -227,9 +263,9 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
INIT_LIST_HEAD(&slot->funcs);
mutex_init(&slot->crit_sect);
- slot->next = bridge->slots;
- bridge->slots = slot;
-
+ mutex_lock(&bridge_mutex);
+ list_add_tail(&slot->node, &bridge->slots);
+ mutex_unlock(&bridge_mutex);
bridge->nr_slots++;
dbg("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n",
@@ -247,13 +283,13 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
}
newfunc->slot = slot;
+ mutex_lock(&bridge_mutex);
list_add_tail(&newfunc->sibling, &slot->funcs);
+ mutex_unlock(&bridge_mutex);
- pdev = pci_get_slot(pbus, PCI_DEVFN(device, function));
- if (pdev) {
+ if (pci_bus_read_dev_vendor_id(pbus, PCI_DEVFN(device, function),
+ &val, 60*1000))
slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
- pci_dev_put(pdev);
- }
if (is_dock_device(handle)) {
/* we don't want to call this device's _EJ0
@@ -290,7 +326,9 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
err_exit:
bridge->nr_slots--;
- bridge->slots = slot->next;
+ mutex_lock(&bridge_mutex);
+ list_del(&slot->node);
+ mutex_unlock(&bridge_mutex);
kfree(slot);
kfree(newfunc);
@@ -315,13 +353,17 @@ static void init_bridge_misc(struct acpiphp_bridge *bridge)
acpi_status status;
/* must be added to the list prior to calling register_slot */
+ mutex_lock(&bridge_mutex);
list_add(&bridge->list, &bridge_list);
+ mutex_unlock(&bridge_mutex);
/* register all slot objects under this bridge */
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge->handle, (u32)1,
register_slot, NULL, bridge, NULL);
if (ACPI_FAILURE(status)) {
+ mutex_lock(&bridge_mutex);
list_del(&bridge->list);
+ mutex_unlock(&bridge_mutex);
return;
}
@@ -351,178 +393,46 @@ static struct acpiphp_func *acpiphp_bridge_handle_to_function(acpi_handle handle
{
struct acpiphp_bridge *bridge;
struct acpiphp_slot *slot;
- struct acpiphp_func *func;
+ struct acpiphp_func *func = NULL;
+ mutex_lock(&bridge_mutex);
list_for_each_entry(bridge, &bridge_list, list) {
- for (slot = bridge->slots; slot; slot = slot->next) {
+ list_for_each_entry(slot, &bridge->slots, node) {
list_for_each_entry(func, &slot->funcs, sibling) {
- if (func->handle == handle)
+ if (func->handle == handle) {
+ get_bridge(func->slot->bridge);
+ mutex_unlock(&bridge_mutex);
return func;
+ }
}
}
}
+ mutex_unlock(&bridge_mutex);
return NULL;
}
-static inline void config_p2p_bridge_flags(struct acpiphp_bridge *bridge)
-{
- acpi_handle dummy_handle;
- struct acpiphp_func *func;
-
- if (ACPI_SUCCESS(acpi_get_handle(bridge->handle,
- "_EJ0", &dummy_handle))) {
- bridge->flags |= BRIDGE_HAS_EJ0;
-
- dbg("found ejectable p2p bridge\n");
-
- /* make link between PCI bridge and PCI function */
- func = acpiphp_bridge_handle_to_function(bridge->handle);
- if (!func)
- return;
- bridge->func = func;
- func->bridge = bridge;
- }
-}
-
-
-/* allocate and initialize host bridge data structure */
-static void add_host_bridge(struct acpi_pci_root *root)
-{
- struct acpiphp_bridge *bridge;
- acpi_handle handle = root->device->handle;
-
- bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL);
- if (bridge == NULL)
- return;
-
- bridge->handle = handle;
-
- bridge->pci_bus = root->bus;
-
- init_bridge_misc(bridge);
-}
-
-
-/* allocate and initialize PCI-to-PCI bridge data structure */
-static void add_p2p_bridge(acpi_handle *handle)
-{
- struct acpiphp_bridge *bridge;
-
- bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL);
- if (bridge == NULL) {
- err("out of memory\n");
- return;
- }
-
- bridge->handle = handle;
- config_p2p_bridge_flags(bridge);
-
- bridge->pci_dev = acpi_get_pci_dev(handle);
- bridge->pci_bus = bridge->pci_dev->subordinate;
- if (!bridge->pci_bus) {
- err("This is not a PCI-to-PCI bridge!\n");
- goto err;
- }
-
- /*
- * Grab a ref to the subordinate PCI bus in case the bus is
- * removed via PCI core logical hotplug. The ref pins the bus
- * (which we access during module unload).
- */
- get_device(&bridge->pci_bus->dev);
-
- init_bridge_misc(bridge);
- return;
- err:
- pci_dev_put(bridge->pci_dev);
- kfree(bridge);
- return;
-}
-
-
-/* callback routine to find P2P bridges */
-static acpi_status
-find_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
- acpi_status status;
- struct pci_dev *dev;
-
- dev = acpi_get_pci_dev(handle);
- if (!dev || !dev->subordinate)
- goto out;
-
- /* check if this bridge has ejectable slots */
- if ((detect_ejectable_slots(handle) > 0)) {
- dbg("found PCI-to-PCI bridge at PCI %s\n", pci_name(dev));
- add_p2p_bridge(handle);
- }
-
- /* search P2P bridges under this p2p bridge */
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
- find_p2p_bridge, NULL, NULL, NULL);
- if (ACPI_FAILURE(status))
- warn("find_p2p_bridge failed (error code = 0x%x)\n", status);
-
- out:
- pci_dev_put(dev);
- return AE_OK;
-}
-
-
-/* find hot-pluggable slots, and then find P2P bridge */
-static int add_bridge(struct acpi_pci_root *root)
-{
- acpi_status status;
- unsigned long long tmp;
- acpi_handle dummy_handle;
- acpi_handle handle = root->device->handle;
-
- /* if the bridge doesn't have _STA, we assume it is always there */
- status = acpi_get_handle(handle, "_STA", &dummy_handle);
- if (ACPI_SUCCESS(status)) {
- status = acpi_evaluate_integer(handle, "_STA", NULL, &tmp);
- if (ACPI_FAILURE(status)) {
- dbg("%s: _STA evaluation failure\n", __func__);
- return 0;
- }
- if ((tmp & ACPI_STA_FUNCTIONING) == 0)
- /* don't register this object */
- return 0;
- }
-
- /* check if this bridge has ejectable slots */
- if (detect_ejectable_slots(handle) > 0) {
- dbg("found PCI host-bus bridge with hot-pluggable slots\n");
- add_host_bridge(root);
- }
-
- /* search P2P bridges under this host bridge */
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
- find_p2p_bridge, NULL, NULL, NULL);
-
- if (ACPI_FAILURE(status))
- warn("find_p2p_bridge failed (error code = 0x%x)\n", status);
-
- return 0;
-}
-
static struct acpiphp_bridge *acpiphp_handle_to_bridge(acpi_handle handle)
{
struct acpiphp_bridge *bridge;
+ mutex_lock(&bridge_mutex);
list_for_each_entry(bridge, &bridge_list, list)
- if (bridge->handle == handle)
+ if (bridge->handle == handle) {
+ get_bridge(bridge);
+ mutex_unlock(&bridge_mutex);
return bridge;
+ }
+ mutex_unlock(&bridge_mutex);
return NULL;
}
static void cleanup_bridge(struct acpiphp_bridge *bridge)
{
- struct acpiphp_slot *slot, *next;
- struct acpiphp_func *func, *tmp;
+ struct acpiphp_slot *slot;
+ struct acpiphp_func *func;
acpi_status status;
acpi_handle handle = bridge->handle;
@@ -543,10 +453,8 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
err("failed to install interrupt notify handler\n");
}
- slot = bridge->slots;
- while (slot) {
- next = slot->next;
- list_for_each_entry_safe(func, tmp, &slot->funcs, sibling) {
+ list_for_each_entry(slot, &bridge->slots, node) {
+ list_for_each_entry(func, &slot->funcs, sibling) {
if (is_dock_device(func->handle)) {
unregister_hotplug_dock_device(func->handle);
unregister_dock_notifier(&func->nb);
@@ -558,63 +466,13 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
if (ACPI_FAILURE(status))
err("failed to remove notify handler\n");
}
- list_del(&func->sibling);
- kfree(func);
}
acpiphp_unregister_hotplug_slot(slot);
- list_del(&slot->funcs);
- kfree(slot);
- slot = next;
}
- /*
- * Only P2P bridges have a pci_dev
- */
- if (bridge->pci_dev)
- put_device(&bridge->pci_bus->dev);
-
- pci_dev_put(bridge->pci_dev);
+ mutex_lock(&bridge_mutex);
list_del(&bridge->list);
- kfree(bridge);
-}
-
-static acpi_status
-cleanup_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
- struct acpiphp_bridge *bridge;
-
- /* cleanup p2p bridges under this P2P bridge
- in a depth-first manner */
- acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
- cleanup_p2p_bridge, NULL, NULL, NULL);
-
- bridge = acpiphp_handle_to_bridge(handle);
- if (bridge)
- cleanup_bridge(bridge);
-
- return AE_OK;
-}
-
-static void remove_bridge(struct acpi_pci_root *root)
-{
- struct acpiphp_bridge *bridge;
- acpi_handle handle = root->device->handle;
-
- /* cleanup p2p bridges under this host bridge
- in a depth-first manner */
- acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
- (u32)1, cleanup_p2p_bridge, NULL, NULL, NULL);
-
- /*
- * On root bridges with hotplug slots directly underneath (ie,
- * no p2p bridge between), we call cleanup_bridge().
- *
- * The else clause cleans up root bridges that either had no
- * hotplug slots at all, or had a p2p bridge underneath.
- */
- bridge = acpiphp_handle_to_bridge(handle);
- if (bridge)
- cleanup_bridge(bridge);
+ mutex_unlock(&bridge_mutex);
}
static int power_on_slot(struct acpiphp_slot *slot)
@@ -798,6 +656,7 @@ static void check_hotplug_bridge(struct acpiphp_slot *slot, struct pci_dev *dev)
}
}
}
+
/**
* enable_device - enable, configure a slot
* @slot: slot to be enabled
@@ -810,9 +669,7 @@ static int __ref enable_device(struct acpiphp_slot *slot)
struct pci_dev *dev;
struct pci_bus *bus = slot->bridge->pci_bus;
struct acpiphp_func *func;
- int retval = 0;
int num, max, pass;
- acpi_status status;
if (slot->flags & SLOT_ENABLED)
goto err_exit;
@@ -867,23 +724,11 @@ static int __ref enable_device(struct acpiphp_slot *slot)
slot->flags &= (~SLOT_ENABLED);
continue;
}
-
- if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE &&
- dev->hdr_type != PCI_HEADER_TYPE_CARDBUS) {
- pci_dev_put(dev);
- continue;
- }
-
- status = find_p2p_bridge(func->handle, (u32)1, bus, NULL);
- if (ACPI_FAILURE(status))
- warn("find_p2p_bridge failed (error code = 0x%x)\n",
- status);
- pci_dev_put(dev);
}
err_exit:
- return retval;
+ return 0;
}
/* return first device in slot, acquiring a reference on it */
@@ -912,23 +757,6 @@ static int disable_device(struct acpiphp_slot *slot)
{
struct acpiphp_func *func;
struct pci_dev *pdev;
- struct pci_bus *bus = slot->bridge->pci_bus;
-
- /* The slot will be enabled when func 0 is added, so check
- func 0 before disable the slot. */
- pdev = pci_get_slot(bus, PCI_DEVFN(slot->device, 0));
- if (!pdev)
- goto err_exit;
- pci_dev_put(pdev);
-
- list_for_each_entry(func, &slot->funcs, sibling) {
- if (func->bridge) {
- /* cleanup p2p bridges under this P2P bridge */
- cleanup_p2p_bridge(func->bridge->handle,
- (u32)1, NULL, NULL);
- func->bridge = NULL;
- }
- }
/*
* enable_device() enumerates all functions in this device via
@@ -947,7 +775,6 @@ static int disable_device(struct acpiphp_slot *slot)
slot->flags &= (~SLOT_ENABLED);
-err_exit:
return 0;
}
@@ -1037,7 +864,7 @@ static int acpiphp_check_bridge(struct acpiphp_bridge *bridge)
enabled = disabled = 0;
- for (slot = bridge->slots; slot; slot = slot->next) {
+ list_for_each_entry(slot, &bridge->slots, node) {
unsigned int status = get_slot_status(slot);
if (slot->flags & SLOT_ENABLED) {
if (status == ACPI_STA_ALL)
@@ -1082,11 +909,11 @@ static void acpiphp_set_hpp_values(struct pci_bus *bus)
*/
static void acpiphp_sanitize_bus(struct pci_bus *bus)
{
- struct pci_dev *dev;
+ struct pci_dev *dev, *tmp;
int i;
unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM;
- list_for_each_entry(dev, &bus->devices, bus_list) {
+ list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
for (i=0; i<PCI_BRIDGE_RESOURCES; i++) {
struct resource *res = &dev->resource[i];
if ((res->flags & type_mask) && !res->start &&
@@ -1118,6 +945,7 @@ check_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
dbg("%s: re-enumerating slots under %s\n",
__func__, objname);
acpiphp_check_bridge(bridge);
+ put_bridge(bridge);
}
return AE_OK ;
}
@@ -1195,6 +1023,7 @@ static void _handle_hotplug_event_bridge(struct work_struct *work)
acpi_scan_lock_release();
kfree(hp_work); /* allocated in handle_hotplug_event_bridge */
+ put_bridge(bridge);
}
/**
@@ -1208,6 +1037,8 @@ static void _handle_hotplug_event_bridge(struct work_struct *work)
static void handle_hotplug_event_bridge(acpi_handle handle, u32 type,
void *context)
{
+ struct acpiphp_bridge *bridge = context;
+
/*
* Currently the code adds all hotplug events to the kacpid_wq
* queue when it should add hotplug events to the kacpi_hotplug_wq.
@@ -1216,6 +1047,7 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type,
* For now just re-add this work to the kacpi_hotplug_wq so we
* don't deadlock on hotplug actions.
*/
+ get_bridge(bridge);
alloc_acpi_hp_work(handle, type, context, _handle_hotplug_event_bridge);
}
@@ -1270,6 +1102,7 @@ static void _handle_hotplug_event_func(struct work_struct *work)
acpi_scan_lock_release();
kfree(hp_work); /* allocated in handle_hotplug_event_func */
+ put_bridge(func->slot->bridge);
}
/**
@@ -1283,6 +1116,8 @@ static void _handle_hotplug_event_func(struct work_struct *work)
static void handle_hotplug_event_func(acpi_handle handle, u32 type,
void *context)
{
+ struct acpiphp_func *func = context;
+
/*
* Currently the code adds all hotplug events to the kacpid_wq
* queue when it should add hotplug events to the kacpi_hotplug_wq.
@@ -1291,33 +1126,69 @@ static void handle_hotplug_event_func(acpi_handle handle, u32 type,
* For now just re-add this work to the kacpi_hotplug_wq so we
* don't deadlock on hotplug actions.
*/
+ get_bridge(func->slot->bridge);
alloc_acpi_hp_work(handle, type, context, _handle_hotplug_event_func);
}
-static struct acpi_pci_driver acpi_pci_hp_driver = {
- .add = add_bridge,
- .remove = remove_bridge,
-};
-
-/**
- * acpiphp_glue_init - initializes all PCI hotplug - ACPI glue data structures
+/*
+ * Create hotplug slots for the PCI bus.
+ * It should always return 0 to avoid skipping following notifiers.
*/
-int __init acpiphp_glue_init(void)
+void acpiphp_enumerate_slots(struct pci_bus *bus, acpi_handle handle)
{
- acpi_pci_register_driver(&acpi_pci_hp_driver);
+ acpi_handle dummy_handle;
+ struct acpiphp_bridge *bridge;
- return 0;
-}
+ if (acpiphp_disabled)
+ return;
+ if (detect_ejectable_slots(handle) <= 0)
+ return;
-/**
- * acpiphp_glue_exit - terminates all PCI hotplug - ACPI glue data structures
- *
- * This function frees all data allocated in acpiphp_glue_init().
- */
-void acpiphp_glue_exit(void)
+ bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL);
+ if (bridge == NULL) {
+ err("out of memory\n");
+ return;
+ }
+
+ INIT_LIST_HEAD(&bridge->slots);
+ kref_init(&bridge->ref);
+ bridge->handle = handle;
+ bridge->pci_dev = pci_dev_get(bus->self);
+ bridge->pci_bus = bus;
+
+ /*
+ * Grab a ref to the subordinate PCI bus in case the bus is
+ * removed via PCI core logical hotplug. The ref pins the bus
+ * (which we access during module unload).
+ */
+ get_device(&bus->dev);
+
+ if (!pci_is_root_bus(bridge->pci_bus) &&
+ ACPI_SUCCESS(acpi_get_handle(bridge->handle,
+ "_EJ0", &dummy_handle))) {
+ dbg("found ejectable p2p bridge\n");
+ bridge->flags |= BRIDGE_HAS_EJ0;
+ bridge->func = acpiphp_bridge_handle_to_function(handle);
+ }
+
+ init_bridge_misc(bridge);
+}
+
+/* Destroy hotplug slots associated with the PCI bus */
+void acpiphp_remove_slots(struct pci_bus *bus)
{
- acpi_pci_unregister_driver(&acpi_pci_hp_driver);
+ struct acpiphp_bridge *bridge, *tmp;
+
+ if (acpiphp_disabled)
+ return;
+
+ list_for_each_entry_safe(bridge, tmp, &bridge_list, list)
+ if (bridge->pci_bus == bus) {
+ cleanup_bridge(bridge);
+ put_bridge(bridge);
+ break;
+ }
}
/**
@@ -1396,7 +1267,7 @@ u8 acpiphp_get_latch_status(struct acpiphp_slot *slot)
sta = get_slot_status(slot);
- return (sta & ACPI_STA_SHOW_IN_UI) ? 0 : 1;
+ return (sta & ACPI_STA_DEVICE_UI) ? 0 : 1;
}
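
The get_bridge()/put_bridge() calls added around alloc_acpi_hp_work() above follow the usual reference-counting rule for deferred work: take a reference before the event is queued and drop it at the end of the work function, so the bridge cannot be freed while a hotplug event is still in flight. A minimal user-space sketch of that ownership rule (toy types and names, not the kernel's kref API):

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for struct acpiphp_bridge with an embedded reference count. */
struct bridge {
	int refcount;
	int handle;
};

static struct bridge *bridge_alloc(int handle)
{
	struct bridge *b = malloc(sizeof(*b));

	if (!b)
		abort();
	b->refcount = 1;	/* initial reference, as kref_init() would take */
	b->handle = handle;
	return b;
}

static void get_bridge(struct bridge *b)
{
	b->refcount++;
}

static void put_bridge(struct bridge *b)
{
	if (--b->refcount == 0) {
		printf("freeing bridge %d\n", b->handle);
		free(b);
	}
}

/* Plays the role of the deferred work function (_handle_hotplug_event_bridge). */
static void hotplug_work(struct bridge *b)
{
	printf("handling hotplug event on bridge %d\n", b->handle);
	put_bridge(b);		/* drop the reference taken when the work was queued */
}

int main(void)
{
	struct bridge *b = bridge_alloc(42);

	get_bridge(b);		/* pin the bridge before "queuing" the work */
	put_bridge(b);		/* the original owner may drop its own reference... */
	hotplug_work(b);	/* ...the pending work still sees a live object */
	return 0;
}

In the driver itself the counter is a kref inside struct acpiphp_bridge and the queue is the ACPI hotplug workqueue, but the pairing of get-before-queue and put-in-handler is the same.
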
diff --git a/trunk/drivers/pci/hotplug/cpci_hotplug.h b/trunk/drivers/pci/hotplug/cpci_hotplug.h
index 9fff878cf026..1356211431d0 100644
--- a/trunk/drivers/pci/hotplug/cpci_hotplug.h
+++ b/trunk/drivers/pci/hotplug/cpci_hotplug.h
@@ -75,28 +75,36 @@ static inline const char *slot_name(struct slot *slot)
return hotplug_slot_name(slot->hotplug_slot);
}
-extern int cpci_hp_register_controller(struct cpci_hp_controller *controller);
-extern int cpci_hp_unregister_controller(struct cpci_hp_controller *controller);
-extern int cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last);
-extern int cpci_hp_unregister_bus(struct pci_bus *bus);
-extern int cpci_hp_start(void);
-extern int cpci_hp_stop(void);
+int cpci_hp_register_controller(struct cpci_hp_controller *controller);
+int cpci_hp_unregister_controller(struct cpci_hp_controller *controller);
+int cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last);
+int cpci_hp_unregister_bus(struct pci_bus *bus);
+int cpci_hp_start(void);
+int cpci_hp_stop(void);
/*
* Internal function prototypes, these functions should not be used by
* board/chassis drivers.
*/
-extern u8 cpci_get_attention_status(struct slot *slot);
-extern u8 cpci_get_latch_status(struct slot *slot);
-extern u8 cpci_get_adapter_status(struct slot *slot);
-extern u16 cpci_get_hs_csr(struct slot * slot);
-extern int cpci_set_attention_status(struct slot *slot, int status);
-extern int cpci_check_and_clear_ins(struct slot * slot);
-extern int cpci_check_ext(struct slot * slot);
-extern int cpci_clear_ext(struct slot * slot);
-extern int cpci_led_on(struct slot * slot);
-extern int cpci_led_off(struct slot * slot);
-extern int cpci_configure_slot(struct slot *slot);
-extern int cpci_unconfigure_slot(struct slot *slot);
+u8 cpci_get_attention_status(struct slot *slot);
+u8 cpci_get_latch_status(struct slot *slot);
+u8 cpci_get_adapter_status(struct slot *slot);
+u16 cpci_get_hs_csr(struct slot * slot);
+int cpci_set_attention_status(struct slot *slot, int status);
+int cpci_check_and_clear_ins(struct slot * slot);
+int cpci_check_ext(struct slot * slot);
+int cpci_clear_ext(struct slot * slot);
+int cpci_led_on(struct slot * slot);
+int cpci_led_off(struct slot * slot);
+int cpci_configure_slot(struct slot *slot);
+int cpci_unconfigure_slot(struct slot *slot);
+
+#ifdef CONFIG_HOTPLUG_PCI_CPCI
+int cpci_hotplug_init(int debug);
+void cpci_hotplug_exit(void);
+#else
+static inline int cpci_hotplug_init(int debug) { return 0; }
+static inline void cpci_hotplug_exit(void) { }
+#endif
#endif /* _CPCI_HOTPLUG_H */
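
The #ifdef CONFIG_HOTPLUG_PCI_CPCI block appended above (together with the matching deletion from pci_hotplug_core.c later in this patch) is the standard Kconfig stub idiom: the header either declares the real functions or provides static inline no-ops, so callers can invoke cpci_hotplug_init()/cpci_hotplug_exit() without any #ifdef of their own. A self-contained sketch of the idiom, collapsed into one file and using hypothetical names:

#include <stdio.h>

/*
 * Hypothetical Kconfig-style switch; build with -DCONFIG_FEATURE to get the
 * real implementation, otherwise the static inline stubs compile to nothing.
 */
#ifdef CONFIG_FEATURE
static int feature_init(int debug)
{
	printf("feature up (debug=%d)\n", debug);
	return 0;
}
static void feature_exit(void)
{
	printf("feature down\n");
}
#else
static inline int feature_init(int debug) { (void)debug; return 0; }
static inline void feature_exit(void) { }
#endif

int main(void)
{
	/* The caller needs no #ifdef of its own, just like pci_hotplug_init(). */
	int rc = feature_init(1);

	feature_exit();
	return rc;
}
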
diff --git a/trunk/drivers/pci/hotplug/cpqphp.h b/trunk/drivers/pci/hotplug/cpqphp.h
index d8ffc7366801..516b87738b6e 100644
--- a/trunk/drivers/pci/hotplug/cpqphp.h
+++ b/trunk/drivers/pci/hotplug/cpqphp.h
@@ -404,50 +404,44 @@ struct resource_lists {
/* debugfs functions for the hotplug controller info */
-extern void cpqhp_initialize_debugfs(void);
-extern void cpqhp_shutdown_debugfs(void);
-extern void cpqhp_create_debugfs_files(struct controller *ctrl);
-extern void cpqhp_remove_debugfs_files(struct controller *ctrl);
+void cpqhp_initialize_debugfs(void);
+void cpqhp_shutdown_debugfs(void);
+void cpqhp_create_debugfs_files(struct controller *ctrl);
+void cpqhp_remove_debugfs_files(struct controller *ctrl);
/* controller functions */
-extern void cpqhp_pushbutton_thread(unsigned long event_pointer);
-extern irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data);
-extern int cpqhp_find_available_resources(struct controller *ctrl,
- void __iomem *rom_start);
-extern int cpqhp_event_start_thread(void);
-extern void cpqhp_event_stop_thread(void);
-extern struct pci_func *cpqhp_slot_create(unsigned char busnumber);
-extern struct pci_func *cpqhp_slot_find(unsigned char bus, unsigned char device,
- unsigned char index);
-extern int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func);
-extern int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func);
-extern int cpqhp_hardware_test(struct controller *ctrl, int test_num);
+void cpqhp_pushbutton_thread(unsigned long event_pointer);
+irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data);
+int cpqhp_find_available_resources(struct controller *ctrl,
+ void __iomem *rom_start);
+int cpqhp_event_start_thread(void);
+void cpqhp_event_stop_thread(void);
+struct pci_func *cpqhp_slot_create(unsigned char busnumber);
+struct pci_func *cpqhp_slot_find(unsigned char bus, unsigned char device,
+ unsigned char index);
+int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func);
+int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func);
+int cpqhp_hardware_test(struct controller *ctrl, int test_num);
/* resource functions */
-extern int cpqhp_resource_sort_and_combine (struct pci_resource **head);
+int cpqhp_resource_sort_and_combine (struct pci_resource **head);
/* pci functions */
-extern int cpqhp_set_irq(u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num);
-extern int cpqhp_get_bus_dev(struct controller *ctrl, u8 *bus_num, u8 *dev_num,
- u8 slot);
-extern int cpqhp_save_config(struct controller *ctrl, int busnumber,
- int is_hot_plug);
-extern int cpqhp_save_base_addr_length(struct controller *ctrl,
- struct pci_func *func);
-extern int cpqhp_save_used_resources(struct controller *ctrl,
- struct pci_func *func);
-extern int cpqhp_configure_board(struct controller *ctrl,
- struct pci_func *func);
-extern int cpqhp_save_slot_config(struct controller *ctrl,
- struct pci_func *new_slot);
-extern int cpqhp_valid_replace(struct controller *ctrl, struct pci_func *func);
-extern void cpqhp_destroy_board_resources(struct pci_func *func);
-extern int cpqhp_return_board_resources (struct pci_func *func,
- struct resource_lists *resources);
-extern void cpqhp_destroy_resource_list(struct resource_lists *resources);
-extern int cpqhp_configure_device(struct controller *ctrl,
- struct pci_func *func);
-extern int cpqhp_unconfigure_device(struct pci_func *func);
+int cpqhp_set_irq(u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num);
+int cpqhp_get_bus_dev(struct controller *ctrl, u8 *bus_num, u8 *dev_num,
+ u8 slot);
+int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug);
+int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func *func);
+int cpqhp_save_used_resources(struct controller *ctrl, struct pci_func *func);
+int cpqhp_configure_board(struct controller *ctrl, struct pci_func *func);
+int cpqhp_save_slot_config(struct controller *ctrl, struct pci_func *new_slot);
+int cpqhp_valid_replace(struct controller *ctrl, struct pci_func *func);
+void cpqhp_destroy_board_resources(struct pci_func *func);
+int cpqhp_return_board_resources(struct pci_func *func,
+ struct resource_lists *resources);
+void cpqhp_destroy_resource_list(struct resource_lists *resources);
+int cpqhp_configure_device(struct controller *ctrl, struct pci_func *func);
+int cpqhp_unconfigure_device(struct pci_func *func);
/* Global variables */
extern int cpqhp_debug;
diff --git a/trunk/drivers/pci/hotplug/cpqphp_nvram.h b/trunk/drivers/pci/hotplug/cpqphp_nvram.h
index e89c0702119d..34e4e54fcf15 100644
--- a/trunk/drivers/pci/hotplug/cpqphp_nvram.h
+++ b/trunk/drivers/pci/hotplug/cpqphp_nvram.h
@@ -30,26 +30,26 @@
#ifndef CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM
-static inline void compaq_nvram_init (void __iomem *rom_start)
+static inline void compaq_nvram_init(void __iomem *rom_start)
{
return;
}
-static inline int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
+static inline int compaq_nvram_load(void __iomem *rom_start, struct controller *ctrl)
{
return 0;
}
-static inline int compaq_nvram_store (void __iomem *rom_start)
+static inline int compaq_nvram_store(void __iomem *rom_start)
{
return 0;
}
#else
-extern void compaq_nvram_init (void __iomem *rom_start);
-extern int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl);
-extern int compaq_nvram_store (void __iomem *rom_start);
+void compaq_nvram_init(void __iomem *rom_start);
+int compaq_nvram_load(void __iomem *rom_start, struct controller *ctrl);
+int compaq_nvram_store(void __iomem *rom_start);
#endif
diff --git a/trunk/drivers/pci/hotplug/ibmphp.h b/trunk/drivers/pci/hotplug/ibmphp.h
index a8d391a4957d..8c5b25871d02 100644
--- a/trunk/drivers/pci/hotplug/ibmphp.h
+++ b/trunk/drivers/pci/hotplug/ibmphp.h
@@ -275,17 +275,17 @@ extern struct list_head ibmphp_slot_head;
* FUNCTION PROTOTYPES *
***********************************************************/
-extern void ibmphp_free_ebda_hpc_queue (void);
-extern int ibmphp_access_ebda (void);
-extern struct slot *ibmphp_get_slot_from_physical_num (u8);
-extern int ibmphp_get_total_hp_slots (void);
-extern void ibmphp_free_ibm_slot (struct slot *);
-extern void ibmphp_free_bus_info_queue (void);
-extern void ibmphp_free_ebda_pci_rsrc_queue (void);
-extern struct bus_info *ibmphp_find_same_bus_num (u32);
-extern int ibmphp_get_bus_index (u8);
-extern u16 ibmphp_get_total_controllers (void);
-extern int ibmphp_register_pci (void);
+void ibmphp_free_ebda_hpc_queue(void);
+int ibmphp_access_ebda(void);
+struct slot *ibmphp_get_slot_from_physical_num(u8);
+int ibmphp_get_total_hp_slots(void);
+void ibmphp_free_ibm_slot(struct slot *);
+void ibmphp_free_bus_info_queue(void);
+void ibmphp_free_ebda_pci_rsrc_queue(void);
+struct bus_info *ibmphp_find_same_bus_num(u32);
+int ibmphp_get_bus_index(u8);
+u16 ibmphp_get_total_controllers(void);
+int ibmphp_register_pci(void);
/* passed parameters */
#define MEM 0
@@ -381,24 +381,24 @@ struct res_needed {
/* functions */
-extern int ibmphp_rsrc_init (void);
-extern int ibmphp_add_resource (struct resource_node *);
-extern int ibmphp_remove_resource (struct resource_node *);
-extern int ibmphp_find_resource (struct bus_node *, u32, struct resource_node **, int);
-extern int ibmphp_check_resource (struct resource_node *, u8);
-extern int ibmphp_remove_bus (struct bus_node *, u8);
-extern void ibmphp_free_resources (void);
-extern int ibmphp_add_pfmem_from_mem (struct resource_node *);
-extern struct bus_node *ibmphp_find_res_bus (u8);
-extern void ibmphp_print_test (void); /* for debugging purposes */
+int ibmphp_rsrc_init(void);
+int ibmphp_add_resource(struct resource_node *);
+int ibmphp_remove_resource(struct resource_node *);
+int ibmphp_find_resource(struct bus_node *, u32, struct resource_node **, int);
+int ibmphp_check_resource(struct resource_node *, u8);
+int ibmphp_remove_bus(struct bus_node *, u8);
+void ibmphp_free_resources(void);
+int ibmphp_add_pfmem_from_mem(struct resource_node *);
+struct bus_node *ibmphp_find_res_bus(u8);
+void ibmphp_print_test(void); /* for debugging purposes */
-extern void ibmphp_hpc_initvars (void);
-extern int ibmphp_hpc_readslot (struct slot *, u8, u8 *);
-extern int ibmphp_hpc_writeslot (struct slot *, u8);
-extern void ibmphp_lock_operations (void);
-extern void ibmphp_unlock_operations (void);
-extern int ibmphp_hpc_start_poll_thread (void);
-extern void ibmphp_hpc_stop_poll_thread (void);
+void ibmphp_hpc_initvars(void);
+int ibmphp_hpc_readslot(struct slot *, u8, u8 *);
+int ibmphp_hpc_writeslot(struct slot *, u8);
+void ibmphp_lock_operations(void);
+void ibmphp_unlock_operations(void);
+int ibmphp_hpc_start_poll_thread(void);
+void ibmphp_hpc_stop_poll_thread(void);
//----------------------------------------------------------------------------
@@ -749,11 +749,11 @@ struct controller {
/* Functions */
-extern int ibmphp_init_devno (struct slot **); /* This function is called from EBDA, so we need it not be static */
-extern int ibmphp_do_disable_slot (struct slot *slot_cur);
-extern int ibmphp_update_slot_info (struct slot *); /* This function is called from HPC, so we need it to not be be static */
-extern int ibmphp_configure_card (struct pci_func *, u8);
-extern int ibmphp_unconfigure_card (struct slot **, int);
+int ibmphp_init_devno(struct slot **); /* This function is called from EBDA, so we need it not be static */
+int ibmphp_do_disable_slot(struct slot *slot_cur);
+int ibmphp_update_slot_info(struct slot *); /* This function is called from HPC, so we need it to not be static */
+int ibmphp_configure_card(struct pci_func *, u8);
+int ibmphp_unconfigure_card(struct slot **, int);
extern struct hotplug_slot_ops ibmphp_hotplug_slot_ops;
#endif //__IBMPHP_H
diff --git a/trunk/drivers/pci/hotplug/pci_hotplug_core.c b/trunk/drivers/pci/hotplug/pci_hotplug_core.c
index 202f4a969eb5..ec20f74c8981 100644
--- a/trunk/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/trunk/drivers/pci/hotplug/pci_hotplug_core.c
@@ -41,6 +41,7 @@
#include
#include
#include "../pci.h"
+#include "cpci_hotplug.h"
#define MY_NAME "pci_hotplug"
@@ -63,14 +64,6 @@ static bool debug;
static LIST_HEAD(pci_hotplug_slot_list);
static DEFINE_MUTEX(pci_hp_mutex);
-#ifdef CONFIG_HOTPLUG_PCI_CPCI
-extern int cpci_hotplug_init(int debug);
-extern void cpci_hotplug_exit(void);
-#else
-static inline int cpci_hotplug_init(int debug) { return 0; }
-static inline void cpci_hotplug_exit(void) { }
-#endif
-
/* Weee, fun with macros... */
#define GET_STATUS(name,type) \
static int get_##name (struct hotplug_slot *slot, type *value) \
@@ -524,13 +517,11 @@ int pci_hp_deregister(struct hotplug_slot *hotplug)
*
* Returns 0 if successful, anything else for an error.
*/
-int __must_check pci_hp_change_slot_info(struct hotplug_slot *hotplug,
- struct hotplug_slot_info *info)
+int pci_hp_change_slot_info(struct hotplug_slot *hotplug,
+ struct hotplug_slot_info *info)
{
- struct pci_slot *slot;
if (!hotplug || !info)
return -ENODEV;
- slot = hotplug->pci_slot;
memcpy(hotplug->info, info, sizeof(struct hotplug_slot_info));
diff --git a/trunk/drivers/pci/hotplug/pciehp.h b/trunk/drivers/pci/hotplug/pciehp.h
index 2c113de94323..7fb326983ed6 100644
--- a/trunk/drivers/pci/hotplug/pciehp.h
+++ b/trunk/drivers/pci/hotplug/pciehp.h
@@ -127,15 +127,15 @@ struct controller {
#define NO_CMD_CMPL(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_NCCS)
#define PSN(ctrl) ((ctrl)->slot_cap >> 19)
-extern int pciehp_sysfs_enable_slot(struct slot *slot);
-extern int pciehp_sysfs_disable_slot(struct slot *slot);
-extern u8 pciehp_handle_attention_button(struct slot *p_slot);
-extern u8 pciehp_handle_switch_change(struct slot *p_slot);
-extern u8 pciehp_handle_presence_change(struct slot *p_slot);
-extern u8 pciehp_handle_power_fault(struct slot *p_slot);
-extern int pciehp_configure_device(struct slot *p_slot);
-extern int pciehp_unconfigure_device(struct slot *p_slot);
-extern void pciehp_queue_pushbutton_work(struct work_struct *work);
+int pciehp_sysfs_enable_slot(struct slot *slot);
+int pciehp_sysfs_disable_slot(struct slot *slot);
+u8 pciehp_handle_attention_button(struct slot *p_slot);
+u8 pciehp_handle_switch_change(struct slot *p_slot);
+u8 pciehp_handle_presence_change(struct slot *p_slot);
+u8 pciehp_handle_power_fault(struct slot *p_slot);
+int pciehp_configure_device(struct slot *p_slot);
+int pciehp_unconfigure_device(struct slot *p_slot);
+void pciehp_queue_pushbutton_work(struct work_struct *work);
struct controller *pcie_init(struct pcie_device *dev);
int pcie_init_notification(struct controller *ctrl);
int pciehp_enable_slot(struct slot *p_slot);
@@ -166,8 +166,8 @@ static inline const char *slot_name(struct slot *slot)
#include
#include
-extern void __init pciehp_acpi_slot_detection_init(void);
-extern int pciehp_acpi_slot_detection_check(struct pci_dev *dev);
+void __init pciehp_acpi_slot_detection_init(void);
+int pciehp_acpi_slot_detection_check(struct pci_dev *dev);
static inline void pciehp_firmware_init(void)
{
diff --git a/trunk/drivers/pci/hotplug/pciehp_acpi.c b/trunk/drivers/pci/hotplug/pciehp_acpi.c
index 24d709b7388c..ead7c534095e 100644
--- a/trunk/drivers/pci/hotplug/pciehp_acpi.c
+++ b/trunk/drivers/pci/hotplug/pciehp_acpi.c
@@ -90,7 +90,7 @@ static int __init dummy_probe(struct pcie_device *dev)
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
if (!slot)
return -ENOMEM;
- slot->number = slot_cap >> 19;
+ slot->number = (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19;
list_for_each_entry(tmp, &dummy_slots, list) {
if (tmp->number == slot->number)
dup_slot_id++;
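
For context on the dummy_probe() change above: the Physical Slot Number occupies bits 31:19 of the PCIe Slot Capabilities register, so masking with PCI_EXP_SLTCAP_PSN before the shift documents the field being extracted (for a 32-bit slot_cap the numeric result is the same with or without the mask). A small stand-alone decode using a made-up register value:

#include <stdio.h>
#include <stdint.h>

#define PCI_EXP_SLTCAP_PSN	0xfff80000	/* Physical Slot Number, bits 31:19 */

int main(void)
{
	/* Made-up Slot Capabilities value: low feature bits set, PSN = 12. */
	uint32_t slot_cap = (12u << 19) | 0x0000007f;
	unsigned psn = (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19;

	printf("physical slot number = %u\n", psn);	/* prints 12 */
	return 0;
}
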
diff --git a/trunk/drivers/pci/hotplug/rpadlpar.h b/trunk/drivers/pci/hotplug/rpadlpar.h
index 4a0a59b82eae..81df93931ad0 100644
--- a/trunk/drivers/pci/hotplug/rpadlpar.h
+++ b/trunk/drivers/pci/hotplug/rpadlpar.h
@@ -15,10 +15,10 @@
#ifndef _RPADLPAR_IO_H_
#define _RPADLPAR_IO_H_
-extern int dlpar_sysfs_init(void);
-extern void dlpar_sysfs_exit(void);
+int dlpar_sysfs_init(void);
+void dlpar_sysfs_exit(void);
-extern int dlpar_add_slot(char *drc_name);
-extern int dlpar_remove_slot(char *drc_name);
+int dlpar_add_slot(char *drc_name);
+int dlpar_remove_slot(char *drc_name);
#endif
diff --git a/trunk/drivers/pci/hotplug/rpaphp.h b/trunk/drivers/pci/hotplug/rpaphp.h
index df5677440a08..3135856e5e1c 100644
--- a/trunk/drivers/pci/hotplug/rpaphp.h
+++ b/trunk/drivers/pci/hotplug/rpaphp.h
@@ -86,18 +86,18 @@ extern struct list_head rpaphp_slot_head;
/* function prototypes */
/* rpaphp_pci.c */
-extern int rpaphp_enable_slot(struct slot *slot);
-extern int rpaphp_get_sensor_state(struct slot *slot, int *state);
+int rpaphp_enable_slot(struct slot *slot);
+int rpaphp_get_sensor_state(struct slot *slot, int *state);
/* rpaphp_core.c */
-extern int rpaphp_add_slot(struct device_node *dn);
-extern int rpaphp_get_drc_props(struct device_node *dn, int *drc_index,
+int rpaphp_add_slot(struct device_node *dn);
+int rpaphp_get_drc_props(struct device_node *dn, int *drc_index,
char **drc_name, char **drc_type, int *drc_power_domain);
/* rpaphp_slot.c */
-extern void dealloc_slot_struct(struct slot *slot);
-extern struct slot *alloc_slot_struct(struct device_node *dn, int drc_index, char *drc_name, int power_domain);
-extern int rpaphp_register_slot(struct slot *slot);
-extern int rpaphp_deregister_slot(struct slot *slot);
+void dealloc_slot_struct(struct slot *slot);
+struct slot *alloc_slot_struct(struct device_node *dn, int drc_index, char *drc_name, int power_domain);
+int rpaphp_register_slot(struct slot *slot);
+int rpaphp_deregister_slot(struct slot *slot);
#endif /* _PPC64PHP_H */
diff --git a/trunk/drivers/pci/hotplug/s390_pci_hpc.c b/trunk/drivers/pci/hotplug/s390_pci_hpc.c
index 7db249a25016..46a7b738f61f 100644
--- a/trunk/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/trunk/drivers/pci/hotplug/s390_pci_hpc.c
@@ -16,6 +16,7 @@
#include
#include
#include
+#include
#include
#define SLOT_NAME_SIZE 10
@@ -49,6 +50,7 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
return -EIO;
rc = sclp_pci_configure(slot->zdev->fid);
+ zpci_dbg(3, "conf fid:%x, rc:%d\n", slot->zdev->fid, rc);
if (!rc) {
slot->zdev->state = ZPCI_FN_STATE_CONFIGURED;
/* automatically scan the device after is was configured */
@@ -66,16 +68,16 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
if (!zpci_fn_configured(slot->zdev->state))
return -EIO;
+ rc = zpci_disable_device(slot->zdev);
+ if (rc)
+ return rc;
/* TODO: we rely on the user to unbind/remove the device, is that plausible
* or do we need to trigger that here?
*/
rc = sclp_pci_deconfigure(slot->zdev->fid);
- if (!rc) {
- /* Fixme: better call List-PCI to find the disabled FH
- for the FID since the FH should be opaque... */
- slot->zdev->fh &= 0x7fffffff;
+ zpci_dbg(3, "deconf fid:%x, rc:%d\n", slot->zdev->fid, rc);
+ if (!rc)
slot->zdev->state = ZPCI_FN_STATE_STANDBY;
- }
return rc;
}
diff --git a/trunk/drivers/pci/hotplug/shpchp.h b/trunk/drivers/pci/hotplug/shpchp.h
index b849f995075a..e260f207a90e 100644
--- a/trunk/drivers/pci/hotplug/shpchp.h
+++ b/trunk/drivers/pci/hotplug/shpchp.h
@@ -168,19 +168,19 @@ struct controller {
#define WRONG_BUS_FREQUENCY 0x0000000D
#define POWER_FAILURE 0x0000000E
-extern int __must_check shpchp_create_ctrl_files(struct controller *ctrl);
-extern void shpchp_remove_ctrl_files(struct controller *ctrl);
-extern int shpchp_sysfs_enable_slot(struct slot *slot);
-extern int shpchp_sysfs_disable_slot(struct slot *slot);
-extern u8 shpchp_handle_attention_button(u8 hp_slot, struct controller *ctrl);
-extern u8 shpchp_handle_switch_change(u8 hp_slot, struct controller *ctrl);
-extern u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl);
-extern u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl);
-extern int shpchp_configure_device(struct slot *p_slot);
-extern int shpchp_unconfigure_device(struct slot *p_slot);
-extern void cleanup_slots(struct controller *ctrl);
-extern void shpchp_queue_pushbutton_work(struct work_struct *work);
-extern int shpc_init( struct controller *ctrl, struct pci_dev *pdev);
+int __must_check shpchp_create_ctrl_files(struct controller *ctrl);
+void shpchp_remove_ctrl_files(struct controller *ctrl);
+int shpchp_sysfs_enable_slot(struct slot *slot);
+int shpchp_sysfs_disable_slot(struct slot *slot);
+u8 shpchp_handle_attention_button(u8 hp_slot, struct controller *ctrl);
+u8 shpchp_handle_switch_change(u8 hp_slot, struct controller *ctrl);
+u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl);
+u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl);
+int shpchp_configure_device(struct slot *p_slot);
+int shpchp_unconfigure_device(struct slot *p_slot);
+void cleanup_slots(struct controller *ctrl);
+void shpchp_queue_pushbutton_work(struct work_struct *work);
+int shpc_init( struct controller *ctrl, struct pci_dev *pdev);
static inline const char *slot_name(struct slot *slot)
{
diff --git a/trunk/drivers/pci/hotplug/shpchp_sysfs.c b/trunk/drivers/pci/hotplug/shpchp_sysfs.c
index eeb23ceae4a8..e8c31fe20566 100644
--- a/trunk/drivers/pci/hotplug/shpchp_sysfs.c
+++ b/trunk/drivers/pci/hotplug/shpchp_sysfs.c
@@ -85,7 +85,7 @@ static ssize_t show_ctrl (struct device *dev, struct device_attribute *attr, cha
}
static DEVICE_ATTR (ctrl, S_IRUGO, show_ctrl, NULL);
-int __must_check shpchp_create_ctrl_files (struct controller *ctrl)
+int shpchp_create_ctrl_files (struct controller *ctrl)
{
return device_create_file (&ctrl->pci_dev->dev, &dev_attr_ctrl);
}
diff --git a/trunk/drivers/pci/msi.c b/trunk/drivers/pci/msi.c
index 00cc78c7aa04..d40bed726769 100644
--- a/trunk/drivers/pci/msi.c
+++ b/trunk/drivers/pci/msi.c
@@ -22,10 +22,12 @@
#include
#include "pci.h"
-#include "msi.h"
static int pci_msi_enable = 1;
+#define msix_table_size(flags) ((flags & PCI_MSIX_FLAGS_QSIZE) + 1)
+
+
/* Arch hooks */
#ifndef arch_msi_check_device
@@ -111,32 +113,26 @@ void default_restore_msi_irqs(struct pci_dev *dev, int irq)
}
#endif
-static void msi_set_enable(struct pci_dev *dev, int pos, int enable)
+static void msi_set_enable(struct pci_dev *dev, int enable)
{
u16 control;
- BUG_ON(!pos);
-
- pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
control &= ~PCI_MSI_FLAGS_ENABLE;
if (enable)
control |= PCI_MSI_FLAGS_ENABLE;
- pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}
static void msix_set_enable(struct pci_dev *dev, int enable)
{
- int pos;
u16 control;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- if (pos) {
- pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
- control &= ~PCI_MSIX_FLAGS_ENABLE;
- if (enable)
- control |= PCI_MSIX_FLAGS_ENABLE;
- pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
- }
+ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
+ control &= ~PCI_MSIX_FLAGS_ENABLE;
+ if (enable)
+ control |= PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
}
static inline __attribute_const__ u32 msi_mask(unsigned x)
@@ -247,18 +243,18 @@ void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
} else {
struct pci_dev *dev = entry->dev;
- int pos = entry->msi_attrib.pos;
+ int pos = dev->msi_cap;
u16 data;
- pci_read_config_dword(dev, msi_lower_address_reg(pos),
- &msg->address_lo);
+ pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
+ &msg->address_lo);
if (entry->msi_attrib.is_64) {
- pci_read_config_dword(dev, msi_upper_address_reg(pos),
- &msg->address_hi);
- pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
+ pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
+ &msg->address_hi);
+ pci_read_config_word(dev, pos + PCI_MSI_DATA_64, &data);
} else {
msg->address_hi = 0;
- pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
+ pci_read_config_word(dev, pos + PCI_MSI_DATA_32, &data);
}
msg->data = data;
}
@@ -302,24 +298,24 @@ void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
} else {
struct pci_dev *dev = entry->dev;
- int pos = entry->msi_attrib.pos;
+ int pos = dev->msi_cap;
u16 msgctl;
- pci_read_config_word(dev, msi_control_reg(pos), &msgctl);
+ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
msgctl &= ~PCI_MSI_FLAGS_QSIZE;
msgctl |= entry->msi_attrib.multiple << 4;
- pci_write_config_word(dev, msi_control_reg(pos), msgctl);
+ pci_write_config_word(dev, pos + PCI_MSI_FLAGS, msgctl);
- pci_write_config_dword(dev, msi_lower_address_reg(pos),
- msg->address_lo);
+ pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
+ msg->address_lo);
if (entry->msi_attrib.is_64) {
- pci_write_config_dword(dev, msi_upper_address_reg(pos),
- msg->address_hi);
- pci_write_config_word(dev, msi_data_reg(pos, 1),
- msg->data);
+ pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
+ msg->address_hi);
+ pci_write_config_word(dev, pos + PCI_MSI_DATA_64,
+ msg->data);
} else {
- pci_write_config_word(dev, msi_data_reg(pos, 0),
- msg->data);
+ pci_write_config_word(dev, pos + PCI_MSI_DATA_32,
+ msg->data);
}
}
entry->msg = *msg;
@@ -391,7 +387,6 @@ static void pci_intx_for_msi(struct pci_dev *dev, int enable)
static void __pci_restore_msi_state(struct pci_dev *dev)
{
- int pos;
u16 control;
struct msi_desc *entry;
@@ -399,22 +394,20 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
return;
entry = irq_get_msi_desc(dev->irq);
- pos = entry->msi_attrib.pos;
pci_intx_for_msi(dev, 0);
- msi_set_enable(dev, pos, 0);
+ msi_set_enable(dev, 0);
arch_restore_msi_irqs(dev, dev->irq);
- pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
msi_mask_irq(entry, msi_capable_mask(control), entry->masked);
control &= ~PCI_MSI_FLAGS_QSIZE;
control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
- pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}
static void __pci_restore_msix_state(struct pci_dev *dev)
{
- int pos;
struct msi_desc *entry;
u16 control;
@@ -422,13 +415,12 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
return;
BUG_ON(list_empty(&dev->msi_list));
entry = list_first_entry(&dev->msi_list, struct msi_desc, list);
- pos = entry->msi_attrib.pos;
- pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
+ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
/* route the table */
pci_intx_for_msi(dev, 0);
control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL;
- pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
list_for_each_entry(entry, &dev->msi_list, list) {
arch_restore_msi_irqs(dev, entry->irq);
@@ -436,7 +428,7 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
}
control &= ~PCI_MSIX_FLAGS_MASKALL;
- pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
}
void pci_restore_msi_state(struct pci_dev *dev)
@@ -484,12 +476,12 @@ static struct msi_attribute mode_attribute =
__ATTR(mode, S_IRUGO, show_msi_mode, NULL);
-struct attribute *msi_irq_default_attrs[] = {
+static struct attribute *msi_irq_default_attrs[] = {
&mode_attribute.attr,
NULL
};
-void msi_kobj_release(struct kobject *kobj)
+static void msi_kobj_release(struct kobject *kobj)
{
struct msi_desc *entry = to_msi_desc(kobj);
@@ -552,27 +544,27 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
static int msi_capability_init(struct pci_dev *dev, int nvec)
{
struct msi_desc *entry;
- int pos, ret;
+ int ret;
u16 control;
unsigned mask;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
- msi_set_enable(dev, pos, 0); /* Disable MSI during set up */
+ msi_set_enable(dev, 0); /* Disable MSI during set up */
- pci_read_config_word(dev, msi_control_reg(pos), &control);
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
/* MSI Entry Initialization */
entry = alloc_msi_entry(dev);
if (!entry)
return -ENOMEM;
entry->msi_attrib.is_msix = 0;
- entry->msi_attrib.is_64 = is_64bit_address(control);
+ entry->msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT);
entry->msi_attrib.entry_nr = 0;
- entry->msi_attrib.maskbit = is_mask_bit_support(control);
+ entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT);
entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
- entry->msi_attrib.pos = pos;
+ entry->msi_attrib.pos = dev->msi_cap;
- entry->mask_pos = msi_mask_reg(pos, entry->msi_attrib.is_64);
+ entry->mask_pos = dev->msi_cap + ((control & PCI_MSI_FLAGS_64BIT) ?
+			PCI_MSI_MASK_64 : PCI_MSI_MASK_32);
/* All MSIs are unmasked by default, Mask them all */
if (entry->msi_attrib.maskbit)
pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
@@ -598,31 +590,30 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
/* Set MSI enabled bits */
pci_intx_for_msi(dev, 0);
- msi_set_enable(dev, pos, 1);
+ msi_set_enable(dev, 1);
dev->msi_enabled = 1;
dev->irq = entry->irq;
return 0;
}
-static void __iomem *msix_map_region(struct pci_dev *dev, unsigned pos,
- unsigned nr_entries)
+static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
{
resource_size_t phys_addr;
u32 table_offset;
u8 bir;
- pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
- bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
- table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
+ pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE,
+ &table_offset);
+ bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
+ table_offset &= PCI_MSIX_TABLE_OFFSET;
phys_addr = pci_resource_start(dev, bir) + table_offset;
return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
}
-static int msix_setup_entries(struct pci_dev *dev, unsigned pos,
- void __iomem *base, struct msix_entry *entries,
- int nvec)
+static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
+ struct msix_entry *entries, int nvec)
{
struct msi_desc *entry;
int i;
@@ -642,7 +633,7 @@ static int msix_setup_entries(struct pci_dev *dev, unsigned pos,
entry->msi_attrib.is_64 = 1;
entry->msi_attrib.entry_nr = entries[i].entry;
entry->msi_attrib.default_irq = dev->irq;
- entry->msi_attrib.pos = pos;
+ entry->msi_attrib.pos = dev->msix_cap;
entry->mask_base = base;
list_add_tail(&entry->list, &dev->msi_list);
@@ -652,7 +643,7 @@ static int msix_setup_entries(struct pci_dev *dev, unsigned pos,
}
static void msix_program_entries(struct pci_dev *dev,
- struct msix_entry *entries)
+ struct msix_entry *entries)
{
struct msi_desc *entry;
int i = 0;
@@ -682,23 +673,22 @@ static void msix_program_entries(struct pci_dev *dev,
static int msix_capability_init(struct pci_dev *dev,
struct msix_entry *entries, int nvec)
{
- int pos, ret;
+ int ret;
u16 control;
void __iomem *base;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
+ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
/* Ensure MSI-X is disabled while it is set up */
control &= ~PCI_MSIX_FLAGS_ENABLE;
- pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
/* Request & Map MSI-X table region */
- base = msix_map_region(dev, pos, multi_msix_capable(control));
+ base = msix_map_region(dev, msix_table_size(control));
if (!base)
return -ENOMEM;
- ret = msix_setup_entries(dev, pos, base, entries, nvec);
+ ret = msix_setup_entries(dev, base, entries, nvec);
if (ret)
return ret;
@@ -712,7 +702,7 @@ static int msix_capability_init(struct pci_dev *dev,
* interrupts coming in before they're fully set up.
*/
control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE;
- pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
msix_program_entries(dev, entries);
@@ -727,7 +717,7 @@ static int msix_capability_init(struct pci_dev *dev,
dev->msix_enabled = 1;
control &= ~PCI_MSIX_FLAGS_MASKALL;
- pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
return 0;
@@ -795,9 +785,6 @@ static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type)
if (ret)
return ret;
- if (!pci_find_capability(dev, type))
- return -EINVAL;
-
return 0;
}
@@ -816,13 +803,13 @@ static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type)
*/
int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec)
{
- int status, pos, maxvec;
+ int status, maxvec;
u16 msgctl;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
- if (!pos)
+ if (!dev->msi_cap)
return -EINVAL;
- pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
+
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
maxvec = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
if (nvec > maxvec)
return maxvec;
@@ -847,14 +834,13 @@ EXPORT_SYMBOL(pci_enable_msi_block);
int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec)
{
- int ret, pos, nvec;
+ int ret, nvec;
u16 msgctl;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
- if (!pos)
+ if (!dev->msi_cap)
return -EINVAL;
- pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
ret = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
if (maxvec)
@@ -876,21 +862,19 @@ void pci_msi_shutdown(struct pci_dev *dev)
struct msi_desc *desc;
u32 mask;
u16 ctrl;
- unsigned pos;
if (!pci_msi_enable || !dev || !dev->msi_enabled)
return;
BUG_ON(list_empty(&dev->msi_list));
desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
- pos = desc->msi_attrib.pos;
- msi_set_enable(dev, pos, 0);
+ msi_set_enable(dev, 0);
pci_intx_for_msi(dev, 1);
dev->msi_enabled = 0;
/* Return the device with MSI unmasked as initial states */
- pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &ctrl);
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &ctrl);
mask = msi_capable_mask(ctrl);
/* Keep cached state to be restored */
__msi_mask_irq(desc, mask, ~mask);
@@ -917,15 +901,13 @@ EXPORT_SYMBOL(pci_disable_msi);
*/
int pci_msix_table_size(struct pci_dev *dev)
{
- int pos;
u16 control;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- if (!pos)
+ if (!dev->msix_cap)
return 0;
- pci_read_config_word(dev, msi_control_reg(pos), &control);
- return multi_msix_capable(control);
+ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
+ return msix_table_size(control);
}
/**
@@ -948,7 +930,7 @@ int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
int status, nr_entries;
int i, j;
- if (!entries)
+ if (!entries || !dev->msix_cap)
return -EINVAL;
status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
@@ -1048,15 +1030,17 @@ EXPORT_SYMBOL(pci_msi_enabled);
void pci_msi_init_pci_dev(struct pci_dev *dev)
{
- int pos;
INIT_LIST_HEAD(&dev->msi_list);
/* Disable the msi hardware to avoid screaming interrupts
* during boot. This is the power on reset default so
* usually this should be a noop.
*/
- pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
- if (pos)
- msi_set_enable(dev, pos, 0);
- msix_set_enable(dev, 0);
+ dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ if (dev->msi_cap)
+ msi_set_enable(dev, 0);
+
+ dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ if (dev->msix_cap)
+ msix_set_enable(dev, 0);
}
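
With msi.h removed (see the file deletion just below), the open-coded register arithmetic has to carry the meaning the old helper macros spelled out: the MSI Message Control word encodes the supported vector count as a power of two in bits 3:1, and the MSI-X Message Control word stores the table size minus one in bits 10:0. A self-contained illustration using made-up Message Control values; msix_table_size() here mirrors the macro introduced at the top of msi.c:

#include <stdio.h>
#include <stdint.h>

#define PCI_MSI_FLAGS_QMASK	0x000e	/* Multiple Message Capable, bits 3:1 */
#define PCI_MSIX_FLAGS_QSIZE	0x07ff	/* MSI-X Table Size - 1, bits 10:0 */

/* Same computation as the msix_table_size() macro introduced in msi.c. */
static unsigned msix_table_size(uint16_t flags)
{
	return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
}

int main(void)
{
	uint16_t msi_msgctl  = 0x008a;	/* sample MSI Message Control, QMASK field = 5 */
	uint16_t msix_msgctl = 0x001f;	/* sample MSI-X Message Control, table size 32 */

	/* Vector count is encoded as log2: here 1 << 5 = 32 vectors supported. */
	unsigned maxvec = 1u << ((msi_msgctl & PCI_MSI_FLAGS_QMASK) >> 1);

	printf("MSI max vectors  = %u\n", maxvec);
	printf("MSI-X table size = %u\n", msix_table_size(msix_msgctl));
	return 0;
}
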
diff --git a/trunk/drivers/pci/msi.h b/trunk/drivers/pci/msi.h
deleted file mode 100644
index 65c42f80f23e..000000000000
--- a/trunk/drivers/pci/msi.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (C) 2003-2004 Intel
- * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
- */
-
-#ifndef MSI_H
-#define MSI_H
-
-#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
-#define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO)
-#define msi_upper_address_reg(base) (base + PCI_MSI_ADDRESS_HI)
-#define msi_data_reg(base, is64bit) \
- (base + ((is64bit == 1) ? PCI_MSI_DATA_64 : PCI_MSI_DATA_32))
-#define msi_mask_reg(base, is64bit) \
- (base + ((is64bit == 1) ? PCI_MSI_MASK_64 : PCI_MSI_MASK_32))
-#define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT))
-#define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT))
-
-#define msix_table_offset_reg(base) (base + PCI_MSIX_TABLE)
-#define msix_pba_offset_reg(base) (base + PCI_MSIX_PBA)
-#define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1)
-#define multi_msix_capable(control) msix_table_size((control))
-
-#endif /* MSI_H */
diff --git a/trunk/drivers/pci/pci-acpi.c b/trunk/drivers/pci/pci-acpi.c
index dee5dddaa292..e4b1fb2c0f5d 100644
--- a/trunk/drivers/pci/pci-acpi.c
+++ b/trunk/drivers/pci/pci-acpi.c
@@ -53,14 +53,15 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
return;
}
- if (!pci_dev->pm_cap || !pci_dev->pme_support
- || pci_check_pme_status(pci_dev)) {
- if (pci_dev->pme_poll)
- pci_dev->pme_poll = false;
+ /* Clear PME Status if set. */
+ if (pci_dev->pme_support)
+ pci_check_pme_status(pci_dev);
- pci_wakeup_event(pci_dev);
- pm_runtime_resume(&pci_dev->dev);
- }
+ if (pci_dev->pme_poll)
+ pci_dev->pme_poll = false;
+
+ pci_wakeup_event(pci_dev);
+ pm_runtime_resume(&pci_dev->dev);
if (pci_dev->subordinate)
pci_pme_wakeup_bus(pci_dev->subordinate);
@@ -287,6 +288,32 @@ static struct pci_platform_pm_ops acpi_pci_platform_pm = {
.run_wake = acpi_pci_run_wake,
};
+void acpi_pci_add_bus(struct pci_bus *bus)
+{
+ acpi_handle handle = NULL;
+
+ if (bus->bridge)
+ handle = ACPI_HANDLE(bus->bridge);
+ if (acpi_pci_disabled || handle == NULL)
+ return;
+
+ acpi_pci_slot_enumerate(bus, handle);
+ acpiphp_enumerate_slots(bus, handle);
+}
+
+void acpi_pci_remove_bus(struct pci_bus *bus)
+{
+ /*
+ * bus->bridge->acpi_node.handle has already been reset to NULL
+ * when acpi_pci_remove_bus() is called, so don't check ACPI handle.
+ */
+ if (acpi_pci_disabled)
+ return;
+
+ acpiphp_remove_slots(bus);
+ acpi_pci_slot_remove(bus);
+}
+
/* ACPI bus type */
static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
{
@@ -361,7 +388,11 @@ static int __init acpi_pci_init(void)
ret = register_acpi_bus_type(&acpi_pci_bus);
if (ret)
return 0;
+
pci_set_platform_pm(&acpi_pci_platform_pm);
+ acpi_pci_slot_init();
+ acpiphp_init();
+
return 0;
}
arch_initcall(acpi_pci_init);
diff --git a/trunk/drivers/pci/pci-driver.c b/trunk/drivers/pci/pci-driver.c
index 1fa1e482a999..79277fb36c6b 100644
--- a/trunk/drivers/pci/pci-driver.c
+++ b/trunk/drivers/pci/pci-driver.c
@@ -390,9 +390,10 @@ static void pci_device_shutdown(struct device *dev)
/*
* Turn off Bus Master bit on the device to tell it to not
- * continue to do DMA
+ * continue to do DMA. Don't touch devices in D3cold or unknown states.
*/
- pci_clear_master(pci_dev);
+ if (pci_dev->current_state <= PCI_D3hot)
+ pci_clear_master(pci_dev);
}
#ifdef CONFIG_PM
diff --git a/trunk/drivers/pci/pci-sysfs.c b/trunk/drivers/pci/pci-sysfs.c
index 9c6e9bb674ec..5b4a9d9cd200 100644
--- a/trunk/drivers/pci/pci-sysfs.c
+++ b/trunk/drivers/pci/pci-sysfs.c
@@ -897,7 +897,7 @@ int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
if (pci_resource_len(pdev, resno) == 0)
return 0;
- nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ nr = vma_pages(vma);
start = vma->vm_pgoff;
size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
pci_start = (mmap_api == PCI_MMAP_PROCFS) ?
diff --git a/trunk/drivers/pci/pci.c b/trunk/drivers/pci/pci.c
index b099e0025d2b..a899d8bb190d 100644
--- a/trunk/drivers/pci/pci.c
+++ b/trunk/drivers/pci/pci.c
@@ -646,15 +646,11 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
error = platform_pci_set_power_state(dev, state);
if (!error)
pci_update_current_state(dev, state);
- /* Fall back to PCI_D0 if native PM is not supported */
- if (!dev->pm_cap)
- dev->current_state = PCI_D0;
- } else {
+ } else
error = -ENODEV;
- /* Fall back to PCI_D0 if native PM is not supported */
- if (!dev->pm_cap)
- dev->current_state = PCI_D0;
- }
+
+ if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
+ dev->current_state = PCI_D0;
return error;
}
@@ -1575,7 +1571,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
{
u16 pmcsr;
- if (!dev->pm_cap)
+ if (!dev->pme_support)
return;
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
@@ -1924,6 +1920,7 @@ void pci_pm_init(struct pci_dev *dev)
dev->wakeup_prepared = false;
dev->pm_cap = 0;
+ dev->pme_support = 0;
/* find PCI PM capability in list */
pm = pci_find_capability(dev, PCI_CAP_ID_PM);
@@ -1975,8 +1972,6 @@ void pci_pm_init(struct pci_dev *dev)
device_set_wakeup_capable(&dev->dev, true);
/* Disable the PME# generation functionality */
pci_pme_active(dev, false);
- } else {
- dev->pme_support = 0;
}
}
@@ -2619,7 +2614,7 @@ void pci_release_selected_regions(struct pci_dev *pdev, int bars)
pci_release_region(pdev, i);
}
-int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
+static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
const char *res_name, int excl)
{
int i;
@@ -3699,7 +3694,7 @@ static DEFINE_SPINLOCK(resource_alignment_lock);
* RETURNS: Resource alignment if it is specified.
* Zero if it is not specified.
*/
-resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
+static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
{
int seg, bus, slot, func, align_order, count;
resource_size_t align = 0;
@@ -3812,7 +3807,7 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
}
}
-ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
+static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
{
if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
@@ -3823,7 +3818,7 @@ ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
return count;
}
-ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
+static ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
{
size_t count;
spin_lock(&resource_alignment_lock);
diff --git a/trunk/drivers/pci/pci.h b/trunk/drivers/pci/pci.h
index 7346ee68f47d..68678ed76b0d 100644
--- a/trunk/drivers/pci/pci.h
+++ b/trunk/drivers/pci/pci.h
@@ -8,26 +8,25 @@
/* Functions internal to the PCI core code */
-extern int pci_create_sysfs_dev_files(struct pci_dev *pdev);
-extern void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
+int pci_create_sysfs_dev_files(struct pci_dev *pdev);
+void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
#if !defined(CONFIG_DMI) && !defined(CONFIG_ACPI)
static inline void pci_create_firmware_label_files(struct pci_dev *pdev)
{ return; }
static inline void pci_remove_firmware_label_files(struct pci_dev *pdev)
{ return; }
#else
-extern void pci_create_firmware_label_files(struct pci_dev *pdev);
-extern void pci_remove_firmware_label_files(struct pci_dev *pdev);
+void pci_create_firmware_label_files(struct pci_dev *pdev);
+void pci_remove_firmware_label_files(struct pci_dev *pdev);
#endif
-extern void pci_cleanup_rom(struct pci_dev *dev);
+void pci_cleanup_rom(struct pci_dev *dev);
#ifdef HAVE_PCI_MMAP
enum pci_mmap_api {
PCI_MMAP_SYSFS, /* mmap on /sys/bus/pci/devices//resource */
PCI_MMAP_PROCFS /* mmap on /proc/bus/pci/ */
};
-extern int pci_mmap_fits(struct pci_dev *pdev, int resno,
- struct vm_area_struct *vmai,
- enum pci_mmap_api mmap_api);
+int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vmai,
+ enum pci_mmap_api mmap_api);
#endif
int pci_probe_reset_function(struct pci_dev *dev);
@@ -60,17 +59,17 @@ struct pci_platform_pm_ops {
int (*run_wake)(struct pci_dev *dev, bool enable);
};
-extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops);
-extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
-extern void pci_power_up(struct pci_dev *dev);
-extern void pci_disable_enabled_device(struct pci_dev *dev);
-extern int pci_finish_runtime_suspend(struct pci_dev *dev);
-extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
-extern void pci_wakeup_bus(struct pci_bus *bus);
-extern void pci_config_pm_runtime_get(struct pci_dev *dev);
-extern void pci_config_pm_runtime_put(struct pci_dev *dev);
-extern void pci_pm_init(struct pci_dev *dev);
-extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
+int pci_set_platform_pm(struct pci_platform_pm_ops *ops);
+void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
+void pci_power_up(struct pci_dev *dev);
+void pci_disable_enabled_device(struct pci_dev *dev);
+int pci_finish_runtime_suspend(struct pci_dev *dev);
+int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
+void pci_wakeup_bus(struct pci_bus *bus);
+void pci_config_pm_runtime_get(struct pci_dev *dev);
+void pci_config_pm_runtime_put(struct pci_dev *dev);
+void pci_pm_init(struct pci_dev *dev);
+void pci_allocate_cap_save_buffers(struct pci_dev *dev);
void pci_free_cap_save_buffers(struct pci_dev *dev);
static inline void pci_wakeup_event(struct pci_dev *dev)
@@ -96,7 +95,7 @@ struct pci_vpd {
struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
};
-extern int pci_vpd_pci22_init(struct pci_dev *dev);
+int pci_vpd_pci22_init(struct pci_dev *dev);
static inline void pci_vpd_release(struct pci_dev *dev)
{
if (dev->vpd)
@@ -105,9 +104,9 @@ static inline void pci_vpd_release(struct pci_dev *dev)
/* PCI /proc functions */
#ifdef CONFIG_PROC_FS
-extern int pci_proc_attach_device(struct pci_dev *dev);
-extern int pci_proc_detach_device(struct pci_dev *dev);
-extern int pci_proc_detach_bus(struct pci_bus *bus);
+int pci_proc_attach_device(struct pci_dev *dev);
+int pci_proc_detach_device(struct pci_dev *dev);
+int pci_proc_detach_bus(struct pci_bus *bus);
#else
static inline int pci_proc_attach_device(struct pci_dev *dev) { return 0; }
static inline int pci_proc_detach_device(struct pci_dev *dev) { return 0; }
@@ -118,8 +117,8 @@ static inline int pci_proc_detach_bus(struct pci_bus *bus) { return 0; }
int pci_hp_add_bridge(struct pci_dev *dev);
#ifdef HAVE_PCI_LEGACY
-extern void pci_create_legacy_files(struct pci_bus *bus);
-extern void pci_remove_legacy_files(struct pci_bus *bus);
+void pci_create_legacy_files(struct pci_bus *bus);
+void pci_remove_legacy_files(struct pci_bus *bus);
#else
static inline void pci_create_legacy_files(struct pci_bus *bus) { return; }
static inline void pci_remove_legacy_files(struct pci_bus *bus) { return; }
@@ -134,7 +133,7 @@ extern unsigned int pci_pm_d3_delay;
#ifdef CONFIG_PCI_MSI
void pci_no_msi(void);
-extern void pci_msi_init_pci_dev(struct pci_dev *dev);
+void pci_msi_init_pci_dev(struct pci_dev *dev);
#else
static inline void pci_no_msi(void) { }
static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
@@ -198,12 +197,11 @@ enum pci_bar_type {
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
int crs_timeout);
-extern int pci_setup_device(struct pci_dev *dev);
-extern int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
- struct resource *res, unsigned int reg);
-extern int pci_resource_bar(struct pci_dev *dev, int resno,
- enum pci_bar_type *type);
-extern void pci_configure_ari(struct pci_dev *dev);
+int pci_setup_device(struct pci_dev *dev);
+int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ struct resource *res, unsigned int reg);
+int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type);
+void pci_configure_ari(struct pci_dev *dev);
/**
* pci_ari_enabled - query ARI forwarding status
@@ -217,7 +215,7 @@ static inline int pci_ari_enabled(struct pci_bus *bus)
}
void pci_reassigndev_resource_alignment(struct pci_dev *dev);
-extern void pci_disable_bridge_window(struct pci_dev *dev);
+void pci_disable_bridge_window(struct pci_dev *dev);
/* Single Root I/O Virtualization */
struct pci_sriov {
@@ -241,7 +239,7 @@ struct pci_sriov {
};
#ifdef CONFIG_PCI_ATS
-extern void pci_restore_ats_state(struct pci_dev *dev);
+void pci_restore_ats_state(struct pci_dev *dev);
#else
static inline void pci_restore_ats_state(struct pci_dev *dev)
{
@@ -249,14 +247,13 @@ static inline void pci_restore_ats_state(struct pci_dev *dev)
#endif /* CONFIG_PCI_ATS */
#ifdef CONFIG_PCI_IOV
-extern int pci_iov_init(struct pci_dev *dev);
-extern void pci_iov_release(struct pci_dev *dev);
-extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
- enum pci_bar_type *type);
-extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev,
- int resno);
-extern void pci_restore_iov_state(struct pci_dev *dev);
-extern int pci_iov_bus_range(struct pci_bus *bus);
+int pci_iov_init(struct pci_dev *dev);
+void pci_iov_release(struct pci_dev *dev);
+int pci_iov_resource_bar(struct pci_dev *dev, int resno,
+ enum pci_bar_type *type);
+resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
+void pci_restore_iov_state(struct pci_dev *dev);
+int pci_iov_bus_range(struct pci_bus *bus);
#else
static inline int pci_iov_init(struct pci_dev *dev)
@@ -282,10 +279,10 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
#endif /* CONFIG_PCI_IOV */
-extern unsigned long pci_cardbus_resource_alignment(struct resource *);
+unsigned long pci_cardbus_resource_alignment(struct resource *);
static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
- struct resource *res)
+ struct resource *res)
{
#ifdef CONFIG_PCI_IOV
int resno = res - dev->resource;
@@ -298,7 +295,7 @@ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
return resource_alignment(res);
}
-extern void pci_enable_acs(struct pci_dev *dev);
+void pci_enable_acs(struct pci_dev *dev);
struct pci_dev_reset_methods {
u16 vendor;
@@ -307,7 +304,7 @@ struct pci_dev_reset_methods {
};
#ifdef CONFIG_PCI_QUIRKS
-extern int pci_dev_specific_reset(struct pci_dev *dev, int probe);
+int pci_dev_specific_reset(struct pci_dev *dev, int probe);
#else
static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
{
diff --git a/trunk/drivers/pci/pcie/Kconfig b/trunk/drivers/pci/pcie/Kconfig
index fde4a32a0295..569f82fc9e22 100644
--- a/trunk/drivers/pci/pcie/Kconfig
+++ b/trunk/drivers/pci/pcie/Kconfig
@@ -82,4 +82,4 @@ endchoice
config PCIE_PME
def_bool y
- depends on PCIEPORTBUS && PM_RUNTIME && ACPI
+ depends on PCIEPORTBUS && PM_RUNTIME
diff --git a/trunk/drivers/pci/pcie/aer/aer_inject.c b/trunk/drivers/pci/pcie/aer/aer_inject.c
index 4e24cb8a94ae..587e7e853107 100644
--- a/trunk/drivers/pci/pcie/aer/aer_inject.c
+++ b/trunk/drivers/pci/pcie/aer/aer_inject.c
@@ -212,8 +212,8 @@ static int pci_read_aer(struct pci_bus *bus, unsigned int devfn, int where,
return ops->read(bus, devfn, where, size, val);
}
-int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where, int size,
- u32 val)
+static int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 val)
{
u32 *sim;
struct aer_error *err;
@@ -334,13 +334,13 @@ static int aer_inject(struct aer_error_inj *einj)
return -ENODEV;
rpdev = pcie_find_root_port(dev);
if (!rpdev) {
- ret = -ENOTTY;
+ ret = -ENODEV;
goto out_put;
}
pos_cap_err = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
if (!pos_cap_err) {
- ret = -ENOTTY;
+ ret = -EPERM;
goto out_put;
}
pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever);
@@ -350,7 +350,7 @@ static int aer_inject(struct aer_error_inj *einj)
rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR);
if (!rp_pos_cap_err) {
- ret = -ENOTTY;
+ ret = -EPERM;
goto out_put;
}
diff --git a/trunk/drivers/pci/pcie/aer/aerdrv.h b/trunk/drivers/pci/pcie/aer/aerdrv.h
index 22f840f4dda1..d12c77cd6991 100644
--- a/trunk/drivers/pci/pcie/aer/aerdrv.h
+++ b/trunk/drivers/pci/pcie/aer/aerdrv.h
@@ -110,15 +110,15 @@ static inline pci_ers_result_t merge_result(enum pci_ers_result orig,
}
extern struct bus_type pcie_port_bus_type;
-extern void aer_do_secondary_bus_reset(struct pci_dev *dev);
-extern int aer_init(struct pcie_device *dev);
-extern void aer_isr(struct work_struct *work);
-extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
-extern void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info);
-extern irqreturn_t aer_irq(int irq, void *context);
+void aer_do_secondary_bus_reset(struct pci_dev *dev);
+int aer_init(struct pcie_device *dev);
+void aer_isr(struct work_struct *work);
+void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
+void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info);
+irqreturn_t aer_irq(int irq, void *context);
#ifdef CONFIG_ACPI_APEI
-extern int pcie_aer_get_firmware_first(struct pci_dev *pci_dev);
+int pcie_aer_get_firmware_first(struct pci_dev *pci_dev);
#else
static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
{
diff --git a/trunk/drivers/pci/pcie/aer/aerdrv_core.c b/trunk/drivers/pci/pcie/aer/aerdrv_core.c
index 564d97f94b6c..8ec8b4f48560 100644
--- a/trunk/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/trunk/drivers/pci/pcie/aer/aerdrv_core.c
@@ -89,8 +89,6 @@ static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
return -ENOSPC;
}
-#define PCI_BUS(x) (((x) >> 8) & 0xff)
-
/**
* is_error_source - check whether the device is source of reported error
* @dev: pointer to pci_dev to be checked
@@ -106,7 +104,7 @@ static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
* When bus id is equal to 0, it might be a bad id
* reported by root port.
*/
- if (!nosourceid && (PCI_BUS(e_info->id) != 0)) {
+ if (!nosourceid && (PCI_BUS_NUM(e_info->id) != 0)) {
/* Device ID match? */
if (e_info->id == ((dev->bus->number << 8) | dev->devfn))
return true;
diff --git a/trunk/drivers/pci/pcie/pme.c b/trunk/drivers/pci/pcie/pme.c
index 9ca0dc9ffd84..795db1f9d50c 100644
--- a/trunk/drivers/pci/pcie/pme.c
+++ b/trunk/drivers/pci/pcie/pme.c
@@ -19,8 +19,6 @@
#include
#include
#include
-#include